2175 files changed, 57479 insertions, 28563 deletions
@@ -71,6 +71,9 @@ Chao Yu <chao@kernel.org> <chao2.yu@samsung.com> Chao Yu <chao@kernel.org> <yuchao0@huawei.com> Chris Chiu <chris.chiu@canonical.com> <chiu@endlessm.com> Chris Chiu <chris.chiu@canonical.com> <chiu@endlessos.org> +Christian Borntraeger <borntraeger@linux.ibm.com> <borntraeger@de.ibm.com> +Christian Borntraeger <borntraeger@linux.ibm.com> <cborntra@de.ibm.com> +Christian Borntraeger <borntraeger@linux.ibm.com> <borntrae@de.ibm.com> Christophe Ricard <christophe.ricard@gmail.com> Christoph Hellwig <hch@lst.de> Colin Ian King <colin.king@intel.com> <colin.king@canonical.com> @@ -123,6 +126,8 @@ Greg Kroah-Hartman <gregkh@suse.de> Greg Kroah-Hartman <greg@kroah.com> Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com> Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com> +Guo Ren <guoren@kernel.org> <guoren@linux.alibaba.com> +Guo Ren <guoren@kernel.org> <ren_guo@c-sky.com> Gustavo Padovan <gustavo@las.ic.unicamp.br> Gustavo Padovan <padovan@profusion.mobi> Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org> diff --git a/Documentation/admin-guide/blockdev/drbd/figures.rst b/Documentation/admin-guide/blockdev/drbd/figures.rst index bd9a4901fe46..9f73253ea353 100644 --- a/Documentation/admin-guide/blockdev/drbd/figures.rst +++ b/Documentation/admin-guide/blockdev/drbd/figures.rst @@ -25,6 +25,6 @@ Sub graphs of DRBD's state transitions :alt: disk-states-8.dot :align: center -.. kernel-figure:: node-states-8.dot - :alt: node-states-8.dot +.. kernel-figure:: peer-states-8.dot + :alt: peer-states-8.dot :align: center diff --git a/Documentation/admin-guide/blockdev/drbd/node-states-8.dot b/Documentation/admin-guide/blockdev/drbd/peer-states-8.dot index bfa54e1f8016..6dc3954954d6 100644 --- a/Documentation/admin-guide/blockdev/drbd/node-states-8.dot +++ b/Documentation/admin-guide/blockdev/drbd/peer-states-8.dot @@ -1,8 +1,3 @@ -digraph node_states { - Secondary -> Primary [ label = "ioctl_set_state()" ] - Primary -> Secondary [ label = "ioctl_set_state()" ] -} - digraph peer_states { Secondary -> Primary [ label = "recv state packet" ] Primary -> Secondary [ label = "recv state packet" ] diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 9725c546a0d4..eb258526d70d 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3545,6 +3545,13 @@ shutdown the other cpus. Instead use the REBOOT_VECTOR irq. + nomodeset Disable kernel modesetting. DRM drivers will not perform + display-mode changes or accelerated rendering. Only the + system framebuffer will be available for use if this was + set-up by the firmware or boot loader. + + Useful as fallback, or for testing and debugging. + nomodule Disable module load nopat [X86] Disable PAT (page attribute table extension of diff --git a/Documentation/admin-guide/laptops/thinkpad-acpi.rst b/Documentation/admin-guide/laptops/thinkpad-acpi.rst index 6721a80a2d4f..475eb0e81e4a 100644 --- a/Documentation/admin-guide/laptops/thinkpad-acpi.rst +++ b/Documentation/admin-guide/laptops/thinkpad-acpi.rst @@ -1520,15 +1520,15 @@ This sysfs attribute controls the keyboard "face" that will be shown on the Lenovo X1 Carbon 2nd gen (2014)'s adaptive keyboard. The value can be read and set. 
-- 1 = Home mode -- 2 = Web-browser mode -- 3 = Web-conference mode -- 4 = Function mode -- 5 = Layflat mode +- 0 = Home mode +- 1 = Web-browser mode +- 2 = Web-conference mode +- 3 = Function mode +- 4 = Layflat mode For more details about which buttons will appear depending on the mode, please review the laptop's user guide: -http://www.lenovo.com/shop/americas/content/user_guides/x1carbon_2_ug_en.pdf +https://download.lenovo.com/ibmdl/pub/pc/pccbbs/mobiles_pdf/x1carbon_2_ug_en.pdf Battery charge control ---------------------- diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst index 426162009ce9..0e486f41185e 100644 --- a/Documentation/admin-guide/sysctl/kernel.rst +++ b/Documentation/admin-guide/sysctl/kernel.rst @@ -1099,7 +1099,7 @@ task_delayacct =============== Enables/disables task delay accounting (see -:doc:`accounting/delay-accounting.rst`). Enabling this feature incurs +Documentation/accounting/delay-accounting.rst. Enabling this feature incurs a small amount of overhead in the scheduler but is useful for debugging and performance tuning. It is required by some tools such as iotop. diff --git a/Documentation/arm/marvell.rst b/Documentation/arm/marvell.rst index 8323c79d321b..9485a5a2e2e9 100644 --- a/Documentation/arm/marvell.rst +++ b/Documentation/arm/marvell.rst @@ -104,6 +104,8 @@ Discovery family Not supported by the Linux kernel. + Homepage: + https://web.archive.org/web/20110924171043/http://www.marvell.com/embedded-processors/discovery-innovation/ Core: Feroceon 88fr571-vd ARMv5 compatible @@ -120,6 +122,7 @@ EBU Armada family - 88F6707 - 88F6W11 + - Product infos: https://web.archive.org/web/20141002083258/http://www.marvell.com/embedded-processors/armada-370/ - Product Brief: https://web.archive.org/web/20121115063038/http://www.marvell.com/embedded-processors/armada-300/assets/Marvell_ARMADA_370_SoC.pdf - Hardware Spec: https://web.archive.org/web/20140617183747/http://www.marvell.com/embedded-processors/armada-300/assets/ARMADA370-datasheet.pdf - Functional Spec: https://web.archive.org/web/20140617183701/http://www.marvell.com/embedded-processors/armada-300/assets/ARMADA370-FunctionalSpec-datasheet.pdf @@ -127,9 +130,29 @@ EBU Armada family Core: Sheeva ARMv7 compatible PJ4B + Armada XP Flavors: + - MV78230 + - MV78260 + - MV78460 + + NOTE: + not to be confused with the non-SMP 78xx0 SoCs + + - Product infos: https://web.archive.org/web/20150101215721/http://www.marvell.com/embedded-processors/armada-xp/ + - Product Brief: https://web.archive.org/web/20121021173528/http://www.marvell.com/embedded-processors/armada-xp/assets/Marvell-ArmadaXP-SoC-product%20brief.pdf + - Functional Spec: https://web.archive.org/web/20180829171131/http://www.marvell.com/embedded-processors/armada-xp/assets/ARMADA-XP-Functional-SpecDatasheet.pdf + - Hardware Specs: + - https://web.archive.org/web/20141127013651/http://www.marvell.com/embedded-processors/armada-xp/assets/HW_MV78230_OS.PDF + - https://web.archive.org/web/20141222000224/http://www.marvell.com/embedded-processors/armada-xp/assets/HW_MV78260_OS.PDF + - https://web.archive.org/web/20141222000230/http://www.marvell.com/embedded-processors/armada-xp/assets/HW_MV78460_OS.PDF + + Core: + Sheeva ARMv7 compatible Dual-core or Quad-core PJ4B-MP + Armada 375 Flavors: - 88F6720 + - Product infos: https://web.archive.org/web/20140108032402/http://www.marvell.com/embedded-processors/armada-375/ - Product Brief: 
https://web.archive.org/web/20131216023516/http://www.marvell.com/embedded-processors/armada-300/assets/ARMADA_375_SoC-01_product_brief.pdf Core: @@ -162,29 +185,6 @@ EBU Armada family Core: ARM Cortex-A9 - Armada XP Flavors: - - MV78230 - - MV78260 - - MV78460 - - NOTE: - not to be confused with the non-SMP 78xx0 SoCs - - Product Brief: - https://web.archive.org/web/20121021173528/http://www.marvell.com/embedded-processors/armada-xp/assets/Marvell-ArmadaXP-SoC-product%20brief.pdf - - Functional Spec: - https://web.archive.org/web/20180829171131/http://www.marvell.com/embedded-processors/armada-xp/assets/ARMADA-XP-Functional-SpecDatasheet.pdf - - - Hardware Specs: - - - https://web.archive.org/web/20141127013651/http://www.marvell.com/embedded-processors/armada-xp/assets/HW_MV78230_OS.PDF - - https://web.archive.org/web/20141222000224/http://www.marvell.com/embedded-processors/armada-xp/assets/HW_MV78260_OS.PDF - - https://web.archive.org/web/20141222000230/http://www.marvell.com/embedded-processors/armada-xp/assets/HW_MV78460_OS.PDF - - Core: - Sheeva ARMv7 compatible Dual-core or Quad-core PJ4B-MP - Linux kernel mach directory: arch/arm/mach-mvebu Linux kernel plat directory: @@ -436,7 +436,7 @@ Berlin family (Multimedia Solutions) - Flavors: - 88DE3010, Armada 1000 (no Linux support) - Core: Marvell PJ1 (ARMv5TE), Dual-core - - Product Brief: http://www.marvell.com.cn/digital-entertainment/assets/armada_1000_pb.pdf + - Product Brief: https://web.archive.org/web/20131103162620/http://www.marvell.com/digital-entertainment/assets/armada_1000_pb.pdf - 88DE3005, Armada 1500 Mini - Design name: BG2CD - Core: ARM Cortex-A9, PL310 L2CC diff --git a/Documentation/arm64/pointer-authentication.rst b/Documentation/arm64/pointer-authentication.rst index f127666ea3a8..e5dad2e40aa8 100644 --- a/Documentation/arm64/pointer-authentication.rst +++ b/Documentation/arm64/pointer-authentication.rst @@ -53,11 +53,10 @@ The number of bits that the PAC occupies in a pointer is 55 minus the virtual address size configured by the kernel. For example, with a virtual address size of 48, the PAC is 7 bits wide. -Recent versions of GCC can compile code with APIAKey-based return -address protection when passed the -msign-return-address option. This -uses instructions in the HINT space (unless -march=armv8.3-a or higher -is also passed), and such code can run on systems without the pointer -authentication extension. +When ARM64_PTR_AUTH_KERNEL is selected, the kernel will be compiled +with HINT space pointer authentication instructions protecting +function returns. Kernels built with this option will work on hardware +with or without pointer authentication support. In addition to exec(), keys can also be reinitialized to random values using the PR_PAC_RESET_KEYS prctl. A bitmask of PR_PAC_APIAKEY, diff --git a/Documentation/bpf/index.rst b/Documentation/bpf/index.rst index 37f273a7e8b6..610450f59e05 100644 --- a/Documentation/bpf/index.rst +++ b/Documentation/bpf/index.rst @@ -15,7 +15,7 @@ that goes into great technical depth about the BPF Architecture. libbpf ====== -Documentation/bpf/libbpf/libbpf.rst is a userspace library for loading and interacting with bpf programs. +Documentation/bpf/libbpf/index.rst is a userspace library for loading and interacting with bpf programs. 
BPF Type Format (BTF) ===================== diff --git a/Documentation/conf.py b/Documentation/conf.py index 17f7cee56987..76e5eb5cb62b 100644 --- a/Documentation/conf.py +++ b/Documentation/conf.py @@ -249,11 +249,16 @@ except ImportError: html_static_path = ['sphinx-static'] -html_context = { - 'css_files': [ - '_static/theme_overrides.css', - ], -} +html_css_files = [ + 'theme_overrides.css', +] + +if major <= 1 and minor < 8: + html_context = { + 'css_files': [ + '_static/theme_overrides.css', + ], + } # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied diff --git a/Documentation/cpu-freq/core.rst b/Documentation/cpu-freq/core.rst index 33cb90bd1d8f..4ceef8e7217c 100644 --- a/Documentation/cpu-freq/core.rst +++ b/Documentation/cpu-freq/core.rst @@ -73,12 +73,12 @@ CPUFREQ_POSTCHANGE. The third argument is a struct cpufreq_freqs with the following values: -===== =========================== -cpu number of the affected CPU +====== ====================================== +policy a pointer to the struct cpufreq_policy old old frequency new new frequency flags flags of the cpufreq driver -===== =========================== +====== ====================================== 3. CPUFreq Table Generation with Operating Performance Point (OPP) ================================================================== diff --git a/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml b/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml index ab48ab2f4240..1d3e88daca04 100644 --- a/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml +++ b/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml @@ -43,14 +43,70 @@ properties: vdd33-supply: description: Regulator that provides the supply 3.3V power. + analogix,lane0-swing: + $ref: /schemas/types.yaml#/definitions/uint8-array + minItems: 1 + maxItems: 20 + description: + an array of swing register setting for DP tx lane0 PHY. + Registers 0~9 are Swing0_Pre0, Swing1_Pre0, Swing2_Pre0, + Swing3_Pre0, Swing0_Pre1, Swing1_Pre1, Swing2_Pre1, Swing0_Pre2, + Swing1_Pre2, Swing0_Pre3, they are for [Boost control] and + [Swing control] setting. + Registers 0~9, bit 3:0 is [Boost control], these bits control + post cursor manual, increase the [Boost control] to increase + Pre-emphasis value. + Registers 0~9, bit 6:4 is [Swing control], these bits control + swing manual, increase [Swing control] setting to add Vp-p value + for each Swing, Pre. + Registers 10~19 are Swing0_Pre0, Swing1_Pre0, Swing2_Pre0, + Swing3_Pre0, Swing0_Pre1, Swing1_Pre1, Swing2_Pre1, Swing0_Pre2, + Swing1_Pre2, Swing0_Pre3, they are for [R select control] and + [R Termination control] setting. + Registers 10~19, bit 4:0 is [R select control], these bits are + compensation manual, increase it can enhance IO driven strength + and Vp-p. + Registers 10~19, bit 5:6 is [R termination control], these bits + adjust 50ohm impedance of DP tx termination. 00:55 ohm, + 01:50 ohm(default), 10:45 ohm, 11:40 ohm. + + analogix,lane1-swing: + $ref: /schemas/types.yaml#/definitions/uint8-array + minItems: 1 + maxItems: 20 + description: + an array of swing register setting for DP tx lane1 PHY. + DP TX lane1 swing register setting same with lane0 + swing, please refer lane0-swing property description. + + analogix,audio-enable: + type: boolean + description: let the driver enable audio HDMI codec function or not. 
+ ports: $ref: /schemas/graph.yaml#/properties/ports properties: port@0: - $ref: /schemas/graph.yaml#/properties/port + $ref: /schemas/graph.yaml#/$defs/port-base + unevaluatedProperties: false description: - Video port for MIPI DSI input. + MIPI DSI/DPI input. + + properties: + endpoint: + $ref: /schemas/media/video-interfaces.yaml# + type: object + additionalProperties: false + + properties: + remote-endpoint: true + + bus-type: + enum: [1, 5] + default: 1 + + data-lanes: true port@1: $ref: /schemas/graph.yaml#/properties/port @@ -87,6 +143,9 @@ examples: vdd10-supply = <&pp1000_mipibrdg>; vdd18-supply = <&pp1800_mipibrdg>; vdd33-supply = <&pp3300_mipibrdg>; + analogix,audio-enable; + analogix,lane0-swing = /bits/ 8 <0x14 0x54 0x64 0x74>; + analogix,lane1-swing = /bits/ 8 <0x14 0x54 0x64 0x74>; ports { #address-cells = <1>; @@ -96,6 +155,8 @@ examples: reg = <0>; anx7625_in: endpoint { remote-endpoint = <&mipi_dsi>; + bus-type = <5>; + data-lanes = <0 1 2 3>; }; }; diff --git a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml index 1faae3e323a4..708de84ac138 100644 --- a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml +++ b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml @@ -79,6 +79,14 @@ properties: - port@0 - port@1 + pclk-sample: + description: + Data sampling on rising or falling edge. + enum: + - 0 # Falling edge + - 1 # Rising edge + default: 0 + powerdown-gpios: description: The GPIO used to control the power down line of this device. @@ -102,6 +110,16 @@ then: properties: data-mapping: false +if: + not: + properties: + compatible: + contains: + const: lvds-encoder +then: + properties: + pclk-sample: false + required: - compatible - ports diff --git a/Documentation/devicetree/bindings/display/bridge/nxp,ptn3460.yaml b/Documentation/devicetree/bindings/display/bridge/nxp,ptn3460.yaml new file mode 100644 index 000000000000..107dd138e6c6 --- /dev/null +++ b/Documentation/devicetree/bindings/display/bridge/nxp,ptn3460.yaml @@ -0,0 +1,106 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/bridge/nxp,ptn3460.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: NXP PTN3460 eDP to LVDS bridge + +maintainers: + - Sean Paul <seanpaul@chromium.org> + +properties: + compatible: + const: nxp,ptn3460 + + reg: + description: I2C address of the bridge + maxItems: 1 + + edid-emulation: + $ref: "/schemas/types.yaml#/definitions/uint32" + description: + The EDID emulation entry to use + Value Resolution Description + 0 1024x768 NXP Generic + 1 1920x1080 NXP Generic + 2 1920x1080 NXP Generic + 3 1600x900 Samsung LTM200KT + 4 1920x1080 Samsung LTM230HT + 5 1366x768 NXP Generic + 6 1600x900 ChiMei M215HGE + enum: [0, 1, 2, 3, 4, 5, 6] + + powerdown-gpios: + description: GPIO connected to the PD_N signal. + maxItems: 1 + + reset-gpios: + description: GPIO connected to the RST_N signal. 
+ maxItems: 1 + + ports: + $ref: /schemas/graph.yaml#/properties/ports + + properties: + port@0: + $ref: /schemas/graph.yaml#/properties/port + description: + Video port for LVDS output + + port@1: + $ref: /schemas/graph.yaml#/properties/port + description: + Video port for eDP input + + required: + - port@0 + - port@1 + +required: + - compatible + - reg + - edid-emulation + - powerdown-gpios + - reset-gpios + - ports + +additionalProperties: false + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + + i2c1 { + #address-cells = <1>; + #size-cells = <0>; + + bridge@20 { + compatible = "nxp,ptn3460"; + reg = <0x20>; + edid-emulation = <5>; + powerdown-gpios = <&gpy2 5 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpx1 5 GPIO_ACTIVE_LOW>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + bridge_out: endpoint { + remote-endpoint = <&panel_in>; + }; + }; + + port@1 { + reg = <1>; + bridge_in: endpoint { + remote-endpoint = <&dp_out>; + }; + }; + }; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/display/bridge/ptn3460.txt b/Documentation/devicetree/bindings/display/bridge/ptn3460.txt deleted file mode 100644 index 361971ba104d..000000000000 --- a/Documentation/devicetree/bindings/display/bridge/ptn3460.txt +++ /dev/null @@ -1,39 +0,0 @@ -ptn3460 bridge bindings - -Required properties: - - compatible: "nxp,ptn3460" - - reg: i2c address of the bridge - - powerdown-gpio: OF device-tree gpio specification for PD_N pin. - - reset-gpio: OF device-tree gpio specification for RST_N pin. - - edid-emulation: The EDID emulation entry to use - +-------+------------+------------------+ - | Value | Resolution | Description | - | 0 | 1024x768 | NXP Generic | - | 1 | 1920x1080 | NXP Generic | - | 2 | 1920x1080 | NXP Generic | - | 3 | 1600x900 | Samsung LTM200KT | - | 4 | 1920x1080 | Samsung LTM230HT | - | 5 | 1366x768 | NXP Generic | - | 6 | 1600x900 | ChiMei M215HGE | - +-------+------------+------------------+ - - - video interfaces: Device node can contain video interface port - nodes for panel according to [1]. - -[1]: Documentation/devicetree/bindings/media/video-interfaces.txt - -Example: - lvds-bridge@20 { - compatible = "nxp,ptn3460"; - reg = <0x20>; - powerdown-gpio = <&gpy2 5 1 0 0>; - reset-gpio = <&gpx1 5 1 0 0>; - edid-emulation = <5>; - ports { - port@0 { - bridge_out: endpoint { - remote-endpoint = <&panel_in>; - }; - }; - }; - }; diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,dsi-csi2-tx.yaml b/Documentation/devicetree/bindings/display/bridge/renesas,dsi-csi2-tx.yaml new file mode 100644 index 000000000000..afeeb967393d --- /dev/null +++ b/Documentation/devicetree/bindings/display/bridge/renesas,dsi-csi2-tx.yaml @@ -0,0 +1,118 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/bridge/renesas,dsi-csi2-tx.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Renesas R-Car MIPI DSI/CSI-2 Encoder + +maintainers: + - Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com> + +description: | + This binding describes the MIPI DSI/CSI-2 encoder embedded in the Renesas + R-Car V3U SoC. The encoder can operate in either DSI or CSI-2 mode, with up + to four data lanes. 
+ +properties: + compatible: + enum: + - renesas,r8a779a0-dsi-csi2-tx # for V3U + + reg: + maxItems: 1 + + clocks: + items: + - description: Functional clock + - description: DSI (and CSI-2) functional clock + - description: PLL reference clock + + clock-names: + items: + - const: fck + - const: dsi + - const: pll + + power-domains: + maxItems: 1 + + resets: + maxItems: 1 + + ports: + $ref: /schemas/graph.yaml#/properties/ports + + properties: + port@0: + $ref: /schemas/graph.yaml#/properties/port + description: Parallel input port + + port@1: + $ref: /schemas/graph.yaml#/$defs/port-base + unevaluatedProperties: false + description: DSI/CSI-2 output port + + properties: + endpoint: + $ref: /schemas/media/video-interfaces.yaml# + unevaluatedProperties: false + + properties: + data-lanes: + minItems: 1 + maxItems: 4 + + required: + - data-lanes + + required: + - port@0 + - port@1 + +required: + - compatible + - reg + - clocks + - power-domains + - resets + - ports + +additionalProperties: false + +examples: + - | + #include <dt-bindings/clock/r8a779a0-cpg-mssr.h> + #include <dt-bindings/power/r8a779a0-sysc.h> + + dsi0: dsi-encoder@fed80000 { + compatible = "renesas,r8a779a0-dsi-csi2-tx"; + reg = <0xfed80000 0x10000>; + power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>; + clocks = <&cpg CPG_MOD 415>, + <&cpg CPG_CORE R8A779A0_CLK_DSI>, + <&cpg CPG_CORE R8A779A0_CLK_CP>; + clock-names = "fck", "dsi", "pll"; + resets = <&cpg 415>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + dsi0_in: endpoint { + remote-endpoint = <&du_out_dsi0>; + }; + }; + + port@1 { + reg = <1>; + dsi0_out: endpoint { + data-lanes = <1 2>; + remote-endpoint = <&sn65dsi86_in>; + }; + }; + }; + }; +... diff --git a/Documentation/devicetree/bindings/display/bridge/sii9234.txt b/Documentation/devicetree/bindings/display/bridge/sii9234.txt deleted file mode 100644 index a55bf77bd960..000000000000 --- a/Documentation/devicetree/bindings/display/bridge/sii9234.txt +++ /dev/null @@ -1,49 +0,0 @@ -Silicon Image SiI9234 HDMI/MHL bridge bindings - -Required properties: - - compatible : "sil,sii9234". - - reg : I2C address for TPI interface, use 0x39 - - avcc33-supply : MHL/USB Switch Supply Voltage (3.3V) - - iovcc18-supply : I/O Supply Voltage (1.8V) - - avcc12-supply : TMDS Analog Supply Voltage (1.2V) - - cvcc12-supply : Digital Core Supply Voltage (1.2V) - - interrupts: interrupt specifier of INT pin - - reset-gpios: gpio specifier of RESET pin (active low) - - video interfaces: Device node can contain two video interface port - nodes for HDMI encoder and connector according to [1]. 
- - port@0 - MHL to HDMI - - port@1 - MHL to connector - -[1]: Documentation/devicetree/bindings/media/video-interfaces.txt - - -Example: - sii9234@39 { - compatible = "sil,sii9234"; - reg = <0x39>; - avcc33-supply = <&vcc33mhl>; - iovcc18-supply = <&vcc18mhl>; - avcc12-supply = <&vsil12>; - cvcc12-supply = <&vsil12>; - reset-gpios = <&gpf3 4 GPIO_ACTIVE_LOW>; - interrupt-parent = <&gpf3>; - interrupts = <5 IRQ_TYPE_LEVEL_HIGH>; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - port@0 { - reg = <0>; - mhl_to_hdmi: endpoint { - remote-endpoint = <&hdmi_to_mhl>; - }; - }; - port@1 { - reg = <1>; - mhl_to_connector: endpoint { - remote-endpoint = <&connector_to_mhl>; - }; - }; - }; - }; diff --git a/Documentation/devicetree/bindings/display/bridge/sil,sii9234.yaml b/Documentation/devicetree/bindings/display/bridge/sil,sii9234.yaml new file mode 100644 index 000000000000..f88ddfe4818b --- /dev/null +++ b/Documentation/devicetree/bindings/display/bridge/sil,sii9234.yaml @@ -0,0 +1,110 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/bridge/sil,sii9234.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Silicon Image SiI9234 HDMI/MHL bridge + +maintainers: + - Maciej Purski <m.purski@samsung.com> + +properties: + compatible: + const: sil,sii9234 + + reg: + description: I2C address for TPI interface + maxItems: 1 + + avcc12-supply: + description: TMDS Analog Supply Voltage, 1.2V + + avcc33-supply: + description: MHL/USB Switch Supply Voltage, 3.3V + + cvcc12-supply: + description: Digital Core Supply Voltage, 1.2V + + iovcc18-supply: + description: I/O voltage supply, 1.8V + + interrupts: + maxItems: 1 + + reset-gpios: + description: GPIO connected to the reset pin. + maxItems: 1 + + ports: + $ref: /schemas/graph.yaml#/properties/ports + + properties: + port@0: + $ref: /schemas/graph.yaml#/properties/port + description: + Video port for HDMI (encoder) input + + port@1: + $ref: /schemas/graph.yaml#/properties/port + description: + MHL to connector port + + required: + - port@0 + +required: + - compatible + - reg + - avcc12-supply + - avcc33-supply + - cvcc12-supply + - iovcc18-supply + - interrupts + - reset-gpios + - ports + +additionalProperties: false + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + #include <dt-bindings/interrupt-controller/irq.h> + + i2c1 { + #address-cells = <1>; + #size-cells = <0>; + + bridge@39 { + compatible = "sil,sii9234"; + reg = <0x39>; + avcc12-supply = <&vsil12>; + avcc33-supply = <&vcc33mhl>; + cvcc12-supply = <&vsil12>; + iovcc18-supply = <&vcc18mhl>; + interrupt-parent = <&gpf3>; + interrupts = <5 IRQ_TYPE_LEVEL_HIGH>; + reset-gpios = <&gpf3 4 GPIO_ACTIVE_LOW>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + mhl_to_hdmi: endpoint { + remote-endpoint = <&hdmi_to_mhl>; + }; + }; + + port@1 { + reg = <1>; + mhl_to_connector: endpoint { + remote-endpoint = <&connector_to_mhl>; + }; + }; + }; + }; + }; + +... 
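Both bridge conversions above replace free-form text bindings with YAML schemas that the kernel's binding tooling can validate. As an aside (the commands below are not part of the patch, and assume the dtschema Python package is installed), a converted schema such as sil,sii9234.yaml can be checked from the top of the kernel tree with:

    $ pip install dtschema
    $ make dt_binding_check DT_SCHEMA_FILES=Documentation/devicetree/bindings/display/bridge/sil,sii9234.yaml

This validates the schema against the devicetree meta-schemas and also builds and checks the example node embedded in the binding.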
diff --git a/Documentation/devicetree/bindings/display/panel/boe,bf060y8m-aj0.yaml b/Documentation/devicetree/bindings/display/panel/boe,bf060y8m-aj0.yaml new file mode 100644 index 000000000000..a8f3afa922c8 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/boe,bf060y8m-aj0.yaml @@ -0,0 +1,81 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/boe,bf060y8m-aj0.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: BOE BF060Y8M-AJ0 5.99" 1080x2160 AMOLED Panel + +maintainers: + - AngeloGioacchino Del Regno <angelogioacchino.delregno@somainline.org> + +description: | + This is a 5.99" 1080x2160 16.7M Color active matrix AMOLED + video mode panel module on MIPI-DSI 4-Lane interface, GGRB + pixel arrangement, 63 micrometers pitch, with an active + area of 68.04 x 136.08 millimeters. + Each pixel is divided into red and green dots, or blue and + green dots, and two pixels share red or blue dots which are + arranged in vertical stripe. + The DriverIC for this panel module is SW43404. + +allOf: + - $ref: panel-common.yaml# + +properties: + compatible: + const: boe,bf060y8m-aj0 + + elvdd-supply: + description: EL Driving positive (VDD) supply (4.40-4.80V) + elvss-supply: + description: EL Driving negative (VSS) supply (-5.00V to -1.40V) + vcc-supply: + description: Core (TSP) voltage supply (2.70-3.60V) + vci-supply: + description: DriverIC Operation supply (2.60-3.60V) + vddio-supply: + description: I/O voltage supply (1.62-1.98V) + + port: true + reg: true + reset-gpios: true + +required: + - compatible + - elvdd-supply + - elvss-supply + - vcc-supply + - vci-supply + - vddio-supply + - reg + - reset-gpios + +additionalProperties: false + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + + dsi { + #address-cells = <1>; + #size-cells = <0>; + panel@0 { + compatible = "boe,bf060y8m-aj0"; + reg = <0>; + + reset-gpios = <&tlmm 94 GPIO_ACTIVE_HIGH>; + + vcc-supply = <&disp_vcc_vreg>; + vddio-supply = <&disp_vddio_vreg>; + vci-supply = <&disp_vci_vreg>; + elvdd-supply = <&disp_elvdd_vreg>; + elvss-supply = <&disp_elvss_vreg>; + + port { + panel_in: endpoint { + remote-endpoint = <&dsi0_out>; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9163.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9163.yaml new file mode 100644 index 000000000000..7e7a8362b951 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9163.yaml @@ -0,0 +1,69 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/ilitek,ili9163.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Ilitek ILI9163 display panels device tree bindings + +maintainers: + - Daniel Mack <daniel@zonque.org> + +description: + This binding is for display panels using an Ilitek ILI9163 controller in SPI + mode. 
+ +allOf: + - $ref: panel-common.yaml# + +properties: + compatible: + items: + - enum: + - newhaven,1.8-128160EF + - const: ilitek,ili9163 + + spi-max-frequency: + maximum: 32000000 + + dc-gpios: + maxItems: 1 + description: Display data/command selection (D/CX) + + backlight: true + reg: true + reset-gpios: true + rotation: true + +required: + - compatible + - reg + - dc-gpios + - reset-gpios + +additionalProperties: false + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + + backlight: backlight { + compatible = "gpio-backlight"; + gpios = <&gpio 22 GPIO_ACTIVE_HIGH>; + }; + spi { + #address-cells = <1>; + #size-cells = <0>; + + display@0 { + compatible = "newhaven,1.8-128160EF", "ilitek,ili9163"; + reg = <0>; + spi-max-frequency = <32000000>; + dc-gpios = <&gpio0 24 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpio0 25 GPIO_ACTIVE_HIGH>; + rotation = <180>; + backlight = <&backlight>; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml index b2fcec4f22fd..c5d1df680858 100644 --- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml +++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml @@ -9,24 +9,28 @@ title: Ilitek ILI9881c based MIPI-DSI panels maintainers: - Maxime Ripard <mripard@kernel.org> +allOf: + - $ref: panel-common.yaml# + properties: compatible: items: - enum: - bananapi,lhr050h41 - feixin,k101-im2byl02 + - wanchanglong,w552946aba - const: ilitek,ili9881c backlight: true power-supply: true reg: true reset-gpios: true + rotation: true required: - compatible - power-supply - reg - - reset-gpios additionalProperties: false diff --git a/Documentation/devicetree/bindings/display/panel/novatek,nt35950.yaml b/Documentation/devicetree/bindings/display/panel/novatek,nt35950.yaml new file mode 100644 index 000000000000..377a05d48a02 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/novatek,nt35950.yaml @@ -0,0 +1,106 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/novatek,nt35950.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Novatek NT35950-based display panels + +maintainers: + - AngeloGioacchino Del Regno <angelogioacchino.delregno@somainline.org> + +description: | + The nt35950 IC from Novatek is a Driver IC used to drive MIPI-DSI panels, + with Static RAM for content retention in command mode and also supports + video mode with VESA Frame Buffer Compression or Display Stream Compression + on single, or dual dsi port(s). + This DDIC is also capable of upscaling an input image to the panel's native + resolution, for example it can upscale a 1920x1080 input to 3840x2160 with + either bilinear interpolation or pixel duplication. + +allOf: + - $ref: panel-common.yaml# + +properties: + compatible: + items: + - enum: + - sharp,ls055d1sx04 + - const: novatek,nt35950 + description: This indicates the panel manufacturer of the panel + that is in turn using the NT35950 panel driver. The compatible + string determines how the NT35950 panel driver shall be configured + to work with the indicated panel. The novatek,nt35950 compatible shall + always be provided as a fallback. 
+
+  reset-gpios:
+    maxItems: 1
+    description: phandle of gpio for reset line - This should be 8mA, gpio
+      can be configured using mux, pinctrl, pinctrl-names (active high)
+
+  avdd-supply:
+    description: positive boost supply regulator
+  avee-supply:
+    description: negative boost supply regulator
+  dvdd-supply:
+    description: regulator that supplies the digital voltage
+  vddio-supply:
+    description: regulator that supplies the I/O voltage
+
+  backlight: true
+  ports: true
+  reg: true
+
+required:
+  - compatible
+  - reg
+  - reset-gpios
+  - avdd-supply
+  - avee-supply
+  - dvdd-supply
+  - vddio-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi0 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "sharp,ls055d1sx04", "novatek,nt35950";
+            reg = <0>;
+
+            backlight = <&pmi8998_wled>;
+            reset-gpios = <&tlmm 94 GPIO_ACTIVE_HIGH>;
+
+            avdd-supply = <&lab>;
+            avee-supply = <&ibb>;
+            dvdd-supply = <&disp_dvdd_vreg>;
+            vddio-supply = <&vreg_l14a_1p85>;
+
+            ports {
+                #address-cells = <1>;
+                #size-cells = <0>;
+
+                port@0 {
+                    reg = <0>;
+                    panel_in0: endpoint {
+                        remote-endpoint = <&dsi0_out>;
+                    };
+                };
+
+                port@1 {
+                    reg = <1>;
+                    panel_in1: endpoint {
+                        remote-endpoint = <&dsi1_out>;
+                    };
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
index fbd71669248f..2c00813f5d20 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
@@ -35,6 +35,8 @@ properties:
       - boe,tv080wum-nl0
         # Innolux P079ZCA 7.85" 768x1024 TFT LCD panel
       - innolux,p079zca
+        # JDI FHD_R63452 1080x1920 5.2" IPS LCD Panel
+      - jdi,fhd-r63452
         # Khadas TS050 5" 1080x1920 LCD panel
       - khadas,ts050
         # Kingdisplay KD097D04 9.7" 1536x2048 TFT LCD panel
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index f3c9395d23b6..62f5f050c1bc 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -290,6 +290,8 @@ properties:
       - starry,kr070pe2t
         # Starry 12.2" (1920x1200 pixels) TFT LCD panel
       - starry,kr122ea0sra
+        # Team Source Display Technology TST043015CMHX 4.3" WQVGA TFT LCD panel
+      - team-source-display,tst043015cmhx
        # Tianma Micro-electronics TM070JDHG30 7.0" WXGA TFT LCD panel
      - tianma,tm070jdhg30
        # Tianma Micro-electronics TM070JVHG33 7.0" WXGA TFT LCD panel
diff --git a/Documentation/devicetree/bindings/display/panel/sony,tulip-truly-nt35521.yaml b/Documentation/devicetree/bindings/display/panel/sony,tulip-truly-nt35521.yaml
new file mode 100644
index 000000000000..967972939598
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/sony,tulip-truly-nt35521.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/sony,tulip-truly-nt35521.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sony Tulip Truly NT35521 5.24" 1280x720 MIPI-DSI Panel
+
+maintainers:
+  - Shawn Guo <shawn.guo@linaro.org>
+
+description: |
+  The Sony Tulip Truly NT35521 is a 5.24" 1280x720 MIPI-DSI panel, which
+  can be found on the Sony Xperia M4 phone. The panel backlight is managed
+  through the DSI link.
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: sony,tulip-truly-nt35521
+
+  reg: true
+
+  positive5-supply:
+    description: Positive 5V supply
+
+  negative5-supply:
+    description: Negative 5V supply
+
+  reset-gpios: true
+
+  enable-gpios: true
+
+  port: true
+
+required:
+  - compatible
+  - reg
+  - positive5-supply
+  - negative5-supply
+  - reset-gpios
+  - enable-gpios
+  - port
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "sony,tulip-truly-nt35521";
+            reg = <0>;
+            positive5-supply = <&positive5_reg>;
+            negative5-supply = <&negative5_reg>;
+            reset-gpios = <&msmgpio 25 GPIO_ACTIVE_LOW>;
+            enable-gpios = <&msmgpio 10 GPIO_ACTIVE_HIGH>;
+
+            port {
+                panel_in: endpoint {
+                    remote-endpoint = <&dsi0_out>;
+                };
+            };
+        };
+    };
+...
diff --git a/Documentation/devicetree/bindings/display/sprd/sprd,display-subsystem.yaml b/Documentation/devicetree/bindings/display/sprd/sprd,display-subsystem.yaml
new file mode 100644
index 000000000000..3d107e9434be
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/sprd/sprd,display-subsystem.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/sprd/sprd,display-subsystem.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Unisoc DRM master device
+
+maintainers:
+  - Kevin Tang <kevin.tang@unisoc.com>
+
+description: |
+  The Unisoc DRM master device is a virtual device needed to list all
+  DPU devices or other display interface nodes that comprise the
+  graphics subsystem.
+
+  Unisoc's display pipeline has several components, described below:
+  multiple display controllers and their corresponding physical
+  interfaces. For different display scenarios, dpu0 and dpu1 may be
+  bound to different encoders.
+
+  E.g.:
+  dpu0 and dpu1 both bound to DSI for dual mipi-dsi display;
+  dpu0 bound to DSI for primary display, and dpu1 bound to DP for external display;
+
+ +-----------------------------------------+
+ | |
+ | +---------+ |
+ +----+ | +----+ +---------+ |DPHY/CPHY| | +------+
+ | +----->+dpu0+--->+MIPI|DSI +--->+Combo +----->+Panel0|
+ |AXI | | +----+ +---------+ +---------+ | +------+
+ | | | ^ |
+ | | | | |
+ | | | +-----------+ |
+ | | | | |
+ |APB | | +--+-+ +-----------+ +---+ | +------+
+ | +----->+dpu1+--->+DisplayPort+--->+PHY+--------->+Panel1|
+ | | | +----+ +-----------+ +---+ | +------+
+ +----+ | |
+ +-----------------------------------------+
+
+properties:
+  compatible:
+    const: sprd,display-subsystem
+
+  ports:
+    $ref: /schemas/types.yaml#/definitions/phandle-array
+    description:
+      Should contain a list of phandles pointing to the display interface
+      ports of the DPU devices.
+ +required: + - compatible + - ports + +additionalProperties: false + +examples: + - | + display-subsystem { + compatible = "sprd,display-subsystem"; + ports = <&dpu_out>; + }; + diff --git a/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dpu.yaml b/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dpu.yaml new file mode 100644 index 000000000000..4ebea60b8c5b --- /dev/null +++ b/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dpu.yaml @@ -0,0 +1,77 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/sprd/sprd,sharkl3-dpu.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Unisoc Sharkl3 Display Processor Unit (DPU) + +maintainers: + - Kevin Tang <kevin.tang@unisoc.com> + +description: | + DPU (Display Processor Unit) is the Display Controller for the Unisoc SoCs + which transfers the image data from a video memory buffer to an internal + LCD interface. + +properties: + compatible: + const: sprd,sharkl3-dpu + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + minItems: 2 + + clock-names: + items: + - const: clk_src_128m + - const: clk_src_384m + + power-domains: + maxItems: 1 + + iommus: + maxItems: 1 + + port: + type: object + description: + A port node with endpoint definitions as defined in + Documentation/devicetree/bindings/media/video-interfaces.txt. + That port should be the output endpoint, usually output to + the associated DSI. + +required: + - compatible + - reg + - interrupts + - clocks + - clock-names + - port + +additionalProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/arm-gic.h> + #include <dt-bindings/clock/sprd,sc9860-clk.h> + dpu: dpu@63000000 { + compatible = "sprd,sharkl3-dpu"; + reg = <0x63000000 0x1000>; + interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>; + clock-names = "clk_src_128m", "clk_src_384m"; + + clocks = <&pll CLK_TWPLL_128M>, + <&pll CLK_TWPLL_384M>; + + dpu_port: port { + dpu_out: endpoint { + remote-endpoint = <&dsi_in>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dsi-host.yaml b/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dsi-host.yaml new file mode 100644 index 000000000000..bc5594d18643 --- /dev/null +++ b/Documentation/devicetree/bindings/display/sprd/sprd,sharkl3-dsi-host.yaml @@ -0,0 +1,88 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/sprd/sprd,sharkl3-dsi-host.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Unisoc MIPI DSI Controller + +maintainers: + - Kevin Tang <kevin.tang@unisoc.com> + +properties: + compatible: + const: sprd,sharkl3-dsi-host + + reg: + maxItems: 1 + + interrupts: + maxItems: 2 + + clocks: + minItems: 1 + + clock-names: + items: + - const: clk_src_96m + + power-domains: + maxItems: 1 + + ports: + type: object + + properties: + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + + port@0: + type: object + description: + A port node with endpoint definitions as defined in + Documentation/devicetree/bindings/media/video-interfaces.txt. + That port should be the input endpoint, usually coming from + the associated DPU. 
+ + required: + - "#address-cells" + - "#size-cells" + - port@0 + + additionalProperties: false + +required: + - compatible + - reg + - interrupts + - clocks + - clock-names + - ports + +additionalProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/arm-gic.h> + #include <dt-bindings/clock/sprd,sc9860-clk.h> + dsi: dsi@63100000 { + compatible = "sprd,sharkl3-dsi-host"; + reg = <0x63100000 0x1000>; + interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>; + clock-names = "clk_src_96m"; + clocks = <&pll CLK_TWPLL_96M>; + ports { + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + dsi_in: endpoint { + remote-endpoint = <&dpu_out>; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml index 29b9447f3b84..fe0c89edf7c1 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml +++ b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml @@ -17,9 +17,10 @@ properties: oneOf: - enum: - fsl,imx7ulp-lpi2c - - fsl,imx8qm-lpi2c - items: - - const: fsl,imx8qxp-lpi2c + - enum: + - fsl,imx8qxp-lpi2c + - fsl,imx8qm-lpi2c - const: fsl,imx7ulp-lpi2c reg: diff --git a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml index c65921e66dc1..81c87295912c 100644 --- a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml @@ -136,7 +136,7 @@ examples: samsung,syscon-phandle = <&pmu_system_controller>; /* NTC thermistor is a hwmon device */ - ncp15wb473 { + thermistor { compatible = "murata,ncp15wb473"; pullup-uv = <1800000>; pullup-ohm = <47000>; diff --git a/Documentation/devicetree/bindings/input/gpio-keys.yaml b/Documentation/devicetree/bindings/input/gpio-keys.yaml index 060a309ff8e7..dbe7ecc19ccb 100644 --- a/Documentation/devicetree/bindings/input/gpio-keys.yaml +++ b/Documentation/devicetree/bindings/input/gpio-keys.yaml @@ -142,7 +142,7 @@ examples: down { label = "GPIO Key DOWN"; linux,code = <108>; - interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>; + interrupts = <1 IRQ_TYPE_EDGE_FALLING>; }; }; diff --git a/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml b/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml index 877183cf4278..1ef849dc74d7 100644 --- a/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml +++ b/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml @@ -79,6 +79,8 @@ properties: properties: data-lanes: + description: + Note that 'fsl,imx7-mipi-csi2' only supports up to 2 data lines. items: minItems: 1 maxItems: 4 @@ -91,18 +93,6 @@ properties: required: - data-lanes - allOf: - - if: - properties: - compatible: - contains: - const: fsl,imx7-mipi-csi2 - then: - properties: - data-lanes: - items: - maxItems: 2 - port@1: $ref: /schemas/graph.yaml#/properties/port description: diff --git a/Documentation/devicetree/bindings/net/ethernet-phy.yaml b/Documentation/devicetree/bindings/net/ethernet-phy.yaml index 2766fe45bb98..ee42328a109d 100644 --- a/Documentation/devicetree/bindings/net/ethernet-phy.yaml +++ b/Documentation/devicetree/bindings/net/ethernet-phy.yaml @@ -91,6 +91,14 @@ properties: compensate for the board being designed with the lanes swapped. + enet-phy-lane-no-swap: + $ref: /schemas/types.yaml#/definitions/flag + description: + If set, indicates that PHY will disable swap of the + TX/RX lanes. 
This property allows the PHY to work correctly after
+      e.g. wrong bootstrap configuration caused by issues in PCB
+      layout design.
+
   eee-broken-100tx:
     $ref: /schemas/types.yaml#/definitions/flag
     description:
diff --git a/Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml b/Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml
index 04d5654efb38..79906519c652 100644
--- a/Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml
+++ b/Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml
@@ -29,7 +29,7 @@ properties:
           - PHY_TYPE_PCIE
           - PHY_TYPE_SATA
           - PHY_TYPE_SGMII
-          - PHY_TYPE_USB
+          - PHY_TYPE_USB3
       - description: The PHY instance
         minimum: 0
         maximum: 1 # for DP, SATA or USB
diff --git a/Documentation/devicetree/bindings/power/supply/bq25980.yaml b/Documentation/devicetree/bindings/power/supply/bq25980.yaml
index 06eca6667f67..8367a1fd4057 100644
--- a/Documentation/devicetree/bindings/power/supply/bq25980.yaml
+++ b/Documentation/devicetree/bindings/power/supply/bq25980.yaml
@@ -105,7 +105,7 @@ examples:
         reg = <0x65>;
         interrupt-parent = <&gpio1>;
         interrupts = <16 IRQ_TYPE_EDGE_FALLING>;
-        ti,watchdog-timer = <0>;
+        ti,watchdog-timeout-ms = <0>;
         ti,sc-ocp-limit-microamp = <2000000>;
         ti,sc-ovp-limit-microvolt = <17800000>;
         monitored-battery = <&bat>;
diff --git a/Documentation/devicetree/bindings/sound/wlf,wm8962.yaml b/Documentation/devicetree/bindings/sound/wlf,wm8962.yaml
index 0e6249d7c133..5e172e9462b9 100644
--- a/Documentation/devicetree/bindings/sound/wlf,wm8962.yaml
+++ b/Documentation/devicetree/bindings/sound/wlf,wm8962.yaml
@@ -19,6 +19,9 @@ properties:
   clocks:
     maxItems: 1

+  interrupts:
+    maxItems: 1
+
   "#sound-dai-cells":
     const: 0
diff --git a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
index 7f987e79337c..52a78a2e362e 100644
--- a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
@@ -33,6 +33,7 @@ properties:
           - rockchip,rk3328-spi
           - rockchip,rk3368-spi
          - rockchip,rk3399-spi
+          - rockchip,rk3568-spi
          - rockchip,rv1126-spi
      - const: rockchip,rk3066-spi
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 66d6432fd781..03777900be8b 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -1236,6 +1236,8 @@ patternProperties:
     description: Truly Semiconductors Limited
   "^visionox,.*":
     description: Visionox
+  "^team-source-display,.*":
+    description: Shenzhen Team Source Display Technology Co., Ltd. (TSD)
   "^tsd,.*":
     description: Theobroma Systems Design und Consulting GmbH
   "^tyan,.*":
@@ -1328,6 +1330,8 @@ patternProperties:
     description: Wondermedia Technologies, Inc.
   "^wobo,.*":
     description: Wobo
+  "^wanchanglong,.*":
+    description: Wanchanglong Electronics Technology(SHENZHEN)Co.,Ltd.
   "^x-powers,.*":
     description: X-Powers
   "^xes,.*":
diff --git a/Documentation/doc-guide/sphinx.rst b/Documentation/doc-guide/sphinx.rst
index ec3e71f56009..e445cb146efe 100644
--- a/Documentation/doc-guide/sphinx.rst
+++ b/Documentation/doc-guide/sphinx.rst
@@ -27,7 +27,7 @@ Sphinx Install
 ==============

 The ReST markups currently used by the Documentation/ files are meant to be
-built with ``Sphinx`` version 1.3 or higher.
+built with ``Sphinx`` version 1.7 or higher.

 There's a script that checks for the Sphinx requirements. Please see
 :ref:`sphinx-pre-install` for further details.
@@ -43,10 +43,6 @@ or ``virtualenv``, depending on how your distribution packaged Python 3. .. note:: - #) Sphinx versions below 1.5 don't work properly with Python's - docutils version 0.13.1 or higher. So, if you're willing to use - those versions, you should run ``pip install 'docutils==0.12'``. - #) It is recommended to use the RTD theme for html output. Depending on the Sphinx version, it should be installed separately, with ``pip install sphinx_rtd_theme``. @@ -55,13 +51,13 @@ or ``virtualenv``, depending on how your distribution packaged Python 3. those expressions are written using LaTeX notation. It needs texlive installed with amsfonts and amsmath in order to evaluate them. -In summary, if you want to install Sphinx version 1.7.9, you should do:: +In summary, if you want to install Sphinx version 2.4.4, you should do:: - $ virtualenv sphinx_1.7.9 - $ . sphinx_1.7.9/bin/activate - (sphinx_1.7.9) $ pip install -r Documentation/sphinx/requirements.txt + $ virtualenv sphinx_2.4.4 + $ . sphinx_2.4.4/bin/activate + (sphinx_2.4.4) $ pip install -r Documentation/sphinx/requirements.txt -After running ``. sphinx_1.7.9/bin/activate``, the prompt will change, +After running ``. sphinx_2.4.4/bin/activate``, the prompt will change, in order to indicate that you're using the new environment. If you open a new shell, you need to rerun this command to enter again at the virtual environment before building the documentation. @@ -81,7 +77,7 @@ output. PDF and LaTeX builds -------------------- -Such builds are currently supported only with Sphinx versions 1.4 and higher. +Such builds are currently supported only with Sphinx versions 2.4 and higher. For PDF and LaTeX output, you'll also need ``XeLaTeX`` version 3.14159265. @@ -104,8 +100,8 @@ command line options for your distro:: You should run: sudo dnf install -y texlive-luatex85 - /usr/bin/virtualenv sphinx_1.7.9 - . sphinx_1.7.9/bin/activate + /usr/bin/virtualenv sphinx_2.4.4 + . sphinx_2.4.4/bin/activate pip install -r Documentation/sphinx/requirements.txt Can't build as 1 mandatory dependency is missing at ./scripts/sphinx-pre-install line 468. diff --git a/Documentation/filesystems/autofs.rst b/Documentation/filesystems/autofs.rst index 681c6a492bc0..4f490278d22f 100644 --- a/Documentation/filesystems/autofs.rst +++ b/Documentation/filesystems/autofs.rst @@ -35,7 +35,7 @@ This document describes only the kernel module and the interactions required with any user-space program. Subsequent text refers to this as the "automount daemon" or simply "the daemon". -"autofs" is a Linux kernel module with provides the "autofs" +"autofs" is a Linux kernel module which provides the "autofs" filesystem type. Several "autofs" filesystems can be mounted and they can each be managed separately, or all managed by the same daemon. diff --git a/Documentation/filesystems/cifs/ksmbd.rst b/Documentation/filesystems/cifs/ksmbd.rst index a1326157d53f..b0d354fd8066 100644 --- a/Documentation/filesystems/cifs/ksmbd.rst +++ b/Documentation/filesystems/cifs/ksmbd.rst @@ -50,11 +50,11 @@ ksmbd.mountd (user space daemon) -------------------------------- ksmbd.mountd is userspace process to, transfer user account and password that -are registered using ksmbd.adduser(part of utils for user space). Further it +are registered using ksmbd.adduser (part of utils for user space). Further it allows sharing information parameters that parsed from smb.conf to ksmbd in kernel. 
For the execution part it has a daemon which is continuously running and connected to the kernel interface using netlink socket, it waits for the -requests(dcerpc and share/user info). It handles RPC calls (at a minimum few +requests (dcerpc and share/user info). It handles RPC calls (at a minimum few dozen) that are most important for file server from NetShareEnum and NetServerGetInfo. Complete DCE/RPC response is prepared from the user space and passed over to the associated kernel thread for the client. @@ -154,11 +154,11 @@ Each layer 1. Enable all component prints # sudo ksmbd.control -d "all" -2. Enable one of components(smb, auth, vfs, oplock, ipc, conn, rdma) +2. Enable one of components (smb, auth, vfs, oplock, ipc, conn, rdma) # sudo ksmbd.control -d "smb" -3. Show what prints are enable. - # cat/sys/class/ksmbd-control/debug +3. Show what prints are enabled. + # cat /sys/class/ksmbd-control/debug [smb] auth vfs oplock ipc conn [rdma] 4. Disable prints: diff --git a/Documentation/filesystems/netfs_library.rst b/Documentation/filesystems/netfs_library.rst index bb68d39f03b7..375baca7edcd 100644 --- a/Documentation/filesystems/netfs_library.rst +++ b/Documentation/filesystems/netfs_library.rst @@ -1,7 +1,7 @@ .. SPDX-License-Identifier: GPL-2.0 ================================= -NETWORK FILESYSTEM HELPER LIBRARY +Network Filesystem Helper Library ================================= .. Contents: @@ -37,22 +37,22 @@ into a common call framework. The following services are provided: - * Handles transparent huge pages (THPs). + * Handle folios that span multiple pages. - * Insulates the netfs from VM interface changes. + * Insulate the netfs from VM interface changes. - * Allows the netfs to arbitrarily split reads up into pieces, even ones that - don't match page sizes or page alignments and that may cross pages. + * Allow the netfs to arbitrarily split reads up into pieces, even ones that + don't match folio sizes or folio alignments and that may cross folios. - * Allows the netfs to expand a readahead request in both directions to meet - its needs. + * Allow the netfs to expand a readahead request in both directions to meet its + needs. - * Allows the netfs to partially fulfil a read, which will then be resubmitted. + * Allow the netfs to partially fulfil a read, which will then be resubmitted. - * Handles local caching, allowing cached data and server-read data to be + * Handle local caching, allowing cached data and server-read data to be interleaved for a single request. - * Handles clearing of bufferage that aren't on the server. + * Handle clearing of bufferage that aren't on the server. * Handle retrying of reads that failed, switching reads from the cache to the server as necessary. 
@@ -70,22 +70,22 @@ Read Helper Functions

 Three read helpers are provided::

- * void netfs_readahead(struct readahead_control *ractl,
-                        const struct netfs_read_request_ops *ops,
-                        void *netfs_priv);``
- * int netfs_readpage(struct file *file,
-                      struct page *page,
-                      const struct netfs_read_request_ops *ops,
-                      void *netfs_priv);
- * int netfs_write_begin(struct file *file,
-                         struct address_space *mapping,
-                         loff_t pos,
-                         unsigned int len,
-                         unsigned int flags,
-                         struct page **_page,
-                         void **_fsdata,
-                         const struct netfs_read_request_ops *ops,
-                         void *netfs_priv);
+ void netfs_readahead(struct readahead_control *ractl,
+                      const struct netfs_read_request_ops *ops,
+                      void *netfs_priv);
+ int netfs_readpage(struct file *file,
+                    struct folio *folio,
+                    const struct netfs_read_request_ops *ops,
+                    void *netfs_priv);
+ int netfs_write_begin(struct file *file,
+                       struct address_space *mapping,
+                       loff_t pos,
+                       unsigned int len,
+                       unsigned int flags,
+                       struct folio **_folio,
+                       void **_fsdata,
+                       const struct netfs_read_request_ops *ops,
+                       void *netfs_priv);

 Each corresponds to a VM operation, with the addition of a couple of parameters
 for the use of the read helpers:
@@ -103,8 +103,8 @@ Both of these values will be stored into the read request structure.
 For ->readahead() and ->readpage(), the network filesystem should just jump
 into the corresponding read helper; whereas for ->write_begin(), it may be a
 little more complicated as the network filesystem might want to flush
-conflicting writes or track dirty data and needs to put the acquired page if an
-error occurs after calling the helper.
+conflicting writes or track dirty data and needs to put the acquired folio if
+an error occurs after calling the helper.

 The helpers manage the read request, calling back into the network filesystem
 through the supplied table of operations. Waits will be performed as
@@ -253,7 +253,7 @@ through which it can issue requests and negotiate::
         void (*issue_op)(struct netfs_read_subrequest *subreq);
         bool (*is_still_valid)(struct netfs_read_request *rreq);
         int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
-                                 struct page *page, void **_fsdata);
+                                 struct folio *folio, void **_fsdata);
         void (*done)(struct netfs_read_request *rreq);
         void (*cleanup)(struct address_space *mapping, void *netfs_priv);
     };
@@ -313,13 +313,14 @@ The operations are as follows:

   There is no return value; the netfs_subreq_terminated() function should be
   called to indicate whether or not the operation succeeded and how much data
-  it transferred. The filesystem also should not deal with setting pages
+  it transferred. The filesystem also should not deal with setting folios
   uptodate, unlocking them or dropping their refs - the helpers need to deal
   with this as they have to coordinate with copying to the local cache.

-  Note that the helpers have the pages locked, but not pinned. It is possible
-  to use the ITER_XARRAY iov iterator to refer to the range of the inode that
-  is being operated upon without the need to allocate large bvec tables.
+  Note that the helpers have the folios locked, but not pinned. It is
+  possible to use the ITER_XARRAY iov iterator to refer to the range of the
+  inode that is being operated upon without the need to allocate large bvec
+  tables.
* ``is_still_valid()`` @@ -330,15 +331,15 @@ The operations are as follows: * ``check_write_begin()`` [Optional] This is called from the netfs_write_begin() helper once it has - allocated/grabbed the page to be modified to allow the filesystem to flush + allocated/grabbed the folio to be modified to allow the filesystem to flush conflicting state before allowing it to be modified. - It should return 0 if everything is now fine, -EAGAIN if the page should be + It should return 0 if everything is now fine, -EAGAIN if the folio should be regrabbed and any other error code to abort the operation. * ``done`` - [Optional] This is called after the pages in the request have all been + [Optional] This is called after the folios in the request have all been unlocked (and marked uptodate if applicable). * ``cleanup`` @@ -390,7 +391,7 @@ The read helpers work by the following general procedure: * If NETFS_SREQ_CLEAR_TAIL was set, a short read will be cleared to the end of the slice instead of reissuing. - * Once the data is read, the pages that have been fully read/cleared: + * Once the data is read, the folios that have been fully read/cleared: * Will be marked uptodate. @@ -398,11 +399,11 @@ The read helpers work by the following general procedure: * Unlocked - * Any pages that need writing to the cache will then have DIO writes issued. + * Any folios that need writing to the cache will then have DIO writes issued. * Synchronous operations will wait for reading to be complete. - * Writes to the cache will proceed asynchronously and the pages will have the + * Writes to the cache will proceed asynchronously and the folios will have the PG_fscache mark removed when that completes. * The request structures will be cleaned up when everything has completed. @@ -452,6 +453,9 @@ operation table looks like the following:: netfs_io_terminated_t term_func, void *term_func_priv); + int (*prepare_write)(struct netfs_cache_resources *cres, + loff_t *_start, size_t *_len, loff_t i_size); + int (*write)(struct netfs_cache_resources *cres, loff_t start_pos, struct iov_iter *iter, @@ -509,6 +513,14 @@ The methods defined in the table are: indicating whether the termination is definitely happening in the caller's context. + * ``prepare_write()`` + + [Required] Called to adjust a write to the cache and check that there is + sufficient space in the cache. The start and length values indicate the + size of the write that netfslib is proposing, and this can be adjusted by + the cache to respect DIO boundaries. The file size is passed for + information. + * ``write()`` [Required] Called to write to the cache. The start file offset is given @@ -525,4 +537,9 @@ not the read request structure as they could be used in other situations where there isn't a read request structure as well, such as writing dirty data to the cache. + +API Function Reference +====================== + .. kernel-doc:: include/linux/netfs.h +.. kernel-doc:: fs/netfs/read_helper.c diff --git a/Documentation/gpu/amdgpu-dc.rst b/Documentation/gpu/amdgpu-dc.rst deleted file mode 100644 index f7ff7e1309de..000000000000 --- a/Documentation/gpu/amdgpu-dc.rst +++ /dev/null @@ -1,74 +0,0 @@ -=================================== -drm/amd/display - Display Core (DC) -=================================== - -*placeholder - general description of supported platforms, what dc is, etc.* - -Because it is partially shared with other operating systems, the Display Core -Driver is divided in two pieces. - -1. **Display Core (DC)** contains the OS-agnostic components. 
Things like - hardware programming and resource management are handled here. -2. **Display Manager (DM)** contains the OS-dependent components. Hooks to the - amdgpu base driver and DRM are implemented here. - -It doesn't help that the entire package is frequently referred to as DC. But -with the context in mind, it should be clear. - -When CONFIG_DRM_AMD_DC is enabled, DC will be initialized by default for -supported ASICs. To force disable, set `amdgpu.dc=0` on kernel command line. -Likewise, to force enable on unsupported ASICs, set `amdgpu.dc=1`. - -To determine if DC is loaded, search dmesg for the following entry: - -``Display Core initialized with <version number here>`` - -AMDgpu Display Manager -====================== - -.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c - :doc: overview - -.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h - :internal: - -Lifecycle ---------- - -.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c - :doc: DM Lifecycle - -.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c - :functions: dm_hw_init dm_hw_fini - -Interrupts ----------- - -.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c - :doc: overview - -.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c - :internal: - -.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c - :functions: register_hpd_handlers dm_crtc_high_irq dm_pflip_high_irq - -Atomic Implementation ---------------------- - -.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c - :doc: atomic - -.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c - :functions: amdgpu_dm_atomic_check amdgpu_dm_atomic_commit_tail - -Display Core -============ - -**WIP** - -FreeSync Video --------------- - -.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c - :doc: FreeSync Video diff --git a/Documentation/gpu/amdgpu.rst b/Documentation/gpu/amdgpu.rst deleted file mode 100644 index 8ba72e898099..000000000000 --- a/Documentation/gpu/amdgpu.rst +++ /dev/null @@ -1,324 +0,0 @@ -========================= - drm/amdgpu AMDgpu driver -========================= - -The drm/amdgpu driver supports all AMD Radeon GPUs based on the Graphics Core -Next (GCN) architecture. - -Module Parameters -================= - -The amdgpu driver supports the following module parameters: - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c - -Core Driver Infrastructure -========================== - -This section covers core driver infrastructure. - -.. _amdgpu_memory_domains: - -Memory Domains --------------- - -.. kernel-doc:: include/uapi/drm/amdgpu_drm.h - :doc: memory domains - -Buffer Objects --------------- - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_object.c - :doc: amdgpu_object - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_object.c - :internal: - -PRIME Buffer Sharing --------------------- - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c - :doc: PRIME Buffer Sharing - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c - :internal: - -MMU Notifier ------------- - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c - :doc: MMU Notifier - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c - :internal: - -AMDGPU Virtual Memory ---------------------- - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c - :doc: GPUVM - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c - :internal: - -Interrupt Handling ------------------- - -.. 
kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c - :doc: Interrupt Handling - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c - :internal: - -IP Blocks ------------------- - -.. kernel-doc:: drivers/gpu/drm/amd/include/amd_shared.h - :doc: IP Blocks - -.. kernel-doc:: drivers/gpu/drm/amd/include/amd_shared.h - :identifiers: amd_ip_block_type amd_ip_funcs - -AMDGPU XGMI Support -=================== - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c - -AMDGPU RAS Support -================== - -The AMDGPU RAS interfaces are exposed via sysfs (for informational queries) and -debugfs (for error injection). - -RAS debugfs/sysfs Control and Error Injection Interfaces --------------------------------------------------------- - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c - :doc: AMDGPU RAS debugfs control interface - -RAS Reboot Behavior for Unrecoverable Errors --------------------------------------------------------- - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c - :doc: AMDGPU RAS Reboot Behavior for Unrecoverable Errors - -RAS Error Count sysfs Interface -------------------------------- - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c - :doc: AMDGPU RAS sysfs Error Count Interface - -RAS EEPROM debugfs Interface ----------------------------- - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c - :doc: AMDGPU RAS debugfs EEPROM table reset interface - -RAS VRAM Bad Pages sysfs Interface ----------------------------------- - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c - :doc: AMDGPU RAS sysfs gpu_vram_bad_pages Interface - -Sample Code ------------ -Sample code for testing error injection can be found here: -https://cgit.freedesktop.org/mesa/drm/tree/tests/amdgpu/ras_tests.c - -This is part of the libdrm amdgpu unit tests which cover several areas of the GPU. -There are four sets of tests: - -RAS Basic Test - -The test verifies the RAS feature enabled status and makes sure the necessary sysfs and debugfs files -are present. - -RAS Query Test - -This test checks the RAS availability and enablement status for each supported IP block as well as -the error counts. - -RAS Inject Test - -This test injects errors for each IP. - -RAS Disable Test - -This test tests disabling of RAS features for each IP block. - - -GPU Power/Thermal Controls and Monitoring -========================================= - -This section covers hwmon and power/thermal controls. - -HWMON Interfaces ----------------- - -.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c - :doc: hwmon - -GPU sysfs Power State Interfaces --------------------------------- - -GPU power controls are exposed via sysfs files. - -power_dpm_state -~~~~~~~~~~~~~~~ - -.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c - :doc: power_dpm_state - -power_dpm_force_performance_level -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c - :doc: power_dpm_force_performance_level - -pp_table -~~~~~~~~ - -.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c - :doc: pp_table - -pp_od_clk_voltage -~~~~~~~~~~~~~~~~~ - -.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c - :doc: pp_od_clk_voltage - -pp_dpm_* -~~~~~~~~ - -.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c - :doc: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie - -pp_power_profile_mode -~~~~~~~~~~~~~~~~~~~~~ - -.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c - :doc: pp_power_profile_mode - -\*_busy_percent -~~~~~~~~~~~~~~~ - -.. 
kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c - :doc: gpu_busy_percent - -.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c - :doc: mem_busy_percent - -gpu_metrics -~~~~~~~~~~~~~~~~~~~~~ - -.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c - :doc: gpu_metrics - -GPU Product Information -======================= - -Information about the GPU can be obtained on certain cards -via sysfs - -product_name ------------- - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c - :doc: product_name - -product_number --------------- - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c - :doc: product_name - -serial_number -------------- - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c - :doc: serial_number - -unique_id ---------- - -.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c - :doc: unique_id - -GPU Memory Usage Information -============================ - -Various memory accounting can be accessed via sysfs - -mem_info_vram_total -------------------- - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c - :doc: mem_info_vram_total - -mem_info_vram_used ------------------- - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c - :doc: mem_info_vram_used - -mem_info_vis_vram_total ------------------------ - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c - :doc: mem_info_vis_vram_total - -mem_info_vis_vram_used ----------------------- - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c - :doc: mem_info_vis_vram_used - -mem_info_gtt_total ------------------- - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c - :doc: mem_info_gtt_total - -mem_info_gtt_used ------------------ - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c - :doc: mem_info_gtt_used - -PCIe Accounting Information -=========================== - -pcie_bw -------- - -.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c - :doc: pcie_bw - -pcie_replay_count ------------------ - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c - :doc: pcie_replay_count - -GPU SmartShift Information -========================== - -GPU SmartShift information via sysfs - -smartshift_apu_power --------------------- - -.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c - :doc: smartshift_apu_power - -smartshift_dgpu_power ---------------------- - -.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c - :doc: smartshift_dgpu_power - -smartshift_bias ---------------- - -.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c - :doc: smartshift_bias diff --git a/Documentation/gpu/amdgpu/amdgpu-glossary.rst b/Documentation/gpu/amdgpu/amdgpu-glossary.rst new file mode 100644 index 000000000000..859dcec6c6f9 --- /dev/null +++ b/Documentation/gpu/amdgpu/amdgpu-glossary.rst @@ -0,0 +1,87 @@ +=============== +AMDGPU Glossary +=============== + +Here you can find some generic acronyms used in the amdgpu driver. Notice that +we have a dedicated glossary for Display Core at +'Documentation/gpu/amdgpu/display/dc-glossary.rst'. + +.. 
glossary:: + + CP + Command Processor + + CPLIB + Content Protection Library + + DFS + Digital Frequency Synthesizer + + ECP + Enhanced Content Protection + + EOP + End Of Pipe/Pipeline + + GC + Graphics and Compute + + GMC + Graphic Memory Controller + + IH + Interrupt Handler + + HQD + Hardware Queue Descriptor + + IB + Indirect Buffer + + IP + Intellectual Property blocks + + KCQ + Kernel Compute Queue + + KGQ + Kernel Graphics Queue + + KIQ + Kernel Interface Queue + + MEC + MicroEngine Compute + + MES + MicroEngine Scheduler + + MMHUB + Multi-Media HUB + + MQD + Memory Queue Descriptor + + PPLib + PowerPlay Library - PowerPlay is the power management component. + + PSP + Platform Security Processor + + RCL + RunList Controller + + SDMA + System DMA + + SMU + System Management Unit + + SS + Spread Spectrum + + VCE + Video Compression Engine + + VCN + Video Codec Next diff --git a/Documentation/gpu/amdgpu/display/config_example.svg b/Documentation/gpu/amdgpu/display/config_example.svg new file mode 100644 index 000000000000..cdac9858601c --- /dev/null +++ b/Documentation/gpu/amdgpu/display/config_example.svg @@ -0,0 +1,414 @@ +<?xml version="1.0" encoding="UTF-8" standalone="no"?> +<!-- Created with Inkscape (http://www.inkscape.org/) --> + +<svg + xmlns:dc="http://purl.org/dc/elements/1.1/" + xmlns:cc="http://creativecommons.org/ns#" + xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:svg="http://www.w3.org/2000/svg" + xmlns="http://www.w3.org/2000/svg" + xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" + xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" + width="144.63406mm" + height="66.596054mm" + viewBox="0 0 144.15195 66.596054" + version="1.1" + id="svg8" + inkscape:version="0.92.5 (2060ec1f9f, 2020-04-08)" + sodipodi:docname="config_example.svg"> + <defs + id="defs2"> + <marker + inkscape:stockid="Arrow1Mend" + orient="auto" + refY="0" + refX="0" + id="Arrow1Mend" + style="overflow:visible" + inkscape:isstock="true"> + <path + id="path4547" + d="M 0,0 5,-5 -12.5,0 5,5 Z" + style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1" + transform="matrix(-0.4,0,0,-0.4,-4,0)" + inkscape:connector-curvature="0" /> + </marker> + <marker + inkscape:stockid="Arrow1Mend" + orient="auto" + refY="0" + refX="0" + id="Arrow1Mend-3" + style="overflow:visible" + inkscape:isstock="true"> + <path + inkscape:connector-curvature="0" + id="path4547-6" + d="M 0,0 5,-5 -12.5,0 5,5 Z" + style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1" + transform="matrix(-0.4,0,0,-0.4,-4,0)" /> + </marker> + <marker + inkscape:stockid="Arrow1Mend" + orient="auto" + refY="0" + refX="0" + id="Arrow1Mend-3-5" + style="overflow:visible" + inkscape:isstock="true"> + <path + inkscape:connector-curvature="0" + id="path4547-6-3" + d="M 0,0 5,-5 -12.5,0 5,5 Z" + style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1" + transform="matrix(-0.4,0,0,-0.4,-4,0)" /> + </marker> + <marker + inkscape:stockid="Arrow1Mend" + orient="auto" + refY="0" + refX="0" + id="Arrow1Mend-3-5-0" + style="overflow:visible" + inkscape:isstock="true"> + <path + inkscape:connector-curvature="0" + id="path4547-6-3-6" + d="M 0,0 5,-5 -12.5,0 5,5 Z" + style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1" + transform="matrix(-0.4,0,0,-0.4,-4,0)" /> + </marker> + <marker + 
inkscape:stockid="Arrow1Mend" + orient="auto" + refY="0" + refX="0" + id="Arrow1Mend-3-5-7" + style="overflow:visible" + inkscape:isstock="true"> + <path + inkscape:connector-curvature="0" + id="path4547-6-3-3" + d="M 0,0 5,-5 -12.5,0 5,5 Z" + style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1" + transform="matrix(-0.4,0,0,-0.4,-4,0)" /> + </marker> + </defs> + <sodipodi:namedview + id="base" + pagecolor="#ffffff" + bordercolor="#666666" + borderopacity="1.0" + inkscape:pageopacity="0.0" + inkscape:pageshadow="2" + inkscape:zoom="0.98994949" + inkscape:cx="518.91791" + inkscape:cy="172.50112" + inkscape:document-units="mm" + inkscape:current-layer="layer1" + showgrid="true" + viewbox-width="209.3" + inkscape:window-width="3840" + inkscape:window-height="1136" + inkscape:window-x="0" + inkscape:window-y="27" + inkscape:window-maximized="1" + fit-margin-top="0" + fit-margin-left="0" + fit-margin-right="0" + fit-margin-bottom="0"> + <inkscape:grid + type="xygrid" + id="grid817" + originx="4.390216" + originy="-208.88856" /> + </sodipodi:namedview> + <metadata + id="metadata5"> + <rdf:RDF> + <cc:Work + rdf:about=""> + <dc:format>image/svg+xml</dc:format> + <dc:type + rdf:resource="http://purl.org/dc/dcmitype/StillImage" /> + <dc:title></dc:title> + </cc:Work> + </rdf:RDF> + </metadata> + <g + inkscape:label="Layer 1" + inkscape:groupmode="layer" + id="layer1" + transform="translate(4.4048992,-21.515392)"> + <path + style="fill:none;stroke:#000000;stroke-width:0.26458335px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" + d="m 20.816662,35.062492 h 23.8125 v -5.291667 h 5.291667 v 5.291667 h 10.583334 v -5.291667 h 5.291667 v 5.291667 h 2.645833 v -5.291667 h 5.291667 v 5.291667 h 66.14583" + id="path4522" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#000000;stroke-width:0.26458335px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" + d="m 20.816662,48.291659 h 7.9375 v -5.291667 h 5.291667 v 5.291667 h 58.208335 v -5.291667 h 5.291666 v 5.291667 h 42.33333" + id="path4524" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#000000;stroke-width:0.26458335px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" + d="m 20.816662,61.520826 h 26.458334 v -5.291667 h 44.979168 v 5.291667 h 47.624996" + id="path4526" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#000000;stroke-width:0.26458335px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" + d="M 20.816662,72.104159 H 139.87916" + id="path4528" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#000000;stroke-width:0.26458335px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" + d="M 20.816662,77.395826 H 139.87916" + id="path4530" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#000000;stroke-width:0.26458335px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" + d="M 20.816662,82.687493 H 139.87916" + id="path4532" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#000000;stroke-width:0.26458335px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" + d="M 20.816662,87.97916 H 139.87916" + id="path4534" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#ff0000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.52916668, 0.52916668;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#Arrow1Mend)" + d="m 
47.274996,29.770826 c 3.836215,14.933158 3.472151,27.586643 0.264583,41.010418" + id="path4536" + inkscape:connector-curvature="0" + sodipodi:nodetypes="cc" /> + <path + style="fill:none;stroke:#ff0000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.52916669, 0.52916669;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#Arrow1Mend-3)" + d="m 63.149996,29.770826 c 3.836214,14.933158 5.059652,27.586642 1.852084,41.010418" + id="path4536-7" + inkscape:connector-curvature="0" + sodipodi:nodetypes="cc" /> + <path + style="fill:none;stroke:#ff0000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.5291667, 0.5291667;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#Arrow1Mend-3-5)" + d="m 71.087496,29.770825 c 3.836214,14.933158 5.059652,27.586643 1.852084,41.010419" + id="path4536-7-5" + inkscape:connector-curvature="0" + sodipodi:nodetypes="cc" /> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:10.58333397px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335" + x="59.359009" + y="24.195677" + id="text6572"><tspan + sodipodi:role="line" + x="59.359009" + y="24.195677" + style="font-size:3.52777791px;line-height:5.39999962;text-align:center;text-anchor:middle;stroke-width:0.26458335" + id="tspan6574">Configurations</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:10.58333397px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335" + x="46.825508" + y="28.542402" + id="text6572-6"><tspan + sodipodi:role="line" + x="46.825508" + y="28.542402" + style="font-size:3.52777815px;line-height:5.39999962;text-align:center;text-anchor:middle;fill:#008000;stroke-width:0.26458335" + id="tspan6574-2">A</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:10.58333397px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335" + x="62.8895" + y="28.825886" + id="text6572-6-2"><tspan + sodipodi:role="line" + x="62.8895" + y="28.825886" + style="font-size:3.52777839px;line-height:5.39999962;text-align:center;text-anchor:middle;fill:#0000ff;stroke-width:0.26458335" + id="tspan6574-2-7">B</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:10.58333397px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335" + x="70.827003" + y="29.109362" + id="text6572-6-2-3"><tspan + sodipodi:role="line" + x="70.827003" + y="29.109362" + style="font-size:3.52777863px;line-height:5.39999962;text-align:center;text-anchor:middle;fill:#c87137;stroke-width:0.26458335" + id="tspan6574-2-7-6">C</tspan></text> + <path + style="fill:none;stroke:#ff0000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.52916671, 0.52916671;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#Arrow1Mend-3-5-0)" + d="m 92.254164,42.999993 c 9.142136,12.745655 4.411987,28.608461 0.529167,38.364584" + id="path4536-7-5-2" + inkscape:connector-curvature="0" + sodipodi:nodetypes="cc" /> + <path + 
style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" + d="m 47.274996,72.104159 v 5.291667" + id="path8053" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" + d="m 64.472913,72.10416 v 5.291667" + id="path8053-6" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" + d="m 72.410413,72.10416 v 5.291667" + id="path8053-6-1" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" + d="m 92.254164,82.687494 v 5.291667" + id="path8053-6-1-8" + inkscape:connector-curvature="0" /> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:10.58333397px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335" + x="55.802444" + y="76.167412" + id="text6572-6-7"><tspan + sodipodi:role="line" + x="55.802444" + y="76.167412" + style="font-size:3.52777839px;line-height:5.39999962;text-align:center;text-anchor:middle;fill:#008000;stroke-width:0.26458335" + id="tspan6574-2-9">A</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:10.58333397px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335" + x="68.559143" + y="75.883926" + id="text6572-6-2-2"><tspan + sodipodi:role="line" + x="68.559143" + y="75.883926" + style="font-size:3.52777863px;line-height:5.39999962;text-align:center;text-anchor:middle;fill:#0000ff;stroke-width:0.26458335" + id="tspan6574-2-7-0">B</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:10.58333397px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335" + x="84.812119" + y="75.883911" + id="text6572-6-2-3-2"><tspan + sodipodi:role="line" + x="84.812119" + y="75.883911" + style="font-size:3.52777863px;line-height:5.39999962;text-align:center;text-anchor:middle;fill:#c87137;stroke-width:0.26458335" + id="tspan6574-2-7-6-3">C</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:10.58333397px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335" + x="98.513756" + y="86.845222" + id="text6572-6-2-3-2-7"><tspan + sodipodi:role="line" + x="98.513756" + y="86.845222" + style="font-size:3.52777863px;line-height:5.39999962;text-align:center;text-anchor:middle;fill:#c87137;stroke-width:0.26458335" + id="tspan6574-2-7-6-3-5">C</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:10.58333397px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335" + x="35.452015" + y="75.694931" + id="text6572-9"><tspan + sodipodi:role="line" + x="35.452015" + y="75.694931" + 
style="font-size:3.52777815px;line-height:5.39999962;text-align:center;text-anchor:middle;stroke-width:0.26458335" + id="tspan6574-22">Old config</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:10.58333397px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335" + x="55.484753" + y="86.656235" + id="text6572-9-8"><tspan + sodipodi:role="line" + x="55.484753" + y="86.656235" + style="font-size:3.52777839px;line-height:5.39999962;text-align:center;text-anchor:middle;stroke-width:0.26458335" + id="tspan6574-22-9">Old config</tspan></text> + <path + style="fill:none;stroke:#ff0000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.52916671, 0.52916671;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#Arrow1Mend-3-5-7)" + d="m 92.254164,42.999993 c 4.233333,4.7625 2.645833,13.229167 0.79375,17.197917" + inkscape:connector-curvature="0" + sodipodi:nodetypes="cc" /> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:10.58333397px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335" + x="3.7020128" + y="33.550579" + id="text6572-1"><tspan + sodipodi:role="line" + x="3.7020128" + y="42.914349" + style="font-size:3.52777815px;line-height:5.39999962;text-align:center;text-anchor:middle;stroke-width:0.26458335" + id="tspan15310" /></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:3.17500019px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335" + x="13.366468" + y="46.590767" + id="text15316"><tspan + sodipodi:role="line" + x="13.366468" + y="46.590767" + style="text-align:center;text-anchor:middle;stroke-width:0.26458335" + id="tspan15318">VUpdate</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:3.17500043px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335" + x="14.45245" + y="29.676321" + id="text15316-3"><tspan + sodipodi:role="line" + id="tspan15314-1" + x="14.45245" + y="29.676321" + style="text-align:center;text-anchor:middle;stroke-width:0.26458335">Update</tspan><tspan + sodipodi:role="line" + x="14.45245" + y="33.645073" + style="text-align:center;text-anchor:middle;stroke-width:0.26458335" + id="tspan15318-9">Lock</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:3.17500043px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335" + x="7.5676007" + y="56.985115" + id="text15316-4"><tspan + sodipodi:role="line" + x="7.5676007" + y="56.985115" + style="text-align:center;text-anchor:middle;stroke-width:0.26458335" + id="tspan15318-7">Register update</tspan><tspan + sodipodi:role="line" + x="7.5676007" + y="60.953865" + style="text-align:center;text-anchor:middle;stroke-width:0.26458335" + id="tspan15361">Pending Status</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:3.17500043px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335" + 
x="16.074829" + y="76.167404" + id="text15316-8"><tspan + sodipodi:role="line" + x="16.074829" + y="76.167404" + style="text-align:center;text-anchor:middle;stroke-width:0.26458335" + id="tspan15318-4">Buf 0</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:3.17500067px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458335" + x="16.156994" + y="86.089279" + id="text15316-8-5"><tspan + sodipodi:role="line" + x="16.156994" + y="86.089279" + style="text-align:center;text-anchor:middle;stroke-width:0.26458335" + id="tspan15318-4-0">Buf 1</tspan></text> + </g> +</svg> diff --git a/Documentation/gpu/amdgpu/display/dc-debug.rst b/Documentation/gpu/amdgpu/display/dc-debug.rst new file mode 100644 index 000000000000..40c55a618918 --- /dev/null +++ b/Documentation/gpu/amdgpu/display/dc-debug.rst @@ -0,0 +1,77 @@ +======================== +Display Core Debug tools +======================== + +DC Visual Confirmation +====================== + +Display core provides a feature named visual confirmation, which is a set of +bars added at the scanout time by the driver to convey some specific +information. In general, you can enable this debug option by using:: + + echo <N> > /sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm + +Where `N` is an integer number for some specific scenarios that the developer +wants to enable, you will see some of these debug cases in the following +subsection. + +Multiple Planes Debug +--------------------- + +If you want to enable or debug multiple planes in a specific user-space +application, you can leverage a debug feature named visual confirm. For +enabling it, you will need:: + + echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm + +You need to reload your GUI to see the visual confirmation. When the plane +configuration changes or a full update occurs there will be a colored bar at +the bottom of each hardware plane being drawn on the screen. + +* The color indicates the format - For example, red is AR24 and green is NV12 +* The height of the bar indicates the index of the plane +* Pipe split can be observed if there are two bars with a difference in height + covering the same plane + +Consider the video playback case in which a video is played in a specific +plane, and the desktop is drawn in another plane. The video plane should +feature one or two green bars at the bottom of the video depending on pipe +split configuration. + +* There should **not** be any visual corruption +* There should **not** be any underflow or screen flashes +* There should **not** be any black screens +* There should **not** be any cursor corruption +* Multiple plane **may** be briefly disabled during window transitions or + resizing but should come back after the action has finished + +Pipe Split Debug +---------------- + +Sometimes we need to debug if DCN is splitting pipes correctly, and visual +confirmation is also handy for this case. Similar to the MPO case, you can use +the below command to enable visual confirmation:: + + echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm + +In this case, if you have a pipe split, you will see one small red bar at the +bottom of the display covering the entire display width and another bar +covering the second pipe. In other words, you will see a bit high bar in the +second pipe. + +DTN Debug +========= + +DC (DCN) provides an extensive log that dumps multiple details from our +hardware configuration. 
Via debugfs, you can capture those status values by +using Display Test Next (DTN) log, which can be captured via debugfs by using:: + + cat /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log + +Since this log is updated accordingly with DCN status, you can also follow the +change in real-time by using something like:: + + sudo watch -d cat /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log + +When reporting a bug related to DC, consider attaching this log before and +after you reproduce the bug. diff --git a/Documentation/gpu/amdgpu/display/dc-glossary.rst b/Documentation/gpu/amdgpu/display/dc-glossary.rst new file mode 100644 index 000000000000..116f5f0942fd --- /dev/null +++ b/Documentation/gpu/amdgpu/display/dc-glossary.rst @@ -0,0 +1,237 @@ +=========== +DC Glossary +=========== + +On this page, we try to keep track of acronyms related to the display +component. If you do not find what you are looking for, look at the +'Documentation/gpu/amdgpu/amdgpu-glossary.rst'; if you cannot find it anywhere, +consider asking in the amdgfx and update this page. + +.. glossary:: + + ABM + Adaptive Backlight Modulation + + APU + Accelerated Processing Unit + + ASIC + Application-Specific Integrated Circuit + + ASSR + Alternate Scrambler Seed Reset + + AZ + Azalia (HD audio DMA engine) + + BPC + Bits Per Colour/Component + + BPP + Bits Per Pixel + + Clocks + * PCLK: Pixel Clock + * SYMCLK: Symbol Clock + * SOCCLK: GPU Engine Clock + * DISPCLK: Display Clock + * DPPCLK: DPP Clock + * DCFCLK: Display Controller Fabric Clock + * REFCLK: Real Time Reference Clock + * PPLL: Pixel PLL + * FCLK: Fabric Clock + * MCLK: Memory Clock + + CRC + Cyclic Redundancy Check + + CRTC + Cathode Ray Tube Controller - commonly called "Controller" - Generates + raw stream of pixels, clocked at pixel clock + + CVT + Coordinated Video Timings + + DAL + Display Abstraction layer + + DC (Software) + Display Core + + DC (Hardware) + Display Controller + + DCC + Delta Colour Compression + + DCE + Display Controller Engine + + DCHUB + Display Controller HUB + + ARB + Arbiter + + VTG + Vertical Timing Generator + + DCN + Display Core Next + + DCCG + Display Clock Generator block + + DDC + Display Data Channel + + DIO + Display IO + + DPP + Display Pipes and Planes + + DSC + Display Stream Compression (Reduce the amount of bits to represent pixel + count while at the same pixel clock) + + dGPU + discrete GPU + + DMIF + Display Memory Interface + + DML + Display Mode Library + + DMCU + Display Micro-Controller Unit + + DMCUB + Display Micro-Controller Unit, version B + + DPCD + DisplayPort Configuration Data + + DPM(S) + Display Power Management (Signaling) + + DRR + Dynamic Refresh Rate + + DWB + Display Writeback + + FB + Frame Buffer + + FBC + Frame Buffer Compression + + FEC + Forward Error Correction + + FRL + Fixed Rate Link + + GCO + Graphical Controller Object + + GSL + Global Swap Lock + + iGPU + integrated GPU + + ISR + Interrupt Service Request + + ISV + Independent Software Vendor + + KMD + Kernel Mode Driver + + LB + Line Buffer + + LFC + Low Framerate Compensation + + LTTPR + Link Training Tunable Phy Repeater + + LUT + Lookup Table + + MALL + Memory Access at Last Level + + MC + Memory Controller + + MPC + Multiple pipes and plane combine + + MPO + Multi Plane Overlay + + MST + Multi Stream Transport + + NBP State + Northbridge Power State + + NBIO + North Bridge Input/Output + + ODM + Output Data Mapping + + OPM + Output Protection Manager + + OPP + Output Plane Processor + + OPTC + Output Pipe Timing Combiner + + OTG + Output 
Timing Generator + + PCON + Power Controller + + PGFSM + Power Gate Finite State Machine + + PSR + Panel Self Refresh + + SCL + Scaler + + SDP + Scalable Data Port + + SLS + Single Large Surface + + SST + Single Stream Transport + + TMDS + Transition-Minimized Differential Signaling + + TMZ + Trusted Memory Zone + + TTU + Time to Underflow + + VRR + Variable Refresh Rate + + UVD + Unified Video Decoder diff --git a/Documentation/gpu/amdgpu/display/dc_pipeline_overview.svg b/Documentation/gpu/amdgpu/display/dc_pipeline_overview.svg new file mode 100644 index 000000000000..9adecebfe65b --- /dev/null +++ b/Documentation/gpu/amdgpu/display/dc_pipeline_overview.svg @@ -0,0 +1,1125 @@ +<?xml version="1.0" encoding="UTF-8" standalone="no"?> +<!-- Created with Inkscape (http://www.inkscape.org/) --> + +<svg + xmlns:dc="http://purl.org/dc/elements/1.1/" + xmlns:cc="http://creativecommons.org/ns#" + xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:svg="http://www.w3.org/2000/svg" + xmlns="http://www.w3.org/2000/svg" + xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" + xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" + width="1296.7491" + height="741.97845" + viewBox="0 0 343.0982 196.31514" + version="1.1" + id="svg8" + inkscape:version="0.92.5 (2060ec1f9f, 2020-04-08)" + sodipodi:docname="dc_pipeline_overview.svg"> + <defs + id="defs2"> + <marker + inkscape:stockid="Arrow2Mend" + orient="auto" + refY="0" + refX="0" + id="marker8858" + style="overflow:visible" + inkscape:isstock="true"> + <path + id="path8616" + style="fill:#aa00d4;fill-opacity:1;fill-rule:evenodd;stroke:#aa00d4;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="scale(-0.6)" + inkscape:connector-curvature="0" /> + </marker> + <marker + inkscape:stockid="Arrow2Send" + orient="auto" + refY="0" + refX="0" + id="Arrow2Send" + style="overflow:visible" + inkscape:isstock="true"> + <path + id="path8622" + style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="matrix(-0.3,0,0,-0.3,0.69,0)" + inkscape:connector-curvature="0" /> + </marker> + <marker + inkscape:stockid="Arrow1Lend" + orient="auto" + refY="0" + refX="0" + id="Arrow1Lend" + style="overflow:visible" + inkscape:isstock="true"> + <path + id="path8592" + d="M 0,0 5,-5 -12.5,0 5,5 Z" + style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1" + transform="matrix(-0.8,0,0,-0.8,-10,0)" + inkscape:connector-curvature="0" /> + </marker> + <marker + inkscape:stockid="Arrow2Lend" + orient="auto" + refY="0" + refX="0" + id="Arrow2Lend" + style="overflow:visible" + inkscape:isstock="true"> + <path + id="path8610" + style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="matrix(-1.1,0,0,-1.1,-1.1,0)" + inkscape:connector-curvature="0" /> + </marker> + <marker + inkscape:stockid="Arrow2Mend" + orient="auto" + refY="0" + refX="0" + id="Arrow2Mend" + style="overflow:visible" + inkscape:isstock="true"> + <path + 
id="path1200" + style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="scale(-0.6)" + inkscape:connector-curvature="0" /> + </marker> + <marker + inkscape:stockid="Arrow2Mend" + orient="auto" + refY="0" + refX="0" + id="Arrow2Mend-8" + style="overflow:visible" + inkscape:isstock="true"> + <path + inkscape:connector-curvature="0" + id="path1200-9" + style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="scale(-0.6)" /> + </marker> + <marker + inkscape:stockid="Arrow2Mend" + orient="auto" + refY="0" + refX="0" + id="Arrow2Mend-8-3" + style="overflow:visible" + inkscape:isstock="true"> + <path + inkscape:connector-curvature="0" + id="path1200-9-6" + style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="scale(-0.6)" /> + </marker> + <marker + inkscape:stockid="Arrow2Mend" + orient="auto" + refY="0" + refX="0" + id="Arrow2Mend-8-3-2" + style="overflow:visible" + inkscape:isstock="true"> + <path + inkscape:connector-curvature="0" + id="path1200-9-6-9" + style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="scale(-0.6)" /> + </marker> + <marker + inkscape:stockid="Arrow2Mend" + orient="auto" + refY="0" + refX="0" + id="Arrow2Mend-8-3-2-1" + style="overflow:visible" + inkscape:isstock="true"> + <path + inkscape:connector-curvature="0" + id="path1200-9-6-9-9" + style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="scale(-0.6)" /> + </marker> + <marker + inkscape:stockid="Arrow2Mend" + orient="auto" + refY="0" + refX="0" + id="Arrow2Mend-8-3-2-7" + style="overflow:visible" + inkscape:isstock="true"> + <path + inkscape:connector-curvature="0" + id="path1200-9-6-9-8" + style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="scale(-0.6)" /> + </marker> + <marker + inkscape:stockid="Arrow2Mend" + orient="auto" + refY="0" + refX="0" + id="Arrow2Mend-8-3-4" + style="overflow:visible" + inkscape:isstock="true"> + <path + inkscape:connector-curvature="0" + id="path1200-9-6-5" + style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="scale(-0.6)" /> + </marker> + <marker + inkscape:stockid="Arrow2Mend" + orient="auto" 
+ refY="0" + refX="0" + id="Arrow2Mend-8-0" + style="overflow:visible" + inkscape:isstock="true"> + <path + inkscape:connector-curvature="0" + id="path1200-9-3" + style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="scale(-0.6)" /> + </marker> + <marker + inkscape:stockid="Arrow2Mend" + orient="auto" + refY="0" + refX="0" + id="Arrow2Mend-6" + style="overflow:visible" + inkscape:isstock="true"> + <path + inkscape:connector-curvature="0" + id="path1200-1" + style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="scale(-0.6)" /> + </marker> + <marker + inkscape:stockid="Arrow2Mend" + orient="auto" + refY="0" + refX="0" + id="Arrow2Mend-8-3-2-6" + style="overflow:visible" + inkscape:isstock="true"> + <path + inkscape:connector-curvature="0" + id="path1200-9-6-9-1" + style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="scale(-0.6)" /> + </marker> + <marker + inkscape:stockid="Arrow2Mend" + orient="auto" + refY="0" + refX="0" + id="Arrow2Mend-8-0-7" + style="overflow:visible" + inkscape:isstock="true"> + <path + inkscape:connector-curvature="0" + id="path1200-9-3-4" + style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="scale(-0.6)" /> + </marker> + <marker + inkscape:stockid="Arrow2Mend" + orient="auto" + refY="0" + refX="0" + id="Arrow2Mend-6-3" + style="overflow:visible" + inkscape:isstock="true"> + <path + inkscape:connector-curvature="0" + id="path1200-1-0" + style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="scale(-0.6)" /> + </marker> + <marker + inkscape:stockid="Arrow2Mend" + orient="auto" + refY="0" + refX="0" + id="Arrow2Mend-8-3-2-8" + style="overflow:visible" + inkscape:isstock="true"> + <path + inkscape:connector-curvature="0" + id="path1200-9-6-9-6" + style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="scale(-0.6)" /> + </marker> + <marker + inkscape:stockid="Arrow2Mend" + orient="auto" + refY="0" + refX="0" + id="Arrow2Mend-3" + style="overflow:visible" + inkscape:isstock="true"> + <path + id="path1200-6" + style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="scale(-0.6)" + 
inkscape:connector-curvature="0" /> + </marker> + <marker + inkscape:stockid="Arrow2Mend" + orient="auto" + refY="0" + refX="0" + id="marker8858-3" + style="overflow:visible" + inkscape:isstock="true"> + <path + id="path8616-5" + style="fill:#00ffcc;fill-opacity:1;fill-rule:evenodd;stroke:#00ffcc;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="scale(-0.6)" + inkscape:connector-curvature="0" /> + </marker> + <marker + inkscape:stockid="Arrow2Mend" + orient="auto" + refY="0" + refX="0" + id="Arrow2Mend-8-3-3" + style="overflow:visible" + inkscape:isstock="true"> + <path + inkscape:connector-curvature="0" + id="path1200-9-6-56" + style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="scale(-0.6)" /> + </marker> + <marker + inkscape:stockid="Arrow2Mend" + orient="auto" + refY="0" + refX="0" + id="Arrow2Mend-8-0-2" + style="overflow:visible" + inkscape:isstock="true"> + <path + inkscape:connector-curvature="0" + id="path1200-9-3-9" + style="fill:#008000;fill-opacity:1;fill-rule:evenodd;stroke:#008000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="scale(-0.6)" /> + </marker> + </defs> + <sodipodi:namedview + id="base" + pagecolor="#ffffff" + bordercolor="#666666" + borderopacity="1.0" + inkscape:pageopacity="0.0" + inkscape:pageshadow="2" + inkscape:zoom="2.8" + inkscape:cx="603.80172" + inkscape:cy="404.14319" + inkscape:document-units="mm" + inkscape:current-layer="layer1" + showgrid="false" + inkscape:window-width="3840" + inkscape:window-height="2096" + inkscape:window-x="0" + inkscape:window-y="27" + inkscape:window-maximized="1" + showguides="false" + fit-margin-top="0" + fit-margin-left="0" + fit-margin-right="0" + fit-margin-bottom="0" + units="px" + inkscape:snap-global="false" /> + <metadata + id="metadata5"> + <rdf:RDF> + <cc:Work + rdf:about=""> + <dc:format>image/svg+xml</dc:format> + <dc:type + rdf:resource="http://purl.org/dc/dcmitype/StillImage" /> + <dc:title /> + </cc:Work> + </rdf:RDF> + </metadata> + <g + inkscape:label="Layer 1" + inkscape:groupmode="layer" + id="layer1" + transform="translate(419.79645,20.103767)"> + <path + style="fill:#008000;stroke:#008000;stroke-width:0.59275198;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-8-3-2-7)" + d="m -340.37552,57.5332 h -14.81024" + id="path1171-7-1-3-0" + inkscape:connector-curvature="0" /> + <path + style="fill:#008000;stroke:#008000;stroke-width:0.59715915;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-8-3-4)" + d="m -293.23443,57.5332 h -15.03129" + id="path1171-7-1-32" + inkscape:connector-curvature="0" /> + <path + style="fill:#008000;stroke:#008000;stroke-width:0.59275198;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-8-0)" + d="M -246.45946,57.5332 H -261.2697" + id="path1171-7-6" + inkscape:connector-curvature="0" /> + <path + 
style="fill:#008000;stroke:#008000;stroke-width:0.59275198;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-6)" + d="m -151.28623,57.5332 h -14.81024" + id="path1171-0" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#ff0000;stroke-width:0.98222464;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-8-3-2-6)" + d="m -310.11621,-10.988713 h -35.41856" + id="path1171-7-1-3-5" + inkscape:connector-curvature="0" + sodipodi:nodetypes="cc" /> + <path + style="fill:none;stroke:#ff0000;stroke-width:1.33745635;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-8-3-2-1)" + d="M -174.42569,48.441117 V -10.963061 L -277.26548,-11.45916" + id="path1171-7-1-3-4" + inkscape:connector-curvature="0" + sodipodi:nodetypes="ccc" /> + <path + style="fill:none;stroke:#ff0000;stroke-width:0.95872593;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-8-3)" + d="m -262.79442,87.935594 h 14.32069" + id="path1171-7-1" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#ff0000;stroke-width:0.97006679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-8)" + d="m -309.80088,87.935594 h 14.44587" + id="path1171-7" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#ff0000;stroke-width:0.96187615;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend)" + d="m -356.45657,87.935594 h 14.20296" + id="path1171" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#ff0000;stroke-width:0.96061862;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-8-3-2)" + d="m -167.44556,87.935594 h 14.16584" + id="path1171-7-1-3" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#008000;stroke-width:0.87091714;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-8-0-7)" + d="M -193.82812,48.312503 V 14.168502 l -84.03577,-0.467726" + id="path1171-7-6-4" + inkscape:connector-curvature="0" + sodipodi:nodetypes="ccc" /> + <path + style="fill:none;stroke:#000000;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:4.91112339, 0.81852057;stroke-dashoffset:0;stroke-opacity:1" + d="m -133.33998,42.989657 v 5.457081" + id="path7149-3-7" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#000000;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:4.91112339, 0.81852057;stroke-dashoffset:0;stroke-opacity:1" + d="m -298.69506,162.44998 v 13.31197" + id="path7149" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#008080;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.81852057, 0.81852057;stroke-dashoffset:0;stroke-opacity:1" + d="m -242.80131,107.00907 v 9.60171" + id="path7040-5-4-7-5-6-9" + inkscape:connector-curvature="0" /> + <path + 
style="fill:none;stroke:#008080;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.81852057, 0.81852057;stroke-dashoffset:0;stroke-opacity:1" + d="m -300.34873,107.17445 v 9.6017" + id="path7040-5-4-7-5-6" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#008080;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.81852057, 0.81852057;stroke-dashoffset:0;stroke-opacity:1" + d="m -359.26293,106.99745 v 9.60171" + id="path7040-5-4-7-5" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#008080;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.81852057, 0.81852057;stroke-dashoffset:0;stroke-opacity:1" + d="M -369.74543,25.114933 V 37.991587" + id="path7040-5-4-7-6" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#008080;stroke-width:0.91136348;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.91136346, 0.91136346;stroke-dashoffset:0;stroke-opacity:1" + d="M -135.17034,93.582486 V 107.10642 H -403.93077 V 37.882965 h 109.60575 V 25.225991" + id="path7038" + inkscape:connector-curvature="0" + sodipodi:nodetypes="cccccc" /> + <path + style="fill:none;stroke:#008080;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.81852057, 0.81852057;stroke-dashoffset:0;stroke-opacity:1" + d="M -231.106,94.010086 V 106.96943" + id="path7040" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#008080;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.81852057, 0.81852057;stroke-dashoffset:0;stroke-opacity:1" + d="M -278.50224,93.844719 V 106.80406" + id="path7040-5" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#008080;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.81852057, 0.81852057;stroke-dashoffset:0;stroke-opacity:1" + d="M -325.89848,93.701083 V 106.99115" + id="path7040-5-4" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#008080;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.81852057, 0.81852057;stroke-dashoffset:0;stroke-opacity:1" + d="M -373.29471,93.899037 V 107.27179" + id="path7040-5-4-7" + inkscape:connector-curvature="0" /> + <g + id="g934" + transform="matrix(0.61872421,0,0,0.61872421,-154.16506,-3.5724799)"> + <rect + ry="2.1052283e-06" + y="84.280701" + x="-376.383" + height="72.786827" + width="49.352299" + id="rect834" + style="fill:none;stroke:#000000;stroke-width:1.16258347;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" /> + <text + id="text838" + y="95.916664" + x="-371.17261" + style="font-style:normal;font-weight:normal;font-size:10.58333302px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + xml:space="preserve"><tspan + style="stroke-width:0.26458332" + y="95.916664" + x="-371.17261" + id="tspan836" + sodipodi:role="line">DCHUB</tspan></text> + <text + id="text846" + y="121.99702" + x="-352.74997" + style="font-style:normal;font-weight:normal;font-size:10.58333302px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + 
xml:space="preserve"><tspan + style="text-align:center;text-anchor:middle;stroke-width:0.26458332" + y="121.99702" + x="-352.74997" + id="tspan844" + sodipodi:role="line">HUBP</tspan><tspan + id="tspan863" + style="text-align:center;text-anchor:middle;stroke-width:0.26458332" + y="135.22618" + x="-352.74997" + sodipodi:role="line">(n)</tspan></text> + </g> + <g + id="g942" + transform="matrix(0.61872421,0,0,0.61872421,-158.40385,-3.2216813)"> + <text + id="text838-5" + y="116.65257" + x="-269.45752" + style="font-style:normal;font-weight:normal;font-size:10.58333302px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + xml:space="preserve"><tspan + style="text-align:center;text-anchor:middle;stroke-width:0.26458332" + y="116.65257" + x="-269.45752" + id="tspan836-3" + sodipodi:role="line">DPP</tspan><tspan + id="tspan936" + style="text-align:center;text-anchor:middle;stroke-width:0.26458332" + y="129.88174" + x="-269.45752" + sodipodi:role="line">(n)</tspan></text> + <rect + ry="2.1052283e-06" + y="83.71373" + x="-293.7952" + height="72.786827" + width="49.352303" + id="rect834-5" + style="fill:none;stroke:#000000;stroke-width:1.16258347;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" /> + </g> + <g + id="g1158" + transform="matrix(0.61872421,0,0,0.61872421,-154.34048,-6.2618995)"> + <text + id="text838-5-2" + y="128.87331" + x="-200.18195" + style="font-style:normal;font-weight:normal;font-size:10.58333302px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + xml:space="preserve"><tspan + id="tspan936-1" + style="text-align:center;text-anchor:middle;stroke-width:0.26458332" + y="128.87331" + x="-200.18195" + sodipodi:role="line">MPC</tspan></text> + <rect + ry="2.1052283e-06" + y="88.627419" + x="-224.62555" + height="72.786827" + width="49.352303" + id="rect834-5-2" + style="fill:none;stroke:#000000;stroke-width:1.16258347;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" /> + </g> + <g + id="g1153" + transform="matrix(0.61872421,0,0,0.61872421,-108.51628,-6.4957668)"> + <text + id="text838-5-2-7" + y="129.2513" + x="-120.96272" + style="font-style:normal;font-weight:normal;font-size:10.58333302px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + xml:space="preserve"><tspan + id="tspan936-1-0" + style="text-align:center;text-anchor:middle;stroke-width:0.26458332" + y="129.2513" + x="-120.96272" + sodipodi:role="line">OPTC</tspan></text> + <rect + ry="2.1052283e-06" + y="89.005402" + x="-145.62854" + height="72.786827" + width="49.352306" + id="rect834-5-2-9" + style="fill:none;stroke:#000000;stroke-width:1.16258347;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" /> + </g> + <g + id="g1148" + transform="matrix(0.61872421,0,0,0.61872421,-105.25474,-7.6650796)"> + <text + id="text838-5-2-7-3" + y="131.14117" + x="-48.981136" + style="font-style:normal;font-weight:normal;font-size:10.58333302px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + xml:space="preserve"><tspan + id="tspan936-1-0-6" + style="text-align:center;text-anchor:middle;stroke-width:0.26458332" + y="131.14117" + x="-48.981136" + 
sodipodi:role="line">DIO</tspan></text> + <rect + ry="2.1052283e-06" + y="90.895279" + x="-73.435081" + height="72.786827" + width="49.352306" + id="rect834-5-2-9-0" + style="fill:none;stroke:#000000;stroke-width:1.16258347;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" /> + </g> + <g + id="g1133" + transform="matrix(0.61872421,0,0,0.61872421,-181.52704,-7.6650796)"> + <text + id="text838-5-2-6" + y="241.13223" + x="-286.96921" + style="font-style:normal;font-weight:normal;font-size:10.58333302px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + xml:space="preserve"><tspan + id="tspan936-1-2" + style="text-align:center;text-anchor:middle;stroke-width:0.26458332" + y="241.13223" + x="-286.96921" + sodipodi:role="line">DCCG</tspan></text> + <rect + ry="2.1052283e-06" + y="200.88634" + x="-311.56009" + height="72.786827" + width="49.352306" + id="rect834-5-2-6" + style="fill:none;stroke:#000000;stroke-width:1.16258347;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" /> + </g> + <g + id="g1138" + transform="matrix(0.61872421,0,0,0.61872421,-181.52704,-7.6650796)"> + <text + id="text838-5-2-6-1" + y="241.81844" + x="-190.55942" + style="font-style:normal;font-weight:normal;font-size:10.58333302px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + xml:space="preserve"><tspan + id="tspan936-1-2-8" + style="text-align:center;text-anchor:middle;stroke-width:0.26458332" + y="241.81844" + x="-190.55942" + sodipodi:role="line">DMU</tspan></text> + <rect + ry="2.1052283e-06" + y="201.6423" + x="-215.17615" + height="72.786827" + width="49.352306" + id="rect834-5-2-6-7" + style="fill:none;stroke:#000000;stroke-width:1.16258347;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" /> + </g> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:6.54816437px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.1637041" + x="-297.75696" + y="109.44505" + id="text1063"><tspan + sodipodi:role="line" + id="tspan1061" + x="-297.75696" + y="115.23865" + style="stroke-width:0.1637041" /></text> + <g + id="g1143" + transform="matrix(0.61872421,0,0,0.61872421,-181.52704,-8.9747125)"> + <text + id="text838-5-2-6-1-9" + y="243.02728" + x="-99.967323" + style="font-style:normal;font-weight:normal;font-size:10.58333302px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + xml:space="preserve"><tspan + id="tspan936-1-2-8-2" + style="text-align:center;text-anchor:middle;stroke-width:0.26458332" + y="243.02728" + x="-99.967323" + sodipodi:role="line">AZ</tspan></text> + <rect + ry="2.1052283e-06" + y="202.77623" + x="-124.83984" + height="72.786827" + width="49.352306" + id="rect834-5-2-6-7-0" + style="fill:none;stroke:#000000;stroke-width:1.16258347;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" /> + </g> + <g + id="g1169" + transform="matrix(0.61872421,0,0,0.61872421,-154.16506,1.4555785)"> + <text + id="text838-5-2-6-2" + y="5.9612885" + x="-348.74365" + 
style="font-style:normal;font-weight:normal;font-size:10.58333302px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + xml:space="preserve"><tspan + id="tspan936-1-2-3" + style="text-align:center;text-anchor:middle;stroke-width:0.26458332" + y="5.9612885" + x="-348.74365" + sodipodi:role="line">MMHUBBUB</tspan></text> + <rect + ry="2.1010696e-06" + y="-34.142948" + x="-384.64743" + height="72.643044" + width="72.096924" + id="rect834-5-2-6-75" + style="fill:none;stroke:#000000;stroke-width:1.40378118;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" /> + </g> + <g + id="g1164" + transform="matrix(0.61872421,0,0,0.61872421,-154.16506,-7.6650796)"> + <text + id="text838-5-2-6-9" + y="13.465075" + x="-227.30836" + style="font-style:normal;font-weight:normal;font-size:10.58333302px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + xml:space="preserve"><tspan + id="tspan936-1-2-2" + style="text-align:center;text-anchor:middle;stroke-width:0.26458332" + y="13.465075" + x="-227.30836" + sodipodi:role="line">DWB</tspan><tspan + id="tspan1128" + style="text-align:center;text-anchor:middle;stroke-width:0.26458332" + y="26.694241" + x="-227.30836" + sodipodi:role="line">(n)</tspan></text> + <rect + ry="2.1052283e-06" + y="-19.473768" + x="-251.83983" + height="72.786827" + width="49.352306" + id="rect834-5-2-6-2" + style="fill:none;stroke:#000000;stroke-width:1.16258347;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" /> + </g> + <path + style="fill:none;stroke:#000000;stroke-width:0.91371936;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:5.48231601, 0.91371934;stroke-dashoffset:0;stroke-opacity:1" + d="m -358.95963,161.63019 v 14.12431 h 250.20395 V 43.149938 H -361.845 V 25.478973" + id="path7147" + inkscape:connector-curvature="0" + sodipodi:nodetypes="cccccc" /> + <path + style="fill:none;stroke:#000000;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:4.91112339, 0.81852057;stroke-dashoffset:0;stroke-opacity:1" + d="m -242.92533,161.58513 v 14.05612" + id="path7149-3" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#000000;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:4.91112339, 0.81852057;stroke-dashoffset:0;stroke-opacity:1" + d="m -184.37695,42.955607 v 5.457082" + id="path7149-3-7-4" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#000000;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:4.91112339, 0.81852057;stroke-dashoffset:0;stroke-opacity:1" + d="m -277.36283,43.141644 v 5.457082" + id="path7149-3-7-5" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#000000;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:4.91112339, 0.81852057;stroke-dashoffset:0;stroke-opacity:1" + d="M -325.48437,42.976278 V 48.43336" + id="path7149-3-7-2" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#000000;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:4.91112339, 0.81852057;stroke-dashoffset:0;stroke-opacity:1" + d="m -361.86492,43.141644 v 5.457083" + 
id="path7149-3-7-54" + inkscape:connector-curvature="0" /> + <path + style="fill:#008000;stroke:#008000;stroke-width:0.46329758;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-6-3)" + d="m -147.58542,-8.2978166 h -9.04766" + id="path1171-0-7" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#ff0000;stroke-width:0.98222464;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-8-3-2-8)" + d="m -157.13421,-1.6500501 h 8.66407" + id="path1171-7-1-3-8" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#000000;stroke-width:0.74503672;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:4.47022031, 0.74503672;stroke-dashoffset:0;stroke-opacity:1" + d="m -148.50314,4.9845652 h -7.91265" + id="path7149-3-7-8" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#008080;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.81852057, 0.81852057;stroke-dashoffset:0;stroke-opacity:1" + d="m -157.59442,11.623513 h 10.26991" + id="path7040-4" + inkscape:connector-curvature="0" /> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:6.54816437px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.1637041" + x="-142.72867" + y="-6.9685979" + id="text12079"><tspan + sodipodi:role="line" + id="tspan12077" + x="-142.72867" + y="-6.9685979" + style="font-size:4.80198765px;stroke-width:0.1637041">Global sync</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:6.54816437px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.1637041" + x="-142.93031" + y="0.13578746" + id="text12079-3"><tspan + sodipodi:role="line" + id="tspan12077-1" + x="-142.93031" + y="0.13578746" + style="font-size:4.80198765px;stroke-width:0.1637041">Pixel data</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:6.54816437px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.1637041" + x="-142.77556" + y="6.3093324" + id="text12079-3-4"><tspan + sodipodi:role="line" + id="tspan12077-1-9" + x="-142.77556" + y="6.3093324" + style="font-size:4.80198765px;stroke-width:0.1637041">Sideband signal</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:6.54816437px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.1637041" + x="-142.72867" + y="12.948278" + id="text12079-3-4-2"><tspan + sodipodi:role="line" + id="tspan12077-1-9-0" + x="-142.72867" + y="12.948278" + style="font-size:4.80198765px;stroke-width:0.1637041">Config. 
Bus</tspan></text> + <path + style="fill:none;stroke:#aa00d4;stroke-width:1.32291663;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#marker8858)" + d="m -406.68795,73.185276 h 14.20296" + id="path1171-75" + inkscape:connector-curvature="0" /> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + x="-420.21503" + y="75.065918" + id="text8862"><tspan + sodipodi:role="line" + id="tspan8860" + x="-420.21503" + y="75.065918" + style="font-size:6.3499999px;stroke-width:0.26458332">SDP</tspan></text> + <path + style="fill:none;stroke:#00ffcc;stroke-width:1.25980031;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#marker8858-3)" + d="m -119.19923,72.243805 h 12.88004" + id="path1171-75-6" + inkscape:connector-curvature="0" /> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + x="-104.87327" + y="74.54258" + id="text8862-2"><tspan + sodipodi:role="line" + id="tspan8860-9" + x="-104.87327" + y="74.54258" + style="font-size:6.3499999px;stroke-width:0.26458332">Monitor</tspan></text> + <g + id="g6280" + transform="translate(-133.43389,-37.35791)"> + <text + id="text838-5-2-7-6" + y="110.67171" + x="-97.4758" + style="font-style:normal;font-weight:normal;font-size:6.54816437px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.16370411" + xml:space="preserve"><tspan + id="tspan936-1-0-7" + style="text-align:center;text-anchor:middle;stroke-width:0.16370411" + y="110.67171" + x="-97.4758" + sodipodi:role="line">OPP</tspan></text> + <rect + ry="1.3025557e-06" + y="85.770599" + x="-112.73714" + height="45.034973" + width="30.535467" + id="rect834-5-2-9-5" + style="fill:none;stroke:#000000;stroke-width:0.71931857;stroke-linecap:round;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none" /> + </g> + <path + style="fill:#008000;stroke:#008000;stroke-width:0.59275198;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-8-0-2)" + d="m -199.6735,57.600919 h -14.81024" + id="path1171-7-6-1" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#ff0000;stroke-width:0.95872593;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow2Mend-8-3-3)" + d="m -214.95012,88.003315 h 14.32069" + id="path1171-7-1-2" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#008080;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.81852058, 0.81852058;stroke-dashoffset:0;stroke-opacity:1" + d="M -182.99565,94.057598 V 107.01694" + id="path7040-7" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#000000;stroke-width:0.81852055;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:4.91112344, 0.81852058;stroke-dashoffset:0;stroke-opacity:1" + d="m -231.7616,43.563759 v 5.457082" + id="path7149-3-7-4-0" + inkscape:connector-curvature="0" /> + <g + 
aria-label="[" + transform="matrix(0,-1,0.74237844,0,14.567595,39.540924)" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + id="text6872"> + <path + d="m -65.936923,-548.78511 h 8.816294 v 2.79112 h -6.82247 v 176.34952 h 6.82247 v 2.41314 h -8.816294 z" + style="font-size:50.79999924px;stroke-width:0.26458332" + id="path6874" + inkscape:connector-curvature="0" + sodipodi:nodetypes="ccccccccc" /> + </g> + <g + aria-label="[" + transform="rotate(-90,182.49521,-144.01791)" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + id="text6872-3"> + <path + d="m -63.825546,-623.34091 h 7.228794 v 2.26195 h -5.764137 l 0,127.08032 h 5.764137 v 1.88397 h -7.228794 z" + style="font-size:50.79999924px;stroke-width:0.26458332" + id="path6874-6" + inkscape:connector-curvature="0" + sodipodi:nodetypes="ccccccccc" /> + </g> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + x="-359.80389" + y="99.104233" + id="text6929"><tspan + sodipodi:role="line" + id="tspan6927" + x="-359.80389" + y="99.104233" + style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:5.64444447px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';stroke-width:0.26458332">dc_plane</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + x="-223.56163" + y="99.142021" + id="text6933"><tspan + sodipodi:role="line" + id="tspan6931" + x="-223.56163" + y="99.142021" + style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:5.64444447px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';stroke-width:0.26458332">dc_stream</tspan></text> + <g + aria-label="[" + transform="matrix(0,1,1,0,153.30551,96.566025)" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + id="text6872-35"> + <path + d="m -65.936923,-545.95029 h 8.816294 v 2.79112 h -6.898066 v 271.78851 h 6.898066 v 2.41314 h -8.816294 z" + style="font-size:50.79999924px;stroke-width:0.26458332" + id="path6874-62" + inkscape:connector-curvature="0" + sodipodi:nodetypes="ccccccccc" /> + </g> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + x="-267.43958" + y="28.5028" + id="text6933-9"><tspan + sodipodi:role="line" + id="tspan6931-1" + x="-267.43958" + y="28.5028" + style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:5.64444447px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';stroke-width:0.26458332">dc_state</tspan></text> + <g + aria-label="[" + 
transform="matrix(0,0.98158883,-1.0187565,0,0,-7.4835468)" + style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:6.62759447px;line-height:1.25;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.5522995" + id="text6973"> + <path + d="m 23.679381,144.30265 h 3.028123 v 1.29445 h -1.820839 v 7.91629 h 1.820839 v 1.29445 h -3.028123 z" + style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:11.78239059px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';stroke-width:0.5522995" + id="path6975" + inkscape:connector-curvature="0" + sodipodi:nodetypes="ccccccccc" /> + </g> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + x="-142.71655" + y="18.955769" + id="text6980"><tspan + sodipodi:role="line" + id="tspan6978" + x="-142.71655" + y="18.955769" + style="font-size:4.58611107px;stroke-width:0.26458332">Code struct</tspan></text> + <g + aria-label="[" + transform="rotate(-90,94.826273,-58.762727)" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + id="text6872-35-2"> + <path + d="m -66.881863,-308.95922 h 7.115401 l 0,1.69499 h -5.197173 v 42.03568 h 5.197173 v 1.78948 h -7.115401 z" + style="font-size:50.79999924px;stroke-width:0.26458332" + id="path6874-62-7" + inkscape:connector-curvature="0" + sodipodi:nodetypes="ccccccccc" /> + </g> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + x="-134.09625" + y="99.354439" + id="text6933-9-0"><tspan + sodipodi:role="line" + id="tspan6931-1-9" + x="-134.09625" + y="99.354439" + style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:5.64444447px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';stroke-width:0.26458332">dc_link</tspan></text> + <g + aria-label="}" + transform="rotate(90,-145.27371,-140.09832)" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#3771c8;fill-opacity:1;stroke:none;stroke-width:0.26458332" + id="text1003"> + <path + d="m 102.58571,58.211269 h 0.86816 c 1.15755,0 3.11267,-0.177767 3.45994,-0.5333 0.35553,-0.355534 1.28925,-1.124479 1.28925,-2.306836 V -61.475482 c 0,-1.289844 0.18603,-2.228288 0.5581,-2.815332 0.37207,-0.587044 0.26105,-0.992187 1.17882,-1.215429 -0.91777,-0.206706 -0.80675,-0.603581 -1.17882,-1.190625 -0.37207,-0.587045 -0.5581,-1.529623 -0.5581,-2.827735 v -3.075781 c 0,-1.174088 -0.93372,-1.938899 -1.28925,-2.294433 -0.34727,-0.363802 -2.30239,-0.545703 -3.45994,-0.545703 h -0.86816 v -1.773536 h 0.78134 c 2.05879,0 4.63403,0.305924 5.32029,0.917774 0.69453,0.60358 1.0418,1.81901 1.0418,3.646289 v 2.976562 c 0,1.231966 0.22324,2.087728 0.66973,2.567285 0.44648,0.471289 5.80035,0.706934 6.97444,0.706934 h 0.76894 v 1.773535 h -0.76894 c -1.17409,0 -6.52796,0.239778 -6.97444,0.719336 -0.44649,0.479557 
-0.66973,1.343587 -0.66973,2.59209 V 55.420742 c 0,1.827279 -0.34727,3.046842 -1.0418,3.658691 -0.68626,0.611849 -3.2615,0.917774 -5.32029,0.917774 h -0.78134 z" + style="font-size:25.39999962px;fill:#3771c8;stroke-width:0.26458332" + id="path1005" + inkscape:connector-curvature="0" + sodipodi:nodetypes="cscsscccsscsccscsscsccscsscscc" /> + </g> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + x="-200.59984" + y="129.60852" + id="text1010"><tspan + sodipodi:role="line" + id="tspan1008" + x="-200.59984" + y="129.60852" + style="font-style:italic;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:6.3499999px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold Italic';text-align:center;text-anchor:middle;stroke-width:0.26458332">Floating point</tspan><tspan + sodipodi:role="line" + x="-200.59984" + y="137.54602" + style="font-size:6.3499999px;text-align:center;text-anchor:middle;stroke-width:0.26458332" + id="tspan1059">calculation</tspan></text> + <g + aria-label="}" + transform="rotate(90,-94.294068,-92.593178)" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#3771c8;fill-opacity:1;stroke:none;stroke-width:0.26458332" + id="text1003-5"> + <path + d="m 102.58571,58.211269 h 0.86816 c 1.15755,0 3.11267,-0.177767 3.45994,-0.5333 0.35553,-0.355534 1.10026,-1.124479 1.10026,-2.306836 V -44.637502 c 0,-1.289844 0.18603,-2.228288 0.5581,-2.815332 0.37207,-0.587044 0.45004,-0.992187 1.36781,-1.215429 -0.91777,-0.206706 -0.99574,-0.603581 -1.36781,-1.190625 -0.37207,-0.587045 -0.5581,-1.529623 -0.5581,-2.827735 v -19.913761 c 0,-1.174088 -0.74473,-1.938899 -1.10026,-2.294433 -0.34727,-0.363802 -2.30239,-0.545703 -3.45994,-0.545703 h -0.86816 v -1.773536 h 0.78134 c 2.05879,0 4.63403,0.305924 5.32029,0.917774 0.69453,0.60358 1.0418,1.81901 1.0418,3.646289 v 19.814542 c 0,1.231966 0.22324,2.087728 0.66973,2.567285 0.44648,0.471289 1.25677,0.706934 2.43086,0.706934 h 0.76894 v 1.773535 h -0.76894 c -1.17409,0 -1.98438,0.239778 -2.43086,0.719336 -0.44649,0.479557 -0.66973,1.343587 -0.66973,2.59209 v 99.897013 c 0,1.827279 -0.34727,3.046842 -1.0418,3.658691 -0.68626,0.611849 -3.2615,0.917774 -5.32029,0.917774 h -0.78134 z" + style="font-size:25.39999962px;fill:#3771c8;stroke-width:0.26458332" + id="path1005-3" + inkscape:connector-curvature="0" + sodipodi:nodetypes="cscsscccsscsccscsscsccscsscscc" /> + </g> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + x="-137.43764" + y="122.46283" + id="text1010-5"><tspan + sodipodi:role="line" + id="tspan1008-6" + x="-137.43764" + y="122.46283" + style="font-size:6.3499999px;text-align:center;text-anchor:middle;stroke-width:0.26458332">bit-depth</tspan><tspan + sodipodi:role="line" + x="-137.43764" + y="130.40033" + style="font-size:6.3499999px;text-align:center;text-anchor:middle;stroke-width:0.26458332" + id="tspan1057">reduction/dither</tspan></text> + <text + xml:space="preserve" + 
style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#3771c8;fill-opacity:1;stroke:none;stroke-width:0.26458332;" + x="21.087883" + y="155.64751" + id="text1064" + transform="rotate(90)"><tspan + sodipodi:role="line" + id="tspan1062" + x="21.087883" + y="155.64751" + style="font-size:9.87777805px;stroke-width:0.26458332;fill:#3771c8;">}</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + x="-142.71655" + y="25.939869" + id="text6980-9"><tspan + sodipodi:role="line" + id="tspan6978-1" + x="-142.71655" + y="25.939869" + style="font-size:4.58611107px;stroke-width:0.26458332">Notes</tspan></text> + </g> +</svg> diff --git a/Documentation/gpu/amdgpu/display/dcn-overview.rst b/Documentation/gpu/amdgpu/display/dcn-overview.rst new file mode 100644 index 000000000000..f98624d7828e --- /dev/null +++ b/Documentation/gpu/amdgpu/display/dcn-overview.rst @@ -0,0 +1,171 @@ +======================= +Display Core Next (DCN) +======================= + +To equip our readers with the basic knowledge of how AMD Display Core Next +(DCN) works, we need to start with an overview of the hardware pipeline. Below +you can see a picture that provides a DCN overview, keep in mind that this is a +generic diagram, and we have variations per ASIC. + +.. kernel-figure:: dc_pipeline_overview.svg + +Based on this diagram, we can pass through each block and briefly describe +them: + +* **Display Controller Hub (DCHUB)**: This is the gateway between the Scalable + Data Port (SDP) and DCN. This component has multiple features, such as memory + arbitration, rotation, and cursor manipulation. + +* **Display Pipe and Plane (DPP)**: This block provides pre-blend pixel + processing such as color space conversion, linearization of pixel data, tone + mapping, and gamut mapping. + +* **Multiple Pipe/Plane Combined (MPC)**: This component performs blending of + multiple planes, using global or per-pixel alpha. + +* **Output Pixel Processing (OPP)**: Process and format pixels to be sent to + the display. + +* **Output Pipe Timing Combiner (OPTC)**: It generates time output to combine + streams or divide capabilities. CRC values are generated in this block. + +* **Display Output (DIO)**: Codify the output to the display connected to our + GPU. + +* **Display Writeback (DWB)**: It provides the ability to write the output of + the display pipe back to memory as video frames. + +* **Multi-Media HUB (MMHUBBUB)**: Memory controller interface for DMCUB and DWB + (Note that DWB is not hooked yet). + +* **DCN Management Unit (DMU)**: It provides registers with access control and + interrupts the controller to the SOC host interrupt unit. This block includes + the Display Micro-Controller Unit - version B (DMCUB), which is handled via + firmware. + +* **DCN Clock Generator Block (DCCG)**: It provides the clocks and resets + for all of the display controller clock domains. + +* **Azalia (AZ)**: Audio engine. + +The above diagram is an architecture generalization of DCN, which means that +every ASIC has variations around this base model. Notice that the display +pipeline is connected to the Scalable Data Port (SDP) via DCHUB; you can see +the SDP as the element from our Data Fabric that feeds the display pipe. 
+ +Always approach the DCN architecture as something flexible that can be +configured and reconfigured in multiple ways; in other words, each block can be +setup or ignored accordingly with userspace demands. For example, if we +want to drive an 8k@60Hz with a DSC enabled, our DCN may require 4 DPP and 2 +OPP. It is DC's responsibility to drive the best configuration for each +specific scenario. Orchestrate all of these components together requires a +sophisticated communication interface which is highlighted in the diagram by +the edges that connect each block; from the chart, each connection between +these blocks represents: + +1. Pixel data interface (red): Represents the pixel data flow; +2. Global sync signals (green): It is a set of synchronization signals composed + by VStartup, VUpdate, and VReady; +3. Config interface: Responsible to configure blocks; +4. Sideband signals: All other signals that do not fit the previous one. + +These signals are essential and play an important role in DCN. Nevertheless, +the Global Sync deserves an extra level of detail described in the next +section. + +All of these components are represented by a data structure named dc_state. +From DCHUB to MPC, we have a representation called dc_plane; from MPC to OPTC, +we have dc_stream, and the output (DIO) is handled by dc_link. Keep in mind +that HUBP accesses a surface using a specific format read from memory, and our +dc_plane should work to convert all pixels in the plane to something that can +be sent to the display via dc_stream and dc_link. + +Front End and Back End +---------------------- + +Display pipeline can be broken down into two components that are usually +referred as **Front End (FE)** and **Back End (BE)**, where FE consists of: + +* DCHUB (Mainly referring to a subcomponent named HUBP) +* DPP +* MPC + +On the other hand, BE consist of + +* OPP +* OPTC +* DIO (DP/HDMI stream encoder and link encoder) + +OPP and OPTC are two joining blocks between FE and BE. On a side note, this is +a one-to-one mapping of the link encoder to PHY, but we can configure the DCN +to choose which link encoder to connect to which PHY. FE's main responsibility +is to change, blend and compose pixel data, while BE's job is to frame a +generic pixel stream to a specific display's pixel stream. + +Data Flow +--------- + +Initially, data is passed in from VRAM through Data Fabric (DF) in native pixel +formats. Such data format stays through till HUBP in DCHUB, where HUBP unpacks +different pixel formats and outputs them to DPP in uniform streams through 4 +channels (1 for alpha + 3 for colors). + +The Converter and Cursor (CNVC) in DPP would then normalize the data +representation and convert them to a DCN specific floating-point format (i.e., +different from the IEEE floating-point format). In the process, CNVC also +applies a degamma function to transform the data from non-linear to linear +space to relax the floating-point calculations following. Data would stay in +this floating-point format from DPP to OPP. + +Starting OPP, because color transformation and blending have been completed +(i.e alpha can be dropped), and the end sinks do not require the precision and +dynamic range that floating points provide (i.e. all displays are in integer +depth format), bit-depth reduction/dithering would kick in. In OPP, we would +also apply a regamma function to introduce the gamma removed earlier back. +Eventually, we output data in integer format at DIO. 
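+
+Before moving on, it may help to make the dc_state hierarchy described above
+a bit more concrete. The sketch below is deliberately simplified, and the
+field names and counts are illustrative only; the real definitions live in
+the DC headers (e.g., drivers/gpu/drm/amd/display/dc/dc.h) and carry far
+more state::
+
+    /* Illustrative sketch only, not the actual DC definitions. */
+    struct dc_plane {                    /* one surface: DCHUB to MPC */
+            unsigned long long address;  /* where HUBP fetches pixels */
+            int format;                  /* memory pixel format to unpack */
+    };
+
+    struct dc_stream {                   /* blended output: MPC to OPTC */
+            struct dc_plane *planes[4];  /* planes composed into this stream */
+            int plane_count;
+    };
+
+    struct dc_link {                     /* display connection driven by DIO */
+            int signal;                  /* e.g., DP or HDMI */
+    };
+
+    struct dc_state {                    /* one complete display configuration */
+            struct dc_stream *streams[6];
+            int stream_count;
+    };
+
+Conceptually, applying a new dc_state is what reconfigures the pipeline,
+which ties into the double-buffered register updates described next.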
+
+Global Sync
+-----------
+
+Many DCN registers are double buffered, most importantly the surface address.
+This allows us to update DCN hardware atomically for page flips, as well as
+for most other updates that don't require enabling or disabling of new pipes.
+
+(Note: There are many scenarios where DC will decide to reserve extra pipes
+in order to support outputs that need a very high pixel clock, or for
+power saving purposes.)
+
+These atomic register updates are driven by global sync signals in DCN. In
+order to understand how atomic updates interact with DCN hardware, and how
+DCN signals page flip and vblank events, it is helpful to understand how
+global sync is programmed.
+
+Global sync consists of three signals, VSTARTUP, VUPDATE, and VREADY. These are
+calculated by the Display Mode Library - DML (drivers/gpu/drm/amd/display/dc/dml)
+based on a large number of parameters and ensure our hardware is able to feed
+the DCN pipeline without underflows or hangs in any given system configuration.
+The global sync signals always happen during VBlank, are independent of the
+VSync signal, and do not overlap each other.
+
+VUPDATE is the only signal that is of interest to the rest of the driver stack
+or userspace clients, as it signals the point at which hardware latches to
+atomically programmed (i.e., double-buffered) registers. Even though it is
+independent of the VSync signal, we use VUPDATE to signal the VSync event as it
+provides the best indication of how atomic commits and hardware interact.
+
+Since DCN hardware is double-buffered, the DC driver is able to program the
+hardware at any point during the frame.
+
+The picture below illustrates the global sync signals:
+
+.. kernel-figure:: global_sync_vblank.svg
+
+These signals affect core DCN behavior. Programming them incorrectly will lead
+to a number of negative consequences, most of them quite catastrophic.
+
+The following picture shows how global sync allows for a mailbox style of
+updates, i.e., it allows for multiple re-configurations between VUpdate
+events, where only the last configuration programmed before the VUpdate
+signal becomes effective.
+
+.. kernel-figure:: config_example.svg
diff --git a/Documentation/gpu/amdgpu/display/display-manager.rst b/Documentation/gpu/amdgpu/display/display-manager.rst
new file mode 100644
index 000000000000..7ce31f89d9a0
--- /dev/null
+++ b/Documentation/gpu/amdgpu/display/display-manager.rst
@@ -0,0 +1,42 @@
+======================
+AMDgpu Display Manager
+======================
+
+.. contents:: Table of Contents
+   :depth: 3
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+   :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+   :internal:
+
+Lifecycle
+=========
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+   :doc: DM Lifecycle
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+   :functions: dm_hw_init dm_hw_fini
+
+Interrupts
+==========
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+   :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+   :functions: register_hpd_handlers dm_crtc_high_irq dm_pflip_high_irq
+
+Atomic Implementation
+=====================
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+   :doc: atomic
+
+..
kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c + :functions: amdgpu_dm_atomic_check amdgpu_dm_atomic_commit_tail diff --git a/Documentation/gpu/amdgpu/display/global_sync_vblank.svg b/Documentation/gpu/amdgpu/display/global_sync_vblank.svg new file mode 100644 index 000000000000..48f5dc4fd5d3 --- /dev/null +++ b/Documentation/gpu/amdgpu/display/global_sync_vblank.svg @@ -0,0 +1,485 @@ +<?xml version="1.0" encoding="UTF-8" standalone="no"?> +<!-- Created with Inkscape (http://www.inkscape.org/) --> + +<svg + xmlns:dc="http://purl.org/dc/elements/1.1/" + xmlns:cc="http://creativecommons.org/ns#" + xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:svg="http://www.w3.org/2000/svg" + xmlns="http://www.w3.org/2000/svg" + xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" + xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" + width="232.24133mm" + height="96.174995mm" + viewBox="0 0 232.24133 96.174995" + version="1.1" + id="svg8" + inkscape:version="0.92.5 (2060ec1f9f, 2020-04-08)" + sodipodi:docname="global_sync_vblank.svg"> + <defs + id="defs2"> + <marker + inkscape:stockid="Arrow2Mend" + orient="auto" + refY="0" + refX="0" + id="Arrow2Mend" + style="overflow:visible" + inkscape:isstock="true"> + <path + id="path862" + style="fill:#800080;fill-opacity:1;fill-rule:evenodd;stroke:#800080;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="scale(-0.6)" + inkscape:connector-curvature="0" /> + </marker> + <marker + inkscape:stockid="Arrow2Send" + orient="auto" + refY="0" + refX="0" + id="Arrow2Send" + style="overflow:visible" + inkscape:isstock="true"> + <path + id="path868" + style="fill:#ff00ff;fill-opacity:1;fill-rule:evenodd;stroke:#ff00ff;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="matrix(-0.3,0,0,-0.3,0.69,0)" + inkscape:connector-curvature="0" /> + </marker> + <marker + inkscape:stockid="Arrow2Lend" + orient="auto" + refY="0" + refX="0" + id="Arrow2Lend" + style="overflow:visible" + inkscape:isstock="true"> + <path + id="path856" + style="fill:#ff00ff;fill-opacity:1;fill-rule:evenodd;stroke:#ff00ff;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="matrix(-1.1,0,0,-1.1,-1.1,0)" + inkscape:connector-curvature="0" /> + </marker> + <marker + inkscape:stockid="Arrow1Lend" + orient="auto" + refY="0" + refX="0" + id="marker1719" + style="overflow:visible" + inkscape:isstock="true"> + <path + id="path1717" + d="M 0,0 5,-5 -12.5,0 5,5 Z" + style="fill:#ff00ff;fill-opacity:1;fill-rule:evenodd;stroke:#ff00ff;stroke-width:1.00000003pt;stroke-opacity:1" + transform="matrix(-0.8,0,0,-0.8,-10,0)" + inkscape:connector-curvature="0" /> + </marker> + <marker + inkscape:stockid="Arrow1Lend" + orient="auto" + refY="0" + refX="0" + id="marker1661" + style="overflow:visible" + inkscape:isstock="true"> + <path + id="path1659" + d="M 0,0 5,-5 -12.5,0 5,5 Z" + style="fill:#ff00ff;fill-opacity:1;fill-rule:evenodd;stroke:#ff00ff;stroke-width:1.00000003pt;stroke-opacity:1" + transform="matrix(-0.8,0,0,-0.8,-10,0)" + inkscape:connector-curvature="0" /> + </marker> + <marker + inkscape:isstock="true" + 
style="overflow:visible" + id="marker1311" + refX="0" + refY="0" + orient="auto" + inkscape:stockid="Arrow1Lend" + inkscape:collect="always"> + <path + transform="matrix(-0.8,0,0,-0.8,-10,0)" + style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1" + d="M 0,0 5,-5 -12.5,0 5,5 Z" + id="path1309" + inkscape:connector-curvature="0" /> + </marker> + <marker + inkscape:isstock="true" + style="overflow:visible" + id="marker1253" + refX="0" + refY="0" + orient="auto" + inkscape:stockid="Arrow1Lstart"> + <path + transform="matrix(0.8,0,0,0.8,10,0)" + style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1" + d="M 0,0 5,-5 -12.5,0 5,5 Z" + id="path1251" + inkscape:connector-curvature="0" /> + </marker> + <marker + inkscape:stockid="Arrow1Lend" + orient="auto" + refY="0" + refX="0" + id="Arrow1Lend" + style="overflow:visible" + inkscape:isstock="true" + inkscape:collect="always"> + <path + id="path838" + d="M 0,0 5,-5 -12.5,0 5,5 Z" + style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1" + transform="matrix(-0.8,0,0,-0.8,-10,0)" + inkscape:connector-curvature="0" /> + </marker> + <marker + inkscape:stockid="Arrow1Lstart" + orient="auto" + refY="0" + refX="0" + id="Arrow1Lstart" + style="overflow:visible" + inkscape:isstock="true" + inkscape:collect="always"> + <path + id="path835" + d="M 0,0 5,-5 -12.5,0 5,5 Z" + style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1" + transform="matrix(0.8,0,0,0.8,10,0)" + inkscape:connector-curvature="0" /> + </marker> + <marker + inkscape:stockid="Arrow1Send" + orient="auto" + refY="0" + refX="0" + id="Arrow1Send" + style="overflow:visible" + inkscape:isstock="true"> + <path + id="path850" + d="M 0,0 5,-5 -12.5,0 5,5 Z" + style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1" + transform="matrix(-0.2,0,0,-0.2,-1.2,0)" + inkscape:connector-curvature="0" /> + </marker> + <marker + inkscape:stockid="Arrow2Sstart" + orient="auto" + refY="0" + refX="0" + id="Arrow2Sstart" + style="overflow:visible" + inkscape:isstock="true"> + <path + id="path865" + style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="matrix(0.3,0,0,0.3,-0.69,0)" + inkscape:connector-curvature="0" /> + </marker> + <marker + inkscape:stockid="Arrow2Mend" + orient="auto" + refY="0" + refX="0" + id="Arrow2Mend-2" + style="overflow:visible" + inkscape:isstock="true"> + <path + inkscape:connector-curvature="0" + id="path862-3" + style="fill:#800080;fill-opacity:1;fill-rule:evenodd;stroke:#800080;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="scale(-0.6)" /> + </marker> + <marker + inkscape:stockid="Arrow2Mend" + orient="auto" + refY="0" + refX="0" + id="Arrow2Mend-2-5" + style="overflow:visible" + inkscape:isstock="true"> + <path + inkscape:connector-curvature="0" + id="path862-3-9" + style="fill:#800080;fill-opacity:1;fill-rule:evenodd;stroke:#800080;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1" + d="M 8.7185878,4.0337352 -2.2072895,0.01601326 
8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z" + transform="scale(-0.6)" /> + </marker> + </defs> + <sodipodi:namedview + id="base" + pagecolor="#ffffff" + bordercolor="#666666" + borderopacity="1.0" + inkscape:pageopacity="0.0" + inkscape:pageshadow="2" + inkscape:zoom="1.979899" + inkscape:cx="747.52324" + inkscape:cy="319.84503" + inkscape:document-units="mm" + inkscape:current-layer="layer1" + showgrid="true" + inkscape:window-width="3840" + inkscape:window-height="2096" + inkscape:window-x="0" + inkscape:window-y="27" + inkscape:window-maximized="1" + fit-margin-top="0" + fit-margin-left="0" + fit-margin-right="0" + fit-margin-bottom="0"> + <inkscape:grid + type="xygrid" + id="grid815" + originx="15.282997" + originy="-184.54792" /> + </sodipodi:namedview> + <metadata + id="metadata5"> + <rdf:RDF> + <cc:Work + rdf:about=""> + <dc:format>image/svg+xml</dc:format> + <dc:type + rdf:resource="http://purl.org/dc/dcmitype/StillImage" /> + <dc:title /> + </cc:Work> + </rdf:RDF> + </metadata> + <g + inkscape:label="Layer 1" + inkscape:groupmode="layer" + id="layer1" + transform="translate(15.282998,-16.277083)"> + <path + style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" + d="M 15.875,27.125001 V 16.541666 H 26.458333 V 27.125001 H 177.27084 V 16.541666 h 10.58333 v 10.583335 h 29.10416" + id="path817" + inkscape:connector-curvature="0" + sodipodi:nodetypes="ccccccccc" /> + <path + style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" + d="M 15.875,37.708334 H 44.979166 V 48.291667 H 100.54167 V 37.708334 H 206.375 v 10.583333 h 10.58333" + id="path819" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" + d="m 15.875,66.8125 h 97.89583 V 56.229167 h 7.9375 V 66.8125 h 92.60417" + id="path821" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" + d="m 15.875,85.333334 c 0,0 132.29166,0 132.29166,0 V 74.75 h 15.875 v 10.583334 h 47.625" + id="path823" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" + d="M 15.875,101.20833 H 187.85416 V 90.625 h 10.58334 v 10.58333 h 10.58333" + id="path825" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.52916667, 0.52916667;stroke-dashoffset:0;stroke-opacity:1" + d="M 100.54167,48.291667 V 111.79167" + id="path827" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.52916667, 0.52916667;stroke-dashoffset:0;stroke-opacity:1" + d="m 113.77083,66.8125 v 44.97917" + id="path829" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:0.52916667, 
0.52916667;stroke-dashoffset:0;stroke-opacity:1" + d="M 206.375,48.291667 V 109.14583" + id="path831" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#ff0000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Lstart);marker-end:url(#Arrow1Lend)" + d="m 100.54167,106.5 h 13.22916" + id="path833" + inkscape:connector-curvature="0" /> + <path + style="fill:none;stroke:#ff0000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#marker1253);marker-end:url(#marker1311)" + d="M 113.77083,106.5 H 206.375" + id="path1243" + inkscape:connector-curvature="0" /> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + x="105.83333" + y="111.79166" + id="text1405"><tspan + sodipodi:role="line" + id="tspan1403" + x="105.83333" + y="111.79166" + style="stroke-width:0.26458332">To</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + x="145.52083" + y="111.79166" + id="text1409"><tspan + sodipodi:role="line" + id="tspan1407" + x="145.52083" + y="111.79166" + style="stroke-width:0.26458332">VStartup Period</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + x="156.01123" + y="78.71875" + id="text1413"><tspan + sodipodi:role="line" + x="156.01123" + y="78.71875" + style="font-weight:bold;text-align:center;text-anchor:middle;stroke-width:0.26458332" + id="tspan1415">VUpdate</tspan><tspan + sodipodi:role="line" + x="156.01123" + y="82.6875" + style="font-weight:bold;text-align:center;text-anchor:middle;stroke-width:0.26458332" + id="tspan1440">Width</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + x="173.77611" + y="92.703873" + id="text1413-3"><tspan + sodipodi:role="line" + id="tspan1411-6" + x="173.77611" + y="92.703873" + style="font-weight:bold;text-align:center;text-anchor:middle;stroke-width:0.26458332">VReady</tspan><tspan + sodipodi:role="line" + x="173.77611" + y="96.672623" + style="font-weight:bold;text-align:center;text-anchor:middle;stroke-width:0.26458332" + id="tspan1415-7">Offset</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + x="135.78951" + y="70.78125" + id="text1413-5"><tspan + sodipodi:role="line" + x="135.78951" + y="70.78125" + style="font-weight:bold;text-align:center;text-anchor:middle;stroke-width:0.26458332" + id="tspan1440-5">VUpdate</tspan><tspan + sodipodi:role="line" + x="135.78951" + y="74.75" + style="font-weight:bold;text-align:center;text-anchor:middle;stroke-width:0.26458332" + id="tspan1465">Offset</tspan></text> + <text + xml:space="preserve" + 
style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + x="137.39433" + y="48.291664" + id="text1479"><tspan + sodipodi:role="line" + id="tspan1477" + x="137.39433" + y="48.291664" + style="font-weight:bold;stroke-width:0.26458332">VSTARTUP_START</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + x="-5.4806676" + y="22.778271" + id="text1479-1"><tspan + sodipodi:role="line" + id="tspan1477-2" + x="-5.4806676" + y="22.778271" + style="font-weight:bold;font-size:4.93888903px;stroke-width:0.26458332">VSYNC</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + x="-9.3767252" + y="45.64584" + id="text1479-1-7"><tspan + sodipodi:role="line" + id="tspan1477-2-0" + x="-9.3767252" + y="45.64584" + style="font-weight:bold;font-size:5.64444447px;stroke-width:0.26458332">VBlank</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + x="-15.310558" + y="64.92263" + id="text1479-1-7-9"><tspan + sodipodi:role="line" + id="tspan1477-2-0-3" + x="-15.310558" + y="64.92263" + style="font-weight:bold;font-size:5.64444447px;stroke-width:0.26458332">VStartup</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + x="-14.17781" + y="85.144356" + id="text1479-1-7-9-6"><tspan + sodipodi:role="line" + id="tspan1477-2-0-3-0" + x="-14.17781" + y="85.144356" + style="font-weight:bold;font-size:5.64444447px;stroke-width:0.26458332">VUpdate</tspan></text> + <text + xml:space="preserve" + style="font-style:normal;font-weight:normal;font-size:3.17499995px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332" + x="-11.052421" + y="101.39733" + id="text1479-1-7-9-6-6"><tspan + sodipodi:role="line" + id="tspan1477-2-0-3-0-2" + x="-11.052421" + y="101.39733" + style="font-weight:bold;font-size:5.64444447px;stroke-width:0.26458332">VReady</tspan></text> + <g + id="g5189" + transform="translate(269.875,-14.287499)"> + <path + sodipodi:nodetypes="cc" + inkscape:connector-curvature="0" + id="path5143" + d="m -202.40625,45.645828 3.96875,-7.9375" + style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" /> + <path + sodipodi:nodetypes="cc" + inkscape:connector-curvature="0" + id="path5143-2" + d="m -199.76042,45.645828 3.96874,-7.937499" + style="fill:none;stroke:#000000;stroke-width:0.52916676;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" /> + </g> + <g + id="g5189-3" + transform="translate(268.55209,7.9375003)"> + <path + 
sodipodi:nodetypes="cc" + inkscape:connector-curvature="0" + id="path5143-6" + d="m -202.40625,45.645828 3.96875,-7.9375" + style="fill:none;stroke:#000000;stroke-width:0.5291667;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" /> + <path + sodipodi:nodetypes="cc" + inkscape:connector-curvature="0" + id="path5143-2-1" + d="m -199.76042,45.645828 3.96874,-7.937499" + style="fill:none;stroke:#000000;stroke-width:0.52916676;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" /> + </g> + </g> +</svg> diff --git a/Documentation/gpu/amdgpu/display/index.rst b/Documentation/gpu/amdgpu/display/index.rst new file mode 100644 index 000000000000..c1fb2fb3c710 --- /dev/null +++ b/Documentation/gpu/amdgpu/display/index.rst @@ -0,0 +1,31 @@ +.. _amdgpu-display-core: + +=================================== +drm/amd/display - Display Core (DC) +=================================== + +AMD display engine is partially shared with other operating systems; for this +reason, our Display Core Driver is divided into two pieces: + +1. **Display Core (DC)** contains the OS-agnostic components. Things like + hardware programming and resource management are handled here. +2. **Display Manager (DM)** contains the OS-dependent components. Hooks to the + amdgpu base driver and DRM are implemented here. + +The display pipe is responsible for "scanning out" a rendered frame from the +GPU memory (also called VRAM, FrameBuffer, etc.) to a display. In other words, +it would: + +1. Read frame information from memory; +2. Perform required transformation; +3. Send pixel data to sink devices. + +If you want to learn more about our driver details, take a look at the below +table of content: + +.. toctree:: + + display-manager.rst + dc-debug.rst + dcn-overview.rst + dc-glossary.rst diff --git a/Documentation/gpu/amdgpu/driver-core.rst b/Documentation/gpu/amdgpu/driver-core.rst new file mode 100644 index 000000000000..ebf5932845a9 --- /dev/null +++ b/Documentation/gpu/amdgpu/driver-core.rst @@ -0,0 +1,182 @@ +============================ + Core Driver Infrastructure +============================ + +GPU Hardware Structure +====================== + +Each ASIC is a collection of hardware blocks. We refer to them as +"IPs" (Intellectual Property blocks). Each IP encapsulates certain +functionality. IPs are versioned and can also be mixed and matched. +E.g., you might have two different ASICs that both have System DMA (SDMA) 5.x IPs. +The driver is arranged by IPs. There are driver components to handle +the initialization and operation of each IP. There are also a bunch +of smaller IPs that don't really need much if any driver interaction. +Those end up getting lumped into the common stuff in the soc files. +The soc files (e.g., vi.c, soc15.c nv.c) contain code for aspects of +the SoC itself rather than specific IPs. E.g., things like GPU resets +and register access functions are SoC dependent. + +An APU contains more than just CPU and GPU, it also contains all of +the platform stuff (audio, usb, gpio, etc.). Also, a lot of +components are shared between the CPU, platform, and the GPU (e.g., +SMU, PSP, etc.). Specific components (CPU, GPU, etc.) usually have +their interface to interact with those common components. For things +like S0i3 there is a ton of coordination required across all the +components, but that is probably a bit beyond the scope of this +section. 
+
+With respect to the GPU, we have the following major IPs:
+
+GMC (Graphics Memory Controller)
+  This was a dedicated IP on older pre-vega chips, but has since
+  become somewhat decentralized on vega and newer chips. They now
+  have dedicated memory hubs for specific IPs or groups of IPs. We
+  still treat it as a single component in the driver, however, since
+  the programming model is still pretty similar. This is how the
+  different IPs on the GPU get the memory (VRAM or system memory).
+  It also provides the support for per-process GPU virtual address
+  spaces.
+
+IH (Interrupt Handler)
+  This is the interrupt controller on the GPU. All of the IPs feed
+  their interrupts into this IP and it aggregates them into a set of
+  ring buffers that the driver can parse to handle interrupts from
+  different IPs.
+
+PSP (Platform Security Processor)
+  This handles security policy for the SoC and executes trusted
+  applications, and validates and loads firmware for other blocks.
+
+SMU (System Management Unit)
+  This is the power management microcontroller. It manages the entire
+  SoC. The driver interacts with it to control power management
+  features like clocks, voltages, power rails, etc.
+
+DCN (Display Controller Next)
+  This is the display controller. It handles the display hardware.
+  It is described in more detail in :ref:`Display Core <amdgpu-display-core>`.
+
+SDMA (System DMA)
+  This is a multi-purpose DMA engine. The kernel driver uses it for
+  various things including paging and GPU page table updates. It's also
+  exposed to userspace for use by user mode drivers (OpenGL, Vulkan,
+  etc.)
+
+GC (Graphics and Compute)
+  This is the graphics and compute engine, i.e., the block that
+  encompasses the 3D pipeline and shader blocks. This is by far the
+  largest block on the GPU. The 3D pipeline has tons of sub-blocks. In
+  addition to that, it also contains the CP microcontrollers (ME, PFP,
+  CE, MEC) and the RLC microcontroller. It's exposed to userspace for
+  user mode drivers (OpenGL, Vulkan, OpenCL, etc.)
+
+VCN (Video Core Next)
+  This is the multi-media engine. It handles video and image encode and
+  decode. It's exposed to userspace for user mode drivers (VA-API,
+  OpenMAX, etc.)
+
+Graphics and Compute Microcontrollers
+-------------------------------------
+
+CP (Command Processor)
+  The name for the hardware block that encompasses the front end of the
+  GFX/Compute pipeline. It consists mainly of a bunch of microcontrollers
+  (PFP, ME, CE, MEC). The firmware that runs on these microcontrollers
+  provides the driver interface to interact with the GFX/Compute engine.
+
+  MEC (MicroEngine Compute)
+    This is the microcontroller that controls the compute queues on the
+    GFX/compute engine.
+
+  MES (MicroEngine Scheduler)
+    This is a new engine for managing queues. It is currently unused.
+
+RLC (RunList Controller)
+  This is another microcontroller in the GFX/Compute engine. It handles
+  power management related functionality within the GFX/Compute engine.
+  The name is a vestige of old hardware where it was originally added
+  and doesn't really have much relation to what the engine does now.
+
+Driver Structure
+================
+
+In general, the driver has a list of all of the IPs on a particular
+SoC and for things like init/fini/suspend/resume, more or less just
+walks the list and handles each IP.
+
+Some useful constructs:
+
+KIQ (Kernel Interface Queue)
+  This is a control queue used by the kernel driver to manage other gfx
+  and compute queues on the GFX/compute engine.
You can use it to
+  map/unmap additional queues, etc.
+
+IB (Indirect Buffer)
+  A command buffer for a particular engine. Rather than writing
+  commands directly to the queue, you can write the commands into a
+  piece of memory and then put a pointer to the memory into the queue.
+  The hardware will then follow the pointer and execute the commands in
+  the memory, before returning to the rest of the commands in the ring.
+
+.. _amdgpu_memory_domains:
+
+Memory Domains
+==============
+
+.. kernel-doc:: include/uapi/drm/amdgpu_drm.h
+   :doc: memory domains
+
+Buffer Objects
+==============
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+   :doc: amdgpu_object
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+   :internal:
+
+PRIME Buffer Sharing
+====================
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+   :doc: PRIME Buffer Sharing
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+   :internal:
+
+MMU Notifier
+============
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+   :doc: MMU Notifier
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+   :internal:
+
+AMDGPU Virtual Memory
+=====================
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+   :doc: GPUVM
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+   :internal:
+
+Interrupt Handling
+==================
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+   :doc: Interrupt Handling
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+   :internal:
+
+IP Blocks
+=========
+
+.. kernel-doc:: drivers/gpu/drm/amd/include/amd_shared.h
+   :doc: IP Blocks
+
+.. kernel-doc:: drivers/gpu/drm/amd/include/amd_shared.h
+   :identifiers: amd_ip_block_type amd_ip_funcs
diff --git a/Documentation/gpu/amdgpu/driver-misc.rst b/Documentation/gpu/amdgpu/driver-misc.rst
new file mode 100644
index 000000000000..e3d6b2fa2493
--- /dev/null
+++ b/Documentation/gpu/amdgpu/driver-misc.rst
@@ -0,0 +1,112 @@
+================================
+ Misc AMDGPU driver information
+================================
+
+GPU Product Information
+=======================
+
+Information about the GPU can be obtained on certain cards
+via sysfs.
+
+product_name
+------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+   :doc: product_name
+
+product_number
+--------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+   :doc: product_number
+
+serial_number
+-------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+   :doc: serial_number
+
+unique_id
+---------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+   :doc: unique_id
+
+GPU Memory Usage Information
+============================
+
+Various memory accounting statistics can be accessed via sysfs.
+
+mem_info_vram_total
+-------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+   :doc: mem_info_vram_total
+
+mem_info_vram_used
+------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+   :doc: mem_info_vram_used
+
+mem_info_vis_vram_total
+-----------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+   :doc: mem_info_vis_vram_total
+
+mem_info_vis_vram_used
+----------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+   :doc: mem_info_vis_vram_used
+
+mem_info_gtt_total
+------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+   :doc: mem_info_gtt_total
+
+mem_info_gtt_used
+-----------------
+
+..
kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+   :doc: mem_info_gtt_used
+
+PCIe Accounting Information
+===========================
+
+pcie_bw
+-------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+   :doc: pcie_bw
+
+pcie_replay_count
+-----------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+   :doc: pcie_replay_count
+
+GPU SmartShift Information
+==========================
+
+GPU SmartShift information is exposed via sysfs.
+
+smartshift_apu_power
+--------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+   :doc: smartshift_apu_power
+
+smartshift_dgpu_power
+---------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+   :doc: smartshift_dgpu_power
+
+smartshift_bias
+---------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+   :doc: smartshift_bias
diff --git a/Documentation/gpu/amdgpu/index.rst b/Documentation/gpu/amdgpu/index.rst
new file mode 100644
index 000000000000..a24e1cfa7407
--- /dev/null
+++ b/Documentation/gpu/amdgpu/index.rst
@@ -0,0 +1,17 @@
+==========================
+ drm/amdgpu AMDgpu driver
+==========================
+
+The drm/amdgpu driver supports all AMD Radeon GPUs based on the Graphics Core
+Next (GCN) architecture.
+
+.. toctree::
+
+   module-parameters
+   driver-core
+   display/index
+   xgmi
+   ras
+   thermal
+   driver-misc
+   amdgpu-glossary
diff --git a/Documentation/gpu/amdgpu/module-parameters.rst b/Documentation/gpu/amdgpu/module-parameters.rst
new file mode 100644
index 000000000000..ea538c8dda35
--- /dev/null
+++ b/Documentation/gpu/amdgpu/module-parameters.rst
@@ -0,0 +1,7 @@
+===================
+ Module Parameters
+===================
+
+The amdgpu driver supports the following module parameters:
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
diff --git a/Documentation/gpu/amdgpu/ras.rst b/Documentation/gpu/amdgpu/ras.rst
new file mode 100644
index 000000000000..047f76e395cf
--- /dev/null
+++ b/Documentation/gpu/amdgpu/ras.rst
@@ -0,0 +1,62 @@
+====================
+ AMDGPU RAS Support
+====================
+
+The AMDGPU RAS interfaces are exposed via sysfs (for informational queries) and
+debugfs (for error injection).
+
+RAS debugfs/sysfs Control and Error Injection Interfaces
+========================================================
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+   :doc: AMDGPU RAS debugfs control interface
+
+RAS Reboot Behavior for Unrecoverable Errors
+============================================
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+   :doc: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
+
+RAS Error Count sysfs Interface
+===============================
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+   :doc: AMDGPU RAS sysfs Error Count Interface
+
+RAS EEPROM debugfs Interface
+============================
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+   :doc: AMDGPU RAS debugfs EEPROM table reset interface
+
+RAS VRAM Bad Pages sysfs Interface
+==================================
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+   :doc: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
+
+Sample Code
+===========
+Sample code for testing error injection can be found here:
+https://cgit.freedesktop.org/mesa/drm/tree/tests/amdgpu/ras_tests.c
+
+This is part of the libdrm amdgpu unit tests, which cover several areas of the GPU.
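+The sysfs error-count files documented above can also be read directly with
+plain file I/O, which is handy for quick checks without building the full test
+suite. A minimal userspace sketch follows; the attribute path in it is
+illustrative, since the exact file names come from the error-count interface
+documented earlier in this file::
+
+	/* Minimal sketch: dump one RAS error-count attribute. */
+	#include <stdio.h>
+
+	int main(void)
+	{
+		/* Illustrative path; names depend on the ASIC's RAS blocks. */
+		const char *path = "/sys/class/drm/card0/device/ras/umc_err_count";
+		char line[128];
+		FILE *f = fopen(path, "r");
+
+		if (!f) {
+			perror(path);
+			return 1;
+		}
+		while (fgets(line, sizeof(line), f))
+			fputs(line, stdout); /* correctable/uncorrectable counts */
+		fclose(f);
+		return 0;
+	}
+
+The libdrm tests linked above exercise these same files, plus the debugfs
+injection interface.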
+There are four sets of tests:
+
+RAS Basic Test
+
+This test verifies that the RAS feature is enabled and makes sure the necessary sysfs and debugfs files
+are present.
+
+RAS Query Test
+
+This test checks the RAS availability and enablement status for each supported IP block as well as
+the error counts.
+
+RAS Inject Test
+
+This test injects errors for each IP.
+
+RAS Disable Test
+
+This test disables RAS features for each IP block.
diff --git a/Documentation/gpu/amdgpu/thermal.rst b/Documentation/gpu/amdgpu/thermal.rst
new file mode 100644
index 000000000000..8aeb0186c9ef
--- /dev/null
+++ b/Documentation/gpu/amdgpu/thermal.rst
@@ -0,0 +1,65 @@
+===========================================
+ GPU Power/Thermal Controls and Monitoring
+===========================================
+
+HWMON Interfaces
+================
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+   :doc: hwmon
+
+GPU sysfs Power State Interfaces
+================================
+
+GPU power controls are exposed via sysfs files.
+
+power_dpm_state
+---------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+   :doc: power_dpm_state
+
+power_dpm_force_performance_level
+---------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+   :doc: power_dpm_force_performance_level
+
+pp_table
+--------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+   :doc: pp_table
+
+pp_od_clk_voltage
+-----------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+   :doc: pp_od_clk_voltage
+
+pp_dpm_*
+--------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+   :doc: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
+
+pp_power_profile_mode
+---------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+   :doc: pp_power_profile_mode
+
+\*_busy_percent
+---------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+   :doc: gpu_busy_percent
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+   :doc: mem_busy_percent
+
+gpu_metrics
+-----------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+   :doc: gpu_metrics
diff --git a/Documentation/gpu/amdgpu/xgmi.rst b/Documentation/gpu/amdgpu/xgmi.rst
new file mode 100644
index 000000000000..23f2856f4524
--- /dev/null
+++ b/Documentation/gpu/amdgpu/xgmi.rst
@@ -0,0 +1,5 @@
+=====================
+ AMDGPU XGMI Support
+=====================
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
diff --git a/Documentation/gpu/drivers.rst b/Documentation/gpu/drivers.rst
index b4a0ed3ca961..3a52f48215a3 100644
--- a/Documentation/gpu/drivers.rst
+++ b/Documentation/gpu/drivers.rst
@@ -4,8 +4,7 @@ GPU Driver Documentation
 .. toctree::
 
-   amdgpu
-   amdgpu-dc
+   amdgpu/index
    i915
    mcde
    meson
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index ec2f65b31930..5bb55ec1b9b5 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -435,3 +435,18 @@ Legacy CRTC/Modeset Helper Functions Reference
 
 .. kernel-doc:: drivers/gpu/drm/drm_crtc_helper.c
    :export:
+
+Privacy-screen class
+====================
+
+.. kernel-doc:: drivers/gpu/drm/drm_privacy_screen.c
+   :doc: overview
+
+.. kernel-doc:: include/drm/drm_privacy_screen_driver.h
+   :internal:
+
+.. kernel-doc:: include/drm/drm_privacy_screen_machine.h
+   :internal:
+
+..
kernel-doc:: drivers/gpu/drm/drm_privacy_screen.c + :export: diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst index 1ef7951ded5e..d14bf1c35d7e 100644 --- a/Documentation/gpu/drm-kms.rst +++ b/Documentation/gpu/drm-kms.rst @@ -506,6 +506,8 @@ Property Types and Blob Property Support .. kernel-doc:: drivers/gpu/drm/drm_property.c :export: +.. _standard_connector_properties: + Standard Connector Properties ----------------------------- diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index 60d1d7ee0719..da138dd39883 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -268,17 +268,6 @@ Contact: Daniel Vetter Level: Intermediate -Clean up mmap forwarding ------------------------- - -A lot of drivers forward gem mmap calls to dma-buf mmap for imported buffers. -And also a lot of them forward dma-buf mmap to the gem mmap implementations. -There's drm_gem_prime_mmap() for this now, but still needs to be rolled out. - -Contact: Daniel Vetter - -Level: Intermediate - Generic fbdev defio support --------------------------- @@ -463,6 +452,21 @@ Contact: Thomas Zimmermann <tzimmermann@suse.de>, Christian König, Daniel Vette Level: Intermediate +Review all drivers for setting struct drm_mode_config.{max_width,max_height} correctly +-------------------------------------------------------------------------------------- + +The values in struct drm_mode_config.{max_width,max_height} describe the +maximum supported framebuffer size. It's the virtual screen size, but many +drivers treat it like limitations of the physical resolution. + +The maximum width depends on the hardware's maximum scanline pitch. The +maximum height depends on the amount of addressable video memory. Review all +drivers to initialize the fields to the correct values. + +Contact: Thomas Zimmermann <tzimmermann@suse.de> + +Level: Intermediate + Core refactorings ================= @@ -642,6 +646,17 @@ See drivers/gpu/drm/amd/display/TODO for tasks. Contact: Harry Wentland, Alex Deucher +vmwgfx: Replace hashtable with Linux' implementation +---------------------------------------------------- + +The vmwgfx driver uses its own hashtable implementation. Replace the +code with Linux' implementation and update the callers. It's mostly a +refactoring task, but the interfaces are different. + +Contact: Zack Rusin, Thomas Zimmermann <tzimmermann@suse.de> + +Level: Intermediate + Bootsplash ========== diff --git a/Documentation/i2c/smbus-protocol.rst b/Documentation/i2c/smbus-protocol.rst index 9e07e6bbe6a3..00d8e17d0aca 100644 --- a/Documentation/i2c/smbus-protocol.rst +++ b/Documentation/i2c/smbus-protocol.rst @@ -36,6 +36,8 @@ Key to symbols =============== ============================================================= S Start condition +Sr Repeated start condition, used to switch from write to + read mode. P Stop condition Rd/Wr (1 bit) Read/Write bit. Rd equals 1, Wr equals 0. A, NA (1 bit) Acknowledge (ACK) and Not Acknowledge (NACK) bit @@ -100,7 +102,7 @@ Implemented by i2c_smbus_read_byte_data() This reads a single byte from a device, from a designated register. The register is specified through the Comm byte:: - S Addr Wr [A] Comm [A] S Addr Rd [A] [Data] NA P + S Addr Wr [A] Comm [A] Sr Addr Rd [A] [Data] NA P Functionality flag: I2C_FUNC_SMBUS_READ_BYTE_DATA @@ -114,7 +116,7 @@ This operation is very like Read Byte; again, data is read from a device, from a designated register that is specified through the Comm byte. 
But this time, the data is a complete word (16 bits):: - S Addr Wr [A] Comm [A] S Addr Rd [A] [DataLow] A [DataHigh] NA P + S Addr Wr [A] Comm [A] Sr Addr Rd [A] [DataLow] A [DataHigh] NA P Functionality flag: I2C_FUNC_SMBUS_READ_WORD_DATA @@ -164,7 +166,7 @@ This command selects a device register (through the Comm byte), sends 16 bits of data to it, and reads 16 bits of data in return:: S Addr Wr [A] Comm [A] DataLow [A] DataHigh [A] - S Addr Rd [A] [DataLow] A [DataHigh] NA P + Sr Addr Rd [A] [DataLow] A [DataHigh] NA P Functionality flag: I2C_FUNC_SMBUS_PROC_CALL @@ -181,7 +183,7 @@ of data is specified by the device in the Count byte. :: S Addr Wr [A] Comm [A] - S Addr Rd [A] [Count] A [Data] A [Data] A ... A [Data] NA P + Sr Addr Rd [A] [Count] A [Data] A [Data] A ... A [Data] NA P Functionality flag: I2C_FUNC_SMBUS_READ_BLOCK_DATA @@ -212,7 +214,7 @@ This command selects a device register (through the Comm byte), sends 1 to 31 bytes of data to it, and reads 1 to 31 bytes of data in return:: S Addr Wr [A] Comm [A] Count [A] Data [A] ... - S Addr Rd [A] [Count] A [Data] ... A P + Sr Addr Rd [A] [Count] A [Data] ... A P Functionality flag: I2C_FUNC_SMBUS_BLOCK_PROC_CALL @@ -300,7 +302,7 @@ This command reads a block of bytes from a device, from a designated register that is specified through the Comm byte:: S Addr Wr [A] Comm [A] - S Addr Rd [A] [Data] A [Data] A ... A [Data] NA P + Sr Addr Rd [A] [Data] A [Data] A ... A [Data] NA P Functionality flag: I2C_FUNC_SMBUS_READ_I2C_BLOCK diff --git a/Documentation/locking/locktypes.rst b/Documentation/locking/locktypes.rst index ddada4a53749..4fd7b70fcde1 100644 --- a/Documentation/locking/locktypes.rst +++ b/Documentation/locking/locktypes.rst @@ -439,11 +439,9 @@ preemption. The following substitution works on both kernels:: spin_lock(&p->lock); p->count += this_cpu_read(var2); -On a non-PREEMPT_RT kernel migrate_disable() maps to preempt_disable() -which makes the above code fully equivalent. On a PREEMPT_RT kernel migrate_disable() ensures that the task is pinned on the current CPU which in turn guarantees that the per-CPU access to var1 and var2 are staying on -the same CPU. +the same CPU while the task remains preemptible. The migrate_disable() substitution is not valid for the following scenario:: @@ -456,9 +454,8 @@ scenario:: p = this_cpu_ptr(&var1); p->val = func2(); -While correct on a non-PREEMPT_RT kernel, this breaks on PREEMPT_RT because -here migrate_disable() does not protect against reentrancy from a -preempting task. A correct substitution for this case is:: +This breaks because migrate_disable() does not protect against reentrancy from +a preempting task. A correct substitution for this case is:: func() { diff --git a/Documentation/networking/ipvs-sysctl.rst b/Documentation/networking/ipvs-sysctl.rst index 95ef56d62077..387fda80f05f 100644 --- a/Documentation/networking/ipvs-sysctl.rst +++ b/Documentation/networking/ipvs-sysctl.rst @@ -37,8 +37,7 @@ conn_reuse_mode - INTEGER 0: disable any special handling on port reuse. The new connection will be delivered to the same real server that was - servicing the previous connection. This will effectively - disable expire_nodest_conn. + servicing the previous connection. bit 1: enable rescheduling of new connections when it is safe. 
That is, whenever expire_nodest_conn and for TCP sockets, when
diff --git a/Documentation/networking/timestamping.rst b/Documentation/networking/timestamping.rst
index a722eb30e014..80b13353254a 100644
--- a/Documentation/networking/timestamping.rst
+++ b/Documentation/networking/timestamping.rst
@@ -486,8 +486,8 @@ of packets.
 Drivers are free to use a more permissive configuration than the requested
 configuration. It is expected that drivers should only implement directly the
 most generic mode that can be supported. For example if the hardware can
-support HWTSTAMP_FILTER_V2_EVENT, then it should generally always upscale
-HWTSTAMP_FILTER_V2_L2_SYNC_MESSAGE, and so forth, as HWTSTAMP_FILTER_V2_EVENT
+support HWTSTAMP_FILTER_PTP_V2_EVENT, then it should generally always upscale
+HWTSTAMP_FILTER_PTP_V2_L2_SYNC, and so forth, as HWTSTAMP_FILTER_PTP_V2_EVENT
 is more generic (and more useful to applications).
 
 A driver which supports hardware time stamping shall update the struct
diff --git a/Documentation/power/energy-model.rst b/Documentation/power/energy-model.rst
index 8a2788afe89b..5ac62a7b4b7c 100644
--- a/Documentation/power/energy-model.rst
+++ b/Documentation/power/energy-model.rst
@@ -84,6 +84,16 @@ CONFIG_ENERGY_MODEL must be enabled to use the EM framework.
 2.2 Registration of performance domains
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
+Registration of 'advanced' EM
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The 'advanced' EM gets its name from the fact that the driver is allowed
+to provide a more precise power model. It is not limited to a math formula
+implemented in the framework (as in the 'simple' EM case). It can better reflect
+the real power measurements performed for each performance state. Thus, this
+registration method should be preferred when accounting for static power
+(leakage) is important.
+
 Drivers are expected to register performance domains into the EM framework by
 calling the following API::
 
@@ -103,6 +113,18 @@ to: return warning/error, stop working or panic.
 See Section 3. for an example of driver implementing this callback, or
 Section 2.4 for further documentation on this API
 
+Registration of 'simple' EM
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The 'simple' EM is registered using the framework helper function
+cpufreq_register_em_with_opp(). It implements a power model which is tied to
+the math formula::
+
+	Power = C * V^2 * f
+
+where C is a capacitance, V the voltage and f the frequency of a given
+performance state. The EM which is registered using this method might not
+correctly reflect the physics of a real device, e.g. when static power
+(leakage) is important.
+
 2.3 Accessing performance domains
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -138,6 +160,10 @@ or in Section 2.4
 3. Example driver
 -----------------
 
+The CPUFreq framework supports a dedicated callback for registering
+the EM for a given CPU(s) 'policy' object: cpufreq_driver::register_em().
+That callback has to be implemented properly for a given driver,
+because the framework calls it at the right time during setup.
 This section provides a simple example of a CPUFreq driver registering a
 performance domain in the Energy Model framework using the (fake) 'foo'
 protocol.
The driver implements an est_power() function to be provided to the @@ -167,25 +193,22 @@ EM framework:: 20 return 0; 21 } 22 - 23 static int foo_cpufreq_init(struct cpufreq_policy *policy) + 23 static void foo_cpufreq_register_em(struct cpufreq_policy *policy) 24 { 25 struct em_data_callback em_cb = EM_DATA_CB(est_power); 26 struct device *cpu_dev; - 27 int nr_opp, ret; + 27 int nr_opp; 28 29 cpu_dev = get_cpu_device(cpumask_first(policy->cpus)); 30 - 31 /* Do the actual CPUFreq init work ... */ - 32 ret = do_foo_cpufreq_init(policy); - 33 if (ret) - 34 return ret; - 35 - 36 /* Find the number of OPPs for this policy */ - 37 nr_opp = foo_get_nr_opp(policy); + 31 /* Find the number of OPPs for this policy */ + 32 nr_opp = foo_get_nr_opp(policy); + 33 + 34 /* And register the new performance domain */ + 35 em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, policy->cpus, + 36 true); + 37 } 38 - 39 /* And register the new performance domain */ - 40 em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, policy->cpus, - 41 true); - 42 - 43 return 0; - 44 } + 39 static struct cpufreq_driver foo_cpufreq_driver = { + 40 .register_em = foo_cpufreq_register_em, + 41 }; diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst index e35ab74a0f80..cf908d79666e 100644 --- a/Documentation/process/changes.rst +++ b/Documentation/process/changes.rst @@ -35,6 +35,7 @@ GNU make 3.81 make --version binutils 2.23 ld -v flex 2.5.35 flex --version bison 2.0 bison --version +pahole 1.16 pahole --version util-linux 2.10o fdformat --version kmod 13 depmod -V e2fsprogs 1.41.4 e2fsck -V @@ -54,7 +55,7 @@ mcelog 0.6 mcelog --version iptables 1.4.2 iptables -V openssl & libcrypto 1.0.0 openssl version bc 1.06.95 bc --version -Sphinx\ [#f1]_ 1.3 sphinx-build --version +Sphinx\ [#f1]_ 1.7 sphinx-build --version ====================== =============== ======================================== .. [#f1] Sphinx is needed only to build the Kernel documentation @@ -108,6 +109,16 @@ Bison Since Linux 4.16, the build system generates parsers during build. This requires bison 2.0 or later. +pahole: +------- + +Since Linux 5.2, if CONFIG_DEBUG_INFO_BTF is selected, the build system +generates BTF (BPF Type Format) from DWARF in vmlinux, a bit later from kernel +modules as well. This requires pahole v1.16 or later. + +It is found in the 'dwarves' or 'pahole' distro packages or from +https://fedorapeople.org/~acme/dwarves/. + Perl ---- diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst index a0cc96923ea7..6b3aaed66fba 100644 --- a/Documentation/process/submitting-patches.rst +++ b/Documentation/process/submitting-patches.rst @@ -14,7 +14,8 @@ works, see Documentation/process/development-process.rst. Also, read Documentation/process/submit-checklist.rst for a list of items to check before submitting code. If you are submitting a driver, also read Documentation/process/submitting-drivers.rst; for device -tree binding patches, read Documentation/process/submitting-patches.rst. +tree binding patches, read +Documentation/devicetree/bindings/submitting-patches.rst. This documentation assumes that you're using ``git`` to prepare your patches. If you're unfamiliar with ``git``, you would be well-advised to learn how to @@ -22,8 +23,8 @@ use it, it will make your life as a kernel developer and in general much easier. 
Some subsystems and maintainer trees have additional information about -their workflow and expectations, see :ref:`Documentation/process/maintainer -handbooks <maintainer_handbooks_main>`. +their workflow and expectations, see +:ref:`Documentation/process/maintainer-handbooks.rst <maintainer_handbooks_main>`. Obtain a current source tree ---------------------------- diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst index 4e5b26f03d5b..b3166c4a7867 100644 --- a/Documentation/trace/ftrace.rst +++ b/Documentation/trace/ftrace.rst @@ -2442,11 +2442,10 @@ Or this simple script! #!/bin/bash tracefs=`sed -ne 's/^tracefs \(.*\) tracefs.*/\1/p' /proc/mounts` - echo nop > $tracefs/tracing/current_tracer - echo 0 > $tracefs/tracing/tracing_on - echo $$ > $tracefs/tracing/set_ftrace_pid - echo function > $tracefs/tracing/current_tracer - echo 1 > $tracefs/tracing/tracing_on + echo 0 > $tracefs/tracing_on + echo $$ > $tracefs/set_ftrace_pid + echo function > $tracefs/current_tracer + echo 1 > $tracefs/tracing_on exec "$@" diff --git a/Documentation/translations/it_IT/doc-guide/sphinx.rst b/Documentation/translations/it_IT/doc-guide/sphinx.rst index 0046d75d9a70..9762452c584c 100644 --- a/Documentation/translations/it_IT/doc-guide/sphinx.rst +++ b/Documentation/translations/it_IT/doc-guide/sphinx.rst @@ -35,7 +35,7 @@ Installazione Sphinx ==================== I marcatori ReST utilizzati nei file in Documentation/ sono pensati per essere -processati da ``Sphinx`` nella versione 1.3 o superiore. +processati da ``Sphinx`` nella versione 1.7 o superiore. Esiste uno script che verifica i requisiti Sphinx. Per ulteriori dettagli consultate :ref:`it_sphinx-pre-install`. @@ -53,11 +53,6 @@ pacchettizzato dalla vostra distribuzione. .. note:: - #) Le versioni di Sphinx inferiori alla 1.5 non funzionano bene - con il pacchetto Python docutils versione 0.13.1 o superiore. - Se volete usare queste versioni, allora dovere eseguire - ``pip install 'docutils==0.12'``. - #) Viene raccomandato l'uso del tema RTD per la documentazione in HTML. A seconda della versione di Sphinx, potrebbe essere necessaria l'installazione tramite il comando ``pip install sphinx_rtd_theme``. @@ -67,13 +62,13 @@ pacchettizzato dalla vostra distribuzione. utilizzando LaTeX. Per una corretta interpretazione, è necessario aver installato texlive con i pacchetti amdfonts e amsmath. -Riassumendo, se volete installare la versione 1.7.9 di Sphinx dovete eseguire:: +Riassumendo, se volete installare la versione 2.4.4 di Sphinx dovete eseguire:: - $ virtualenv sphinx_1.7.9 - $ . sphinx_1.7.9/bin/activate - (sphinx_1.7.9) $ pip install -r Documentation/sphinx/requirements.txt + $ virtualenv sphinx_2.4.4 + $ . sphinx_2.4.4/bin/activate + (sphinx_2.4.4) $ pip install -r Documentation/sphinx/requirements.txt -Dopo aver eseguito ``. sphinx_1.7.9/bin/activate``, il prompt cambierà per +Dopo aver eseguito ``. sphinx_2.4.4/bin/activate``, il prompt cambierà per indicare che state usando il nuovo ambiente. Se aprite un nuova sessione, prima di generare la documentazione, dovrete rieseguire questo comando per rientrare nell'ambiente virtuale. @@ -94,7 +89,7 @@ Generazione in PDF e LaTeX -------------------------- Al momento, la generazione di questi documenti è supportata solo dalle -versioni di Sphinx superiori alla 1.4. +versioni di Sphinx superiori alla 2.4. 
Per la generazione di PDF e LaTeX, avrete bisogno anche del pacchetto
``XeLaTeX`` nella versione 3.14159265

@@ -119,8 +114,8 @@ l'installazione::

       You should run:
 
 	sudo dnf install -y texlive-luatex85
-	/usr/bin/virtualenv sphinx_1.7.9
-	. sphinx_1.7.9/bin/activate
+	/usr/bin/virtualenv sphinx_2.4.4
+	. sphinx_2.4.4/bin/activate
 	pip install -r Documentation/sphinx/requirements.txt
 
       Can't build as 1 mandatory dependency is missing at ./scripts/sphinx-pre-install line 468.
diff --git a/Documentation/translations/it_IT/process/changes.rst b/Documentation/translations/it_IT/process/changes.rst
index 87d081889bfc..dc7193377b7f 100644
--- a/Documentation/translations/it_IT/process/changes.rst
+++ b/Documentation/translations/it_IT/process/changes.rst
@@ -57,7 +57,7 @@ mcelog 0.6 mcelog --version
 iptables               1.4.2              iptables -V
 openssl & libcrypto    1.0.0              openssl version
 bc                     1.06.95            bc --version
-Sphinx\ [#f1]_         1.3                sphinx-build --version
+Sphinx\ [#f1]_         1.7                sphinx-build --version
 ====================== ================= ========================================
 
 .. [#f1] Sphinx è necessario solo per produrre la documentazione del Kernel
diff --git a/Documentation/translations/zh_CN/doc-guide/sphinx.rst b/Documentation/translations/zh_CN/doc-guide/sphinx.rst
index 951595c7d599..23eac67fbc30 100644
--- a/Documentation/translations/zh_CN/doc-guide/sphinx.rst
+++ b/Documentation/translations/zh_CN/doc-guide/sphinx.rst
@@ -26,7 +26,7 @@ reStructuredText文件可能包含包含来自源文件的结构化文档注释
 安装Sphinx
 ==========
 
-Documentation/ 下的ReST文件现在使用sphinx1.3或更高版本构建。
+Documentation/ 下的ReST文件现在使用sphinx1.7或更高版本构建。
 
 这有一个脚本可以检查Sphinx的依赖项。更多详细信息见
 :ref:`sphinx-pre-install_zh` 。
@@ -40,22 +40,19 @@ Documentation/ 下的ReST文件现在使用sphinx1.3或更高版本构建。
 
 .. note::
 
-   #) 低于1.5版本的Sphinx无法与Python的0.13.1或更高版本docutils一起正常工作。
-      如果您想使用这些版本，那么应该运行 ``pip install 'docutils==0.12'`` 。
-
    #) html输出建议使用RTD主题。根据Sphinx版本的不同，它应该用
       ``pip install sphinx_rtd_theme`` 单独安装。
 
    #) 一些ReST页面包含数学表达式。由于Sphinx的工作方式，这些表达式是使用
      LaTeX 编写的。它需要安装amsfonts和amsmath宏包，以便显示。
 
-总之，如您要安装Sphinx 1.7.9版本，应执行::
-
-    $ virtualenv sphinx_1.7.9
-    $ . sphinx_1.7.9/bin/activate
-    (sphinx_1.7.9) $ pip install -r Documentation/sphinx/requirements.txt
+总之，如您要安装Sphinx 2.4.4版本，应执行::
+
+    $ virtualenv sphinx_2.4.4
+    $ . sphinx_2.4.4/bin/activate
+    (sphinx_2.4.4) $ pip install -r Documentation/sphinx/requirements.txt
 
-在运行 ``. sphinx_1.7.9/bin/activate`` 之后，提示符将变化，以指示您正在使用新
+在运行 ``. sphinx_2.4.4/bin/activate`` 之后，提示符将变化，以指示您正在使用新
 环境。如果您打开了一个新的shell，那么在构建文档之前，您需要重新运行此命令以再
 次进入虚拟环境中。
 
@@ -71,7 +68,7 @@ Documentation/ 下的ReST文件现在使用sphinx1.3或更高版本构建。
 PDF和LaTeX构建
 --------------
 
-目前只有Sphinx 1.4及更高版本才支持这种构建。
+目前只有Sphinx 2.4及更高版本才支持这种构建。
 
 对于PDF和LaTeX输出，还需要 ``XeLaTeX`` 3.14159265版本。(译注:此版本号真实
 存在）
@@ -93,8 +90,8 @@ PDF和LaTeX构建
 
       You should run:
 
 	sudo dnf install -y texlive-luatex85
-	/usr/bin/virtualenv sphinx_1.7.9
-	. sphinx_1.7.9/bin/activate
+	/usr/bin/virtualenv sphinx_2.4.4
+	. sphinx_2.4.4/bin/activate
 	pip install -r Documentation/sphinx/requirements.txt
 
       Can't build as 1 mandatory dependency is missing at ./scripts/sphinx-pre-install line 468.
diff --git a/Documentation/translations/zh_CN/process/management-style.rst b/Documentation/translations/zh_CN/process/management-style.rst
index c6a5bb285797..8053ae474328 100644
--- a/Documentation/translations/zh_CN/process/management-style.rst
+++ b/Documentation/translations/zh_CN/process/management-style.rst
@@ -36,14 +36,14 @@ Linux内核管理风格
 每个人都认为管理者做决定，而且决策很重要。决定越大越痛苦，管理者就必须越高级。
 这很明显，但事实并非如此。
 
-游戏的名字是 **避免** 做出决定。尤其是，如果有人告诉你“选择（a)或(b),
+最重要的是 **避免** 做出决定。尤其是，如果有人告诉你“选择（a)或(b),
 我们真的需要你来做决定”，你就是陷入麻烦的管理者。你管理的人比你更了解细节，
 所以如果他们来找你做技术决策，你完蛋了。你显然没有能力为他们做这个决定。
 
 （推论：如果你管理的人不比你更了解细节，你也会被搞砸，尽管原因完全不同。
 也就是说，你的工作是错的，他们应该管理你的才智）
 
-所以游戏的名字是 **避免** 做出决定，至少是那些大而痛苦的决定。做一些小的
+所以最重要的是 **避免** 做出决定，至少是那些大而痛苦的决定。做一些小的
 和非结果性的决定是很好的，并且使您看起来好像知道自己在做什么，所以内核管理者
 需要做的是将那些大的和痛苦的决定变成那些没有人真正关心的小事情。
diff --git a/MAINTAINERS b/MAINTAINERS
index 7a2345ce8521..d03ad8da1f36 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -966,6 +966,7 @@ F: drivers/gpu/drm/amd/include/kgd_kfd_interface.h
 F: drivers/gpu/drm/amd/include/v9_structs.h
 F: drivers/gpu/drm/amd/include/vi_structs.h
 F: include/uapi/linux/kfd_ioctl.h
+F: include/uapi/linux/kfd_sysfs.h
 
 AMD SPI DRIVER
 M: Sanjay R Mehta <sanju.mehta@amd.com>
@@ -2263,6 +2264,15 @@ L: linux-iio@vger.kernel.org
 S: Maintained
 F: drivers/counter/microchip-tcb-capture.c
 
+ARM/MILBEAUT ARCHITECTURE
+M: Taichi Sugaya <sugaya.taichi@socionext.com>
+M: Takao Orito <orito.takao@socionext.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/boot/dts/milbeaut*
+F: arch/arm/mach-milbeaut/
+N: milbeaut
+
 ARM/MIOA701 MACHINE SUPPORT
 M: Robert Jarzmik <robert.jarzmik@free.fr>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2729,10 +2739,11 @@ S: Maintained
 F: drivers/memory/*emif*
 
 ARM/TEXAS INSTRUMENT KEYSTONE ARCHITECTURE
+M: Nishanth Menon <nm@ti.com>
 M: Santosh Shilimkar <ssantosh@kernel.org>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux.git
 F: arch/arm/boot/dts/keystone-*
 F: arch/arm/mach-keystone/
 
@@ -3570,13 +3581,14 @@ L: netdev@vger.kernel.org
 S: Supported
 F: drivers/net/ethernet/broadcom/b44.*
 
-BROADCOM B53 ETHERNET SWITCH DRIVER
+BROADCOM B53/SF2 ETHERNET SWITCH DRIVER
 M: Florian Fainelli <f.fainelli@gmail.com>
 L: netdev@vger.kernel.org
 L: openwrt-devel@lists.openwrt.org (subscribers-only)
 S: Supported
 F: Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml
 F: drivers/net/dsa/b53/*
+F: drivers/net/dsa/bcm_sf2*
 F: include/linux/dsa/brcm.h
 F: include/linux/platform_data/b53.h
 
@@ -3733,7 +3745,7 @@ F: drivers/scsi/bnx2i/
 BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
 M: Ariel Elior <aelior@marvell.com>
 M: Sudarsana Kalluru <skalluru@marvell.com>
-M: GR-everest-linux-l2@marvell.com
+M: Manish Chopra <manishc@marvell.com>
 L: netdev@vger.kernel.org
 S: Supported
 F: drivers/net/ethernet/broadcom/bnx2x/
@@ -6063,10 +6075,17 @@ F:
drivers/gpu/drm/panel/panel-novatek-nt36672a.c DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS M: Ben Skeggs <bskeggs@redhat.com> +M: Karol Herbst <kherbst@redhat.com> +M: Lyude Paul <lyude@redhat.com> L: dri-devel@lists.freedesktop.org L: nouveau@lists.freedesktop.org S: Supported -T: git git://github.com/skeggsb/linux +W: https://nouveau.freedesktop.org/ +Q: https://patchwork.freedesktop.org/project/nouveau/ +Q: https://gitlab.freedesktop.org/drm/nouveau/-/merge_requests +B: https://gitlab.freedesktop.org/drm/nouveau/-/issues +C: irc://irc.oftc.net/nouveau +T: git https://gitlab.freedesktop.org/drm/nouveau.git F: drivers/gpu/drm/nouveau/ F: include/uapi/drm/nouveau_drm.h @@ -6406,6 +6425,7 @@ L: dri-devel@lists.freedesktop.org L: linux-renesas-soc@vger.kernel.org S: Supported T: git git://linuxtv.org/pinchartl/media drm/du/next +F: Documentation/devicetree/bindings/display/bridge/renesas,dsi-csi2-tx.yaml F: Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.yaml F: Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml F: Documentation/devicetree/bindings/display/renesas,du.yaml @@ -6524,6 +6544,14 @@ F: drivers/gpu/drm/drm_panel.c F: drivers/gpu/drm/panel/ F: include/drm/drm_panel.h +DRM PRIVACY-SCREEN CLASS +M: Hans de Goede <hdegoede@redhat.com> +L: dri-devel@lists.freedesktop.org +S: Maintained +T: git git://anongit.freedesktop.org/drm/drm-misc +F: drivers/gpu/drm/drm_privacy_screen* +F: include/drm/drm_privacy_screen* + DRM TTM SUBSYSTEM M: Christian Koenig <christian.koenig@amd.com> M: Huang Rui <ray.huang@amd.com> @@ -9318,7 +9346,6 @@ S: Maintained F: drivers/iio/pressure/dps310.c INFINIBAND SUBSYSTEM -M: Doug Ledford <dledford@redhat.com> M: Jason Gunthorpe <jgg@nvidia.com> L: linux-rdma@vger.kernel.org S: Supported @@ -9479,6 +9506,7 @@ INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets) M: Jani Nikula <jani.nikula@linux.intel.com> M: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> M: Rodrigo Vivi <rodrigo.vivi@intel.com> +M: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com> L: intel-gfx@lists.freedesktop.org S: Supported W: https://01.org/linuxgraphics/ @@ -10269,9 +10297,9 @@ F: lib/Kconfig.kcsan F: scripts/Makefile.kcsan KDUMP -M: Dave Young <dyoung@redhat.com> M: Baoquan He <bhe@redhat.com> R: Vivek Goyal <vgoyal@redhat.com> +R: Dave Young <dyoung@redhat.com> L: kexec@lists.infradead.org S: Maintained W: http://lse.sourceforge.net/kdump/ @@ -10445,7 +10473,7 @@ F: arch/riscv/include/uapi/asm/kvm* F: arch/riscv/kvm/ KERNEL VIRTUAL MACHINE for s390 (KVM/s390) -M: Christian Borntraeger <borntraeger@de.ibm.com> +M: Christian Borntraeger <borntraeger@linux.ibm.com> M: Janosch Frank <frankja@linux.ibm.com> R: David Hildenbrand <david@redhat.com> R: Claudio Imbrenda <imbrenda@linux.ibm.com> @@ -12169,8 +12197,8 @@ F: drivers/net/ethernet/mellanox/mlx5/core/fpga/* F: include/linux/mlx5/mlx5_ifc_fpga.h MELLANOX ETHERNET SWITCH DRIVERS -M: Jiri Pirko <jiri@nvidia.com> M: Ido Schimmel <idosch@nvidia.com> +M: Petr Machata <petrm@nvidia.com> L: netdev@vger.kernel.org S: Supported W: http://www.mellanox.com @@ -15593,7 +15621,7 @@ F: drivers/scsi/qedi/ QLOGIC QL4xxx ETHERNET DRIVER M: Ariel Elior <aelior@marvell.com> -M: GR-everest-linux-l2@marvell.com +M: Manish Chopra <manishc@marvell.com> L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/qlogic/qed/ @@ -15760,6 +15788,15 @@ S: Maintained F: Documentation/devicetree/bindings/net/qcom,ethqos.txt F: drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +QUALCOMM FASTRPC DRIVER +M: 
Srinivas Kandagatla <srinivas.kandagatla@linaro.org> +M: Amol Maheshwari <amahesh@qti.qualcomm.com> +L: linux-arm-msm@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/misc/qcom,fastrpc.txt +F: drivers/misc/fastrpc.c +F: include/uapi/misc/fastrpc.h + QUALCOMM GENERIC INTERFACE I2C DRIVER M: Akash Asthana <akashast@codeaurora.org> M: Mukesh Savaliya <msavaliy@codeaurora.org> @@ -15968,6 +16005,7 @@ F: arch/mips/generic/board-ranchu.c RANDOM NUMBER DRIVER M: "Theodore Ts'o" <tytso@mit.edu> +M: Jason A. Donenfeld <Jason@zx2c4.com> S: Maintained F: drivers/char/random.c @@ -16490,6 +16528,12 @@ T: git git://linuxtv.org/media_tree.git F: Documentation/devicetree/bindings/media/allwinner,sun8i-a83t-de2-rotate.yaml F: drivers/media/platform/sunxi/sun8i-rotate/ +RPMSG TTY DRIVER +M: Arnaud Pouliquen <arnaud.pouliquen@foss.st.com> +L: linux-remoteproc@vger.kernel.org +S: Maintained +F: drivers/tty/rpmsg_tty.c + RTL2830 MEDIA DRIVER M: Antti Palosaari <crope@iki.fi> L: linux-media@vger.kernel.org @@ -16573,7 +16617,7 @@ F: drivers/video/fbdev/savage/ S390 M: Heiko Carstens <hca@linux.ibm.com> M: Vasily Gorbik <gor@linux.ibm.com> -M: Christian Borntraeger <borntraeger@de.ibm.com> +M: Christian Borntraeger <borntraeger@linux.ibm.com> R: Alexander Gordeev <agordeev@linux.ibm.com> L: linux-s390@vger.kernel.org S: Supported @@ -16611,8 +16655,8 @@ W: http://www.ibm.com/developerworks/linux/linux390/ F: drivers/iommu/s390-iommu.c S390 IUCV NETWORK LAYER -M: Julian Wiedmann <jwi@linux.ibm.com> -M: Karsten Graul <kgraul@linux.ibm.com> +M: Alexandra Winter <wintera@linux.ibm.com> +M: Wenjia Zhang <wenjia@linux.ibm.com> L: linux-s390@vger.kernel.org L: netdev@vger.kernel.org S: Supported @@ -16622,8 +16666,8 @@ F: include/net/iucv/ F: net/iucv/ S390 NETWORK DRIVERS -M: Julian Wiedmann <jwi@linux.ibm.com> -M: Karsten Graul <kgraul@linux.ibm.com> +M: Alexandra Winter <wintera@linux.ibm.com> +M: Wenjia Zhang <wenjia@linux.ibm.com> L: linux-s390@vger.kernel.org L: netdev@vger.kernel.org S: Supported @@ -18483,6 +18527,7 @@ F: include/uapi/linux/pkt_sched.h F: include/uapi/linux/tc_act/ F: include/uapi/linux/tc_ematch/ F: net/sched/ +F: tools/testing/selftests/tc-testing TC90522 MEDIA DRIVER M: Akihiro Tsukada <tskd08@gmail.com> @@ -19031,11 +19076,12 @@ F: drivers/mmc/host/tifm_sd.c F: include/linux/tifm.h TI KEYSTONE MULTICORE NAVIGATOR DRIVERS +M: Nishanth Menon <nm@ti.com> M: Santosh Shilimkar <ssantosh@kernel.org> L: linux-kernel@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux.git F: drivers/soc/ti/* TI LM49xxx FAMILY ASoC CODEC DRIVERS @@ -20167,6 +20213,8 @@ F: include/uapi/linux/virtio_gpio.h VIRTIO GPU DRIVER M: David Airlie <airlied@linux.ie> M: Gerd Hoffmann <kraxel@redhat.com> +R: Gurchetan Singh <gurchetansingh@chromium.org> +R: Chia-I Wu <olvaffe@gmail.com> L: dri-devel@lists.freedesktop.org L: virtualization@lists.linux-foundation.org S: Maintained @@ -20317,7 +20365,8 @@ F: arch/x86/include/asm/vmware.h F: arch/x86/kernel/cpu/vmware.c VMWARE PVRDMA DRIVER -M: Adit Ranadive <aditr@vmware.com> +M: Bryan Tan <bryantan@vmware.com> +M: Vishnu Dasa <vdasa@vmware.com> M: VMware PV-Drivers <pv-drivers@vmware.com> L: linux-rdma@vger.kernel.org S: Maintained @@ -2,8 +2,8 @@ VERSION = 5 PATCHLEVEL = 16 SUBLEVEL = 0 -EXTRAVERSION = -rc1 -NAME = Trick or Treat +EXTRAVERSION = -rc5 +NAME = Gobble 
Gobble # *DOCUMENTATION* # To see a list of typical targets execute "make help" @@ -789,7 +789,7 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong KBUILD_CFLAGS += $(stackp-flags-y) KBUILD_CFLAGS-$(CONFIG_WERROR) += -Werror -KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH) +KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH:"%"=%) ifdef CONFIG_CC_IS_CLANG KBUILD_CPPFLAGS += -Qunused-arguments @@ -1374,17 +1374,17 @@ endif ifneq ($(dtstree),) -%.dtb: dt_binding_check include/config/kernel.release scripts_dtc - $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@ $(dtstree)/$*.dt.yaml +%.dtb: include/config/kernel.release scripts_dtc + $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@ -%.dtbo: dt_binding_check include/config/kernel.release scripts_dtc - $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@ $(dtstree)/$*.dt.yaml +%.dtbo: include/config/kernel.release scripts_dtc + $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@ PHONY += dtbs dtbs_install dtbs_check dtbs: include/config/kernel.release scripts_dtc $(Q)$(MAKE) $(build)=$(dtstree) -ifneq ($(filter dtbs_check %.dtb %.dtbo, $(MAKECMDGOALS)),) +ifneq ($(filter dtbs_check, $(MAKECMDGOALS)),) export CHECK_DTBS=y dtbs: dt_binding_check endif diff --git a/arch/Kconfig b/arch/Kconfig index 26b8ed11639d..d3c4ab249e9c 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -991,6 +991,16 @@ config HAVE_ARCH_COMPAT_MMAP_BASES and vice-versa 32-bit applications to call 64-bit mmap(). Required for applications doing different bitness syscalls. +config PAGE_SIZE_LESS_THAN_64KB + def_bool y + depends on !ARM64_64K_PAGES + depends on !IA64_PAGE_SIZE_64KB + depends on !PAGE_SIZE_64KB + depends on !PARISC_PAGE_SIZE_64KB + depends on !PPC_64K_PAGES + depends on !PPC_256K_PAGES + depends on !PAGE_SIZE_256KB + # This allows to use a set of generic functions to determine mmap base # address by giving priority to top-down scheme only if the process # is not in legacy mode (compat task, unlimited stack size or diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl index e4a041cd5715..ca5a32228cd6 100644 --- a/arch/alpha/kernel/syscalls/syscall.tbl +++ b/arch/alpha/kernel/syscalls/syscall.tbl @@ -488,3 +488,4 @@ 556 common landlock_restrict_self sys_landlock_restrict_self # 557 reserved for memfd_secret 558 common process_mrelease sys_process_mrelease +559 common futex_waitv sys_futex_waitv diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h index e8c2c7469e10..e201b4b1655a 100644 --- a/arch/arc/include/asm/cacheflush.h +++ b/arch/arc/include/asm/cacheflush.h @@ -36,7 +36,6 @@ void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr); #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 void flush_dcache_page(struct page *page); -void flush_dcache_folio(struct folio *folio); void dma_cache_wback_inv(phys_addr_t start, unsigned long sz); void dma_cache_inv(phys_addr_t start, unsigned long sz); diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index f0f9e8bec83a..c2724d986fa0 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1463,6 +1463,7 @@ config HIGHMEM bool "High Memory Support" depends on MMU select KMAP_LOCAL + select KMAP_LOCAL_NON_LINEAR_PTE_ARRAY help The address space of ARM processors is only 4 Gigabytes large and it has to accommodate user address space, kernel address diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi index 3b60297af7f6..9e01dbca4a01 100644 --- a/arch/arm/boot/dts/bcm2711.dtsi +++ 
b/arch/arm/boot/dts/bcm2711.dtsi @@ -506,11 +506,17 @@ #address-cells = <3>; #interrupt-cells = <1>; #size-cells = <2>; - interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>, + interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "pcie", "msi"; interrupt-map-mask = <0x0 0x0 0x0 0x7>; interrupt-map = <0 0 0 1 &gicv2 GIC_SPI 143 + IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 2 &gicv2 GIC_SPI 144 + IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 3 &gicv2 GIC_SPI 145 + IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 4 &gicv2 GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>; msi-controller; msi-parent = <&pcie0>; diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi index d4f355015e3c..f69d2af3c1fa 100644 --- a/arch/arm/boot/dts/bcm5301x.dtsi +++ b/arch/arm/boot/dts/bcm5301x.dtsi @@ -242,6 +242,8 @@ gpio-controller; #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; }; pcie0: pcie@12000 { @@ -408,7 +410,7 @@ i2c0: i2c@18009000 { compatible = "brcm,iproc-i2c"; reg = <0x18009000 0x50>; - interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; clock-frequency = <100000>; diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index e68fb879e4f9..5e56288e343b 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -290,7 +290,6 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr */ #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 extern void flush_dcache_page(struct page *); -void flush_dcache_folio(struct folio *folio); #define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1 static inline void flush_kernel_vmap_range(void *addr, int size) diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h index fc2608b18a0d..18f01190dcfd 100644 --- a/arch/arm/mach-socfpga/core.h +++ b/arch/arm/mach-socfpga/core.h @@ -33,7 +33,7 @@ extern void __iomem *sdr_ctl_base_addr; u32 socfpga_sdram_self_refresh(u32 sdr_base); extern unsigned int socfpga_sdram_self_refresh_sz; -extern char secondary_trampoline, secondary_trampoline_end; +extern char secondary_trampoline[], secondary_trampoline_end[]; extern unsigned long socfpga_cpu1start_addr; diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c index fbb80b883e5d..201191cf68f3 100644 --- a/arch/arm/mach-socfpga/platsmp.c +++ b/arch/arm/mach-socfpga/platsmp.c @@ -20,14 +20,14 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle) { - int trampoline_size = &secondary_trampoline_end - &secondary_trampoline; + int trampoline_size = secondary_trampoline_end - secondary_trampoline; if (socfpga_cpu1start_addr) { /* This will put CPU #1 into reset. 
*/ writel(RSTMGR_MPUMODRST_CPU1, rst_manager_base_addr + SOCFPGA_RSTMGR_MODMPURST); - memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size); + memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size); writel(__pa_symbol(secondary_startup), sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff)); @@ -45,12 +45,12 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle) static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle) { - int trampoline_size = &secondary_trampoline_end - &secondary_trampoline; + int trampoline_size = secondary_trampoline_end - secondary_trampoline; if (socfpga_cpu1start_addr) { writel(RSTMGR_MPUMODRST_CPU1, rst_manager_base_addr + SOCFPGA_A10_RSTMGR_MODMPURST); - memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size); + memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size); writel(__pa_symbol(secondary_startup), sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff)); diff --git a/arch/arm64/boot/dts/apple/t8103.dtsi b/arch/arm64/boot/dts/apple/t8103.dtsi index fc8b2bb06ffe..e22c9433d5e0 100644 --- a/arch/arm64/boot/dts/apple/t8103.dtsi +++ b/arch/arm64/boot/dts/apple/t8103.dtsi @@ -7,6 +7,7 @@ * Copyright The Asahi Linux Contributors */ +#include <dt-bindings/gpio/gpio.h> #include <dt-bindings/interrupt-controller/apple-aic.h> #include <dt-bindings/interrupt-controller/irq.h> #include <dt-bindings/pinctrl/apple.h> @@ -281,7 +282,7 @@ port00: pci@0,0 { device_type = "pci"; reg = <0x0 0x0 0x0 0x0 0x0>; - reset-gpios = <&pinctrl_ap 152 0>; + reset-gpios = <&pinctrl_ap 152 GPIO_ACTIVE_LOW>; max-link-speed = <2>; #address-cells = <3>; @@ -301,7 +302,7 @@ port01: pci@1,0 { device_type = "pci"; reg = <0x800 0x0 0x0 0x0 0x0>; - reset-gpios = <&pinctrl_ap 153 0>; + reset-gpios = <&pinctrl_ap 153 GPIO_ACTIVE_LOW>; max-link-speed = <2>; #address-cells = <3>; @@ -321,7 +322,7 @@ port02: pci@2,0 { device_type = "pci"; reg = <0x1000 0x0 0x0 0x0 0x0>; - reset-gpios = <&pinctrl_ap 33 0>; + reset-gpios = <&pinctrl_ap 33 GPIO_ACTIVE_LOW>; max-link-speed = <1>; #address-cells = <3>; diff --git a/arch/arm64/boot/dts/exynos/exynosautov9.dtsi b/arch/arm64/boot/dts/exynos/exynosautov9.dtsi index 3e4727344b4a..a960c0bc2dba 100644 --- a/arch/arm64/boot/dts/exynos/exynosautov9.dtsi +++ b/arch/arm64/boot/dts/exynos/exynosautov9.dtsi @@ -296,8 +296,7 @@ pinctrl-0 = <&ufs_rst_n &ufs_refclk_out>; phys = <&ufs_0_phy>; phy-names = "ufs-phy"; - samsung,sysreg = <&syscon_fsys2>; - samsung,ufs-shareability-reg-offset = <0x710>; + samsung,sysreg = <&syscon_fsys2 0x710>; status = "disabled"; }; }; diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h index 347b0cc68f07..1494cfa8639b 100644 --- a/arch/arm64/include/asm/ftrace.h +++ b/arch/arm64/include/asm/ftrace.h @@ -12,6 +12,17 @@ #define HAVE_FUNCTION_GRAPH_FP_TEST +/* + * HAVE_FUNCTION_GRAPH_RET_ADDR_PTR means that the architecture can provide a + * "return address pointer" which can be used to uniquely identify a return + * address which has been overwritten. + * + * On arm64 we use the address of the caller's frame record, which remains the + * same for the lifetime of the instrumented function, unlike the return + * address in the LR. 
+ */ +#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR + #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS #define ARCH_SUPPORTS_FTRACE_OPS 1 #else diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index a39fcf318c77..01d47c5886dc 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -91,7 +91,7 @@ #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H) /* TCR_EL2 Registers bits */ -#define TCR_EL2_RES1 ((1 << 31) | (1 << 23)) +#define TCR_EL2_RES1 ((1U << 31) | (1 << 23)) #define TCR_EL2_TBI (1 << 20) #define TCR_EL2_PS_SHIFT 16 #define TCR_EL2_PS_MASK (7 << TCR_EL2_PS_SHIFT) @@ -276,7 +276,7 @@ #define CPTR_EL2_TFP_SHIFT 10 /* Hyp Coprocessor Trap Register */ -#define CPTR_EL2_TCPAC (1 << 31) +#define CPTR_EL2_TCPAC (1U << 31) #define CPTR_EL2_TAM (1 << 30) #define CPTR_EL2_TTA (1 << 20) #define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT) diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h index 8433a2058eb1..237224484d0f 100644 --- a/arch/arm64/include/asm/pgalloc.h +++ b/arch/arm64/include/asm/pgalloc.h @@ -76,7 +76,7 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep, static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) { - VM_BUG_ON(mm != &init_mm); + VM_BUG_ON(mm && mm != &init_mm); __pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE | PMD_TABLE_UXN); } diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index a4e046ef4568..6564a01cc085 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -47,9 +47,6 @@ struct stack_info { * @prev_type: The type of stack this frame record was on, or a synthetic * value of STACK_TYPE_UNKNOWN. This is used to detect a * transition from one stack to another. - * - * @graph: When FUNCTION_GRAPH_TRACER is selected, holds the index of a - * replacement lr value in the ftrace graph stack. */ struct stackframe { unsigned long fp; @@ -57,9 +54,6 @@ struct stackframe { DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES); unsigned long prev_fp; enum stack_type prev_type; -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - int graph; -#endif #ifdef CONFIG_KRETPROBES struct llist_node *kr_cur; #endif diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 6e2e0b7031ab..3a5ff5e20586 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -281,12 +281,22 @@ do { \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ } while (0) +/* + * We must not call into the scheduler between uaccess_ttbr0_enable() and + * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions, + * we must evaluate these outside of the critical section. + */ #define __raw_get_user(x, ptr, err) \ do { \ + __typeof__(*(ptr)) __user *__rgu_ptr = (ptr); \ + __typeof__(x) __rgu_val; \ __chk_user_ptr(ptr); \ + \ uaccess_ttbr0_enable(); \ - __raw_get_mem("ldtr", x, ptr, err); \ + __raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err); \ uaccess_ttbr0_disable(); \ + \ + (x) = __rgu_val; \ } while (0) #define __get_user_error(x, ptr, err) \ @@ -310,14 +320,22 @@ do { \ #define get_user __get_user +/* + * We must not call into the scheduler between __uaccess_enable_tco_async() and + * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking + * functions, we must evaluate these outside of the critical section. 
+ */ #define __get_kernel_nofault(dst, src, type, err_label) \ do { \ + __typeof__(dst) __gkn_dst = (dst); \ + __typeof__(src) __gkn_src = (src); \ int __gkn_err = 0; \ \ __uaccess_enable_tco_async(); \ - __raw_get_mem("ldr", *((type *)(dst)), \ - (__force type *)(src), __gkn_err); \ + __raw_get_mem("ldr", *((type *)(__gkn_dst)), \ + (__force type *)(__gkn_src), __gkn_err); \ __uaccess_disable_tco_async(); \ + \ if (unlikely(__gkn_err)) \ goto err_label; \ } while (0) @@ -351,11 +369,19 @@ do { \ } \ } while (0) +/* + * We must not call into the scheduler between uaccess_ttbr0_enable() and + * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions, + * we must evaluate these outside of the critical section. + */ #define __raw_put_user(x, ptr, err) \ do { \ - __chk_user_ptr(ptr); \ + __typeof__(*(ptr)) __user *__rpu_ptr = (ptr); \ + __typeof__(*(ptr)) __rpu_val = (x); \ + __chk_user_ptr(__rpu_ptr); \ + \ uaccess_ttbr0_enable(); \ - __raw_put_mem("sttr", x, ptr, err); \ + __raw_put_mem("sttr", __rpu_val, __rpu_ptr, err); \ uaccess_ttbr0_disable(); \ } while (0) @@ -380,14 +406,22 @@ do { \ #define put_user __put_user +/* + * We must not call into the scheduler between __uaccess_enable_tco_async() and + * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking + * functions, we must evaluate these outside of the critical section. + */ #define __put_kernel_nofault(dst, src, type, err_label) \ do { \ + __typeof__(dst) __pkn_dst = (dst); \ + __typeof__(src) __pkn_src = (src); \ int __pkn_err = 0; \ \ __uaccess_enable_tco_async(); \ - __raw_put_mem("str", *((type *)(src)), \ - (__force type *)(dst), __pkn_err); \ + __raw_put_mem("str", *((type *)(__pkn_src)), \ + (__force type *)(__pkn_dst), __pkn_err); \ __uaccess_disable_tco_async(); \ + \ if (unlikely(__pkn_err)) \ goto err_label; \ } while(0) diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index b3e4f9a088b1..8cf970d219f5 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S @@ -77,11 +77,17 @@ .endm SYM_CODE_START(ftrace_regs_caller) +#ifdef BTI_C + BTI_C +#endif ftrace_regs_entry 1 b ftrace_common SYM_CODE_END(ftrace_regs_caller) SYM_CODE_START(ftrace_caller) +#ifdef BTI_C + BTI_C +#endif ftrace_regs_entry 0 b ftrace_common SYM_CODE_END(ftrace_caller) diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index fc62dfe73f93..4506c4a90ac1 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -244,8 +244,6 @@ void arch_ftrace_update_code(int command) * on the way back to parent. For this purpose, this function is called * in _mcount() or ftrace_caller() to replace return address (*parent) on * the call stack to return_to_handler. - * - * Note that @frame_pointer is used only for sanity check later. 
 */ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, unsigned long frame_pointer) @@ -263,8 +261,10 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, */ old = *parent; - if (!function_graph_enter(old, self_addr, frame_pointer, NULL)) + if (!function_graph_enter(old, self_addr, frame_pointer, + (void *)frame_pointer)) { *parent = return_hooker; + } } #ifdef CONFIG_DYNAMIC_FTRACE diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index 1038494135c8..6fb31c117ebe 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -147,7 +147,7 @@ int machine_kexec_post_load(struct kimage *kimage) if (rc) return rc; kimage->arch.ttbr1 = __pa(trans_pgd); - kimage->arch.zero_page = __pa(empty_zero_page); + kimage->arch.zero_page = __pa_symbol(empty_zero_page); reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start; memcpy(reloc_code, __relocate_new_kernel_start, reloc_size); diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index c30624fff6ac..94f83cd44e50 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -38,9 +38,6 @@ void start_backtrace(struct stackframe *frame, unsigned long fp, { frame->fp = fp; frame->pc = pc; -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - frame->graph = 0; -#endif #ifdef CONFIG_KRETPROBES frame->kr_cur = NULL; #endif @@ -116,20 +113,23 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) frame->prev_fp = fp; frame->prev_type = info.type; + frame->pc = ptrauth_strip_insn_pac(frame->pc); + #ifdef CONFIG_FUNCTION_GRAPH_TRACER if (tsk->ret_stack && - (ptrauth_strip_insn_pac(frame->pc) == (unsigned long)return_to_handler)) { - struct ftrace_ret_stack *ret_stack; + (frame->pc == (unsigned long)return_to_handler)) { + unsigned long orig_pc; /* * This is a case where function graph tracer has * modified a return address (LR) in a stack frame * to hook a function return. * So replace it to an original value. */ - ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++); - if (WARN_ON_ONCE(!ret_stack)) + orig_pc = ftrace_graph_ret_addr(tsk, NULL, frame->pc, + (void *)frame->fp); + if (WARN_ON_ONCE(frame->pc == orig_pc)) return -EINVAL; - frame->pc = ret_stack->ret; + frame->pc = orig_pc; } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #ifdef CONFIG_KRETPROBES @@ -137,8 +137,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur); #endif - frame->pc = ptrauth_strip_insn_pac(frame->pc); - return 0; } NOKPROBE_SYMBOL(unwind_frame); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 2f03cbfefe67..e4727dc771bf 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -223,7 +223,14 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = 1; break; case KVM_CAP_NR_VCPUS: /* + * ARM64 treats KVM_CAP_NR_VCPUS differently from all other + * architectures, as it does not always bound it to + * KVM_CAP_MAX_VCPUS. It should not matter much because + * this is just an advisory value.
+ */ + r = min_t(unsigned int, num_online_cpus(), + kvm_arm_default_max_vcpus()); break; case KVM_CAP_MAX_VCPUS: case KVM_CAP_MAX_VCPU_ID: diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 7a0af1d39303..96c5f3fb7838 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -403,6 +403,8 @@ typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *); static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu); +static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code); + /* * Allow the hypervisor to handle the exit with an exit handler if it has one. * @@ -429,6 +431,18 @@ static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code) */ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) { + /* + * Save PSTATE early so that we can evaluate the vcpu mode + * early on. + */ + vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR); + + /* + * Check whether we want to repaint the state one way or + * another. + */ + early_exit_filter(vcpu, exit_code); + if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR); diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h index de7e14c862e6..7ecca8b07851 100644 --- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h +++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h @@ -70,7 +70,12 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt) static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt) { ctxt->regs.pc = read_sysreg_el2(SYS_ELR); - ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR); + /* + * Guest PSTATE gets saved at guest fixup time in all + * cases. We still need to handle the nVHE host side here. + */ + if (!has_vhe() && ctxt->__hyp_running_vcpu) + ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR); if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2); diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index c0e3fed26d93..d13115a12434 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -233,7 +233,7 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu) * Returns false if the guest ran in AArch32 when it shouldn't have, and * thus should exit to the host, or true if a the guest run loop can continue. */ -static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code) +static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code) { struct kvm *kvm = kern_hyp_va(vcpu->kvm); @@ -248,10 +248,7 @@ static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code) vcpu->arch.target = -1; *exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT); *exit_code |= ARM_EXCEPTION_IL; - return false; } - - return true; } /* Switch to the guest for legacy non-VHE systems */ @@ -316,9 +313,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) /* Jump in the fire! */ exit_code = __guest_enter(vcpu); - if (unlikely(!handle_aarch32_guest(vcpu, &exit_code))) - break; - /* And we're baaack! 
*/ } while (fixup_guest_exit(vcpu, &exit_code)); diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index 5a2cb5d9bc4b..fbb26b93c347 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -112,6 +112,10 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu) return hyp_exit_handlers; } +static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code) +{ +} + /* Switch to the guest for VHE systems running in EL2 */ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) { diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c index e5fbf8653a21..2020af88b636 100644 --- a/arch/csky/kernel/traps.c +++ b/arch/csky/kernel/traps.c @@ -209,7 +209,7 @@ asmlinkage void do_trap_illinsn(struct pt_regs *regs) asmlinkage void do_trap_fpe(struct pt_regs *regs) { -#ifdef CONFIG_CPU_HAS_FP +#ifdef CONFIG_CPU_HAS_FPU return fpu_fpe(regs); #else do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->pc, @@ -219,7 +219,7 @@ asmlinkage void do_trap_fpe(struct pt_regs *regs) asmlinkage void do_trap_priv(struct pt_regs *regs) { -#ifdef CONFIG_CPU_HAS_FP +#ifdef CONFIG_CPU_HAS_FPU if (user_mode(regs) && fpu_libc_helper(regs)) return; #endif diff --git a/arch/hexagon/include/asm/timer-regs.h b/arch/hexagon/include/asm/timer-regs.h deleted file mode 100644 index ee6c61423a05..000000000000 --- a/arch/hexagon/include/asm/timer-regs.h +++ /dev/null @@ -1,26 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Timer support for Hexagon - * - * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. - */ - -#ifndef _ASM_TIMER_REGS_H -#define _ASM_TIMER_REGS_H - -/* This stuff should go into a platform specific file */ -#define TCX0_CLK_RATE 19200 -#define TIMER_ENABLE 0 -#define TIMER_CLR_ON_MATCH 1 - -/* - * 8x50 HDD Specs 5-8. Simulator co-sim not fixed until - * release 1.1, and then it's "adjustable" and probably not defaulted. - */ -#define RTOS_TIMER_INT 3 -#ifdef CONFIG_HEXAGON_COMET -#define RTOS_TIMER_REGS_ADDR 0xAB000000UL -#endif -#define SLEEP_CLK_RATE 32000 - -#endif diff --git a/arch/hexagon/include/asm/timex.h b/arch/hexagon/include/asm/timex.h index 8d4ec76fceb4..dfe69e118b2b 100644 --- a/arch/hexagon/include/asm/timex.h +++ b/arch/hexagon/include/asm/timex.h @@ -7,11 +7,10 @@ #define _ASM_TIMEX_H #include <asm-generic/timex.h> -#include <asm/timer-regs.h> #include <asm/hexagon_vm.h> /* Using TCX0 as our clock. CLOCK_TICK_RATE scheduled to be removed. */ -#define CLOCK_TICK_RATE TCX0_CLK_RATE +#define CLOCK_TICK_RATE 19200 #define ARCH_HAS_READ_CURRENT_TIMER diff --git a/arch/hexagon/kernel/.gitignore b/arch/hexagon/kernel/.gitignore new file mode 100644 index 000000000000..c5f676c3c224 --- /dev/null +++ b/arch/hexagon/kernel/.gitignore @@ -0,0 +1 @@ +vmlinux.lds diff --git a/arch/hexagon/kernel/time.c b/arch/hexagon/kernel/time.c index feffe527ac92..febc95714d75 100644 --- a/arch/hexagon/kernel/time.c +++ b/arch/hexagon/kernel/time.c @@ -17,9 +17,10 @@ #include <linux/of_irq.h> #include <linux/module.h> -#include <asm/timer-regs.h> #include <asm/hexagon_vm.h> +#define TIMER_ENABLE BIT(0) + /* * For the clocksource we need: * pcycle frequency (600MHz) @@ -33,6 +34,13 @@ cycles_t pcycle_freq_mhz; cycles_t thread_freq_mhz; cycles_t sleep_clk_freq; +/* + * 8x50 HDD Specs 5-8. Simulator co-sim not fixed until + * release 1.1, and then it's "adjustable" and probably not defaulted. 
+ */ +#define RTOS_TIMER_INT 3 +#define RTOS_TIMER_REGS_ADDR 0xAB000000UL + static struct resource rtos_timer_resources[] = { { .start = RTOS_TIMER_REGS_ADDR, @@ -80,7 +88,7 @@ static int set_next_event(unsigned long delta, struct clock_event_device *evt) iowrite32(0, &rtos_timer->clear); iowrite32(delta, &rtos_timer->match); - iowrite32(1 << TIMER_ENABLE, &rtos_timer->enable); + iowrite32(TIMER_ENABLE, &rtos_timer->enable); return 0; } diff --git a/arch/hexagon/lib/io.c b/arch/hexagon/lib/io.c index d35d69d6588c..55f75392857b 100644 --- a/arch/hexagon/lib/io.c +++ b/arch/hexagon/lib/io.c @@ -27,6 +27,7 @@ void __raw_readsw(const void __iomem *addr, void *data, int len) *dst++ = *src; } +EXPORT_SYMBOL(__raw_readsw); /* * __raw_writesw - read words a short at a time @@ -47,6 +48,7 @@ void __raw_writesw(void __iomem *addr, const void *data, int len) } +EXPORT_SYMBOL(__raw_writesw); /* Pretty sure len is pre-adjusted for the length of the access already */ void __raw_readsl(const void __iomem *addr, void *data, int len) @@ -62,6 +64,7 @@ void __raw_readsl(const void __iomem *addr, void *data, int len) } +EXPORT_SYMBOL(__raw_readsl); void __raw_writesl(void __iomem *addr, const void *data, int len) { @@ -76,3 +79,4 @@ void __raw_writesl(void __iomem *addr, const void *data, int len) } +EXPORT_SYMBOL(__raw_writesl); diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl index 6fea1844fb95..707ae121f6d3 100644 --- a/arch/ia64/kernel/syscalls/syscall.tbl +++ b/arch/ia64/kernel/syscalls/syscall.tbl @@ -369,3 +369,4 @@ 446 common landlock_restrict_self sys_landlock_restrict_self # 447 reserved for memfd_secret 448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv diff --git a/arch/m68k/include/asm/cacheflush_mm.h b/arch/m68k/include/asm/cacheflush_mm.h index 8ab46625ddd3..1ac55e7b47f0 100644 --- a/arch/m68k/include/asm/cacheflush_mm.h +++ b/arch/m68k/include/asm/cacheflush_mm.h @@ -250,7 +250,6 @@ static inline void __flush_page_to_ram(void *vaddr) #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 #define flush_dcache_page(page) __flush_page_to_ram(page_address(page)) -void flush_dcache_folio(struct folio *folio); #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) #define flush_icache_page(vma, page) __flush_page_to_ram(page_address(page)) diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl index 7976dff8f879..45bc32a41b90 100644 --- a/arch/m68k/kernel/syscalls/syscall.tbl +++ b/arch/m68k/kernel/syscalls/syscall.tbl @@ -448,3 +448,4 @@ 446 common landlock_restrict_self sys_landlock_restrict_self # 447 reserved for memfd_secret 448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c index 99058a6da956..34d6458340b0 100644 --- a/arch/m68k/kernel/traps.c +++ b/arch/m68k/kernel/traps.c @@ -1145,7 +1145,7 @@ asmlinkage void set_esp0(unsigned long ssp) */ asmlinkage void fpsp040_die(void) { - force_fatal_sig(SIGSEGV); + force_exit_sig(SIGSEGV); } #ifdef CONFIG_M68KFPU_EMU diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl index 6b0e11362bd2..2204bde3ce4a 100644 --- a/arch/microblaze/kernel/syscalls/syscall.tbl +++ b/arch/microblaze/kernel/syscalls/syscall.tbl @@ -454,3 +454,4 @@ 446 common landlock_restrict_self sys_landlock_restrict_self # 447 reserved for memfd_secret 448 common 
process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index de60ad190057..0215dc1529e9 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -3097,7 +3097,7 @@ config STACKTRACE_SUPPORT config PGTABLE_LEVELS int default 4 if PAGE_SIZE_4KB && MIPS_VA_BITS_48 - default 3 if 64BIT && !PAGE_SIZE_64KB + default 3 if 64BIT && (!PAGE_SIZE_64KB || MIPS_VA_BITS_48) default 2 config MIPS_AUTO_PFN_OFFSET diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c index 5a3e325275d0..1c91064cb448 100644 --- a/arch/mips/bcm63xx/clk.c +++ b/arch/mips/bcm63xx/clk.c @@ -381,6 +381,12 @@ void clk_disable(struct clk *clk) EXPORT_SYMBOL(clk_disable); +struct clk *clk_get_parent(struct clk *clk) +{ + return NULL; +} +EXPORT_SYMBOL(clk_get_parent); + unsigned long clk_get_rate(struct clk *clk) { if (!clk) diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index 2861a05c2e0c..f27cf31b4140 100644 --- a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile @@ -52,7 +52,7 @@ endif vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o -vmlinuzobjs-$(CONFIG_KERNEL_ZSTD) += $(obj)/bswapdi.o +vmlinuzobjs-$(CONFIG_KERNEL_ZSTD) += $(obj)/bswapdi.o $(obj)/ashldi3.o targets := $(notdir $(vmlinuzobjs-y)) diff --git a/arch/mips/generic/yamon-dt.c b/arch/mips/generic/yamon-dt.c index a3aa22c77cad..a07a5edbcda7 100644 --- a/arch/mips/generic/yamon-dt.c +++ b/arch/mips/generic/yamon-dt.c @@ -75,7 +75,7 @@ static unsigned int __init gen_fdt_mem_array( __init int yamon_dt_append_memory(void *fdt, const struct yamon_mem_region *regions) { - unsigned long phys_memsize, memsize; + unsigned long phys_memsize = 0, memsize; __be32 mem_array[2 * MAX_MEM_ARRAY_ENTRIES]; unsigned int mem_entries; int i, err, mem_off; diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h index f207388541d5..b3dc9c589442 100644 --- a/arch/mips/include/asm/cacheflush.h +++ b/arch/mips/include/asm/cacheflush.h @@ -61,8 +61,6 @@ static inline void flush_dcache_page(struct page *page) SetPageDcacheDirty(page); } -void flush_dcache_folio(struct folio *folio); - #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index ac0e2cfc6d57..24a529c6c4be 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -1734,8 +1734,6 @@ static inline void decode_cpucfg(struct cpuinfo_mips *c) static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { - decode_configs(c); - /* All Loongson processors covered here define ExcCode 16 as GSExc. 
*/ c->options |= MIPS_CPU_GSEXCEX; @@ -1796,6 +1794,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) panic("Unknown Loongson Processor ID!"); break; } + + decode_configs(c); } #else static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { } diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 376a6e2676e9..9f47a889b047 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -185,7 +185,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_puts(m, " tx39_cache"); if (cpu_has_octeon_cache) seq_puts(m, " octeon_cache"); - if (cpu_has_fpu) + if (raw_cpu_has_fpu) seq_puts(m, " fpu"); if (cpu_has_32fpr) seq_puts(m, " 32fpr"); diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl index 70e32de2bcaa..72d02d363f36 100644 --- a/arch/mips/kernel/syscalls/syscall_n32.tbl +++ b/arch/mips/kernel/syscalls/syscall_n32.tbl @@ -387,3 +387,4 @@ 446 n32 landlock_restrict_self sys_landlock_restrict_self # 447 reserved for memfd_secret 448 n32 process_mrelease sys_process_mrelease +449 n32 futex_waitv sys_futex_waitv diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl index 1ca7bc337932..e2c481fcede6 100644 --- a/arch/mips/kernel/syscalls/syscall_n64.tbl +++ b/arch/mips/kernel/syscalls/syscall_n64.tbl @@ -363,3 +363,4 @@ 446 n64 landlock_restrict_self sys_landlock_restrict_self # 447 reserved for memfd_secret 448 n64 process_mrelease sys_process_mrelease +449 n64 futex_waitv sys_futex_waitv diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl index a61c35edaa74..3714c97b2643 100644 --- a/arch/mips/kernel/syscalls/syscall_o32.tbl +++ b/arch/mips/kernel/syscalls/syscall_o32.tbl @@ -436,3 +436,4 @@ 446 o32 landlock_restrict_self sys_landlock_restrict_self # 447 reserved for memfd_secret 448 o32 process_mrelease sys_process_mrelease +449 o32 futex_waitv sys_futex_waitv diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 562aa878b266..aa20d074d388 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -1067,7 +1067,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = 1; break; case KVM_CAP_NR_VCPUS: - r = num_online_cpus(); + r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS); break; case KVM_CAP_MAX_VCPUS: r = KVM_MAX_VCPUS; diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c index dd819e31fcbb..4916cccf378f 100644 --- a/arch/mips/lantiq/clk.c +++ b/arch/mips/lantiq/clk.c @@ -158,6 +158,12 @@ void clk_deactivate(struct clk *clk) } EXPORT_SYMBOL(clk_deactivate); +struct clk *clk_get_parent(struct clk *clk) +{ + return NULL; +} +EXPORT_SYMBOL(clk_get_parent); + static inline u32 get_counter_resolution(void) { u32 res; diff --git a/arch/mips/net/bpf_jit_comp.h b/arch/mips/net/bpf_jit_comp.h index 6f3a7b07294b..a37fe20818eb 100644 --- a/arch/mips/net/bpf_jit_comp.h +++ b/arch/mips/net/bpf_jit_comp.h @@ -98,7 +98,7 @@ do { \ #define emit(...) 
__emit(__VA_ARGS__) /* Workaround for R10000 ll/sc errata */ -#ifdef CONFIG_WAR_R10000 +#ifdef CONFIG_WAR_R10000_LLSC #define LLSC_beqz beqzl #else #define LLSC_beqz beqz diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h index 3fc0bb7d6487..c2a222ebfa2a 100644 --- a/arch/nds32/include/asm/cacheflush.h +++ b/arch/nds32/include/asm/cacheflush.h @@ -27,7 +27,6 @@ void flush_cache_vunmap(unsigned long start, unsigned long end); #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 void flush_dcache_page(struct page *page); -void flush_dcache_folio(struct folio *folio); void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, void *src, int len); void copy_from_user_page(struct vm_area_struct *vma, struct page *page, diff --git a/arch/nios2/include/asm/cacheflush.h b/arch/nios2/include/asm/cacheflush.h index 1999561b22aa..d0b71dd71287 100644 --- a/arch/nios2/include/asm/cacheflush.h +++ b/arch/nios2/include/asm/cacheflush.h @@ -29,7 +29,6 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn); #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 void flush_dcache_page(struct page *page); -void flush_dcache_folio(struct folio *folio); extern void flush_icache_range(unsigned long start, unsigned long end); extern void flush_icache_page(struct vm_area_struct *vma, struct page *page); diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 8db4af4879d0..82d77f4b0d08 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -15,7 +15,12 @@ # Mike Shaver, Helge Deller and Martin K. Petersen # +ifdef CONFIG_PARISC_SELF_EXTRACT +boot := arch/parisc/boot +KBUILD_IMAGE := $(boot)/bzImage +else KBUILD_IMAGE := vmlinuz +endif NM = sh $(srctree)/arch/parisc/nm CHECKFLAGS += -D__hppa__=1 diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig index d6fd8fa7ed8c..53061cb2cf7f 100644 --- a/arch/parisc/configs/generic-32bit_defconfig +++ b/arch/parisc/configs/generic-32bit_defconfig @@ -231,6 +231,7 @@ CONFIG_CRYPTO_DEFLATE=y CONFIG_CRC_CCITT=m CONFIG_CRC_T10DIF=y CONFIG_FONTS=y +CONFIG_PRINTK_TIME=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_FS=y CONFIG_DEBUG_MEMORY_INIT=y diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig index d2daeac2b217..1b8fd80cbe7f 100644 --- a/arch/parisc/configs/generic-64bit_defconfig +++ b/arch/parisc/configs/generic-64bit_defconfig @@ -1,7 +1,9 @@ CONFIG_LOCALVERSION="-64bit" # CONFIG_LOCALVERSION_AUTO is not set +CONFIG_KERNEL_LZ4=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_TASKSTATS=y @@ -35,6 +37,7 @@ CONFIG_MODVERSIONS=y CONFIG_BLK_DEV_INTEGRITY=y CONFIG_BINFMT_MISC=m # CONFIG_COMPACTION is not set +CONFIG_MEMORY_FAILURE=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -65,12 +68,15 @@ CONFIG_SCSI_ISCSI_ATTRS=y CONFIG_SCSI_SRP_ATTRS=y CONFIG_ISCSI_BOOT_SYSFS=y CONFIG_SCSI_MPT2SAS=y -CONFIG_SCSI_LASI700=m +CONFIG_SCSI_LASI700=y CONFIG_SCSI_SYM53C8XX_2=y CONFIG_SCSI_ZALON=y CONFIG_SCSI_QLA_ISCSI=m CONFIG_SCSI_DH=y CONFIG_ATA=y +CONFIG_SATA_SIL=y +CONFIG_SATA_SIS=y +CONFIG_SATA_VIA=y CONFIG_PATA_NS87415=y CONFIG_PATA_SIL680=y CONFIG_ATA_GENERIC=y @@ -79,6 +85,7 @@ CONFIG_MD_LINEAR=m CONFIG_BLK_DEV_DM=m CONFIG_DM_RAID=m CONFIG_DM_UEVENT=y +CONFIG_DM_AUDIT=y CONFIG_FUSION=y CONFIG_FUSION_SPI=y CONFIG_FUSION_SAS=y @@ -196,10 +203,15 @@ CONFIG_FB_MATROX_G=y CONFIG_FB_MATROX_I2C=y CONFIG_FB_MATROX_MAVEN=y 
CONFIG_FB_RADEON=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_CLUT224 is not set CONFIG_HIDRAW=y CONFIG_HID_PID=y CONFIG_USB_HIDDEV=y CONFIG_USB=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y CONFIG_UIO=y CONFIG_UIO_PDRV_GENIRQ=m CONFIG_UIO_AEC=m diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h index 7085df079702..6d13ae236fcb 100644 --- a/arch/parisc/include/asm/assembly.h +++ b/arch/parisc/include/asm/assembly.h @@ -3,38 +3,19 @@ * Copyright (C) 1999 Hewlett-Packard (Frank Rowand) * Copyright (C) 1999 Philipp Rumpf <prumpf@tux.org> * Copyright (C) 1999 SuSE GmbH + * Copyright (C) 2021 Helge Deller <deller@gmx.de> */ #ifndef _PARISC_ASSEMBLY_H #define _PARISC_ASSEMBLY_H -#define CALLEE_FLOAT_FRAME_SIZE 80 - #ifdef CONFIG_64BIT -#define LDREG ldd -#define STREG std -#define LDREGX ldd,s -#define LDREGM ldd,mb -#define STREGM std,ma -#define SHRREG shrd -#define SHLREG shld -#define ANDCM andcm,* -#define COND(x) * ## x #define RP_OFFSET 16 #define FRAME_SIZE 128 #define CALLEE_REG_FRAME_SIZE 144 #define REG_SZ 8 #define ASM_ULONG_INSN .dword #else /* CONFIG_64BIT */ -#define LDREG ldw -#define STREG stw -#define LDREGX ldwx,s -#define LDREGM ldwm -#define STREGM stwm -#define SHRREG shr -#define SHLREG shlw -#define ANDCM andcm -#define COND(x) x #define RP_OFFSET 20 #define FRAME_SIZE 64 #define CALLEE_REG_FRAME_SIZE 128 @@ -45,6 +26,7 @@ /* Frame alignment for 32- and 64-bit */ #define FRAME_ALIGN 64 +#define CALLEE_FLOAT_FRAME_SIZE 80 #define CALLEE_SAVE_FRAME_SIZE (CALLEE_REG_FRAME_SIZE + CALLEE_FLOAT_FRAME_SIZE) #ifdef CONFIG_PA20 @@ -68,6 +50,28 @@ #ifdef __ASSEMBLY__ #ifdef CONFIG_64BIT +#define LDREG ldd +#define STREG std +#define LDREGX ldd,s +#define LDREGM ldd,mb +#define STREGM std,ma +#define SHRREG shrd +#define SHLREG shld +#define ANDCM andcm,* +#define COND(x) * ## x +#else /* CONFIG_64BIT */ +#define LDREG ldw +#define STREG stw +#define LDREGX ldwx,s +#define LDREGM ldwm +#define STREGM stwm +#define SHRREG shr +#define SHLREG shlw +#define ANDCM andcm +#define COND(x) x +#endif + +#ifdef CONFIG_64BIT /* the 64-bit pa gnu assembler unfortunately defaults to .level 1.1 or 2.0 so * work around that for now... */ .level 2.0w @@ -143,6 +147,17 @@ extrd,u \r, 63-(\sa), 64-(\sa), \t .endm + /* Extract unsigned for 32- and 64-bit + * The extru instruction leaves the most significant 32 bits of the + * target register in an undefined state on PA 2.0 systems. */ + .macro extru_safe r, p, len, t +#ifdef CONFIG_64BIT + extrd,u \r, 32+(\p), \len, \t +#else + extru \r, \p, \len, \t +#endif + .endm + /* load 32-bit 'value' into 'reg' compensating for the ldil * sign-extension when running in wide mode. * WARNING!! 
neither 'value' nor 'reg' can be expressions diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index da0cd4b3a28f..859b8a34adcf 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h @@ -50,7 +50,6 @@ void invalidate_kernel_vmap_range(void *vaddr, int size); #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 void flush_dcache_page(struct page *page); -void flush_dcache_folio(struct folio *folio); #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages) #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages) diff --git a/arch/parisc/include/asm/jump_label.h b/arch/parisc/include/asm/jump_label.h index 7efb1aa2f7f8..af2a598bc0f8 100644 --- a/arch/parisc/include/asm/jump_label.h +++ b/arch/parisc/include/asm/jump_label.h @@ -5,6 +5,7 @@ #ifndef __ASSEMBLY__ #include <linux/types.h> +#include <linux/stringify.h> #include <asm/assembly.h> #define JUMP_LABEL_NOP_SIZE 4 diff --git a/arch/parisc/include/asm/rt_sigframe.h b/arch/parisc/include/asm/rt_sigframe.h index 4b9e3d707571..2b3010ade00e 100644 --- a/arch/parisc/include/asm/rt_sigframe.h +++ b/arch/parisc/include/asm/rt_sigframe.h @@ -2,7 +2,7 @@ #ifndef _ASM_PARISC_RT_SIGFRAME_H #define _ASM_PARISC_RT_SIGFRAME_H -#define SIGRETURN_TRAMP 3 +#define SIGRETURN_TRAMP 4 #define SIGRESTARTBLOCK_TRAMP 5 #define TRAMP_SIZE (SIGRETURN_TRAMP + SIGRESTARTBLOCK_TRAMP) diff --git a/arch/parisc/install.sh b/arch/parisc/install.sh index 056d588befdd..70d3cffb0251 100644 --- a/arch/parisc/install.sh +++ b/arch/parisc/install.sh @@ -39,6 +39,7 @@ verify "$3" if [ -n "${INSTALLKERNEL}" ]; then if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi + if [ -x /usr/sbin/${INSTALLKERNEL} ]; then exec /usr/sbin/${INSTALLKERNEL} "$@"; fi fi # Default install diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 88c188a965d8..6e9cdb269862 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -366,17 +366,9 @@ */ .macro L2_ptep pmd,pte,index,va,fault #if CONFIG_PGTABLE_LEVELS == 3 - extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index + extru_safe \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index #else -# if defined(CONFIG_64BIT) - extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index - #else - # if PAGE_SIZE > 4096 - extru \va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index - # else - extru \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index - # endif -# endif + extru_safe \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index #endif dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */ #if CONFIG_PGTABLE_LEVELS < 3 @@ -386,7 +378,7 @@ bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */ SHLREG \pmd,PxD_VALUE_SHIFT,\pmd - extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index + extru_safe \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */ shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */ .endm diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index bbfe23c40c01..46b1050640b8 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -288,21 +288,22 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs, already in userspace. The first words of tramp are used to save the previous sigrestartblock trampoline that might be on the stack. We start the sigreturn trampoline at - SIGRESTARTBLOCK_TRAMP. 
 */ + SIGRESTARTBLOCK_TRAMP+X. */ err |= __put_user(in_syscall ? INSN_LDI_R25_1 : INSN_LDI_R25_0, &frame->tramp[SIGRESTARTBLOCK_TRAMP+0]); - err |= __put_user(INSN_BLE_SR2_R0, + err |= __put_user(INSN_LDI_R20, &frame->tramp[SIGRESTARTBLOCK_TRAMP+1]); - err |= __put_user(INSN_LDI_R20, + err |= __put_user(INSN_BLE_SR2_R0, &frame->tramp[SIGRESTARTBLOCK_TRAMP+2]); + err |= __put_user(INSN_NOP, &frame->tramp[SIGRESTARTBLOCK_TRAMP+3]); - start = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP+0]; - end = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP+3]; + start = (unsigned long) &frame->tramp[0]; + end = (unsigned long) &frame->tramp[TRAMP_SIZE]; flush_user_dcache_range_asm(start, end); flush_user_icache_range_asm(start, end); /* TRAMP Words 0-4, Length 5 = SIGRESTARTBLOCK_TRAMP - * TRAMP Words 5-7, Length 3 = SIGRETURN_TRAMP + * TRAMP Words 5-9, Length 4 = SIGRETURN_TRAMP * So the SIGRETURN_TRAMP is at the end of SIGRESTARTBLOCK_TRAMP */ rp = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP]; diff --git a/arch/parisc/kernel/signal32.h b/arch/parisc/kernel/signal32.h index a5bdbb5678b7..f166250f2d06 100644 --- a/arch/parisc/kernel/signal32.h +++ b/arch/parisc/kernel/signal32.h @@ -36,7 +36,7 @@ struct compat_regfile { compat_int_t rf_sar; }; -#define COMPAT_SIGRETURN_TRAMP 3 +#define COMPAT_SIGRETURN_TRAMP 4 #define COMPAT_SIGRESTARTBLOCK_TRAMP 5 #define COMPAT_TRAMP_SIZE (COMPAT_SIGRETURN_TRAMP + \ COMPAT_SIGRESTARTBLOCK_TRAMP) diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 4fb3b6a993bf..d2497b339d13 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -566,7 +566,7 @@ lws_compare_and_swap: ldo R%lws_lock_start(%r20), %r28 /* Extract eight bits from r26 and hash lock (Bits 3-11) */ - extru %r26, 28, 8, %r20 + extru_safe %r26, 28, 8, %r20 /* Find lock to use, the hash is either one of 0 to 15, multiplied by 16 (keep it 16-byte aligned) @@ -751,7 +751,7 @@ cas2_lock_start: ldo R%lws_lock_start(%r20), %r28 /* Extract eight bits from r26 and hash lock (Bits 3-11) */ - extru %r26, 28, 8, %r20 + extru_safe %r26, 28, 8, %r20 /* Find lock to use, the hash is either one of 0 to 15, multiplied by 16 (keep it 16-byte aligned) diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl index bf751e0732b7..358c00000755 100644 --- a/arch/parisc/kernel/syscalls/syscall.tbl +++ b/arch/parisc/kernel/syscalls/syscall.tbl @@ -446,3 +446,4 @@ 446 common landlock_restrict_self sys_landlock_restrict_self # 447 reserved for memfd_secret 448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 9fb1e794831b..061119a56fbe 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -249,30 +249,16 @@ void __init time_init(void) static int __init init_cr16_clocksource(void) { /* - * The cr16 interval timers are not syncronized across CPUs on - * different sockets, so mark them unstable and lower rating on - * multi-socket SMP systems. + * The cr16 interval timers are not synchronized across CPUs, even if + * they share the same socket.
*/ if (num_online_cpus() > 1 && !running_on_qemu) { - int cpu; - unsigned long cpu0_loc; - cpu0_loc = per_cpu(cpu_data, 0).cpu_loc; - - for_each_online_cpu(cpu) { - if (cpu == 0) - continue; - if ((cpu0_loc != 0) && - (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc)) - continue; - - /* mark sched_clock unstable */ - clear_sched_clock_stable(); - - clocksource_cr16.name = "cr16_unstable"; - clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE; - clocksource_cr16.rating = 0; - break; - } + /* mark sched_clock unstable */ + clear_sched_clock_stable(); + + clocksource_cr16.name = "cr16_unstable"; + clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE; + clocksource_cr16.rating = 0; } /* register at clocksource framework */ diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index 3d208afd15bc..2769eb991f58 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -57,8 +57,6 @@ SECTIONS { . = KERNEL_BINARY_TEXT_START; - _stext = .; /* start of kernel text, includes init code & data */ - __init_begin = .; HEAD_TEXT_SECTION MLONGCALL_DISCARD(INIT_TEXT_SECTION(8)) @@ -82,6 +80,7 @@ SECTIONS /* freed after init ends here */ _text = .; /* Text and read-only data */ + _stext = .; MLONGCALL_KEEP(INIT_TEXT_SECTION(8)) .text ALIGN(PAGE_SIZE) : { TEXT_TEXT diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 0e3640e14eb1..5fa68c2ef1f8 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -196,3 +196,6 @@ clean-files := vmlinux.lds # Force dependency (incbin is bad) $(obj)/vdso32_wrapper.o : $(obj)/vdso32/vdso32.so.dbg $(obj)/vdso64_wrapper.o : $(obj)/vdso64/vdso64.so.dbg + +# for cleaning +subdir- += vdso32 vdso64 diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h index 6b1ec9e3541b..349c4a820231 100644 --- a/arch/powerpc/kernel/head_32.h +++ b/arch/powerpc/kernel/head_32.h @@ -202,11 +202,11 @@ vmap_stack_overflow: mfspr r1, SPRN_SPRG_THREAD lwz r1, TASK_CPU - THREAD(r1) slwi r1, r1, 3 - addis r1, r1, emergency_ctx@ha + addis r1, r1, emergency_ctx-PAGE_OFFSET@ha #else - lis r1, emergency_ctx@ha + lis r1, emergency_ctx-PAGE_OFFSET@ha #endif - lwz r1, emergency_ctx@l(r1) + lwz r1, emergency_ctx-PAGE_OFFSET@l(r1) addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE EXCEPTION_PROLOG_2 0 vmap_stack_overflow prepare_transfer_to_handler diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 2d596881b70e..0d073b9fd52c 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -733,6 +733,7 @@ _GLOBAL(mmu_pin_tlb) #ifdef CONFIG_PIN_TLB_DATA LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET) LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED) + li r8, 0 #ifdef CONFIG_PIN_TLB_IMMR li r0, 3 #else @@ -741,26 +742,26 @@ _GLOBAL(mmu_pin_tlb) mtctr r0 cmpwi r4, 0 beq 4f - LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT) LOAD_REG_ADDR(r9, _sinittext) 2: ori r0, r6, MD_EVALID + ori r12, r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT mtspr SPRN_MD_CTR, r5 mtspr SPRN_MD_EPN, r0 mtspr SPRN_MD_TWC, r7 - mtspr SPRN_MD_RPN, r8 + mtspr SPRN_MD_RPN, r12 addi r5, r5, 0x100 addis r6, r6, SZ_8M@h addis r8, r8, SZ_8M@h cmplw r6, r9 bdnzt lt, 2b - -4: LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT) +4: 2: ori r0, r6, MD_EVALID + ori r12, r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT mtspr SPRN_MD_CTR, r5 mtspr SPRN_MD_EPN, r0 mtspr SPRN_MD_TWC, r7 - mtspr SPRN_MD_RPN, r8 + mtspr 
SPRN_MD_RPN, r12 addi r5, r5, 0x100 addis r6, r6, SZ_8M@h addis r8, r8, SZ_8M@h @@ -781,7 +782,7 @@ _GLOBAL(mmu_pin_tlb) #endif #if defined(CONFIG_PIN_TLB_IMMR) || defined(CONFIG_PIN_TLB_DATA) lis r0, (MD_RSV4I | MD_TWAM)@h - mtspr SPRN_MI_CTR, r0 + mtspr SPRN_MD_CTR, r0 #endif mtspr SPRN_SRR1, r10 mtspr SPRN_SRR0, r11 diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h index 1f07317964e4..618aeccdf691 100644 --- a/arch/powerpc/kernel/signal.h +++ b/arch/powerpc/kernel/signal.h @@ -25,8 +25,14 @@ static inline int __get_user_sigset(sigset_t *dst, const sigset_t __user *src) return __get_user(dst->sig[0], (u64 __user *)&src->sig[0]); } -#define unsafe_get_user_sigset(dst, src, label) \ - unsafe_get_user((dst)->sig[0], (u64 __user *)&(src)->sig[0], label) +#define unsafe_get_user_sigset(dst, src, label) do { \ + sigset_t *__dst = dst; \ + const sigset_t __user *__src = src; \ + int i; \ + \ + for (i = 0; i < _NSIG_WORDS; i++) \ + unsafe_get_user(__dst->sig[i], &__src->sig[i], label); \ +} while (0) #ifdef CONFIG_VSX extern unsigned long copy_vsx_to_user(void __user *to, diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 00a9c9cd6d42..3e053e2fd6b6 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -1063,7 +1063,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, * We kill the task with a SIGSEGV in this situation. */ if (do_setcontext(new_ctx, regs, 0)) { - force_fatal_sig(SIGSEGV); + force_exit_sig(SIGSEGV); return -EFAULT; } diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index ef518535d436..d1e1fc0acbea 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -704,7 +704,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, */ if (__get_user_sigset(&set, &new_ctx->uc_sigmask)) { - force_fatal_sig(SIGSEGV); + force_exit_sig(SIGSEGV); return -EFAULT; } set_current_blocked(&set); @@ -713,7 +713,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx, return -EFAULT; if (__unsafe_restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext)) { user_read_access_end(); - force_fatal_sig(SIGSEGV); + force_exit_sig(SIGSEGV); return -EFAULT; } user_read_access_end(); diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl index 7bef917cc84e..15109af9d075 100644 --- a/arch/powerpc/kernel/syscalls/syscall.tbl +++ b/arch/powerpc/kernel/syscalls/syscall.tbl @@ -528,3 +528,4 @@ 446 common landlock_restrict_self sys_landlock_restrict_self # 447 reserved for memfd_secret 448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c index f9ea0e5357f9..3fa6d240bade 100644 --- a/arch/powerpc/kernel/watchdog.c +++ b/arch/powerpc/kernel/watchdog.c @@ -187,6 +187,12 @@ static void watchdog_smp_panic(int cpu, u64 tb) if (sysctl_hardlockup_all_cpu_backtrace) trigger_allbutself_cpu_backtrace(); + /* + * Force flush any remote buffers that might be stuck in IRQ context + * and therefore could not run their irq_work. 
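+ * + * (Ordering sketch, inferred from the surrounding code rather than quoted from it: trigger_allbutself_cpu_backtrace() above printk()s from NMI context into per-CPU buffers; flushing here pushes those backtraces out to the console before nmi_panic() below can stop the machine.)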
+ */ + printk_trigger_flush(); + if (hardlockup_panic) nmi_panic(NULL, "Hard LOCKUP"); diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index fcf4760a3a0e..70b7a8f97153 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -695,6 +695,7 @@ static void flush_guest_tlb(struct kvm *kvm) "r" (0) : "memory"); } asm volatile("ptesync": : :"memory"); + // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now. asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory"); } else { for (set = 0; set < kvm->arch.tlb_sets; ++set) { @@ -705,7 +706,9 @@ static void flush_guest_tlb(struct kvm *kvm) rb += PPC_BIT(51); /* increment set number */ } asm volatile("ptesync": : :"memory"); - asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory"); + // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now. + if (cpu_has_feature(CPU_FTR_ARCH_300)) + asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory"); } } diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index eb776d0c5d8e..32a4b4d412b9 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -2005,7 +2005,7 @@ hcall_real_table: .globl hcall_real_table_end hcall_real_table_end: -_GLOBAL(kvmppc_h_set_xdabr) +_GLOBAL_TOC(kvmppc_h_set_xdabr) EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr) andi. r0, r5, DABRX_USER | DABRX_KERNEL beq 6f @@ -2015,7 +2015,7 @@ EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr) 6: li r3, H_PARAMETER blr -_GLOBAL(kvmppc_h_set_dabr) +_GLOBAL_TOC(kvmppc_h_set_dabr) EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr) li r5, DABRX_USER | DABRX_KERNEL 3: diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 35e9cccdeef9..a72920f4f221 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -641,9 +641,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) * implementations just count online CPUs. 
*/ if (hv_enabled) - r = num_present_cpus(); + r = min_t(unsigned int, num_present_cpus(), KVM_MAX_VCPUS); else - r = num_online_cpus(); + r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS); break; case KVM_CAP_MAX_VCPUS: r = KVM_MAX_VCPUS; diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c index 8fc49b1b4a91..6ec978967da0 100644 --- a/arch/powerpc/mm/nohash/kaslr_booke.c +++ b/arch/powerpc/mm/nohash/kaslr_booke.c @@ -314,7 +314,7 @@ static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size pr_warn("KASLR: No safe seed for randomizing the kernel base.\n"); ram = min_t(phys_addr_t, __max_low_memory, size); - ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, false); + ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, true); linear_sz = min_t(unsigned long, ram, SZ_512M); /* If the linear size is smaller than 64M, do not randmize */ diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c index 89353d4f5604..647bf454a0fa 100644 --- a/arch/powerpc/mm/nohash/tlb.c +++ b/arch/powerpc/mm/nohash/tlb.c @@ -645,7 +645,7 @@ static void early_init_this_mmu(void) if (map) linear_map_top = map_mem_in_cams(linear_map_top, - num_cams, true, true); + num_cams, false, true); } #endif @@ -766,7 +766,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base, num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4; linear_sz = map_mem_in_cams(first_memblock_size, num_cams, - false, true); + true, true); ppc64_rma_size = min_t(u64, linear_sz, 0x40000000); } else diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 6f14c8fb6359..59d3cfcd7887 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -376,9 +376,9 @@ static void initialize_form2_numa_distance_lookup_table(void) { int i, j; struct device_node *root; - const __u8 *numa_dist_table; + const __u8 *form2_distances; const __be32 *numa_lookup_index; - int numa_dist_table_length; + int form2_distances_length; int max_numa_index, distance_index; if (firmware_has_feature(FW_FEATURE_OPAL)) @@ -392,45 +392,41 @@ static void initialize_form2_numa_distance_lookup_table(void) max_numa_index = of_read_number(&numa_lookup_index[0], 1); /* first element of the array is the size and is encode-int */ - numa_dist_table = of_get_property(root, "ibm,numa-distance-table", NULL); - numa_dist_table_length = of_read_number((const __be32 *)&numa_dist_table[0], 1); + form2_distances = of_get_property(root, "ibm,numa-distance-table", NULL); + form2_distances_length = of_read_number((const __be32 *)&form2_distances[0], 1); /* Skip the size which is encoded int */ - numa_dist_table += sizeof(__be32); + form2_distances += sizeof(__be32); - pr_debug("numa_dist_table_len = %d, numa_dist_indexes_len = %d\n", - numa_dist_table_length, max_numa_index); + pr_debug("form2_distances_len = %d, numa_dist_indexes_len = %d\n", + form2_distances_length, max_numa_index); for (i = 0; i < max_numa_index; i++) /* +1 skip the max_numa_index in the property */ numa_id_index_table[i] = of_read_number(&numa_lookup_index[i + 1], 1); - if (numa_dist_table_length != max_numa_index * max_numa_index) { + if (form2_distances_length != max_numa_index * max_numa_index) { WARN(1, "Wrong NUMA distance information\n"); - /* consider everybody else just remote. 
*/ - for (i = 0; i < max_numa_index; i++) { - for (j = 0; j < max_numa_index; j++) { - int nodeA = numa_id_index_table[i]; - int nodeB = numa_id_index_table[j]; - - if (nodeA == nodeB) - numa_distance_table[nodeA][nodeB] = LOCAL_DISTANCE; - else - numa_distance_table[nodeA][nodeB] = REMOTE_DISTANCE; - } - } + form2_distances = NULL; // don't use it } - distance_index = 0; for (i = 0; i < max_numa_index; i++) { for (j = 0; j < max_numa_index; j++) { int nodeA = numa_id_index_table[i]; int nodeB = numa_id_index_table[j]; - - numa_distance_table[nodeA][nodeB] = numa_dist_table[distance_index++]; - pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, numa_distance_table[nodeA][nodeB]); + int dist; + + if (form2_distances) + dist = form2_distances[distance_index++]; + else if (nodeA == nodeB) + dist = LOCAL_DISTANCE; + else + dist = REMOTE_DISTANCE; + numa_distance_table[nodeA][nodeB] = dist; + pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, dist); } } + of_node_put(root); } diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c index bb789f33c70e..a38372f9ac12 100644 --- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c +++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c @@ -186,7 +186,6 @@ err: static int mcu_remove(struct i2c_client *client) { struct mcu *mcu = i2c_get_clientdata(client); - int ret; kthread_stop(shutdown_thread); diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 49b401536d29..8f998e55735b 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -1094,15 +1094,6 @@ static phys_addr_t ddw_memory_hotplug_max(void) phys_addr_t max_addr = memory_hotplug_max(); struct device_node *memory; - /* - * The "ibm,pmemory" can appear anywhere in the address space. - * Assuming it is still backed by page structs, set the upper limit - * for the huge DMA window as MAX_PHYSMEM_BITS. - */ - if (of_find_node_by_type(NULL, "ibm,pmemory")) - return (sizeof(phys_addr_t) * 8 <= MAX_PHYSMEM_BITS) ? - (phys_addr_t) -1 : (1ULL << MAX_PHYSMEM_BITS); - for_each_node_by_type(memory, "memory") { unsigned long start, size; int n_mem_addr_cells, n_mem_size_cells, len; @@ -1238,7 +1229,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) u32 ddw_avail[DDW_APPLICABLE_SIZE]; struct dma_win *window; struct property *win64; - bool ddw_enabled = false; struct failed_ddw_pdn *fpdn; bool default_win_removed = false, direct_mapping = false; bool pmem_present; @@ -1253,7 +1243,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) if (find_existing_ddw(pdn, &dev->dev.archdata.dma_offset, &len)) { direct_mapping = (len >= max_ram_len); - ddw_enabled = true; goto out_unlock; } @@ -1367,8 +1356,10 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) len = order_base_2(query.largest_available_block << page_shift); win_name = DMA64_PROPNAME; } else { - direct_mapping = true; - win_name = DIRECT64_PROPNAME; + direct_mapping = !default_win_removed || + (len == MAX_PHYSMEM_BITS) || + (!pmem_present && (len == max_ram_len)); + win_name = direct_mapping ? 
DIRECT64_PROPNAME : DMA64_PROPNAME; } ret = create_ddw(dev, ddw_avail, &create, page_shift, len); @@ -1406,8 +1397,8 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) dev_info(&dev->dev, "failed to map DMA window for %pOF: %d\n", dn, ret); - /* Make sure to clean DDW if any TCE was set*/ - clean_dma_window(pdn, win64->value); + /* Make sure to clean DDW if any TCE was set*/ + clean_dma_window(pdn, win64->value); goto out_del_list; } } else { @@ -1454,7 +1445,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) spin_unlock(&dma_win_list_lock); dev->dev.archdata.dma_offset = win_addr; - ddw_enabled = true; goto out_unlock; out_del_list: @@ -1490,10 +1480,10 @@ out_unlock: * as RAM, then we failed to create a window to cover persistent * memory and need to set the DMA limit. */ - if (pmem_present && ddw_enabled && direct_mapping && len == max_ram_len) + if (pmem_present && direct_mapping && len == max_ram_len) dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset + (1ULL << len); - return ddw_enabled && direct_mapping; + return direct_mapping; } static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) diff --git a/arch/powerpc/sysdev/xive/Kconfig b/arch/powerpc/sysdev/xive/Kconfig index 97796c6b63f0..785c292d104b 100644 --- a/arch/powerpc/sysdev/xive/Kconfig +++ b/arch/powerpc/sysdev/xive/Kconfig @@ -3,7 +3,6 @@ config PPC_XIVE bool select PPC_SMP_MUXED_IPI select HARDIRQS_SW_RESEND - select IRQ_DOMAIN_NOMAP config PPC_XIVE_NATIVE bool diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index c5d75c02ad8b..7b69299c2912 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -1443,8 +1443,7 @@ static const struct irq_domain_ops xive_irq_domain_ops = { static void __init xive_init_host(struct device_node *np) { - xive_irq_domain = irq_domain_add_nomap(np, XIVE_MAX_IRQ, - &xive_irq_domain_ops, NULL); + xive_irq_domain = irq_domain_add_tree(np, &xive_irq_domain_ops, NULL); if (WARN_ON(xive_irq_domain == NULL)) return; irq_set_default_host(xive_irq_domain); diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 5927c94302b8..8a107ed18b0d 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -107,11 +107,13 @@ PHONY += vdso_install vdso_install: $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@ +ifeq ($(KBUILD_EXTMOD),) ifeq ($(CONFIG_MMU),y) prepare: vdso_prepare vdso_prepare: prepare0 $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso include/generated/vdso-offsets.h endif +endif ifneq ($(CONFIG_XIP_KERNEL),y) ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_SOC_CANAAN),yy) diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index c252fd5706d2..ef473e2f503b 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -19,6 +19,8 @@ CONFIG_SOC_VIRT=y CONFIG_SOC_MICROCHIP_POLARFIRE=y CONFIG_SMP=y CONFIG_HOTPLUG_CPU=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m CONFIG_JUMP_LABEL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig index 434ef5b64599..6e9f12ff968a 100644 --- a/arch/riscv/configs/rv32_defconfig +++ b/arch/riscv/configs/rv32_defconfig @@ -19,6 +19,8 @@ CONFIG_SOC_VIRT=y CONFIG_ARCH_RV32I=y CONFIG_SMP=y CONFIG_HOTPLUG_CPU=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m CONFIG_JUMP_LABEL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 25ba21f98504..2639b9ee48f9 100644 --- 
a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -12,14 +12,12 @@ #include <linux/types.h> #include <linux/kvm.h> #include <linux/kvm_types.h> +#include <asm/csr.h> #include <asm/kvm_vcpu_fp.h> #include <asm/kvm_vcpu_timer.h> -#ifdef CONFIG_64BIT -#define KVM_MAX_VCPUS (1U << 16) -#else -#define KVM_MAX_VCPUS (1U << 9) -#endif +#define KVM_MAX_VCPUS \ + ((HGATP_VMID_MASK >> HGATP_VMID_SHIFT) + 1) #define KVM_HALT_POLL_NS_DEFAULT 500000 diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c index d81bae8eb55e..fc058ff5f4b6 100644 --- a/arch/riscv/kvm/mmu.c +++ b/arch/riscv/kvm/mmu.c @@ -453,6 +453,12 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm) void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) { + gpa_t gpa = slot->base_gfn << PAGE_SHIFT; + phys_addr_t size = slot->npages << PAGE_SHIFT; + + spin_lock(&kvm->mmu_lock); + stage2_unmap_range(kvm, gpa, size, false); + spin_unlock(&kvm->mmu_lock); } void kvm_arch_commit_memory_region(struct kvm *kvm, diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index e3d3aed46184..fb84619df012 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -740,7 +740,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) * Ensure we set mode to IN_GUEST_MODE after we disable * interrupts and before the final VCPU requests check. * See the comment in kvm_vcpu_exiting_guest_mode() and - * Documentation/virtual/kvm/vcpu-requests.rst + * Documentation/virt/kvm/vcpu-requests.rst */ vcpu->mode = IN_GUEST_MODE; diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c index eb3c045edf11..3b0e703d22cf 100644 --- a/arch/riscv/kvm/vcpu_sbi.c +++ b/arch/riscv/kvm/vcpu_sbi.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/** +/* * Copyright (c) 2019 Western Digital Corporation or its affiliates. 
* * Authors: diff --git a/arch/riscv/kvm/vm.c b/arch/riscv/kvm/vm.c index 26399df15b63..fb18af34a4b5 100644 --- a/arch/riscv/kvm/vm.c +++ b/arch/riscv/kvm/vm.c @@ -74,7 +74,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = 1; break; case KVM_CAP_NR_VCPUS: - r = num_online_cpus(); + r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS); break; case KVM_CAP_MAX_VCPUS: r = KVM_MAX_VCPUS; diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 8857ec3b97eb..2a5bb4f29cfe 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -47,7 +47,7 @@ config ARCH_SUPPORTS_UPROBES config KASAN_SHADOW_OFFSET hex depends on KASAN - default 0x18000000000000 + default 0x1C000000000000 config S390 def_bool y @@ -194,6 +194,7 @@ config S390 select HAVE_RELIABLE_STACKTRACE select HAVE_RSEQ select HAVE_SAMPLE_FTRACE_DIRECT + select HAVE_SAMPLE_FTRACE_DIRECT_MULTI select HAVE_SOFTIRQ_ON_OWN_STACK select HAVE_SYSCALL_TRACEPOINTS select HAVE_VIRT_CPU_ACCOUNTING diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 69c45f600273..609e3697324b 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -77,10 +77,12 @@ KBUILD_AFLAGS_DECOMPRESSOR += $(aflags-y) KBUILD_CFLAGS_DECOMPRESSOR += $(cflags-y) ifneq ($(call cc-option,-mstack-size=8192 -mstack-guard=128),) -cflags-$(CONFIG_CHECK_STACK) += -mstack-size=$(STACK_SIZE) -ifeq ($(call cc-option,-mstack-size=8192),) -cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD) -endif + CC_FLAGS_CHECK_STACK := -mstack-size=$(STACK_SIZE) + ifeq ($(call cc-option,-mstack-size=8192),) + CC_FLAGS_CHECK_STACK += -mstack-guard=$(CONFIG_STACK_GUARD) + endif + export CC_FLAGS_CHECK_STACK + cflags-$(CONFIG_CHECK_STACK) += $(CC_FLAGS_CHECK_STACK) endif ifdef CONFIG_EXPOLINE diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c index 7571dee72a0c..1aa11a8f57dd 100644 --- a/arch/s390/boot/startup.c +++ b/arch/s390/boot/startup.c @@ -149,82 +149,56 @@ static void setup_ident_map_size(unsigned long max_physmem_end) static void setup_kernel_memory_layout(void) { - bool vmalloc_size_verified = false; - unsigned long vmemmap_off; - unsigned long vspace_left; + unsigned long vmemmap_start; unsigned long rte_size; unsigned long pages; - unsigned long vmax; pages = ident_map_size / PAGE_SIZE; /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */ vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page); /* choose kernel address space layout: 4 or 3 levels. */ - vmemmap_off = round_up(ident_map_size, _REGION3_SIZE); + vmemmap_start = round_up(ident_map_size, _REGION3_SIZE); if (IS_ENABLED(CONFIG_KASAN) || vmalloc_size > _REGION2_SIZE || - vmemmap_off + vmemmap_size + vmalloc_size + MODULES_LEN > _REGION2_SIZE) - vmax = _REGION1_SIZE; - else - vmax = _REGION2_SIZE; - - /* keep vmemmap_off aligned to a top level region table entry */ - rte_size = vmax == _REGION1_SIZE ? _REGION2_SIZE : _REGION3_SIZE; - MODULES_END = vmax; - if (is_prot_virt_host()) { - /* - * forcing modules and vmalloc area under the ultravisor - * secure storage limit, so that any vmalloc allocation - * we do could be used to back secure guest storage. 
- */ - adjust_to_uv_max(&MODULES_END); - } - -#ifdef CONFIG_KASAN - if (MODULES_END < vmax) { - /* force vmalloc and modules below kasan shadow */ - MODULES_END = min(MODULES_END, KASAN_SHADOW_START); + vmemmap_start + vmemmap_size + vmalloc_size + MODULES_LEN > + _REGION2_SIZE) { + MODULES_END = _REGION1_SIZE; + rte_size = _REGION2_SIZE; } else { - /* - * leave vmalloc and modules above kasan shadow but make - * sure they don't overlap with it - */ - vmalloc_size = min(vmalloc_size, vmax - KASAN_SHADOW_END - MODULES_LEN); - vmalloc_size_verified = true; - vspace_left = KASAN_SHADOW_START; + MODULES_END = _REGION2_SIZE; + rte_size = _REGION3_SIZE; } + /* + * forcing modules and vmalloc area under the ultravisor + * secure storage limit, so that any vmalloc allocation + * we do could be used to back secure guest storage. + */ + adjust_to_uv_max(&MODULES_END); +#ifdef CONFIG_KASAN + /* force vmalloc and modules below kasan shadow */ + MODULES_END = min(MODULES_END, KASAN_SHADOW_START); #endif MODULES_VADDR = MODULES_END - MODULES_LEN; VMALLOC_END = MODULES_VADDR; - if (vmalloc_size_verified) { - VMALLOC_START = VMALLOC_END - vmalloc_size; - } else { - vmemmap_off = round_up(ident_map_size, rte_size); - - if (vmemmap_off + vmemmap_size > VMALLOC_END || - vmalloc_size > VMALLOC_END - vmemmap_off - vmemmap_size) { - /* - * allow vmalloc area to occupy up to 1/2 of - * the rest virtual space left. - */ - vmalloc_size = min(vmalloc_size, VMALLOC_END / 2); - } - VMALLOC_START = VMALLOC_END - vmalloc_size; - vspace_left = VMALLOC_START; - } + /* allow vmalloc area to occupy up to about 1/2 of the rest virtual space left */ + vmalloc_size = min(vmalloc_size, round_down(VMALLOC_END / 2, _REGION3_SIZE)); + VMALLOC_START = VMALLOC_END - vmalloc_size; - pages = vspace_left / (PAGE_SIZE + sizeof(struct page)); + /* split remaining virtual space between 1:1 mapping & vmemmap array */ + pages = VMALLOC_START / (PAGE_SIZE + sizeof(struct page)); pages = SECTION_ALIGN_UP(pages); - vmemmap_off = round_up(vspace_left - pages * sizeof(struct page), rte_size); - /* keep vmemmap left most starting from a fresh region table entry */ - vmemmap_off = min(vmemmap_off, round_up(ident_map_size, rte_size)); - /* take care that identity map is lower then vmemmap */ - ident_map_size = min(ident_map_size, vmemmap_off); + /* keep vmemmap_start aligned to a top level region table entry */ + vmemmap_start = round_down(VMALLOC_START - pages * sizeof(struct page), rte_size); + /* vmemmap_start is the future VMEM_MAX_PHYS, make sure it is within MAX_PHYSMEM */ + vmemmap_start = min(vmemmap_start, 1UL << MAX_PHYSMEM_BITS); + /* make sure identity map doesn't overlay with vmemmap */ + ident_map_size = min(ident_map_size, vmemmap_start); vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page); - VMALLOC_START = max(vmemmap_off + vmemmap_size, VMALLOC_START); - vmemmap = (struct page *)vmemmap_off; + /* make sure vmemmap doesn't overlay with vmalloc area */ + VMALLOC_START = max(vmemmap_start + vmemmap_size, VMALLOC_START); + vmemmap = (struct page *)vmemmap_start; } /* diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index fd825097cf04..b626bc6e0eaf 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -403,7 +403,6 @@ CONFIG_DEVTMPFS=y CONFIG_CONNECTOR=y CONFIG_ZRAM=y CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y @@ -476,6 +475,7 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m 
CONFIG_VXLAN=m CONFIG_BAREUDP=m +CONFIG_AMT=m CONFIG_TUN=m CONFIG_VETH=m CONFIG_VIRTIO_NET=m @@ -489,6 +489,7 @@ CONFIG_NLMON=m # CONFIG_NET_VENDOR_AMD is not set # CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ASIX is not set # CONFIG_NET_VENDOR_ATHEROS is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_BROCADE is not set @@ -571,6 +572,7 @@ CONFIG_WATCHDOG=y CONFIG_WATCHDOG_NOWAYOUT=y CONFIG_SOFT_WATCHDOG=m CONFIG_DIAG288_WATCHDOG=m +# CONFIG_DRM_DEBUG_MODESET_LOCK is not set CONFIG_FB=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y @@ -775,12 +777,14 @@ CONFIG_CRC4=m CONFIG_CRC7=m CONFIG_CRC8=m CONFIG_RANDOM32_SELFTEST=y +CONFIG_XZ_DEC_MICROLZMA=y CONFIG_DMA_CMA=y CONFIG_CMA_SIZE_MBYTES=0 CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_DEBUG_INFO_BTF=y CONFIG_GDB_SCRIPTS=y CONFIG_HEADERS_INSTALL=y CONFIG_DEBUG_SECTION_MISMATCH=y @@ -807,6 +811,7 @@ CONFIG_DEBUG_MEMORY_INIT=y CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m CONFIG_DEBUG_PER_CPU_MAPS=y CONFIG_KFENCE=y +CONFIG_KFENCE_STATIC_KEYS=y CONFIG_DEBUG_SHIRQ=y CONFIG_PANIC_ON_OOPS=y CONFIG_DETECT_HUNG_TASK=y @@ -842,6 +847,7 @@ CONFIG_FTRACE_STARTUP_TEST=y CONFIG_SAMPLES=y CONFIG_SAMPLE_TRACE_PRINTK=m CONFIG_SAMPLE_FTRACE_DIRECT=m +CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m CONFIG_DEBUG_ENTRY=y CONFIG_CIO_INJECT=y CONFIG_KUNIT=m @@ -860,7 +866,7 @@ CONFIG_FAIL_FUNCTION=y CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y CONFIG_LKDTM=m CONFIG_TEST_MIN_HEAP=y -CONFIG_KPROBES_SANITY_TEST=y +CONFIG_KPROBES_SANITY_TEST=m CONFIG_RBTREE_TEST=y CONFIG_INTERVAL_TREE_TEST=m CONFIG_PERCPU_TEST=m diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index c9c3cedff2d8..0056cab27372 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -394,7 +394,6 @@ CONFIG_DEVTMPFS=y CONFIG_CONNECTOR=y CONFIG_ZRAM=y CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y @@ -467,6 +466,7 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_VXLAN=m CONFIG_BAREUDP=m +CONFIG_AMT=m CONFIG_TUN=m CONFIG_VETH=m CONFIG_VIRTIO_NET=m @@ -480,6 +480,7 @@ CONFIG_NLMON=m # CONFIG_NET_VENDOR_AMD is not set # CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ASIX is not set # CONFIG_NET_VENDOR_ATHEROS is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_BROCADE is not set @@ -762,12 +763,14 @@ CONFIG_PRIME_NUMBERS=m CONFIG_CRC4=m CONFIG_CRC7=m CONFIG_CRC8=m +CONFIG_XZ_DEC_MICROLZMA=y CONFIG_DMA_CMA=y CONFIG_CMA_SIZE_MBYTES=0 CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_DEBUG_INFO_BTF=y CONFIG_GDB_SCRIPTS=y CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_MAGIC_SYSRQ=y @@ -792,9 +795,11 @@ CONFIG_HIST_TRIGGERS=y CONFIG_SAMPLES=y CONFIG_SAMPLE_TRACE_PRINTK=m CONFIG_SAMPLE_FTRACE_DIRECT=m +CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m CONFIG_KUNIT=m CONFIG_KUNIT_DEBUGFS=y CONFIG_LKDTM=m +CONFIG_KPROBES_SANITY_TEST=m CONFIG_PERCPU_TEST=m CONFIG_ATOMIC64_SELFTEST=y CONFIG_TEST_BPF=m diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index aceccf3b9a88..eed3b9acfa71 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig @@ -65,9 +65,11 @@ CONFIG_ZFCP=y # CONFIG_NETWORK_FILESYSTEMS is not set CONFIG_LSM="yama,loadpin,safesetid,integrity" # CONFIG_ZLIB_DFLTCC is not set +CONFIG_XZ_DEC_MICROLZMA=y 
CONFIG_PRINTK_TIME=y # CONFIG_SYMBOLIC_ERRNAME is not set CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_BTF=y CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y CONFIG_PANIC_ON_OOPS=y diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h index ea398a05f643..7f3c9ac34bd8 100644 --- a/arch/s390/include/asm/kexec.h +++ b/arch/s390/include/asm/kexec.h @@ -74,6 +74,12 @@ void *kexec_file_add_components(struct kimage *image, int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val, unsigned long addr); +#define ARCH_HAS_KIMAGE_ARCH + +struct kimage_arch { + void *ipl_buf; +}; + extern const struct kexec_file_ops s390_kexec_image_ops; extern const struct kexec_file_ops s390_kexec_elf_ops; diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h index e4dc64cc9c55..287bb88f7698 100644 --- a/arch/s390/include/asm/pci_io.h +++ b/arch/s390/include/asm/pci_io.h @@ -14,12 +14,13 @@ /* I/O Map */ #define ZPCI_IOMAP_SHIFT 48 -#define ZPCI_IOMAP_ADDR_BASE 0x8000000000000000UL +#define ZPCI_IOMAP_ADDR_SHIFT 62 +#define ZPCI_IOMAP_ADDR_BASE (1UL << ZPCI_IOMAP_ADDR_SHIFT) #define ZPCI_IOMAP_ADDR_OFF_MASK ((1UL << ZPCI_IOMAP_SHIFT) - 1) #define ZPCI_IOMAP_MAX_ENTRIES \ - ((ULONG_MAX - ZPCI_IOMAP_ADDR_BASE + 1) / (1UL << ZPCI_IOMAP_SHIFT)) + (1UL << (ZPCI_IOMAP_ADDR_SHIFT - ZPCI_IOMAP_SHIFT)) #define ZPCI_IOMAP_ADDR_IDX_MASK \ - (~ZPCI_IOMAP_ADDR_OFF_MASK - ZPCI_IOMAP_ADDR_BASE) + ((ZPCI_IOMAP_ADDR_BASE - 1) & ~ZPCI_IOMAP_ADDR_OFF_MASK) struct zpci_iomap_entry { u32 fh; diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c index d72a6df058d7..785d54c9350c 100644 --- a/arch/s390/kernel/crash_dump.c +++ b/arch/s390/kernel/crash_dump.c @@ -191,8 +191,8 @@ static int copy_oldmem_user(void __user *dst, void *src, size_t count) return rc; } else { /* Check for swapped kdump oldmem areas */ - if (oldmem_data.start && from - oldmem_data.size < oldmem_data.size) { - from -= oldmem_data.size; + if (oldmem_data.start && from - oldmem_data.start < oldmem_data.size) { + from -= oldmem_data.start; len = min(count, oldmem_data.size - from); } else if (oldmem_data.start && from < oldmem_data.size) { len = min(count, oldmem_data.size - from); diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index e2cc35775b99..5ad1dde23dc5 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -2156,7 +2156,7 @@ void *ipl_report_finish(struct ipl_report *report) buf = vzalloc(report->size); if (!buf) - return ERR_PTR(-ENOMEM); + goto out; ptr = buf; memcpy(ptr, report->ipib, report->ipib->hdr.len); @@ -2195,6 +2195,7 @@ void *ipl_report_finish(struct ipl_report *report) } BUG_ON(ptr > buf + report->size); +out: return buf; } diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c index 528edff085d9..9975ad200d74 100644 --- a/arch/s390/kernel/machine_kexec_file.c +++ b/arch/s390/kernel/machine_kexec_file.c @@ -12,6 +12,7 @@ #include <linux/kexec.h> #include <linux/module_signature.h> #include <linux/verification.h> +#include <linux/vmalloc.h> #include <asm/boot_data.h> #include <asm/ipl.h> #include <asm/setup.h> @@ -170,6 +171,7 @@ static int kexec_file_add_ipl_report(struct kimage *image, struct kexec_buf buf; unsigned long addr; void *ptr, *end; + int ret; buf.image = image; @@ -199,9 +201,13 @@ static int kexec_file_add_ipl_report(struct kimage *image, ptr += len; } + ret = -ENOMEM; buf.buffer = ipl_report_finish(data->report); + if (!buf.buffer) + goto out; buf.bufsz = data->report->size; buf.memsz = buf.bufsz; + 
image->arch.ipl_buf = buf.buffer; data->memsz += buf.memsz; @@ -209,7 +215,9 @@ static int kexec_file_add_ipl_report(struct kimage *image, data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr); *lc_ipl_parmblock_ptr = (__u32)buf.mem; - return kexec_add_buffer(&buf); + ret = kexec_add_buffer(&buf); +out: + return ret; } void *kexec_file_add_components(struct kimage *image, @@ -322,3 +330,11 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi, } return 0; } + +int arch_kimage_file_post_load_cleanup(struct kimage *image) +{ + vfree(image->arch.ipl_buf); + image->arch.ipl_buf = NULL; + + return kexec_image_post_load_cleanup_default(image); +} diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 40405f2304f1..225ab2d0a4c6 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -606,7 +606,7 @@ static void __init setup_resources(void) static void __init setup_memory_end(void) { - memblock_remove(ident_map_size, ULONG_MAX); + memblock_remove(ident_map_size, PHYS_ADDR_MAX - ident_map_size); max_pfn = max_low_pfn = PFN_DOWN(ident_map_size); pr_notice("The maximum memory size is %luMB\n", ident_map_size >> 20); } @@ -638,14 +638,6 @@ static struct notifier_block kdump_mem_nb = { #endif /* - * Make sure that the area above identity mapping is protected - */ -static void __init reserve_above_ident_map(void) -{ - memblock_reserve(ident_map_size, ULONG_MAX); -} - -/* * Reserve memory for kdump kernel to be loaded with kexec */ static void __init reserve_crashkernel(void) @@ -785,7 +777,6 @@ static void __init memblock_add_mem_detect_info(void) } memblock_set_bottom_up(false); memblock_set_node(0, ULONG_MAX, &memblock.memory, 0); - memblock_dump_all(); } /* @@ -826,9 +817,6 @@ static void __init setup_memory(void) storage_key_init_range(start, end); psw_set_key(PAGE_DEFAULT_KEY); - - /* Only cosmetics */ - memblock_enforce_memory_limit(memblock_end_of_DRAM()); } static void __init relocate_amode31_section(void) @@ -999,24 +987,24 @@ void __init setup_arch(char **cmdline_p) setup_control_program_code(); /* Do some memory reservations *before* memory is added to memblock */ - reserve_above_ident_map(); reserve_kernel(); reserve_initrd(); reserve_certificate_list(); reserve_mem_detect_info(); + memblock_set_current_limit(ident_map_size); memblock_allow_resize(); /* Get information about *all* installed memory */ memblock_add_mem_detect_info(); free_mem_detect_info(); + setup_memory_end(); + memblock_dump_all(); + setup_memory(); relocate_amode31_section(); setup_cr(); - setup_uv(); - setup_memory_end(); - setup_memory(); dma_contiguous_reserve(ident_map_size); vmcp_cma_reserve(); if (MACHINE_HAS_EDAT2) diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl index df5261e5cfe1..ed9c5c2eafad 100644 --- a/arch/s390/kernel/syscalls/syscall.tbl +++ b/arch/s390/kernel/syscalls/syscall.tbl @@ -451,3 +451,4 @@ 446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self # 447 reserved for memfd_secret 448 common process_mrelease sys_process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv sys_futex_waitv diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 035705c9f23e..2b780786fc68 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -84,7 +84,7 @@ static void default_trap_handler(struct pt_regs *regs) { if (user_mode(regs)) { report_user_fault(regs, SIGSEGV, 0); - force_fatal_sig(SIGSEGV); + force_exit_sig(SIGSEGV); } else die(regs, 
"Unknown program exception"); } diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile index e3e6ac5686df..245bddfe9bc0 100644 --- a/arch/s390/kernel/vdso32/Makefile +++ b/arch/s390/kernel/vdso32/Makefile @@ -22,7 +22,7 @@ KBUILD_AFLAGS_32 += -m31 -s KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS)) KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin -LDFLAGS_vdso32.so.dbg += -fPIC -shared -nostdlib -soname=linux-vdso32.so.1 \ +LDFLAGS_vdso32.so.dbg += -fPIC -shared -soname=linux-vdso32.so.1 \ --hash-style=both --build-id=sha1 -melf_s390 -T $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32) diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile index 6568de236701..9e2b95a222a9 100644 --- a/arch/s390/kernel/vdso64/Makefile +++ b/arch/s390/kernel/vdso64/Makefile @@ -8,8 +8,9 @@ ARCH_REL_TYPE_ABS += R_390_GOT|R_390_PLT include $(srctree)/lib/vdso/Makefile obj-vdso64 = vdso_user_wrapper.o note.o obj-cvdso64 = vdso64_generic.o getcpu.o -CFLAGS_REMOVE_getcpu.o = -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE) -CFLAGS_REMOVE_vdso64_generic.o = -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE) +VDSO_CFLAGS_REMOVE := -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE) $(CC_FLAGS_CHECK_STACK) +CFLAGS_REMOVE_getcpu.o = $(VDSO_CFLAGS_REMOVE) +CFLAGS_REMOVE_vdso64_generic.o = $(VDSO_CFLAGS_REMOVE) # Build rules @@ -25,7 +26,7 @@ KBUILD_AFLAGS_64 += -m64 -s KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS)) KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin -ldflags-y := -fPIC -shared -nostdlib -soname=linux-vdso64.so.1 \ +ldflags-y := -fPIC -shared -soname=linux-vdso64.so.1 \ --hash-style=both --build-id=sha1 -T $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index c6257f625929..14a18ba5ff2c 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -585,6 +585,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = KVM_MAX_VCPUS; else if (sclp.has_esca && sclp.has_64bscao) r = KVM_S390_ESCA_CPU_SLOTS; + if (ext == KVM_CAP_NR_VCPUS) + r = min_t(unsigned int, num_online_cpus(), r); break; case KVM_CAP_S390_COW: r = MACHINE_HAS_ESOP; diff --git a/arch/s390/lib/test_unwind.c b/arch/s390/lib/test_unwind.c index cfc5f5557c06..bc7973359ae2 100644 --- a/arch/s390/lib/test_unwind.c +++ b/arch/s390/lib/test_unwind.c @@ -173,10 +173,11 @@ static noinline int unwindme_func4(struct unwindme *u) } /* - * trigger specification exception + * Trigger operation exception; use insn notation to bypass + * llvm's integrated assembler sanity checks. 
*/ asm volatile( - " mvcl %%r1,%%r1\n" + " .insn e,0x0000\n" /* illegal opcode */ "0: nopr %%r7\n" EX_TABLE(0b, 0b) :); diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h index c7a97f32432f..481a664287e2 100644 --- a/arch/sh/include/asm/cacheflush.h +++ b/arch/sh/include/asm/cacheflush.h @@ -43,7 +43,6 @@ extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 void flush_dcache_page(struct page *page); -void flush_dcache_folio(struct folio *folio); extern void flush_icache_range(unsigned long start, unsigned long end); #define flush_icache_user_range flush_icache_range extern void flush_icache_page(struct vm_area_struct *vma, diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl index 208f131659c5..d9539d28bdaa 100644 --- a/arch/sh/kernel/syscalls/syscall.tbl +++ b/arch/sh/kernel/syscalls/syscall.tbl @@ -451,3 +451,4 @@ 446 common landlock_restrict_self sys_landlock_restrict_self # 447 reserved for memfd_secret 448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index cd677bc564a7..ffab16369bea 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -244,7 +244,7 @@ static int setup_frame(struct ksignal *ksig, struct pt_regs *regs, get_sigframe(ksig, regs, sigframe_size); if (invalid_frame_pointer(sf, sigframe_size)) { - force_fatal_sig(SIGILL); + force_exit_sig(SIGILL); return -EINVAL; } @@ -336,7 +336,7 @@ static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs, sf = (struct rt_signal_frame __user *) get_sigframe(ksig, regs, sigframe_size); if (invalid_frame_pointer(sf, sigframe_size)) { - force_fatal_sig(SIGILL); + force_exit_sig(SIGILL); return -EINVAL; } diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl index c37764dc764d..46adabcb1720 100644 --- a/arch/sparc/kernel/syscalls/syscall.tbl +++ b/arch/sparc/kernel/syscalls/syscall.tbl @@ -494,3 +494,4 @@ 446 common landlock_restrict_self sys_landlock_restrict_self # 447 reserved for memfd_secret 448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv diff --git a/arch/sparc/kernel/windows.c b/arch/sparc/kernel/windows.c index bbbd40cc6b28..8f20862ccc83 100644 --- a/arch/sparc/kernel/windows.c +++ b/arch/sparc/kernel/windows.c @@ -122,7 +122,7 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who) if ((sp & 7) || copy_to_user((char __user *) sp, &tp->reg_window[window], sizeof(struct reg_window32))) { - force_fatal_sig(SIGILL); + force_exit_sig(SIGILL); return; } } diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 95dd1ee01546..5c2ccb85f2ef 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -193,7 +193,7 @@ config X86 select HAVE_DYNAMIC_FTRACE_WITH_ARGS if X86_64 select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS select HAVE_SAMPLE_FTRACE_DIRECT if X86_64 - select HAVE_SAMPLE_FTRACE_MULTI_DIRECT if X86_64 + select HAVE_SAMPLE_FTRACE_DIRECT_MULTI if X86_64 select HAVE_EBPF_JIT select HAVE_EFFICIENT_UNALIGNED_ACCESS select HAVE_EISA @@ -1932,6 +1932,7 @@ config EFI depends on ACPI select UCS2_STRING select EFI_RUNTIME_WRAPPERS + select ARCH_USE_MEMREMAP_PROT help This enables the kernel to use EFI runtime services that are available (such as the EFI variable services). 
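The syscall-table hunks above (s390, sh, sparc) wire up futex_waitv as syscall number 449. As a reading aid, here is a minimal userspace sketch of invoking the new entry point; it assumes the 5.16 uapi layout of struct futex_waitv (val, uaddr, flags, __reserved) and FUTEX_32 == 2, and uses the raw syscall number from the tables above since no libc wrapper exists:

	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	/* Local copy of the uapi structure (linux/futex.h in 5.16). */
	struct futex_waitv {
		uint64_t val;		/* expected value of the futex word */
		uint64_t uaddr;		/* address of the futex word */
		uint32_t flags;		/* FUTEX_32 (== 2) for a 32-bit futex */
		uint32_t __reserved;	/* must be zero */
	};

	static uint32_t futex_word;	/* the word being waited on */

	int main(void)
	{
		struct futex_waitv waiter = {
			.val	= 0,
			.uaddr	= (uintptr_t)&futex_word,
			.flags	= 2,	/* FUTEX_32 */
		};
		struct timespec timeout;
		long ret;

		/* futex_waitv takes an *absolute* timeout in the given clock. */
		clock_gettime(CLOCK_MONOTONIC, &timeout);
		timeout.tv_sec += 1;

		/* 449 == futex_waitv, as added to the syscall tables above. */
		ret = syscall(449, &waiter, 1, 0, &timeout, CLOCK_MONOTONIC);
		printf("futex_waitv returned %ld\n", ret);
		return 0;
	}

With the futex word left at zero (matching .val), the call blocks until another thread changes the word and wakes it, or until the absolute timeout fires and the syscall fails with ETIMEDOUT; on a wakeup it returns the index of the woken waiter in the vector.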
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index e38a4cf795d9..97b1f84bb53f 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -574,6 +574,10 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL) ud2 1: #endif +#ifdef CONFIG_XEN_PV + ALTERNATIVE "", "jmp xenpv_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV +#endif + POP_REGS pop_rdi=0 /* @@ -890,6 +894,7 @@ SYM_CODE_START_LOCAL(paranoid_entry) .Lparanoid_entry_checkgs: /* EBX = 1 -> kernel GSBASE active, no restore required */ movl $1, %ebx + /* * The kernel-enforced convention is a negative GSBASE indicates * a kernel value. No SWAPGS needed on entry and exit. @@ -897,21 +902,14 @@ SYM_CODE_START_LOCAL(paranoid_entry) movl $MSR_GS_BASE, %ecx rdmsr testl %edx, %edx - jns .Lparanoid_entry_swapgs - ret + js .Lparanoid_kernel_gsbase -.Lparanoid_entry_swapgs: + /* EBX = 0 -> SWAPGS required on exit */ + xorl %ebx, %ebx swapgs +.Lparanoid_kernel_gsbase: - /* - * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an - * unconditional CR3 write, even in the PTI case. So do an lfence - * to prevent GS speculation, regardless of whether PTI is enabled. - */ FENCE_SWAPGS_KERNEL_ENTRY - - /* EBX = 0 -> SWAPGS required on exit */ - xorl %ebx, %ebx ret SYM_CODE_END(paranoid_entry) @@ -993,11 +991,6 @@ SYM_CODE_START_LOCAL(error_entry) pushq %r12 ret -.Lerror_entry_done_lfence: - FENCE_SWAPGS_KERNEL_ENTRY -.Lerror_entry_done: - ret - /* * There are two places in the kernel that can potentially fault with * usergs. Handle them here. B stepping K8s sometimes report a @@ -1020,8 +1013,14 @@ SYM_CODE_START_LOCAL(error_entry) * .Lgs_change's error handler with kernel gsbase. */ SWAPGS - FENCE_SWAPGS_USER_ENTRY - jmp .Lerror_entry_done + + /* + * Issue an LFENCE to prevent GS speculation, regardless of whether it is a + * kernel or user gsbase. + */ +.Lerror_entry_done_lfence: + FENCE_SWAPGS_KERNEL_ENTRY + ret .Lbstep_iret: /* Fix truncated RIP */ diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c index 0b6b277ee050..fd2ee9408e91 100644 --- a/arch/x86/entry/vsyscall/vsyscall_64.c +++ b/arch/x86/entry/vsyscall/vsyscall_64.c @@ -226,7 +226,7 @@ bool emulate_vsyscall(unsigned long error_code, if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) { warn_bad_vsyscall(KERN_DEBUG, regs, "seccomp tried to change syscall nr or ip"); - force_fatal_sig(SIGSYS); + force_exit_sig(SIGSYS); return true; } regs->orig_ax = -1; diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 42cf01ecdd13..ec6444f2c9dc 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2211,7 +2211,6 @@ intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int /* must not have branches... */ local_irq_save(flags); __intel_pmu_disable_all(false); /* we don't care about BTS */ - __intel_pmu_pebs_disable_all(); __intel_pmu_lbr_disable(); /* ... until here */ return __intel_pmu_snapshot_branch_stack(entries, cnt, flags); @@ -2225,7 +2224,6 @@ intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry *entries, unsigned /* must not have branches... */ local_irq_save(flags); __intel_pmu_disable_all(false); /* we don't care about BTS */ - __intel_pmu_pebs_disable_all(); __intel_pmu_arch_lbr_disable(); /* ... 
until here */ return __intel_pmu_snapshot_branch_stack(entries, cnt, flags); diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index eb2c6cea9d0d..3660f698fb2a 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -3608,6 +3608,9 @@ static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *ev struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; struct extra_reg *er; int idx = 0; + /* Any of the CHA events may be filtered by Thread/Core-ID.*/ + if (event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN) + idx = SKX_CHA_MSR_PMON_BOX_FILTER_TID; for (er = skx_uncore_cha_extra_regs; er->msr; er++) { if (er->event != (event->hw.config & er->config_mask)) @@ -3675,6 +3678,7 @@ static struct event_constraint skx_uncore_iio_constraints[] = { UNCORE_EVENT_CONSTRAINT(0xc0, 0xc), UNCORE_EVENT_CONSTRAINT(0xc5, 0xc), UNCORE_EVENT_CONSTRAINT(0xd4, 0xc), + UNCORE_EVENT_CONSTRAINT(0xd5, 0xc), EVENT_CONSTRAINT_END }; @@ -4525,6 +4529,13 @@ static void snr_iio_cleanup_mapping(struct intel_uncore_type *type) pmu_iio_cleanup_mapping(type, &snr_iio_mapping_group); } +static struct event_constraint snr_uncore_iio_constraints[] = { + UNCORE_EVENT_CONSTRAINT(0x83, 0x3), + UNCORE_EVENT_CONSTRAINT(0xc0, 0xc), + UNCORE_EVENT_CONSTRAINT(0xd5, 0xc), + EVENT_CONSTRAINT_END +}; + static struct intel_uncore_type snr_uncore_iio = { .name = "iio", .num_counters = 4, @@ -4536,6 +4547,7 @@ static struct intel_uncore_type snr_uncore_iio = { .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT, .box_ctl = SNR_IIO_MSR_PMON_BOX_CTL, .msr_offset = SNR_IIO_MSR_OFFSET, + .constraints = snr_uncore_iio_constraints, .ops = &ivbep_uncore_msr_ops, .format_group = &snr_uncore_iio_format_group, .attr_update = snr_iio_attr_update, diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index 24f4a06ac46a..96eb7db31c8e 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -177,6 +177,9 @@ void set_hv_tscchange_cb(void (*cb)(void)) return; } + if (!hv_vp_index) + return; + hv_reenlightenment_cb = cb; /* Make sure callback is registered before we write to MSRs */ @@ -383,20 +386,13 @@ static void __init hv_get_partition_id(void) */ void __init hyperv_init(void) { - u64 guest_id, required_msrs; + u64 guest_id; union hv_x64_msr_hypercall_contents hypercall_msr; int cpuhp; if (x86_hyper_type != X86_HYPER_MS_HYPERV) return; - /* Absolutely required MSRs */ - required_msrs = HV_MSR_HYPERCALL_AVAILABLE | - HV_MSR_VP_INDEX_AVAILABLE; - - if ((ms_hyperv.features & required_msrs) != required_msrs) - return; - if (hv_common_init()) return; diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h index 6053674f9132..c2767a6a387e 100644 --- a/arch/x86/include/asm/fpu/api.h +++ b/arch/x86/include/asm/fpu/api.h @@ -102,12 +102,6 @@ extern void switch_fpu_return(void); */ extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name); -/* - * Tasks that are not using SVA have mm->pasid set to zero to note that they - * will not have the valid bit set in MSR_IA32_PASID while they are running. 
- */
-#define PASID_DISABLED	0
-
 /* Trap handling */
 extern int  fpu__exception_code(struct fpu *fpu, int trap_nr);
 extern void fpu_sync_fpstate(struct fpu *fpu);
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 5a0bcf8b78d7..048b6d5aff50 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -108,7 +108,7 @@
 #define INTEL_FAM6_ALDERLAKE		0x97	/* Golden Cove / Gracemont */
 #define INTEL_FAM6_ALDERLAKE_L		0x9A	/* Golden Cove / Gracemont */
 
-#define INTEL_FAM6_RAPTOR_LAKE		0xB7
+#define INTEL_FAM6_RAPTORLAKE		0xB7
 
 /* "Small Core" Processors (Atom) */
 
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e5d8700319cc..2164b9f4c7b0 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -97,7 +97,7 @@
 	KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_TLB_FLUSH_CURRENT	KVM_ARCH_REQ(26)
 #define KVM_REQ_TLB_FLUSH_GUEST \
-	KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP)
+	KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_APF_READY		KVM_ARCH_REQ(28)
 #define KVM_REQ_MSR_FILTER_CHANGED	KVM_ARCH_REQ(29)
 #define KVM_REQ_UPDATE_CPU_DIRTY_LOGGING \
@@ -363,6 +363,7 @@ union kvm_mmu_extended_role {
 		unsigned int cr4_smap:1;
 		unsigned int cr4_smep:1;
 		unsigned int cr4_la57:1;
+		unsigned int efer_lma:1;
 	};
 };
 
@@ -1035,6 +1036,7 @@ struct kvm_x86_msr_filter {
 #define APICV_INHIBIT_REASON_PIT_REINJ	4
 #define APICV_INHIBIT_REASON_X2APIC	5
 #define APICV_INHIBIT_REASON_BLOCKIRQ	6
+#define APICV_INHIBIT_REASON_ABSENT	7
 
 struct kvm_arch {
 	unsigned long n_used_mmu_pages;
diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
index 2cef6c5a52c2..6acaf5af0a3d 100644
--- a/arch/x86/include/asm/sev-common.h
+++ b/arch/x86/include/asm/sev-common.h
@@ -73,4 +73,15 @@
 #define GHCB_RESP_CODE(v)		((v) & GHCB_MSR_INFO_MASK)
 
+/*
+ * Error codes related to GHCB input that can be communicated back to the guest
+ * by setting the lower 32-bits of the GHCB SW_EXITINFO1 field to 2.
+ */ +#define GHCB_ERR_NOT_REGISTERED 1 +#define GHCB_ERR_INVALID_USAGE 2 +#define GHCB_ERR_INVALID_SCRATCH_AREA 3 +#define GHCB_ERR_MISSING_INPUT 4 +#define GHCB_ERR_INVALID_INPUT 5 +#define GHCB_ERR_INVALID_EVENT 6 + #endif diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index 0575f5863b7f..e5e0fe10c692 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -281,13 +281,13 @@ HYPERVISOR_callback_op(int cmd, void *arg) return _hypercall2(int, callback_op, cmd, arg); } -static inline int +static __always_inline int HYPERVISOR_set_debugreg(int reg, unsigned long value) { return _hypercall2(int, set_debugreg, reg, value); } -static inline unsigned long +static __always_inline unsigned long HYPERVISOR_get_debugreg(int reg) { return _hypercall1(unsigned long, get_debugreg, reg); diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h index 4957f59deb40..5adab895127e 100644 --- a/arch/x86/include/asm/xen/hypervisor.h +++ b/arch/x86/include/asm/xen/hypervisor.h @@ -64,6 +64,7 @@ void xen_arch_unregister_cpu(int num); #ifdef CONFIG_PVH void __init xen_pvh_init(struct boot_params *boot_params); +void __init mem_map_via_hcall(struct boot_params *boot_params_p); #endif #endif /* _ASM_X86_XEN_HYPERVISOR_H */ diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 4794b716ec79..ff55df60228f 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -163,12 +163,22 @@ static uint32_t __init ms_hyperv_platform(void) cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS, &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]); - if (eax >= HYPERV_CPUID_MIN && - eax <= HYPERV_CPUID_MAX && - !memcmp("Microsoft Hv", hyp_signature, 12)) - return HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS; + if (eax < HYPERV_CPUID_MIN || eax > HYPERV_CPUID_MAX || + memcmp("Microsoft Hv", hyp_signature, 12)) + return 0; - return 0; + /* HYPERCALL and VP_INDEX MSRs are mandatory for all features. */ + eax = cpuid_eax(HYPERV_CPUID_FEATURES); + if (!(eax & HV_MSR_HYPERCALL_AVAILABLE)) { + pr_warn("x86/hyperv: HYPERCALL MSR not available.\n"); + return 0; + } + if (!(eax & HV_MSR_VP_INDEX_AVAILABLE)) { + pr_warn("x86/hyperv: VP_INDEX MSR not available.\n"); + return 0; + } + + return HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS; } static unsigned char hv_get_nmi_reason(void) diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c index 63d3de02bbcc..8471a8b9b48e 100644 --- a/arch/x86/kernel/cpu/sgx/main.c +++ b/arch/x86/kernel/cpu/sgx/main.c @@ -28,8 +28,7 @@ static DECLARE_WAIT_QUEUE_HEAD(ksgxd_waitq); static LIST_HEAD(sgx_active_page_list); static DEFINE_SPINLOCK(sgx_reclaimer_lock); -/* The free page list lock protected variables prepend the lock. */ -static unsigned long sgx_nr_free_pages; +static atomic_long_t sgx_nr_free_pages = ATOMIC_LONG_INIT(0); /* Nodes with one or more EPC sections. 
*/ static nodemask_t sgx_numa_mask; @@ -403,14 +402,15 @@ skip: spin_lock(&node->lock); list_add_tail(&epc_page->list, &node->free_page_list); - sgx_nr_free_pages++; spin_unlock(&node->lock); + atomic_long_inc(&sgx_nr_free_pages); } } static bool sgx_should_reclaim(unsigned long watermark) { - return sgx_nr_free_pages < watermark && !list_empty(&sgx_active_page_list); + return atomic_long_read(&sgx_nr_free_pages) < watermark && + !list_empty(&sgx_active_page_list); } static int ksgxd(void *p) @@ -471,9 +471,9 @@ static struct sgx_epc_page *__sgx_alloc_epc_page_from_node(int nid) page = list_first_entry(&node->free_page_list, struct sgx_epc_page, list); list_del_init(&page->list); - sgx_nr_free_pages--; spin_unlock(&node->lock); + atomic_long_dec(&sgx_nr_free_pages); return page; } @@ -625,9 +625,9 @@ void sgx_free_epc_page(struct sgx_epc_page *page) spin_lock(&node->lock); list_add_tail(&page->list, &node->free_page_list); - sgx_nr_free_pages++; spin_unlock(&node->lock); + atomic_long_inc(&sgx_nr_free_pages); } static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size, diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 391a4e2b8604..fd2d3ab38ebb 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -554,6 +554,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = { INTEL_RKL_IDS(&gen11_early_ops), INTEL_ADLS_IDS(&gen11_early_ops), INTEL_ADLP_IDS(&gen11_early_ops), + INTEL_RPLS_IDS(&gen11_early_ops), }; struct resource intel_graphics_stolen_res __ro_after_init = DEFINE_RES_MEM(0, 0); diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c index d5958278eba6..91d4b6de58ab 100644 --- a/arch/x86/kernel/fpu/signal.c +++ b/arch/x86/kernel/fpu/signal.c @@ -118,7 +118,7 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame, struct fpstate *fpstate) { struct xregs_state __user *x = buf; - struct _fpx_sw_bytes sw_bytes; + struct _fpx_sw_bytes sw_bytes = {}; u32 xfeatures; int err; diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index e9ee8b526319..04143a653a8a 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -964,6 +964,9 @@ unsigned long __get_wchan(struct task_struct *p) struct unwind_state state; unsigned long addr = 0; + if (!try_get_task_stack(p)) + return 0; + for (unwind_start(&state, p, NULL, NULL); !unwind_done(&state); unwind_next_frame(&state)) { addr = unwind_get_return_address(&state); @@ -974,6 +977,8 @@ unsigned long __get_wchan(struct task_struct *p) break; } + put_task_stack(p); + return addr; } diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 49b596db5631..6a190c7f4d71 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -742,6 +742,28 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p) return 0; } +static char * __init prepare_command_line(void) +{ +#ifdef CONFIG_CMDLINE_BOOL +#ifdef CONFIG_CMDLINE_OVERRIDE + strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); +#else + if (builtin_cmdline[0]) { + /* append boot loader cmdline to builtin */ + strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE); + strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE); + strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); + } +#endif +#endif + + strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); + + parse_early_param(); + + return command_line; +} + /* * Determine if we were loaded by an EFI loader. 
If so, then we have also been * passed the efi memmap, systab, etc., so we should use these data structures @@ -831,6 +853,23 @@ void __init setup_arch(char **cmdline_p) x86_init.oem.arch_setup(); /* + * x86_configure_nx() is called before parse_early_param() (called by + * prepare_command_line()) to detect whether hardware doesn't support + * NX (so that the early EHCI debug console setup can safely call + * set_fixmap()). It may then be called again from within noexec_setup() + * during parsing early parameters to honor the respective command line + * option. + */ + x86_configure_nx(); + + /* + * This parses early params and it needs to run before + * early_reserve_memory() because latter relies on such settings + * supplied as early params. + */ + *cmdline_p = prepare_command_line(); + + /* * Do some memory reservations *before* memory is added to memblock, so * memblock allocations won't overwrite it. * @@ -863,33 +902,6 @@ void __init setup_arch(char **cmdline_p) bss_resource.start = __pa_symbol(__bss_start); bss_resource.end = __pa_symbol(__bss_stop)-1; -#ifdef CONFIG_CMDLINE_BOOL -#ifdef CONFIG_CMDLINE_OVERRIDE - strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); -#else - if (builtin_cmdline[0]) { - /* append boot loader cmdline to builtin */ - strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE); - strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE); - strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); - } -#endif -#endif - - strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); - *cmdline_p = command_line; - - /* - * x86_configure_nx() is called before parse_early_param() to detect - * whether hardware doesn't support NX (so that the early EHCI debug - * console setup can safely call set_fixmap()). It may then be called - * again from within noexec_setup() during parsing early parameters - * to honor the respective command line option. - */ - x86_configure_nx(); - - parse_early_param(); - #ifdef CONFIG_MEMORY_HOTPLUG /* * Memory used by the kernel cannot be hot-removed because Linux diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c index 74f0ec955384..a9fc2ac7a8bd 100644 --- a/arch/x86/kernel/sev.c +++ b/arch/x86/kernel/sev.c @@ -294,11 +294,6 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt, char *dst, char *buf, size_t size) { unsigned long error_code = X86_PF_PROT | X86_PF_WRITE; - char __user *target = (char __user *)dst; - u64 d8; - u32 d4; - u16 d2; - u8 d1; /* * This function uses __put_user() independent of whether kernel or user @@ -320,26 +315,42 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt, * instructions here would cause infinite nesting. 
*/ switch (size) { - case 1: + case 1: { + u8 d1; + u8 __user *target = (u8 __user *)dst; + memcpy(&d1, buf, 1); if (__put_user(d1, target)) goto fault; break; - case 2: + } + case 2: { + u16 d2; + u16 __user *target = (u16 __user *)dst; + memcpy(&d2, buf, 2); if (__put_user(d2, target)) goto fault; break; - case 4: + } + case 4: { + u32 d4; + u32 __user *target = (u32 __user *)dst; + memcpy(&d4, buf, 4); if (__put_user(d4, target)) goto fault; break; - case 8: + } + case 8: { + u64 d8; + u64 __user *target = (u64 __user *)dst; + memcpy(&d8, buf, 8); if (__put_user(d8, target)) goto fault; break; + } default: WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size); return ES_UNSUPPORTED; @@ -362,11 +373,6 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt, char *src, char *buf, size_t size) { unsigned long error_code = X86_PF_PROT; - char __user *s = (char __user *)src; - u64 d8; - u32 d4; - u16 d2; - u8 d1; /* * This function uses __get_user() independent of whether kernel or user @@ -388,26 +394,41 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt, * instructions here would cause infinite nesting. */ switch (size) { - case 1: + case 1: { + u8 d1; + u8 __user *s = (u8 __user *)src; + if (__get_user(d1, s)) goto fault; memcpy(buf, &d1, 1); break; - case 2: + } + case 2: { + u16 d2; + u16 __user *s = (u16 __user *)src; + if (__get_user(d2, s)) goto fault; memcpy(buf, &d2, 2); break; - case 4: + } + case 4: { + u32 d4; + u32 __user *s = (u32 __user *)src; + if (__get_user(d4, s)) goto fault; memcpy(buf, &d4, 4); break; - case 8: + } + case 8: { + u64 d8; + u64 __user *s = (u64 __user *)src; if (__get_user(d8, s)) goto fault; memcpy(buf, &d8, 8); break; + } default: WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size); return ES_UNSUPPORTED; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index ac2909f0cab3..617012f4619f 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -579,6 +579,17 @@ static struct sched_domain_topology_level x86_numa_in_package_topology[] = { { NULL, }, }; +static struct sched_domain_topology_level x86_hybrid_topology[] = { +#ifdef CONFIG_SCHED_SMT + { cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) }, +#endif +#ifdef CONFIG_SCHED_MC + { cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) }, +#endif + { cpu_cpu_mask, SD_INIT_NAME(DIE) }, + { NULL, }, +}; + static struct sched_domain_topology_level x86_topology[] = { #ifdef CONFIG_SCHED_SMT { cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) }, @@ -1469,8 +1480,11 @@ void __init native_smp_cpus_done(unsigned int max_cpus) calculate_max_logical_packages(); + /* XXX for now assume numa-in-package and hybrid don't overlap */ if (x86_has_numa_in_package) set_sched_topology(x86_numa_in_package_topology); + if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) + set_sched_topology(x86_hybrid_topology); nmi_selftest(); impress_friends(); diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 2e076a459a0c..a698196377be 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -1180,6 +1180,12 @@ void mark_tsc_unstable(char *reason) EXPORT_SYMBOL_GPL(mark_tsc_unstable); +static void __init tsc_disable_clocksource_watchdog(void) +{ + clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY; + clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY; +} + static void __init check_system_tsc_reliable(void) { #if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC) @@ -1196,6 +1202,23 @@ static void __init 
check_system_tsc_reliable(void)
 #endif
 	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
 		tsc_clocksource_reliable = 1;
+
+	/*
+	 * Disable the clocksource watchdog when the system has:
+	 *  - TSC running at constant frequency
+	 *  - TSC which does not stop in C-States
+	 *  - the TSC_ADJUST register which allows detecting even minimal
+	 *    modifications
+	 *  - not more than two sockets. As the number of sockets cannot be
+	 *    evaluated at the early boot stage where this has to be
+	 *    invoked, check the number of online memory nodes as a
+	 *    fallback solution which is a reasonable estimate.
+	 */
+	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
+	    boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
+	    boot_cpu_has(X86_FEATURE_TSC_ADJUST) &&
+	    nr_online_nodes <= 2)
+		tsc_disable_clocksource_watchdog();
 }
 
 /*
@@ -1387,9 +1410,6 @@ static int __init init_tsc_clocksource(void)
 	if (tsc_unstable)
 		goto unreg;
 
-	if (tsc_clocksource_reliable || no_tsc_watchdog)
-		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
-
 	if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
 		clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
 
@@ -1527,7 +1547,7 @@ void __init tsc_init(void)
 	}
 
 	if (tsc_clocksource_reliable || no_tsc_watchdog)
-		clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+		tsc_disable_clocksource_watchdog();
 
 	clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
 
 	detect_art();
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 50a4515fe0ad..9452dc9664b5 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -30,6 +30,7 @@ struct tsc_adjust {
 };
 
 static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
+static struct timer_list tsc_sync_check_timer;
 
 /*
  * TSC's on different sockets may be reset asynchronously.
@@ -77,6 +78,46 @@ void tsc_verify_tsc_adjust(bool resume)
 	}
 }
 
+/*
+ * Normally the tsc_sync will be checked every time the system enters
+ * idle state, but there is still a caveat that a system won't enter
+ * idle, either because it's too busy or configured purposely to not
+ * enter idle.
+ *
+ * So setup a periodic timer (every 10 minutes) to make sure the check
+ * is always on.
+ */ + +#define SYNC_CHECK_INTERVAL (HZ * 600) + +static void tsc_sync_check_timer_fn(struct timer_list *unused) +{ + int next_cpu; + + tsc_verify_tsc_adjust(false); + + /* Run the check for all onlined CPUs in turn */ + next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask); + if (next_cpu >= nr_cpu_ids) + next_cpu = cpumask_first(cpu_online_mask); + + tsc_sync_check_timer.expires += SYNC_CHECK_INTERVAL; + add_timer_on(&tsc_sync_check_timer, next_cpu); +} + +static int __init start_sync_check_timer(void) +{ + if (!cpu_feature_enabled(X86_FEATURE_TSC_ADJUST) || tsc_clocksource_reliable) + return 0; + + timer_setup(&tsc_sync_check_timer, tsc_sync_check_timer_fn, 0); + tsc_sync_check_timer.expires = jiffies + SYNC_CHECK_INTERVAL; + add_timer(&tsc_sync_check_timer); + + return 0; +} +late_initcall(start_sync_check_timer); + static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval, unsigned int cpu, bool bootcpu) { diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index cce1c89cb7df..c21bcd668284 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -160,7 +160,7 @@ Efault_end: user_access_end(); Efault: pr_alert("could not access userspace vm86 info\n"); - force_fatal_sig(SIGSEGV); + force_exit_sig(SIGSEGV); goto exit_vm86; } diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index e19dabf1848b..07e9215e911d 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -125,7 +125,7 @@ static void kvm_update_kvm_cpuid_base(struct kvm_vcpu *vcpu) } } -struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu) +static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu) { u32 base = vcpu->arch.kvm_cpuid_base; diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 4a555f32885a..8d8c1cc7cb53 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1922,11 +1922,13 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL; + if (all_cpus) + goto check_and_send_ipi; + if (!sparse_banks_len) goto ret_success; - if (!all_cpus && - kvm_read_guest(kvm, + if (kvm_read_guest(kvm, hc->ingpa + offsetof(struct hv_send_ipi_ex, vp_set.bank_contents), sparse_banks, @@ -1934,6 +1936,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool return HV_STATUS_INVALID_HYPERCALL_INPUT; } +check_and_send_ipi: if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR)) return HV_STATUS_INVALID_HYPERCALL_INPUT; @@ -2022,7 +2025,7 @@ static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result) { bool longmode; - longmode = is_64_bit_mode(vcpu); + longmode = is_64_bit_hypercall(vcpu); if (longmode) kvm_rax_write(vcpu, result); else { @@ -2171,7 +2174,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) } #ifdef CONFIG_X86_64 - if (is_64_bit_mode(vcpu)) { + if (is_64_bit_hypercall(vcpu)) { hc.param = kvm_rcx_read(vcpu); hc.ingpa = kvm_rdx_read(vcpu); hc.outgpa = kvm_r8_read(vcpu); diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h index e66e620c3bed..539333ac4b38 100644 --- a/arch/x86/kvm/ioapic.h +++ b/arch/x86/kvm/ioapic.h @@ -81,7 +81,6 @@ struct kvm_ioapic { unsigned long irq_states[IOAPIC_NUM_PINS]; struct kvm_io_device dev; struct kvm *kvm; - void (*ack_notifier)(void *opaque, int irq); spinlock_t lock; struct rtc_status rtc_status; struct delayed_work eoi_inject; diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h index 650642b18d15..c2d7cfe82d00 100644 
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -56,7 +56,6 @@ struct kvm_pic {
 	struct kvm_io_device dev_master;
 	struct kvm_io_device dev_slave;
 	struct kvm_io_device dev_elcr;
-	void (*ack_notifier)(void *opaque, int irq);
 	unsigned long irq_states[PIC_NUM_PINS];
 };
 
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 759952dd1222..f206fc35deff 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -707,7 +707,7 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
 static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
 {
 	int highest_irr;
-	if (apic->vcpu->arch.apicv_active)
+	if (kvm_x86_ops.sync_pir_to_irr)
 		highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
 	else
 		highest_irr = apic_find_highest_irr(apic);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 33794379949e..e2e1d012df22 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1582,7 +1582,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 		flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
 
 	if (is_tdp_mmu_enabled(kvm))
-		flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
+		flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
 
 	return flush;
 }
@@ -1936,7 +1936,11 @@ static void mmu_audit_disable(void) { }
 
 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
-	return sp->role.invalid ||
+	if (sp->role.invalid)
+		return true;
+
+	/* TDP MMU pages do not use the MMU generation. */
+	return !sp->tdp_mmu_page &&
 	       unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
 }
 
@@ -2173,10 +2177,10 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato
 	iterator->shadow_addr = root;
 	iterator->level = vcpu->arch.mmu->shadow_root_level;
 
-	if (iterator->level == PT64_ROOT_4LEVEL &&
+	if (iterator->level >= PT64_ROOT_4LEVEL &&
 	    vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
 	    !vcpu->arch.mmu->direct_map)
-		--iterator->level;
+		iterator->level = PT32E_ROOT_LEVEL;
 
 	if (iterator->level == PT32E_ROOT_LEVEL) {
 		/*
@@ -3976,6 +3980,20 @@ out_retry:
 	return true;
 }
 
+/*
+ * Returns true if the page fault is stale and needs to be retried, i.e. if the
+ * root was invalidated by a memslot update or a relevant mmu_notifier fired.
+ */
+static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
+				struct kvm_page_fault *fault, int mmu_seq)
+{
+	if (is_obsolete_sp(vcpu->kvm, to_shadow_page(vcpu->arch.mmu->root_hpa)))
+		return true;
+
+	return fault->slot &&
+	       mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
+}
+
 static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
 	bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
@@ -4013,8 +4031,9 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 	else
 		write_lock(&vcpu->kvm->mmu_lock);
 
-	if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva))
+	if (is_page_fault_stale(vcpu, fault, mmu_seq))
 		goto out_unlock;
+
 	r = make_mmu_pages_available(vcpu);
 	if (r)
 		goto out_unlock;
@@ -4682,6 +4701,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
 		/* PKEY and LA57 are active iff long mode is active.
*/
 		ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
 		ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
+		ext.efer_lma = ____is_efer_lma(regs);
 	}
 
 	ext.valid = 1;
 
@@ -4854,7 +4874,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
 	struct kvm_mmu_role_regs regs = {
 		.cr0 = cr0,
-		.cr4 = cr4,
+		.cr4 = cr4 & ~X86_CR4_PKE,
 		.efer = efer,
 	};
 	union kvm_mmu_role new_role;
@@ -4918,7 +4938,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 	context->direct_map = false;
 
 	update_permission_bitmask(context, true);
-	update_pkru_bitmask(context);
+	context->pkru_mask = 0;
 	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
 	reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
 }
@@ -5024,6 +5044,14 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	/*
 	 * Invalidate all MMU roles to force them to reinitialize as CPUID
 	 * information is factored into reserved bit calculations.
+	 *
+	 * Correctly handling multiple vCPU models (with respect to paging and
+	 * physical address properties) in a single VM would require tracking
+	 * all relevant CPUID information in kvm_mmu_page_role. That is very
+	 * undesirable as it would increase the memory requirements for
+	 * gfn_track (see struct kvm_mmu_page_role comments). For now that
+	 * problem is swept under the rug; KVM's CPUID API is horrific and
+	 * it's all but impossible to solve it without introducing a new API.
 	 */
 	vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
 	vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
@@ -5031,24 +5059,10 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	kvm_mmu_reset_context(vcpu);
 
 	/*
-	 * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
-	 * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
-	 * tracked in kvm_mmu_page_role. As a result, KVM may miss guest page
-	 * faults due to reusing SPs/SPTEs. Alert userspace, but otherwise
-	 * sweep the problem under the rug.
-	 *
-	 * KVM's horrific CPUID ABI makes the problem all but impossible to
-	 * solve, as correctly handling multiple vCPU models (with respect to
-	 * paging and physical address properties) in a single VM would require
-	 * tracking all relevant CPUID information in kvm_mmu_page_role. That
-	 * is very undesirable as it would double the memory requirements for
-	 * gfn_track (see struct kvm_mmu_page_role comments), and in practice
-	 * no sane VMM mucks with the core vCPU model on the fly.
+	 * Changing guest CPUID after KVM_RUN is forbidden, see the comment in
+	 * kvm_arch_vcpu_ioctl().
*/ - if (vcpu->arch.last_vmentry_cpu != -1) { - pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} after KVM_RUN may cause guest instability\n"); - pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} will fail after KVM_RUN starting with Linux 5.16\n"); - } + KVM_BUG_ON(vcpu->arch.last_vmentry_cpu != -1, vcpu->kvm); } void kvm_mmu_reset_context(struct kvm_vcpu *vcpu) @@ -5368,7 +5382,7 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) { - kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE); + kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE); ++vcpu->stat.invlpg; } EXPORT_SYMBOL_GPL(kvm_mmu_invlpg); @@ -5853,8 +5867,6 @@ restart: void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, const struct kvm_memory_slot *slot) { - bool flush = false; - if (kvm_memslots_have_rmaps(kvm)) { write_lock(&kvm->mmu_lock); /* @@ -5862,17 +5874,14 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, * logging at a 4k granularity and never creates collapsible * 2m SPTEs during dirty logging. */ - flush = slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true); - if (flush) + if (slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true)) kvm_arch_flush_remote_tlbs_memslot(kvm, slot); write_unlock(&kvm->mmu_lock); } if (is_tdp_mmu_enabled(kvm)) { read_lock(&kvm->mmu_lock); - flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush); - if (flush) - kvm_arch_flush_remote_tlbs_memslot(kvm, slot); + kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot); read_unlock(&kvm->mmu_lock); } } @@ -6181,23 +6190,46 @@ void kvm_mmu_module_exit(void) mmu_audit_disable(); } +/* + * Calculate the effective recovery period, accounting for '0' meaning "let KVM + * select a halving time of 1 hour". Returns true if recovery is enabled. + */ +static bool calc_nx_huge_pages_recovery_period(uint *period) +{ + /* + * Use READ_ONCE to get the params, this may be called outside of the + * param setters, e.g. by the kthread to compute its next timeout. + */ + bool enabled = READ_ONCE(nx_huge_pages); + uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio); + + if (!enabled || !ratio) + return false; + + *period = READ_ONCE(nx_huge_pages_recovery_period_ms); + if (!*period) { + /* Make sure the period is not less than one second. */ + ratio = min(ratio, 3600u); + *period = 60 * 60 * 1000 / ratio; + } + return true; +} + static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp) { bool was_recovery_enabled, is_recovery_enabled; uint old_period, new_period; int err; - was_recovery_enabled = nx_huge_pages_recovery_ratio; - old_period = nx_huge_pages_recovery_period_ms; + was_recovery_enabled = calc_nx_huge_pages_recovery_period(&old_period); err = param_set_uint(val, kp); if (err) return err; - is_recovery_enabled = nx_huge_pages_recovery_ratio; - new_period = nx_huge_pages_recovery_period_ms; + is_recovery_enabled = calc_nx_huge_pages_recovery_period(&new_period); - if (READ_ONCE(nx_huge_pages) && is_recovery_enabled && + if (is_recovery_enabled && (!was_recovery_enabled || old_period > new_period)) { struct kvm *kvm; @@ -6261,18 +6293,13 @@ static void kvm_recover_nx_lpages(struct kvm *kvm) static long get_nx_lpage_recovery_timeout(u64 start_time) { - uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio); - uint period = READ_ONCE(nx_huge_pages_recovery_period_ms); + bool enabled; + uint period; - if (!period && ratio) { - /* Make sure the period is not less than one second. 
*/ - ratio = min(ratio, 3600u); - period = 60 * 60 * 1000 / ratio; - } + enabled = calc_nx_huge_pages_recovery_period(&period); - return READ_ONCE(nx_huge_pages) && ratio - ? start_time + msecs_to_jiffies(period) - get_jiffies_64() - : MAX_SCHEDULE_TIMEOUT; + return enabled ? start_time + msecs_to_jiffies(period) - get_jiffies_64() + : MAX_SCHEDULE_TIMEOUT; } static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data) diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index f87d36898c44..708a5d297fe1 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -911,7 +911,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault r = RET_PF_RETRY; write_lock(&vcpu->kvm->mmu_lock); - if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva)) + + if (is_page_fault_stale(vcpu, fault, mmu_seq)) goto out_unlock; kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT); diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index a54c3491af42..1db8496259ad 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -317,9 +317,6 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt, struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt)); int level = sp->role.level; gfn_t base_gfn = sp->gfn; - u64 old_child_spte; - u64 *sptep; - gfn_t gfn; int i; trace_kvm_mmu_prepare_zap_page(sp); @@ -327,8 +324,9 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt, tdp_mmu_unlink_page(kvm, sp, shared); for (i = 0; i < PT64_ENT_PER_PAGE; i++) { - sptep = rcu_dereference(pt) + i; - gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level); + u64 *sptep = rcu_dereference(pt) + i; + gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level); + u64 old_child_spte; if (shared) { /* @@ -374,7 +372,7 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt, shared); } - kvm_flush_remote_tlbs_with_address(kvm, gfn, + kvm_flush_remote_tlbs_with_address(kvm, base_gfn, KVM_PAGES_PER_HPAGE(level + 1)); call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback); @@ -1033,9 +1031,9 @@ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, { struct kvm_mmu_page *root; - for_each_tdp_mmu_root(kvm, root, range->slot->as_id) - flush |= zap_gfn_range(kvm, root, range->start, range->end, - range->may_block, flush, false); + for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false) + flush = zap_gfn_range(kvm, root, range->start, range->end, + range->may_block, flush, false); return flush; } @@ -1364,10 +1362,9 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, * Clear leaf entries which could be replaced by large mappings, for * GFNs within the slot. 
*/ -static bool zap_collapsible_spte_range(struct kvm *kvm, +static void zap_collapsible_spte_range(struct kvm *kvm, struct kvm_mmu_page *root, - const struct kvm_memory_slot *slot, - bool flush) + const struct kvm_memory_slot *slot) { gfn_t start = slot->base_gfn; gfn_t end = start + slot->npages; @@ -1378,10 +1375,8 @@ static bool zap_collapsible_spte_range(struct kvm *kvm, tdp_root_for_each_pte(iter, root, start, end) { retry: - if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) { - flush = false; + if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) continue; - } if (!is_shadow_present_pte(iter.old_spte) || !is_last_spte(iter.old_spte, iter.level)) @@ -1393,6 +1388,7 @@ retry: pfn, PG_LEVEL_NUM)) continue; + /* Note, a successful atomic zap also does a remote TLB flush. */ if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) { /* * The iter must explicitly re-read the SPTE because @@ -1401,30 +1397,24 @@ retry: iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep)); goto retry; } - flush = true; } rcu_read_unlock(); - - return flush; } /* * Clear non-leaf entries (and free associated page tables) which could * be replaced by large mappings, for GFNs within the slot. */ -bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, - const struct kvm_memory_slot *slot, - bool flush) +void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, + const struct kvm_memory_slot *slot) { struct kvm_mmu_page *root; lockdep_assert_held_read(&kvm->mmu_lock); for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) - flush = zap_collapsible_spte_range(kvm, root, slot, flush); - - return flush; + zap_collapsible_spte_range(kvm, root, slot); } /* diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h index 476b133544dd..3899004a5d91 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.h +++ b/arch/x86/kvm/mmu/tdp_mmu.h @@ -64,9 +64,8 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, unsigned long mask, bool wrprot); -bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, - const struct kvm_memory_slot *slot, - bool flush); +void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, + const struct kvm_memory_slot *slot); bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index affc0ea98d30..8f9af7b7dbbe 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -900,6 +900,7 @@ out: bool svm_check_apicv_inhibit_reasons(ulong bit) { ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) | + BIT(APICV_INHIBIT_REASON_ABSENT) | BIT(APICV_INHIBIT_REASON_HYPERV) | BIT(APICV_INHIBIT_REASON_NESTED) | BIT(APICV_INHIBIT_REASON_IRQWIN) | @@ -989,16 +990,18 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu) static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run) { struct vcpu_svm *svm = to_svm(vcpu); + int cpu = get_cpu(); + WARN_ON(cpu != vcpu->cpu); svm->avic_is_running = is_run; - if (!kvm_vcpu_apicv_active(vcpu)) - return; - - if (is_run) - avic_vcpu_load(vcpu, vcpu->cpu); - else - avic_vcpu_put(vcpu); + if (kvm_vcpu_apicv_active(vcpu)) { + if (is_run) + avic_vcpu_load(vcpu, cpu); + else + avic_vcpu_put(vcpu); + } + put_cpu(); } void svm_vcpu_blocking(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c index 871c426ec389..b4095dfeeee6 100644 --- a/arch/x86/kvm/svm/pmu.c +++ b/arch/x86/kvm/svm/pmu.c @@ -281,7 +281,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu) pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS; 
pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1; - pmu->reserved_bits = 0xffffffff00200000ull; + pmu->reserved_bits = 0xfffffff000280000ull; pmu->version = 1; /* not applicable to AMD; but clean them to prevent any fall out */ pmu->counter_bitmask[KVM_PMC_FIXED] = 0; diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 902c52a8dd0c..7656a2c5662a 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -237,7 +237,6 @@ static void sev_unbind_asid(struct kvm *kvm, unsigned int handle) static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; - bool es_active = argp->id == KVM_SEV_ES_INIT; int asid, ret; if (kvm->created_vcpus) @@ -247,7 +246,8 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) if (unlikely(sev->active)) return ret; - sev->es_active = es_active; + sev->active = true; + sev->es_active = argp->id == KVM_SEV_ES_INIT; asid = sev_asid_new(sev); if (asid < 0) goto e_no_asid; @@ -257,8 +257,6 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) if (ret) goto e_free; - sev->active = true; - sev->asid = asid; INIT_LIST_HEAD(&sev->regions_list); return 0; @@ -268,6 +266,7 @@ e_free: sev->asid = 0; e_no_asid: sev->es_active = false; + sev->active = false; return ret; } @@ -1530,7 +1529,7 @@ static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp) return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error); } -static bool cmd_allowed_from_miror(u32 cmd_id) +static bool is_cmd_allowed_from_mirror(u32 cmd_id) { /* * Allow mirrors VM to call KVM_SEV_LAUNCH_UPDATE_VMSA to enable SEV-ES @@ -1544,28 +1543,50 @@ static bool cmd_allowed_from_miror(u32 cmd_id) return false; } -static int sev_lock_for_migration(struct kvm *kvm) +static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm) { - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info; + struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info; + int r = -EBUSY; + + if (dst_kvm == src_kvm) + return -EINVAL; /* - * Bail if this VM is already involved in a migration to avoid deadlock - * between two VMs trying to migrate to/from each other. + * Bail if these VMs are already involved in a migration to avoid + * deadlock between two VMs trying to migrate to/from each other. 
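sev_lock_two_vms(), whose body follows just below, layers a try-lock guard over the two kvm->lock mutexes: the atomic cmpxchg on migration_in_progress makes a crossing pair of migrations (A migrating from B while B migrates from A) fail with -EBUSY instead of deadlocking on each other's locks, and each later failure unwinds whatever was already taken. A portable sketch of the same acquire-or-unwind shape (pthreads has no killable lock, so that detail is dropped; all names are illustrative):

    #include <errno.h>
    #include <pthread.h>
    #include <stdatomic.h>

    struct vm {
        atomic_int migration_in_progress;
        pthread_mutex_t lock;
    };

    static int lock_two_vms(struct vm *dst, struct vm *src)
    {
        int expected = 0;

        if (dst == src)
            return -EINVAL;

        /* Flag both VMs first; the cmpxchg fails if a migration is underway. */
        if (!atomic_compare_exchange_strong(&dst->migration_in_progress,
                                            &expected, 1))
            return -EBUSY;
        expected = 0;
        if (!atomic_compare_exchange_strong(&src->migration_in_progress,
                                            &expected, 1)) {
            atomic_store(&dst->migration_in_progress, 0);
            return -EBUSY;
        }
        pthread_mutex_lock(&dst->lock);
        pthread_mutex_lock(&src->lock);
        return 0;
    }

    static void unlock_two_vms(struct vm *dst, struct vm *src)
    {
        pthread_mutex_unlock(&dst->lock);
        pthread_mutex_unlock(&src->lock);
        atomic_store(&src->migration_in_progress, 0);
        atomic_store(&dst->migration_in_progress, 0);
    }

    int main(void)
    {
        struct vm a = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct vm b = { .lock = PTHREAD_MUTEX_INITIALIZER };

        if (!lock_two_vms(&a, &b))
            unlock_two_vms(&a, &b);
        return 0;
    }

Build with -lpthread; the unlock helper drops the mutexes first, then clears both flags, mirroring the acquisition order in reverse.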
*/ - if (atomic_cmpxchg_acquire(&sev->migration_in_progress, 0, 1)) + if (atomic_cmpxchg_acquire(&dst_sev->migration_in_progress, 0, 1)) return -EBUSY; - mutex_lock(&kvm->lock); + if (atomic_cmpxchg_acquire(&src_sev->migration_in_progress, 0, 1)) + goto release_dst; + r = -EINTR; + if (mutex_lock_killable(&dst_kvm->lock)) + goto release_src; + if (mutex_lock_killable(&src_kvm->lock)) + goto unlock_dst; return 0; + +unlock_dst: + mutex_unlock(&dst_kvm->lock); +release_src: + atomic_set_release(&src_sev->migration_in_progress, 0); +release_dst: + atomic_set_release(&dst_sev->migration_in_progress, 0); + return r; } -static void sev_unlock_after_migration(struct kvm *kvm) +static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm) { - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info; + struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info; - mutex_unlock(&kvm->lock); - atomic_set_release(&sev->migration_in_progress, 0); + mutex_unlock(&dst_kvm->lock); + mutex_unlock(&src_kvm->lock); + atomic_set_release(&dst_sev->migration_in_progress, 0); + atomic_set_release(&src_sev->migration_in_progress, 0); } @@ -1608,14 +1629,15 @@ static void sev_migrate_from(struct kvm_sev_info *dst, dst->asid = src->asid; dst->handle = src->handle; dst->pages_locked = src->pages_locked; + dst->enc_context_owner = src->enc_context_owner; src->asid = 0; src->active = false; src->handle = 0; src->pages_locked = 0; + src->enc_context_owner = NULL; - INIT_LIST_HEAD(&dst->regions_list); - list_replace_init(&src->regions_list, &dst->regions_list); + list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list); } static int sev_es_migrate_from(struct kvm *dst, struct kvm *src) @@ -1667,15 +1689,6 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd) bool charged = false; int ret; - ret = sev_lock_for_migration(kvm); - if (ret) - return ret; - - if (sev_guest(kvm)) { - ret = -EINVAL; - goto out_unlock; - } - source_kvm_file = fget(source_fd); if (!file_is_kvm(source_kvm_file)) { ret = -EBADF; @@ -1683,16 +1696,26 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd) } source_kvm = source_kvm_file->private_data; - ret = sev_lock_for_migration(source_kvm); + ret = sev_lock_two_vms(kvm, source_kvm); if (ret) goto out_fput; - if (!sev_guest(source_kvm)) { + if (sev_guest(kvm) || !sev_guest(source_kvm)) { ret = -EINVAL; - goto out_source; + goto out_unlock; } src_sev = &to_kvm_svm(source_kvm)->sev_info; + + /* + * VMs mirroring src's encryption context rely on it to keep the + * ASID allocated, but below we are clearing src_sev->asid. + */ + if (src_sev->num_mirrored_vms) { + ret = -EBUSY; + goto out_unlock; + } + dst_sev->misc_cg = get_current_misc_cg(); cg_cleanup_sev = dst_sev; if (dst_sev->misc_cg != src_sev->misc_cg) { @@ -1729,13 +1752,11 @@ out_dst_cgroup: sev_misc_cg_uncharge(cg_cleanup_sev); put_misc_cg(cg_cleanup_sev->misc_cg); cg_cleanup_sev->misc_cg = NULL; -out_source: - sev_unlock_after_migration(source_kvm); +out_unlock: + sev_unlock_two_vms(kvm, source_kvm); out_fput: if (source_kvm_file) fput(source_kvm_file); -out_unlock: - sev_unlock_after_migration(kvm); return ret; } @@ -1757,7 +1778,7 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp) /* Only the enc_context_owner handles some memory enc operations. 
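One subtle piece above is the switch from list_replace_init() to list_cut_before() for handing the regions list over: list_replace() splices the destination head in place of the source head, so when the source list is empty the destination ends up pointing at the source VM's (soon reinitialized) head instead of at itself. Cutting everything in front of the head moves the whole list and degenerates safely when there is nothing to move. A self-contained model of that O(1) splice (toy list implementation, not the kernel's list.h):

    #include <stdio.h>

    struct list_head { struct list_head *prev, *next; };

    static void list_init(struct list_head *h) { h->prev = h->next = h; }

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
    }

    /* O(1) splice: move every entry on src onto dst, leaving src empty. */
    static void list_move_all(struct list_head *dst, struct list_head *src)
    {
        if (src->next == src) {           /* empty source: dst is just empty */
            list_init(dst);
            return;
        }
        dst->next = src->next; dst->next->prev = dst;
        dst->prev = src->prev; dst->prev->next = dst;
        list_init(src);
    }

    int main(void)
    {
        struct list_head src, dst, a, b;
        int n = 0;

        list_init(&src);
        list_add_tail(&a, &src);
        list_add_tail(&b, &src);
        list_move_all(&dst, &src);
        for (struct list_head *p = dst.next; p != &dst; p = p->next)
            n++;
        printf("moved %d entries, src empty: %d\n", n, src.next == &src);
        return 0;
    }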
*/ if (is_mirroring_enc_context(kvm) && - !cmd_allowed_from_miror(sev_cmd.id)) { + !is_cmd_allowed_from_mirror(sev_cmd.id)) { r = -EINVAL; goto out; } @@ -1954,71 +1975,60 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd) { struct file *source_kvm_file; struct kvm *source_kvm; - struct kvm_sev_info source_sev, *mirror_sev; + struct kvm_sev_info *source_sev, *mirror_sev; int ret; source_kvm_file = fget(source_fd); if (!file_is_kvm(source_kvm_file)) { ret = -EBADF; - goto e_source_put; + goto e_source_fput; } source_kvm = source_kvm_file->private_data; - mutex_lock(&source_kvm->lock); - - if (!sev_guest(source_kvm)) { - ret = -EINVAL; - goto e_source_unlock; - } + ret = sev_lock_two_vms(kvm, source_kvm); + if (ret) + goto e_source_fput; - /* Mirrors of mirrors should work, but let's not get silly */ - if (is_mirroring_enc_context(source_kvm) || source_kvm == kvm) { + /* + * Mirrors of mirrors should work, but let's not get silly. Also + * disallow out-of-band SEV/SEV-ES init if the target is already an + * SEV guest, or if vCPUs have been created. KVM relies on vCPUs being + * created after SEV/SEV-ES initialization, e.g. to init intercepts. + */ + if (sev_guest(kvm) || !sev_guest(source_kvm) || + is_mirroring_enc_context(source_kvm) || kvm->created_vcpus) { ret = -EINVAL; - goto e_source_unlock; + goto e_unlock; } - memcpy(&source_sev, &to_kvm_svm(source_kvm)->sev_info, - sizeof(source_sev)); - /* * The mirror kvm holds an enc_context_owner ref so its asid can't * disappear until we're done with it */ + source_sev = &to_kvm_svm(source_kvm)->sev_info; kvm_get_kvm(source_kvm); - - fput(source_kvm_file); - mutex_unlock(&source_kvm->lock); - mutex_lock(&kvm->lock); - - if (sev_guest(kvm)) { - ret = -EINVAL; - goto e_mirror_unlock; - } + source_sev->num_mirrored_vms++; /* Set enc_context_owner and copy its encryption context over */ mirror_sev = &to_kvm_svm(kvm)->sev_info; mirror_sev->enc_context_owner = source_kvm; mirror_sev->active = true; - mirror_sev->asid = source_sev.asid; - mirror_sev->fd = source_sev.fd; - mirror_sev->es_active = source_sev.es_active; - mirror_sev->handle = source_sev.handle; + mirror_sev->asid = source_sev->asid; + mirror_sev->fd = source_sev->fd; + mirror_sev->es_active = source_sev->es_active; + mirror_sev->handle = source_sev->handle; + INIT_LIST_HEAD(&mirror_sev->regions_list); + ret = 0; + /* * Do not copy ap_jump_table. Since the mirror does not share the same * KVM contexts as the original, and they may have different * memory-views. 
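From userspace, the mirror path above is reached through KVM_CAP_VM_COPY_ENC_CONTEXT_FROM: the capability is enabled on a freshly created VM with the source VM's fd in args[0], and as the reworked check shows, this must happen before the mirror gains any vCPUs. A hedged sketch of the calling convention (error handling trimmed; assumes an SEV-capable host and an existing SEV VM fd):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Create a VM that mirrors the SEV encryption context of source_vm_fd. */
    static int make_mirror(int kvm_fd, int source_vm_fd)
    {
        struct kvm_enable_cap cap = {
            .cap = KVM_CAP_VM_COPY_ENC_CONTEXT_FROM,
            .args[0] = (unsigned long)source_vm_fd,
        };
        int mirror_vm = ioctl(kvm_fd, KVM_CREATE_VM, 0);

        if (mirror_vm < 0)
            return -1;
        /* Must run before any vCPU is created on the mirror. */
        if (ioctl(mirror_vm, KVM_ENABLE_CAP, &cap) < 0)
            return -1;
        return mirror_vm;
    }

Real code should probe KVM_CHECK_EXTENSION for the capability first; kvm_fd is an open /dev/kvm handle.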
*/ - mutex_unlock(&kvm->lock); - return 0; - -e_mirror_unlock: - mutex_unlock(&kvm->lock); - kvm_put_kvm(source_kvm); - return ret; -e_source_unlock: - mutex_unlock(&source_kvm->lock); -e_source_put: +e_unlock: + sev_unlock_two_vms(kvm, source_kvm); +e_source_fput: if (source_kvm_file) fput(source_kvm_file); return ret; @@ -2030,17 +2040,24 @@ void sev_vm_destroy(struct kvm *kvm) struct list_head *head = &sev->regions_list; struct list_head *pos, *q; + WARN_ON(sev->num_mirrored_vms); + if (!sev_guest(kvm)) return; /* If this is a mirror_kvm release the enc_context_owner and skip sev cleanup */ if (is_mirroring_enc_context(kvm)) { - kvm_put_kvm(sev->enc_context_owner); + struct kvm *owner_kvm = sev->enc_context_owner; + struct kvm_sev_info *owner_sev = &to_kvm_svm(owner_kvm)->sev_info; + + mutex_lock(&owner_kvm->lock); + if (!WARN_ON(!owner_sev->num_mirrored_vms)) + owner_sev->num_mirrored_vms--; + mutex_unlock(&owner_kvm->lock); + kvm_put_kvm(owner_kvm); return; } - mutex_lock(&kvm->lock); - /* * Ensure that all guest tagged cache entries are flushed before * releasing the pages back to the system for use. CLFLUSH will @@ -2060,8 +2077,6 @@ void sev_vm_destroy(struct kvm *kvm) } } - mutex_unlock(&kvm->lock); - sev_unbind_asid(kvm, sev->handle); sev_asid_free(sev); } @@ -2245,7 +2260,7 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu) __free_page(virt_to_page(svm->sev_es.vmsa)); if (svm->sev_es.ghcb_sa_free) - kfree(svm->sev_es.ghcb_sa); + kvfree(svm->sev_es.ghcb_sa); } static void dump_ghcb(struct vcpu_svm *svm) @@ -2337,24 +2352,29 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm) memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap)); } -static int sev_es_validate_vmgexit(struct vcpu_svm *svm) +static bool sev_es_validate_vmgexit(struct vcpu_svm *svm) { struct kvm_vcpu *vcpu; struct ghcb *ghcb; - u64 exit_code = 0; + u64 exit_code; + u64 reason; ghcb = svm->sev_es.ghcb; - /* Only GHCB Usage code 0 is supported */ - if (ghcb->ghcb_usage) - goto vmgexit_err; - /* - * Retrieve the exit code now even though is may not be marked valid + * Retrieve the exit code now even though it may not be marked valid * as it could help with debugging. 
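The validation rework in this and the following chunk changes the failure contract: rather than killing the VM with KVM_EXIT_INTERNAL_ERROR, KVM clears the GHCB valid bitmap and reports the problem to the guest via exit_info_1 = 2 and a reason code in exit_info_2. A toy model of that contract; the numeric reason values below are placeholders, the real GHCB_ERR_* constants are defined by the GHCB specification:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Placeholder reason codes; see the GHCB spec for the real numbers. */
    enum ghcb_err { ERR_INVALID_USAGE = 1, ERR_MISSING_INPUT, ERR_INVALID_EVENT };

    struct ghcb_model {
        uint32_t usage;
        bool exit_code_valid;
        uint64_t exit_info_1, exit_info_2;
        uint8_t valid_bitmap[16];
    };

    /* True if the exit may be handled; false hands an error to the guest. */
    static bool validate_vmgexit(struct ghcb_model *g)
    {
        enum ghcb_err reason;

        if (g->usage != 0) {
            reason = ERR_INVALID_USAGE;
            goto err;
        }
        if (!g->exit_code_valid) {
            reason = ERR_MISSING_INPUT;
            goto err;
        }
        return true;
    err:
        memset(g->valid_bitmap, 0, sizeof(g->valid_bitmap));
        g->exit_info_1 = 2;               /* "error" marker */
        g->exit_info_2 = reason;          /* tell the guest why */
        return false;
    }

    int main(void)
    {
        struct ghcb_model g = { .usage = 1 };

        if (!validate_vmgexit(&g))
            printf("rejected: info1=%llu info2=%llu\n",
                   (unsigned long long)g.exit_info_1,
                   (unsigned long long)g.exit_info_2);
        return 0;
    }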
*/ exit_code = ghcb_get_sw_exit_code(ghcb); + /* Only GHCB Usage code 0 is supported */ + if (ghcb->ghcb_usage) { + reason = GHCB_ERR_INVALID_USAGE; + goto vmgexit_err; + } + + reason = GHCB_ERR_MISSING_INPUT; + if (!ghcb_sw_exit_code_is_valid(ghcb) || !ghcb_sw_exit_info_1_is_valid(ghcb) || !ghcb_sw_exit_info_2_is_valid(ghcb)) @@ -2433,30 +2453,34 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm) case SVM_VMGEXIT_UNSUPPORTED_EVENT: break; default: + reason = GHCB_ERR_INVALID_EVENT; goto vmgexit_err; } - return 0; + return true; vmgexit_err: vcpu = &svm->vcpu; - if (ghcb->ghcb_usage) { + if (reason == GHCB_ERR_INVALID_USAGE) { vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n", ghcb->ghcb_usage); + } else if (reason == GHCB_ERR_INVALID_EVENT) { + vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n", + exit_code); } else { - vcpu_unimpl(vcpu, "vmgexit: exit reason %#llx is not valid\n", + vcpu_unimpl(vcpu, "vmgexit: exit code %#llx input is not valid\n", exit_code); dump_ghcb(svm); } - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON; - vcpu->run->internal.ndata = 2; - vcpu->run->internal.data[0] = exit_code; - vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu; + /* Clear the valid entries fields */ + memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap)); + + ghcb_set_sw_exit_info_1(ghcb, 2); + ghcb_set_sw_exit_info_2(ghcb, reason); - return -EINVAL; + return false; } void sev_es_unmap_ghcb(struct vcpu_svm *svm) @@ -2478,7 +2502,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm) svm->sev_es.ghcb_sa_sync = false; } - kfree(svm->sev_es.ghcb_sa); + kvfree(svm->sev_es.ghcb_sa); svm->sev_es.ghcb_sa = NULL; svm->sev_es.ghcb_sa_free = false; } @@ -2526,14 +2550,14 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) scratch_gpa_beg = ghcb_get_sw_scratch(ghcb); if (!scratch_gpa_beg) { pr_err("vmgexit: scratch gpa not provided\n"); - return false; + goto e_scratch; } scratch_gpa_end = scratch_gpa_beg + len; if (scratch_gpa_end < scratch_gpa_beg) { pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n", len, scratch_gpa_beg); - return false; + goto e_scratch; } if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) { @@ -2551,7 +2575,7 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) scratch_gpa_end > ghcb_scratch_end) { pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n", scratch_gpa_beg, scratch_gpa_end); - return false; + goto e_scratch; } scratch_va = (void *)svm->sev_es.ghcb; @@ -2564,18 +2588,18 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) if (len > GHCB_SCRATCH_AREA_LIMIT) { pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n", len, GHCB_SCRATCH_AREA_LIMIT); - return false; + goto e_scratch; } - scratch_va = kzalloc(len, GFP_KERNEL_ACCOUNT); + scratch_va = kvzalloc(len, GFP_KERNEL_ACCOUNT); if (!scratch_va) - return false; + goto e_scratch; if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) { /* Unable to copy scratch area from guest */ pr_err("vmgexit: kvm_read_guest for scratch area failed\n"); - kfree(scratch_va); - return false; + kvfree(scratch_va); + goto e_scratch; } /* @@ -2592,6 +2616,12 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) svm->sev_es.ghcb_sa_len = len; return true; + +e_scratch: + ghcb_set_sw_exit_info_1(ghcb, 2); + 
ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_SCRATCH_AREA); + + return false; } static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask, @@ -2642,7 +2672,7 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm) ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID); if (!ret) { - ret = -EINVAL; + /* Error, keep GHCB MSR value as-is */ break; } @@ -2678,10 +2708,13 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm) GHCB_MSR_TERM_REASON_POS); pr_info("SEV-ES guest requested termination: %#llx:%#llx\n", reason_set, reason_code); - fallthrough; + + ret = -EINVAL; + break; } default: - ret = -EINVAL; + /* Error, keep GHCB MSR value as-is */ + break; } trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id, @@ -2705,14 +2738,18 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu) if (!ghcb_gpa) { vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n"); - return -EINVAL; + + /* Without a GHCB, just return right back to the guest */ + return 1; } if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) { /* Unable to map GHCB from guest */ vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n", ghcb_gpa); - return -EINVAL; + + /* Without a GHCB, just return right back to the guest */ + return 1; } svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva; @@ -2722,15 +2759,14 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu) exit_code = ghcb_get_sw_exit_code(ghcb); - ret = sev_es_validate_vmgexit(svm); - if (ret) - return ret; + if (!sev_es_validate_vmgexit(svm)) + return 1; sev_es_sync_from_ghcb(svm); ghcb_set_sw_exit_info_1(ghcb, 0); ghcb_set_sw_exit_info_2(ghcb, 0); - ret = -EINVAL; + ret = 1; switch (exit_code) { case SVM_VMGEXIT_MMIO_READ: if (!setup_vmgexit_scratch(svm, true, control->exit_info_2)) @@ -2771,20 +2807,17 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu) default: pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n", control->exit_info_1); - ghcb_set_sw_exit_info_1(ghcb, 1); - ghcb_set_sw_exit_info_2(ghcb, - X86_TRAP_UD | - SVM_EVTINJ_TYPE_EXEPT | - SVM_EVTINJ_VALID); + ghcb_set_sw_exit_info_1(ghcb, 2); + ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_INPUT); } - ret = 1; break; } case SVM_VMGEXIT_UNSUPPORTED_EVENT: vcpu_unimpl(vcpu, "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n", control->exit_info_1, control->exit_info_2); + ret = -EINVAL; break; default: ret = svm_invoke_exit_handler(vcpu, exit_code); @@ -2806,7 +2839,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in) return -EINVAL; if (!setup_vmgexit_scratch(svm, in, bytes)) - return -EINVAL; + return 1; return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa, count, in); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 5630c241d5f6..d0f68d11ec70 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4651,7 +4651,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .load_eoi_exitmap = svm_load_eoi_exitmap, .hwapic_irr_update = svm_hwapic_irr_update, .hwapic_isr_update = svm_hwapic_isr_update, - .sync_pir_to_irr = kvm_lapic_find_highest_irr, .apicv_post_state_restore = avic_post_state_restore, .set_tss_addr = svm_set_tss_addr, diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 437e68504e66..1c7306c370fa 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -79,6 +79,7 @@ struct kvm_sev_info { struct list_head regions_list; /* List of registered regions */ u64 ap_jump_table; /* SEV-ES AP Jump Table address */ struct 
kvm *enc_context_owner; /* Owner of copied encryption context */ + unsigned long num_mirrored_vms; /* Number of VMs sharing this ASID */ struct misc_cg *misc_cg; /* For misc cgroup accounting */ atomic_t migration_in_progress; }; @@ -247,7 +248,7 @@ static __always_inline bool sev_es_guest(struct kvm *kvm) #ifdef CONFIG_KVM_AMD_SEV struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; - return sev_guest(kvm) && sev->es_active; + return sev->es_active && !WARN_ON_ONCE(!sev->active); #else return false; #endif diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index b213ca966d41..9c941535f78c 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -670,33 +670,39 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { - struct kvm_host_map map; - struct vmcs12 *shadow; + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache; if (!nested_cpu_has_shadow_vmcs(vmcs12) || vmcs12->vmcs_link_pointer == INVALID_GPA) return; - shadow = get_shadow_vmcs12(vcpu); - - if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map)) + if (ghc->gpa != vmcs12->vmcs_link_pointer && + kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, + vmcs12->vmcs_link_pointer, VMCS12_SIZE)) return; - memcpy(shadow, map.hva, VMCS12_SIZE); - kvm_vcpu_unmap(vcpu, &map, false); + kvm_read_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu), + VMCS12_SIZE); } static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct vcpu_vmx *vmx = to_vmx(vcpu); + struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache; if (!nested_cpu_has_shadow_vmcs(vmcs12) || vmcs12->vmcs_link_pointer == INVALID_GPA) return; - kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer, - get_shadow_vmcs12(vcpu), VMCS12_SIZE); + if (ghc->gpa != vmcs12->vmcs_link_pointer && + kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, + vmcs12->vmcs_link_pointer, VMCS12_SIZE)) + return; + + kvm_write_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu), + VMCS12_SIZE); } /* @@ -1156,29 +1162,26 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu, WARN_ON(!enable_vpid); /* - * If VPID is enabled and used by vmc12, but L2 does not have a unique - * TLB tag (ASID), i.e. EPT is disabled and KVM was unable to allocate - * a VPID for L2, flush the current context as the effective ASID is - * common to both L1 and L2. - * - * Defer the flush so that it runs after vmcs02.EPTP has been set by - * KVM_REQ_LOAD_MMU_PGD (if nested EPT is enabled) and to avoid - * redundant flushes further down the nested pipeline. - * - * If a TLB flush isn't required due to any of the above, and vpid12 is - * changing then the new "virtual" VPID (vpid12) will reuse the same - * "real" VPID (vpid02), and so needs to be flushed. There's no direct - * mapping between vpid02 and vpid12, vpid02 is per-vCPU and reused for - * all nested vCPUs. Remember, a flush on VM-Enter does not invalidate - * guest-physical mappings, so there is no need to sync the nEPT MMU. + * VPID is enabled and in use by vmcs12. If vpid12 is changing, then + * emulate a guest TLB flush as KVM does not track vpid12 history nor + * is the VPID incorporated into the MMU context. I.e. KVM must assume + * that the new vpid12 has never been used and thus represents a new + * guest ASID that cannot have entries in the TLB. 
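Stripped of KVM plumbing, the decision this comment describes (the code follows just below) has two steps: a changed vpid12 on VM-Enter means a brand-new guest ASID, so request a full guest TLB flush; otherwise flush the current context only when L2 has no TLB tag of its own. As a compilable sketch, with the helper results passed in as flags:

    #include <stdio.h>

    enum flush { FLUSH_NONE, FLUSH_GUEST, FLUSH_CURRENT };

    /*
     * is_vmenter:     the transition is a VM-Enter (not a VM-Exit)
     * vpid12_changed: L1 handed L2 a new virtual VPID
     * has_guest_tag:  L2 has its own TLB tag (EPT on, or a vpid02 allocated)
     */
    static enum flush nested_tlb_flush(int is_vmenter, int vpid12_changed,
                                       int has_guest_tag)
    {
        if (is_vmenter && vpid12_changed)
            return FLUSH_GUEST;       /* new guest ASID, assume nothing cached */
        if (!has_guest_tag)
            return FLUSH_CURRENT;     /* L1 and L2 share the effective ASID */
        return FLUSH_NONE;
    }

    int main(void)
    {
        printf("%d\n", nested_tlb_flush(1, 1, 1));   /* 1: FLUSH_GUEST */
        printf("%d\n", nested_tlb_flush(1, 0, 0));   /* 2: FLUSH_CURRENT */
        return 0;
    }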
*/ - if (!nested_has_guest_tlb_tag(vcpu)) { - kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); - } else if (is_vmenter && - vmcs12->virtual_processor_id != vmx->nested.last_vpid) { + if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) { vmx->nested.last_vpid = vmcs12->virtual_processor_id; - vpid_sync_context(nested_get_vpid02(vcpu)); + kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); + return; } + + /* + * If VPID is enabled, used by vmc12, and vpid12 is not changing but + * does not have a unique TLB tag (ASID), i.e. EPT is disabled and + * KVM was unable to allocate a VPID for L2, flush the current context + * as the effective ASID is common to both L1 and L2. + */ + if (!nested_has_guest_tlb_tag(vcpu)) + kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); } static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask) @@ -2588,8 +2591,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, - vmcs12->guest_ia32_perf_global_ctrl))) + vmcs12->guest_ia32_perf_global_ctrl))) { + *entry_failure_code = ENTRY_FAIL_DEFAULT; return -EINVAL; + } kvm_rsp_write(vcpu, vmcs12->guest_rsp); kvm_rip_write(vcpu, vmcs12->guest_rip); @@ -2830,6 +2835,17 @@ static int nested_vmx_check_controls(struct kvm_vcpu *vcpu, return 0; } +static int nested_vmx_check_address_space_size(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ +#ifdef CONFIG_X86_64 + if (CC(!!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) != + !!(vcpu->arch.efer & EFER_LMA))) + return -EINVAL; +#endif + return 0; +} + static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { @@ -2854,18 +2870,16 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, return -EINVAL; #ifdef CONFIG_X86_64 - ia32e = !!(vcpu->arch.efer & EFER_LMA); + ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE); #else ia32e = false; #endif if (ia32e) { - if (CC(!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)) || - CC(!(vmcs12->host_cr4 & X86_CR4_PAE))) + if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE))) return -EINVAL; } else { - if (CC(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) || - CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) || + if (CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) || CC(vmcs12->host_cr4 & X86_CR4_PCIDE) || CC((vmcs12->host_rip) >> 32)) return -EINVAL; @@ -2910,9 +2924,9 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { - int r = 0; - struct vmcs12 *shadow; - struct kvm_host_map map; + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache; + struct vmcs_hdr hdr; if (vmcs12->vmcs_link_pointer == INVALID_GPA) return 0; @@ -2920,17 +2934,21 @@ static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu, if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))) return -EINVAL; - if (CC(kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))) - return -EINVAL; + if (ghc->gpa != vmcs12->vmcs_link_pointer && + CC(kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, + vmcs12->vmcs_link_pointer, VMCS12_SIZE))) + return -EINVAL; - shadow = map.hva; + if (CC(kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr, + offsetof(struct vmcs12, hdr), + sizeof(hdr)))) + return -EINVAL; - if (CC(shadow->hdr.revision_id != VMCS12_REVISION) || - 
CC(shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))) - r = -EINVAL; + if (CC(hdr.revision_id != VMCS12_REVISION) || + CC(hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))) + return -EINVAL; - kvm_vcpu_unmap(vcpu, &map, false); - return r; + return 0; } /* @@ -3325,8 +3343,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, }; u32 failed_index; - if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) - kvm_vcpu_flush_tlb_current(vcpu); + kvm_service_local_tlb_flush_requests(vcpu); evaluate_pending_interrupts = exec_controls_get(vmx) & (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING); @@ -3535,6 +3552,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) if (nested_vmx_check_controls(vcpu, vmcs12)) return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); + if (nested_vmx_check_address_space_size(vcpu, vmcs12)) + return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD); + if (nested_vmx_check_host_state(vcpu, vmcs12)) return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD); @@ -4480,9 +4500,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, (void)nested_get_evmcs_page(vcpu); } - /* Service the TLB flush request for L2 before switching to L1. */ - if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) - kvm_vcpu_flush_tlb_current(vcpu); + /* Service pending TLB flush requests for L2 before switching to L1. */ + kvm_service_local_tlb_flush_requests(vcpu); /* * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between @@ -4835,6 +4854,7 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu) if (!vmx->nested.cached_vmcs12) goto out_cached_vmcs12; + vmx->nested.shadow_vmcs12_cache.gpa = INVALID_GPA; vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); if (!vmx->nested.cached_shadow_vmcs12) goto out_cached_shadow_vmcs12; @@ -5264,10 +5284,10 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) return 1; if (vmx->nested.current_vmptr != vmptr) { - struct kvm_host_map map; - struct vmcs12 *new_vmcs12; + struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache; + struct vmcs_hdr hdr; - if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmptr), &map)) { + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) { /* * Reads from an unbacked page return all 1s, * which means that the 32 bits located at the @@ -5278,12 +5298,16 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); } - new_vmcs12 = map.hva; + if (kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr, + offsetof(struct vmcs12, hdr), + sizeof(hdr))) { + return nested_vmx_fail(vcpu, + VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); + } - if (new_vmcs12->hdr.revision_id != VMCS12_REVISION || - (new_vmcs12->hdr.shadow_vmcs && + if (hdr.revision_id != VMCS12_REVISION || + (hdr.shadow_vmcs && !nested_cpu_has_vmx_shadow_vmcs(vcpu))) { - kvm_vcpu_unmap(vcpu, &map, false); return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); } @@ -5294,8 +5318,11 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) * Load VMCS12 from guest memory since it is not already * cached. 
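Both conversions in this area follow one pattern: keep a gfn_to_hva_cache per cached object, re-initialize it only when the guest physical address actually changes, and read through the cached translation otherwise, saving a map/unmap round trip per access. The pattern reduced to userspace, with a flat array standing in for guest memory (every name here is invented for the demo):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint8_t guest_mem[16 * 4096];  /* toy guest physical address space */

    struct gpa_cache {
        uint64_t gpa;                     /* ~0 means "nothing cached yet" */
        uint8_t *hva;                     /* cached translation */
    };

    static int cache_init(struct gpa_cache *c, uint64_t gpa, size_t len)
    {
        if (gpa + len > sizeof(guest_mem))
            return -1;                    /* unbacked GPA, caller bails out */
        c->gpa = gpa;
        c->hva = guest_mem + gpa;         /* the kernel walks memslots here */
        return 0;
    }

    static int cached_read(struct gpa_cache *c, uint64_t gpa, void *dst,
                           size_t len)
    {
        /* Revalidate only on an address change; hits skip translation. */
        if (c->gpa != gpa && cache_init(c, gpa, len))
            return -1;
        memcpy(dst, c->hva, len);
        return 0;
    }

    int main(void)
    {
        struct gpa_cache c = { .gpa = ~0ull };
        uint8_t revision_id;

        guest_mem[4096] = 0x2a;           /* pretend a vmcs12 header at 0x1000 */
        if (!cached_read(&c, 4096, &revision_id, 1))
            printf("revision id: %#x\n", revision_id);  /* 0x2a */
        return 0;
    }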
*/ - memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE); - kvm_vcpu_unmap(vcpu, &map, false); + if (kvm_read_guest_cached(vcpu->kvm, ghc, vmx->nested.cached_vmcs12, + VMCS12_SIZE)) { + return nested_vmx_fail(vcpu, + VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); + } set_current_vmptr(vmx, vmptr); } diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c index 5f81ef092bd4..1c94783b5a54 100644 --- a/arch/x86/kvm/vmx/posted_intr.c +++ b/arch/x86/kvm/vmx/posted_intr.c @@ -5,6 +5,7 @@ #include <asm/cpu.h> #include "lapic.h" +#include "irq.h" #include "posted_intr.h" #include "trace.h" #include "vmx.h" @@ -77,13 +78,18 @@ after_clear_sn: pi_set_on(pi_desc); } +static bool vmx_can_use_vtd_pi(struct kvm *kvm) +{ + return irqchip_in_kernel(kvm) && enable_apicv && + kvm_arch_has_assigned_device(kvm) && + irq_remapping_cap(IRQ_POSTING_CAP); +} + void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu) { struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); - if (!kvm_arch_has_assigned_device(vcpu->kvm) || - !irq_remapping_cap(IRQ_POSTING_CAP) || - !kvm_vcpu_apicv_active(vcpu)) + if (!vmx_can_use_vtd_pi(vcpu->kvm)) return; /* Set SN when the vCPU is preempted */ @@ -141,9 +147,7 @@ int pi_pre_block(struct kvm_vcpu *vcpu) struct pi_desc old, new; struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); - if (!kvm_arch_has_assigned_device(vcpu->kvm) || - !irq_remapping_cap(IRQ_POSTING_CAP) || - !kvm_vcpu_apicv_active(vcpu)) + if (!vmx_can_use_vtd_pi(vcpu->kvm)) return 0; WARN_ON(irqs_disabled()); @@ -270,9 +274,7 @@ int pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, struct vcpu_data vcpu_info; int idx, ret = 0; - if (!kvm_arch_has_assigned_device(kvm) || - !irq_remapping_cap(IRQ_POSTING_CAP) || - !kvm_vcpu_apicv_active(kvm->vcpus[0])) + if (!vmx_can_use_vtd_pi(kvm)) return 0; idx = srcu_read_lock(&kvm->irq_srcu); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index ba66c171d951..5aadad3e7367 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2646,15 +2646,6 @@ int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) if (!loaded_vmcs->msr_bitmap) goto out_vmcs; memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE); - - if (IS_ENABLED(CONFIG_HYPERV) && - static_branch_unlikely(&enable_evmcs) && - (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) { - struct hv_enlightened_vmcs *evmcs = - (struct hv_enlightened_vmcs *)loaded_vmcs->vmcs; - - evmcs->hv_enlightenments_control.msr_bitmap = 1; - } } memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state)); @@ -2918,6 +2909,13 @@ static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu) } } +static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu) +{ + if (is_guest_mode(vcpu)) + return nested_get_vpid02(vcpu); + return to_vmx(vcpu)->vpid; +} + static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu) { struct kvm_mmu *mmu = vcpu->arch.mmu; @@ -2930,31 +2928,29 @@ static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu) if (enable_ept) ept_sync_context(construct_eptp(vcpu, root_hpa, mmu->shadow_root_level)); - else if (!is_guest_mode(vcpu)) - vpid_sync_context(to_vmx(vcpu)->vpid); else - vpid_sync_context(nested_get_vpid02(vcpu)); + vpid_sync_context(vmx_get_current_vpid(vcpu)); } static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr) { /* - * vpid_sync_vcpu_addr() is a nop if vmx->vpid==0, see the comment in + * vpid_sync_vcpu_addr() is a nop if vpid==0, see the comment in * vmx_flush_tlb_guest() for an explanation of why this is ok. 
*/ - vpid_sync_vcpu_addr(to_vmx(vcpu)->vpid, addr); + vpid_sync_vcpu_addr(vmx_get_current_vpid(vcpu), addr); } static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu) { /* - * vpid_sync_context() is a nop if vmx->vpid==0, e.g. if enable_vpid==0 - * or a vpid couldn't be allocated for this vCPU. VM-Enter and VM-Exit - * are required to flush GVA->{G,H}PA mappings from the TLB if vpid is + * vpid_sync_context() is a nop if vpid==0, e.g. if enable_vpid==0 or a + * vpid couldn't be allocated for this vCPU. VM-Enter and VM-Exit are + * required to flush GVA->{G,H}PA mappings from the TLB if vpid is * disabled (VM-Enter with vpid enabled and vpid==0 is disallowed), * i.e. no explicit INVVPID is necessary. */ - vpid_sync_context(to_vmx(vcpu)->vpid); + vpid_sync_context(vmx_get_current_vpid(vcpu)); } void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu) @@ -6262,9 +6258,9 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int max_irr; - bool max_irr_updated; + bool got_posted_interrupt; - if (KVM_BUG_ON(!vcpu->arch.apicv_active, vcpu->kvm)) + if (KVM_BUG_ON(!enable_apicv, vcpu->kvm)) return -EIO; if (pi_test_on(&vmx->pi_desc)) { @@ -6274,22 +6270,33 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu) * But on x86 this is just a compiler barrier anyway. */ smp_mb__after_atomic(); - max_irr_updated = + got_posted_interrupt = kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr); - - /* - * If we are running L2 and L1 has a new pending interrupt - * which can be injected, this may cause a vmexit or it may - * be injected into L2. Either way, this interrupt will be - * processed via KVM_REQ_EVENT, not RVI, because we do not use - * virtual interrupt delivery to inject L1 interrupts into L2. - */ - if (is_guest_mode(vcpu) && max_irr_updated) - kvm_make_request(KVM_REQ_EVENT, vcpu); } else { max_irr = kvm_lapic_find_highest_irr(vcpu); + got_posted_interrupt = false; } - vmx_hwapic_irr_update(vcpu, max_irr); + + /* + * Newly recognized interrupts are injected via either virtual interrupt + * delivery (RVI) or KVM_REQ_EVENT. Virtual interrupt delivery is + * disabled in two cases: + * + * 1) If L2 is running and the vCPU has a new pending interrupt. If L1 + * wants to exit on interrupts, KVM_REQ_EVENT is needed to synthesize a + * VM-Exit to L1. If L1 doesn't want to exit, the interrupt is injected + * into L2, but KVM doesn't use virtual interrupt delivery to inject + * interrupts into L2, and so KVM_REQ_EVENT is again needed. + * + * 2) If APICv is disabled for this vCPU, assigned devices may still + * attempt to post interrupts. The posted interrupt vector will cause + * a VM-Exit and the subsequent entry will call sync_pir_to_irr. + */ + if (!is_guest_mode(vcpu) && kvm_vcpu_apicv_active(vcpu)) + vmx_set_rvi(max_irr); + else if (got_posted_interrupt) + kvm_make_request(KVM_REQ_EVENT, vcpu); + return max_irr; } @@ -6826,6 +6833,19 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu) if (err < 0) goto free_pml; + /* + * Use Hyper-V 'Enlightened MSR Bitmap' feature when KVM runs as a + * nested (L1) hypervisor and Hyper-V in L0 supports it. Enable the + * feature only for vmcs01, KVM currently isn't equipped to realize any + * performance benefits from enabling it for vmcs02. 
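The vmx_sync_pir_to_irr() rework earlier in this chunk revolves around scanning the 256-bit posted-interrupt request bitmap for the highest pending vector; the real kvm_apic_update_irr() additionally ORs those bits into the IRR and reports whether anything new arrived, which this sketch leaves out. The scan itself, over eight 32-bit words:

    #include <stdint.h>
    #include <stdio.h>

    /* Returns the highest pending vector in pir[8], or -1 if none. */
    static int pir_find_max(const uint32_t pir[8])
    {
        for (int word = 7; word >= 0; word--) {
            if (!pir[word])
                continue;
            /* 31 - clz is the highest set bit within this word. */
            return word * 32 + (31 - __builtin_clz(pir[word]));
        }
        return -1;
    }

    int main(void)
    {
        uint32_t pir[8] = { 0 };

        pir[1] = 1u << 5;     /* vector 37 pending */
        pir[6] = 1u << 2;     /* vector 194 pending */
        printf("max_irr = %d\n", pir_find_max(pir));   /* 194 */
        return 0;
    }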
+ */ + if (IS_ENABLED(CONFIG_HYPERV) && static_branch_unlikely(&enable_evmcs) && + (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) { + struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs; + + evmcs->hv_enlightenments_control.msr_bitmap = 1; + } + /* The MSR bitmap starts with all ones */ bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS); bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS); @@ -7509,6 +7529,7 @@ static void hardware_unsetup(void) static bool vmx_check_apicv_inhibit_reasons(ulong bit) { ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) | + BIT(APICV_INHIBIT_REASON_ABSENT) | BIT(APICV_INHIBIT_REASON_HYPERV) | BIT(APICV_INHIBIT_REASON_BLOCKIRQ); @@ -7761,10 +7782,10 @@ static __init int hardware_setup(void) ple_window_shrink = 0; } - if (!cpu_has_vmx_apicv()) { + if (!cpu_has_vmx_apicv()) enable_apicv = 0; + if (!enable_apicv) vmx_x86_ops.sync_pir_to_irr = NULL; - } if (cpu_has_vmx_tsc_scaling()) { kvm_has_tsc_control = true; diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index a4ead6023133..4df2ac24ffc1 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -142,6 +142,16 @@ struct nested_vmx { struct vmcs12 *cached_shadow_vmcs12; /* + * GPA to HVA cache for accessing vmcs12->vmcs_link_pointer + */ + struct gfn_to_hva_cache shadow_vmcs12_cache; + + /* + * GPA to HVA cache for VMCS12 + */ + struct gfn_to_hva_cache vmcs12_cache; + + /* * Indicates if the shadow vmcs or enlightened vmcs must be updated * with the data held by struct vmcs12. */ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index dc7eb5fddfd3..0cf1082455df 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -890,7 +890,8 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu))) return 1; - if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) + if (!(cr0 & X86_CR0_PG) && + (is_64_bit_mode(vcpu) || kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))) return 1; static_call(kvm_x86_set_cr0)(vcpu, cr0); @@ -3258,6 +3259,29 @@ static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu) static_call(kvm_x86_tlb_flush_guest)(vcpu); } + +static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu) +{ + ++vcpu->stat.tlb_flush; + static_call(kvm_x86_tlb_flush_current)(vcpu); +} + +/* + * Service "local" TLB flush requests, which are specific to the current MMU + * context. In addition to the generic event handling in vcpu_enter_guest(), + * TLB flushes that are targeted at an MMU context also need to be serviced + * prior to nested VM-Enter/VM-Exit. 
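kvm_service_local_tlb_flush_requests(), whose body opens the next chunk, is just two kvm_check_request() calls, and kvm_check_request() is at heart an atomic test-and-clear of one bit in a per-vCPU request word. The idiom in miniature, runnable as-is (the request numbering is invented for the demo):

    #include <stdatomic.h>
    #include <stdio.h>

    enum { REQ_TLB_FLUSH_CURRENT, REQ_TLB_FLUSH_GUEST };

    static atomic_ulong requests;

    static void make_request(int req)
    {
        atomic_fetch_or(&requests, 1ul << req);
    }

    /* Test-and-clear, like kvm_check_request(): fires at most once. */
    static int check_request(int req)
    {
        unsigned long bit = 1ul << req;

        return (atomic_fetch_and(&requests, ~bit) & bit) != 0;
    }

    static void service_local_flush_requests(void)
    {
        if (check_request(REQ_TLB_FLUSH_CURRENT))
            printf("flush current context\n");
        if (check_request(REQ_TLB_FLUSH_GUEST))
            printf("flush guest mappings\n");
    }

    int main(void)
    {
        make_request(REQ_TLB_FLUSH_GUEST);
        service_local_flush_requests();   /* prints the guest flush once */
        service_local_flush_requests();   /* request already consumed */
        return 0;
    }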
+ */ +void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu) +{ + if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) + kvm_vcpu_flush_tlb_current(vcpu); + + if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu)) + kvm_vcpu_flush_tlb_guest(vcpu); +} +EXPORT_SYMBOL_GPL(kvm_service_local_tlb_flush_requests); + static void record_steal_time(struct kvm_vcpu *vcpu) { struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; @@ -3307,9 +3331,9 @@ static void record_steal_time(struct kvm_vcpu *vcpu) "xor %1, %1\n" "2:\n" _ASM_EXTABLE_UA(1b, 2b) - : "+r" (st_preempted), - "+&r" (err) - : "m" (st->preempted)); + : "+q" (st_preempted), + "+&r" (err), + "+m" (st->preempted)); if (err) goto out; @@ -4133,6 +4157,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_SGX_ATTRIBUTE: #endif case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM: + case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM: case KVM_CAP_SREGS2: case KVM_CAP_EXIT_ON_EMULATION_FAILURE: case KVM_CAP_VCPU_ATTRIBUTES: @@ -4179,7 +4204,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = !static_call(kvm_x86_cpu_has_accelerated_tpr)(); break; case KVM_CAP_NR_VCPUS: - r = num_online_cpus(); + r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS); break; case KVM_CAP_MAX_VCPUS: r = KVM_MAX_VCPUS; @@ -4448,8 +4473,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { - if (vcpu->arch.apicv_active) - static_call(kvm_x86_sync_pir_to_irr)(vcpu); + static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); return kvm_apic_get_state(vcpu, s); } @@ -5124,6 +5148,17 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_cpuid __user *cpuid_arg = argp; struct kvm_cpuid cpuid; + /* + * KVM does not correctly handle changing guest CPUID after KVM_RUN, as + * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc. aren't + * tracked in kvm_mmu_page_role. As a result, KVM may miss guest page + * faults due to reusing SPs/SPTEs. In practice no sane VMM mucks with + * the core vCPU model on the fly, so fail. + */ + r = -EINVAL; + if (vcpu->arch.last_vmentry_cpu != -1) + goto out; + r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) goto out; @@ -5134,6 +5169,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_cpuid2 __user *cpuid_arg = argp; struct kvm_cpuid2 cpuid; + /* + * KVM_SET_CPUID{,2} after KVM_RUN is forbidden, see the comment in + * KVM_SET_CPUID case above. + */ + r = -EINVAL; + if (vcpu->arch.last_vmentry_cpu != -1) + goto out; + r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) goto out; @@ -5698,6 +5741,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, smp_wmb(); kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT; kvm->arch.nr_reserved_ioapic_pins = cap->args[0]; + kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT); r = 0; split_irqchip_unlock: mutex_unlock(&kvm->lock); @@ -6078,6 +6122,7 @@ set_identity_unlock: /* Write kvm->irq_routing before enabling irqchip_in_kernel. */ smp_wmb(); kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL; + kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT); create_irqchip_unlock: mutex_unlock(&kvm->lock); break; @@ -7077,7 +7122,13 @@ static int emulator_pio_in(struct kvm_vcpu *vcpu, int size, unsigned short port, void *val, unsigned int count) { if (vcpu->arch.pio.count) { - /* Complete previous iteration. */ + /* + * Complete a previous iteration that required userspace I/O. 
+ * Note, @count isn't guaranteed to match pio.count as userspace + * can modify ECX before rerunning the vCPU. Ignore any such + * shenanigans as KVM doesn't support modifying the rep count, + * and the emulator ensures @count doesn't overflow the buffer. + */ } else { int r = __emulator_pio_in(vcpu, size, port, count); if (!r) @@ -7086,7 +7137,6 @@ static int emulator_pio_in(struct kvm_vcpu *vcpu, int size, /* Results already available, fall through. */ } - WARN_ON(count != vcpu->arch.pio.count); complete_emulator_pio_in(vcpu, val); return 1; } @@ -8776,10 +8826,9 @@ static void kvm_apicv_init(struct kvm *kvm) { init_rwsem(&kvm->arch.apicv_update_lock); - if (enable_apicv) - clear_bit(APICV_INHIBIT_REASON_DISABLE, - &kvm->arch.apicv_inhibit_reasons); - else + set_bit(APICV_INHIBIT_REASON_ABSENT, + &kvm->arch.apicv_inhibit_reasons); + if (!enable_apicv) set_bit(APICV_INHIBIT_REASON_DISABLE, &kvm->arch.apicv_inhibit_reasons); } @@ -8848,7 +8897,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) trace_kvm_hypercall(nr, a0, a1, a2, a3); - op_64_bit = is_64_bit_mode(vcpu); + op_64_bit = is_64_bit_hypercall(vcpu); if (!op_64_bit) { nr &= 0xFFFFFFFF; a0 &= 0xFFFFFFFF; @@ -9528,8 +9577,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) if (irqchip_split(vcpu->kvm)) kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); else { - if (vcpu->arch.apicv_active) - static_call(kvm_x86_sync_pir_to_irr)(vcpu); + static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); if (ioapic_in_kernel(vcpu->kvm)) kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); } @@ -9547,12 +9595,16 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu) if (!kvm_apic_hw_enabled(vcpu->arch.apic)) return; - if (to_hv_vcpu(vcpu)) + if (to_hv_vcpu(vcpu)) { bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors, to_hv_synic(vcpu)->vec_bitmap, 256); + static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap); + return; + } - static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap); + static_call(kvm_x86_load_eoi_exitmap)( + vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors); } void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, @@ -9644,10 +9696,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) /* Flushing all ASIDs flushes the current ASID... */ kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); } - if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) - kvm_vcpu_flush_tlb_current(vcpu); - if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu)) - kvm_vcpu_flush_tlb_guest(vcpu); + kvm_service_local_tlb_flush_requests(vcpu); if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; @@ -9798,10 +9847,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) /* * This handles the case where a posted interrupt was - * notified with kvm_vcpu_kick. + * notified with kvm_vcpu_kick. Assigned devices can + * use the POSTED_INTR_VECTOR even if APICv is disabled, + * so do it even if APICv is disabled on this vCPU. 
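What makes this conversion work is static_call_cond(): a NULL kvm_x86_sync_pir_to_irr hook degenerates into a no-op, so call sites stop testing apicv_active themselves and VMX simply leaves the hook unset when APICv is unsupported (the hardware_setup() hunk earlier). The moral equivalent with a plain function pointer; real static calls patch the call site instead of branching at runtime:

    #include <stdio.h>

    struct x86_ops {
        void (*sync_pir_to_irr)(int vcpu_id);   /* may be NULL */
    };

    static void vmx_sync_pir_to_irr(int vcpu_id)
    {
        printf("vcpu %d: syncing PIR into IRR\n", vcpu_id);
    }

    /* A NULL hook is a no-op, so callers need no feature checks. */
    static void call_cond_sync(const struct x86_ops *ops, int vcpu_id)
    {
        if (ops->sync_pir_to_irr)
            ops->sync_pir_to_irr(vcpu_id);
    }

    int main(void)
    {
        struct x86_ops apicv_on  = { .sync_pir_to_irr = vmx_sync_pir_to_irr };
        struct x86_ops apicv_off = { 0 };

        call_cond_sync(&apicv_on, 0);     /* syncs */
        call_cond_sync(&apicv_off, 0);    /* silently skipped */
        return 0;
    }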
*/ - if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active) - static_call(kvm_x86_sync_pir_to_irr)(vcpu); + if (kvm_lapic_enabled(vcpu)) + static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); if (kvm_vcpu_exit_request(vcpu)) { vcpu->mode = OUTSIDE_GUEST_MODE; @@ -9845,8 +9896,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST)) break; - if (vcpu->arch.apicv_active) - static_call(kvm_x86_sync_pir_to_irr)(vcpu); + if (kvm_lapic_enabled(vcpu)) + static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); if (unlikely(kvm_vcpu_exit_request(vcpu))) { exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED; diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index ea264c4502e4..4abcd8d9836d 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -103,6 +103,7 @@ static inline unsigned int __shrink_ple_window(unsigned int val, #define MSR_IA32_CR_PAT_DEFAULT 0x0007040600070406ULL +void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu); int kvm_check_nested_events(struct kvm_vcpu *vcpu); static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu) @@ -153,12 +154,24 @@ static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu) { int cs_db, cs_l; + WARN_ON_ONCE(vcpu->arch.guest_state_protected); + if (!is_long_mode(vcpu)) return false; static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); return cs_l; } +static inline bool is_64_bit_hypercall(struct kvm_vcpu *vcpu) +{ + /* + * If running with protected guest state, the CS register is not + * accessible. The hypercall register values will have had to been + * provided in 64-bit mode, so assume the guest is in 64-bit. + */ + return vcpu->arch.guest_state_protected || is_64_bit_mode(vcpu); +} + static inline bool x86_exception_has_error_code(unsigned int vector) { static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) | @@ -173,12 +186,6 @@ static inline bool mmu_is_nested(struct kvm_vcpu *vcpu) return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu; } -static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu) -{ - ++vcpu->stat.tlb_flush; - static_call(kvm_x86_tlb_flush_current)(vcpu); -} - static inline int is_pae(struct kvm_vcpu *vcpu) { return kvm_read_cr4_bits(vcpu, X86_CR4_PAE); diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 8f62baebd028..dff2bdf9507a 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -127,9 +127,9 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state) state_entry_time = vx->runstate_entry_time; state_entry_time |= XEN_RUNSTATE_UPDATE; - BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->state_entry_time) != + BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state_entry_time) != sizeof(state_entry_time)); - BUILD_BUG_ON(sizeof(((struct compat_vcpu_runstate_info *)0)->state_entry_time) != + BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) != sizeof(state_entry_time)); if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache, @@ -144,9 +144,9 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state) */ BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != offsetof(struct compat_vcpu_runstate_info, state)); - BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->state) != + BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state) != sizeof(vx->current_runstate)); - BUILD_BUG_ON(sizeof(((struct compat_vcpu_runstate_info *)0)->state) != + BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) != sizeof(vx->current_runstate)); if 
(kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache, @@ -163,9 +163,9 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state) offsetof(struct vcpu_runstate_info, time) - sizeof(u64)); BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state_entry_time) != offsetof(struct compat_vcpu_runstate_info, time) - sizeof(u64)); - BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->time) != - sizeof(((struct compat_vcpu_runstate_info *)0)->time)); - BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->time) != + BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) != + sizeof_field(struct compat_vcpu_runstate_info, time)); + BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) != sizeof(vx->runstate_times)); if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache, @@ -205,9 +205,9 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v) BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) != offsetof(struct compat_vcpu_info, evtchn_upcall_pending)); BUILD_BUG_ON(sizeof(rc) != - sizeof(((struct vcpu_info *)0)->evtchn_upcall_pending)); + sizeof_field(struct vcpu_info, evtchn_upcall_pending)); BUILD_BUG_ON(sizeof(rc) != - sizeof(((struct compat_vcpu_info *)0)->evtchn_upcall_pending)); + sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending)); /* * For efficiency, this mirrors the checks for using the valid @@ -299,7 +299,7 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) break; case KVM_XEN_ATTR_TYPE_SHARED_INFO: - data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_gfn); + data->u.shared_info.gfn = kvm->arch.xen.shinfo_gfn; r = 0; break; @@ -698,7 +698,7 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu) kvm_hv_hypercall_enabled(vcpu)) return kvm_hv_hypercall(vcpu); - longmode = is_64_bit_mode(vcpu); + longmode = is_64_bit_hypercall(vcpu); if (!longmode) { params[0] = (u32)kvm_rbx_read(vcpu); params[1] = (u32)kvm_rcx_read(vcpu); diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index b15ebfe40a73..b0b848d6933a 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -277,7 +277,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) return; } - new = early_memremap(data.phys_map, data.size); + new = early_memremap_prot(data.phys_map, data.size, + pgprot_val(pgprot_encrypted(FIXMAP_PAGE_NORMAL))); if (!new) { pr_err("Failed to map new boot services memmap\n"); return; diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c index 4a3da7592b99..38d24d2ab38b 100644 --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c @@ -72,6 +72,7 @@ static void __init setup_real_mode(void) #ifdef CONFIG_X86_64 u64 *trampoline_pgd; u64 efer; + int i; #endif base = (unsigned char *)real_mode_header; @@ -128,8 +129,17 @@ static void __init setup_real_mode(void) trampoline_header->flags = 0; trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd); + + /* Map the real mode stub as virtual == physical */ trampoline_pgd[0] = trampoline_pgd_entry.pgd; - trampoline_pgd[511] = init_top_pgt[511].pgd; + + /* + * Include the entirety of the kernel mapping into the trampoline + * PGD. This way, all mappings present in the normal kernel page + * tables are usable while running on trampoline_pgd. 
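The copy loop that follows starts at pgd_index(__PAGE_OFFSET): with 4-level paging, bits 39-47 of a virtual address select the PGD slot, so each slot spans 512 GiB and the direct map begins at a fixed index. A quick computation of that starting slot, assuming the default 4-level direct-map base (5-level paging and KASLR change the numbers):

    #include <stdint.h>
    #include <stdio.h>

    #define PGDIR_SHIFT   39      /* 4-level paging: one PGD slot = 512 GiB */
    #define PTRS_PER_PGD  512

    static unsigned int pgd_index(uint64_t addr)
    {
        return (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
    }

    int main(void)
    {
        uint64_t page_offset = 0xffff888000000000ull;  /* 4-level default */

        printf("first copied slot: %u of %u\n",
               pgd_index(page_offset), PTRS_PER_PGD);  /* 273 of 512 */
        return 0;
    }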
+ */ + for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++) + trampoline_pgd[i] = init_top_pgt[i].pgd; #endif sme_sev_setup_real_mode(trampoline_header); diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S index 220dd9678494..444d824775f6 100644 --- a/arch/x86/xen/xen-asm.S +++ b/arch/x86/xen/xen-asm.S @@ -20,6 +20,7 @@ #include <linux/init.h> #include <linux/linkage.h> +#include <../entry/calling.h> .pushsection .noinstr.text, "ax" /* @@ -193,6 +194,25 @@ SYM_CODE_START(xen_iret) SYM_CODE_END(xen_iret) /* + * XEN pv doesn't use trampoline stack, PER_CPU_VAR(cpu_tss_rw + TSS_sp0) is + * also the kernel stack. Reusing swapgs_restore_regs_and_return_to_usermode() + * in XEN pv would cause %rsp to move up to the top of the kernel stack and + * leave the IRET frame below %rsp, which is dangerous to be corrupted if #NMI + * interrupts. And swapgs_restore_regs_and_return_to_usermode() pushing the IRET + * frame at the same address is useless. + */ +SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode) + UNWIND_HINT_REGS + POP_REGS + + /* stackleak_erase() can work safely on the kernel stack. */ + STACKLEAK_ERASE_NOCLOBBER + + addq $8, %rsp /* skip regs->orig_ax */ + jmp xen_iret +SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode) + +/* * Xen handles syscall callbacks much like ordinary exceptions, which * means we have: * - kernel gs diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h index a8a041609c5d..7b4359312c25 100644 --- a/arch/xtensa/include/asm/cacheflush.h +++ b/arch/xtensa/include/asm/cacheflush.h @@ -121,7 +121,6 @@ void flush_cache_page(struct vm_area_struct*, #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 void flush_dcache_page(struct page *); -void flush_dcache_folio(struct folio *); void local_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); @@ -138,9 +137,7 @@ void local_flush_cache_page(struct vm_area_struct *vma, #define flush_cache_vunmap(start,end) do { } while (0) #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 -#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO #define flush_dcache_page(page) do { } while (0) -static inline void flush_dcache_folio(struct folio *folio) { } #define flush_icache_range local_flush_icache_range #define flush_cache_page(vma, addr, pfn) do { } while (0) diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl index 104b327f8ac9..3e3e1a506bed 100644 --- a/arch/xtensa/kernel/syscalls/syscall.tbl +++ b/arch/xtensa/kernel/syscalls/syscall.tbl @@ -419,3 +419,4 @@ 446 common landlock_restrict_self sys_landlock_restrict_self # 447 reserved for memfd_secret 448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv diff --git a/block/bdev.c b/block/bdev.c index b4dab2fb6a74..b1d087e5e205 100644 --- a/block/bdev.c +++ b/block/bdev.c @@ -753,8 +753,7 @@ struct block_device *blkdev_get_no_open(dev_t dev) if (!bdev) return NULL; - if ((bdev->bd_disk->flags & GENHD_FL_HIDDEN) || - !try_module_get(bdev->bd_disk->fops->owner)) { + if ((bdev->bd_disk->flags & GENHD_FL_HIDDEN)) { put_device(&bdev->bd_device); return NULL; } @@ -764,7 +763,6 @@ struct block_device *blkdev_get_no_open(dev_t dev) void blkdev_put_no_open(struct block_device *bdev) { - module_put(bdev->bd_disk->fops->owner); put_device(&bdev->bd_device); } @@ -820,12 +818,14 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder) ret = -ENXIO; if (!disk_live(disk)) goto abort_claiming; + if (!try_module_get(disk->fops->owner)) + 
goto abort_claiming; if (bdev_is_partition(bdev)) ret = blkdev_get_part(bdev, mode); else ret = blkdev_get_whole(bdev, mode); if (ret) - goto abort_claiming; + goto put_module; if (mode & FMODE_EXCL) { bd_finish_claiming(bdev, holder); @@ -847,7 +847,8 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder) if (unblock_events) disk_unblock_events(disk); return bdev; - +put_module: + module_put(disk->fops->owner); abort_claiming: if (mode & FMODE_EXCL) bd_abort_claiming(bdev, holder); @@ -956,6 +957,7 @@ void blkdev_put(struct block_device *bdev, fmode_t mode) blkdev_put_whole(bdev, mode); mutex_unlock(&disk->open_mutex); + module_put(disk->fops->owner); blkdev_put_no_open(bdev); } EXPORT_SYMBOL(blkdev_put); diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 88b1fce90520..663aabfeba18 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -640,7 +640,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, */ ret = blk_queue_enter(q, 0); if (ret) - return ret; + goto fail; rcu_read_lock(); spin_lock_irq(&q->queue_lock); @@ -676,13 +676,13 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, new_blkg = blkg_alloc(pos, q, GFP_KERNEL); if (unlikely(!new_blkg)) { ret = -ENOMEM; - goto fail; + goto fail_exit_queue; } if (radix_tree_preload(GFP_KERNEL)) { blkg_free(new_blkg); ret = -ENOMEM; - goto fail; + goto fail_exit_queue; } rcu_read_lock(); @@ -722,9 +722,10 @@ fail_preloaded: fail_unlock: spin_unlock_irq(&q->queue_lock); rcu_read_unlock(); +fail_exit_queue: + blk_queue_exit(q); fail: blkdev_put_no_open(bdev); - blk_queue_exit(q); /* * If queue was bypassing, we should retry. Do so after a * short msleep(). It isn't strictly necessary but queue diff --git a/block/blk-core.c b/block/blk-core.c index 9ee32f85d74e..1378d084c770 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -363,8 +363,10 @@ void blk_cleanup_queue(struct request_queue *q) blk_queue_flag_set(QUEUE_FLAG_DEAD, q); blk_sync_queue(q); - if (queue_is_mq(q)) + if (queue_is_mq(q)) { + blk_mq_cancel_work_sync(q); blk_mq_exit_queue(q); + } /* * In theory, request pool of sched_tags belongs to request queue. @@ -1015,6 +1017,7 @@ EXPORT_SYMBOL(submit_bio); /** * bio_poll - poll for BIO completions * @bio: bio to poll for + * @iob: batches of IO * @flags: BLK_POLL_* flags that control the behavior * * Poll for completions on queue associated with the bio. Returns number of diff --git a/block/blk-flush.c b/block/blk-flush.c index 8e364bda5166..1fce6d16e6d3 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -379,7 +379,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error) * @rq is being submitted. Analyze what needs to be done and put it on the * right queue. */ -bool blk_insert_flush(struct request *rq) +void blk_insert_flush(struct request *rq) { struct request_queue *q = rq->q; unsigned long fflags = q->queue_flags; /* may change, cache */ @@ -409,7 +409,7 @@ bool blk_insert_flush(struct request *rq) */ if (!policy) { blk_mq_end_request(rq, 0); - return true; + return; } BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */ @@ -420,8 +420,10 @@ bool blk_insert_flush(struct request *rq) * for normal execution. */ if ((policy & REQ_FSEQ_DATA) && - !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) - return false; + !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { + blk_mq_request_bypass_insert(rq, false, true); + return; + } /* * @rq should go through flush machinery. 
Mark it part of flush @@ -437,8 +439,6 @@ bool blk_insert_flush(struct request *rq) spin_lock_irq(&fq->mq_flush_lock); blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0); spin_unlock_irq(&fq->mq_flush_lock); - - return true; } /** diff --git a/block/blk-mq.c b/block/blk-mq.c index 3ab34c4f20da..8874a63ae952 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -860,13 +860,14 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob) if (iob->need_ts) __blk_mq_end_request_acct(rq, now); + rq_qos_done(rq->q, rq); + WRITE_ONCE(rq->state, MQ_RQ_IDLE); if (!refcount_dec_and_test(&rq->ref)) continue; blk_crypto_free_request(rq); blk_pm_mark_last_busy(rq); - rq_qos_done(rq->q, rq); if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) { if (cur_hctx) @@ -2543,8 +2544,7 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q, return NULL; } -static inline bool blk_mq_can_use_cached_rq(struct request *rq, - struct bio *bio) +static inline bool blk_mq_can_use_cached_rq(struct request *rq, struct bio *bio) { if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type) return false; @@ -2565,7 +2565,6 @@ static inline struct request *blk_mq_get_request(struct request_queue *q, bool checked = false; if (plug) { - rq = rq_list_peek(&plug->cached_rq); if (rq && rq->q == q) { if (unlikely(!submit_bio_checks(bio))) @@ -2587,12 +2586,14 @@ static inline struct request *blk_mq_get_request(struct request_queue *q, fallback: if (unlikely(bio_queue_enter(bio))) return NULL; - if (!checked && !submit_bio_checks(bio)) - return NULL; + if (unlikely(!checked && !submit_bio_checks(bio))) + goto out_put; rq = blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq); - if (!rq) - blk_queue_exit(q); - return rq; + if (rq) + return rq; +out_put: + blk_queue_exit(q); + return NULL; } /** @@ -2647,8 +2648,10 @@ void blk_mq_submit_bio(struct bio *bio) return; } - if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq)) + if (op_is_flush(bio->bi_opf)) { + blk_insert_flush(rq); return; + } if (plug && (q->nr_hw_queues == 1 || blk_mq_is_shared_tags(rq->mq_hctx->flags) || @@ -4417,6 +4420,19 @@ unsigned int blk_mq_rq_cpu(struct request *rq) } EXPORT_SYMBOL(blk_mq_rq_cpu); +void blk_mq_cancel_work_sync(struct request_queue *q) +{ + if (queue_is_mq(q)) { + struct blk_mq_hw_ctx *hctx; + int i; + + cancel_delayed_work_sync(&q->requeue_work); + + queue_for_each_hw_ctx(q, hctx, i) + cancel_delayed_work_sync(&hctx->run_work); + } +} + static int __init blk_mq_init(void) { int i; diff --git a/block/blk-mq.h b/block/blk-mq.h index 8acfa650f575..afcf9931a489 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -128,6 +128,8 @@ extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx); void blk_mq_free_plug_rqs(struct blk_plug *plug); void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); +void blk_mq_cancel_work_sync(struct request_queue *q); + void blk_mq_release(struct request_queue *q); static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q, diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index cef1f713370b..cd75b0f73dc6 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -791,16 +791,6 @@ static void blk_release_queue(struct kobject *kobj) blk_free_queue_stats(q->stats); - if (queue_is_mq(q)) { - struct blk_mq_hw_ctx *hctx; - int i; - - cancel_delayed_work_sync(&q->requeue_work); - - queue_for_each_hw_ctx(q, hctx, i) - cancel_delayed_work_sync(&hctx->run_work); - } - blk_exit_queue(q); blk_queue_free_zone_bitmaps(q); diff --git a/block/blk.h 
b/block/blk.h
index b4fed2033e48..ccde6e6f1736 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -271,7 +271,7 @@ void __blk_account_io_done(struct request *req, u64 now);
  */
 #define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

-bool blk_insert_flush(struct request *rq);
+void blk_insert_flush(struct request *rq);

 int elevator_switch_mq(struct request_queue *q,
 		struct elevator_type *new_e);
diff --git a/block/elevator.c b/block/elevator.c
index 1f39f6e8ebb9..19a78d5516ba 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -694,12 +694,18 @@ void elevator_init_mq(struct request_queue *q)
 	if (!e)
 		return;

+	/*
+	 * We are called before the disk is added, when there is no FS I/O,
+	 * so freezing the queue plus canceling dispatch work is enough to
+	 * drain any dispatch activity originating from passthrough requests;
+	 * there is then no need to quiesce the queue, which could add long
+	 * boot latency, especially when many disks are involved.
+	 */
 	blk_mq_freeze_queue(q);
-	blk_mq_quiesce_queue(q);
+	blk_mq_cancel_work_sync(q);

 	err = blk_mq_init_sched(q, e);

-	blk_mq_unquiesce_queue(q);
 	blk_mq_unfreeze_queue(q);

 	if (err) {
diff --git a/block/fops.c b/block/fops.c
index ad732a36f9b3..0da147edbd18 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -15,6 +15,7 @@
 #include <linux/falloc.h>
 #include <linux/suspend.h>
 #include <linux/fs.h>
+#include <linux/module.h>
 #include "blk.h"

 static inline struct inode *bdev_file_inode(struct file *file)
@@ -340,8 +341,7 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
 	} else {
 		ret = bio_iov_iter_get_pages(bio, iter);
 		if (unlikely(ret)) {
-			bio->bi_status = BLK_STS_IOERR;
-			bio_endio(bio);
+			bio_put(bio);
 			return ret;
 		}
 	}
diff --git a/block/genhd.c b/block/genhd.c
index c5392cc24d37..30362aeacac4 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1111,6 +1111,8 @@ static void disk_release(struct device *dev)
 	might_sleep();
 	WARN_ON_ONCE(disk_live(disk));

+	blk_mq_cancel_work_sync(disk->queue);
+
 	disk_release_events(disk);
 	kfree(disk->random);
 	xa_destroy(&disk->part_tbl);
diff --git a/block/ioprio.c b/block/ioprio.c
index 0e4ff245f2bf..6f01d35a5145 100644
--- a/block/ioprio.c
+++ b/block/ioprio.c
@@ -69,7 +69,14 @@ int ioprio_check_cap(int ioprio)

 	switch (class) {
 	case IOPRIO_CLASS_RT:
-		if (!capable(CAP_SYS_NICE) && !capable(CAP_SYS_ADMIN))
+		/*
+		 * Originally this only checked for CAP_SYS_ADMIN,
+		 * which was implicitly allowed for pid 0 by security
+		 * modules such as SELinux. Make sure we check
+		 * CAP_SYS_ADMIN first to avoid a denial/avc for
+		 * possibly missing CAP_SYS_NICE permission.
+ */ + if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_NICE)) return -EPERM; fallthrough; /* rt has prio field too */ @@ -213,6 +220,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who) pgrp = task_pgrp(current); else pgrp = find_vpid(who); + read_lock(&tasklist_lock); do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { tmpio = get_task_ioprio(p); if (tmpio < 0) @@ -222,6 +230,8 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who) else ret = ioprio_best(ret, tmpio); } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); + read_unlock(&tasklist_lock); + break; case IOPRIO_WHO_USER: uid = make_kuid(current_user_ns(), who); diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index a85c351589be..b62c87b8ce4a 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -998,7 +998,14 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf) { struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); - struct cpc_register_resource *reg = &cpc_desc->cpc_regs[reg_idx]; + struct cpc_register_resource *reg; + + if (!cpc_desc) { + pr_debug("No CPC descriptor for CPU:%d\n", cpunum); + return -ENODEV; + } + + reg = &cpc_desc->cpc_regs[reg_idx]; if (CPC_IN_PCC(reg)) { int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum); diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 7cd0009e7ff3..ef104809f27b 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -347,28 +347,3 @@ void acpi_device_notify_remove(struct device *dev) acpi_unbind_one(dev); } - -int acpi_dev_turn_off_if_unused(struct device *dev, void *not_used) -{ - struct acpi_device *adev = to_acpi_device(dev); - - /* - * Skip device objects with device IDs, because they may be in use even - * if they are not companions of any physical device objects. - */ - if (adev->pnp.type.hardware_id) - return 0; - - mutex_lock(&adev->physical_node_lock); - - /* - * Device objects without device IDs are not in use if they have no - * corresponding physical device objects. - */ - if (list_empty(&adev->physical_node_list)) - acpi_device_set_power(adev, ACPI_STATE_D3_COLD); - - mutex_unlock(&adev->physical_node_lock); - - return 0; -} diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 8fbdc172864b..d91b560e8867 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -117,7 +117,6 @@ bool acpi_device_is_battery(struct acpi_device *adev); bool acpi_device_is_first_physical_node(struct acpi_device *adev, const struct device *dev); int acpi_bus_register_early_device(int type); -int acpi_dev_turn_off_if_unused(struct device *dev, void *not_used); /* -------------------------------------------------------------------------- Device Matching and Notification diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index e312ebaed8db..2366f54d8e9c 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -1084,21 +1084,17 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, * Returns parent node of an ACPI device or data firmware node or %NULL if * not available. 
*/ -struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode) +static struct fwnode_handle * +acpi_node_get_parent(const struct fwnode_handle *fwnode) { if (is_acpi_data_node(fwnode)) { /* All data nodes have parent pointer so just return that */ return to_acpi_data_node(fwnode)->parent; } else if (is_acpi_device_node(fwnode)) { - acpi_handle handle, parent_handle; - - handle = to_acpi_device_node(fwnode)->handle; - if (ACPI_SUCCESS(acpi_get_parent(handle, &parent_handle))) { - struct acpi_device *adev; + struct device *dev = to_acpi_device_node(fwnode)->dev.parent; - if (!acpi_bus_get_device(parent_handle, &adev)) - return acpi_fwnode_handle(adev); - } + if (dev) + return acpi_fwnode_handle(to_acpi_device(dev)); } return NULL; diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index a50f1967c73d..2c80765670bc 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -2564,12 +2564,6 @@ int __init acpi_scan_init(void) } } - /* - * Make sure that power management resources are not blocked by ACPI - * device objects with no users. - */ - bus_for_each_dev(&acpi_bus_type, NULL, NULL, acpi_dev_turn_off_if_unused); - acpi_turn_off_unused_power_resources(); acpi_scan_initialized = true; diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 49fb74196d02..c75fb600740c 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2710,7 +2710,7 @@ static void binder_transaction(struct binder_proc *proc, t->from = thread; else t->from = NULL; - t->sender_euid = proc->cred->euid; + t->sender_euid = task_euid(proc->tsk); t->to_proc = target_proc; t->to_thread = target_thread; t->code = tr->code; @@ -4422,23 +4422,20 @@ static int binder_thread_release(struct binder_proc *proc, __release(&t->lock); /* - * If this thread used poll, make sure we remove the waitqueue - * from any epoll data structures holding it with POLLFREE. - * waitqueue_active() is safe to use here because we're holding - * the inner lock. + * If this thread used poll, make sure we remove the waitqueue from any + * poll data structures holding it. */ - if ((thread->looper & BINDER_LOOPER_STATE_POLL) && - waitqueue_active(&thread->wait)) { - wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE); - } + if (thread->looper & BINDER_LOOPER_STATE_POLL) + wake_up_pollfree(&thread->wait); binder_inner_proc_unlock(thread->proc); /* - * This is needed to avoid races between wake_up_poll() above and - * and ep_remove_waitqueue() called for other reasons (eg the epoll file - * descriptor being closed); ep_remove_waitqueue() holds an RCU read - * lock, so we can be sure it's done after calling synchronize_rcu(). + * This is needed to avoid races between wake_up_pollfree() above and + * someone else removing the last entry from the queue for other reasons + * (e.g. ep_remove_wait_queue() being called due to an epoll file + * descriptor being closed). Such other users hold an RCU read lock, so + * we can be sure they're done after we call synchronize_rcu(). 
*/ if (thread->looper & BINDER_LOOPER_STATE_POLL) synchronize_rcu(); diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index d60f34718b5d..1e1167e725a4 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -438,6 +438,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { /* AMD */ { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */ { PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */ + { PCI_VDEVICE(AMD, 0x7901), board_ahci_mobile }, /* AMD Green Sardine */ /* AMD is using RAID class only for ahci controllers */ { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci }, diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c index 50b56cd0039d..e9c7c07fd84c 100644 --- a/drivers/ata/ahci_ceva.c +++ b/drivers/ata/ahci_ceva.c @@ -94,6 +94,7 @@ struct ceva_ahci_priv { static unsigned int ceva_ahci_read_id(struct ata_device *dev, struct ata_taskfile *tf, u16 *id) { + __le16 *__id = (__le16 *)id; u32 err_mask; err_mask = ata_do_dev_read_id(dev, tf, id); @@ -103,7 +104,7 @@ static unsigned int ceva_ahci_read_id(struct ata_device *dev, * Since CEVA controller does not support device sleep feature, we * need to clear DEVSLP (bit 8) in word78 of the IDENTIFY DEVICE data. */ - id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8)); + __id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8)); return 0; } diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 8a6835bfd18a..f76b8418e6fb 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -2323,6 +2323,18 @@ int ahci_port_resume(struct ata_port *ap) EXPORT_SYMBOL_GPL(ahci_port_resume); #ifdef CONFIG_PM +static void ahci_handle_s2idle(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + u32 devslp; + + if (pm_suspend_via_firmware()) + return; + devslp = readl(port_mmio + PORT_DEVSLP); + if ((devslp & PORT_DEVSLP_ADSE)) + ata_msleep(ap, devslp_idle_timeout); +} + static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg) { const char *emsg = NULL; @@ -2336,6 +2348,9 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg) ata_port_freeze(ap); } + if (acpi_storage_d3(ap->host->dev)) + ahci_handle_s2idle(ap); + ahci_rpm_put_port(ap); return rc; } diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 8a0ccb190d76..aba0c67d1bd6 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2031,8 +2031,9 @@ retry: dev->horkage |= ATA_HORKAGE_NO_DMA_LOG; goto retry; } - ata_dev_err(dev, "Read log page 0x%02x failed, Emask 0x%x\n", - (unsigned int)page, err_mask); + ata_dev_err(dev, + "Read log 0x%02x page 0x%02x failed, Emask 0x%x\n", + (unsigned int)log, (unsigned int)page, err_mask); } return err_mask; @@ -2177,6 +2178,9 @@ static void ata_dev_config_ncq_prio(struct ata_device *dev) struct ata_port *ap = dev->link->ap; unsigned int err_mask; + if (!ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS)) + return; + err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SATA_SETTINGS, @@ -2453,7 +2457,8 @@ static void ata_dev_config_devslp(struct ata_device *dev) * Check device sleep capability. Get DevSlp timing variables * from SATA Settings page of Identify Device Data Log. 
*/ - if (!ata_id_has_devslp(dev->id)) + if (!ata_id_has_devslp(dev->id) || + !ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS)) return; err_mask = ata_read_log_page(dev, @@ -3915,6 +3920,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA }, /* Odd clown on sil3726/4726 PMPs */ { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, + /* Similar story with ASMedia 1092 */ + { "ASMT109x- Config", NULL, ATA_HORKAGE_DISABLE }, /* Weird ATAPI devices */ { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c index 4e88597aa9df..b9c77885b872 100644 --- a/drivers/ata/libata-sata.c +++ b/drivers/ata/libata-sata.c @@ -827,7 +827,7 @@ static ssize_t ata_scsi_lpm_show(struct device *dev, if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names)) return -EINVAL; - return snprintf(buf, PAGE_SIZE, "%s\n", + return sysfs_emit(buf, "%s\n", ata_lpm_policy_names[ap->target_lpm_policy]); } DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR, @@ -922,7 +922,7 @@ DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR, ata_ncq_prio_enable_show, ata_ncq_prio_enable_store); EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable); -struct attribute *ata_ncq_sdev_attrs[] = { +static struct attribute *ata_ncq_sdev_attrs[] = { &dev_attr_unload_heads.attr, &dev_attr_ncq_prio_enable.attr, &dev_attr_ncq_prio_supported.attr, diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c index 121635aa8c00..823c88622e34 100644 --- a/drivers/ata/pata_falcon.c +++ b/drivers/ata/pata_falcon.c @@ -55,14 +55,14 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc, /* Transfer multiple of 2 bytes */ if (rw == READ) { if (swap) - raw_insw_swapw((u16 *)data_addr, (u16 *)buf, words); + raw_insw_swapw(data_addr, (u16 *)buf, words); else - raw_insw((u16 *)data_addr, (u16 *)buf, words); + raw_insw(data_addr, (u16 *)buf, words); } else { if (swap) - raw_outsw_swapw((u16 *)data_addr, (u16 *)buf, words); + raw_outsw_swapw(data_addr, (u16 *)buf, words); else - raw_outsw((u16 *)data_addr, (u16 *)buf, words); + raw_outsw(data_addr, (u16 *)buf, words); } /* Transfer trailing byte, if any. 
*/ @@ -74,16 +74,16 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc, if (rw == READ) { if (swap) - raw_insw_swapw((u16 *)data_addr, (u16 *)pad, 1); + raw_insw_swapw(data_addr, (u16 *)pad, 1); else - raw_insw((u16 *)data_addr, (u16 *)pad, 1); + raw_insw(data_addr, (u16 *)pad, 1); *buf = pad[0]; } else { pad[0] = *buf; if (swap) - raw_outsw_swapw((u16 *)data_addr, (u16 *)pad, 1); + raw_outsw_swapw(data_addr, (u16 *)pad, 1); else - raw_outsw((u16 *)data_addr, (u16 *)pad, 1); + raw_outsw(data_addr, (u16 *)pad, 1); } words++; } diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index e5838b23c9e0..3b31a4f596d8 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -1394,6 +1394,14 @@ static int sata_fsl_init_controller(struct ata_host *host) return 0; } +static void sata_fsl_host_stop(struct ata_host *host) +{ + struct sata_fsl_host_priv *host_priv = host->private_data; + + iounmap(host_priv->hcr_base); + kfree(host_priv); +} + /* * scsi mid-layer and libata interface structures */ @@ -1426,6 +1434,8 @@ static struct ata_port_operations sata_fsl_ops = { .port_start = sata_fsl_port_start, .port_stop = sata_fsl_port_stop, + .host_stop = sata_fsl_host_stop, + .pmp_attach = sata_fsl_pmp_attach, .pmp_detach = sata_fsl_pmp_detach, }; @@ -1480,9 +1490,9 @@ static int sata_fsl_probe(struct platform_device *ofdev) host_priv->ssr_base = ssr_base; host_priv->csr_base = csr_base; - irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); - if (!irq) { - dev_err(&ofdev->dev, "invalid irq from platform\n"); + irq = platform_get_irq(ofdev, 0); + if (irq < 0) { + retval = irq; goto error_exit_with_cleanup; } host_priv->irq = irq; @@ -1557,10 +1567,6 @@ static int sata_fsl_remove(struct platform_device *ofdev) ata_host_detach(host); - irq_dispose_mapping(host_priv->irq); - iounmap(host_priv->hcr_base); - kfree(host_priv); - return 0; } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index a154cab6cd98..c3a36cfaa855 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -2103,7 +2103,7 @@ static int loop_control_remove(int idx) int ret; if (idx < 0) { - pr_warn("deleting an unspecified loop device is not supported.\n"); + pr_warn_once("deleting an unspecified loop device is not supported.\n"); return -EINVAL; } diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 97bf051a50ce..6ae38776e30e 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -316,7 +316,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req = bd->rq; struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); unsigned long flags; - unsigned int num; + int num; int qid = hctx->queue_num; bool notify = false; blk_status_t status; @@ -1049,7 +1049,6 @@ static struct virtio_driver virtio_blk = { .feature_table_size = ARRAY_SIZE(features), .feature_table_legacy = features_legacy, .feature_table_size_legacy = ARRAY_SIZE(features_legacy), - .suppress_used_validation = true, .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 08d7953ec5f1..25071126995b 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1853,12 +1853,14 @@ static const struct block_device_operations zram_devops = { .owner = THIS_MODULE }; +#ifdef CONFIG_ZRAM_WRITEBACK static const struct block_device_operations zram_wb_devops = { .open = zram_open, .submit_bio = zram_submit_bio, .swap_slot_free_notify = 
zram_slot_free_notify, .owner = THIS_MODULE }; +#endif static DEVICE_ATTR_WO(compact); static DEVICE_ATTR_RW(disksize); diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c index fb99e3727155..547e6e769546 100644 --- a/drivers/bus/mhi/core/pm.c +++ b/drivers/bus/mhi/core/pm.c @@ -881,7 +881,7 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl) } EXPORT_SYMBOL_GPL(mhi_pm_suspend); -int mhi_pm_resume(struct mhi_controller *mhi_cntrl) +static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force) { struct mhi_chan *itr, *tmp; struct device *dev = &mhi_cntrl->mhi_dev->dev; @@ -898,8 +898,12 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl) if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) return -EIO; - if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) - return -EINVAL; + if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) { + dev_warn(dev, "Resuming from non M3 state (%s)\n", + TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl))); + if (!force) + return -EINVAL; + } /* Notify clients about exiting LPM */ list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { @@ -940,8 +944,19 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl) return 0; } + +int mhi_pm_resume(struct mhi_controller *mhi_cntrl) +{ + return __mhi_pm_resume(mhi_cntrl, false); +} EXPORT_SYMBOL_GPL(mhi_pm_resume); +int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl) +{ + return __mhi_pm_resume(mhi_cntrl, true); +} +EXPORT_SYMBOL_GPL(mhi_pm_resume_force); + int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl) { int ret; diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c index 59a4896a8030..4c577a731709 100644 --- a/drivers/bus/mhi/pci_generic.c +++ b/drivers/bus/mhi/pci_generic.c @@ -20,7 +20,7 @@ #define MHI_PCI_DEFAULT_BAR_NUM 0 -#define MHI_POST_RESET_DELAY_MS 500 +#define MHI_POST_RESET_DELAY_MS 2000 #define HEALTH_CHECK_PERIOD (HZ * 2) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 5bfdf222d5f9..c53cc9868cd8 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -20,6 +20,7 @@ #include <linux/kernel.h> #include <linux/pagemap.h> #include <linux/agp_backend.h> +#include <linux/intel-iommu.h> #include <linux/delay.h> #include <asm/smp.h> #include "agp.h" diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c index ed3c4c42fc23..d68d05d5d383 100644 --- a/drivers/char/agp/parisc-agp.c +++ b/drivers/char/agp/parisc-agp.c @@ -281,7 +281,7 @@ agp_ioc_init(void __iomem *ioc_regs) return 0; } -static int +static int __init lba_find_capability(int cap) { struct _parisc_agp_info *info = &parisc_agp_info; @@ -366,7 +366,7 @@ fail: return error; } -static int +static int __init find_quicksilver(struct device *dev, void *data) { struct parisc_device **lba = data; @@ -378,7 +378,7 @@ find_quicksilver(struct device *dev, void *data) return 0; } -static int +static int __init parisc_agp_init(void) { extern struct sba_device *sba_list; diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index deed355422f4..c837d5416e0e 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -191,6 +191,8 @@ struct ipmi_user { struct work_struct remove_work; }; +static struct workqueue_struct *remove_work_wq; + static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index) __acquires(user->release_barrier) { @@ -1297,7 +1299,7 @@ static void free_user(struct kref *ref) struct ipmi_user *user = container_of(ref, struct 
ipmi_user, refcount); /* SRCU cleanup must happen in task context. */ - schedule_work(&user->remove_work); + queue_work(remove_work_wq, &user->remove_work); } static void _ipmi_destroy_user(struct ipmi_user *user) @@ -3918,9 +3920,11 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, /* We didn't find a user, deliver an error response. */ ipmi_inc_stat(intf, unhandled_commands); - msg->data[0] = ((netfn + 1) << 2) | (msg->rsp[4] & 0x3); - msg->data[1] = msg->rsp[2]; - msg->data[2] = msg->rsp[4] & ~0x3; + msg->data[0] = (netfn + 1) << 2; + msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */ + msg->data[1] = msg->rsp[1]; /* Addr */ + msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */ + msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */ msg->data[3] = cmd; msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE; msg->data_size = 5; @@ -4455,13 +4459,24 @@ return_unspecified: msg->rsp[2] = IPMI_ERR_UNSPECIFIED; msg->rsp_size = 3; } else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) { - /* commands must have at least 3 bytes, responses 4. */ - if (is_cmd && (msg->rsp_size < 3)) { + /* commands must have at least 4 bytes, responses 5. */ + if (is_cmd && (msg->rsp_size < 4)) { ipmi_inc_stat(intf, invalid_commands); goto out; } - if (!is_cmd && (msg->rsp_size < 4)) - goto return_unspecified; + if (!is_cmd && (msg->rsp_size < 5)) { + ipmi_inc_stat(intf, invalid_ipmb_responses); + /* Construct a valid error response. */ + msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */ + msg->rsp[0] |= (1 << 2); /* Make it a response */ + msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */ + msg->rsp[1] = msg->data[1]; /* Addr */ + msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */ + msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */ + msg->rsp[3] = msg->data[3]; /* Cmd */ + msg->rsp[4] = IPMI_ERR_UNSPECIFIED; + msg->rsp_size = 5; + } } else if ((msg->data_size >= 2) && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) && (msg->data[1] == IPMI_SEND_MSG_CMD) @@ -5031,6 +5046,7 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void) if (rv) { rv->done = free_smi_msg; rv->user_data = NULL; + rv->type = IPMI_SMI_MSG_TYPE_NORMAL; atomic_inc(&smi_msg_inuse_count); } return rv; @@ -5383,6 +5399,13 @@ static int ipmi_init_msghandler(void) atomic_notifier_chain_register(&panic_notifier_list, &panic_block); + remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq"); + if (!remove_work_wq) { + pr_err("unable to create ipmi-msghandler-remove-wq workqueue"); + rv = -ENOMEM; + goto out; + } + initialized = true; out: @@ -5408,6 +5431,8 @@ static void __exit cleanup_ipmi(void) int count; if (initialized) { + destroy_workqueue(remove_work_wq); + atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block); diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index a254512965eb..3667b4d731e7 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -932,8 +932,7 @@ static int bcm2835_clock_is_on(struct clk_hw *hw) static u32 bcm2835_clock_choose_div(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate, - bool round_up) + unsigned long parent_rate) { struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw); const struct bcm2835_clock_data *data = clock->data; @@ -945,10 +944,6 @@ static u32 bcm2835_clock_choose_div(struct clk_hw *hw, rem = do_div(temp, rate); div = temp; - - /* Round up and mask off the unused bits */ - if (round_up && ((div & unused_frac_mask) != 0 || rem != 0)) - div += unused_frac_mask + 1; div &= ~unused_frac_mask; /* different clamping limits apply for a 
mash clock */ @@ -1079,7 +1074,7 @@ static int bcm2835_clock_set_rate(struct clk_hw *hw, struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw); struct bcm2835_cprman *cprman = clock->cprman; const struct bcm2835_clock_data *data = clock->data; - u32 div = bcm2835_clock_choose_div(hw, rate, parent_rate, false); + u32 div = bcm2835_clock_choose_div(hw, rate, parent_rate); u32 ctl; spin_lock(&cprman->regs_lock); @@ -1130,7 +1125,7 @@ static unsigned long bcm2835_clock_choose_div_and_prate(struct clk_hw *hw, if (!(BIT(parent_idx) & data->set_rate_parent)) { *prate = clk_hw_get_rate(parent); - *div = bcm2835_clock_choose_div(hw, rate, *prate, true); + *div = bcm2835_clock_choose_div(hw, rate, *prate); *avgrate = bcm2835_clock_rate_from_divisor(clock, *prate, *div); @@ -1216,7 +1211,7 @@ static int bcm2835_clock_determine_rate(struct clk_hw *hw, rate = bcm2835_clock_choose_div_and_prate(hw, i, req->rate, &div, &prate, &avgrate); - if (rate > best_rate && rate <= req->rate) { + if (abs(req->rate - rate) < abs(req->rate - best_rate)) { best_parent = parent; best_prate = prate; best_rate = rate; diff --git a/drivers/clk/imx/clk-imx8qxp-lpcg.c b/drivers/clk/imx/clk-imx8qxp-lpcg.c index d3e905cf867d..b23758083ce5 100644 --- a/drivers/clk/imx/clk-imx8qxp-lpcg.c +++ b/drivers/clk/imx/clk-imx8qxp-lpcg.c @@ -370,7 +370,7 @@ static struct platform_driver imx8qxp_lpcg_clk_driver = { .probe = imx8qxp_lpcg_clk_probe, }; -builtin_platform_driver(imx8qxp_lpcg_clk_driver); +module_platform_driver(imx8qxp_lpcg_clk_driver); MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>"); MODULE_DESCRIPTION("NXP i.MX8QXP LPCG clock driver"); diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c index c53a688d8ccc..40a2efb1329b 100644 --- a/drivers/clk/imx/clk-imx8qxp.c +++ b/drivers/clk/imx/clk-imx8qxp.c @@ -308,7 +308,7 @@ static struct platform_driver imx8qxp_clk_driver = { }, .probe = imx8qxp_clk_probe, }; -builtin_platform_driver(imx8qxp_clk_driver); +module_platform_driver(imx8qxp_clk_driver); MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>"); MODULE_DESCRIPTION("NXP i.MX8QXP clock driver"); diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index eaedcceb766f..8f65b9bdafce 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -1429,6 +1429,15 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops); void clk_trion_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, const struct alpha_pll_config *config) { + /* + * If the bootloader left the PLL enabled it's likely that there are + * RCGs that will lock up if we disable the PLL below. 
+ */ + if (trion_pll_is_enabled(pll, regmap)) { + pr_debug("Trion PLL is already enabled, skipping configuration\n"); + return; + } + clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l); regmap_write(regmap, PLL_CAL_L_VAL(pll), TRION_PLL_CAL_VAL); clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha); diff --git a/drivers/clk/qcom/clk-regmap-mux.c b/drivers/clk/qcom/clk-regmap-mux.c index b2d00b451963..45d9cca28064 100644 --- a/drivers/clk/qcom/clk-regmap-mux.c +++ b/drivers/clk/qcom/clk-regmap-mux.c @@ -28,7 +28,7 @@ static u8 mux_get_parent(struct clk_hw *hw) val &= mask; if (mux->parent_map) - return qcom_find_src_index(hw, mux->parent_map, val); + return qcom_find_cfg_index(hw, mux->parent_map, val); return val; } diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c index 0932e019dd12..75f09e6e057e 100644 --- a/drivers/clk/qcom/common.c +++ b/drivers/clk/qcom/common.c @@ -69,6 +69,18 @@ int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src) } EXPORT_SYMBOL_GPL(qcom_find_src_index); +int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map, u8 cfg) +{ + int i, num_parents = clk_hw_get_num_parents(hw); + + for (i = 0; i < num_parents; i++) + if (cfg == map[i].cfg) + return i; + + return -ENOENT; +} +EXPORT_SYMBOL_GPL(qcom_find_cfg_index); + struct regmap * qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc) { diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h index bb39a7e106d8..9c8f7b798d9f 100644 --- a/drivers/clk/qcom/common.h +++ b/drivers/clk/qcom/common.h @@ -49,6 +49,8 @@ extern void qcom_pll_set_fsm_mode(struct regmap *m, u32 reg, u8 bias_count, u8 lock_count); extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src); +extern int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map, + u8 cfg); extern int qcom_cc_register_board_clk(struct device *dev, const char *path, const char *name, unsigned long rate); diff --git a/drivers/clk/qcom/gcc-sm6125.c b/drivers/clk/qcom/gcc-sm6125.c index 543cfab7561f..431b55bb0d2f 100644 --- a/drivers/clk/qcom/gcc-sm6125.c +++ b/drivers/clk/qcom/gcc-sm6125.c @@ -1121,7 +1121,7 @@ static struct clk_rcg2 gcc_sdcc1_apps_clk_src = { .name = "gcc_sdcc1_apps_clk_src", .parent_data = gcc_parent_data_1, .num_parents = ARRAY_SIZE(gcc_parent_data_1), - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; @@ -1143,7 +1143,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = { .name = "gcc_sdcc1_ice_core_clk_src", .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), - .ops = &clk_rcg2_floor_ops, + .ops = &clk_rcg2_ops, }, }; diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c index d52f976dc875..d5cb372f0901 100644 --- a/drivers/clk/versatile/clk-icst.c +++ b/drivers/clk/versatile/clk-icst.c @@ -543,8 +543,8 @@ static void __init of_syscon_icst_setup(struct device_node *np) regclk = icst_clk_setup(NULL, &icst_desc, name, parent_name, map, ctype); if (IS_ERR(regclk)) { - kfree(name); pr_err("error setting up syscon ICST clock %s\n", name); + kfree(name); return; } of_clk_add_provider(np, of_clk_src_simple_get, regclk); diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 9a04eacc4412..1ecd52f903b8 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -394,8 +394,13 @@ EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround); static atomic_t 
timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);

-static void erratum_set_next_event_generic(const int access, unsigned long evt,
-					   struct clock_event_device *clk)
+/*
+ * Force the inlining of this function so that the register accesses
+ * can be themselves correctly inlined.
+ */
+static __always_inline
+void erratum_set_next_event_generic(const int access, unsigned long evt,
+				    struct clock_event_device *clk)
 {
 	unsigned long ctrl;
 	u64 cval;
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 3819ef5b7098..3245eb0c602d 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -47,7 +47,7 @@ static int __init timer_get_base_and_rate(struct device_node *np,
 		pr_warn("pclk for %pOFn is present, but could not be activated\n",
 			np);

-	if (!of_property_read_u32(np, "clock-freq", rate) &&
+	if (!of_property_read_u32(np, "clock-freq", rate) ||
 	    !of_property_read_u32(np, "clock-frequency", rate))
 		return 0;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index e338d2f010fe..096c3848fa41 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1004,10 +1004,9 @@ static struct kobj_type ktype_cpufreq = {
 	.release	= cpufreq_sysfs_release,
 };

-static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
+static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
+				struct device *dev)
 {
-	struct device *dev = get_cpu_device(cpu);
-
 	if (unlikely(!dev))
 		return;

@@ -1296,8 +1295,9 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)

 	if (policy->max_freq_req) {
 		/*
-		 * CPUFREQ_CREATE_POLICY notification is sent only after
-		 * successfully adding max_freq_req request.
+		 * Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
+		 * notification, since CPUFREQ_CREATE_POLICY notification was
+		 * sent after adding max_freq_req earlier.
 		 */
 		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 					     CPUFREQ_REMOVE_POLICY, policy);
@@ -1391,7 +1391,7 @@ static int cpufreq_online(unsigned int cpu)
 	if (new_policy) {
 		for_each_cpu(j, policy->related_cpus) {
 			per_cpu(cpufreq_cpu_data, j) = policy;
-			add_cpu_dev_symlink(policy, j);
+			add_cpu_dev_symlink(policy, j, get_cpu_device(j));
 		}

 		policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
@@ -1565,7 +1565,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 	/* Create sysfs link on CPU registration */
 	policy = per_cpu(cpufreq_cpu_data, cpu);
 	if (policy)
-		add_cpu_dev_symlink(policy, cpu);
+		add_cpu_dev_symlink(policy, cpu, dev);

 	return 0;
 }
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 815df3daae9d..dec2a5649ac1 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -338,6 +338,8 @@ static void intel_pstste_sched_itmt_work_fn(struct work_struct *work)

 static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn);

+#define CPPC_MAX_PERF	U8_MAX
+
 static void intel_pstate_set_itmt_prio(int cpu)
 {
 	struct cppc_perf_caps cppc_perf;
@@ -349,6 +351,14 @@ static void intel_pstate_set_itmt_prio(int cpu)
 		return;

 	/*
+	 * On some systems with overclocking enabled, CPPC.highest_perf is
+	 * hardcoded to 0xff. In that case CPPC.highest_perf cannot be used to
+	 * enable ITMT, so look at MSR_HWP_CAPABILITIES bits [8:0] to decide.
+ */
+	if (cppc_perf.highest_perf == CPPC_MAX_PERF)
+		cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));
+
+	/*
 	 * The priorities can be set regardless of whether or not
 	 * sched_set_itmt_support(true) has been called and it is valid to
 	 * update them at any time after it has been called.
@@ -1006,6 +1016,12 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
 		 */
 		value &= ~GENMASK_ULL(31, 24);
 		value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
+		/*
+		 * However, make sure that EPP will be set to "performance" when
+		 * the CPU is brought back online again and the "performance"
+		 * scaling algorithm is still in effect.
+		 */
+		cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
 	}

 	/*
@@ -2353,6 +2369,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
 	X86_MATCH(BROADWELL_D, core_funcs),
 	X86_MATCH(BROADWELL_X, core_funcs),
 	X86_MATCH(SKYLAKE_X, core_funcs),
+	X86_MATCH(ICELAKE_X, core_funcs),
 	{}
 };

diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 1ef021273a06..511805dbeb75 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_DMABUF_SYSFS_STATS) += dma-buf-sysfs-stats.o
 dmabuf_selftests-y := \
 	selftest.o \
 	st-dma-fence.o \
-	st-dma-fence-chain.o
+	st-dma-fence-chain.o \
+	st-dma-resv.o

 obj-$(CONFIG_DMABUF_SELFTESTS) += dmabuf_selftests.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 6437b2e978fb..602b12d7470d 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -299,10 +299,8 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)

 /**
  * dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
- * The name of the dma-buf buffer can only be set when the dma-buf is not
- * attached to any devices. It could theoritically support changing the
- * name of the dma-buf if the same piece of memory is used for multiple
- * purpose between different devices.
+ * It could support changing the name of the dma-buf if the same
+ * piece of memory is used for multiple purposes between different devices.
 *
 * @dmabuf: [in] dmabuf buffer that will be renamed.
 * @buf: [in] A piece of userspace memory that contains the name of
@@ -315,25 +313,16 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
 {
 	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
-	long ret = 0;

 	if (IS_ERR(name))
 		return PTR_ERR(name);

-	dma_resv_lock(dmabuf->resv, NULL);
-	if (!list_empty(&dmabuf->attachments)) {
-		ret = -EBUSY;
-		kfree(name);
-		goto out_unlock;
-	}
 	spin_lock(&dmabuf->name_lock);
 	kfree(dmabuf->name);
 	dmabuf->name = name;
 	spin_unlock(&dmabuf->name_lock);

-out_unlock:
-	dma_resv_unlock(dmabuf->resv);
-	return ret;
+	return 0;
 }

 static long dma_buf_ioctl(struct file *file,
@@ -1058,8 +1047,8 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF)
 *
 * Interfaces::
 *
- *   void \*dma_buf_vmap(struct dma_buf \*dmabuf)
- *   void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
+ *   void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct dma_buf_map \*map)
+ *   void dma_buf_vunmap(struct dma_buf \*dmabuf, struct dma_buf_map \*map)
 *
 * The vmap call can fail if there is no vmap support in the exporter, or if
 * it runs out of vmalloc space.
Note that the dma-buf layer keeps a reference
@@ -1338,8 +1327,6 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 {
 	struct dma_buf *buf_obj;
 	struct dma_buf_attachment *attach_obj;
-	struct dma_resv_iter cursor;
-	struct dma_fence *fence;
 	int count = 0, attach_count;
 	size_t size = 0;
 	int ret;
@@ -1370,14 +1357,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 			   buf_obj->name ?: "");
 		spin_unlock(&buf_obj->name_lock);

-		dma_resv_for_each_fence(&cursor, buf_obj->resv, true, fence) {
-			seq_printf(s, "\t%s fence: %s %s %ssignalled\n",
-				   dma_resv_iter_is_exclusive(&cursor) ?
-					"Exclusive" : "Shared",
-				   fence->ops->get_driver_name(fence),
-				   fence->ops->get_timeline_name(fence),
-				   dma_fence_is_signaled(fence) ? "" : "un");
-		}
+		dma_resv_describe(buf_obj->resv, s);

 		seq_puts(s, "\tAttached Devices:\n");
 		attach_count = 0;
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index d3fbd950be94..3e07f961e2f3 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -104,7 +104,11 @@ static bool dma_fence_array_signaled(struct dma_fence *fence)
 {
 	struct dma_fence_array *array = to_dma_fence_array(fence);

-	return atomic_read(&array->num_pending) <= 0;
+	if (atomic_read(&array->num_pending) > 0)
+		return false;
+
+	dma_fence_array_clear_pending_error(array);
+	return true;
 }

 static void dma_fence_array_release(struct dma_fence *fence)
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 1e82ecd443fa..066400ed8841 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -15,6 +15,7 @@
 #include <linux/atomic.h>
 #include <linux/dma-fence.h>
 #include <linux/sched/signal.h>
+#include <linux/seq_file.h>

 #define CREATE_TRACE_POINTS
 #include <trace/events/dma_fence.h>
@@ -908,6 +909,22 @@ err_free_cb:
 EXPORT_SYMBOL(dma_fence_wait_any_timeout);

 /**
+ * dma_fence_describe - Dump fence description into seq_file
+ * @fence: the fence to describe
+ * @seq: the seq_file to put the textual description into
+ *
+ * Dump a textual description of the fence and its state into the seq_file.
+ */
+void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
+{
+	seq_printf(seq, "%s %s seq %llu %ssignalled\n",
+		   fence->ops->get_driver_name(fence),
+		   fence->ops->get_timeline_name(fence), fence->seqno,
+		   dma_fence_is_signaled(fence) ? "" : "un");
+}
+EXPORT_SYMBOL(dma_fence_describe);
+
+/**
 * dma_fence_init - Initialize a custom fence.
* @fence: the fence to initialize
 * @ops: the dma_fence_ops for operations on this fence
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 9eb2baa387d4..4deea75c0b9c 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -38,6 +38,7 @@
 #include <linux/mm.h>
 #include <linux/sched/mm.h>
 #include <linux/mmu_notifier.h>
+#include <linux/seq_file.h>

 /**
  * DOC: Reservation Object Overview
@@ -304,8 +305,7 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 	if (old)
 		i = old->shared_count;

-	if (fence)
-		dma_fence_get(fence);
+	dma_fence_get(fence);

 	write_seqcount_begin(&obj->seq);
 	/* write_seqcount_begin provides the necessary memory barrier */
@@ -666,6 +666,28 @@ bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
 }
 EXPORT_SYMBOL_GPL(dma_resv_test_signaled);

+/**
+ * dma_resv_describe - Dump description of the resv object into seq_file
+ * @obj: the reservation object
+ * @seq: the seq_file to dump the description into
+ *
+ * Dump a textual description of the fences inside a dma_resv object into the
+ * seq_file.
+ */
+void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
+{
+	struct dma_resv_iter cursor;
+	struct dma_fence *fence;
+
+	dma_resv_for_each_fence(&cursor, obj, true, fence) {
+		seq_printf(seq, "\t%s fence:",
+			   dma_resv_iter_is_exclusive(&cursor) ?
+				"Exclusive" : "Shared");
+		dma_fence_describe(fence, seq);
+	}
+}
+EXPORT_SYMBOL_GPL(dma_resv_describe);
+
 #if IS_ENABLED(CONFIG_LOCKDEP)
 static int __init dma_resv_lockdep(void)
 {
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index f57a39ddd063..ab7fd896d2c4 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -290,7 +290,7 @@ static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
 	int i;

 	table = &buffer->sg_table;
-	for_each_sg(table->sgl, sg, table->nents, i) {
+	for_each_sgtable_sg(table, sg, i) {
 		struct page *page = sg_page(sg);

 		__free_pages(page, compound_order(page));
diff --git a/drivers/dma-buf/selftests.h b/drivers/dma-buf/selftests.h
index bc8cea67bf1e..97d73aaa31da 100644
--- a/drivers/dma-buf/selftests.h
+++ b/drivers/dma-buf/selftests.h
@@ -12,3 +12,4 @@
 selftest(sanitycheck, __sanitycheck__) /* keep first (igt selfcheck) */
 selftest(dma_fence, dma_fence)
 selftest(dma_fence_chain, dma_fence_chain)
+selftest(dma_resv, dma_resv)
diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c
new file mode 100644
index 000000000000..bc32b3eedcb6
--- /dev/null
+++ b/drivers/dma-buf/st-dma-resv.c
@@ -0,0 +1,371 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+* Copyright © 2019 Intel Corporation
+* Copyright © 2021 Advanced Micro Devices, Inc.
+*/ + +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/dma-resv.h> + +#include "selftest.h" + +static struct spinlock fence_lock; + +static const char *fence_name(struct dma_fence *f) +{ + return "selftest"; +} + +static const struct dma_fence_ops fence_ops = { + .get_driver_name = fence_name, + .get_timeline_name = fence_name, +}; + +static struct dma_fence *alloc_fence(void) +{ + struct dma_fence *f; + + f = kmalloc(sizeof(*f), GFP_KERNEL); + if (!f) + return NULL; + + dma_fence_init(f, &fence_ops, &fence_lock, 0, 0); + return f; +} + +static int sanitycheck(void *arg) +{ + struct dma_resv resv; + struct dma_fence *f; + int r; + + f = alloc_fence(); + if (!f) + return -ENOMEM; + + dma_fence_signal(f); + dma_fence_put(f); + + dma_resv_init(&resv); + r = dma_resv_lock(&resv, NULL); + if (r) + pr_err("Resv locking failed\n"); + else + dma_resv_unlock(&resv); + dma_resv_fini(&resv); + return r; +} + +static int test_signaling(void *arg, bool shared) +{ + struct dma_resv resv; + struct dma_fence *f; + int r; + + f = alloc_fence(); + if (!f) + return -ENOMEM; + + dma_resv_init(&resv); + r = dma_resv_lock(&resv, NULL); + if (r) { + pr_err("Resv locking failed\n"); + goto err_free; + } + + if (shared) { + r = dma_resv_reserve_shared(&resv, 1); + if (r) { + pr_err("Resv shared slot allocation failed\n"); + goto err_unlock; + } + + dma_resv_add_shared_fence(&resv, f); + } else { + dma_resv_add_excl_fence(&resv, f); + } + + if (dma_resv_test_signaled(&resv, shared)) { + pr_err("Resv unexpectedly signaled\n"); + r = -EINVAL; + goto err_unlock; + } + dma_fence_signal(f); + if (!dma_resv_test_signaled(&resv, shared)) { + pr_err("Resv not reporting signaled\n"); + r = -EINVAL; + goto err_unlock; + } +err_unlock: + dma_resv_unlock(&resv); +err_free: + dma_resv_fini(&resv); + dma_fence_put(f); + return r; +} + +static int test_excl_signaling(void *arg) +{ + return test_signaling(arg, false); +} + +static int test_shared_signaling(void *arg) +{ + return test_signaling(arg, true); +} + +static int test_for_each(void *arg, bool shared) +{ + struct dma_resv_iter cursor; + struct dma_fence *f, *fence; + struct dma_resv resv; + int r; + + f = alloc_fence(); + if (!f) + return -ENOMEM; + + dma_resv_init(&resv); + r = dma_resv_lock(&resv, NULL); + if (r) { + pr_err("Resv locking failed\n"); + goto err_free; + } + + if (shared) { + r = dma_resv_reserve_shared(&resv, 1); + if (r) { + pr_err("Resv shared slot allocation failed\n"); + goto err_unlock; + } + + dma_resv_add_shared_fence(&resv, f); + } else { + dma_resv_add_excl_fence(&resv, f); + } + + r = -ENOENT; + dma_resv_for_each_fence(&cursor, &resv, shared, fence) { + if (!r) { + pr_err("More than one fence found\n"); + r = -EINVAL; + goto err_unlock; + } + if (f != fence) { + pr_err("Unexpected fence\n"); + r = -EINVAL; + goto err_unlock; + } + if (dma_resv_iter_is_exclusive(&cursor) != !shared) { + pr_err("Unexpected fence usage\n"); + r = -EINVAL; + goto err_unlock; + } + r = 0; + } + if (r) { + pr_err("No fence found\n"); + goto err_unlock; + } + dma_fence_signal(f); +err_unlock: + dma_resv_unlock(&resv); +err_free: + dma_resv_fini(&resv); + dma_fence_put(f); + return r; +} + +static int test_excl_for_each(void *arg) +{ + return test_for_each(arg, false); +} + +static int test_shared_for_each(void *arg) +{ + return test_for_each(arg, true); +} + +static int test_for_each_unlocked(void *arg, bool shared) +{ + struct dma_resv_iter cursor; + struct dma_fence *f, *fence; + struct dma_resv resv; + int r; + + f = alloc_fence(); + if (!f) + 
return -ENOMEM;
+
+	dma_resv_init(&resv);
+	r = dma_resv_lock(&resv, NULL);
+	if (r) {
+		pr_err("Resv locking failed\n");
+		goto err_free;
+	}
+
+	if (shared) {
+		r = dma_resv_reserve_shared(&resv, 1);
+		if (r) {
+			pr_err("Resv shared slot allocation failed\n");
+			dma_resv_unlock(&resv);
+			goto err_free;
+		}
+
+		dma_resv_add_shared_fence(&resv, f);
+	} else {
+		dma_resv_add_excl_fence(&resv, f);
+	}
+	dma_resv_unlock(&resv);
+
+	r = -ENOENT;
+	dma_resv_iter_begin(&cursor, &resv, shared);
+	dma_resv_for_each_fence_unlocked(&cursor, fence) {
+		if (!r) {
+			pr_err("More than one fence found\n");
+			r = -EINVAL;
+			goto err_iter_end;
+		}
+		if (!dma_resv_iter_is_restarted(&cursor)) {
+			pr_err("No restart flag\n");
+			goto err_iter_end;
+		}
+		if (f != fence) {
+			pr_err("Unexpected fence\n");
+			r = -EINVAL;
+			goto err_iter_end;
+		}
+		if (dma_resv_iter_is_exclusive(&cursor) != !shared) {
+			pr_err("Unexpected fence usage\n");
+			r = -EINVAL;
+			goto err_iter_end;
+		}
+
+		/* We use r as state here */
+		if (r == -ENOENT) {
+			r = -EINVAL;
+			/* That should trigger a restart */
+			cursor.seq--;
+		} else if (r == -EINVAL) {
+			r = 0;
+		}
+	}
+	if (r)
+		pr_err("No fence found\n");
+err_iter_end:
+	dma_resv_iter_end(&cursor);
+	dma_fence_signal(f);
+err_free:
+	dma_resv_fini(&resv);
+	dma_fence_put(f);
+	return r;
+}
+
+static int test_excl_for_each_unlocked(void *arg)
+{
+	return test_for_each_unlocked(arg, false);
+}
+
+static int test_shared_for_each_unlocked(void *arg)
+{
+	return test_for_each_unlocked(arg, true);
+}
+
+static int test_get_fences(void *arg, bool shared)
+{
+	struct dma_fence *f, *excl = NULL, **fences = NULL;
+	struct dma_resv resv;
+	int r, i;
+
+	f = alloc_fence();
+	if (!f)
+		return -ENOMEM;
+
+	dma_resv_init(&resv);
+	r = dma_resv_lock(&resv, NULL);
+	if (r) {
+		pr_err("Resv locking failed\n");
+		goto err_resv;
+	}
+
+	if (shared) {
+		r = dma_resv_reserve_shared(&resv, 1);
+		if (r) {
+			pr_err("Resv shared slot allocation failed\n");
+			dma_resv_unlock(&resv);
+			goto err_resv;
+		}
+
+		dma_resv_add_shared_fence(&resv, f);
+	} else {
+		dma_resv_add_excl_fence(&resv, f);
+	}
+	dma_resv_unlock(&resv);
+
+	r = dma_resv_get_fences(&resv, &excl, &i, &fences);
+	if (r) {
+		pr_err("get_fences failed\n");
+		goto err_free;
+	}
+
+	if (shared) {
+		if (excl != NULL) {
+			pr_err("get_fences returned unexpected excl fence\n");
+			goto err_free;
+		}
+		if (i != 1 || fences[0] != f) {
+			pr_err("get_fences returned unexpected shared fence\n");
+			goto err_free;
+		}
+	} else {
+		if (excl != f) {
+			pr_err("get_fences returned unexpected excl fence\n");
+			goto err_free;
+		}
+		if (i != 0) {
+			pr_err("get_fences returned unexpected shared fence\n");
+			goto err_free;
+		}
+	}
+
+	dma_fence_signal(f);
+err_free:
+	dma_fence_put(excl);
+	while (i--)
+		dma_fence_put(fences[i]);
+	kfree(fences);
+err_resv:
+	dma_resv_fini(&resv);
+	dma_fence_put(f);
+	return r;
+}
+
+static int test_excl_get_fences(void *arg)
+{
+	return test_get_fences(arg, false);
+}
+
+static int test_shared_get_fences(void *arg)
+{
+	return test_get_fences(arg, true);
+}
+
+int dma_resv(void)
+{
+	static const struct subtest tests[] = {
+		SUBTEST(sanitycheck),
+		SUBTEST(test_excl_signaling),
+		SUBTEST(test_shared_signaling),
+		SUBTEST(test_excl_for_each),
+		SUBTEST(test_shared_for_each),
+		SUBTEST(test_excl_for_each_unlocked),
+		SUBTEST(test_shared_for_each_unlocked),
+		SUBTEST(test_excl_get_fences),
+		SUBTEST(test_shared_get_fences),
+	};
+
+	spin_lock_init(&fence_lock);
+	return subtests(tests, NULL);
+}
diff --git a/drivers/firmware/arm_scmi/base.c
b/drivers/firmware/arm_scmi/base.c index de416f9e7921..f5219334fd3a 100644 --- a/drivers/firmware/arm_scmi/base.c +++ b/drivers/firmware/arm_scmi/base.c @@ -34,6 +34,12 @@ struct scmi_msg_resp_base_attributes { __le16 reserved; }; +struct scmi_msg_resp_base_discover_agent { + __le32 agent_id; + u8 name[SCMI_MAX_STR_SIZE]; +}; + + struct scmi_msg_base_error_notify { __le32 event_control; #define BASE_TP_NOTIFY_ALL BIT(0) @@ -225,18 +231,21 @@ static int scmi_base_discover_agent_get(const struct scmi_protocol_handle *ph, int id, char *name) { int ret; + struct scmi_msg_resp_base_discover_agent *agent_info; struct scmi_xfer *t; ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_AGENT, - sizeof(__le32), SCMI_MAX_STR_SIZE, &t); + sizeof(__le32), sizeof(*agent_info), &t); if (ret) return ret; put_unaligned_le32(id, t->tx.buf); ret = ph->xops->do_xfer(ph, t); - if (!ret) - strlcpy(name, t->rx.buf, SCMI_MAX_STR_SIZE); + if (!ret) { + agent_info = t->rx.buf; + strlcpy(name, agent_info->name, SCMI_MAX_STR_SIZE); + } ph->xops->xfer_put(ph, t); diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c index 4371fdcd5a73..581d34c95769 100644 --- a/drivers/firmware/arm_scmi/scmi_pm_domain.c +++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c @@ -138,9 +138,7 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev) scmi_pd_data->domains = domains; scmi_pd_data->num_domains = num_domains; - of_genpd_add_provider_onecell(np, scmi_pd_data); - - return 0; + return of_genpd_add_provider_onecell(np, scmi_pd_data); } static const struct scmi_device_id scmi_id_table[] = { diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c index 308471586381..cdbb287bd8bc 100644 --- a/drivers/firmware/arm_scmi/sensors.c +++ b/drivers/firmware/arm_scmi/sensors.c @@ -637,7 +637,7 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph, if (ret) return ret; - put_unaligned_le32(cpu_to_le32(sensor_id), t->tx.buf); + put_unaligned_le32(sensor_id, t->tx.buf); ret = ph->xops->do_xfer(ph, t); if (!ret) { struct sensors_info *si = ph->get_priv(ph); diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c index 11e8efb71375..87039c5c03fd 100644 --- a/drivers/firmware/arm_scmi/virtio.c +++ b/drivers/firmware/arm_scmi/virtio.c @@ -82,7 +82,8 @@ static bool scmi_vio_have_vq_rx(struct virtio_device *vdev) } static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch, - struct scmi_vio_msg *msg) + struct scmi_vio_msg *msg, + struct device *dev) { struct scatterlist sg_in; int rc; @@ -94,8 +95,7 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch, rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC); if (rc) - dev_err_once(vioch->cinfo->dev, - "failed to add to virtqueue (%d)\n", rc); + dev_err_once(dev, "failed to add to virtqueue (%d)\n", rc); else virtqueue_kick(vioch->vqueue); @@ -108,7 +108,7 @@ static void scmi_finalize_message(struct scmi_vio_channel *vioch, struct scmi_vio_msg *msg) { if (vioch->is_rx) { - scmi_vio_feed_vq_rx(vioch, msg); + scmi_vio_feed_vq_rx(vioch, msg, vioch->cinfo->dev); } else { /* Here IRQs are assumed to be already disabled by the caller */ spin_lock(&vioch->lock); @@ -269,7 +269,7 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, list_add_tail(&msg->list, &vioch->free_list); spin_unlock_irqrestore(&vioch->lock, flags); } else { - scmi_vio_feed_vq_rx(vioch, msg); + scmi_vio_feed_vq_rx(vioch, msg, cinfo->dev); } } diff --git 
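Two complementary endianness fixes meet here: sensors.c above drops a redundant cpu_to_le32(), while voltage.c just below adds a missing one. A side-by-side sketch of the rule of thumb (t, cmd and the id variables stand in for the real patch context):

#include <asm/unaligned.h>

/* put_unaligned_le32() takes a CPU-endian value and performs the
 * little-endian conversion itself: */
put_unaligned_le32(sensor_id, t->tx.buf);               /* correct */
put_unaligned_le32(cpu_to_le32(sensor_id), t->tx.buf);  /* double swap on BE */

/* A plain assignment to a __le32 wire field converts nothing, so
 * there the explicit swap is required: */
cmd->level_index = cpu_to_le32(desc_index);             /* correct */
cmd->level_index = desc_index;                          /* raw CPU order on BE */

Both mistakes are invisible on little-endian hosts, where cpu_to_le32() is the identity, which is how they survive x86 and arm64 testing. The soc_id.c hunk nearby guards a different conversion hazard: res.a0 is an unsigned long, so it is cast to int before being compared with the negative SMCCC_RET_NOT_SUPPORTED, letting a 32-bit 0xffffffff return value match -1 on 64-bit machines.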
a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c index a5048956a0be..ac08e819088b 100644 --- a/drivers/firmware/arm_scmi/voltage.c +++ b/drivers/firmware/arm_scmi/voltage.c @@ -156,7 +156,7 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph, int cnt; cmd->domain_id = cpu_to_le32(v->id); - cmd->level_index = desc_index; + cmd->level_index = cpu_to_le32(desc_index); ret = ph->xops->do_xfer(ph, tl); if (ret) break; diff --git a/drivers/firmware/smccc/soc_id.c b/drivers/firmware/smccc/soc_id.c index 581aa5e9b077..dd7c3d5e8b0b 100644 --- a/drivers/firmware/smccc/soc_id.c +++ b/drivers/firmware/smccc/soc_id.c @@ -50,7 +50,7 @@ static int __init smccc_soc_init(void) arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_ARCH_SOC_ID, &res); - if (res.a0 == SMCCC_RET_NOT_SUPPORTED) { + if ((int)res.a0 == SMCCC_RET_NOT_SUPPORTED) { pr_info("ARCH_SOC_ID not implemented, skipping ....\n"); return 0; } diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 072ed610f9c6..60d9374c72c0 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -523,6 +523,7 @@ config GPIO_REG config GPIO_ROCKCHIP tristate "Rockchip GPIO support" depends on ARCH_ROCKCHIP || COMPILE_TEST + select GENERIC_IRQ_CHIP select GPIOLIB_IRQCHIP default ARCH_ROCKCHIP help diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c index aeec4bf0b625..84f96b78f32a 100644 --- a/drivers/gpio/gpio-virtio.c +++ b/drivers/gpio/gpio-virtio.c @@ -434,7 +434,7 @@ static void virtio_gpio_event_vq(struct virtqueue *vq) ret = generic_handle_domain_irq(vgpio->gc.irq.domain, gpio); if (ret) dev_err(dev, "failed to handle interrupt: %d\n", ret); - }; + } } static void virtio_gpio_request_vq(struct virtqueue *vq) diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 0039df26854b..b1f22e457fd0 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -8,6 +8,7 @@ menuconfig DRM tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)" depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && HAS_DMA + select DRM_NOMODESET select DRM_PANEL_ORIENTATION_QUIRKS select HDMI select FB_CMDLINE @@ -211,20 +212,13 @@ config DRM_TTM_HELPER Helpers for ttm-based gem objects config DRM_GEM_CMA_HELPER - bool + tristate depends on DRM help Choose this if you need the GEM CMA helper functions -config DRM_KMS_CMA_HELPER - bool - depends on DRM - select DRM_GEM_CMA_HELPER - help - Choose this if you need the KMS CMA helper functions - config DRM_GEM_SHMEM_HELPER - bool + tristate depends on DRM && MMU help Choose this if you need the GEM shmem helper functions @@ -394,6 +388,8 @@ source "drivers/gpu/drm/xlnx/Kconfig" source "drivers/gpu/drm/gud/Kconfig" +source "drivers/gpu/drm/sprd/Kconfig" + config DRM_HYPERV tristate "DRM Support for Hyper-V synthetic video device" depends on DRM && PCI && MMU && HYPERV @@ -492,6 +488,15 @@ config DRM_EXPORT_FOR_TESTS config DRM_PANEL_ORIENTATION_QUIRKS tristate +# Separate option because nomodeset parameter is global and expected built-in +config DRM_NOMODESET + bool + default n + config DRM_LIB_RANDOM bool default n + +config DRM_PRIVACY_SCREEN + bool + default n diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 0dff40bb863c..301a44dc18e3 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -4,37 +4,44 @@ # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 
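The DRM_NOMODESET option added above exists, as its Kconfig comment says, only because the nomodeset parameter is global and must be built in even when DRM itself is modular; the Makefile below maps it to drm_nomodeset.o. The diff does not show that file, but a plausible minimal shape for it is a __setup() hook flipping a flag that drivers can query (the accessor name drm_firmware_drivers_only is an assumption, not confirmed by this diff):

// SPDX-License-Identifier: MIT

#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>

static bool drm_nomodeset;

/* Queried by DRM drivers before doing any modesetting work. */
bool drm_firmware_drivers_only(void)
{
	return drm_nomodeset;
}
EXPORT_SYMBOL(drm_firmware_drivers_only);

static int __init disable_modeset(char *str)
{
	drm_nomodeset = true;

	pr_warn("Booted with the nomodeset parameter. Only the system framebuffer will be available\n");

	return 1;	/* tell the boot code the option was consumed */
}

__setup("nomodeset", disable_modeset);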
drm-y := drm_aperture.o drm_auth.o drm_cache.o \ - drm_file.o drm_gem.o drm_ioctl.o drm_irq.o \ + drm_file.o drm_gem.o drm_ioctl.o \ drm_drv.o \ - drm_sysfs.o drm_hashtab.o drm_mm.o \ + drm_sysfs.o drm_mm.o \ drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o drm_displayid.o \ - drm_encoder_slave.o \ drm_trace_points.o drm_prime.o \ - drm_rect.o drm_vma_manager.o drm_flip_work.o \ + drm_vma_manager.o \ drm_modeset_lock.o drm_atomic.o drm_bridge.o \ drm_framebuffer.o drm_connector.o drm_blend.o \ drm_encoder.o drm_mode_object.o drm_property.o \ drm_plane.o drm_color_mgmt.o drm_print.o \ drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \ drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o \ - drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o \ + drm_client_modeset.o drm_atomic_uapi.o \ drm_managed.o drm_vblank_work.o drm-$(CONFIG_DRM_LEGACY) += drm_agpsupport.o drm_bufs.o drm_context.o drm_dma.o \ - drm_legacy_misc.o drm_lock.o drm_memory.o drm_scatter.o \ - drm_vm.o + drm_hashtab.o drm_irq.o drm_legacy_misc.o drm_lock.o \ + drm_memory.o drm_scatter.o drm_vm.o drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o drm-$(CONFIG_COMPAT) += drm_ioc32.o -drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o -drm-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_gem_shmem_helper.o drm-$(CONFIG_DRM_PANEL) += drm_panel.o drm-$(CONFIG_OF) += drm_of.o drm-$(CONFIG_PCI) += drm_pci.o drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o +drm-$(CONFIG_DRM_PRIVACY_SCREEN) += drm_privacy_screen.o drm_privacy_screen_x86.o obj-$(CONFIG_DRM_DP_AUX_BUS) += drm_dp_aux_bus.o +obj-$(CONFIG_DRM_NOMODESET) += drm_nomodeset.o + +drm_cma_helper-y := drm_gem_cma_helper.o +drm_cma_helper-$(CONFIG_DRM_KMS_HELPER) += drm_fb_cma_helper.o +obj-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_cma_helper.o + +drm_shmem_helper-y := drm_gem_shmem_helper.o +obj-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_shmem_helper.o + drm_vram_helper-y := drm_gem_vram_helper.o obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o @@ -42,18 +49,18 @@ drm_ttm_helper-y := drm_gem_ttm_helper.o obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o drm_kms_helper-y := drm_bridge_connector.o drm_crtc_helper.o drm_dp_helper.o \ - drm_dsc.o drm_probe_helper.o \ + drm_dsc.o drm_encoder_slave.o drm_flip_work.o drm_hdcp.o \ + drm_probe_helper.o \ drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \ drm_kms_helper_common.o drm_dp_dual_mode_helper.o \ drm_simple_kms_helper.o drm_modeset_helper.o \ drm_scdc_helper.o drm_gem_atomic_helper.o \ drm_gem_framebuffer_helper.o \ drm_atomic_state_helper.o drm_damage_helper.o \ - drm_format_helper.o drm_self_refresh_helper.o + drm_format_helper.o drm_self_refresh_helper.o drm_rect.o drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o -drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o drm_kms_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o @@ -127,3 +134,4 @@ obj-$(CONFIG_DRM_TIDSS) += tidss/ obj-y += xlnx/ obj-y += gud/ obj-$(CONFIG_DRM_HYPERV) += hyperv/ +obj-$(CONFIG_DRM_SPRD) += sprd/ diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 653726588956..7fedbb725e17 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -45,7 +45,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_atombios.o atombios_crtc.o amdgpu_connectors.o \ atom.o amdgpu_fence.o 
amdgpu_ttm.o amdgpu_object.o amdgpu_gart.o \ amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \ - amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \ + amdgpu_gem.o amdgpu_ring.o \ amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \ atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \ atombios_encoders.o amdgpu_sa.o atombios_i2c.o \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index b85b67a88a3d..9eb7afa276f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -458,7 +458,6 @@ struct amdgpu_flip_work { uint64_t base; struct drm_pending_vblank_event *event; struct amdgpu_bo *old_abo; - struct dma_fence *excl; unsigned shared_count; struct dma_fence **shared; struct dma_fence_cb cb; @@ -1096,7 +1095,9 @@ struct amdgpu_device { pci_channel_state_t pci_channel_state; struct amdgpu_reset_control *reset_cntl; - uint32_t ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE]; + uint32_t ip_versions[MAX_HWIP][HWIP_MAX_INSTANCE]; + + bool ram_is_direct_mapped; }; static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev) @@ -1317,6 +1318,8 @@ void amdgpu_device_flush_hdp(struct amdgpu_device *adev, void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring); +void amdgpu_device_halt(struct amdgpu_device *adev); + /* atpx handler */ #if defined(CONFIG_VGA_SWITCHEROO) void amdgpu_register_atpx_handler(void); @@ -1360,8 +1363,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon); u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc); int amdgpu_enable_vblank_kms(struct drm_crtc *crtc); void amdgpu_disable_vblank_kms(struct drm_crtc *crtc); -long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, - unsigned long arg); int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 7077f21f0021..46cf48b3904a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -72,7 +72,7 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) if (!kfd_initialized) return; - adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev, vf); + adev->kfd.dev = kgd2kfd_probe(adev, vf); if (adev->kfd.dev) amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size; @@ -233,19 +233,16 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev) return r; } -void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd) +void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - if (amdgpu_device_should_recover_gpu(adev)) amdgpu_device_gpu_recover(adev, NULL); } -int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size, +int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size, void **mem_obj, uint64_t *gpu_addr, void **cpu_ptr, bool cp_mqd_gfx9) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct amdgpu_bo *bo = NULL; struct amdgpu_bo_param bp; int r; @@ -314,7 +311,7 @@ allocate_mem_reserve_bo_failed: return r; } -void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj) +void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj) { struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj; @@ -325,10 +322,9 @@ void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj) amdgpu_bo_unref(&(bo)); } -int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size, +int amdgpu_amdkfd_alloc_gws(struct amdgpu_device 
*adev, size_t size, void **mem_obj) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct amdgpu_bo *bo = NULL; struct amdgpu_bo_user *ubo; struct amdgpu_bo_param bp; @@ -355,18 +351,16 @@ int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size, return 0; } -void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj) +void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj) { struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj; amdgpu_bo_unref(&bo); } -uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd, +uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev, enum kgd_engine_type type) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - switch (type) { case KGD_ENGINE_PFP: return adev->gfx.pfp_fw_version; @@ -399,11 +393,9 @@ uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd, return 0; } -void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd, +void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev, struct kfd_local_mem_info *mem_info) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - memset(mem_info, 0, sizeof(*mem_info)); mem_info->local_mem_size_public = adev->gmc.visible_vram_size; @@ -428,19 +420,15 @@ void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd, mem_info->mem_clk_max = 100; } -uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd) +uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - if (adev->gfx.funcs->get_gpu_clock_counter) return adev->gfx.funcs->get_gpu_clock_counter(adev); return 0; } -uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd) +uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - /* the sclk is in quantas of 10kHz */ if (amdgpu_sriov_vf(adev)) return adev->clock.default_sclk / 100; @@ -450,9 +438,8 @@ uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd) return 100; } -void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info) +void amdgpu_amdkfd_get_cu_info(struct amdgpu_device *adev, struct kfd_cu_info *cu_info) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct amdgpu_cu_info acu_info = adev->gfx.cu_info; memset(cu_info, 0, sizeof(*cu_info)); @@ -473,13 +460,12 @@ void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info) cu_info->lds_size = acu_info.lds_size; } -int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd, - struct kgd_dev **dma_buf_kgd, +int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd, + struct amdgpu_device **dmabuf_adev, uint64_t *bo_size, void *metadata_buffer, size_t buffer_size, uint32_t *metadata_size, uint32_t *flags) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct dma_buf *dma_buf; struct drm_gem_object *obj; struct amdgpu_bo *bo; @@ -507,8 +493,8 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd, goto out_put; r = 0; - if (dma_buf_kgd) - *dma_buf_kgd = (struct kgd_dev *)adev; + if (dmabuf_adev) + *dmabuf_adev = adev; if (bo_size) *bo_size = amdgpu_bo_size(bo); if (metadata_buffer) @@ -528,32 +514,18 @@ out_put: return r; } -uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd) +uint64_t amdgpu_amdkfd_get_vram_usage(struct amdgpu_device *adev) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct ttm_resource_manager *vram_man = 
ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); return amdgpu_vram_mgr_usage(vram_man); } -uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - - return adev->gmc.xgmi.hive_id; -} - -uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - - return adev->unique_id; -} - -uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src) +uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst, + struct amdgpu_device *src) { - struct amdgpu_device *peer_adev = (struct amdgpu_device *)src; - struct amdgpu_device *adev = (struct amdgpu_device *)dst; + struct amdgpu_device *peer_adev = src; + struct amdgpu_device *adev = dst; int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev); if (ret < 0) { @@ -565,16 +537,18 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s return (uint8_t)ret; } -int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev *src, bool is_min) +int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst, + struct amdgpu_device *src, + bool is_min) { - struct amdgpu_device *adev = (struct amdgpu_device *)dst, *peer_adev; + struct amdgpu_device *adev = dst, *peer_adev; int num_links; if (adev->asic_type != CHIP_ALDEBARAN) return 0; if (src) - peer_adev = (struct amdgpu_device *)src; + peer_adev = src; /* num links returns 0 for indirect peers since indirect route is unknown. */ num_links = is_min ? 1 : amdgpu_xgmi_get_num_links(adev, peer_adev); @@ -589,9 +563,8 @@ int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev return (num_links * 16 * 25000)/BITS_PER_BYTE; } -int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min) +int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min) { - struct amdgpu_device *adev = (struct amdgpu_device *)dev; int num_lanes_shift = (is_min ? ffs(adev->pm.pcie_mlw_mask) : fls(adev->pm.pcie_mlw_mask)) - 1; int gen_speed_shift = (is_min ? 
ffs(adev->pm.pcie_gen_mask & @@ -647,39 +620,11 @@ int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min) return (num_lanes_factor * gen_speed_mbits_factor)/BITS_PER_BYTE; } -uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - - return adev->rmmio_remap.bus_addr; -} - -uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - - return adev->gds.gws_size; -} - -uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - - return adev->rev_id; -} - -int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - - return adev->gmc.noretry; -} - -int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine, +int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev, + enum kgd_engine_type engine, uint32_t vmid, uint64_t gpu_addr, uint32_t *ib_cmd, uint32_t ib_len) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct amdgpu_job *job; struct amdgpu_ib *ib; struct amdgpu_ring *ring; @@ -730,10 +675,8 @@ err: return ret; } -void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle) +void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_COMPUTE, !idle); @@ -747,10 +690,9 @@ bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid) return false; } -int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid) +int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev, + uint16_t vmid) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - if (adev->family == AMDGPU_FAMILY_AI) { int i; @@ -763,10 +705,9 @@ int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid) return 0; } -int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid, - enum TLB_FLUSH_TYPE flush_type) +int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev, + uint16_t pasid, enum TLB_FLUSH_TYPE flush_type) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; bool all_hub = false; if (adev->family == AMDGPU_FAMILY_AI) @@ -775,21 +716,18 @@ int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid, return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub); } -bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd) +bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - return adev->have_atomics_support; } -void amdgpu_amdkfd_ras_poison_consumption_handler(struct kgd_dev *kgd) +void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct ras_err_data err_data = {0, 0, 0, NULL}; /* CPU MCA will handle page retirement if connected_to_cpu is 1 */ if (!adev->gmc.xgmi.connected_to_cpu) amdgpu_umc_process_ras_data_cb(adev, &err_data, NULL); else - amdgpu_amdkfd_gpu_reset(kgd); + amdgpu_amdkfd_gpu_reset(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index a15a4787c7ee..fcbc8a9c9e06 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -144,14 +144,16 @@ void amdgpu_amdkfd_interrupt(struct 
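The long run of amdgpu_amdkfd hunks from here on applies one mechanical transformation: the opaque struct kgd_dev * handle, which every callee immediately cast back to its real type, becomes a plain struct amdgpu_device * parameter. A condensed before/after sketch (bodies trimmed relative to the real functions):

/* Before: opaque handle, boilerplate cast, no type checking at the
 * call sites. */
uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->gfx.funcs->get_gpu_clock_counter(adev);
}

/* After: the real type in the prototype; the cast and the local
 * variable disappear, and a caller passing the wrong pointer now
 * fails to compile. */
uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	return adev->gfx.funcs->get_gpu_clock_counter(adev);
}

The same change lets one-line getters such as amdgpu_amdkfd_get_hive_id() or amdgpu_amdkfd_get_num_gws() be deleted outright, as the hunks above do: with the real type in hand, callers can read adev->gmc.xgmi.hive_id or adev->gds.gws_size directly.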
amdgpu_device *adev, void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev); void amdgpu_amdkfd_device_init(struct amdgpu_device *adev); void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev); -int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine, +int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev, + enum kgd_engine_type engine, uint32_t vmid, uint64_t gpu_addr, uint32_t *ib_cmd, uint32_t ib_len); -void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle); -bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd); -int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid); -int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid, - enum TLB_FLUSH_TYPE flush_type); +void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle); +bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev); +int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev, + uint16_t vmid); +int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev, + uint16_t pasid, enum TLB_FLUSH_TYPE flush_type); bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid); @@ -159,7 +161,7 @@ int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev); int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev); -void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd); +void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev); int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev, int queue_bit); @@ -198,37 +200,36 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm) } #endif /* Shared API */ -int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size, +int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size, void **mem_obj, uint64_t *gpu_addr, void **cpu_ptr, bool mqd_gfx9); -void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj); -int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size, void **mem_obj); -void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj); +void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj); +int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size, + void **mem_obj); +void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj); int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem); int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem); -uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd, +uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev, enum kgd_engine_type type); -void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd, +void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev, struct kfd_local_mem_info *mem_info); -uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd); +uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev); -uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd); -void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info); -int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd, - struct kgd_dev **dmabuf_kgd, +uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev); +void amdgpu_amdkfd_get_cu_info(struct amdgpu_device *adev, + struct kfd_cu_info *cu_info); +int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd, + struct amdgpu_device **dmabuf_adev, uint64_t *bo_size, void *metadata_buffer, size_t buffer_size, uint32_t *metadata_size, 
uint32_t *flags); -uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd); -uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd); -uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd); -uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd); -uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd); -uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd); -int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd); -uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src); -int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev *src, bool is_min); -int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min); +uint64_t amdgpu_amdkfd_get_vram_usage(struct amdgpu_device *adev); +uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst, + struct amdgpu_device *src); +int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst, + struct amdgpu_device *src, + bool is_min); +int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min); /* Read user wptr from a specified user address space with page fault * disabled. The memory must be pinned and mapped to the hardware when @@ -258,45 +259,54 @@ int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min); (&((struct amdgpu_fpriv *) \ ((struct drm_file *)(drm_priv))->driver_priv)->vm) -int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, +int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev, struct file *filp, u32 pasid, void **process_info, struct dma_fence **ef); -void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv); +void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev, + void *drm_priv); uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv); int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( - struct kgd_dev *kgd, uint64_t va, uint64_t size, + struct amdgpu_device *adev, uint64_t va, uint64_t size, void *drm_priv, struct kgd_mem **mem, uint64_t *offset, uint32_t flags); int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, + struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv, uint64_t *size); int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, bool *table_freed); + struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv, + bool *table_freed); int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv); + struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv); int amdgpu_amdkfd_gpuvm_sync_memory( - struct kgd_dev *kgd, struct kgd_mem *mem, bool intr); -int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd, + struct amdgpu_device *adev, struct kgd_mem *mem, bool intr); +int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev, struct kgd_mem *mem, void **kptr, uint64_t *size); -void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_dev *kgd, struct kgd_mem *mem); +void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct amdgpu_device *adev, + struct kgd_mem *mem); int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info, struct dma_fence **ef); -int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd, +int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev, struct kfd_vm_fault_info *info); -int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, +int 
amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev, struct dma_buf *dmabuf, uint64_t va, void *drm_priv, struct kgd_mem **mem, uint64_t *size, uint64_t *mmap_offset); -int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd, +int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev, struct tile_config *config); -void amdgpu_amdkfd_ras_poison_consumption_handler(struct kgd_dev *kgd); +void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev); #if IS_ENABLED(CONFIG_HSA_AMD) void amdgpu_amdkfd_gpuvm_init_mem_limits(void); void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, struct amdgpu_vm *vm); + +/** + * @amdgpu_amdkfd_release_notify() - Notify KFD when GEM object is released + * + * Allows KFD to release its resources associated with the GEM object. + */ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo); void amdgpu_amdkfd_reserve_system_mem(uint64_t size); #else @@ -324,7 +334,7 @@ int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm, #if IS_ENABLED(CONFIG_HSA_AMD) int kgd2kfd_init(void); void kgd2kfd_exit(void); -struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf); +struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf); bool kgd2kfd_device_init(struct kfd_dev *kfd, struct drm_device *ddev, const struct kgd2kfd_shared_resources *gpu_resources); @@ -348,7 +358,7 @@ static inline void kgd2kfd_exit(void) } static inline -struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf) +struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) { return NULL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index 5a7f680bcb3f..abe93b3ff765 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -57,11 +57,6 @@ (*dump)[i++][1] = RREG32(addr); \ } while (0) -static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) -{ - return (struct amdgpu_device *)kgd; -} - static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd) { return (struct v9_sdma_mqd *)mqd; @@ -123,10 +118,9 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, return sdma_rlc_reg_offset; } -int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, +int kgd_arcturus_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; unsigned long end_jiffies; @@ -193,11 +187,10 @@ int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, return 0; } -int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd, +int kgd_arcturus_hqd_sdma_dump(struct amdgpu_device *adev, uint32_t engine_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, engine_id, queue_id); uint32_t i = 0, reg; @@ -225,9 +218,9 @@ int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd, return 0; } -bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) +bool kgd_arcturus_hqd_sdma_is_occupied(struct amdgpu_device *adev, + void *mqd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; @@ -244,10 +237,9 @@ bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) return false; } -int kgd_arcturus_hqd_sdma_destroy(struct kgd_dev *kgd, 
void *mqd, +int kgd_arcturus_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, unsigned int utimeout) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; uint32_t temp; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h index ce08131b7b5f..756c1a5679c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h @@ -20,11 +20,12 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, +int kgd_arcturus_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm); -int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd, +int kgd_arcturus_hqd_sdma_dump(struct amdgpu_device *adev, uint32_t engine_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs); -bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd); -int kgd_arcturus_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, +bool kgd_arcturus_hqd_sdma_is_occupied(struct amdgpu_device *adev, + void *mqd); +int kgd_arcturus_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, unsigned int utimeout); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index 960acf68150a..7b7f4b2764c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -39,37 +39,26 @@ enum hqd_dequeue_request_type { SAVE_WAVES }; -static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) -{ - return (struct amdgpu_device *)kgd; -} - -static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe, +static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe, uint32_t queue, uint32_t vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - mutex_lock(&adev->srbm_mutex); nv_grbm_select(adev, mec, pipe, queue, vmid); } -static void unlock_srbm(struct kgd_dev *kgd) +static void unlock_srbm(struct amdgpu_device *adev) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - nv_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); } -static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id, +static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(kgd, mec, pipe, queue_id, 0); + lock_srbm(adev, mec, pipe, queue_id, 0); } static uint64_t get_queue_mask(struct amdgpu_device *adev, @@ -81,33 +70,29 @@ static uint64_t get_queue_mask(struct amdgpu_device *adev, return 1ull << bit; } -static void release_queue(struct kgd_dev *kgd) +static void release_queue(struct amdgpu_device *adev) { - unlock_srbm(kgd); + unlock_srbm(adev); } -static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, +static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - - lock_srbm(kgd, 0, 0, 0, vmid); + lock_srbm(adev, 0, 0, 0, vmid); WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config); WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases); /* APE1 no longer exists on GFX9 */ - unlock_srbm(kgd); + 
unlock_srbm(adev); } -static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, +static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, unsigned int vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - /* * We have to assume that there is no outstanding mapping. * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because @@ -150,22 +135,21 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, * but still works */ -static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) +static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t mec; uint32_t pipe; mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(kgd, mec, pipe, 0, 0); + lock_srbm(adev, mec, pipe, 0, 0); WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); - unlock_srbm(kgd); + unlock_srbm(adev); return 0; } @@ -218,12 +202,11 @@ static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd) return (struct v10_sdma_mqd *)mqd; } -static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr, - uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm) +static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t __user *wptr, uint32_t wptr_shift, + uint32_t wptr_mask, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v10_compute_mqd *m; uint32_t *mqd_hqd; uint32_t reg, hqd_base, data; @@ -231,7 +214,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, m = get_mqd(mqd); pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. 
*/ mqd_hqd = &m->cp_mqd_base_addr_lo; @@ -296,16 +279,15 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, data); - release_queue(kgd); + release_queue(adev); return 0; } -static int kgd_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, +static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t doorbell_off) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; struct v10_compute_mqd *m; uint32_t mec, pipe; @@ -313,7 +295,7 @@ static int kgd_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, m = get_mqd(mqd); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); @@ -349,16 +331,15 @@ static int kgd_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, out_unlock: spin_unlock(&adev->gfx.kiq.ring_lock); - release_queue(kgd); + release_queue(adev); return r; } -static int kgd_hqd_dump(struct kgd_dev *kgd, +static int kgd_hqd_dump(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t i = 0, reg; #define HQD_N_REGS 56 #define DUMP_REG(addr) do { \ @@ -372,13 +353,13 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, if (*dump == NULL) return -ENOMEM; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++) DUMP_REG(reg); - release_queue(kgd); + release_queue(adev); WARN_ON_ONCE(i != HQD_N_REGS); *n_regs = i; @@ -386,10 +367,9 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, return 0; } -static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v10_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; unsigned long end_jiffies; @@ -456,11 +436,10 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, return 0; } -static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, +static int kgd_hqd_sdma_dump(struct amdgpu_device *adev, uint32_t engine_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, engine_id, queue_id); uint32_t i = 0, reg; @@ -488,15 +467,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, return 0; } -static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id) +static bool kgd_hqd_is_occupied(struct amdgpu_device *adev, + uint64_t queue_address, uint32_t pipe_id, + uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t act; bool retval = false; uint32_t low, high; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE); if (act) { low = lower_32_bits(queue_address >> 8); @@ -506,13 +485,12 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI)) retval = true; } - release_queue(kgd); + release_queue(adev); return retval; } -static bool 
kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) +static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v10_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; @@ -529,12 +507,11 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) return false; } -static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); enum hqd_dequeue_request_type type; unsigned long end_jiffies; uint32_t temp; @@ -548,7 +525,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, int retry; #endif - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); if (m->cp_hqd_vmid == 0) WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0); @@ -633,20 +610,19 @@ loop: break; if (time_after(jiffies, end_jiffies)) { pr_err("cp queue preemption time out.\n"); - release_queue(kgd); + release_queue(adev); return -ETIME; } usleep_range(500, 1000); } - release_queue(kgd); + release_queue(adev); return 0; } -static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, unsigned int utimeout) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v10_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; uint32_t temp; @@ -683,11 +659,10 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, return 0; } -static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, +static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid) { uint32_t value; - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid); @@ -696,12 +671,12 @@ static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } -static int kgd_address_watch_disable(struct kgd_dev *kgd) +static int kgd_address_watch_disable(struct amdgpu_device *adev) { return 0; } -static int kgd_address_watch_execute(struct kgd_dev *kgd, +static int kgd_address_watch_execute(struct amdgpu_device *adev, unsigned int watch_point_id, uint32_t cntl_val, uint32_t addr_hi, @@ -710,11 +685,10 @@ static int kgd_address_watch_execute(struct kgd_dev *kgd, return 0; } -static int kgd_wave_control_execute(struct kgd_dev *kgd, +static int kgd_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t data = 0; mutex_lock(&adev->grbm_idx_mutex); @@ -735,18 +709,16 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd, return 0; } -static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, +static uint32_t kgd_address_watch_get_offset(struct amdgpu_device *adev, unsigned int watch_point_id, unsigned int reg_offset) { return 0; } -static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, - uint64_t page_table_base) +static void set_vm_context_page_table_base(struct amdgpu_device *adev, + uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { pr_err("trying to set page table base for wrong VMID %u\n", vmid); @@ -757,12 +729,10 @@ static void 
set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base); } -static void program_trap_handler_settings(struct kgd_dev *kgd, +static void program_trap_handler_settings(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - - lock_srbm(kgd, 0, 0, 0, vmid); + lock_srbm(adev, 0, 0, 0, vmid); /* * Program TBA registers @@ -781,7 +751,7 @@ static void program_trap_handler_settings(struct kgd_dev *kgd, WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI), upper_32_bits(tma_addr >> 8)); - unlock_srbm(kgd); + unlock_srbm(adev); } const struct kfd2kgd_calls gfx_v10_kfd2kgd = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c index dac0d751d5af..1f37d3574001 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c @@ -38,37 +38,26 @@ enum hqd_dequeue_request_type { SAVE_WAVES }; -static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) -{ - return (struct amdgpu_device *)kgd; -} - -static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe, +static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe, uint32_t queue, uint32_t vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - mutex_lock(&adev->srbm_mutex); nv_grbm_select(adev, mec, pipe, queue, vmid); } -static void unlock_srbm(struct kgd_dev *kgd) +static void unlock_srbm(struct amdgpu_device *adev) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - nv_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); } -static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id, +static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(kgd, mec, pipe, queue_id, 0); + lock_srbm(adev, mec, pipe, queue_id, 0); } static uint64_t get_queue_mask(struct amdgpu_device *adev, @@ -80,34 +69,30 @@ static uint64_t get_queue_mask(struct amdgpu_device *adev, return 1ull << bit; } -static void release_queue(struct kgd_dev *kgd) +static void release_queue(struct amdgpu_device *adev) { - unlock_srbm(kgd); + unlock_srbm(adev); } -static void program_sh_mem_settings_v10_3(struct kgd_dev *kgd, uint32_t vmid, +static void program_sh_mem_settings_v10_3(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - - lock_srbm(kgd, 0, 0, 0, vmid); + lock_srbm(adev, 0, 0, 0, vmid); WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config); WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases); /* APE1 no longer exists on GFX9 */ - unlock_srbm(kgd); + unlock_srbm(adev); } /* ATC is defeatured on Sienna_Cichlid */ -static int set_pasid_vmid_mapping_v10_3(struct kgd_dev *kgd, unsigned int pasid, +static int set_pasid_vmid_mapping_v10_3(struct amdgpu_device *adev, unsigned int pasid, unsigned int vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t value = pasid << IH_VMID_0_LUT__PASID__SHIFT; /* Mapping vmid to pasid also for IH block */ @@ -118,22 +103,21 @@ static int set_pasid_vmid_mapping_v10_3(struct kgd_dev *kgd, unsigned int pasid, 
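All the lock_srbm()/acquire_queue() conversions in these gfx hunks guard the same banked-register idiom: grbm_select routes the CP_HQD_* reads and writes that follow to one (MEC, pipe, queue, VMID) instance, so the select-access-deselect sequence has to stay under srbm_mutex. A sketch of the convention as it can be read from the converted helpers; the '+ 1' skipping micro-engine 0, which serves graphics rather than compute, is an inference from the code, not stated in the diff:

static void acquire_queue_sketch(struct amdgpu_device *adev,
				 uint32_t pipe_id, uint32_t queue_id)
{
	uint32_t mec  = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	mutex_lock(&adev->srbm_mutex);		/* serialize the banked window */
	nv_grbm_select(adev, mec, pipe, queue_id, 0);

	/* ... RREG32/WREG32 of CP_HQD_* registers now target the
	 * selected queue instance ... */

	nv_grbm_select(adev, 0, 0, 0, 0);	/* restore the default bank */
	mutex_unlock(&adev->srbm_mutex);
}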
return 0; } -static int init_interrupts_v10_3(struct kgd_dev *kgd, uint32_t pipe_id) +static int init_interrupts_v10_3(struct amdgpu_device *adev, uint32_t pipe_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t mec; uint32_t pipe; mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(kgd, mec, pipe, 0, 0); + lock_srbm(adev, mec, pipe, 0, 0); WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); - unlock_srbm(kgd); + unlock_srbm(adev); return 0; } @@ -188,12 +172,11 @@ static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd) return (struct v10_sdma_mqd *)mqd; } -static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr, - uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm) +static int hqd_load_v10_3(struct amdgpu_device *adev, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t __user *wptr, uint32_t wptr_shift, + uint32_t wptr_mask, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v10_compute_mqd *m; uint32_t *mqd_hqd; uint32_t reg, hqd_base, data; @@ -201,7 +184,7 @@ static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, m = get_mqd(mqd); pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); /* HIQ is set during driver init period with vmid set to 0*/ if (m->cp_hqd_vmid == 0) { @@ -281,16 +264,15 @@ static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, data); - release_queue(kgd); + release_queue(adev); return 0; } -static int hiq_mqd_load_v10_3(struct kgd_dev *kgd, void *mqd, +static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t doorbell_off) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; struct v10_compute_mqd *m; uint32_t mec, pipe; @@ -298,7 +280,7 @@ static int hiq_mqd_load_v10_3(struct kgd_dev *kgd, void *mqd, m = get_mqd(mqd); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); @@ -334,16 +316,15 @@ static int hiq_mqd_load_v10_3(struct kgd_dev *kgd, void *mqd, out_unlock: spin_unlock(&adev->gfx.kiq.ring_lock); - release_queue(kgd); + release_queue(adev); return r; } -static int hqd_dump_v10_3(struct kgd_dev *kgd, +static int hqd_dump_v10_3(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t i = 0, reg; #define HQD_N_REGS 56 #define DUMP_REG(addr) do { \ @@ -357,13 +338,13 @@ static int hqd_dump_v10_3(struct kgd_dev *kgd, if (*dump == NULL) return -ENOMEM; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++) DUMP_REG(reg); - release_queue(kgd); + release_queue(adev); WARN_ON_ONCE(i != HQD_N_REGS); *n_regs = i; @@ -371,10 +352,9 @@ static int hqd_dump_v10_3(struct kgd_dev *kgd, return 0; } -static int hqd_sdma_load_v10_3(struct kgd_dev *kgd, void *mqd, 
+static int hqd_sdma_load_v10_3(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v10_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; unsigned long end_jiffies; @@ -441,11 +421,10 @@ static int hqd_sdma_load_v10_3(struct kgd_dev *kgd, void *mqd, return 0; } -static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd, +static int hqd_sdma_dump_v10_3(struct amdgpu_device *adev, uint32_t engine_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, engine_id, queue_id); uint32_t i = 0, reg; @@ -473,15 +452,15 @@ static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd, return 0; } -static bool hqd_is_occupied_v10_3(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id) +static bool hqd_is_occupied_v10_3(struct amdgpu_device *adev, + uint64_t queue_address, uint32_t pipe_id, + uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t act; bool retval = false; uint32_t low, high; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE); if (act) { low = lower_32_bits(queue_address >> 8); @@ -491,13 +470,13 @@ static bool hqd_is_occupied_v10_3(struct kgd_dev *kgd, uint64_t queue_address, high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI)) retval = true; } - release_queue(kgd); + release_queue(adev); return retval; } -static bool hqd_sdma_is_occupied_v10_3(struct kgd_dev *kgd, void *mqd) +static bool hqd_sdma_is_occupied_v10_3(struct amdgpu_device *adev, + void *mqd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v10_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; @@ -514,18 +493,17 @@ static bool hqd_sdma_is_occupied_v10_3(struct kgd_dev *kgd, void *mqd) return false; } -static int hqd_destroy_v10_3(struct kgd_dev *kgd, void *mqd, +static int hqd_destroy_v10_3(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); enum hqd_dequeue_request_type type; unsigned long end_jiffies; uint32_t temp; struct v10_compute_mqd *m = get_mqd(mqd); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); if (m->cp_hqd_vmid == 0) WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0); @@ -555,20 +533,19 @@ static int hqd_destroy_v10_3(struct kgd_dev *kgd, void *mqd, if (time_after(jiffies, end_jiffies)) { pr_err("cp queue pipe %d queue %d preemption failed\n", pipe_id, queue_id); - release_queue(kgd); + release_queue(adev); return -ETIME; } usleep_range(500, 1000); } - release_queue(kgd); + release_queue(adev); return 0; } -static int hqd_sdma_destroy_v10_3(struct kgd_dev *kgd, void *mqd, +static int hqd_sdma_destroy_v10_3(struct amdgpu_device *adev, void *mqd, unsigned int utimeout) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v10_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; uint32_t temp; @@ -606,12 +583,12 @@ static int hqd_sdma_destroy_v10_3(struct kgd_dev *kgd, void *mqd, } -static int address_watch_disable_v10_3(struct kgd_dev *kgd) +static int address_watch_disable_v10_3(struct amdgpu_device *adev) { return 0; } -static int address_watch_execute_v10_3(struct kgd_dev *kgd, +static int address_watch_execute_v10_3(struct amdgpu_device *adev, unsigned int watch_point_id, 
uint32_t cntl_val, uint32_t addr_hi, @@ -620,11 +597,10 @@ static int address_watch_execute_v10_3(struct kgd_dev *kgd, return 0; } -static int wave_control_execute_v10_3(struct kgd_dev *kgd, +static int wave_control_execute_v10_3(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t data = 0; mutex_lock(&adev->grbm_idx_mutex); @@ -645,28 +621,24 @@ static int wave_control_execute_v10_3(struct kgd_dev *kgd, return 0; } -static uint32_t address_watch_get_offset_v10_3(struct kgd_dev *kgd, +static uint32_t address_watch_get_offset_v10_3(struct amdgpu_device *adev, unsigned int watch_point_id, unsigned int reg_offset) { return 0; } -static void set_vm_context_page_table_base_v10_3(struct kgd_dev *kgd, uint32_t vmid, - uint64_t page_table_base) +static void set_vm_context_page_table_base_v10_3(struct amdgpu_device *adev, + uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - /* SDMA is on gfxhub as well for Navi1* series */ adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base); } -static void program_trap_handler_settings_v10_3(struct kgd_dev *kgd, +static void program_trap_handler_settings_v10_3(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - - lock_srbm(kgd, 0, 0, 0, vmid); + lock_srbm(adev, 0, 0, 0, vmid); /* * Program TBA registers @@ -685,15 +657,14 @@ static void program_trap_handler_settings_v10_3(struct kgd_dev *kgd, WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI), upper_32_bits(tma_addr >> 8)); - unlock_srbm(kgd); + unlock_srbm(adev); } #if 0 -uint32_t enable_debug_trap_v10_3(struct kgd_dev *kgd, +uint32_t enable_debug_trap_v10_3(struct amdgpu_device *adev, uint32_t trap_debug_wave_launch_mode, uint32_t vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t data = 0; uint32_t orig_wave_cntl_value; uint32_t orig_stall_vmid; @@ -720,10 +691,8 @@ uint32_t enable_debug_trap_v10_3(struct kgd_dev *kgd, return 0; } -uint32_t disable_debug_trap_v10_3(struct kgd_dev *kgd) +uint32_t disable_debug_trap_v10_3(struct amdgpu_device *adev) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - mutex_lock(&adev->grbm_idx_mutex); WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0); @@ -733,11 +702,10 @@ uint32_t disable_debug_trap_v10_3(struct kgd_dev *kgd) return 0; } -uint32_t set_wave_launch_trap_override_v10_3(struct kgd_dev *kgd, +uint32_t set_wave_launch_trap_override_v10_3(struct amdgpu_device *adev, uint32_t trap_override, uint32_t trap_mask) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t data = 0; mutex_lock(&adev->grbm_idx_mutex); @@ -762,11 +730,10 @@ uint32_t set_wave_launch_trap_override_v10_3(struct kgd_dev *kgd, return 0; } -uint32_t set_wave_launch_mode_v10_3(struct kgd_dev *kgd, +uint32_t set_wave_launch_mode_v10_3(struct amdgpu_device *adev, uint8_t wave_launch_mode, uint32_t vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t data = 0; bool is_stall_mode; bool is_mode_set; @@ -805,16 +772,14 @@ uint32_t set_wave_launch_mode_v10_3(struct kgd_dev *kgd, * sem_rearm_wait_time -- Wait Count for Semaphore re-arm. * deq_retry_wait_time -- Wait Count for Global Wave Syncs. 
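 *
 * A minimal usage sketch (illustrative only, not part of this patch): both
 * wait counts come back packed in the single CP_IQ_WAIT_TIME2 register
 * value read below:
 *
 *	uint32_t wait_times;
 *
 *	get_iq_wait_times_v10_3(adev, &wait_times);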
*/ -void get_iq_wait_times_v10_3(struct kgd_dev *kgd, +void get_iq_wait_times_v10_3(struct amdgpu_device *adev, uint32_t *wait_times) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - *wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2)); } -void build_grace_period_packet_info_v10_3(struct kgd_dev *kgd, +void build_grace_period_packet_info_v10_3(struct amdgpu_device *adev, uint32_t wait_times, uint32_t grace_period, uint32_t *reg_offset, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index b91d27e39bad..36528dad7684 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -82,68 +82,54 @@ union TCP_WATCH_CNTL_BITS { float f32All; }; -static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) -{ - return (struct amdgpu_device *)kgd; -} - -static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe, +static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe, uint32_t queue, uint32_t vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue); mutex_lock(&adev->srbm_mutex); WREG32(mmSRBM_GFX_CNTL, value); } -static void unlock_srbm(struct kgd_dev *kgd) +static void unlock_srbm(struct amdgpu_device *adev) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - WREG32(mmSRBM_GFX_CNTL, 0); mutex_unlock(&adev->srbm_mutex); } -static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id, +static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(kgd, mec, pipe, queue_id, 0); + lock_srbm(adev, mec, pipe, queue_id, 0); } -static void release_queue(struct kgd_dev *kgd) +static void release_queue(struct amdgpu_device *adev) { - unlock_srbm(kgd); + unlock_srbm(adev); } -static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, +static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - - lock_srbm(kgd, 0, 0, 0, vmid); + lock_srbm(adev, 0, 0, 0, vmid); WREG32(mmSH_MEM_CONFIG, sh_mem_config); WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base); WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit); WREG32(mmSH_MEM_BASES, sh_mem_bases); - unlock_srbm(kgd); + unlock_srbm(adev); } -static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, +static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, unsigned int vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - /* * We have to assume that there is no outstanding mapping. 
* The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because @@ -165,21 +151,20 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, return 0; } -static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) +static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t mec; uint32_t pipe; mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(kgd, mec, pipe, 0, 0); + lock_srbm(adev, mec, pipe, 0, 0); WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); - unlock_srbm(kgd); + unlock_srbm(adev); return 0; } @@ -207,12 +192,11 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd) return (struct cik_sdma_rlc_registers *)mqd; } -static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr, - uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm) +static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t __user *wptr, uint32_t wptr_shift, + uint32_t wptr_mask, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct cik_mqd *m; uint32_t *mqd_hqd; uint32_t reg, wptr_val, data; @@ -220,7 +204,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, m = get_mqd(mqd); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); /* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */ mqd_hqd = &m->cp_mqd_base_addr_lo; @@ -239,25 +223,24 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, * release srbm_mutex to avoid circular dependency between * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex. 
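 *
 * Sketched with the names used in this file (illustrative only), the
 * code below breaks the cycle like so:
 *
 *	release_queue(adev);                      <- drop srbm_mutex
 *	read_user_wptr(mm, wptr, wptr_val);       <- may fault, takes mm_sem
 *	acquire_queue(adev, pipe_id, queue_id);   <- re-take srbm_mutex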
*/ - release_queue(kgd); + release_queue(adev); valid_wptr = read_user_wptr(mm, wptr, wptr_val); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); if (valid_wptr) WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask); data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); WREG32(mmCP_HQD_ACTIVE, data); - release_queue(kgd); + release_queue(adev); return 0; } -static int kgd_hqd_dump(struct kgd_dev *kgd, +static int kgd_hqd_dump(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t i = 0, reg; #define HQD_N_REGS (35+4) #define DUMP_REG(addr) do { \ @@ -271,7 +254,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, if (*dump == NULL) return -ENOMEM; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0); DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1); @@ -281,7 +264,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++) DUMP_REG(reg); - release_queue(kgd); + release_queue(adev); WARN_ON_ONCE(i != HQD_N_REGS); *n_regs = i; @@ -289,10 +272,9 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, return 0; } -static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct cik_sdma_rlc_registers *m; unsigned long end_jiffies; uint32_t sdma_rlc_reg_offset; @@ -345,11 +327,10 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, return 0; } -static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, +static int kgd_hqd_sdma_dump(struct amdgpu_device *adev, uint32_t engine_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET + queue_id * KFD_CIK_SDMA_QUEUE_OFFSET; uint32_t i = 0, reg; @@ -372,15 +353,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, return 0; } -static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id) +static bool kgd_hqd_is_occupied(struct amdgpu_device *adev, + uint64_t queue_address, uint32_t pipe_id, + uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t act; bool retval = false; uint32_t low, high; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); act = RREG32(mmCP_HQD_ACTIVE); if (act) { low = lower_32_bits(queue_address >> 8); @@ -390,13 +371,12 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, high == RREG32(mmCP_HQD_PQ_BASE_HI)) retval = true; } - release_queue(kgd); + release_queue(adev); return retval; } -static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) +static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct cik_sdma_rlc_registers *m; uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; @@ -412,12 +392,11 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) return false; } -static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id) { - struct amdgpu_device 
*adev = get_amdgpu_device(kgd); uint32_t temp; enum hqd_dequeue_request_type type; unsigned long flags, end_jiffies; @@ -426,7 +405,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, if (amdgpu_in_reset(adev)) return -EIO; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0); switch (reset_type) { @@ -504,20 +483,19 @@ loop: break; if (time_after(jiffies, end_jiffies)) { pr_err("cp queue preemption time out\n"); - release_queue(kgd); + release_queue(adev); return -ETIME; } usleep_range(500, 1000); } - release_queue(kgd); + release_queue(adev); return 0; } -static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, unsigned int utimeout) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct cik_sdma_rlc_registers *m; uint32_t sdma_rlc_reg_offset; uint32_t temp; @@ -551,9 +529,8 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, return 0; } -static int kgd_address_watch_disable(struct kgd_dev *kgd) +static int kgd_address_watch_disable(struct amdgpu_device *adev) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); union TCP_WATCH_CNTL_BITS cntl; unsigned int i; @@ -571,13 +548,12 @@ static int kgd_address_watch_disable(struct kgd_dev *kgd) return 0; } -static int kgd_address_watch_execute(struct kgd_dev *kgd, +static int kgd_address_watch_execute(struct amdgpu_device *adev, unsigned int watch_point_id, uint32_t cntl_val, uint32_t addr_hi, uint32_t addr_lo) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); union TCP_WATCH_CNTL_BITS cntl; cntl.u32All = cntl_val; @@ -602,11 +578,10 @@ static int kgd_address_watch_execute(struct kgd_dev *kgd, return 0; } -static int kgd_wave_control_execute(struct kgd_dev *kgd, +static int kgd_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t data; mutex_lock(&adev->grbm_idx_mutex); @@ -627,18 +602,17 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd, return 0; } -static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, +static uint32_t kgd_address_watch_get_offset(struct amdgpu_device *adev, unsigned int watch_point_id, unsigned int reg_offset) { return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset]; } -static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, +static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid) { uint32_t value; - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; @@ -646,21 +620,17 @@ static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } -static void set_scratch_backing_va(struct kgd_dev *kgd, +static void set_scratch_backing_va(struct amdgpu_device *adev, uint64_t va, uint32_t vmid) { - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - - lock_srbm(kgd, 0, 0, 0, vmid); + lock_srbm(adev, 0, 0, 0, vmid); WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va); - unlock_srbm(kgd); + unlock_srbm(adev); } -static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, - uint64_t page_table_base) +static void set_vm_context_page_table_base(struct amdgpu_device *adev, + uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); 
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { pr_err("trying to set page table base for wrong VMID\n"); return; @@ -676,10 +646,8 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, * @vmid: vmid pointer * read vmid from register (CIK). */ -static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd) +static uint32_t read_vmid_from_vmfault_reg(struct amdgpu_device *adev) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 5ce0ce704a21..52832cd69a93 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -39,68 +39,54 @@ enum hqd_dequeue_request_type { RESET_WAVES }; -static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) -{ - return (struct amdgpu_device *)kgd; -} - -static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe, +static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe, uint32_t queue, uint32_t vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue); mutex_lock(&adev->srbm_mutex); WREG32(mmSRBM_GFX_CNTL, value); } -static void unlock_srbm(struct kgd_dev *kgd) +static void unlock_srbm(struct amdgpu_device *adev) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - WREG32(mmSRBM_GFX_CNTL, 0); mutex_unlock(&adev->srbm_mutex); } -static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id, +static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(kgd, mec, pipe, queue_id, 0); + lock_srbm(adev, mec, pipe, queue_id, 0); } -static void release_queue(struct kgd_dev *kgd) +static void release_queue(struct amdgpu_device *adev) { - unlock_srbm(kgd); + unlock_srbm(adev); } -static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, +static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - - lock_srbm(kgd, 0, 0, 0, vmid); + lock_srbm(adev, 0, 0, 0, vmid); WREG32(mmSH_MEM_CONFIG, sh_mem_config); WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base); WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit); WREG32(mmSH_MEM_BASES, sh_mem_bases); - unlock_srbm(kgd); + unlock_srbm(adev); } -static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, +static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, unsigned int vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - /* * We have to assume that there is no outstanding mapping. 
* The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because @@ -123,21 +109,20 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, return 0; } -static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) +static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t mec; uint32_t pipe; mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(kgd, mec, pipe, 0, 0); + lock_srbm(adev, mec, pipe, 0, 0); WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); - unlock_srbm(kgd); + unlock_srbm(adev); return 0; } @@ -165,12 +150,11 @@ static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd) return (struct vi_sdma_mqd *)mqd; } -static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr, - uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm) +static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t __user *wptr, uint32_t wptr_shift, + uint32_t wptr_mask, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct vi_mqd *m; uint32_t *mqd_hqd; uint32_t reg, wptr_val, data; @@ -178,7 +162,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, m = get_mqd(mqd); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); /* HIQ is set during driver init period with vmid set to 0*/ if (m->cp_hqd_vmid == 0) { @@ -206,7 +190,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, * on ASICs that do not support context-save. * EOP writes/reads can start anywhere in the ring. */ - if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) { + if (adev->asic_type != CHIP_TONGA) { WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr); WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr); WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem); @@ -226,25 +210,24 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, * release srbm_mutex to avoid circular dependency between * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex. 
*/ - release_queue(kgd); + release_queue(adev); valid_wptr = read_user_wptr(mm, wptr, wptr_val); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); if (valid_wptr) WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask); data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); WREG32(mmCP_HQD_ACTIVE, data); - release_queue(kgd); + release_queue(adev); return 0; } -static int kgd_hqd_dump(struct kgd_dev *kgd, +static int kgd_hqd_dump(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t i = 0, reg; #define HQD_N_REGS (54+4) #define DUMP_REG(addr) do { \ @@ -258,7 +241,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, if (*dump == NULL) return -ENOMEM; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0); DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1); @@ -268,7 +251,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_DONES; reg++) DUMP_REG(reg); - release_queue(kgd); + release_queue(adev); WARN_ON_ONCE(i != HQD_N_REGS); *n_regs = i; @@ -276,10 +259,9 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, return 0; } -static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct vi_sdma_mqd *m; unsigned long end_jiffies; uint32_t sdma_rlc_reg_offset; @@ -331,11 +313,10 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, return 0; } -static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, +static int kgd_hqd_sdma_dump(struct amdgpu_device *adev, uint32_t engine_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET + queue_id * KFD_VI_SDMA_QUEUE_OFFSET; uint32_t i = 0, reg; @@ -367,15 +348,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, return 0; } -static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id) +static bool kgd_hqd_is_occupied(struct amdgpu_device *adev, + uint64_t queue_address, uint32_t pipe_id, + uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t act; bool retval = false; uint32_t low, high; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); act = RREG32(mmCP_HQD_ACTIVE); if (act) { low = lower_32_bits(queue_address >> 8); @@ -385,13 +366,12 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, high == RREG32(mmCP_HQD_PQ_BASE_HI)) retval = true; } - release_queue(kgd); + release_queue(adev); return retval; } -static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) +static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct vi_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; @@ -407,12 +387,11 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) return false; } -static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id) { - struct amdgpu_device *adev = 
get_amdgpu_device(kgd); uint32_t temp; enum hqd_dequeue_request_type type; unsigned long flags, end_jiffies; @@ -422,7 +401,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, if (amdgpu_in_reset(adev)) return -EIO; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); if (m->cp_hqd_vmid == 0) WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0); @@ -502,20 +481,19 @@ loop: break; if (time_after(jiffies, end_jiffies)) { pr_err("cp queue preemption time out.\n"); - release_queue(kgd); + release_queue(adev); return -ETIME; } usleep_range(500, 1000); } - release_queue(kgd); + release_queue(adev); return 0; } -static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, unsigned int utimeout) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct vi_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; uint32_t temp; @@ -549,11 +527,10 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, return 0; } -static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, +static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid) { uint32_t value; - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; @@ -561,12 +538,12 @@ static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } -static int kgd_address_watch_disable(struct kgd_dev *kgd) +static int kgd_address_watch_disable(struct amdgpu_device *adev) { return 0; } -static int kgd_address_watch_execute(struct kgd_dev *kgd, +static int kgd_address_watch_execute(struct amdgpu_device *adev, unsigned int watch_point_id, uint32_t cntl_val, uint32_t addr_hi, @@ -575,11 +552,10 @@ static int kgd_address_watch_execute(struct kgd_dev *kgd, return 0; } -static int kgd_wave_control_execute(struct kgd_dev *kgd, +static int kgd_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t data = 0; mutex_lock(&adev->grbm_idx_mutex); @@ -600,28 +576,24 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd, return 0; } -static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, +static uint32_t kgd_address_watch_get_offset(struct amdgpu_device *adev, unsigned int watch_point_id, unsigned int reg_offset) { return 0; } -static void set_scratch_backing_va(struct kgd_dev *kgd, +static void set_scratch_backing_va(struct amdgpu_device *adev, uint64_t va, uint32_t vmid) { - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - - lock_srbm(kgd, 0, 0, 0, vmid); + lock_srbm(adev, 0, 0, 0, vmid); WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va); - unlock_srbm(kgd); + unlock_srbm(adev); } -static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, - uint64_t page_table_base) +static void set_vm_context_page_table_base(struct amdgpu_device *adev, + uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { pr_err("trying to set page table base for wrong VMID\n"); return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index bcc1cbeb8799..ddfe7aff919d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -46,37 +46,26 @@ enum hqd_dequeue_request_type { SAVE_WAVES }; -static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) -{ - return (struct amdgpu_device *)kgd; -} - -static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe, +static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe, uint32_t queue, uint32_t vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - mutex_lock(&adev->srbm_mutex); soc15_grbm_select(adev, mec, pipe, queue, vmid); } -static void unlock_srbm(struct kgd_dev *kgd) +static void unlock_srbm(struct amdgpu_device *adev) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - soc15_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); } -static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id, +static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(kgd, mec, pipe, queue_id, 0); + lock_srbm(adev, mec, pipe, queue_id, 0); } static uint64_t get_queue_mask(struct amdgpu_device *adev, @@ -88,33 +77,29 @@ static uint64_t get_queue_mask(struct amdgpu_device *adev, return 1ull << bit; } -static void release_queue(struct kgd_dev *kgd) +static void release_queue(struct amdgpu_device *adev) { - unlock_srbm(kgd); + unlock_srbm(adev); } -void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, +void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - - lock_srbm(kgd, 0, 0, 0, vmid); + lock_srbm(adev, 0, 0, 0, vmid); WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config); WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases); /* APE1 no longer exists on GFX9 */ - unlock_srbm(kgd); + unlock_srbm(adev); } -int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, +int kgd_gfx_v9_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, unsigned int vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - /* * We have to assume that there is no outstanding mapping. 
* The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because @@ -171,22 +156,21 @@ int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, * but still works */ -int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) +int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t mec; uint32_t pipe; mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(kgd, mec, pipe, 0, 0); + lock_srbm(adev, mec, pipe, 0, 0); WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL), CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); - unlock_srbm(kgd); + unlock_srbm(adev); return 0; } @@ -233,19 +217,18 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd) return (struct v9_sdma_mqd *)mqd; } -int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr, - uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm) +int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t __user *wptr, uint32_t wptr_shift, + uint32_t wptr_mask, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_mqd *m; uint32_t *mqd_hqd; uint32_t reg, hqd_base, data; m = get_mqd(mqd); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */ mqd_hqd = &m->cp_mqd_base_addr_lo; @@ -308,16 +291,15 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data); - release_queue(kgd); + release_queue(adev); return 0; } -int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, +int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t doorbell_off) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; struct v9_mqd *m; uint32_t mec, pipe; @@ -325,7 +307,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, m = get_mqd(mqd); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); @@ -361,16 +343,15 @@ int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, out_unlock: spin_unlock(&adev->gfx.kiq.ring_lock); - release_queue(kgd); + release_queue(adev); return r; } -int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd, +int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t i = 0, reg; #define HQD_N_REGS 56 #define DUMP_REG(addr) do { \ @@ -384,13 +365,13 @@ int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd, if (*dump == NULL) return -ENOMEM; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++) DUMP_REG(reg); - release_queue(kgd); + release_queue(adev); WARN_ON_ONCE(i != HQD_N_REGS); *n_regs = i; @@ -398,10 +379,9 @@ int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd, return 0; } -static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void 
*mqd, +static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; unsigned long end_jiffies; @@ -468,11 +448,10 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, return 0; } -static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, +static int kgd_hqd_sdma_dump(struct amdgpu_device *adev, uint32_t engine_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, engine_id, queue_id); uint32_t i = 0, reg; @@ -500,15 +479,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, return 0; } -bool kgd_gfx_v9_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id) +bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev, + uint64_t queue_address, uint32_t pipe_id, + uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t act; bool retval = false; uint32_t low, high; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE)); if (act) { low = lower_32_bits(queue_address >> 8); @@ -518,13 +497,12 @@ bool kgd_gfx_v9_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI))) retval = true; } - release_queue(kgd); + release_queue(adev); return retval; } -static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) +static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; @@ -541,12 +519,11 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) return false; } -int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd, +int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); enum hqd_dequeue_request_type type; unsigned long end_jiffies; uint32_t temp; @@ -555,7 +532,7 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd, if (amdgpu_in_reset(adev)) return -EIO; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); if (m->cp_hqd_vmid == 0) WREG32_FIELD15_RLC(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0); @@ -584,20 +561,19 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd, break; if (time_after(jiffies, end_jiffies)) { pr_err("cp queue preemption time out.\n"); - release_queue(kgd); + release_queue(adev); return -ETIME; } usleep_range(500, 1000); } - release_queue(kgd); + release_queue(adev); return 0; } -static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, unsigned int utimeout) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; uint32_t temp; @@ -634,11 +610,10 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, return 0; } -bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, +bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid) { uint32_t value; - struct amdgpu_device *adev = (struct 
amdgpu_device *) kgd; value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid); @@ -647,12 +622,12 @@ bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } -int kgd_gfx_v9_address_watch_disable(struct kgd_dev *kgd) +int kgd_gfx_v9_address_watch_disable(struct amdgpu_device *adev) { return 0; } -int kgd_gfx_v9_address_watch_execute(struct kgd_dev *kgd, +int kgd_gfx_v9_address_watch_execute(struct amdgpu_device *adev, unsigned int watch_point_id, uint32_t cntl_val, uint32_t addr_hi, @@ -661,11 +636,10 @@ int kgd_gfx_v9_address_watch_execute(struct kgd_dev *kgd, return 0; } -int kgd_gfx_v9_wave_control_execute(struct kgd_dev *kgd, +int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t data = 0; mutex_lock(&adev->grbm_idx_mutex); @@ -686,18 +660,16 @@ int kgd_gfx_v9_wave_control_execute(struct kgd_dev *kgd, return 0; } -uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd, +uint32_t kgd_gfx_v9_address_watch_get_offset(struct amdgpu_device *adev, unsigned int watch_point_id, unsigned int reg_offset) { return 0; } -void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, +void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { pr_err("trying to set page table base for wrong VMID %u\n", vmid); @@ -804,7 +776,7 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx, * * Reading registers referenced above involves programming GRBM appropriately */ -void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid, +void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, int *pasid_wave_cnt, int *max_waves_per_cu) { int qidx; @@ -818,10 +790,8 @@ void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid, int pasid_tmp; int max_queue_cnt; int vmid_wave_cnt = 0; - struct amdgpu_device *adev; DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES); - adev = get_amdgpu_device(kgd); lock_spi_csq_mutexes(adev); soc15_grbm_select(adev, 1, 0, 0, 0); @@ -882,12 +852,10 @@ void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid, adev->gfx.cu_info.max_waves_per_simd; } -void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd, +void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - - lock_srbm(kgd, 0, 0, 0, vmid); + lock_srbm(adev, 0, 0, 0, vmid); /* * Program TBA registers @@ -905,7 +873,7 @@ void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd, WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI), upper_32_bits(tma_addr >> 8)); - unlock_srbm(kgd); + unlock_srbm(adev); } const struct kfd2kgd_calls gfx_v9_kfd2kgd = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index c63591106879..24be49df26fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -22,48 +22,49 @@ -void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, +void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, 
uint32_t sh_mem_bases); -int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, +int kgd_gfx_v9_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, unsigned int vmid); -int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); -int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, +int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id); +int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, uint32_t wptr_mask, struct mm_struct *mm); -int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, +int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t doorbell_off); -int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd, +int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs); -bool kgd_gfx_v9_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id); -int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd, +bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev, + uint64_t queue_address, uint32_t pipe_id, + uint32_t queue_id); +int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id); -int kgd_gfx_v9_address_watch_disable(struct kgd_dev *kgd); -int kgd_gfx_v9_address_watch_execute(struct kgd_dev *kgd, +int kgd_gfx_v9_address_watch_disable(struct amdgpu_device *adev); +int kgd_gfx_v9_address_watch_execute(struct amdgpu_device *adev, unsigned int watch_point_id, uint32_t cntl_val, uint32_t addr_hi, uint32_t addr_lo); -int kgd_gfx_v9_wave_control_execute(struct kgd_dev *kgd, +int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd); -uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd, +uint32_t kgd_gfx_v9_address_watch_get_offset(struct amdgpu_device *adev, unsigned int watch_point_id, unsigned int reg_offset); -bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, +bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid); -void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, +void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base); -void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid, +void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, int *pasid_wave_cnt, int *max_waves_per_cu); -void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd, +void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 71acd577803e..f9bab963a948 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -60,12 +60,6 @@ static const char * const domain_bit_to_string[] = { static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work); - -static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) -{ - return (struct amdgpu_device *)kgd; -} - static bool kfd_mem_is_attached(struct amdgpu_vm *avm, struct kgd_mem *mem) { @@ -126,8 +120,19 @@ static size_t 
amdgpu_amdkfd_acc_size(uint64_t size) PAGE_ALIGN(size); } +/** + * amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by the size + * of the buffer, including any reserved for control structures + * + * @adev: Device to which the allocated BO belongs + * @size: Size of buffer, in bytes, encapsulated by the BO. This should be + * equivalent to amdgpu_bo_size(BO) + * @alloc_flag: Flag (KFD_IOC_ALLOC_MEM_FLAGS_*) used in allocating the BO + * + * Return: -ENOMEM in case of error, 0 otherwise + */ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, - uint64_t size, u32 domain, bool sg) + uint64_t size, u32 alloc_flag) { uint64_t reserved_for_pt = ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size); @@ -137,20 +142,24 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, acc_size = amdgpu_amdkfd_acc_size(size); vram_needed = 0; - if (domain == AMDGPU_GEM_DOMAIN_GTT) { - /* TTM GTT memory */ + if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { system_mem_needed = acc_size + size; ttm_mem_needed = acc_size + size; - } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) { - /* Userptr */ + } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { + system_mem_needed = acc_size; + ttm_mem_needed = acc_size; + vram_needed = size; + } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { system_mem_needed = acc_size + size; ttm_mem_needed = acc_size; - } else { - /* VRAM and SG */ + } else if (alloc_flag & + (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | + KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { system_mem_needed = acc_size; ttm_mem_needed = acc_size; - if (domain == AMDGPU_GEM_DOMAIN_VRAM) - vram_needed = size; + } else { + pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag); + return -ENOMEM; } spin_lock(&kfd_mem_limit.mem_limit_lock); @@ -166,64 +175,72 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, (adev->kfd.vram_used + vram_needed > adev->gmc.real_vram_size - reserved_for_pt)) { ret = -ENOMEM; - } else { - kfd_mem_limit.system_mem_used += system_mem_needed; - kfd_mem_limit.ttm_mem_used += ttm_mem_needed; - adev->kfd.vram_used += vram_needed; + goto release; } + /* Update memory accounting by decreasing available system + * memory, TTM memory and GPU memory as computed above + */ + adev->kfd.vram_used += vram_needed; + kfd_mem_limit.system_mem_used += system_mem_needed; + kfd_mem_limit.ttm_mem_used += ttm_mem_needed; + +release: spin_unlock(&kfd_mem_limit.mem_limit_lock); return ret; } static void unreserve_mem_limit(struct amdgpu_device *adev, - uint64_t size, u32 domain, bool sg) + uint64_t size, u32 alloc_flag) { size_t acc_size; acc_size = amdgpu_amdkfd_acc_size(size); spin_lock(&kfd_mem_limit.mem_limit_lock); - if (domain == AMDGPU_GEM_DOMAIN_GTT) { + + if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { kfd_mem_limit.system_mem_used -= (acc_size + size); kfd_mem_limit.ttm_mem_used -= (acc_size + size); - } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) { + } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { + kfd_mem_limit.system_mem_used -= acc_size; + kfd_mem_limit.ttm_mem_used -= acc_size; + adev->kfd.vram_used -= size; + } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { kfd_mem_limit.system_mem_used -= (acc_size + size); kfd_mem_limit.ttm_mem_used -= acc_size; - } else { + } else if (alloc_flag & + (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | + KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { kfd_mem_limit.system_mem_used -= acc_size; kfd_mem_limit.ttm_mem_used -= acc_size; - if (domain == AMDGPU_GEM_DOMAIN_VRAM) { - adev->kfd.vram_used -= size; -
WARN_ONCE(adev->kfd.vram_used < 0, - "kfd VRAM memory accounting unbalanced"); - } + } else { + pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag); + goto release; } - WARN_ONCE(kfd_mem_limit.system_mem_used < 0, - "kfd system memory accounting unbalanced"); + + WARN_ONCE(adev->kfd.vram_used < 0, + "KFD VRAM memory accounting unbalanced"); WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0, - "kfd TTM memory accounting unbalanced"); + "KFD TTM memory accounting unbalanced"); + WARN_ONCE(kfd_mem_limit.system_mem_used < 0, + "KFD system memory accounting unbalanced"); +release: spin_unlock(&kfd_mem_limit.mem_limit_lock); } void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - u32 domain = bo->preferred_domains; - bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU); - - if (bo->flags & AMDGPU_AMDKFD_CREATE_USERPTR_BO) { - domain = AMDGPU_GEM_DOMAIN_CPU; - sg = false; - } + u32 alloc_flags = bo->kfd_bo->alloc_flags; + u64 size = amdgpu_bo_size(bo); - unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg); + unreserve_mem_limit(adev, size, alloc_flags); kfree(bo->kfd_bo); } - /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's * reservation object. * @@ -646,12 +663,6 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem, if (IS_ERR(gobj)) return PTR_ERR(gobj); - /* Import takes an extra reference on the dmabuf. Drop it now to - * avoid leaking it. We only need the one reference in - * kgd_mem->dmabuf. - */ - dma_buf_put(mem->dmabuf); - *bo = gem_to_amdgpu_bo(gobj); (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE; (*bo)->parent = amdgpu_bo_ref(mem->bo); @@ -697,10 +708,12 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va, va + bo_size, vm); - if (adev == bo_adev || (mem->domain == AMDGPU_GEM_DOMAIN_VRAM && - amdgpu_xgmi_same_hive(adev, bo_adev))) { - /* Mappings on the local GPU and VRAM mappings in the - * local hive share the original BO + if (adev == bo_adev || + (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && adev->ram_is_direct_mapped) || + (mem->domain == AMDGPU_GEM_DOMAIN_VRAM && amdgpu_xgmi_same_hive(adev, bo_adev))) { + /* Mappings on the local GPU, or VRAM mappings in the + * local hive, or userptr mappings in IOMMU direct-map mode + * share the original BO */ attachment[i]->type = KFD_MEM_ATT_SHARED; bo[i] = mem->bo; @@ -1278,12 +1291,60 @@ create_evict_fence_fail: return ret; } -int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, +/** + * amdgpu_amdkfd_gpuvm_pin_bo() - Pins a BO using the following criteria + * @bo: Handle of buffer object being pinned + * @domain: Domain into which BO should be pinned + * + * - USERPTR BOs are UNPINNABLE and will return an error + * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their + * PIN count incremented. It is valid to PIN a BO multiple times + * + * Return: 0 if successful in pinning, non-zero in case of error. 
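+ *
+ * A usage sketch (illustrative; it mirrors the MMIO/DOORBELL allocation
+ * path later in this patch):
+ *
+ *	ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT);
+ *	if (ret)
+ *		goto err_pin_bo;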
+ */ +static int amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain) +{ + int ret = 0; + + ret = amdgpu_bo_reserve(bo, false); + if (unlikely(ret)) + return ret; + + ret = amdgpu_bo_pin_restricted(bo, domain, 0, 0); + if (ret) + pr_err("Error in Pinning BO to domain: %d\n", domain); + + amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false); + amdgpu_bo_unreserve(bo); + + return ret; +} + +/** + * amdgpu_amdkfd_gpuvm_unpin_bo() - Unpins a BO using the following criteria + * @bo: Handle of buffer object being unpinned + * + * - Is an illegal request for USERPTR BOs and is ignored + * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their + * PIN count decremented. Calls to UNPIN must balance calls to PIN + */ +static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo) +{ + int ret = 0; + + ret = amdgpu_bo_reserve(bo, false); + if (unlikely(ret)) + return; + + amdgpu_bo_unpin(bo); + amdgpu_bo_unreserve(bo); +} + +int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev, struct file *filp, u32 pasid, void **process_info, struct dma_fence **ef) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_fpriv *drv_priv; struct amdgpu_vm *avm; int ret; @@ -1359,12 +1420,12 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, } } -void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv) +void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev, + void *drm_priv) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_vm *avm; - if (WARN_ON(!kgd || !drm_priv)) + if (WARN_ON(!adev || !drm_priv)) return; avm = drm_priv_to_vm(drm_priv); @@ -1392,17 +1453,16 @@ uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv) } int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( - struct kgd_dev *kgd, uint64_t va, uint64_t size, + struct amdgpu_device *adev, uint64_t va, uint64_t size, void *drm_priv, struct kgd_mem **mem, uint64_t *offset, uint32_t flags) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); enum ttm_bo_type bo_type = ttm_bo_type_device; struct sg_table *sg = NULL; uint64_t user_addr = 0; struct amdgpu_bo *bo; - struct drm_gem_object *gobj; + struct drm_gem_object *gobj = NULL; u32 domain, alloc_domain; u64 alloc_flags; int ret; @@ -1460,7 +1520,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( amdgpu_sync_create(&(*mem)->sync); - ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg); + ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, flags); if (ret) { pr_debug("Insufficient memory\n"); goto err_reserve_limit; @@ -1501,6 +1561,15 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( ret = init_user_pages(*mem, user_addr); if (ret) goto allocate_init_user_pages_failed; + } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | + KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { + ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT); + if (ret) { + pr_err("Pinning MMIO/DOORBELL BO during ALLOC FAILED\n"); + goto err_pin_bo; + } + bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; + bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; } if (offset) @@ -1509,17 +1578,20 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( return 0; allocate_init_user_pages_failed: +err_pin_bo: remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); drm_vma_node_revoke(&gobj->vma_node, drm_priv); err_node_allow: - drm_gem_object_put(gobj); /* Don't unreserve system mem limit twice */ goto err_reserve_limit; err_bo_create: - unreserve_mem_limit(adev, size, 
alloc_domain, !!sg); + unreserve_mem_limit(adev, size, flags); err_reserve_limit: mutex_destroy(&(*mem)->lock); - kfree(*mem); + if (gobj) + drm_gem_object_put(gobj); + else + kfree(*mem); err: if (sg) { sg_free_table(sg); @@ -1529,7 +1601,7 @@ err: } int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, + struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv, uint64_t *size) { struct amdkfd_process_info *process_info = mem->process_info; @@ -1542,6 +1614,14 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( bool is_imported = false; mutex_lock(&mem->lock); + + /* Unpin MMIO/DOORBELL BOs that were pinned during allocation */ + if (mem->alloc_flags & + (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | + KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { + amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo); + } + mapped_to_gpu_memory = mem->mapped_to_gpu_memory; is_imported = mem->is_imported; mutex_unlock(&mem->lock); @@ -1621,10 +1701,9 @@ } int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, + struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv, bool *table_freed) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); int ret; struct amdgpu_bo *bo; @@ -1751,7 +1830,7 @@ out: } int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv) + struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv) { struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); struct amdkfd_process_info *process_info = avm->process_info; @@ -1812,7 +1891,7 @@ out: } int amdgpu_amdkfd_gpuvm_sync_memory( - struct kgd_dev *kgd, struct kgd_mem *mem, bool intr) + struct amdgpu_device *adev, struct kgd_mem *mem, bool intr) { struct amdgpu_sync sync; int ret; @@ -1828,7 +1907,7 @@ int amdgpu_amdkfd_gpuvm_sync_memory( return ret; } -int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd, +int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev, struct kgd_mem *mem, void **kptr, uint64_t *size) { int ret; @@ -1884,7 +1963,8 @@ bo_reserve_failed: return ret; } -void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_dev *kgd, struct kgd_mem *mem) +void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct amdgpu_device *adev, + struct kgd_mem *mem) { struct amdgpu_bo *bo = mem->bo; @@ -1894,12 +1974,9 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_dev *kgd, struct kg amdgpu_bo_unreserve(bo); } -int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd, - struct kfd_vm_fault_info *mem) +int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev, + struct kfd_vm_fault_info *mem) { - struct amdgpu_device *adev; - - adev = (struct amdgpu_device *)kgd; if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) { *mem = *adev->gmc.vm_fault_info; mb(); @@ -1908,13 +1985,12 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd, return 0; } -int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, +int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev, struct dma_buf *dma_buf, uint64_t va, void *drm_priv, struct kgd_mem **mem, uint64_t *size, uint64_t *mmap_offset) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); struct drm_gem_object *obj; struct amdgpu_bo *bo; @@ -2541,11 +2617,9 @@ int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem) } /* Returns GPU-specific tiling mode 
information */ -int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd, +int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev, struct tile_config *config) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - config->gb_addr_config = adev->gfx.config.gb_addr_config; config->tile_config_ptr = adev->gfx.config.tile_mode_array; config->num_tile_configs = diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 96b7bb13a2dd..12a6b1c99c93 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1569,6 +1569,18 @@ void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev, WREG32(adev->bios_scratch_reg_offset + 3, tmp); } +void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev, + u32 backlight_level) +{ + u32 tmp = RREG32(adev->bios_scratch_reg_offset + 2); + + tmp &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK; + tmp |= (backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) & + ATOM_S2_CURRENT_BL_LEVEL_MASK; + + WREG32(adev->bios_scratch_reg_offset + 2, tmp); +} + bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev) { u32 tmp = RREG32(adev->bios_scratch_reg_offset + 7); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h index 8cc0222dba19..27e74b1fc260 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h @@ -185,6 +185,8 @@ bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev); void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock); void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev, bool hung); +void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev, + u32 backlight_level); bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev); void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index 97178b307ed6..4d4ddf026faf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -470,8 +470,8 @@ bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *ade /** * amdgpu_atomfirmware_ras_rom_addr -- Get the RAS EEPROM addr from VBIOS - * adev: amdgpu_device pointer - * i2c_address: pointer to u8; if not NULL, will contain + * @adev: amdgpu_device pointer + * @i2c_address: pointer to u8; if not NULL, will contain * the RAS EEPROM address if the function returns true * * Return true if VBIOS supports RAS EEPROM address reporting, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 7abe9500c0c6..d6d986be906a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -11,6 +11,7 @@ #include <linux/pci.h> #include <linux/delay.h> +#include "amdgpu.h" #include "amd_acpi.h" #define AMDGPU_PX_QUIRK_FORCE_ATPX (1 << 0) @@ -165,7 +166,7 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas } /** - * amdgpu_atpx_validate_functions - validate ATPX functions + * amdgpu_atpx_validate - validate ATPX functions * * @atpx: amdgpu atpx struct * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 
b9c11c2b2885..c16a2704ced6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -108,7 +108,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector) case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_HDMIB: if (amdgpu_connector->use_digital) { - if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) { + if (connector->display_info.is_hdmi) { if (connector->display_info.bpc) bpc = connector->display_info.bpc; } @@ -116,7 +116,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector) break; case DRM_MODE_CONNECTOR_DVID: case DRM_MODE_CONNECTOR_HDMIA: - if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) { + if (connector->display_info.is_hdmi) { if (connector->display_info.bpc) bpc = connector->display_info.bpc; } @@ -125,7 +125,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector) dig_connector = amdgpu_connector->con_priv; if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) || - drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) { + connector->display_info.is_hdmi) { if (connector->display_info.bpc) bpc = connector->display_info.bpc; } @@ -149,7 +149,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector) break; } - if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) { + if (connector->display_info.is_hdmi) { /* * Pre DCE-8 hw can't handle > 12 bpc, and more than 12 bpc doesn't make * much sense without support for > 12 bpc framebuffers. RGB 4:4:4 at @@ -315,8 +315,10 @@ static void amdgpu_connector_get_edid(struct drm_connector *connector) if (!amdgpu_connector->edid) { /* some laptops provide a hardcoded edid in rom for LCDs */ if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) || - (connector->connector_type == DRM_MODE_CONNECTOR_eDP))) + (connector->connector_type == DRM_MODE_CONNECTOR_eDP))) { amdgpu_connector->edid = amdgpu_connector_get_hardcoded_edid(adev); + drm_connector_update_edid_property(connector, amdgpu_connector->edid); + } } } @@ -326,6 +328,7 @@ static void amdgpu_connector_free_edid(struct drm_connector *connector) kfree(amdgpu_connector->edid); amdgpu_connector->edid = NULL; + drm_connector_update_edid_property(connector, NULL); } static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector) @@ -387,6 +390,9 @@ amdgpu_connector_lcd_native_mode(struct drm_encoder *encoder) native_mode->vdisplay != 0 && native_mode->clock != 0) { mode = drm_mode_duplicate(dev, native_mode); + if (!mode) + return NULL; + mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; drm_mode_set_name(mode); @@ -401,6 +407,9 @@ amdgpu_connector_lcd_native_mode(struct drm_encoder *encoder) * simpler. 
*/ mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false); + if (!mode) + return NULL; + mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name); } @@ -827,6 +836,7 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector) amdgpu_connector_get_edid(connector); ret = amdgpu_connector_ddc_get_modes(connector); + amdgpu_get_native_mode(connector); return ret; } @@ -1170,7 +1180,7 @@ static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) || (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) { return MODE_OK; - } else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) { + } else if (connector->display_info.is_hdmi) { /* HDMI 1.3+ supports max clock of 340 Mhz */ if (mode->clock > 340000) return MODE_CLOCK_HIGH; @@ -1462,7 +1472,7 @@ static enum drm_mode_status amdgpu_connector_dp_mode_valid(struct drm_connector (amdgpu_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { return amdgpu_atombios_dp_mode_valid_helper(connector, mode); } else { - if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) { + if (connector->display_info.is_hdmi) { /* HDMI 1.3+ supports max clock of 340 Mhz */ if (mode->clock > 340000) return MODE_CLOCK_HIGH; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 164d6a9e9fbb..25e2e5bf90eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -1618,6 +1618,9 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) if (!debugfs_initialized()) return 0; + debugfs_create_x32("amdgpu_smu_debug", 0600, root, + &adev->pm.smu_debug_mask); + ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev, &fops_ib_preempt); if (IS_ERR(ent)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 5625f7736e37..8219e40240ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -30,6 +30,7 @@ #include <linux/module.h> #include <linux/console.h> #include <linux/slab.h> +#include <linux/iommu.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_probe_helper.h> @@ -331,7 +332,7 @@ void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos, } /** - * amdgpu_device_vram_access - access vram by vram aperature + * amdgpu_device_aper_access - access vram by vram aperture * * @adev: amdgpu_device pointer * @pos: offset of the buffer in vram @@ -550,11 +551,11 @@ void amdgpu_device_wreg(struct amdgpu_device *adev, trace_amdgpu_device_wreg(adev->pdev->device, reg, v); } -/* +/** * amdgpu_mm_wreg_mmio_rlc - write register either with mmio or with RLC path if in range * * this function is invoked only the debugfs register access - * */ + */ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v) { @@ -1100,7 +1101,7 @@ static void amdgpu_device_wb_fini(struct amdgpu_device *adev) } /** - * amdgpu_device_wb_init- Init Writeback driver info and allocate memory + * amdgpu_device_wb_init - Init Writeback driver info and allocate memory * * @adev: amdgpu_device pointer * @@ -2657,6 +2658,36 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) return 0; } +/** + * amdgpu_device_smu_fini_early - smu hw_fini wrapper + * + * @adev:
amdgpu_device pointer + * + * For ASICs that need to disable SMC first + */ +static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev) +{ + int i, r; + + if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0)) + return; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_blocks[i].status.hw) + continue; + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { + r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); + /* XXX handle errors */ + if (r) { + DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + } + adev->ip_blocks[i].status.hw = false; + break; + } + } +} + static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) { int i, r; @@ -2677,21 +2708,8 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); - /* need to disable SMC first */ - for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_blocks[i].status.hw) - continue; - if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { - r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); - /* XXX handle errors */ - if (r) { - DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - } - adev->ip_blocks[i].status.hw = false; - break; - } - } + /* Workaround for ASICs that need to disable SMC first */ + amdgpu_device_smu_fini_early(adev); for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.hw) @@ -2733,8 +2751,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done) amdgpu_virt_release_ras_err_handler_data(adev); - amdgpu_ras_pre_fini(adev); - if (adev->gmc.xgmi.num_physical_nodes > 1) amdgpu_xgmi_remove_device(adev); @@ -3367,6 +3383,22 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) return ret; } +/** + * amdgpu_device_check_iommu_direct_map - check if RAM is direct mapped to GPU + * + * @adev: amdgpu_device pointer + * + * RAM is direct mapped to GPU if the IOMMU is not enabled or is in pass-through mode + */ +static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev) +{ + struct iommu_domain *domain; + + domain = iommu_get_domain_for_dev(adev->dev); + if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY) + adev->ram_is_direct_mapped = true; +} + static const struct attribute *amdgpu_dev_attributes[] = { &dev_attr_product_name.attr, &dev_attr_product_number.attr, @@ -3509,6 +3541,9 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->rmmio_size = pci_resource_len(adev->pdev, 2); } + for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++) + atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN); + adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); if (adev->rmmio == NULL) { return -ENOMEM; @@ -3684,8 +3719,6 @@ fence_driver_init: /* Get a log2 for easy divisions.
*/ adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps)); - amdgpu_fbdev_init(adev); - r = amdgpu_pm_sysfs_init(adev); if (r) { adev->pm_sysfs_en = false; @@ -3769,6 +3802,8 @@ fence_driver_init: queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work, msecs_to_jiffies(AMDGPU_RESUME_MS)); + amdgpu_device_check_iommu_direct_map(adev); + return 0; release_ras_con: @@ -3802,7 +3837,7 @@ static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev) } /** - * amdgpu_device_fini - tear down the driver + * amdgpu_device_fini_hw - tear down the driver * * @adev: amdgpu_device pointer * @@ -3830,7 +3865,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) /* disable all interrupts */ amdgpu_irq_disable_all(adev); if (adev->mode_info.mode_config_initialized){ - if (!amdgpu_device_has_dc_support(adev)) + if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev))) drm_helper_force_disable_all(adev_to_drm(adev)); else drm_atomic_helper_shutdown(adev_to_drm(adev)); @@ -3843,7 +3878,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) amdgpu_ucode_sysfs_fini(adev); sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes); - amdgpu_fbdev_fini(adev); + /* disabling the RAS feature must happen before hw fini */ + amdgpu_ras_pre_fini(adev); amdgpu_device_ip_fini_early(adev); @@ -3939,7 +3975,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) drm_kms_helper_poll_disable(dev); if (fbcon) - amdgpu_fbdev_set_suspend(adev, 1); + drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true); cancel_delayed_work_sync(&adev->delayed_init_work); @@ -4016,7 +4052,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon) flush_delayed_work(&adev->delayed_init_work); if (fbcon) - amdgpu_fbdev_set_suspend(adev, 0); + drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false); drm_kms_helper_poll_enable(dev); @@ -4285,6 +4321,11 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, bool from_hypervisor) { int r; + struct amdgpu_hive_info *hive = NULL; + + amdgpu_amdkfd_pre_reset(adev); if (from_hypervisor) r = amdgpu_virt_request_full_gpu(adev, true); @@ -4311,9 +4352,19 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, if (r) goto error; - amdgpu_irq_gpu_reset_resume_helper(adev); - r = amdgpu_ib_ring_tests(adev); - amdgpu_amdkfd_post_reset(adev); + hive = amdgpu_get_xgmi_hive(adev); + /* Update PSP FW topology after reset */ + if (hive && adev->gmc.xgmi.num_physical_nodes > 1) + r = amdgpu_xgmi_update_topology(hive, adev); + + if (hive) + amdgpu_put_xgmi_hive(hive); + + if (!r) { + amdgpu_irq_gpu_reset_resume_helper(adev); + r = amdgpu_ib_ring_tests(adev); + amdgpu_amdkfd_post_reset(adev); + } error: if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { @@ -4646,7 +4697,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, if (r) goto out; - amdgpu_fbdev_set_suspend(tmp_adev, 0); + drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false); /* * The GPU enters bad state once faulty pages @@ -4745,7 +4796,7 @@ static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgp { struct amdgpu_device *tmp_adev = NULL; - if (adev->gmc.xgmi.num_physical_nodes > 1) { + if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) { if (!hive) { dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes"); return -ENODEV; @@ -4957,7 +5008,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, * We always reset all
schedulers for device and all devices for XGMI * hive so that should take care of them too. */ - hive = amdgpu_get_xgmi_hive(adev); + if (!amdgpu_sriov_vf(adev)) + hive = amdgpu_get_xgmi_hive(adev); if (hive) { if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) { DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress", @@ -4998,7 +5050,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, * to put adev in the 1st position. */ INIT_LIST_HEAD(&device_list); - if (adev->gmc.xgmi.num_physical_nodes > 1) { + if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) { list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) list_add_tail(&tmp_adev->reset_list, &device_list); if (!list_is_first(&adev->reset_list, &device_list)) @@ -5028,7 +5080,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, cancel_delayed_work_sync(&tmp_adev->delayed_init_work); - amdgpu_amdkfd_pre_reset(tmp_adev); + if (!amdgpu_sriov_vf(tmp_adev)) + amdgpu_amdkfd_pre_reset(tmp_adev); /* * Mark these ASICs to be reseted as untracked first @@ -5036,7 +5089,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, */ amdgpu_unregister_gpu_instance(tmp_adev); - amdgpu_fbdev_set_suspend(tmp_adev, 1); + drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true); /* disable ras on ALL IPs */ if (!need_emergency_restart && @@ -5086,7 +5139,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter)); /* Actual ASIC resets if needed.*/ - /* TODO Implement XGMI hive reset logic for SRIOV */ + /* Host driver will handle XGMI hive reset for SRIOV */ if (amdgpu_sriov_vf(adev)) { r = amdgpu_device_reset_sriov(adev, job ? false : true); if (r) @@ -5127,7 +5180,7 @@ skip_hw_reset: drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res); } - if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) { + if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) { drm_helper_resume_force_mode(adev_to_drm(tmp_adev)); } @@ -5148,7 +5201,7 @@ skip_sched_resume: list_for_each_entry(tmp_adev, device_list_handle, reset_list) { /* unlock kfd: SRIOV would do it separately */ if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev)) - amdgpu_amdkfd_post_reset(tmp_adev); + amdgpu_amdkfd_post_reset(tmp_adev); /* kfd_post_reset will do nothing if kfd device is not initialized, * need to bring up kfd here if it's not be initialized before @@ -5631,3 +5684,42 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, amdgpu_asic_invalidate_hdp(adev, ring); } + +/** + * amdgpu_device_halt() - bring hardware to some kind of halt state + * + * @adev: amdgpu_device pointer + * + * Bring hardware to some kind of halt state so that no one can touch it + * any more. It helps to maintain error context when an error occurs. + * Compared to a simple hang, the system will stay stable at least for SSH + * access. Then it should be trivial to inspect the hardware state and + * see what's going on. Implemented as follows: + * + * 1. drm_dev_unplug() makes device inaccessible to user space (IOCTLs, etc.), + * clears all CPU mappings to device, disallows remappings through page faults + * 2. amdgpu_irq_disable_all() disables all interrupts + * 3. amdgpu_fence_driver_hw_fini() signals all HW fences + * 4. set adev->no_hw_access to avoid potential crashes after step 5 + * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings + * 6.
pci_disable_device() and pci_wait_for_pending_transaction() + * flush any in flight DMA operations + */ +void amdgpu_device_halt(struct amdgpu_device *adev) +{ + struct pci_dev *pdev = adev->pdev; + struct drm_device *ddev = adev_to_drm(adev); + + drm_dev_unplug(ddev); + + amdgpu_irq_disable_all(adev); + + amdgpu_fence_driver_hw_fini(adev); + + adev->no_hw_access = true; + + amdgpu_device_unmap_mmio(adev); + + pci_disable_device(pdev); + pci_wait_for_pending_transaction(pdev); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index ff70bc233489..5bc072220f03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -67,7 +67,8 @@ #include "smuio_v11_0_6.h" #include "smuio_v13_0.h" -MODULE_FIRMWARE("amdgpu/ip_discovery.bin"); +#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin" +MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY); #define mmRCC_CONFIG_MEMSIZE 0xde3 #define mmMM_INDEX 0x0 @@ -157,6 +158,8 @@ static int hw_id_map[MAX_HWIP] = { [HDP_HWIP] = HDP_HWID, [SDMA0_HWIP] = SDMA0_HWID, [SDMA1_HWIP] = SDMA1_HWID, + [SDMA2_HWIP] = SDMA2_HWID, + [SDMA3_HWIP] = SDMA3_HWID, [MMHUB_HWIP] = MMHUB_HWID, [ATHUB_HWIP] = ATHUB_HWID, [NBIO_HWIP] = NBIF_HWID, @@ -177,7 +180,7 @@ static int hw_id_map[MAX_HWIP] = { [DCI_HWIP] = DCI_HWID, }; -static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary) +static int amdgpu_discovery_read_binary_from_vram(struct amdgpu_device *adev, uint8_t *binary) { uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET; @@ -187,6 +190,34 @@ static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *bin return 0; } +static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary) +{ + const struct firmware *fw; + const char *fw_name; + int r; + + switch (amdgpu_discovery) { + case 2: + fw_name = FIRMWARE_IP_DISCOVERY; + break; + default: + dev_warn(adev->dev, "amdgpu_discovery is not set properly\n"); + return -EINVAL; + } + + r = request_firmware(&fw, fw_name, adev->dev); + if (r) { + dev_err(adev->dev, "can't load firmware \"%s\"\n", + fw_name); + return r; + } + + memcpy((u8 *)binary, (u8 *)fw->data, adev->mman.discovery_tmr_size); + release_firmware(fw); + + return 0; +} + static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size) { uint16_t checksum = 0; @@ -204,13 +235,20 @@ static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size return !!(amdgpu_discovery_calculate_checksum(data, size) == expected); } +static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary) +{ + struct binary_header *bhdr; + bhdr = (struct binary_header *)binary; + + return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE); +} + static int amdgpu_discovery_init(struct amdgpu_device *adev) { struct table_info *info; struct binary_header *bhdr; struct ip_discovery_header *ihdr; struct gpu_info_header *ghdr; - const struct firmware *fw; uint16_t offset; uint16_t size; uint16_t checksum; @@ -221,39 +259,40 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) if (!adev->mman.discovery_bin) return -ENOMEM; - if (amdgpu_discovery == 2) { - r = request_firmware(&fw, "amdgpu/ip_discovery.bin", adev->dev); - if (r) - goto get_from_vram; - dev_info(adev->dev, "Using IP discovery from file\n"); - memcpy((u8 *)adev->mman.discovery_bin, (u8 *)fw->data, - adev->mman.discovery_tmr_size); - 
release_firmware(fw); - } else { -get_from_vram: - r = amdgpu_discovery_read_binary(adev, adev->mman.discovery_bin); + r = amdgpu_discovery_read_binary_from_vram(adev, adev->mman.discovery_bin); + if (r) { + dev_err(adev->dev, "failed to read ip discovery binary from vram\n"); + r = -EINVAL; + goto out; + } + + if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) { + dev_warn(adev->dev, "get invalid ip discovery binary signature from vram\n"); + /* retry reading ip discovery binary from file */ + r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin); if (r) { - DRM_ERROR("failed to read ip discovery binary\n"); + dev_err(adev->dev, "failed to read ip discovery binary from file\n"); + r = -EINVAL; + goto out; + } + /* check the ip discovery binary signature */ + if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) { + dev_warn(adev->dev, "get invalid ip discovery binary signature from file\n"); + r = -EINVAL; goto out; } } bhdr = (struct binary_header *)adev->mman.discovery_bin; - if (le32_to_cpu(bhdr->binary_signature) != BINARY_SIGNATURE) { - DRM_ERROR("invalid ip discovery binary signature\n"); - r = -EINVAL; - goto out; - } - offset = offsetof(struct binary_header, binary_checksum) + sizeof(bhdr->binary_checksum); - size = bhdr->binary_size - offset; - checksum = bhdr->binary_checksum; + size = le16_to_cpu(bhdr->binary_size) - offset; + checksum = le16_to_cpu(bhdr->binary_checksum); if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, size, checksum)) { - DRM_ERROR("invalid ip discovery binary checksum\n"); + dev_err(adev->dev, "invalid ip discovery binary checksum\n"); r = -EINVAL; goto out; } @@ -264,14 +303,14 @@ get_from_vram: ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + offset); if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) { - DRM_ERROR("invalid ip discovery data table signature\n"); + dev_err(adev->dev, "invalid ip discovery data table signature\n"); r = -EINVAL; goto out; } if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - ihdr->size, checksum)) { - DRM_ERROR("invalid ip discovery data table checksum\n"); + le16_to_cpu(ihdr->size), checksum)) { + dev_err(adev->dev, "invalid ip discovery data table checksum\n"); r = -EINVAL; goto out; } @@ -282,8 +321,8 @@ get_from_vram: ghdr = (struct gpu_info_header *)(adev->mman.discovery_bin + offset); if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - ghdr->size, checksum)) { - DRM_ERROR("invalid gc data table checksum\n"); + le32_to_cpu(ghdr->size), checksum)) { + dev_err(adev->dev, "invalid gc data table checksum\n"); r = -EINVAL; goto out; } @@ -377,8 +416,18 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) ip->major, ip->minor, ip->revision); - if (le16_to_cpu(ip->hw_id) == VCN_HWID) + if (le16_to_cpu(ip->hw_id) == VCN_HWID) { + /* Bit [5:0]: original revision value + * Bit [7:6]: en/decode capability: + * 0b00 : VCN functions normally + * 0b10 : encode is disabled + * 0b01 : decode is disabled + */ + adev->vcn.vcn_config[adev->vcn.num_vcn_inst] = + ip->revision & 0xc0; + ip->revision &= ~0xc0; adev->vcn.num_vcn_inst++; + } if (le16_to_cpu(ip->hw_id) == SDMA0_HWID || le16_to_cpu(ip->hw_id) == SDMA1_HWID || le16_to_cpu(ip->hw_id) == SDMA2_HWID || @@ -470,14 +519,6 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int n return -EINVAL; } - -int amdgpu_discovery_get_vcn_version(struct amdgpu_device *adev, int vcn_instance, - int *major,
int *minor, int *revision) -{ - return amdgpu_discovery_get_ip_version(adev, VCN_HWID, - vcn_instance, major, minor, revision); -} - void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) { struct binary_header *bhdr; @@ -489,10 +530,10 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset)); for (i = 0; i < 32; i++) { - if (le32_to_cpu(harvest_info->list[i].hw_id) == 0) + if (le16_to_cpu(harvest_info->list[i].hw_id) == 0) break; - switch (le32_to_cpu(harvest_info->list[i].hw_id)) { + switch (le16_to_cpu(harvest_info->list[i].hw_id)) { case VCN_HWID: vcn_harvest_count++; if (harvest_info->list[i].number_instance == 0) @@ -587,6 +628,9 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &nv_common_ip_block); break; default: + dev_err(adev->dev, + "Failed to add common ip block(GC_HWIP:0x%x)\n", + adev->ip_versions[GC_HWIP][0]); return -EINVAL; } return 0; @@ -619,6 +663,9 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add gmc ip block(GC_HWIP:0x%x)\n", + adev->ip_versions[GC_HWIP][0]); return -EINVAL; } return 0; @@ -648,6 +695,9 @@ static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); break; default: + dev_err(adev->dev, + "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n", + adev->ip_versions[OSSSYS_HWIP][0]); return -EINVAL; } return 0; @@ -688,6 +738,9 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add psp ip block(MP0_HWIP:0x%x)\n", + adev->ip_versions[MP0_HWIP][0]); return -EINVAL; } return 0; @@ -726,6 +779,9 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add smu ip block(MP1_HWIP:0x%x)\n", + adev->ip_versions[MP1_HWIP][0]); return -EINVAL; } return 0; @@ -753,6 +809,9 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &dm_ip_block); break; default: + dev_err(adev->dev, + "Failed to add dm ip block(DCE_HWIP:0x%x)\n", + adev->ip_versions[DCE_HWIP][0]); return -EINVAL; } } else if (adev->ip_versions[DCI_HWIP][0]) { @@ -763,6 +822,9 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &dm_ip_block); break; default: + dev_err(adev->dev, + "Failed to add dm ip block(DCI_HWIP:0x%x)\n", + adev->ip_versions[DCI_HWIP][0]); return -EINVAL; } #endif @@ -796,6 +858,9 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add gfx ip block(GC_HWIP:0x%x)\n", + adev->ip_versions[GC_HWIP][0]); return -EINVAL; } return 0; @@ -829,6 +894,9 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); break; default: + dev_err(adev->dev, + "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n", + adev->ip_versions[SDMA0_HWIP][0]); return -EINVAL; } return 0; @@ -845,6 +913,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); 
break; default: + dev_err(adev->dev, + "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n", + adev->ip_versions[UVD_HWIP][0]); return -EINVAL; } switch (adev->ip_versions[VCE_HWIP][0]) { @@ -855,6 +926,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n", + adev->ip_versions[VCE_HWIP][0]); return -EINVAL; } } else { @@ -882,9 +956,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) break; case IP_VERSION(3, 0, 0): case IP_VERSION(3, 0, 16): - case IP_VERSION(3, 0, 64): case IP_VERSION(3, 1, 1): case IP_VERSION(3, 0, 2): + case IP_VERSION(3, 0, 192): amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); if (!amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block); @@ -893,6 +967,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n", + adev->ip_versions[UVD_HWIP][0]); return -EINVAL; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h index 0ea029e3b850..14537cec19db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h @@ -33,8 +33,6 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev); int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance, int *major, int *minor, int *revision); -int amdgpu_discovery_get_vcn_version(struct amdgpu_device *adev, int vcn_instance, - int *major, int *minor, int *revision); int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev); int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index dc50c05f23fc..82011e75ed85 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -83,9 +83,6 @@ static void amdgpu_display_flip_work_func(struct work_struct *__work) unsigned i; int vpos, hpos; - if (amdgpu_display_flip_handle_fence(work, &work->excl)) - return; - for (i = 0; i < work->shared_count; ++i) if (amdgpu_display_flip_handle_fence(work, &work->shared[i])) return; @@ -203,7 +200,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, goto unpin; } - r = dma_resv_get_fences(new_abo->tbo.base.resv, &work->excl, + r = dma_resv_get_fences(new_abo->tbo.base.resv, NULL, &work->shared_count, &work->shared); if (unlikely(r != 0)) { DRM_ERROR("failed to get fences for buffer\n"); @@ -253,7 +250,6 @@ unreserve: cleanup: amdgpu_bo_unref(&work->old_abo); - dma_fence_put(work->excl); for (i = 0; i < work->shared_count; ++i) dma_fence_put(work->shared[i]); kfree(work->shared); @@ -1364,7 +1360,7 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc, if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) && ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) || ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) && - drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) && + connector->display_info.is_hdmi && amdgpu_display_is_hdtv_mode(mode)))) { if (amdgpu_encoder->underscan_hborder != 0) amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder; @@ -1603,13 +1599,10 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev) continue; } robj = 
gem_to_amdgpu_bo(fb->obj[0]); - /* don't unpin kernel fb objects */ - if (!amdgpu_fbdev_robj_is_fb(adev, robj)) { - r = amdgpu_bo_reserve(robj, true); - if (r == 0) { - amdgpu_bo_unpin(robj); - amdgpu_bo_unreserve(robj); - } + r = amdgpu_bo_reserve(robj, true); + if (r == 0) { + amdgpu_bo_unpin(robj); + amdgpu_bo_unreserve(robj); } } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index ae6ab93c868b..4896c876ffec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -61,9 +61,6 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf, if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0) attach->peer2peer = false; - if (attach->dev->driver == adev->dev->driver) - return 0; - r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) goto out; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index ad95de6399af..02099058d0d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -31,7 +31,6 @@ #include "amdgpu_drv.h" #include <drm/drm_pciids.h> -#include <linux/console.h> #include <linux/module.h> #include <linux/pm_runtime.h> #include <linux/vga_switcheroo.h> @@ -314,9 +313,12 @@ module_param_named(dpm, amdgpu_dpm, int, 0444); /** * DOC: fw_load_type (int) - * Set different firmware loading type for debugging (0 = direct, 1 = SMU, 2 = PSP). The default is -1 (auto). + * Set different firmware loading type for debugging, if supported. + * Set to 0 to force direct loading if supported by the ASIC. Set + * to -1 to select the default loading mode for the ASIC, as defined + * by the driver. The default is -1 (auto). */ -MODULE_PARM_DESC(fw_load_type, "firmware loading type (0 = direct, 1 = SMU, 2 = PSP, -1 = auto)"); +MODULE_PARM_DESC(fw_load_type, "firmware loading type (0 = force direct if supported, -1 = auto)"); module_param_named(fw_load_type, amdgpu_fw_load_type, int, 0444); /** @@ -2002,6 +2004,19 @@ retry_init: goto err_pci; } + /* + * 1. don't init fbdev on hw without DCE + * 2. 
don't init fbdev if there are no connectors + */ + if (adev->mode_info.mode_config_initialized && + !list_empty(&adev_to_drm(adev)->mode_config.connector_list)) { + /* select 8 bpp console on low vram cards */ + if (adev->gmc.real_vram_size <= (32*1024*1024)) + drm_fbdev_generic_setup(adev_to_drm(adev), 8); + else + drm_fbdev_generic_setup(adev_to_drm(adev), 32); + } + ret = amdgpu_debugfs_init(adev); if (ret) DRM_ERROR("Creating debugfs files failed (%d).\n", ret); @@ -2516,10 +2531,8 @@ static int __init amdgpu_init(void) { int r; - if (vgacon_text_force()) { - DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n"); + if (drm_firmware_drivers_only()) return -EINVAL; - } r = amdgpu_sync_init(); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h index e3a4f7048042..8178323e4bef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h @@ -45,4 +45,7 @@ long amdgpu_drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +long amdgpu_kms_compat_ioctl(struct file *filp, + unsigned int cmd, unsigned long arg); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c index af4ef84e27a7..c96e458ed088 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c @@ -222,7 +222,7 @@ bool amdgpu_dig_monitor_is_duallink(struct drm_encoder *encoder, case DRM_MODE_CONNECTOR_HDMIB: if (amdgpu_connector->use_digital) { /* HDMI 1.3 supports up to 340 Mhz over single link */ - if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) { + if (connector->display_info.is_hdmi) { if (pixel_clock > 340000) return true; else @@ -244,7 +244,7 @@ bool amdgpu_dig_monitor_is_duallink(struct drm_encoder *encoder, return false; else { /* HDMI 1.3 supports up to 340 Mhz over single link */ - if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) { + if (connector->display_info.is_hdmi) { if (pixel_clock > 340000) return true; else diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c deleted file mode 100644 index cd0acbea75da..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ /dev/null @@ -1,388 +0,0 @@ -/* - * Copyright © 2007 David Airlie - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- * - * Authors: - * David Airlie - */ - -#include <linux/module.h> -#include <linux/pm_runtime.h> -#include <linux/slab.h> -#include <linux/vga_switcheroo.h> - -#include <drm/amdgpu_drm.h> -#include <drm/drm_crtc.h> -#include <drm/drm_crtc_helper.h> -#include <drm/drm_fb_helper.h> -#include <drm/drm_fourcc.h> - -#include "amdgpu.h" -#include "cikd.h" -#include "amdgpu_gem.h" - -#include "amdgpu_display.h" - -/* object hierarchy - - this contains a helper + a amdgpu fb - the helper contains a pointer to amdgpu framebuffer baseclass. -*/ - -static int -amdgpufb_open(struct fb_info *info, int user) -{ - struct drm_fb_helper *fb_helper = info->par; - int ret = pm_runtime_get_sync(fb_helper->dev->dev); - if (ret < 0 && ret != -EACCES) { - pm_runtime_mark_last_busy(fb_helper->dev->dev); - pm_runtime_put_autosuspend(fb_helper->dev->dev); - return ret; - } - return 0; -} - -static int -amdgpufb_release(struct fb_info *info, int user) -{ - struct drm_fb_helper *fb_helper = info->par; - - pm_runtime_mark_last_busy(fb_helper->dev->dev); - pm_runtime_put_autosuspend(fb_helper->dev->dev); - return 0; -} - -static const struct fb_ops amdgpufb_ops = { - .owner = THIS_MODULE, - DRM_FB_HELPER_DEFAULT_OPS, - .fb_open = amdgpufb_open, - .fb_release = amdgpufb_release, - .fb_fillrect = drm_fb_helper_cfb_fillrect, - .fb_copyarea = drm_fb_helper_cfb_copyarea, - .fb_imageblit = drm_fb_helper_cfb_imageblit, -}; - - -int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int cpp, bool tiled) -{ - int aligned = width; - int pitch_mask = 0; - - switch (cpp) { - case 1: - pitch_mask = 255; - break; - case 2: - pitch_mask = 127; - break; - case 3: - case 4: - pitch_mask = 63; - break; - } - - aligned += pitch_mask; - aligned &= ~pitch_mask; - return aligned * cpp; -} - -static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj) -{ - struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj); - int ret; - - ret = amdgpu_bo_reserve(abo, true); - if (likely(ret == 0)) { - amdgpu_bo_kunmap(abo); - amdgpu_bo_unpin(abo); - amdgpu_bo_unreserve(abo); - } - drm_gem_object_put(gobj); -} - -static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, - struct drm_mode_fb_cmd2 *mode_cmd, - struct drm_gem_object **gobj_p) -{ - const struct drm_format_info *info; - struct amdgpu_device *adev = rfbdev->adev; - struct drm_gem_object *gobj = NULL; - struct amdgpu_bo *abo = NULL; - bool fb_tiled = false; /* useful for testing */ - u32 tiling_flags = 0, domain; - int ret; - int aligned_size, size; - int height = mode_cmd->height; - u32 cpp; - u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | - AMDGPU_GEM_CREATE_VRAM_CLEARED; - - info = drm_get_format_info(adev_to_drm(adev), mode_cmd); - cpp = info->cpp[0]; - - /* need to align pitch with crtc limits */ - mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp, - fb_tiled); - domain = amdgpu_display_supported_domains(adev, flags); - height = ALIGN(mode_cmd->height, 8); - size = mode_cmd->pitches[0] * height; - aligned_size = ALIGN(size, PAGE_SIZE); - ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, flags, - ttm_bo_type_device, NULL, &gobj); - if (ret) { - pr_err("failed to allocate framebuffer (%d)\n", aligned_size); - return -ENOMEM; - } - abo = gem_to_amdgpu_bo(gobj); - - if (fb_tiled) - tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE, GRPH_ARRAY_2D_TILED_THIN1); - - ret = amdgpu_bo_reserve(abo, false); - if (unlikely(ret != 0)) - goto out_unref; - - if (tiling_flags) { - ret = 
amdgpu_bo_set_tiling_flags(abo, - tiling_flags); - if (ret) - dev_err(adev->dev, "FB failed to set tiling flags\n"); - } - - ret = amdgpu_bo_pin(abo, domain); - if (ret) { - amdgpu_bo_unreserve(abo); - goto out_unref; - } - - ret = amdgpu_ttm_alloc_gart(&abo->tbo); - if (ret) { - amdgpu_bo_unreserve(abo); - dev_err(adev->dev, "%p bind failed\n", abo); - goto out_unref; - } - - ret = amdgpu_bo_kmap(abo, NULL); - amdgpu_bo_unreserve(abo); - if (ret) { - goto out_unref; - } - - *gobj_p = gobj; - return 0; -out_unref: - amdgpufb_destroy_pinned_object(gobj); - *gobj_p = NULL; - return ret; -} - -static int amdgpufb_create(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes) -{ - struct amdgpu_fbdev *rfbdev = (struct amdgpu_fbdev *)helper; - struct amdgpu_device *adev = rfbdev->adev; - struct fb_info *info; - struct drm_framebuffer *fb = NULL; - struct drm_mode_fb_cmd2 mode_cmd; - struct drm_gem_object *gobj = NULL; - struct amdgpu_bo *abo = NULL; - int ret; - - memset(&mode_cmd, 0, sizeof(mode_cmd)); - mode_cmd.width = sizes->surface_width; - mode_cmd.height = sizes->surface_height; - - if (sizes->surface_bpp == 24) - sizes->surface_bpp = 32; - - mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, - sizes->surface_depth); - - ret = amdgpufb_create_pinned_object(rfbdev, &mode_cmd, &gobj); - if (ret) { - DRM_ERROR("failed to create fbcon object %d\n", ret); - return ret; - } - - abo = gem_to_amdgpu_bo(gobj); - - /* okay we have an object now allocate the framebuffer */ - info = drm_fb_helper_alloc_fbi(helper); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto out; - } - - ret = amdgpu_display_gem_fb_init(adev_to_drm(adev), &rfbdev->rfb, - &mode_cmd, gobj); - if (ret) { - DRM_ERROR("failed to initialize framebuffer %d\n", ret); - goto out; - } - - fb = &rfbdev->rfb.base; - - /* setup helper */ - rfbdev->helper.fb = fb; - - info->fbops = &amdgpufb_ops; - - info->fix.smem_start = amdgpu_gmc_vram_cpu_pa(adev, abo); - info->fix.smem_len = amdgpu_bo_size(abo); - info->screen_base = amdgpu_bo_kptr(abo); - info->screen_size = amdgpu_bo_size(abo); - - drm_fb_helper_fill_info(info, &rfbdev->helper, sizes); - - /* setup aperture base/size for vesafb takeover */ - info->apertures->ranges[0].base = adev_to_drm(adev)->mode_config.fb_base; - info->apertures->ranges[0].size = adev->gmc.aper_size; - - /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ - - if (info->screen_base == NULL) { - ret = -ENOSPC; - goto out; - } - - DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); - DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->gmc.aper_base); - DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo)); - DRM_INFO("fb depth is %d\n", fb->format->depth); - DRM_INFO(" pitch is %d\n", fb->pitches[0]); - - vga_switcheroo_client_fb_set(adev->pdev, info); - return 0; - -out: - if (fb && ret) { - drm_gem_object_put(gobj); - drm_framebuffer_unregister_private(fb); - drm_framebuffer_cleanup(fb); - kfree(fb); - } - return ret; -} - -static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev) -{ - struct amdgpu_framebuffer *rfb = &rfbdev->rfb; - int i; - - drm_fb_helper_unregister_fbi(&rfbdev->helper); - - if (rfb->base.obj[0]) { - for (i = 0; i < rfb->base.format->num_planes; i++) - drm_gem_object_put(rfb->base.obj[0]); - amdgpufb_destroy_pinned_object(rfb->base.obj[0]); - rfb->base.obj[0] = NULL; - drm_framebuffer_unregister_private(&rfb->base); - drm_framebuffer_cleanup(&rfb->base); - } - drm_fb_helper_fini(&rfbdev->helper); 
- - return 0; -} - -static const struct drm_fb_helper_funcs amdgpu_fb_helper_funcs = { - .fb_probe = amdgpufb_create, -}; - -int amdgpu_fbdev_init(struct amdgpu_device *adev) -{ - struct amdgpu_fbdev *rfbdev; - int bpp_sel = 32; - int ret; - - /* don't init fbdev on hw without DCE */ - if (!adev->mode_info.mode_config_initialized) - return 0; - - /* don't init fbdev if there are no connectors */ - if (list_empty(&adev_to_drm(adev)->mode_config.connector_list)) - return 0; - - /* select 8 bpp console on low vram cards */ - if (adev->gmc.real_vram_size <= (32*1024*1024)) - bpp_sel = 8; - - rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL); - if (!rfbdev) - return -ENOMEM; - - rfbdev->adev = adev; - adev->mode_info.rfbdev = rfbdev; - - drm_fb_helper_prepare(adev_to_drm(adev), &rfbdev->helper, - &amdgpu_fb_helper_funcs); - - ret = drm_fb_helper_init(adev_to_drm(adev), &rfbdev->helper); - if (ret) { - kfree(rfbdev); - return ret; - } - - /* disable all the possible outputs/crtcs before entering KMS mode */ - if (!amdgpu_device_has_dc_support(adev) && !amdgpu_virtual_display) - drm_helper_disable_unused_functions(adev_to_drm(adev)); - - drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); - return 0; -} - -void amdgpu_fbdev_fini(struct amdgpu_device *adev) -{ - if (!adev->mode_info.rfbdev) - return; - - amdgpu_fbdev_destroy(adev_to_drm(adev), adev->mode_info.rfbdev); - kfree(adev->mode_info.rfbdev); - adev->mode_info.rfbdev = NULL; -} - -void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state) -{ - if (adev->mode_info.rfbdev) - drm_fb_helper_set_suspend_unlocked(&adev->mode_info.rfbdev->helper, - state); -} - -int amdgpu_fbdev_total_size(struct amdgpu_device *adev) -{ - struct amdgpu_bo *robj; - int size = 0; - - if (!adev->mode_info.rfbdev) - return 0; - - robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0]); - size += amdgpu_bo_size(robj); - return size; -} - -bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj) -{ - if (!adev->mode_info.rfbdev) - return false; - if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0])) - return true; - return false; -} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index a1e63ba4c54a..c0d8f40a5b45 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -877,6 +877,32 @@ out: return r; } +static int amdgpu_gem_align_pitch(struct amdgpu_device *adev, + int width, + int cpp, + bool tiled) +{ + int aligned = width; + int pitch_mask = 0; + + switch (cpp) { + case 1: + pitch_mask = 255; + break; + case 2: + pitch_mask = 127; + break; + case 3: + case 4: + pitch_mask = 63; + break; + } + + aligned += pitch_mask; + aligned &= ~pitch_mask; + return aligned * cpp; +} + int amdgpu_mode_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args) @@ -885,7 +911,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, struct drm_gem_object *gobj; uint32_t handle; u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_CPU_GTT_USWC; + AMDGPU_GEM_CREATE_CPU_GTT_USWC | + AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; u32 domain; int r; @@ -897,8 +924,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, if (adev->mman.buffer_funcs_enabled) flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED; - args->pitch = amdgpu_align_pitch(adev, args->width, - DIV_ROUND_UP(args->bpp, 8), 0); + args->pitch = amdgpu_gem_align_pitch(adev, args->width, + DIV_ROUND_UP(args->bpp, 8), 0); 
args->size = (u64)args->pitch * args->height; args->size = ALIGN(args->size, PAGE_SIZE); domain = amdgpu_bo_get_preferred_domain(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 08478fce00f2..2430d6223c2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -350,6 +350,7 @@ static inline uint64_t amdgpu_gmc_fault_key(uint64_t addr, uint16_t pasid) * amdgpu_gmc_filter_faults - filter VM faults * * @adev: amdgpu device structure + * @ih: interrupt ring that the fault was received from * @addr: address of the VM fault * @pasid: PASID of the process causing the fault * @timestamp: timestamp of the fault @@ -358,7 +359,8 @@ static inline uint64_t amdgpu_gmc_fault_key(uint64_t addr, uint16_t pasid) * True if the fault was filtered and should not be processed further. * False if the fault is a new one and needs to be handled. */ -bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr, +bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih, uint64_t addr, uint16_t pasid, uint64_t timestamp) { struct amdgpu_gmc *gmc = &adev->gmc; @@ -366,6 +368,10 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr, struct amdgpu_gmc_fault *fault; uint32_t hash; + /* Stale retry fault if timestamp goes backward */ + if (amdgpu_ih_ts_after(timestamp, ih->processed_timestamp)) + return true; + /* If we don't have space left in the ring buffer return immediately */ stamp = max(timestamp, AMDGPU_GMC_FAULT_TIMEOUT + 1) - AMDGPU_GMC_FAULT_TIMEOUT; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index e55201134a01..8458cebc6d5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -316,7 +316,8 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc); void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc); -bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr, +bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih, uint64_t addr, uint16_t pasid, uint64_t timestamp); void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr, uint16_t pasid); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index f3d62e196901..3df146579ad9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -164,52 +164,32 @@ void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv, } } -/* Waiter helper that checks current rptr matches or passes checkpoint wptr */ -static bool amdgpu_ih_has_checkpoint_processed(struct amdgpu_device *adev, - struct amdgpu_ih_ring *ih, - uint32_t checkpoint_wptr, - uint32_t *prev_rptr) -{ - uint32_t cur_rptr = ih->rptr | (*prev_rptr & ~ih->ptr_mask); - - /* rptr has wrapped. */ - if (cur_rptr < *prev_rptr) - cur_rptr += ih->ptr_mask + 1; - *prev_rptr = cur_rptr; - - /* check ring is empty to workaround missing wptr overflow flag */ - return cur_rptr >= checkpoint_wptr || - (cur_rptr & ih->ptr_mask) == amdgpu_ih_get_wptr(adev, ih); -} - /** - * amdgpu_ih_wait_on_checkpoint_process - wait to process IVs up to checkpoint + * amdgpu_ih_wait_on_checkpoint_process_ts - wait to process IVs up to checkpoint * * @adev: amdgpu_device pointer * @ih: ih ring to process * * Used to ensure ring has processed IVs up to the checkpoint write pointer.
*/ -int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev, +int amdgpu_ih_wait_on_checkpoint_process_ts(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) { - uint32_t checkpoint_wptr, rptr; + uint32_t checkpoint_wptr; + uint64_t checkpoint_ts; + long timeout = HZ; if (!ih->enabled || adev->shutdown) return -ENODEV; checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih); - /* Order wptr with rptr. */ + /* Order wptr with ring data. */ rmb(); - rptr = READ_ONCE(ih->rptr); - - /* wptr has wrapped. */ - if (rptr > checkpoint_wptr) - checkpoint_wptr += ih->ptr_mask + 1; + checkpoint_ts = amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1); - return wait_event_interruptible(ih->wait_process, - amdgpu_ih_has_checkpoint_processed(adev, ih, - checkpoint_wptr, &rptr)); + return wait_event_interruptible_timeout(ih->wait_process, + amdgpu_ih_ts_after(checkpoint_ts, ih->processed_timestamp) || + ih->rptr == amdgpu_ih_get_wptr(adev, ih), timeout); } /** @@ -223,7 +203,7 @@ int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev, */ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) { - unsigned int count = AMDGPU_IH_MAX_NUM_IVS; + unsigned int count; u32 wptr; if (!ih->enabled || adev->shutdown) @@ -232,6 +212,7 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) wptr = amdgpu_ih_get_wptr(adev, ih); restart_ih: + count = AMDGPU_IH_MAX_NUM_IVS; DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr); /* Order reading of wptr vs. reading of IH ring data */ @@ -298,3 +279,18 @@ void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev, /* wptr/rptr are in bytes! */ ih->rptr += 32; } + +uint64_t amdgpu_ih_decode_iv_ts_helper(struct amdgpu_ih_ring *ih, u32 rptr, + signed int offset) +{ + uint32_t iv_size = 32; + uint32_t ring_index; + uint32_t dw1, dw2; + + rptr += iv_size * offset; + ring_index = (rptr & ih->ptr_mask) >> 2; + + dw1 = le32_to_cpu(ih->ring[ring_index + 1]); + dw2 = le32_to_cpu(ih->ring[ring_index + 2]); + return dw1 | ((u64)(dw2 & 0xffff) << 32); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h index 0649b59830a5..dd1c2eded6b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h @@ -68,20 +68,30 @@ struct amdgpu_ih_ring { /* For waiting on IH processing at checkpoint. */ wait_queue_head_t wait_process; + uint64_t processed_timestamp; }; +/* return true if time stamp t2 is after t1 with 48bit wrap around */ +#define amdgpu_ih_ts_after(t1, t2) \ + (((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) > 0LL) + /* provided by the ih block */ struct amdgpu_ih_funcs { /* ring read/write ptr handling, called from interrupt context */ u32 (*get_wptr)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih); void (*decode_iv)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, struct amdgpu_iv_entry *entry); + uint64_t (*decode_iv_ts)(struct amdgpu_ih_ring *ih, u32 rptr, + signed int offset); void (*set_rptr)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih); }; #define amdgpu_ih_get_wptr(adev, ih) (adev)->irq.ih_funcs->get_wptr((adev), (ih)) #define amdgpu_ih_decode_iv(adev, iv) \ (adev)->irq.ih_funcs->decode_iv((adev), (ih), (iv)) +#define amdgpu_ih_decode_iv_ts(adev, ih, rptr, offset) \ + (WARN_ON_ONCE(!(adev)->irq.ih_funcs->decode_iv_ts) ? 
0 : \ + (adev)->irq.ih_funcs->decode_iv_ts((ih), (rptr), (offset))) #define amdgpu_ih_set_rptr(adev, ih) (adev)->irq.ih_funcs->set_rptr((adev), (ih)) int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, @@ -89,10 +99,12 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih); void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv, unsigned int num_dw); -int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev, - struct amdgpu_ih_ring *ih); +int amdgpu_ih_wait_on_checkpoint_process_ts(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih); int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih); void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, struct amdgpu_iv_entry *entry); +uint64_t amdgpu_ih_decode_iv_ts_helper(struct amdgpu_ih_ring *ih, u32 rptr, + signed int offset); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c index 5cf142e849bb..a1cbd7c3deb2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c @@ -1,4 +1,4 @@ -/** +/* * \file amdgpu_ioc32.c * * 32-bit ioctl compatibility routines for the AMDGPU DRM. @@ -37,12 +37,9 @@ long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { unsigned int nr = DRM_IOCTL_NR(cmd); - int ret; if (nr < DRM_COMMAND_BASE) return drm_compat_ioctl(filp, cmd, arg); - ret = amdgpu_drm_ioctl(filp, cmd, arg); - - return ret; + return amdgpu_drm_ioctl(filp, cmd, arg); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index cc2e0c9cfe0a..f5cbc2747ac6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -333,7 +333,6 @@ int amdgpu_irq_init(struct amdgpu_device *adev) if (!amdgpu_device_has_dc_support(adev)) { if (!adev->enable_virtual_display) /* Disable vblank IRQs aggressively for power-saving */ - /* XXX: can this be enabled for DC? 
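 *
 * A side note on the amdgpu_ih.h hunk above: the amdgpu_ih_decode_iv_ts()
 * wrapper shows the usual kernel pattern for optional per-IP hooks, i.e.
 * warn once and fall back to a safe default when the hook is not wired
 * up. A minimal sketch of the same shape with hypothetical names:
 *
 *   struct ih_funcs {
 *           uint64_t (*decode_iv_ts)(struct ih_ring *ih, uint32_t rptr,
 *                                    signed int offset);
 *   };
 *
 *   static uint64_t decode_iv_ts(const struct ih_funcs *f,
 *                                struct ih_ring *ih, uint32_t rptr,
 *                                signed int offset)
 *   {
 *           if (WARN_ON_ONCE(!f->decode_iv_ts))
 *                   return 0;       // hook absent: report timestamp 0
 *           return f->decode_iv_ts(ih, rptr, offset);
 *   }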
*/ adev_to_drm(adev)->vblank_disable_immediate = true; r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc); @@ -391,7 +390,7 @@ void amdgpu_irq_fini_hw(struct amdgpu_device *adev) } /** - * amdgpu_irq_fini - shut down interrupt handling + * amdgpu_irq_fini_sw - shut down interrupt handling * * @adev: amdgpu device pointer * @@ -529,6 +528,9 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev, /* Send it to amdkfd as well if it isn't already handled */ if (!handled) amdgpu_amdkfd_interrupt(adev, entry.iv_entry); + + if (amdgpu_ih_ts_after(ih->processed_timestamp, entry.timestamp)) + ih->processed_timestamp = entry.timestamp; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 89fb372ed49c..6043bf6fd414 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -232,8 +232,6 @@ struct amdgpu_i2c_chan { struct mutex mutex; }; -struct amdgpu_fbdev; - struct amdgpu_afmt { bool enabled; int offset; @@ -309,13 +307,6 @@ struct amdgpu_framebuffer { uint64_t address; }; -struct amdgpu_fbdev { - struct drm_fb_helper helper; - struct amdgpu_framebuffer rfb; - struct list_head fbdev_list; - struct amdgpu_device *adev; -}; - struct amdgpu_mode_info { struct atom_context *atom_context; struct card_info *atom_card_info; @@ -341,8 +332,6 @@ struct amdgpu_mode_info { struct edid *bios_hardcoded_edid; int bios_hardcoded_edid_size; - /* pointer to fbdev info structure */ - struct amdgpu_fbdev *rfbdev; /* firmware flags */ u32 firmware_flags; /* pointer to backlight encoder */ @@ -631,15 +620,6 @@ bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc, int *hpos, ktime_t *stime, ktime_t *etime, const struct drm_display_mode *mode); -/* fbdev layer */ -int amdgpu_fbdev_init(struct amdgpu_device *adev); -void amdgpu_fbdev_fini(struct amdgpu_device *adev); -void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state); -int amdgpu_fbdev_total_size(struct amdgpu_device *adev); -bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj); - -int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled); - /* amdgpu_display.c */ void amdgpu_display_print_display_setup(struct drm_device *dev); int amdgpu_display_modeset_create_props(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 4fcfc2313b8c..3a7b56e57cec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1032,9 +1032,14 @@ int amdgpu_bo_init(struct amdgpu_device *adev) /* On A+A platform, VRAM can be mapped as WB */ if (!adev->gmc.xgmi.connected_to_cpu) { /* reserve PAT memory space to WC for VRAM */ - arch_io_reserve_memtype_wc(adev->gmc.aper_base, + int r = arch_io_reserve_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size); + if (r) { + DRM_ERROR("Unable to set WC memtype for the aperture base\n"); + return r; + } + /* Add an MTRR for the VRAM */ adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base, adev->gmc.aper_size); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c index 4eaec446b49d..0bb2466d539a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c @@ -69,6 +69,7 @@ static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den, /** * amdgpu_pll_get_fb_ref_div - feedback and ref divider calculation * + * @adev: amdgpu_device pointer * @nom: 
nominator * @den: denominator * @post_div: post divider @@ -106,6 +107,7 @@ static void amdgpu_pll_get_fb_ref_div(struct amdgpu_device *adev, unsigned int n /** * amdgpu_pll_compute - compute PLL paramaters * + * @adev: amdgpu_device pointer * @pll: information about the PLL * @freq: requested frequency * @dot_clock_p: resulting pixel clock diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c index 82e9ecf84352..71ee361d0972 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c @@ -233,6 +233,10 @@ static void amdgpu_perf_start(struct perf_event *event, int flags) if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) return; + if ((!pe->adev->df.funcs) || + (!pe->adev->df.funcs->pmc_start)) + return; + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); hwc->state = 0; @@ -268,6 +272,10 @@ static void amdgpu_perf_read(struct perf_event *event) pmu); u64 count, prev; + if ((!pe->adev->df.funcs) || + (!pe->adev->df.funcs->pmc_get_count)) + return; + do { prev = local64_read(&hwc->prev_count); @@ -297,6 +305,10 @@ static void amdgpu_perf_stop(struct perf_event *event, int flags) if (hwc->state & PERF_HES_UPTODATE) return; + if ((!pe->adev->df.funcs) || + (!pe->adev->df.funcs->pmc_stop)) + return; + switch (hwc->config_base) { case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF: case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI: @@ -326,6 +338,10 @@ static int amdgpu_perf_add(struct perf_event *event, int flags) struct amdgpu_pmu_entry, pmu); + if ((!pe->adev->df.funcs) || + (!pe->adev->df.funcs->pmc_start)) + return -EINVAL; + switch (pe->pmu_perf_type) { case AMDGPU_PMU_PERF_TYPE_DF: hwc->config_base = AMDGPU_PMU_EVENT_CONFIG_TYPE_DF; @@ -371,6 +387,9 @@ static void amdgpu_perf_del(struct perf_event *event, int flags) struct amdgpu_pmu_entry *pe = container_of(event->pmu, struct amdgpu_pmu_entry, pmu); + if ((!pe->adev->df.funcs) || + (!pe->adev->df.funcs->pmc_stop)) + return; amdgpu_perf_stop(event, PERF_EF_UPDATE); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c index d02c8637f909..786afe4f58f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c @@ -59,7 +59,7 @@ static DEVICE_ATTR_RO(mem_info_preempt_used); * @man: TTM memory type manager * @tbo: TTM BO we need this range for * @place: placement flags and restrictions - * @mem: the resulting mem object + * @res: TTM memory object * * Dummy, just count the space used without allocating resources or any limit. */ @@ -85,7 +85,7 @@ static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man, * amdgpu_preempt_mgr_del - free ranges * * @man: TTM memory type manager - * @mem: TTM memory object + * @res: TTM memory object * * Free the allocated GTT again. 
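 *
 * A note on the amdgpu_pmu.c hunks above: adev->df.funcs is an optional
 * function table that is only populated on ASICs with a DF block, so
 * every perf callback now checks both the table and the specific hook
 * before calling through it. Minimal sketch of the guard (argument
 * lists elided, since only the check itself is the point):
 *
 *   // guard an optional ops table before dereferencing it
 *   if (!adev->df.funcs || !adev->df.funcs->pmc_start)
 *           return;                 // no DF counters on this ASIC
 *   // ... only now is adev->df.funcs->pmc_start() safe to call ...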
*/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index c641f84649d6..dee17a0e1187 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -518,7 +518,7 @@ static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp) return cmd; } -void release_psp_cmd_buf(struct psp_context *psp) +static void release_psp_cmd_buf(struct psp_context *psp) { mutex_unlock(&psp->mutex); } @@ -2017,12 +2017,16 @@ static int psp_hw_start(struct psp_context *psp) return ret; } + if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) + goto skip_pin_bo; + ret = psp_tmr_init(psp); if (ret) { DRM_ERROR("PSP tmr init failed!\n"); return ret; } +skip_pin_bo: /* * For ASICs with DF Cstate management centralized * to PMFW, TMR setup should be performed after PMFW @@ -2452,6 +2456,18 @@ skip_memalloc: return ret; } + if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) { + if (adev->gmc.xgmi.num_physical_nodes > 1) { + ret = psp_xgmi_initialize(psp, false, true); + /* Warn on XGMI session initialization failure, + * but don't stop driver initialization + */ + if (ret) + dev_err(psp->adev->dev, + "XGMI: Failed to initialize XGMI session\n"); + } + } + if (psp->ta_fw) { ret = psp_ras_initialize(psp); if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 08133de21fdd..cd9e5914944b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -867,9 +867,9 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev, /* feature ctl end */ -void amdgpu_ras_mca_query_error_status(struct amdgpu_device *adev, - struct ras_common_if *ras_block, - struct ras_err_data *err_data) +static void amdgpu_ras_mca_query_error_status(struct amdgpu_device *adev, + struct ras_common_if *ras_block, + struct ras_err_data *err_data) { switch (ras_block->sub_block_index) { case AMDGPU_RAS_MCA_BLOCK__MP0: @@ -892,6 +892,38 @@ void amdgpu_ras_mca_query_error_status(struct amdgpu_device *adev, } } +static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data) +{ + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + int ret = 0; + + /* + * choose the right query method according to + * whether the smu supports querying error information + */ + ret = smu_get_ecc_info(&adev->smu, (void *)&(ras->umc_ecc)); + if (ret == -EOPNOTSUPP) { + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->query_ras_error_count) + adev->umc.ras_funcs->query_ras_error_count(adev, err_data); + + /* umc query_ras_error_address is also responsible for clearing + * error status + */ + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->query_ras_error_address) + adev->umc.ras_funcs->query_ras_error_address(adev, err_data); + } else if (!ret) { + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->ecc_info_query_ras_error_count) + adev->umc.ras_funcs->ecc_info_query_ras_error_count(adev, err_data); + + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->ecc_info_query_ras_error_address) + adev->umc.ras_funcs->ecc_info_query_ras_error_address(adev, err_data); + } +} + /* query/inject/cure begin */ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info) @@ -905,15 +937,7 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, switch (info->head.block) { case AMDGPU_RAS_BLOCK__UMC: - if (adev->umc.ras_funcs && - adev->umc.ras_funcs->query_ras_error_count) - adev->umc.ras_funcs->query_ras_error_count(adev,
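/*
 * The -EOPNOTSUPP dispatch in amdgpu_ras_get_ecc_info() above is worth
 * calling out: the SMU query is tried first, and only a "not supported"
 * result falls back to reading the UMC registers directly; success means
 * the cached ECC table is parsed instead, and any other error reports
 * nothing. A minimal sketch of the shape (hypothetical helper names,
 * not the driver's API):
 *
 *   ret = query_via_smu(ctx);           // preferred: firmware-owned table
 *   if (ret == -EOPNOTSUPP)
 *           query_via_umc_regs(ctx);    // fallback: direct register read
 *   else if (!ret)
 *           query_via_ecc_table(ctx);   // success: parse the cached table
 *                                       // other errors: report nothing
 */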
&err_data); - /* umc query_ras_error_address is also responsible for clearing - * error status - */ - if (adev->umc.ras_funcs && - adev->umc.ras_funcs->query_ras_error_address) - adev->umc.ras_funcs->query_ras_error_address(adev, &err_data); + amdgpu_ras_get_ecc_info(adev, &err_data); break; case AMDGPU_RAS_BLOCK__SDMA: if (adev->sdma.funcs->query_ras_error_count) { @@ -1137,9 +1161,9 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, /** * amdgpu_ras_query_error_count -- Get error counts of all IPs - * adev: pointer to AMD GPU device - * ce_count: pointer to an integer to be set to the count of correctible errors. - * ue_count: pointer to an integer to be set to the count of uncorrectible + * @adev: pointer to AMD GPU device + * @ce_count: pointer to an integer to be set to the count of correctable errors. + * @ue_count: pointer to an integer to be set to the count of uncorrectable * errors. * * If set, @ce_count or @ue_count, count and return the corresponding @@ -1723,6 +1747,16 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev) if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF) continue; + /* + * This is a workaround for Aldebaran: skip sending the message to + * the smu to get the ecc_info table, since the smu currently fails + * to handle that request. + * Remove this once the smu handles the ecc_info table correctly. + */ + if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) && + (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2))) + continue; + amdgpu_ras_query_error_status(adev, &info); } } @@ -1935,9 +1969,11 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) if (!con || !con->eh_data) return 0; + mutex_lock(&con->recovery_lock); control = &con->eeprom_control; data = con->eh_data; save_count = data->count - control->ras_num_recs; + mutex_unlock(&con->recovery_lock); /* only new entries are saved */ if (save_count > 0) { if (amdgpu_ras_eeprom_append(control, @@ -2336,7 +2372,11 @@ int amdgpu_ras_init(struct amdgpu_device *adev) } /* Init poison supported flag, the default value is false */ - if (adev->df.funcs && + if (adev->gmc.xgmi.connected_to_cpu) { + /* enabled by default when GPU is connected to CPU */ + con->poison_supported = true; + } + else if (adev->df.funcs && adev->df.funcs->query_ras_poison_mode && adev->umc.ras_funcs && adev->umc.ras_funcs->query_ras_poison_mode) { @@ -2477,7 +2517,6 @@ void amdgpu_ras_late_fini(struct amdgpu_device *adev, amdgpu_ras_sysfs_remove(adev, ras_block); if (ih_info->cb) amdgpu_ras_interrupt_remove_handler(adev, ih_info); - amdgpu_ras_feature_enable(adev, ras_block, 0); } /* do some init work after IP late init as dependence. diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index e36f4de9fa55..1c708122d492 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -319,6 +319,19 @@ struct ras_common_if { char name[32]; }; +#define MAX_UMC_CHANNEL_NUM 32 + +struct ecc_info_per_ch { + uint16_t ce_count_lo_chip; + uint16_t ce_count_hi_chip; + uint64_t mca_umc_status; + uint64_t mca_umc_addr; +}; + +struct umc_ecc_info { + struct ecc_info_per_ch ecc[MAX_UMC_CHANNEL_NUM]; +}; + struct amdgpu_ras { /* ras infrastructure */ /* for ras itself.
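 *
 * A locking note on the save_bad_pages hunk above: recovery_lock is used
 * to take a consistent snapshot of the shared counters, and the EEPROM
 * append afterwards operates on that snapshot. Sketch of the pattern
 * (save_to_eeprom() is a hypothetical stand-in):
 *
 *   mutex_lock(&con->recovery_lock);
 *   save_count = data->count - control->ras_num_recs;  // snapshot
 *   mutex_unlock(&con->recovery_lock);
 *
 *   if (save_count > 0)
 *           save_to_eeprom(control, save_count);        // outside the lock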
*/ @@ -358,6 +371,9 @@ struct amdgpu_ras { struct delayed_work ras_counte_delay_work; atomic_t ras_ue_count; atomic_t ras_ce_count; + + /* record umc error info queried from smu */ + struct umc_ecc_info umc_ecc; }; struct ras_fs_data { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 862eb3c1c4c5..f7d8487799b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -252,41 +252,25 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, struct dma_resv *resv, enum amdgpu_sync_mode mode, void *owner) { - struct dma_resv_list *flist; + struct dma_resv_iter cursor; struct dma_fence *f; - unsigned i; - int r = 0; + int r; if (resv == NULL) return -EINVAL; - /* always sync to the exclusive fence */ - f = dma_resv_excl_fence(resv); - dma_fence_chain_for_each(f, f) { - struct dma_fence_chain *chain = to_dma_fence_chain(f); - - if (amdgpu_sync_test_fence(adev, mode, owner, chain ? - chain->fence : f)) { - r = amdgpu_sync_fence(sync, f); - dma_fence_put(f); - if (r) - return r; - break; - } - } - - flist = dma_resv_shared_list(resv); - if (!flist) - return 0; - - for (i = 0; i < flist->shared_count; ++i) { - f = rcu_dereference_protected(flist->shared[i], - dma_resv_held(resv)); - - if (amdgpu_sync_test_fence(adev, mode, owner, f)) { - r = amdgpu_sync_fence(sync, f); - if (r) - return r; + dma_resv_for_each_fence(&cursor, resv, true, f) { + dma_fence_chain_for_each(f, f) { + struct dma_fence_chain *chain = to_dma_fence_chain(f); + + if (amdgpu_sync_test_fence(adev, mode, owner, chain ? + chain->fence : f)) { + r = amdgpu_sync_fence(sync, f); + dma_fence_put(f); + if (r) + return r; + break; + } } } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index c875f1cdd2af..fb0d8bffdce2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -116,17 +116,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, abo = ttm_to_amdgpu_bo(bo); if (abo->flags & AMDGPU_AMDKFD_CREATE_SVM_BO) { - struct dma_fence *fence; - struct dma_resv *resv = &bo->base._resv; - - rcu_read_lock(); - fence = rcu_dereference(resv->fence_excl); - if (fence && !fence->ops->signaled) - dma_fence_enable_sw_signaling(fence); - placement->num_placement = 0; placement->num_busy_placement = 0; - rcu_read_unlock(); return; } @@ -922,11 +913,6 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev, ttm->num_pages, bo_mem, ttm); } - if (bo_mem->mem_type == AMDGPU_PL_GDS || - bo_mem->mem_type == AMDGPU_PL_GWS || - bo_mem->mem_type == AMDGPU_PL_OA) - return -EINVAL; - if (bo_mem->mem_type != TTM_PL_TT || !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) { gtt->offset = AMDGPU_BO_INVALID_OFFSET; @@ -1353,10 +1339,9 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, const struct ttm_place *place) { unsigned long num_pages = bo->resource->num_pages; + struct dma_resv_iter resv_cursor; struct amdgpu_res_cursor cursor; - struct dma_resv_list *flist; struct dma_fence *f; - int i; /* Swapout? 
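 *
 * Both amdgpu_sync.c above and the eviction check below move to the
 * dma_resv_iter API instead of open-coding the exclusive-fence plus
 * shared-list walk, so the fence-list traversal lives in one helper.
 * Minimal sketch of the locked iteration as used at these call sites
 * (the reservation object is assumed to be held; the third argument
 * true selects shared fences as well):
 *
 *   struct dma_resv_iter cursor;
 *   struct dma_fence *f;
 *
 *   dma_resv_for_each_fence(&cursor, resv, true, f) {
 *           if (fence_matches(f))   // hypothetical per-fence predicate
 *                   return false;
 *   }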
*/ if (bo->resource->mem_type == TTM_PL_SYSTEM) @@ -1370,14 +1355,9 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, * If true, then return false as any KFD process needs all its BOs to * be resident to run successfully */ - flist = dma_resv_shared_list(bo->base.resv); - if (flist) { - for (i = 0; i < flist->shared_count; ++i) { - f = rcu_dereference_protected(flist->shared[i], - dma_resv_held(bo->base.resv)); - if (amdkfd_fence_check_mm(f, current->mm)) - return false; - } + dma_resv_for_each_fence(&resv_cursor, bo->base.resv, true, f) { + if (amdkfd_fence_check_mm(f, current->mm)) + return false; } switch (bo->resource->mem_type) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index a90029ee9733..6e4bea012ea4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -94,30 +94,58 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + int ret = 0; kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); - if (adev->umc.ras_funcs && - adev->umc.ras_funcs->query_ras_error_count) - adev->umc.ras_funcs->query_ras_error_count(adev, ras_error_status); - - if (adev->umc.ras_funcs && - adev->umc.ras_funcs->query_ras_error_address && - adev->umc.max_ras_err_cnt_per_query) { - err_data->err_addr = - kcalloc(adev->umc.max_ras_err_cnt_per_query, - sizeof(struct eeprom_table_record), GFP_KERNEL); - - /* still call query_ras_error_address to clear error status - * even NOMEM error is encountered - */ - if(!err_data->err_addr) - dev_warn(adev->dev, "Failed to alloc memory for " - "umc error address record!\n"); - - /* umc query_ras_error_address is also responsible for clearing - * error status - */ - adev->umc.ras_funcs->query_ras_error_address(adev, ras_error_status); + ret = smu_get_ecc_info(&adev->smu, (void *)&(con->umc_ecc)); + if (ret == -EOPNOTSUPP) { + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->query_ras_error_count) + adev->umc.ras_funcs->query_ras_error_count(adev, ras_error_status); + + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->query_ras_error_address && + adev->umc.max_ras_err_cnt_per_query) { + err_data->err_addr = + kcalloc(adev->umc.max_ras_err_cnt_per_query, + sizeof(struct eeprom_table_record), GFP_KERNEL); + + /* still call query_ras_error_address to clear error status + * even NOMEM error is encountered + */ + if(!err_data->err_addr) + dev_warn(adev->dev, "Failed to alloc memory for " + "umc error address record!\n"); + + /* umc query_ras_error_address is also responsible for clearing + * error status + */ + adev->umc.ras_funcs->query_ras_error_address(adev, ras_error_status); + } + } else if (!ret) { + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->ecc_info_query_ras_error_count) + adev->umc.ras_funcs->ecc_info_query_ras_error_count(adev, ras_error_status); + + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->ecc_info_query_ras_error_address && + adev->umc.max_ras_err_cnt_per_query) { + err_data->err_addr = + kcalloc(adev->umc.max_ras_err_cnt_per_query, + sizeof(struct eeprom_table_record), GFP_KERNEL); + + /* still call query_ras_error_address to clear error status + * even NOMEM error is encountered + */ + if(!err_data->err_addr) + dev_warn(adev->dev, "Failed to alloc memory for " + "umc error address record!\n"); + + /* umc query_ras_error_address is also responsible for clearing + * error status + */ + 
adev->umc.ras_funcs->ecc_info_query_ras_error_address(adev, ras_error_status); + } } /* only uncorrectable error needs gpu reset */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index 1f5fe2315236..9e40bade0a68 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -49,6 +49,10 @@ struct amdgpu_umc_ras_funcs { void (*query_ras_error_address)(struct amdgpu_device *adev, void *ras_error_status); bool (*query_ras_poison_mode)(struct amdgpu_device *adev); + void (*ecc_info_query_ras_error_count)(struct amdgpu_device *adev, + void *ras_error_status); + void (*ecc_info_query_ras_error_address)(struct amdgpu_device *adev, + void *ras_error_status); }; struct amdgpu_umc_funcs { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 688bef1649b5..344f711ad144 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -434,7 +434,6 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) * * @ring: ring we should submit the msg to * @handle: VCE session handle to use - * @bo: amdgpu object for which we query the offset * @fence: optional fence to return * * Open up a stream for HW test diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 4f7c70845785..9a19a6a57b23 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -135,6 +135,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) break; case IP_VERSION(3, 0, 0): case IP_VERSION(3, 0, 64): + case IP_VERSION(3, 0, 192): if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) fw_name = FIRMWARE_SIENNA_CICHLID; else @@ -285,20 +286,13 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance) { bool ret = false; + int vcn_config = adev->vcn.vcn_config[vcn_instance]; - int major; - int minor; - int revision; - - /* if cannot find IP data, then this VCN does not exist */ - if (amdgpu_discovery_get_vcn_version(adev, vcn_instance, &major, &minor, &revision) != 0) - return true; - - if ((type == VCN_ENCODE_RING) && (revision & VCN_BLOCK_ENCODE_DISABLE_MASK)) { + if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK)) { ret = true; - } else if ((type == VCN_DECODE_RING) && (revision & VCN_BLOCK_DECODE_DISABLE_MASK)) { + } else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK)) { ret = true; - } else if ((type == VCN_UNIFIED_RING) && (revision & VCN_BLOCK_QUEUE_DISABLE_MASK)) { + } else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK)) { ret = true; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index bfa27ea94804..5d3728b027d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -235,6 +235,7 @@ struct amdgpu_vcn { uint8_t num_vcn_inst; struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES]; + uint8_t vcn_config[AMDGPU_MAX_VCN_INSTANCES]; struct amdgpu_vcn_reg internal; struct mutex vcn_pg_lock; struct mutex vcn1_jpeg1_workaround; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 04cf9b207e62..3fc49823f527 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -283,17 +283,15 
@@ static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev) *data = kmalloc(sizeof(struct amdgpu_virt_ras_err_handler_data), GFP_KERNEL); if (!*data) - return -ENOMEM; + goto data_failure; bps = kmalloc_array(align_space, sizeof((*data)->bps), GFP_KERNEL); - bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL); + if (!bps) + goto bps_failure; - if (!bps || !bps_bo) { - kfree(bps); - kfree(bps_bo); - kfree(*data); - return -ENOMEM; - } + bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL); + if (!bps_bo) + goto bps_bo_failure; (*data)->bps = bps; (*data)->bps_bo = bps_bo; @@ -303,6 +301,13 @@ static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev) virt->ras_init_done = true; return 0; + +bps_bo_failure: + kfree(bps); +bps_failure: + kfree(*data); +data_failure: + return -ENOMEM; } static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index ce982afeff91..2dcc68e04e84 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -16,6 +16,8 @@ #include "ivsrcid/ivsrcid_vislands30.h" #include "amdgpu_vkms.h" #include "amdgpu_display.h" +#include "atom.h" +#include "amdgpu_irq.h" /** * DOC: amdgpu_vkms @@ -41,16 +43,16 @@ static const u32 amdgpu_vkms_formats[] = { static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer) { - struct amdgpu_vkms_output *output = container_of(timer, - struct amdgpu_vkms_output, - vblank_hrtimer); - struct drm_crtc *crtc = &output->crtc; + struct amdgpu_crtc *amdgpu_crtc = container_of(timer, struct amdgpu_crtc, vblank_timer); + struct drm_crtc *crtc = &amdgpu_crtc->base; + struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc); u64 ret_overrun; bool ret; - ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer, + ret_overrun = hrtimer_forward_now(&amdgpu_crtc->vblank_timer, output->period_ns); - WARN_ON(ret_overrun != 1); + if (ret_overrun != 1) + DRM_WARN("%s: vblank timer overrun\n", __func__); ret = drm_crtc_handle_vblank(crtc); if (!ret) @@ -65,22 +67,21 @@ static int amdgpu_vkms_enable_vblank(struct drm_crtc *crtc) unsigned int pipe = drm_crtc_index(crtc); struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc); + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); drm_calc_timestamping_constants(crtc, &crtc->mode); - hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - out->vblank_hrtimer.function = &amdgpu_vkms_vblank_simulate; out->period_ns = ktime_set(0, vblank->framedur_ns); - hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL); + hrtimer_start(&amdgpu_crtc->vblank_timer, out->period_ns, HRTIMER_MODE_REL); return 0; } static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc) { - struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc); + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - hrtimer_cancel(&out->vblank_hrtimer); + hrtimer_cancel(&amdgpu_crtc->vblank_timer); } static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc, @@ -92,13 +93,14 @@ static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc, unsigned int pipe = crtc->index; struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc); struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); if 
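/*
 * The amdgpu_virt_init_ras_err_handler_data() rework above is the
 * canonical kernel unwind ladder: each allocation gets a goto target,
 * and a failure jumps to the label that frees exactly what succeeded
 * so far. A minimal self-contained sketch of the shape:
 *
 *   int init(struct obj **out)
 *   {
 *           struct obj *o = kmalloc(sizeof(*o), GFP_KERNEL);
 *           if (!o)
 *                   goto data_failure;
 *           o->a = kmalloc_array(32, sizeof(*o->a), GFP_KERNEL);
 *           if (!o->a)
 *                   goto a_failure;
 *           o->b = kmalloc_array(32, sizeof(*o->b), GFP_KERNEL);
 *           if (!o->b)
 *                   goto b_failure;
 *           *out = o;
 *           return 0;
 *   b_failure:
 *           kfree(o->a);
 *   a_failure:
 *           kfree(o);
 *   data_failure:
 *           return -ENOMEM;
 *   }
 *
 * Each exit path undoes exactly the allocations that preceded it.
 */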
(!READ_ONCE(vblank->enabled)) { *vblank_time = ktime_get(); return true; } - *vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires); + *vblank_time = READ_ONCE(amdgpu_crtc->vblank_timer.node.expires); if (WARN_ON(*vblank_time == vblank->time)) return true; @@ -165,6 +167,8 @@ static const struct drm_crtc_helper_funcs amdgpu_vkms_crtc_helper_funcs = { static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, struct drm_plane *primary, struct drm_plane *cursor) { + struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); int ret; ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor, @@ -176,6 +180,17 @@ static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, drm_crtc_helper_add(crtc, &amdgpu_vkms_crtc_helper_funcs); + amdgpu_crtc->crtc_id = drm_crtc_index(crtc); + adev->mode_info.crtcs[drm_crtc_index(crtc)] = amdgpu_crtc; + + amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; + amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE; + + hrtimer_init(&amdgpu_crtc->vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + amdgpu_crtc->vblank_timer.function = &amdgpu_vkms_vblank_simulate; + return ret; } @@ -375,6 +390,7 @@ static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev, int index) { struct drm_plane *plane; + uint64_t modifiers[] = {DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID}; int ret; plane = kzalloc(sizeof(*plane), GFP_KERNEL); @@ -385,7 +401,7 @@ static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev, &amdgpu_vkms_plane_funcs, amdgpu_vkms_formats, ARRAY_SIZE(amdgpu_vkms_formats), - NULL, type, NULL); + modifiers, type, NULL); if (ret) { kfree(plane); return ERR_PTR(ret); @@ -396,12 +412,12 @@ static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev, return plane; } -int amdgpu_vkms_output_init(struct drm_device *dev, - struct amdgpu_vkms_output *output, int index) +static int amdgpu_vkms_output_init(struct drm_device *dev, struct + amdgpu_vkms_output *output, int index) { struct drm_connector *connector = &output->connector; struct drm_encoder *encoder = &output->encoder; - struct drm_crtc *crtc = &output->crtc; + struct drm_crtc *crtc = &output->crtc.base; struct drm_plane *primary, *cursor = NULL; int ret; @@ -465,6 +481,11 @@ static int amdgpu_vkms_sw_init(void *handle) int r, i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc, + sizeof(struct amdgpu_vkms_output), GFP_KERNEL); + if (!adev->amdgpu_vkms_output) + return -ENOMEM; + adev_to_drm(adev)->max_vblank_count = 0; adev_to_drm(adev)->mode_config.funcs = &amdgpu_vkms_mode_funcs; @@ -481,10 +502,6 @@ static int amdgpu_vkms_sw_init(void *handle) if (r) return r; - adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc, sizeof(struct amdgpu_vkms_output), GFP_KERNEL); - if (!adev->amdgpu_vkms_output) - return -ENOMEM; - /* allocate crtcs, encoders, connectors */ for (i = 0; i < adev->mode_info.num_crtc; i++) { r = amdgpu_vkms_output_init(adev_to_drm(adev), &adev->amdgpu_vkms_output[i], i); @@ -507,12 +524,13 @@ static int amdgpu_vkms_sw_fini(void *handle) if (adev->mode_info.crtcs[i]) hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer); - kfree(adev->mode_info.bios_hardcoded_edid); - kfree(adev->amdgpu_vkms_output); - drm_kms_helper_poll_fini(adev_to_drm(adev)); + drm_mode_config_cleanup(adev_to_drm(adev)); 
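/*
 * The vkms hunks above hang the simulated vblank off a per-CRTC hrtimer:
 * the handler forwards the timer by one frame period and rearms itself.
 * A minimal sketch of that hrtimer shape (generic names, not the
 * driver's types; period_ns is assumed to hold the frame duration):
 *
 *   static enum hrtimer_restart vblank_sim(struct hrtimer *t)
 *   {
 *           u64 overruns = hrtimer_forward_now(t, period_ns);
 *
 *           if (overruns != 1)
 *                   pr_warn("vblank timer overrun\n"); // missed periods
 *           // ... report the vblank to DRM here ...
 *           return HRTIMER_RESTART;                    // keep it periodic
 *   }
 *
 *   hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *   timer.function = vblank_sim;
 *   hrtimer_start(&timer, period_ns, HRTIMER_MODE_REL);
 */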
adev->mode_info.mode_config_initialized = false; + + kfree(adev->mode_info.bios_hardcoded_edid); + kfree(adev->amdgpu_vkms_output); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h index 97f1b79c0724..4f8722ff37c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h @@ -10,15 +10,14 @@ #define YRES_MAX 16384 #define drm_crtc_to_amdgpu_vkms_output(target) \ - container_of(target, struct amdgpu_vkms_output, crtc) + container_of(target, struct amdgpu_vkms_output, crtc.base) extern const struct amdgpu_ip_block_version amdgpu_vkms_ip_block; struct amdgpu_vkms_output { - struct drm_crtc crtc; + struct amdgpu_crtc crtc; struct drm_encoder encoder; struct drm_connector connector; - struct hrtimer vblank_hrtimer; ktime_t period_ns; struct drm_pending_vblank_event *event; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 0e7dc23f78e7..b37fc7d7d2c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -53,7 +53,7 @@ * can be mapped as snooped (cached system pages) or unsnooped * (uncached system pages). * Each VM has an ID associated with it and there is a page table - * associated with each VMID. When execting a command buffer, + * associated with each VMID. When executing a command buffer, * the kernel tells the the ring what VMID to use for that command * buffer. VMIDs are allocated dynamically as commands are submitted. * The userspace drivers maintain their own address space and the kernel @@ -2102,30 +2102,14 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev, static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) { struct dma_resv *resv = vm->root.bo->tbo.base.resv; - struct dma_fence *excl, **shared; - unsigned i, shared_count; - int r; + struct dma_resv_iter cursor; + struct dma_fence *fence; - r = dma_resv_get_fences(resv, &excl, &shared_count, &shared); - if (r) { - /* Not enough memory to grab the fence list, as last resort - * block for all the fences to complete. 
- */ - dma_resv_wait_timeout(resv, true, false, - MAX_SCHEDULE_TIMEOUT); - return; - } - - /* Add a callback for each fence in the reservation object */ - amdgpu_vm_prt_get(adev); - amdgpu_vm_add_prt_cb(adev, excl); - - for (i = 0; i < shared_count; ++i) { + dma_resv_for_each_fence(&cursor, resv, true, fence) { + /* Add a callback for each fence in the reservation object */ amdgpu_vm_prt_get(adev); - amdgpu_vm_add_prt_cb(adev, shared[i]); + amdgpu_vm_add_prt_cb(adev, fence); } - - kfree(shared); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 0fad2bf854ae..a38c6a747fa4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -265,6 +265,11 @@ static ssize_t amdgpu_xgmi_show_error(struct device *dev, ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200); ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208); + if ((!adev->df.funcs) || + (!adev->df.funcs->get_fica) || + (!adev->df.funcs->set_fica)) + return -EINVAL; + fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in); if (fica_out != 0x1f) pr_err("xGMI error counters not enabled!\n"); @@ -386,6 +391,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) "%s", "xgmi_hive_info"); if (ret) { dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n"); + kobject_put(&hive->kobj); kfree(hive); hive = NULL; goto pro_end; diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index 6134ed964027..a92d86e12718 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -469,7 +469,7 @@ int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder) if (amdgpu_connector->use_digital && (amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE)) return ATOM_ENCODER_MODE_HDMI; - else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) && + else if (connector->display_info.is_hdmi && (amdgpu_connector->audio == AMDGPU_AUDIO_AUTO)) return ATOM_ENCODER_MODE_HDMI; else if (amdgpu_connector->use_digital) @@ -488,7 +488,7 @@ int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder) if (amdgpu_audio != 0) { if (amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE) return ATOM_ENCODER_MODE_HDMI; - else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) && + else if (connector->display_info.is_hdmi && (amdgpu_connector->audio == AMDGPU_AUDIO_AUTO)) return ATOM_ENCODER_MODE_HDMI; else @@ -506,7 +506,7 @@ int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder) } else if (amdgpu_audio != 0) { if (amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE) return ATOM_ENCODER_MODE_HDMI; - else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) && + else if (connector->display_info.is_hdmi && (amdgpu_connector->audio == AMDGPU_AUDIO_AUTO)) return ATOM_ENCODER_MODE_HDMI; else diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index b200b9e722d9..8318ee8339f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2092,22 +2092,18 @@ static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder) return 1; else return 0; - break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: if (dig->linkb) return 3; else return 2; - break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: if (dig->linkb) return 5; else return 4; - break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: return 6; - break; default: 
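/*
 * On the amdgpu_xgmi.c fix above: per the kobject documentation, once
 * kobject_init_and_add() has been called the caller must drop the
 * reference with kobject_put() even when the call itself failed, so
 * that the ktype's release path runs. Minimal sketch:
 *
 *   ret = kobject_init_and_add(&obj->kobj, &ktype, parent, "name");
 *   if (ret)
 *           kobject_put(&obj->kobj);   // mandatory cleanup on failure
 */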
DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index e7dfeb466a0e..dbe7442fb25c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -7707,8 +7707,19 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev) switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(10, 3, 1): case IP_VERSION(10, 3, 3): - clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) | - ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL); + preempt_disable(); + clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh); + clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh); + hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh); + /* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over + * roughly every 42 seconds. + */ + if (hi_check != clock_hi) { + clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh); + clock_hi = hi_check; + } + preempt_enable(); + clock = clock_lo | (clock_hi << 32ULL); break; default: preempt_disable(); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index b4b80f27b894..edb3e3b08eed 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -140,6 +140,11 @@ MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin"); #define mmTCP_CHAN_STEER_5_ARCT 0x0b0c #define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0 +#define mmGOLDEN_TSC_COUNT_UPPER_Renoir 0x0025 +#define mmGOLDEN_TSC_COUNT_UPPER_Renoir_BASE_IDX 1 +#define mmGOLDEN_TSC_COUNT_LOWER_Renoir 0x0026 +#define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX 1 + enum ta_ras_gfx_subblock { /*CPC*/ TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0, @@ -3065,8 +3070,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev) AMD_PG_SUPPORT_CP | AMD_PG_SUPPORT_GDS | AMD_PG_SUPPORT_RLC_SMU_HS)) { - WREG32(mmRLC_JUMP_TABLE_RESTORE, - adev->gfx.rlc.cp_table_gpu_addr >> 8); + WREG32_SOC15(GC, 0, mmRLC_JUMP_TABLE_RESTORE, + adev->gfx.rlc.cp_table_gpu_addr >> 8); gfx_v9_0_init_gfx_power_gating(adev); } } @@ -4055,9 +4060,10 @@ static int gfx_v9_0_hw_fini(void *handle) gfx_v9_0_cp_enable(adev, false); - /* Skip suspend with A+A reset */ - if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) { - dev_dbg(adev->dev, "Device in reset. 
Skipping RLC halt\n"); + /* Skip stopping RLC with A+A reset or when RLC controls GFX clock */ + if ((adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) || + (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2))) { + dev_dbg(adev->dev, "Skipping RLC halt\n"); return 0; } @@ -4238,19 +4244,38 @@ failed_kiq_read: static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) { - uint64_t clock; + uint64_t clock, clock_lo, clock_hi, hi_check; - amdgpu_gfx_off_ctrl(adev, false); - mutex_lock(&adev->gfx.gpu_clock_mutex); - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) { - clock = gfx_v9_0_kiq_read_clock(adev); - } else { - WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); - clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | - ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 3, 0): + preempt_disable(); + clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir); + clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir); + hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir); + /* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over + * roughly every 42 seconds. + */ + if (hi_check != clock_hi) { + clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir); + clock_hi = hi_check; + } + preempt_enable(); + clock = clock_lo | (clock_hi << 32ULL); + break; + default: + amdgpu_gfx_off_ctrl(adev, false); + mutex_lock(&adev->gfx.gpu_clock_mutex); + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) { + clock = gfx_v9_0_kiq_read_clock(adev); + } else { + WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); + clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | + ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + } + mutex_unlock(&adev->gfx.gpu_clock_mutex); + amdgpu_gfx_off_ctrl(adev, true); + break; } - mutex_unlock(&adev->gfx.gpu_clock_mutex); - amdgpu_gfx_off_ctrl(adev, true); return clock; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 480e41847d7c..ec4d5e15b766 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -162,7 +162,6 @@ static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev) ENABLE_ADVANCED_DRIVER_MODEL, 1); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, MTYPE, MTYPE_UC);/* XXX for emulation. 
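 *
 * A note on the Vangogh/Renoir clock-counter hunks above: a free-running
 * 64-bit counter exposed as two 32-bit registers is racy around the
 * 32-bit carry, so both hunks read upper, lower, then upper again and
 * retry the low half if the high half moved in between. Sketch of the
 * idiom (read_reg() stands in for the RREG32 accessors):
 *
 *   u32 hi  = read_reg(COUNT_UPPER);
 *   u32 lo  = read_reg(COUNT_LOWER);
 *   u32 hi2 = read_reg(COUNT_UPPER);
 *
 *   if (hi2 != hi) {                     // carry between the two reads
 *           lo = read_reg(COUNT_LOWER);  // re-read lo in the new epoch
 *           hi = hi2;
 *   }
 *   clock = lo | ((u64)hi << 32);
 *
 * At the 100 MHz SMUIO TSC rate the low word carries roughly every
 * 42 seconds (2^32 cycles), so the race window is rare but real.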
*/ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c index 14c1c1a297dd..6e0ace2fbfab 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c @@ -196,7 +196,6 @@ static void gfxhub_v2_0_init_tlb_regs(struct amdgpu_device *adev) ENABLE_ADVANCED_DRIVER_MODEL, 1); tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); - tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, MTYPE, MTYPE_UC); /* UC, uncached */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c index e80d1dc43079..b4eddf6e98a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c @@ -197,7 +197,6 @@ static void gfxhub_v2_1_init_tlb_regs(struct amdgpu_device *adev) ENABLE_ADVANCED_DRIVER_MODEL, 1); tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); - tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, MTYPE, MTYPE_UC); /* UC, uncached */ diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 3ec5ff5a6dbe..ae46eb35b3d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -107,7 +107,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, /* Process it onyl if it's the first fault for this address */ if (entry->ih != &adev->irq.ih_soft && - amdgpu_gmc_filter_faults(adev, addr, entry->pasid, + amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid, entry->timestamp)) return 1; @@ -992,10 +992,14 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) return -EINVAL; } + if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) + goto skip_pin_bo; + r = amdgpu_gart_table_vram_pin(adev); if (r) return r; +skip_pin_bo: r = adev->gfxhub.funcs->gart_enable(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index cb82404df534..a5471923b3f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -523,7 +523,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, /* Process it onyl if it's the first fault for this address */ if (entry->ih != &adev->irq.ih_soft && - amdgpu_gmc_filter_faults(adev, addr, entry->pasid, + amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid, entry->timestamp)) return 1; @@ -1294,7 +1294,8 @@ static int gmc_v9_0_late_init(void *handle) if (!amdgpu_sriov_vf(adev) && (adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) { if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) { - if (adev->df.funcs->enable_ecc_force_par_wr_rmw) + if (adev->df.funcs && + adev->df.funcs->enable_ecc_force_par_wr_rmw) adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false); } } @@ -1505,9 +1506,11 @@ static int gmc_v9_0_sw_init(void *handle) chansize = 64; else chansize = 128; - - numchan = adev->df.funcs->get_hbm_channel_number(adev); - adev->gmc.vram_width = numchan * chansize; + if (adev->df.funcs && + adev->df.funcs->get_hbm_channel_number) { + numchan = adev->df.funcs->get_hbm_channel_number(adev); + adev->gmc.vram_width = numchan * chansize; + } } adev->gmc.vram_type = vram_type; @@ -1714,10 +1717,14 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) 
return -EINVAL; } + if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) + goto skip_pin_bo; + r = amdgpu_gart_table_vram_pin(adev); if (r) return r; +skip_pin_bo: r = adev->gfxhub.funcs->gart_enable(adev); if (r) return r; @@ -1742,7 +1749,7 @@ static int gmc_v9_0_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool value; - int r, i; + int i; /* The sequence of these two function calls matters.*/ gmc_v9_0_init_golden_registers(adev); @@ -1777,9 +1784,7 @@ static int gmc_v9_0_hw_init(void *handle) if (adev->umc.funcs && adev->umc.funcs->init_registers) adev->umc.funcs->init_registers(adev); - r = gmc_v9_0_gart_enable(adev); - - return r; + return gmc_v9_0_gart_enable(adev); } /** @@ -1808,6 +1813,14 @@ static int gmc_v9_0_hw_fini(void *handle) return 0; } + /* + * Pair the operations done in gmc_v9_0_hw_init and thus maintain + * a correct cached state for GMC. Otherwise, gating again on + * S3 resume will fail due to the wrong cached state. + */ + if (adev->mmhub.funcs->update_power_gating) + adev->mmhub.funcs->update_power_gating(adev, false); + amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index a99953833820..1da2ec692057 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -145,7 +145,6 @@ static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev) ENABLE_ADVANCED_DRIVER_MODEL, 1); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, MTYPE, MTYPE_UC);/* XXX for emulation. */ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); @@ -302,10 +301,10 @@ static void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev, if (amdgpu_sriov_vf(adev)) return; - if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) { - amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true); - - } + if (adev->pg_flags & AMD_PG_SUPPORT_MMHUB) + amdgpu_dpm_set_powergating_by_smu(adev, + AMD_IP_BLOCK_TYPE_GMC, + enable); } static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c index f80a14a1b82d..f5f7181f9af5 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c @@ -165,7 +165,6 @@ static void mmhub_v1_7_init_tlb_regs(struct amdgpu_device *adev) ENABLE_ADVANCED_DRIVER_MODEL, 1); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, MTYPE, MTYPE_UC);/* XXX for emulation.
*/ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index 25f8e93e5ec3..3718ff610ab2 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -267,7 +267,6 @@ static void mmhub_v2_0_init_tlb_regs(struct amdgpu_device *adev) ENABLE_ADVANCED_DRIVER_MODEL, 1); tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); - tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, MTYPE, MTYPE_UC); /* UC, uncached */ diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c index a11d60ec6321..9e16da28505a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c @@ -194,7 +194,6 @@ static void mmhub_v2_3_init_tlb_regs(struct amdgpu_device *adev) ENABLE_ADVANCED_DRIVER_MODEL, 1); tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); - tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, MTYPE, MTYPE_UC); /* UC, uncached */ diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index c4ef822bbe8c..ff49eeaf7882 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -190,8 +190,6 @@ static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid) tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, - ECO_BITS, 0); - tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, MTYPE, MTYPE_UC);/* XXX for emulation. */ tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 23b066bcffb2..0077e738db31 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -252,11 +252,12 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) * otherwise the mailbox msg will be ruined/reseted by * the VF FLR. */ - if (!down_write_trylock(&adev->reset_sem)) + if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0) return; + down_write(&adev->reset_sem); + amdgpu_virt_fini_data_exchange(adev); - atomic_set(&adev->in_gpu_reset, 1); xgpu_ai_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h index bd3b23171579..f9aa4d0bb638 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h @@ -26,7 +26,7 @@ #define AI_MAILBOX_POLL_ACK_TIMEDOUT 500 #define AI_MAILBOX_POLL_MSG_TIMEDOUT 6000 -#define AI_MAILBOX_POLL_FLR_TIMEDOUT 5000 +#define AI_MAILBOX_POLL_FLR_TIMEDOUT 10000 #define AI_MAILBOX_POLL_MSG_REP_MAX 11 enum idh_request { diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index a35e6d87e537..477d0dde19c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -281,11 +281,12 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) * otherwise the mailbox msg will be ruined/reseted by * the VF FLR. 
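 *
 * The flr_work change here (mirrored in mxgpu_nv.c below) separates the
 * two roles the old trylock played: atomic_cmpxchg() claims exclusive
 * ownership of the reset, and only the single winner then takes the
 * semaphore with a blocking down_write() instead of giving up when it
 * is briefly contended. Minimal sketch:
 *
 *   if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
 *           return;                 // a reset is already in flight
 *   down_write(&adev->reset_sem);   // owner may block for the lock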
*/ - if (!down_write_trylock(&adev->reset_sem)) + if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0) return; + down_write(&adev->reset_sem); + amdgpu_virt_fini_data_exchange(adev); - atomic_set(&adev->in_gpu_reset, 1); xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index 1d8414c3fadb..8ce5b8ca1fd7 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -160,6 +160,7 @@ static int navi10_ih_toggle_ring_interrupts(struct amdgpu_device *adev, tmp = RREG32(ih_regs->ih_rb_cntl); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0)); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1); /* enable_intr field is only valid in ring0 */ if (ih == &adev->irq.ih) tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0)); @@ -275,10 +276,8 @@ static int navi10_ih_enable_ring(struct amdgpu_device *adev, tmp = navi10_ih_rb_cntl(ih, tmp); if (ih == &adev->irq.ih) tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled); - if (ih == &adev->irq.ih1) { - tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0); + if (ih == &adev->irq.ih1) tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1); - } if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) { @@ -319,7 +318,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) { struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2}; u32 ih_chicken; - u32 tmp; int ret; int i; @@ -363,15 +361,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell, ih[0]->doorbell_index); - tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL); - tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL, - CLIENT18_IS_STORM_CLIENT, 1); - WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp); - - tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL); - tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1); - WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp); - pci_set_master(adev->pdev); /* enable interrupts */ @@ -420,12 +409,19 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev, u32 wptr, tmp; struct amdgpu_ih_regs *ih_regs; - wptr = le32_to_cpu(*ih->wptr_cpu); - ih_regs = &ih->ih_regs; + if (ih == &adev->irq.ih) { + /* Only ring0 supports writeback. On other rings fall back + * to register-based code with overflow checking below. + */ + wptr = le32_to_cpu(*ih->wptr_cpu); - if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) - goto out; + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + } + ih_regs = &ih->ih_regs; + + /* Double check that the overflow wasn't already cleared. 
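 *
 * For orientation, the flow this comment belongs to: only ring0 has a
 * writeback copy of wptr, so the other rings always take the register
 * path, and an overflow seen in writeback is re-checked against the
 * register before the ring is resynced. Rough sketch (hypothetical
 * helper names):
 *
 *   if (ring_is_ih0) {
 *           wptr = read_writeback_wptr();     // cheap, no MMIO
 *           if (!overflow_bit_set(wptr))
 *                   goto out;                 // fast path
 *   }
 *   wptr = read_wptr_register();              // authoritative value
 *   if (!overflow_bit_set(wptr))
 *           goto out;
 *   handle_overflow_and_resync_rptr();        // drop oldest entries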
*/ wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr); if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) goto out; @@ -513,15 +509,11 @@ static int navi10_ih_self_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - uint32_t wptr = cpu_to_le32(entry->src_data[0]); - switch (entry->ring_id) { case 1: - *adev->irq.ih1.wptr_cpu = wptr; schedule_work(&adev->irq.ih1_work); break; case 2: - *adev->irq.ih2.wptr_cpu = wptr; schedule_work(&adev->irq.ih2_work); break; default: break; @@ -724,6 +716,7 @@ static const struct amd_ip_funcs navi10_ih_ip_funcs = { static const struct amdgpu_ih_funcs navi10_ih_funcs = { .get_wptr = navi10_ih_get_wptr, .decode_iv = amdgpu_ih_decode_iv_helper, + .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper, .set_rptr = navi10_ih_set_rptr }; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c index 4ecd2b5808ce..ee7cab37dfd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c @@ -359,6 +359,10 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_PCIE(smnPCIE_CONFIG_CNTL, data); + + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } #define NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT 0x00000000 // off by default, no gains over L1 diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c index 0d2d629e2d6a..4bbacf1be25a 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c @@ -276,6 +276,10 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_PCIE(smnPCIE_CI_CNTL, data); + + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } static void nbio_v6_1_program_ltr(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c index 3c00666a13e1..37a4039fdfc5 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c @@ -273,7 +273,9 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = { static void nbio_v7_0_init_registers(struct amdgpu_device *adev) { - + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = + SOC15_REG_OFFSET(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } const struct amdgpu_nbio_funcs nbio_v7_0_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c index 8f2a315e7c73..3444332ea110 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c @@ -371,6 +371,10 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL), data); } + + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0, + regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } const struct amdgpu_nbio_funcs nbio_v7_2_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index b8bd03d16dba..dc5e93756fea 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -362,7 +362,9 @@ const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald = { static void nbio_v7_4_init_registers(struct amdgpu_device *adev) { - + if (amdgpu_sriov_vf(adev)) + 
adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev) @@ -692,6 +694,9 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev) { uint32_t def, data; + if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4)) + return; + def = data = RREG32_PCIE(smnPCIE_LC_CNTL); data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK; data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 59eafa31c626..2ec1ffb36b1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -183,6 +183,7 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode, switch (adev->ip_versions[UVD_HWIP][0]) { case IP_VERSION(3, 0, 0): case IP_VERSION(3, 0, 64): + case IP_VERSION(3, 0, 192): if (amdgpu_sriov_vf(adev)) { if (encode) *codecs = &sriov_sc_video_codecs_encode; @@ -731,8 +732,10 @@ static int nv_common_early_init(void *handle) #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE) struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; - adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; + if (!amdgpu_sriov_vf(adev)) { + adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; + adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; + } adev->smc_rreg = NULL; adev->smc_wreg = NULL; adev->pcie_rreg = &nv_pcie_rreg; @@ -1032,7 +1035,7 @@ static int nv_common_hw_init(void *handle) * for the purpose of expose those registers * to process space */ - if (adev->nbio.funcs->remap_hdp_registers) + if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev)) adev->nbio.funcs->remap_hdp_registers(adev); /* enable the doorbell aperture */ nv_enable_doorbell_aperture(adev, true); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 853d1511b889..81e033549dda 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -481,8 +481,6 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring, * sdma_v5_0_ring_emit_mem_sync - flush the IB by graphics cache rinse * * @ring: amdgpu ring pointer - * @job: job to retrieve vmid from - * @ib: IB object to schedule * * flush the IB by graphics cache rinse. */ diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index 4d4d1aa51b8a..4f546f632223 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -368,8 +368,6 @@ static void sdma_v5_2_ring_emit_ib(struct amdgpu_ring *ring, * sdma_v5_2_ring_emit_mem_sync - flush the IB by graphics cache rinse * * @ring: amdgpu ring pointer - * @job: job to retrieve vmid from - * @ib: IB object to schedule * * flush the IB by graphics cache rinse. */ diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 0c316a2d42ed..0fc1747e4a70 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -744,7 +744,7 @@ static void soc15_reg_base_init(struct amdgpu_device *adev) vega10_reg_base_init(adev); break; case CHIP_RENOIR: - /* It's safe to do ip discovery here for Renior, + /* It's safe to do ip discovery here for Renoir, * it doesn't support SRIOV. 
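nv_common_hw_init and soc15_common_hw_init now skip remap_hdp_registers() for VFs — the PF owns the remap window, and the VF path instead relies on the per-nbio offsets programmed above. The guard itself is the usual optional-callback idiom; a self-contained sketch with hypothetical types:

#include <stdbool.h>
#include <stdio.h>

struct nbio_funcs {
	void (*remap_hdp_registers)(void *dev);
};

static void remap(void *dev)
{
	(void)dev;
	puts("remapped HDP registers");
}

static void common_hw_init(const struct nbio_funcs *funcs, void *dev, bool is_vf)
{
	/* A VF must not touch the remap registers; the PF owns them. */
	if (funcs && funcs->remap_hdp_registers && !is_vf)
		funcs->remap_hdp_registers(dev);
}

int main(void)
{
	struct nbio_funcs f = { .remap_hdp_registers = remap };

	common_hw_init(&f, NULL, false);   /* bare metal: remaps */
	common_hw_init(&f, NULL, true);    /* VF: skipped */
	return 0;
}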
*/ if (amdgpu_discovery) { r = amdgpu_discovery_reg_base_init(adev); @@ -971,8 +971,10 @@ static int soc15_common_early_init(void *handle) #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE) struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; - adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; + if (!amdgpu_sriov_vf(adev)) { + adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; + adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; + } adev->smc_rreg = NULL; adev->smc_wreg = NULL; adev->pcie_rreg = &soc15_pcie_rreg; @@ -1236,7 +1238,9 @@ static int soc15_common_sw_init(void *handle) if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_add_irq_id(adev); - adev->df.funcs->sw_init(adev); + if (adev->df.funcs && + adev->df.funcs->sw_init) + adev->df.funcs->sw_init(adev); return 0; } @@ -1248,7 +1252,10 @@ static int soc15_common_sw_fini(void *handle) if (adev->nbio.ras_funcs && adev->nbio.ras_funcs->ras_fini) adev->nbio.ras_funcs->ras_fini(adev); - adev->df.funcs->sw_fini(adev); + + if (adev->df.funcs && + adev->df.funcs->sw_fini) + adev->df.funcs->sw_fini(adev); return 0; } @@ -1285,7 +1292,7 @@ static int soc15_common_hw_init(void *handle) * for the purpose of expose those registers * to process space */ - if (adev->nbio.funcs->remap_hdp_registers) + if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev)) adev->nbio.funcs->remap_hdp_registers(adev); /* enable the doorbell aperture */ diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c index f7ec3fe134e5..6dd1e19e8d43 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c @@ -50,6 +50,165 @@ static inline uint32_t get_umc_v6_7_reg_offset(struct amdgpu_device *adev, return adev->umc.channel_offs * ch_inst + UMC_V6_7_INST_DIST * umc_inst; } +static inline uint32_t get_umc_v6_7_channel_index(struct amdgpu_device *adev, + uint32_t umc_inst, + uint32_t ch_inst) +{ + return adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst]; +} + +static void umc_v6_7_ecc_info_query_correctable_error_count(struct amdgpu_device *adev, + uint32_t channel_index, + unsigned long *error_count) +{ + uint32_t ecc_err_cnt; + uint64_t mc_umc_status; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + /* + * select the lower chip and check the error count + * skip add error count, calc error counter only from mca_umc_status + */ + ecc_err_cnt = ras->umc_ecc.ecc[channel_index].ce_count_lo_chip; + + /* + * select the higher chip and check the err counter + * skip add error count, calc error counter only from mca_umc_status + */ + ecc_err_cnt = ras->umc_ecc.ecc[channel_index].ce_count_hi_chip; + + /* check for SRAM correctable error + MCUMC_STATUS is a 64 bit register */ + mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status; + if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) + *error_count += 1; +} + +static void umc_v6_7_ecc_info_querry_uncorrectable_error_count(struct amdgpu_device *adev, + uint32_t channel_index, + unsigned long *error_count) +{ + uint64_t mc_umc_status; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + /* check the MCUMC_STATUS */ + mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status; + if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && + (REG_GET_FIELD(mc_umc_status, 
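The new ecc_info_query helpers classify errors purely from the saved MCA status word instead of re-reading the UMC error counters. A hedged stand-alone sketch of the field-extraction pattern — the masks and shifts below are illustrative, not the real MCA_UMC_UMC0_MCUMC_STATUST0 layout:

#include <stdint.h>
#include <stdio.h>

#define STATUS__VAL_MASK   0x8000000000000000ull
#define STATUS__VAL_SHIFT  63
#define STATUS__CECC_MASK  0x0000000000004000ull
#define STATUS__CECC_SHIFT 14
#define STATUS__UECC_MASK  0x0000000000002000ull
#define STATUS__UECC_SHIFT 13

/* mirrors the shape of REG_GET_FIELD: mask, then shift down */
#define GET_FIELD(v, f) (((v) & f##_MASK) >> f##_SHIFT)

int main(void)
{
	uint64_t mc_umc_status = STATUS__VAL_MASK | STATUS__CECC_MASK;

	if (GET_FIELD(mc_umc_status, STATUS__VAL) == 1 &&
	    GET_FIELD(mc_umc_status, STATUS__CECC) == 1)
		puts("correctable error: bump ce_count");
	if (GET_FIELD(mc_umc_status, STATUS__VAL) == 1 &&
	    GET_FIELD(mc_umc_status, STATUS__UECC) == 1)
		puts("uncorrectable error: bump ue_count");
	return 0;
}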
MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
+		*error_count += 1;
+}
+
+static void umc_v6_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
+					   void *ras_error_status)
+{
+	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+	uint32_t umc_inst = 0;
+	uint32_t ch_inst = 0;
+	uint32_t umc_reg_offset = 0;
+	uint32_t channel_index = 0;
+
+	/*TODO: driver needs to toggle DF Cstate to ensure
+	 * safe access of UMC registers. Will add the protection */
+	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+		umc_reg_offset = get_umc_v6_7_reg_offset(adev,
+							 umc_inst,
+							 ch_inst);
+		channel_index = get_umc_v6_7_channel_index(adev,
+							 umc_inst,
+							 ch_inst);
+		umc_v6_7_ecc_info_query_correctable_error_count(adev,
+						      channel_index,
+						      &(err_data->ce_count));
+		umc_v6_7_ecc_info_querry_uncorrectable_error_count(adev,
+					  channel_index,
+					  &(err_data->ue_count));
+	}
+}
+
+static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
+					 struct ras_err_data *err_data,
+					 uint32_t umc_reg_offset,
+					 uint32_t ch_inst,
+					 uint32_t umc_inst)
+{
+	uint64_t mc_umc_status, err_addr, retired_page;
+	struct eeprom_table_record *err_rec;
+	uint32_t channel_index;
+	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+	channel_index =
+		adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+
+	mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status;
+
+	if (mc_umc_status == 0)
+		return;
+
+	if (!err_data->err_addr)
+		return;
+
+	err_rec = &err_data->err_addr[err_data->err_addr_cnt];
+
+	/* calculate error address if ue/ce error is detected */
+	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+
+		err_addr = ras->umc_ecc.ecc[channel_index].mca_umc_addr;
+		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+
+		/* translate umc channel address to soc pa, 3 parts are included */
+		retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
+				ADDR_OF_256B_BLOCK(channel_index) |
+				OFFSET_IN_256B_BLOCK(err_addr);
+
+		/* we only save ue error information currently, ce is skipped */
+		if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
+				== 1) {
+			err_rec->address = err_addr;
+			/* page frame address is saved */
+			err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+			err_rec->ts = (uint64_t)ktime_get_real_seconds();
+			err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+			err_rec->cu = 0;
+			err_rec->mem_channel = channel_index;
+			err_rec->mcumc_id = umc_inst;
+
+			err_data->err_addr_cnt++;
+		}
+	}
+}
+
+static void umc_v6_7_ecc_info_query_ras_error_address(struct amdgpu_device *adev,
+					     void *ras_error_status)
+{
+	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+	uint32_t umc_inst = 0;
+	uint32_t ch_inst = 0;
+	uint32_t umc_reg_offset = 0;
+
+	/*TODO: driver needs to toggle DF Cstate to ensure
+	 * safe access of UMC registers.
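The retired-page computation above stitches the SoC physical address together from three pieces: the 8KB block of the normalized channel address, the channel's 256-byte slot, and the offset within that slot. A sketch with the helper macros reproduced from memory — treat the exact shifts as illustrative and check them against amdgpu_umc.h:

#include <stdint.h>
#include <stdio.h>

#define OFFSET_IN_256B_BLOCK(addr)   ((addr) & 0xffULL)
#define ADDR_OF_256B_BLOCK(ch)       ((uint64_t)(ch) << 8)
#define ADDR_OF_8KB_BLOCK(addr)      (((addr) & ~0xffULL) << 5)

int main(void)
{
	uint64_t err_addr = 0x12345;   /* normalized UMC channel address */
	uint32_t channel_index = 3;

	/* soc physical address = 8KB block | channel slot | byte offset */
	uint64_t retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
				ADDR_OF_256B_BLOCK(channel_index) |
				OFFSET_IN_256B_BLOCK(err_addr);

	/* assuming 4K GPU pages for the frame number */
	printf("retired page frame 0x%llx\n",
	       (unsigned long long)(retired_page >> 12));
	return 0;
}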
Will add the protection + * when firmware interface is ready */ + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { + umc_reg_offset = get_umc_v6_7_reg_offset(adev, + umc_inst, + ch_inst); + umc_v6_7_ecc_info_query_error_address(adev, + err_data, + umc_reg_offset, + ch_inst, + umc_inst); + } +} + static void umc_v6_7_query_correctable_error_count(struct amdgpu_device *adev, uint32_t umc_reg_offset, unsigned long *error_count) @@ -327,4 +486,6 @@ const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs = { .query_ras_error_count = umc_v6_7_query_ras_error_count, .query_ras_error_address = umc_v6_7_query_ras_error_address, .query_ras_poison_mode = umc_v6_7_query_ras_poison_mode, + .ecc_info_query_ras_error_count = umc_v6_7_ecc_info_query_ras_error_count, + .ecc_info_query_ras_error_address = umc_v6_7_ecc_info_query_ras_error_address, }; diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index a9ca6988009e..3070466f54e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -640,6 +640,7 @@ const struct amd_ip_funcs vega10_ih_ip_funcs = { static const struct amdgpu_ih_funcs vega10_ih_funcs = { .get_wptr = vega10_ih_get_wptr, .decode_iv = amdgpu_ih_decode_iv_helper, + .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper, .set_rptr = vega10_ih_set_rptr }; diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c index f51dfc38ac65..3b4eb8285943 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c @@ -688,6 +688,7 @@ const struct amd_ip_funcs vega20_ih_ip_funcs = { static const struct amdgpu_ih_funcs vega20_ih_funcs = { .get_wptr = vega20_ih_get_wptr, .decode_iv = amdgpu_ih_decode_iv_helper, + .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper, .set_rptr = vega20_ih_set_rptr }; diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c index f6233019f042..d60576ce10cd 100644 --- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c @@ -43,15 +43,15 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev, */ if ((ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT || ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) && - dev->device_info->asic_family == CHIP_HAWAII) { + dev->adev->asic_type == CHIP_HAWAII) { struct cik_ih_ring_entry *tmp_ihre = (struct cik_ih_ring_entry *)patched_ihre; *patched_flag = true; *tmp_ihre = *ihre; - vmid = f2g->read_vmid_from_vmfault_reg(dev->kgd); - ret = f2g->get_atc_vmid_pasid_mapping_info(dev->kgd, vmid, &pasid); + vmid = f2g->read_vmid_from_vmfault_reg(dev->adev); + ret = f2g->get_atc_vmid_pasid_mapping_info(dev->adev, vmid, &pasid); tmp_ihre->ring_id &= 0x000000ff; tmp_ihre->ring_id |= vmid << 8; @@ -113,7 +113,7 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev, kfd_process_vm_fault(dev->dqm, pasid); memset(&info, 0, sizeof(info)); - amdgpu_amdkfd_gpuvm_get_vm_fault_info(dev->kgd, &info); + amdgpu_amdkfd_gpuvm_get_vm_fault_info(dev->adev, &info); if (!info.page_addr && !info.status) return; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 24ebd61395d8..4bfc0c8ab764 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -321,7 +321,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, /* Return gpu_id as doorbell offset for mmap usage */ args->doorbell_offset = 
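kfd hands the doorbell mapping back to user space as an encoded mmap offset: a type tag and the gpu_id packed into the high bits, with the in-process doorbell-frame offset OR'd in on SOC15. A sketch of the packing — field positions are illustrative, not kfd's exact layout:

#include <stdint.h>
#include <stdio.h>

#define MMAP_TYPE_SHIFT    62
#define MMAP_TYPE_DOORBELL (0x3ull << MMAP_TYPE_SHIFT)
#define MMAP_GPU_ID_SHIFT  46
#define MMAP_GPU_ID(gpu)   ((uint64_t)(gpu) << MMAP_GPU_ID_SHIFT)

int main(void)
{
	uint32_t gpu_id = 0x1002;
	uint64_t doorbell_offset = MMAP_TYPE_DOORBELL | MMAP_GPU_ID(gpu_id);

	/* the driver's mmap handler later strips the fields back out */
	printf("offset 0x%llx -> gpu 0x%llx\n",
	       (unsigned long long)doorbell_offset,
	       (unsigned long long)((doorbell_offset >> MMAP_GPU_ID_SHIFT) & 0xffff));
	return 0;
}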
KFD_MMAP_TYPE_DOORBELL; args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id); - if (KFD_IS_SOC15(dev->device_info->asic_family)) + if (KFD_IS_SOC15(dev)) /* On SOC15 ASICs, include the doorbell offset within the * process doorbell frame, which is 2 pages. */ @@ -580,7 +580,7 @@ static int kfd_ioctl_dbg_register(struct file *filep, if (!dev) return -EINVAL; - if (dev->device_info->asic_family == CHIP_CARRIZO) { + if (dev->adev->asic_type == CHIP_CARRIZO) { pr_debug("kfd_ioctl_dbg_register not supported on CZ\n"); return -EINVAL; } @@ -631,7 +631,7 @@ static int kfd_ioctl_dbg_unregister(struct file *filep, if (!dev || !dev->dbgmgr) return -EINVAL; - if (dev->device_info->asic_family == CHIP_CARRIZO) { + if (dev->adev->asic_type == CHIP_CARRIZO) { pr_debug("kfd_ioctl_dbg_unregister not supported on CZ\n"); return -EINVAL; } @@ -676,7 +676,7 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep, if (!dev) return -EINVAL; - if (dev->device_info->asic_family == CHIP_CARRIZO) { + if (dev->adev->asic_type == CHIP_CARRIZO) { pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n"); return -EINVAL; } @@ -784,7 +784,7 @@ static int kfd_ioctl_dbg_wave_control(struct file *filep, if (!dev) return -EINVAL; - if (dev->device_info->asic_family == CHIP_CARRIZO) { + if (dev->adev->asic_type == CHIP_CARRIZO) { pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n"); return -EINVAL; } @@ -851,7 +851,7 @@ static int kfd_ioctl_get_clock_counters(struct file *filep, dev = kfd_device_by_id(args->gpu_id); if (dev) /* Reading GPU clock counter from KGD */ - args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(dev->kgd); + args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(dev->adev); else /* Node without GPU resource */ args->gpu_clock_counter = 0; @@ -1041,7 +1041,7 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p, goto out_unlock; } - err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->kgd, + err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->adev, mem, &kern_addr, &size); if (err) { pr_err("Failed to map event page to kernel\n"); @@ -1051,7 +1051,7 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p, err = kfd_event_page_set(p, kern_addr, size); if (err) { pr_err("Failed to set event page\n"); - amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kfd->kgd, mem); + amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kfd->adev, mem); goto out_unlock; } @@ -1137,7 +1137,7 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep, if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS && pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va) dev->kfd2kgd->set_scratch_backing_va( - dev->kgd, args->va_addr, pdd->qpd.vmid); + dev->adev, args->va_addr, pdd->qpd.vmid); return 0; @@ -1158,7 +1158,7 @@ static int kfd_ioctl_get_tile_config(struct file *filep, if (!dev) return -EINVAL; - amdgpu_amdkfd_get_tile_config(dev->kgd, &config); + amdgpu_amdkfd_get_tile_config(dev->adev, &config); args->gb_addr_config = config.gb_addr_config; args->num_banks = config.num_banks; @@ -1244,7 +1244,7 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev) if (dev->use_iommu_v2) return false; - amdgpu_amdkfd_get_local_mem_info(dev->kgd, &mem_info); + amdgpu_amdkfd_get_local_mem_info(dev->adev, &mem_info); if (mem_info.local_mem_size_private == 0 && mem_info.local_mem_size_public > 0) return true; @@ -1313,7 +1313,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, err = -EINVAL; goto err_unlock; } - offset = 
amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd); + offset = dev->adev->rmmio_remap.bus_addr; if (!offset) { err = -ENOMEM; goto err_unlock; @@ -1321,7 +1321,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, } err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( - dev->kgd, args->va_addr, args->size, + dev->adev, args->va_addr, args->size, pdd->drm_priv, (struct kgd_mem **) &mem, &offset, flags); @@ -1353,7 +1353,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, return 0; err_free: - amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, + amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem, pdd->drm_priv, NULL); err_unlock: mutex_unlock(&p->mutex); @@ -1399,7 +1399,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep, goto err_unlock; } - ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, + ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem, pdd->drm_priv, &size); /* If freeing the buffer failed, leave the handle in place for @@ -1484,7 +1484,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep, goto get_mem_obj_from_handle_failed; } err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu( - peer->kgd, (struct kgd_mem *)mem, + peer->adev, (struct kgd_mem *)mem, peer_pdd->drm_priv, &table_freed); if (err) { pr_err("Failed to map to gpu %d/%d\n", @@ -1496,7 +1496,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep, mutex_unlock(&p->mutex); - err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true); + err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true); if (err) { pr_debug("Sync memory failed, wait interrupted by user signal\n"); goto sync_memory_failed; @@ -1593,7 +1593,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, goto get_mem_obj_from_handle_failed; } err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( - peer->kgd, (struct kgd_mem *)mem, peer_pdd->drm_priv); + peer->adev, (struct kgd_mem *)mem, peer_pdd->drm_priv); if (err) { pr_err("Failed to unmap from gpu %d/%d\n", i, args->n_devices); @@ -1603,8 +1603,8 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, } mutex_unlock(&p->mutex); - if (dev->device_info->asic_family == CHIP_ALDEBARAN) { - err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, + if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) { + err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true); if (err) { pr_debug("Sync memory failed, wait interrupted by user signal\n"); @@ -1680,7 +1680,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep, { struct kfd_ioctl_get_dmabuf_info_args *args = data; struct kfd_dev *dev = NULL; - struct kgd_dev *dma_buf_kgd; + struct amdgpu_device *dmabuf_adev; void *metadata_buffer = NULL; uint32_t flags; unsigned int i; @@ -1700,15 +1700,15 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep, } /* Get dmabuf info from KGD */ - r = amdgpu_amdkfd_get_dmabuf_info(dev->kgd, args->dmabuf_fd, - &dma_buf_kgd, &args->size, + r = amdgpu_amdkfd_get_dmabuf_info(dev->adev, args->dmabuf_fd, + &dmabuf_adev, &args->size, metadata_buffer, args->metadata_size, &args->metadata_size, &flags); if (r) goto exit; /* Reverse-lookup gpu_id from kgd pointer */ - dev = kfd_device_by_kgd(dma_buf_kgd); + dev = kfd_device_by_adev(dmabuf_adev); if (!dev) { r = -EINVAL; goto exit; @@ -1758,7 +1758,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep, goto err_unlock; } - r = amdgpu_amdkfd_gpuvm_import_dmabuf(dev->kgd, dmabuf, + r = 
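Throughout this series the CHIP_ALDEBARAN comparisons give way to KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2). IP_VERSION packs major/minor/revision into a single integer, so both equality tests and range checks stay cheap numeric comparisons; the encoding below matches the common amdgpu definition, but verify it against the tree you build:

#include <stdint.h>
#include <stdio.h>

#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

int main(void)
{
	uint32_t gc = IP_VERSION(9, 4, 2);      /* Aldebaran's GC block */

	if (gc == IP_VERSION(9, 4, 2))
		puts("use the Aldebaran map_process packet");
	if (gc < IP_VERSION(10, 1, 1))
		puts("gfx9-class CWSR trap handler");
	return 0;
}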
amdgpu_amdkfd_gpuvm_import_dmabuf(dev->adev, dmabuf, args->va_addr, pdd->drm_priv, (struct kgd_mem **)&mem, &size, NULL); @@ -1779,7 +1779,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep, return 0; err_free: - amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, + amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem, pdd->drm_priv, NULL); err_unlock: mutex_unlock(&p->mutex); @@ -2066,7 +2066,7 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process, if (vma->vm_end - vma->vm_start != PAGE_SIZE) return -EINVAL; - address = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd); + address = dev->adev->rmmio_remap.bus_addr; vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index cfedfb1e8596..f187596faf66 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -1340,7 +1340,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, int ret; unsigned int num_cu_shared; - switch (kdev->device_info->asic_family) { + switch (kdev->adev->asic_type) { case CHIP_KAVERI: pcache_info = kaveri_cache_info; num_of_cache_types = ARRAY_SIZE(kaveri_cache_info); @@ -1377,67 +1377,71 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, pcache_info = vegam_cache_info; num_of_cache_types = ARRAY_SIZE(vegam_cache_info); break; - case CHIP_VEGA10: - pcache_info = vega10_cache_info; - num_of_cache_types = ARRAY_SIZE(vega10_cache_info); - break; - case CHIP_VEGA12: - pcache_info = vega12_cache_info; - num_of_cache_types = ARRAY_SIZE(vega12_cache_info); - break; - case CHIP_VEGA20: - case CHIP_ARCTURUS: - pcache_info = vega20_cache_info; - num_of_cache_types = ARRAY_SIZE(vega20_cache_info); - break; - case CHIP_ALDEBARAN: - pcache_info = aldebaran_cache_info; - num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info); - break; - case CHIP_RAVEN: - pcache_info = raven_cache_info; - num_of_cache_types = ARRAY_SIZE(raven_cache_info); - break; - case CHIP_RENOIR: - pcache_info = renoir_cache_info; - num_of_cache_types = ARRAY_SIZE(renoir_cache_info); - break; - case CHIP_NAVI10: - case CHIP_NAVI12: - case CHIP_CYAN_SKILLFISH: - pcache_info = navi10_cache_info; - num_of_cache_types = ARRAY_SIZE(navi10_cache_info); - break; - case CHIP_NAVI14: - pcache_info = navi14_cache_info; - num_of_cache_types = ARRAY_SIZE(navi14_cache_info); - break; - case CHIP_SIENNA_CICHLID: - pcache_info = sienna_cichlid_cache_info; - num_of_cache_types = ARRAY_SIZE(sienna_cichlid_cache_info); - break; - case CHIP_NAVY_FLOUNDER: - pcache_info = navy_flounder_cache_info; - num_of_cache_types = ARRAY_SIZE(navy_flounder_cache_info); - break; - case CHIP_DIMGREY_CAVEFISH: - pcache_info = dimgrey_cavefish_cache_info; - num_of_cache_types = ARRAY_SIZE(dimgrey_cavefish_cache_info); - break; - case CHIP_VANGOGH: - pcache_info = vangogh_cache_info; - num_of_cache_types = ARRAY_SIZE(vangogh_cache_info); - break; - case CHIP_BEIGE_GOBY: - pcache_info = beige_goby_cache_info; - num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info); - break; - case CHIP_YELLOW_CARP: - pcache_info = yellow_carp_cache_info; - num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info); - break; default: - return -EINVAL; + switch(KFD_GC_VERSION(kdev)) { + case IP_VERSION(9, 0, 1): + pcache_info = vega10_cache_info; + num_of_cache_types = ARRAY_SIZE(vega10_cache_info); + break; + case IP_VERSION(9, 2, 1): + pcache_info = 
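kfd_fill_gpu_cache_info keeps the pre-discovery ASICs matching on asic_type and pushes everything newer into a GC-IP-version switch nested in the default arm. The dispatch shape, reduced to a sketch with hypothetical enum values and only two version cases:

#include <stdio.h>

enum asic_type { CHIP_KAVERI, CHIP_HAWAII, CHIP_IP_DISCOVERY };
#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

static const char *pick_cache_info(enum asic_type type, unsigned int gc_ver)
{
	switch (type) {
	case CHIP_KAVERI:
		return "kaveri_cache_info";
	case CHIP_HAWAII:
		return "hawaii_cache_info";
	default:
		/* discovery-era parts dispatch on the GC IP version */
		switch (gc_ver) {
		case IP_VERSION(9, 0, 1):
			return "vega10_cache_info";
		case IP_VERSION(10, 3, 0):
			return "sienna_cichlid_cache_info";
		default:
			return NULL;    /* the driver returns -EINVAL here */
		}
	}
}

int main(void)
{
	printf("%s\n", pick_cache_info(CHIP_IP_DISCOVERY, IP_VERSION(9, 0, 1)));
	return 0;
}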
vega12_cache_info; + num_of_cache_types = ARRAY_SIZE(vega12_cache_info); + break; + case IP_VERSION(9, 4, 0): + case IP_VERSION(9, 4, 1): + pcache_info = vega20_cache_info; + num_of_cache_types = ARRAY_SIZE(vega20_cache_info); + break; + case IP_VERSION(9, 4, 2): + pcache_info = aldebaran_cache_info; + num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info); + break; + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 2): + pcache_info = raven_cache_info; + num_of_cache_types = ARRAY_SIZE(raven_cache_info); + break; + case IP_VERSION(9, 3, 0): + pcache_info = renoir_cache_info; + num_of_cache_types = ARRAY_SIZE(renoir_cache_info); + break; + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 1, 3): + pcache_info = navi10_cache_info; + num_of_cache_types = ARRAY_SIZE(navi10_cache_info); + break; + case IP_VERSION(10, 1, 1): + pcache_info = navi14_cache_info; + num_of_cache_types = ARRAY_SIZE(navi14_cache_info); + break; + case IP_VERSION(10, 3, 0): + pcache_info = sienna_cichlid_cache_info; + num_of_cache_types = ARRAY_SIZE(sienna_cichlid_cache_info); + break; + case IP_VERSION(10, 3, 2): + pcache_info = navy_flounder_cache_info; + num_of_cache_types = ARRAY_SIZE(navy_flounder_cache_info); + break; + case IP_VERSION(10, 3, 4): + pcache_info = dimgrey_cavefish_cache_info; + num_of_cache_types = ARRAY_SIZE(dimgrey_cavefish_cache_info); + break; + case IP_VERSION(10, 3, 1): + pcache_info = vangogh_cache_info; + num_of_cache_types = ARRAY_SIZE(vangogh_cache_info); + break; + case IP_VERSION(10, 3, 5): + pcache_info = beige_goby_cache_info; + num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info); + break; + case IP_VERSION(10, 3, 3): + pcache_info = yellow_carp_cache_info; + num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info); + break; + default: + return -EINVAL; + } } *size_filled = 0; @@ -1963,8 +1967,6 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size, struct crat_subtype_iolink *sub_type_hdr, uint32_t proximity_domain) { - struct amdgpu_device *adev = (struct amdgpu_device *)kdev->kgd; - *avail_size -= sizeof(struct crat_subtype_iolink); if (*avail_size < 0) return -ENOMEM; @@ -1981,7 +1983,7 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size, /* Fill in IOLINK subtype. 
* TODO: Fill-in other fields of iolink subtype */ - if (adev->gmc.xgmi.connected_to_cpu) { + if (kdev->adev->gmc.xgmi.connected_to_cpu) { /* * with host gpu xgmi link, host can access gpu memory whether * or not pcie bar type is large, so always create bidirectional @@ -1990,19 +1992,19 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size, sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL; sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI; sub_type_hdr->num_hops_xgmi = 1; - if (adev->asic_type == CHIP_ALDEBARAN) { + if (KFD_GC_VERSION(kdev) == IP_VERSION(9, 4, 2)) { sub_type_hdr->minimum_bandwidth_mbs = amdgpu_amdkfd_get_xgmi_bandwidth_mbytes( - kdev->kgd, NULL, true); + kdev->adev, NULL, true); sub_type_hdr->maximum_bandwidth_mbs = sub_type_hdr->minimum_bandwidth_mbs; } } else { sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS; sub_type_hdr->minimum_bandwidth_mbs = - amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->kgd, true); + amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->adev, true); sub_type_hdr->maximum_bandwidth_mbs = - amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->kgd, false); + amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->adev, false); } sub_type_hdr->proximity_domain_from = proximity_domain; @@ -2044,11 +2046,11 @@ static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size, sub_type_hdr->proximity_domain_from = proximity_domain_from; sub_type_hdr->proximity_domain_to = proximity_domain_to; sub_type_hdr->num_hops_xgmi = - amdgpu_amdkfd_get_xgmi_hops_count(kdev->kgd, peer_kdev->kgd); + amdgpu_amdkfd_get_xgmi_hops_count(kdev->adev, peer_kdev->adev); sub_type_hdr->maximum_bandwidth_mbs = - amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->kgd, peer_kdev->kgd, false); + amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, peer_kdev->adev, false); sub_type_hdr->minimum_bandwidth_mbs = sub_type_hdr->maximum_bandwidth_mbs ? - amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->kgd, NULL, true) : 0; + amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, NULL, true) : 0; return 0; } @@ -2114,7 +2116,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT; cu->proximity_domain = proximity_domain; - amdgpu_amdkfd_get_cu_info(kdev->kgd, &cu_info); + amdgpu_amdkfd_get_cu_info(kdev->adev, &cu_info); cu->num_simd_per_cu = cu_info.simd_per_cu; cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number; cu->max_waves_simd = cu_info.max_waves_per_simd; @@ -2145,7 +2147,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, * report the total FB size (public+private) as a single * private heap. 
*/ - amdgpu_amdkfd_get_local_mem_info(kdev->kgd, &local_mem_info); + amdgpu_amdkfd_get_local_mem_info(kdev->adev, &local_mem_info); sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + sub_type_hdr->length); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c index 159add0f5aaa..1e30717b5253 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c @@ -41,7 +41,7 @@ static void dbgdev_address_watch_disable_nodiq(struct kfd_dev *dev) { - dev->kfd2kgd->address_watch_disable(dev->kgd); + dev->kfd2kgd->address_watch_disable(dev->adev); } static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev, @@ -322,7 +322,7 @@ static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev, pr_debug("\t\t%30s\n", "* * * * * * * * * * * * * * * * * *"); pdd->dev->kfd2kgd->address_watch_execute( - dbgdev->dev->kgd, + dbgdev->dev->adev, i, cntl.u32All, addrHi.u32All, @@ -420,7 +420,7 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev, aw_reg_add_dword = dbgdev->dev->kfd2kgd->address_watch_get_offset( - dbgdev->dev->kgd, + dbgdev->dev->adev, i, ADDRESS_WATCH_REG_CNTL); @@ -431,7 +431,7 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev, aw_reg_add_dword = dbgdev->dev->kfd2kgd->address_watch_get_offset( - dbgdev->dev->kgd, + dbgdev->dev->adev, i, ADDRESS_WATCH_REG_ADDR_HI); @@ -441,7 +441,7 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev, aw_reg_add_dword = dbgdev->dev->kfd2kgd->address_watch_get_offset( - dbgdev->dev->kgd, + dbgdev->dev->adev, i, ADDRESS_WATCH_REG_ADDR_LO); @@ -457,7 +457,7 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev, aw_reg_add_dword = dbgdev->dev->kfd2kgd->address_watch_get_offset( - dbgdev->dev->kgd, + dbgdev->dev->adev, i, ADDRESS_WATCH_REG_CNTL); @@ -752,7 +752,7 @@ static int dbgdev_wave_control_nodiq(struct kfd_dbgdev *dbgdev, pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *"); - return dbgdev->dev->kfd2kgd->wave_control_execute(dbgdev->dev->kgd, + return dbgdev->dev->kfd2kgd->wave_control_execute(dbgdev->dev->adev, reg_gfx_index.u32All, reg_sq_cmd.u32All); } @@ -784,7 +784,7 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p) for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) { status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info - (dev->kgd, vmid, &queried_pasid); + (dev->adev, vmid, &queried_pasid); if (status && queried_pasid == p->pasid) { pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n", @@ -811,7 +811,7 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p) /* for non DIQ we need to patch the VMID: */ reg_sq_cmd.bits.vm_id = vmid; - dev->kfd2kgd->wave_control_execute(dev->kgd, + dev->kfd2kgd->wave_control_execute(dev->adev, reg_gfx_index.u32All, reg_sq_cmd.u32All); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 3b119db16003..facc28f58c1f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -53,770 +53,248 @@ extern const struct kfd2kgd_calls aldebaran_kfd2kgd; extern const struct kfd2kgd_calls gfx_v10_kfd2kgd; extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd; -#ifdef KFD_SUPPORT_IOMMU_V2 -static const struct kfd_device_info kaveri_device_info = { - .asic_family = CHIP_KAVERI, - .asic_name = "kaveri", - .gfx_target_version = 70000, - .max_pasid_bits = 16, - /* max num of queues for KV.TODO should be a dynamic value */ - 
.max_no_of_hqd = 24, - .doorbell_size = 4, - .ih_ring_entry_size = 4 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_cik, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = false, - .needs_iommu_device = true, - .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info carrizo_device_info = { - .asic_family = CHIP_CARRIZO, - .asic_name = "carrizo", - .gfx_target_version = 80001, - .max_pasid_bits = 16, - /* max num of queues for CZ.TODO should be a dynamic value */ - .max_no_of_hqd = 24, - .doorbell_size = 4, - .ih_ring_entry_size = 4 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_cik, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = true, - .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info raven_device_info = { - .asic_family = CHIP_RAVEN, - .asic_name = "raven", - .gfx_target_version = 90002, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = true, - .needs_pci_atomics = true, - .num_sdma_engines = 1, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 2, -}; -#endif - -#ifdef CONFIG_DRM_AMDGPU_CIK -static const struct kfd_device_info hawaii_device_info = { - .asic_family = CHIP_HAWAII, - .asic_name = "hawaii", - .gfx_target_version = 70001, - .max_pasid_bits = 16, - /* max num of queues for KV.TODO should be a dynamic value */ - .max_no_of_hqd = 24, - .doorbell_size = 4, - .ih_ring_entry_size = 4 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_cik, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = false, - .needs_iommu_device = false, - .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 2, -}; -#endif - -static const struct kfd_device_info tonga_device_info = { - .asic_family = CHIP_TONGA, - .asic_name = "tonga", - .gfx_target_version = 80002, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 4, - .ih_ring_entry_size = 4 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_cik, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = false, - .needs_iommu_device = false, - .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info fiji_device_info = { - .asic_family = CHIP_FIJI, - .asic_name = "fiji", - .gfx_target_version = 80003, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 4, - .ih_ring_entry_size = 4 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_cik, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info fiji_vf_device_info = { - .asic_family = CHIP_FIJI, - .asic_name = "fiji", - .gfx_target_version = 80003, - .max_pasid_bits = 16, - 
.max_no_of_hqd = 24, - .doorbell_size = 4, - .ih_ring_entry_size = 4 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_cik, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 2, -}; - - -static const struct kfd_device_info polaris10_device_info = { - .asic_family = CHIP_POLARIS10, - .asic_name = "polaris10", - .gfx_target_version = 80003, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 4, - .ih_ring_entry_size = 4 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_cik, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info polaris10_vf_device_info = { - .asic_family = CHIP_POLARIS10, - .asic_name = "polaris10", - .gfx_target_version = 80003, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 4, - .ih_ring_entry_size = 4 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_cik, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info polaris11_device_info = { - .asic_family = CHIP_POLARIS11, - .asic_name = "polaris11", - .gfx_target_version = 80003, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 4, - .ih_ring_entry_size = 4 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_cik, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info polaris12_device_info = { - .asic_family = CHIP_POLARIS12, - .asic_name = "polaris12", - .gfx_target_version = 80003, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 4, - .ih_ring_entry_size = 4 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_cik, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info vegam_device_info = { - .asic_family = CHIP_VEGAM, - .asic_name = "vegam", - .gfx_target_version = 80003, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 4, - .ih_ring_entry_size = 4 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_cik, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info vega10_device_info = { - .asic_family = CHIP_VEGA10, - .asic_name = "vega10", - .gfx_target_version = 90000, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = 
&event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info vega10_vf_device_info = { - .asic_family = CHIP_VEGA10, - .asic_name = "vega10", - .gfx_target_version = 90000, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info vega12_device_info = { - .asic_family = CHIP_VEGA12, - .asic_name = "vega12", - .gfx_target_version = 90004, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info vega20_device_info = { - .asic_family = CHIP_VEGA20, - .asic_name = "vega20", - .gfx_target_version = 90006, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 8, -}; - -static const struct kfd_device_info arcturus_device_info = { - .asic_family = CHIP_ARCTURUS, - .asic_name = "arcturus", - .gfx_target_version = 90008, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 6, - .num_sdma_queues_per_engine = 8, -}; - -static const struct kfd_device_info aldebaran_device_info = { - .asic_family = CHIP_ALDEBARAN, - .asic_name = "aldebaran", - .gfx_target_version = 90010, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 3, - .num_sdma_queues_per_engine = 8, -}; - -static const struct kfd_device_info renoir_device_info = { - .asic_family = CHIP_RENOIR, - .asic_name = "renoir", - .gfx_target_version = 90012, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = 
false, - .needs_pci_atomics = false, - .num_sdma_engines = 1, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info navi10_device_info = { - .asic_family = CHIP_NAVI10, - .asic_name = "navi10", - .gfx_target_version = 100100, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .needs_iommu_device = false, - .supports_cwsr = true, - .needs_pci_atomics = true, - .no_atomic_fw_version = 145, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 8, -}; - -static const struct kfd_device_info navi12_device_info = { - .asic_family = CHIP_NAVI12, - .asic_name = "navi12", - .gfx_target_version = 100101, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .needs_iommu_device = false, - .supports_cwsr = true, - .needs_pci_atomics = true, - .no_atomic_fw_version = 145, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 8, -}; - -static const struct kfd_device_info navi14_device_info = { - .asic_family = CHIP_NAVI14, - .asic_name = "navi14", - .gfx_target_version = 100102, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .needs_iommu_device = false, - .supports_cwsr = true, - .needs_pci_atomics = true, - .no_atomic_fw_version = 145, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 8, -}; - -static const struct kfd_device_info sienna_cichlid_device_info = { - .asic_family = CHIP_SIENNA_CICHLID, - .asic_name = "sienna_cichlid", - .gfx_target_version = 100300, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .needs_iommu_device = false, - .supports_cwsr = true, - .needs_pci_atomics = true, - .no_atomic_fw_version = 92, - .num_sdma_engines = 4, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 8, -}; - -static const struct kfd_device_info navy_flounder_device_info = { - .asic_family = CHIP_NAVY_FLOUNDER, - .asic_name = "navy_flounder", - .gfx_target_version = 100301, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .needs_iommu_device = false, - .supports_cwsr = true, - .needs_pci_atomics = true, - .no_atomic_fw_version = 92, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 8, -}; - -static const struct kfd_device_info vangogh_device_info = { - .asic_family = CHIP_VANGOGH, - .asic_name = "vangogh", - .gfx_target_version = 100303, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - 
.needs_iommu_device = false, - .supports_cwsr = true, - .needs_pci_atomics = true, - .no_atomic_fw_version = 92, - .num_sdma_engines = 1, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info dimgrey_cavefish_device_info = { - .asic_family = CHIP_DIMGREY_CAVEFISH, - .asic_name = "dimgrey_cavefish", - .gfx_target_version = 100302, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .needs_iommu_device = false, - .supports_cwsr = true, - .needs_pci_atomics = true, - .no_atomic_fw_version = 92, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 8, -}; - -static const struct kfd_device_info beige_goby_device_info = { - .asic_family = CHIP_BEIGE_GOBY, - .asic_name = "beige_goby", - .gfx_target_version = 100304, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .needs_iommu_device = false, - .supports_cwsr = true, - .needs_pci_atomics = true, - .no_atomic_fw_version = 92, - .num_sdma_engines = 1, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 8, -}; - -static const struct kfd_device_info yellow_carp_device_info = { - .asic_family = CHIP_YELLOW_CARP, - .asic_name = "yellow_carp", - .gfx_target_version = 100305, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .needs_iommu_device = false, - .supports_cwsr = true, - .needs_pci_atomics = true, - .no_atomic_fw_version = 92, - .num_sdma_engines = 1, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info cyan_skillfish_device_info = { - .asic_family = CHIP_CYAN_SKILLFISH, - .asic_name = "cyan_skillfish", - .gfx_target_version = 100103, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .needs_iommu_device = false, - .supports_cwsr = true, - .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, - .num_sdma_queues_per_engine = 8, -}; - static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, unsigned int chunk_size); static void kfd_gtt_sa_fini(struct kfd_dev *kfd); static int kfd_resume(struct kfd_dev *kfd); -struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf) +static void kfd_device_info_init(struct kfd_dev *kfd, + bool vf, uint32_t gfx_target_version) { - struct kfd_dev *kfd; - const struct kfd_device_info *device_info; - const struct kfd2kgd_calls *f2g; - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; + uint32_t gc_version = KFD_GC_VERSION(kfd); + uint32_t sdma_version = kfd->adev->ip_versions[SDMA0_HWIP][0]; + uint32_t asic_type = kfd->adev->asic_type; + + kfd->device_info.max_pasid_bits = 16; + kfd->device_info.max_no_of_hqd = 24; + kfd->device_info.num_of_watch_points = 4; + kfd->device_info.mqd_size_aligned = MQD_SIZE_ALIGNED; + kfd->device_info.gfx_target_version = gfx_target_version; + + if 
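kfd_device_info_init, introduced just below, derives the former per-ASIC table entries from the GC and SDMA IP versions at probe time. A condensed sketch of that derivation — it keeps only three fields and omits the SDMA 5.2.x exceptions, with the thresholds copied from the hunk that follows:

#include <stdbool.h>
#include <stdio.h>

#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

struct device_info {
	unsigned int doorbell_size;
	unsigned int num_sdma_queues_per_engine;
	bool needs_iommu_device;
};

static void device_info_init(struct device_info *info, bool soc15,
			     unsigned int gc_ver, unsigned int sdma_ver)
{
	info->doorbell_size = soc15 ? 8 : 4;
	info->num_sdma_queues_per_engine =
		(soc15 && !(sdma_ver >= IP_VERSION(4, 0, 0) &&
			    sdma_ver <= IP_VERSION(4, 2, 0))) ? 8 : 2;
	/* Raven is the only SOC15 part that still needs the IOMMUv2 path */
	info->needs_iommu_device = gc_ver == IP_VERSION(9, 1, 0) ||
				   gc_ver == IP_VERSION(9, 2, 2);
}

int main(void)
{
	struct device_info info;

	device_info_init(&info, true, IP_VERSION(9, 1, 0), IP_VERSION(4, 1, 0));
	printf("doorbell %u, sdma queues %u, iommu %d\n",
	       info.doorbell_size, info.num_sdma_queues_per_engine,
	       info.needs_iommu_device);
	return 0;
}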
(KFD_IS_SOC15(kfd)) { + kfd->device_info.doorbell_size = 8; + kfd->device_info.ih_ring_entry_size = 8 * sizeof(uint32_t); + kfd->device_info.event_interrupt_class = &event_interrupt_class_v9; + kfd->device_info.supports_cwsr = true; + + if ((sdma_version >= IP_VERSION(4, 0, 0) && + sdma_version <= IP_VERSION(4, 2, 0)) || + sdma_version == IP_VERSION(5, 2, 1) || + sdma_version == IP_VERSION(5, 2, 3)) + kfd->device_info.num_sdma_queues_per_engine = 2; + else + kfd->device_info.num_sdma_queues_per_engine = 8; + + /* Raven */ + if (gc_version == IP_VERSION(9, 1, 0) || + gc_version == IP_VERSION(9, 2, 2)) + kfd->device_info.needs_iommu_device = true; + + if (gc_version < IP_VERSION(11, 0, 0)) { + /* Navi2x+, Navi1x+ */ + if (gc_version >= IP_VERSION(10, 3, 0)) + kfd->device_info.no_atomic_fw_version = 92; + else if (gc_version >= IP_VERSION(10, 1, 1)) + kfd->device_info.no_atomic_fw_version = 145; + + /* Navi1x+ */ + if (gc_version >= IP_VERSION(10, 1, 1)) + kfd->device_info.needs_pci_atomics = true; + } + } else { + kfd->device_info.doorbell_size = 4; + kfd->device_info.ih_ring_entry_size = 4 * sizeof(uint32_t); + kfd->device_info.event_interrupt_class = &event_interrupt_class_cik; + kfd->device_info.num_sdma_queues_per_engine = 2; + + if (asic_type != CHIP_KAVERI && + asic_type != CHIP_HAWAII && + asic_type != CHIP_TONGA) + kfd->device_info.supports_cwsr = true; + + if (asic_type == CHIP_KAVERI || + asic_type == CHIP_CARRIZO) + kfd->device_info.needs_iommu_device = true; + + if (asic_type != CHIP_HAWAII && !vf) + kfd->device_info.needs_pci_atomics = true; + } +} + +struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) +{ + struct kfd_dev *kfd = NULL; + const struct kfd2kgd_calls *f2g = NULL; struct pci_dev *pdev = adev->pdev; + uint32_t gfx_target_version = 0; switch (adev->asic_type) { #ifdef KFD_SUPPORT_IOMMU_V2 #ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_KAVERI: - if (vf) - device_info = NULL; - else - device_info = &kaveri_device_info; - f2g = &gfx_v7_kfd2kgd; + gfx_target_version = 70000; + if (!vf) + f2g = &gfx_v7_kfd2kgd; break; #endif case CHIP_CARRIZO: - if (vf) - device_info = NULL; - else - device_info = &carrizo_device_info; - f2g = &gfx_v8_kfd2kgd; + gfx_target_version = 80001; + if (!vf) + f2g = &gfx_v8_kfd2kgd; break; #endif #ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_HAWAII: - if (vf) - device_info = NULL; - else - device_info = &hawaii_device_info; - f2g = &gfx_v7_kfd2kgd; + gfx_target_version = 70001; + if (!amdgpu_exp_hw_support) + pr_info( + "KFD support on Hawaii is experimental. 
See modparam exp_hw_support\n" + ); + else if (!vf) + f2g = &gfx_v7_kfd2kgd; break; #endif case CHIP_TONGA: - if (vf) - device_info = NULL; - else - device_info = &tonga_device_info; - f2g = &gfx_v8_kfd2kgd; + gfx_target_version = 80002; + if (!vf) + f2g = &gfx_v8_kfd2kgd; break; case CHIP_FIJI: - if (vf) - device_info = &fiji_vf_device_info; - else - device_info = &fiji_device_info; + gfx_target_version = 80003; f2g = &gfx_v8_kfd2kgd; break; case CHIP_POLARIS10: - if (vf) - device_info = &polaris10_vf_device_info; - else - device_info = &polaris10_device_info; + gfx_target_version = 80003; f2g = &gfx_v8_kfd2kgd; break; case CHIP_POLARIS11: - if (vf) - device_info = NULL; - else - device_info = &polaris11_device_info; - f2g = &gfx_v8_kfd2kgd; + gfx_target_version = 80003; + if (!vf) + f2g = &gfx_v8_kfd2kgd; break; case CHIP_POLARIS12: - if (vf) - device_info = NULL; - else - device_info = &polaris12_device_info; - f2g = &gfx_v8_kfd2kgd; + gfx_target_version = 80003; + if (!vf) + f2g = &gfx_v8_kfd2kgd; break; case CHIP_VEGAM: - if (vf) - device_info = NULL; - else - device_info = &vegam_device_info; - f2g = &gfx_v8_kfd2kgd; + gfx_target_version = 80003; + if (!vf) + f2g = &gfx_v8_kfd2kgd; break; default: switch (adev->ip_versions[GC_HWIP][0]) { + /* Vega 10 */ case IP_VERSION(9, 0, 1): - if (vf) - device_info = &vega10_vf_device_info; - else - device_info = &vega10_device_info; + gfx_target_version = 90000; f2g = &gfx_v9_kfd2kgd; break; #ifdef KFD_SUPPORT_IOMMU_V2 + /* Raven */ case IP_VERSION(9, 1, 0): case IP_VERSION(9, 2, 2): - if (vf) - device_info = NULL; - else - device_info = &raven_device_info; - f2g = &gfx_v9_kfd2kgd; + gfx_target_version = 90002; + if (!vf) + f2g = &gfx_v9_kfd2kgd; break; #endif + /* Vega12 */ case IP_VERSION(9, 2, 1): - if (vf) - device_info = NULL; - else - device_info = &vega12_device_info; - f2g = &gfx_v9_kfd2kgd; + gfx_target_version = 90004; + if (!vf) + f2g = &gfx_v9_kfd2kgd; break; + /* Renoir */ case IP_VERSION(9, 3, 0): - if (vf) - device_info = NULL; - else - device_info = &renoir_device_info; - f2g = &gfx_v9_kfd2kgd; + gfx_target_version = 90012; + if (!vf) + f2g = &gfx_v9_kfd2kgd; break; + /* Vega20 */ case IP_VERSION(9, 4, 0): - if (vf) - device_info = NULL; - else - device_info = &vega20_device_info; - f2g = &gfx_v9_kfd2kgd; + gfx_target_version = 90006; + if (!vf) + f2g = &gfx_v9_kfd2kgd; break; + /* Arcturus */ case IP_VERSION(9, 4, 1): - device_info = &arcturus_device_info; + gfx_target_version = 90008; f2g = &arcturus_kfd2kgd; break; + /* Aldebaran */ case IP_VERSION(9, 4, 2): - device_info = &aldebaran_device_info; + gfx_target_version = 90010; f2g = &aldebaran_kfd2kgd; break; + /* Navi10 */ case IP_VERSION(10, 1, 10): - if (vf) - device_info = NULL; - else - device_info = &navi10_device_info; - f2g = &gfx_v10_kfd2kgd; + gfx_target_version = 100100; + if (!vf) + f2g = &gfx_v10_kfd2kgd; break; + /* Navi12 */ case IP_VERSION(10, 1, 2): - device_info = &navi12_device_info; + gfx_target_version = 100101; f2g = &gfx_v10_kfd2kgd; break; + /* Navi14 */ case IP_VERSION(10, 1, 1): - if (vf) - device_info = NULL; - else - device_info = &navi14_device_info; - f2g = &gfx_v10_kfd2kgd; + gfx_target_version = 100102; + if (!vf) + f2g = &gfx_v10_kfd2kgd; break; + /* Cyan Skillfish */ case IP_VERSION(10, 1, 3): - if (vf) - device_info = NULL; - else - device_info = &cyan_skillfish_device_info; - f2g = &gfx_v10_kfd2kgd; + gfx_target_version = 100103; + if (!vf) + f2g = &gfx_v10_kfd2kgd; break; + /* Sienna Cichlid */ case IP_VERSION(10, 3, 0): - device_info = 
&sienna_cichlid_device_info; + gfx_target_version = 100300; f2g = &gfx_v10_3_kfd2kgd; break; + /* Navy Flounder */ case IP_VERSION(10, 3, 2): - device_info = &navy_flounder_device_info; + gfx_target_version = 100301; f2g = &gfx_v10_3_kfd2kgd; break; + /* Van Gogh */ case IP_VERSION(10, 3, 1): - if (vf) - device_info = NULL; - else - device_info = &vangogh_device_info; - f2g = &gfx_v10_3_kfd2kgd; + gfx_target_version = 100303; + if (!vf) + f2g = &gfx_v10_3_kfd2kgd; break; + /* Dimgrey Cavefish */ case IP_VERSION(10, 3, 4): - device_info = &dimgrey_cavefish_device_info; + gfx_target_version = 100302; f2g = &gfx_v10_3_kfd2kgd; break; + /* Beige Goby */ case IP_VERSION(10, 3, 5): - device_info = &beige_goby_device_info; + gfx_target_version = 100304; f2g = &gfx_v10_3_kfd2kgd; break; + /* Yellow Carp */ case IP_VERSION(10, 3, 3): - if (vf) - device_info = NULL; - else - device_info = &yellow_carp_device_info; - f2g = &gfx_v10_3_kfd2kgd; + gfx_target_version = 100305; + if (!vf) + f2g = &gfx_v10_3_kfd2kgd; break; default: - return NULL; + break; } break; } - if (!device_info || !f2g) { - dev_err(kfd_device, "%s %s not supported in kfd\n", - amdgpu_asic_name[adev->asic_type], vf ? "VF" : ""); + if (!f2g) { + if (adev->ip_versions[GC_HWIP][0]) + dev_err(kfd_device, "GC IP %06x %s not supported in kfd\n", + adev->ip_versions[GC_HWIP][0], vf ? "VF" : ""); + else + dev_err(kfd_device, "%s %s not supported in kfd\n", + amdgpu_asic_name[adev->asic_type], vf ? "VF" : ""); return NULL; } @@ -824,8 +302,8 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf) if (!kfd) return NULL; - kfd->kgd = kgd; - kfd->device_info = device_info; + kfd->adev = adev; + kfd_device_info_init(kfd, vf, gfx_target_version); kfd->pdev = pdev; kfd->init_complete = false; kfd->kfd2kgd = f2g; @@ -844,24 +322,24 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf) static void kfd_cwsr_init(struct kfd_dev *kfd) { - if (cwsr_enable && kfd->device_info->supports_cwsr) { - if (kfd->device_info->asic_family < CHIP_VEGA10) { + if (cwsr_enable && kfd->device_info.supports_cwsr) { + if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) { BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE); kfd->cwsr_isa = cwsr_trap_gfx8_hex; kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex); - } else if (kfd->device_info->asic_family == CHIP_ARCTURUS) { + } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)) { BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE); kfd->cwsr_isa = cwsr_trap_arcturus_hex; kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex); - } else if (kfd->device_info->asic_family == CHIP_ALDEBARAN) { + } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)) { BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex) > PAGE_SIZE); kfd->cwsr_isa = cwsr_trap_aldebaran_hex; kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex); - } else if (kfd->device_info->asic_family < CHIP_NAVI10) { + } else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) { BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE); kfd->cwsr_isa = cwsr_trap_gfx9_hex; kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex); - } else if (kfd->device_info->asic_family < CHIP_SIENNA_CICHLID) { + } else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 3, 0)) { BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE); kfd->cwsr_isa = cwsr_trap_nv1x_hex; kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex); @@ -882,18 +360,17 @@ static int kfd_gws_init(struct kfd_dev *kfd) if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) return 0; - if (hws_gws_support - || (kfd->device_info->asic_family == 
CHIP_VEGA10 - && kfd->mec2_fw_version >= 0x81b3) - || (kfd->device_info->asic_family >= CHIP_VEGA12 - && kfd->device_info->asic_family <= CHIP_RAVEN - && kfd->mec2_fw_version >= 0x1b3) - || (kfd->device_info->asic_family == CHIP_ARCTURUS - && kfd->mec2_fw_version >= 0x30) - || (kfd->device_info->asic_family == CHIP_ALDEBARAN - && kfd->mec2_fw_version >= 0x28)) - ret = amdgpu_amdkfd_alloc_gws(kfd->kgd, - amdgpu_amdkfd_get_num_gws(kfd->kgd), &kfd->gws); + if (hws_gws_support || (KFD_IS_SOC15(kfd) && + ((KFD_GC_VERSION(kfd) == IP_VERSION(9, 0, 1) + && kfd->mec2_fw_version >= 0x81b3) || + (KFD_GC_VERSION(kfd) <= IP_VERSION(9, 4, 0) + && kfd->mec2_fw_version >= 0x1b3) || + (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1) + && kfd->mec2_fw_version >= 0x30) || + (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) + && kfd->mec2_fw_version >= 0x28)))) + ret = amdgpu_amdkfd_alloc_gws(kfd->adev, + kfd->adev->gds.gws_size, &kfd->gws); return ret; } @@ -910,11 +387,11 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, unsigned int size, map_process_packet_size; kfd->ddev = ddev; - kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd, + kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, KGD_ENGINE_MEC1); - kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd, + kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, KGD_ENGINE_MEC2); - kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd, + kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, KGD_ENGINE_SDMA1); kfd->shared_resources = *gpu_resources; @@ -927,16 +404,16 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, * 32 and 64-bit requests are possible and must be * supported. */ - kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->kgd); + kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->adev); if (!kfd->pci_atomic_requested && - kfd->device_info->needs_pci_atomics && - (!kfd->device_info->no_atomic_fw_version || - kfd->mec_fw_version < kfd->device_info->no_atomic_fw_version)) { + kfd->device_info.needs_pci_atomics && + (!kfd->device_info.no_atomic_fw_version || + kfd->mec_fw_version < kfd->device_info.no_atomic_fw_version)) { dev_info(kfd_device, "skipped device %x:%x, PCI rejects atomics %d<%d\n", kfd->pdev->vendor, kfd->pdev->device, kfd->mec_fw_version, - kfd->device_info->no_atomic_fw_version); + kfd->device_info.no_atomic_fw_version); return false; } @@ -953,16 +430,15 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, /* calculate max size of mqds needed for queues */ size = max_num_of_queues_per_device * - kfd->device_info->mqd_size_aligned; + kfd->device_info.mqd_size_aligned; /* * calculate max size of runlist packet. * There can be only 2 packets at once */ - map_process_packet_size = - kfd->device_info->asic_family == CHIP_ALDEBARAN ? + map_process_packet_size = KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) ? 
sizeof(struct pm4_mes_map_process_aldebaran) : - sizeof(struct pm4_mes_map_process); + sizeof(struct pm4_mes_map_process); size += (KFD_MAX_NUM_OF_PROCESSES * map_process_packet_size + max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues) + sizeof(struct pm4_mes_runlist)) * 2; @@ -974,7 +450,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, size += 512 * 1024; if (amdgpu_amdkfd_alloc_gtt_mem( - kfd->kgd, size, &kfd->gtt_mem, + kfd->adev, size, &kfd->gtt_mem, &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr, false)) { dev_err(kfd_device, "Could not allocate %d bytes\n", size); @@ -995,9 +471,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, goto kfd_doorbell_error; } - kfd->hive_id = amdgpu_amdkfd_get_hive_id(kfd->kgd); + kfd->hive_id = kfd->adev->gmc.xgmi.hive_id; - kfd->noretry = amdgpu_amdkfd_get_noretry(kfd->kgd); + kfd->noretry = kfd->adev->gmc.noretry; if (kfd_interrupt_init(kfd)) { dev_err(kfd_device, "Error initializing interrupts\n"); @@ -1015,7 +491,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, */ if (kfd_gws_init(kfd)) { dev_err(kfd_device, "Could not allocate %d gws\n", - amdgpu_amdkfd_get_num_gws(kfd->kgd)); + kfd->adev->gds.gws_size); goto gws_error; } @@ -1030,7 +506,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd_cwsr_init(kfd); - svm_migrate_init((struct amdgpu_device *)kfd->kgd); + svm_migrate_init(kfd->adev); if(kgd2kfd_resume_iommu(kfd)) goto device_iommu_error; @@ -1068,10 +544,10 @@ kfd_interrupt_error: kfd_doorbell_error: kfd_gtt_sa_fini(kfd); kfd_gtt_sa_init_error: - amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem); + amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem); alloc_gtt_mem_failure: if (kfd->gws) - amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws); + amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws); dev_err(kfd_device, "device %x:%x NOT added due to errors\n", kfd->pdev->vendor, kfd->pdev->device); @@ -1088,9 +564,9 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd) kfd_doorbell_fini(kfd); ida_destroy(&kfd->doorbell_ida); kfd_gtt_sa_fini(kfd); - amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem); + amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem); if (kfd->gws) - amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws); + amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws); } kfree(kfd); @@ -1229,7 +705,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) if (!kfd->init_complete) return; - if (kfd->device_info->ih_ring_entry_size > sizeof(patched_ihre)) { + if (kfd->device_info.ih_ring_entry_size > sizeof(patched_ihre)) { dev_err_once(kfd_device, "Ring entry too small\n"); return; } @@ -1526,7 +1002,7 @@ void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd) void kfd_inc_compute_active(struct kfd_dev *kfd) { if (atomic_inc_return(&kfd->compute_profile) == 1) - amdgpu_amdkfd_set_compute_idle(kfd->kgd, false); + amdgpu_amdkfd_set_compute_idle(kfd->adev, false); } void kfd_dec_compute_active(struct kfd_dev *kfd) @@ -1534,7 +1010,7 @@ void kfd_dec_compute_active(struct kfd_dev *kfd) int count = atomic_dec_return(&kfd->compute_profile); if (count == 0) - amdgpu_amdkfd_set_compute_idle(kfd->kgd, true); + amdgpu_amdkfd_set_compute_idle(kfd->adev, true); WARN_ONCE(count < 0, "Compute profile ref. count error"); } @@ -1544,6 +1020,26 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask); } +/* kfd_get_num_sdma_engines returns the number of PCIe optimized SDMA and + * kfd_get_num_xgmi_sdma_engines returns the number of XGMI SDMA. 
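
One small pattern worth calling out from the hunks above: kfd_inc_compute_active()/kfd_dec_compute_active() keep a refcount so the device leaves its idle compute profile when the first user arrives and returns to it when the last one goes. A standalone C11 model of that refcount; set_compute_idle() here is a stub, not the amdgpu call:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int compute_profile;

static void set_compute_idle(int idle)
{
	printf("compute idle -> %d\n", idle);
}

static void inc_compute_active(void)
{
	/* mirrors atomic_inc_return(...) == 1: only the first user acts */
	if (atomic_fetch_add(&compute_profile, 1) + 1 == 1)
		set_compute_idle(0);
}

static void dec_compute_active(void)
{
	int count = atomic_fetch_sub(&compute_profile, 1) - 1;

	if (count == 0)
		set_compute_idle(1);	/* last user gone */
	if (count < 0)
		fprintf(stderr, "refcount underflow\n");
}

int main(void)
{
	inc_compute_active();
	inc_compute_active();	/* no-op: already active */
	dec_compute_active();	/* no-op: one user left */
	dec_compute_active();	/* back to idle */
	return 0;
}
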
+ * When the device has more than two engines, we reserve two for PCIe to enable + * full-duplex and the rest are used as XGMI. + */ +unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev) +{ + /* If XGMI is not supported, all SDMA engines are PCIe */ + if (!kdev->adev->gmc.xgmi.supported) + return kdev->adev->sdma.num_instances; + + return min(kdev->adev->sdma.num_instances, 2); +} + +unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev) +{ + /* After reserved for PCIe, the rest of engines are XGMI */ + return kdev->adev->sdma.num_instances - kfd_get_num_sdma_engines(kdev); +} + #if defined(CONFIG_DEBUG_FS) /* This function will send a package to HIQ to hang the HWS diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 003ba6a373ff..dd0b952f0173 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -99,38 +99,29 @@ unsigned int get_pipes_per_mec(struct device_queue_manager *dqm) return dqm->dev->shared_resources.num_pipe_per_mec; } -static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm) -{ - return dqm->dev->device_info->num_sdma_engines; -} - -static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm) -{ - return dqm->dev->device_info->num_xgmi_sdma_engines; -} - static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm) { - return get_num_sdma_engines(dqm) + get_num_xgmi_sdma_engines(dqm); + return kfd_get_num_sdma_engines(dqm->dev) + + kfd_get_num_xgmi_sdma_engines(dqm->dev); } unsigned int get_num_sdma_queues(struct device_queue_manager *dqm) { - return dqm->dev->device_info->num_sdma_engines - * dqm->dev->device_info->num_sdma_queues_per_engine; + return kfd_get_num_sdma_engines(dqm->dev) * + dqm->dev->device_info.num_sdma_queues_per_engine; } unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm) { - return dqm->dev->device_info->num_xgmi_sdma_engines - * dqm->dev->device_info->num_sdma_queues_per_engine; + return kfd_get_num_xgmi_sdma_engines(dqm->dev) * + dqm->dev->device_info.num_sdma_queues_per_engine; } void program_sh_mem_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { return dqm->dev->kfd2kgd->program_sh_mem_settings( - dqm->dev->kgd, qpd->vmid, + dqm->dev->adev, qpd->vmid, qpd->sh_mem_config, qpd->sh_mem_ape1_base, qpd->sh_mem_ape1_limit, @@ -157,7 +148,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q) { struct kfd_dev *dev = qpd->dqm->dev; - if (!KFD_IS_SOC15(dev->device_info->asic_family)) { + if (!KFD_IS_SOC15(dev)) { /* On pre-SOC15 chips we need to use the queue ID to * preserve the user mode ABI. 
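
The kfd_get_num_sdma_engines()/kfd_get_num_xgmi_sdma_engines() helpers added above centralize the split that used to live in the per-ASIC num_sdma_engines/num_xgmi_sdma_engines fields: with no XGMI link every SDMA instance serves PCIe, otherwise two instances are reserved for PCIe (one per direction, hence "full-duplex") and the remainder become XGMI engines. A userspace sketch of that arithmetic, with illustrative instance counts:

#include <assert.h>

static int num_pcie_sdma(int num_instances, int xgmi_supported)
{
	if (!xgmi_supported)
		return num_instances;		/* all engines are PCIe */
	return num_instances < 2 ? num_instances : 2;
}

static int num_xgmi_sdma(int num_instances, int xgmi_supported)
{
	/* whatever is left after the PCIe reservation */
	return num_instances - num_pcie_sdma(num_instances, xgmi_supported);
}

int main(void)
{
	assert(num_pcie_sdma(5, 1) == 2);	/* 2 reserved for PCIe */
	assert(num_xgmi_sdma(5, 1) == 3);	/* 3 left for XGMI */
	assert(num_xgmi_sdma(2, 0) == 0);	/* no XGMI link at all */
	return 0;
}
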
*/ @@ -202,7 +193,7 @@ static void deallocate_doorbell(struct qcm_process_device *qpd, unsigned int old; struct kfd_dev *dev = qpd->dqm->dev; - if (!KFD_IS_SOC15(dev->device_info->asic_family) || + if (!KFD_IS_SOC15(dev) || q->properties.type == KFD_QUEUE_TYPE_SDMA || q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) return; @@ -216,7 +207,7 @@ static void program_trap_handler_settings(struct device_queue_manager *dqm, { if (dqm->dev->kfd2kgd->program_trap_handler_settings) dqm->dev->kfd2kgd->program_trap_handler_settings( - dqm->dev->kgd, qpd->vmid, + dqm->dev->adev, qpd->vmid, qpd->tba_addr, qpd->tma_addr); } @@ -250,21 +241,20 @@ static int allocate_vmid(struct device_queue_manager *dqm, program_sh_mem_settings(dqm, qpd); - if (dqm->dev->device_info->asic_family >= CHIP_VEGA10 && - dqm->dev->cwsr_enabled) + if (KFD_IS_SOC15(dqm->dev) && dqm->dev->cwsr_enabled) program_trap_handler_settings(dqm, qpd); /* qpd->page_table_base is set earlier when register_process() * is called, i.e. when the first queue is created. */ - dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd, + dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->adev, qpd->vmid, qpd->page_table_base); /* invalidate the VM context after pasid and vmid mapping is set up */ kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY); if (dqm->dev->kfd2kgd->set_scratch_backing_va) - dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd, + dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->adev, qpd->sh_hidden_private_base, qpd->vmid); return 0; @@ -283,7 +273,7 @@ static int flush_texture_cache_nocpsch(struct kfd_dev *kdev, if (ret) return ret; - return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid, + return amdgpu_amdkfd_submit_ib(kdev->adev, KGD_ENGINE_MEC1, qpd->vmid, qpd->ib_base, (uint32_t *)qpd->ib_kaddr, pmf->release_mem_size / sizeof(uint32_t)); } @@ -293,7 +283,7 @@ static void deallocate_vmid(struct device_queue_manager *dqm, struct queue *q) { /* On GFX v7, CP doesn't flush TC at dequeue */ - if (q->device->device_info->asic_family == CHIP_HAWAII) + if (q->device->adev->asic_type == CHIP_HAWAII) if (flush_texture_cache_nocpsch(q->device, qpd)) pr_err("Failed to flush TC\n"); @@ -776,7 +766,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, if (!list_empty(&qpd->queues_list)) { dqm->dev->kfd2kgd->set_vm_context_page_table_base( - dqm->dev->kgd, + dqm->dev->adev, qpd->vmid, qpd->page_table_base); kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY); @@ -954,7 +944,7 @@ set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid, unsigned int vmid) { return dqm->dev->kfd2kgd->set_pasid_vmid_mapping( - dqm->dev->kgd, pasid, vmid); + dqm->dev->adev, pasid, vmid); } static void init_interrupts(struct device_queue_manager *dqm) @@ -963,7 +953,7 @@ static void init_interrupts(struct device_queue_manager *dqm) for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++) if (is_pipe_enabled(dqm, 0, i)) - dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i); + dqm->dev->kfd2kgd->init_interrupts(dqm->dev->adev, i); } static int initialize_nocpsch(struct device_queue_manager *dqm) @@ -1017,7 +1007,7 @@ static int start_nocpsch(struct device_queue_manager *dqm) pr_info("SW scheduler is used"); init_interrupts(dqm); - if (dqm->dev->device_info->asic_family == CHIP_HAWAII) + if (dqm->dev->adev->asic_type == CHIP_HAWAII) return pm_init(&dqm->packet_mgr, dqm); dqm->sched_running = true; @@ -1026,7 +1016,7 @@ static int start_nocpsch(struct device_queue_manager *dqm) static int stop_nocpsch(struct 
device_queue_manager *dqm) { - if (dqm->dev->device_info->asic_family == CHIP_HAWAII) + if (dqm->dev->adev->asic_type == CHIP_HAWAII) pm_uninit(&dqm->packet_mgr, false); dqm->sched_running = false; @@ -1055,9 +1045,9 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, dqm->sdma_bitmap &= ~(1ULL << bit); q->sdma_id = bit; q->properties.sdma_engine_id = q->sdma_id % - get_num_sdma_engines(dqm); + kfd_get_num_sdma_engines(dqm->dev); q->properties.sdma_queue_id = q->sdma_id / - get_num_sdma_engines(dqm); + kfd_get_num_sdma_engines(dqm->dev); } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { if (dqm->xgmi_sdma_bitmap == 0) { pr_err("No more XGMI SDMA queue to allocate\n"); @@ -1072,10 +1062,11 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, * assumes the first N engines are always * PCIe-optimized ones */ - q->properties.sdma_engine_id = get_num_sdma_engines(dqm) + - q->sdma_id % get_num_xgmi_sdma_engines(dqm); + q->properties.sdma_engine_id = + kfd_get_num_sdma_engines(dqm->dev) + + q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev); q->properties.sdma_queue_id = q->sdma_id / - get_num_xgmi_sdma_engines(dqm); + kfd_get_num_xgmi_sdma_engines(dqm->dev); } pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id); @@ -1132,7 +1123,7 @@ static int set_sched_resources(struct device_queue_manager *dqm) res.queue_mask |= 1ull << amdgpu_queue_mask_bit_to_set_resource_bit( - (struct amdgpu_device *)dqm->dev->kgd, i); + dqm->dev->adev, i); } res.gws_mask = ~0ull; res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0; @@ -1226,6 +1217,11 @@ static int stop_cpsch(struct device_queue_manager *dqm) bool hanging; dqm_lock(dqm); + if (!dqm->sched_running) { + dqm_unlock(dqm); + return 0; + } + if (!dqm->is_hws_hang) unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); hanging = dqm->is_hws_hang || dqm->is_resetting; @@ -1842,10 +1838,10 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd; uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size * get_num_all_sdma_engines(dqm) * - dev->device_info->num_sdma_queues_per_engine + + dev->device_info.num_sdma_queues_per_engine + dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size; - retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size, + retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size, &(mem_obj->gtt_mem), &(mem_obj->gpu_addr), (void *)&(mem_obj->cpu_ptr), false); @@ -1862,7 +1858,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) if (!dqm) return NULL; - switch (dev->device_info->asic_family) { + switch (dev->adev->asic_type) { /* HWS is not available on Hawaii. */ case CHIP_HAWAII: /* HWS depends on CWSR for timely dequeue. 
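
allocate_sdma_queue() above hands out slots from a flat bitmap and then folds the flat id into an (engine, queue) pair: the modulo spreads consecutive ids round-robin across engines, the division picks the queue slot on each engine, and XGMI ids are offset past the PCIe-optimized engines. A small sketch of that fold (engine counts are illustrative):

#include <stdio.h>

static void decode_pcie(int sdma_id, int n_pcie, int *engine, int *queue)
{
	*engine = sdma_id % n_pcie;	/* round-robin across engines */
	*queue  = sdma_id / n_pcie;	/* next slot on that engine */
}

static void decode_xgmi(int sdma_id, int n_pcie, int n_xgmi,
			int *engine, int *queue)
{
	/* XGMI engines are numbered after the PCIe-optimized ones */
	*engine = n_pcie + sdma_id % n_xgmi;
	*queue  = sdma_id / n_xgmi;
}

int main(void)
{
	int e, q;

	decode_pcie(3, 2, &e, &q);
	printf("pcie id 3 -> engine %d, queue %d\n", e, q);	/* 1, 1 */
	decode_xgmi(4, 2, 3, &e, &q);
	printf("xgmi id 4 -> engine %d, queue %d\n", e, q);	/* 3, 1 */
	return 0;
}
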
CWSR is not @@ -1925,7 +1921,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) goto out_free; } - switch (dev->device_info->asic_family) { + switch (dev->adev->asic_type) { case CHIP_CARRIZO: device_queue_manager_init_vi(&dqm->asic_ops); break; @@ -1947,31 +1943,16 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) device_queue_manager_init_vi_tonga(&dqm->asic_ops); break; - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - case CHIP_RENOIR: - case CHIP_ARCTURUS: - case CHIP_ALDEBARAN: - device_queue_manager_init_v9(&dqm->asic_ops); - break; - case CHIP_NAVI10: - case CHIP_NAVI12: - case CHIP_NAVI14: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: - case CHIP_CYAN_SKILLFISH: - device_queue_manager_init_v10_navi10(&dqm->asic_ops); - break; default: - WARN(1, "Unexpected ASIC family %u", - dev->device_info->asic_family); - goto out_free; + if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1)) + device_queue_manager_init_v10_navi10(&dqm->asic_ops); + else if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1)) + device_queue_manager_init_v9(&dqm->asic_ops); + else { + WARN(1, "Unexpected ASIC family %u", + dev->adev->asic_type); + goto out_free; + } } if (init_mqd_managers(dqm)) @@ -1995,7 +1976,7 @@ static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev, { WARN(!mqd, "No hiq sdma mqd trunk to free"); - amdgpu_amdkfd_free_gtt_mem(dev->kgd, mqd->gtt_mem); + amdgpu_amdkfd_free_gtt_mem(dev->adev, mqd->gtt_mem); } void device_queue_manager_uninit(struct device_queue_manager *dqm) @@ -2026,7 +2007,7 @@ static void kfd_process_hw_exception(struct work_struct *work) { struct device_queue_manager *dqm = container_of(work, struct device_queue_manager, hw_exception_work); - amdgpu_amdkfd_gpu_reset(dqm->dev->kgd); + amdgpu_amdkfd_gpu_reset(dqm->dev->adev); } #if defined(CONFIG_DEBUG_FS) @@ -2065,7 +2046,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) return 0; } - r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd, + r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev, KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE, &dump, &n_regs); if (!r) { @@ -2087,7 +2068,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) continue; r = dqm->dev->kfd2kgd->hqd_dump( - dqm->dev->kgd, pipe, queue, &dump, &n_regs); + dqm->dev->adev, pipe, queue, &dump, &n_regs); if (r) break; @@ -2101,10 +2082,10 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) { for (queue = 0; - queue < dqm->dev->device_info->num_sdma_queues_per_engine; + queue < dqm->dev->device_info.num_sdma_queues_per_engine; queue++) { r = dqm->dev->kfd2kgd->hqd_sdma_dump( - dqm->dev->kgd, pipe, queue, &dump, &n_regs); + dqm->dev->adev, pipe, queue, &dump, &n_regs); if (r) break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c index b5c3d13643f1..f20434d9980e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c @@ -62,7 +62,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm, SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT; - if (dqm->dev->device_info->asic_family == CHIP_ALDEBARAN) { + if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2)) { /* Aldebaran can safely support different XNACK modes * per process */ diff --git 
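
The device_queue_manager_init() default case above is the template this series applies to every CHIP_* switch that follows (apertures, packet manager): SOC15-and-newer parts are dispatched by comparing the packed GC IP version against range boundaries — >= 10.1.1 takes the v10 path, >= 9.0.1 the v9 path — so future GFX9/GFX10 ASICs need no new case labels. A hedged sketch, again assuming the major<<16 | minor<<8 | rev packing:

#include <stdint.h>
#include <stdio.h>

#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

static const char *pick_asic_ops(uint32_t gc_ip)
{
	if (gc_ip >= IP_VERSION(10, 1, 1))
		return "v10_navi10 ops";
	if (gc_ip >= IP_VERSION(9, 0, 1))
		return "v9 ops";
	return "unexpected";	/* pre-SOC15 keeps explicit asic_type cases */
}

int main(void)
{
	/* a GC 10.3.x part resolves without any new case label */
	printf("%s\n", pick_asic_ops(IP_VERSION(10, 3, 5)));
	return 0;
}

The range check is only sound because the packing makes numeric comparison order match hardware-generation order.
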
a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c index 768d153acff4..0dbcf54657ed 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c @@ -48,7 +48,7 @@ /* # of doorbell bytes allocated for each process. */ size_t kfd_doorbell_process_slice(struct kfd_dev *kfd) { - return roundup(kfd->device_info->doorbell_size * + return roundup(kfd->device_info.doorbell_size * KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, PAGE_SIZE); } @@ -180,7 +180,7 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, if (inx >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) return NULL; - inx *= kfd->device_info->doorbell_size / sizeof(u32); + inx *= kfd->device_info.doorbell_size / sizeof(u32); /* * Calculating the kernel doorbell offset using the first @@ -201,7 +201,7 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr) unsigned int inx; inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr) - * sizeof(u32) / kfd->device_info->doorbell_size; + * sizeof(u32) / kfd->device_info.doorbell_size; mutex_lock(&kfd->doorbell_mutex); __clear_bit(inx, kfd->doorbell_available_index); @@ -239,7 +239,7 @@ unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd, return kfd->doorbell_base_dw_offset + pdd->doorbell_index * kfd_doorbell_process_slice(kfd) / sizeof(u32) + - doorbell_id * kfd->device_info->doorbell_size / sizeof(u32); + doorbell_id * kfd->device_info.doorbell_size / sizeof(u32); } uint64_t kfd_get_number_elems(struct kfd_dev *kfd) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 3eea4edee355..afe72dd11325 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -935,8 +935,10 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, u32 pasid, /* Workaround on Raven to not kill the process when memory is freed * before IOMMU is able to finish processing all the excessive PPRs */ - if (dev->device_info->asic_family != CHIP_RAVEN && - dev->device_info->asic_family != CHIP_RENOIR) { + + if (KFD_GC_VERSION(dev) != IP_VERSION(9, 1, 0) && + KFD_GC_VERSION(dev) != IP_VERSION(9, 2, 2) && + KFD_GC_VERSION(dev) != IP_VERSION(9, 3, 0)) { mutex_lock(&p->event_mutex); /* Lookup events by type and signal them */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index d1388896f9c1..2e2b7ceb71db 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -394,7 +394,7 @@ int kfd_init_apertures(struct kfd_process *process) pdd->gpuvm_base = pdd->gpuvm_limit = 0; pdd->scratch_base = pdd->scratch_limit = 0; } else { - switch (dev->device_info->asic_family) { + switch (dev->adev->asic_type) { case CHIP_KAVERI: case CHIP_HAWAII: case CHIP_CARRIZO: @@ -406,29 +406,14 @@ int kfd_init_apertures(struct kfd_process *process) case CHIP_VEGAM: kfd_init_apertures_vi(pdd, id); break; - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - case CHIP_RENOIR: - case CHIP_ARCTURUS: - case CHIP_ALDEBARAN: - case CHIP_NAVI10: - case CHIP_NAVI12: - case CHIP_NAVI14: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: - case CHIP_CYAN_SKILLFISH: - kfd_init_apertures_v9(pdd, id); - break; default: - WARN(1, "Unexpected ASIC family %u", - dev->device_info->asic_family); - return -EINVAL; + if (KFD_GC_VERSION(dev) >= 
IP_VERSION(9, 0, 1)) + kfd_init_apertures_v9(pdd, id); + else { + WARN(1, "Unexpected ASIC family %u", + dev->adev->asic_type); + return -EINVAL; + } } if (!dev->use_iommu_v2) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index 543e7ea75593..deb64168c9e8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -135,7 +135,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev, *patched_flag = true; memcpy(patched_ihre, ih_ring_entry, - dev->device_info->ih_ring_entry_size); + dev->device_info.ih_ring_entry_size); pasid = dev->dqm->vmid_pasid[vmid]; @@ -231,7 +231,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev, if (sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST && sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) { kfd_signal_poison_consumed_event(dev, pasid); - amdgpu_amdkfd_ras_poison_consumption_handler(dev->kgd); + amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev); return; } break; @@ -253,7 +253,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev, kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28); } else if (source_id == SOC15_INTSRC_SDMA_ECC) { kfd_signal_poison_consumed_event(dev, pasid); - amdgpu_amdkfd_ras_poison_consumption_handler(dev->kgd); + amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev); return; } } else if (client_id == SOC15_IH_CLIENTID_VMC || diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c index bc47f6a44456..81887c2013c9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c @@ -54,7 +54,7 @@ int kfd_interrupt_init(struct kfd_dev *kfd) int r; r = kfifo_alloc(&kfd->ih_fifo, - KFD_IH_NUM_ENTRIES * kfd->device_info->ih_ring_entry_size, + KFD_IH_NUM_ENTRIES * kfd->device_info.ih_ring_entry_size, GFP_KERNEL); if (r) { dev_err(kfd_chardev(), "Failed to allocate IH fifo\n"); @@ -114,8 +114,8 @@ bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry) int count; count = kfifo_in(&kfd->ih_fifo, ih_ring_entry, - kfd->device_info->ih_ring_entry_size); - if (count != kfd->device_info->ih_ring_entry_size) { + kfd->device_info.ih_ring_entry_size); + if (count != kfd->device_info.ih_ring_entry_size) { dev_err_ratelimited(kfd_chardev(), "Interrupt ring overflow, dropping interrupt %d\n", count); @@ -133,11 +133,11 @@ static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry) int count; count = kfifo_out(&kfd->ih_fifo, ih_ring_entry, - kfd->device_info->ih_ring_entry_size); + kfd->device_info.ih_ring_entry_size); - WARN_ON(count && count != kfd->device_info->ih_ring_entry_size); + WARN_ON(count && count != kfd->device_info.ih_ring_entry_size); - return count == kfd->device_info->ih_ring_entry_size; + return count == kfd->device_info.ih_ring_entry_size; } static void interrupt_wq(struct work_struct *work) @@ -146,13 +146,13 @@ static void interrupt_wq(struct work_struct *work) interrupt_work); uint32_t ih_ring_entry[KFD_MAX_RING_ENTRY_SIZE]; - if (dev->device_info->ih_ring_entry_size > sizeof(ih_ring_entry)) { + if (dev->device_info.ih_ring_entry_size > sizeof(ih_ring_entry)) { dev_err_once(kfd_chardev(), "Ring entry too small\n"); return; } while (dequeue_ih_ring_entry(dev, ih_ring_entry)) - dev->device_info->event_interrupt_class->interrupt_wq(dev, + dev->device_info.event_interrupt_class->interrupt_wq(dev, ih_ring_entry); } @@ -163,7 +163,7 @@ bool 
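
Running through all of these files is the same mechanical change: kfd->device_info goes from a pointer into a static per-ASIC table to a struct embedded in kfd_dev and filled by kfd_device_info_init() at probe time, so every ->device_info->field becomes .device_info.field and there is no NULL pointer left to encode "unsupported" (that job moved to the f2g check in probe). A toy illustration of the before/after shape, with made-up field values:

#include <string.h>

struct kfd_device_info {
	unsigned int ih_ring_entry_size;
	unsigned int num_sdma_queues_per_engine;
};

struct kfd_dev_old { const struct kfd_device_info *device_info; };
struct kfd_dev_new { struct kfd_device_info device_info; };

static void device_info_init(struct kfd_device_info *info)
{
	/* computed from the GPU at probe time instead of a static table */
	memset(info, 0, sizeof(*info));
	info->ih_ring_entry_size = 32;
	info->num_sdma_queues_per_engine = 8;
}

int main(void)
{
	struct kfd_dev_new kfd;

	device_info_init(&kfd.device_info);
	return kfd.device_info.ih_ring_entry_size == 32 ? 0 : 1;
}
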
interrupt_is_wanted(struct kfd_dev *dev, /* integer and bitwise OR so there is no boolean short-circuiting */ unsigned int wanted = 0; - wanted |= dev->device_info->event_interrupt_class->interrupt_isr(dev, + wanted |= dev->device_info.event_interrupt_class->interrupt_isr(dev, ih_ring_entry, patched_ihre, flag); return wanted != 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c index 73f2257acc23..66ad8d0b8f7f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c @@ -89,7 +89,7 @@ int kfd_iommu_device_init(struct kfd_dev *kfd) } pasid_limit = min_t(unsigned int, - (unsigned int)(1 << kfd->device_info->max_pasid_bits), + (unsigned int)(1 << kfd->device_info.max_pasid_bits), iommu_info.max_pasids); if (!kfd_set_pasid_limit(pasid_limit)) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 64b4ac339904..16f8bc4ca7f6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -91,7 +91,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev, kq->pq_gpu_addr = kq->pq->gpu_addr; /* For CIK family asics, kq->eop_mem is not needed */ - if (dev->device_info->asic_family > CHIP_MULLINS) { + if (dev->adev->asic_type > CHIP_MULLINS) { retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem); if (retval != 0) goto err_eop_allocate_vidmem; @@ -111,7 +111,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev, kq->rptr_kernel = kq->rptr_mem->cpu_ptr; kq->rptr_gpu_addr = kq->rptr_mem->gpu_addr; - retval = kfd_gtt_sa_allocate(dev, dev->device_info->doorbell_size, + retval = kfd_gtt_sa_allocate(dev, dev->device_info.doorbell_size, &kq->wptr_mem); if (retval != 0) @@ -297,7 +297,7 @@ void kq_submit_packet(struct kernel_queue *kq) } pr_debug("\n"); #endif - if (kq->dev->device_info->doorbell_size == 8) { + if (kq->dev->device_info.doorbell_size == 8) { *kq->wptr64_kernel = kq->pending_wptr64; write_kernel_doorbell64(kq->queue->properties.doorbell_ptr, kq->pending_wptr64); @@ -310,7 +310,7 @@ void kq_submit_packet(struct kernel_queue *kq) void kq_rollback_packet(struct kernel_queue *kq) { - if (kq->dev->device_info->doorbell_size == 8) { + if (kq->dev->device_info.doorbell_size == 8) { kq->pending_wptr64 = *kq->wptr64_kernel; kq->pending_wptr = *kq->wptr_kernel % (kq->queue->properties.queue_size / 4); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 9b9c2b9bf2ef..48c2f2b6e217 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -108,8 +108,8 @@ error_free: * svm_migrate_copy_memory_gart - sdma copy data between ram and vram * * @adev: amdgpu device the sdma ring running - * @src: source page address array - * @dst: destination page address array + * @sys: system DMA pointer to be copied + * @vram: vram destination DMA pointer * @npages: number of pages to copy * @direction: enum MIGRATION_COPY_DIR * @mfence: output, sdma fence to signal after sdma is done @@ -938,7 +938,7 @@ int svm_migrate_init(struct amdgpu_device *adev) void *r; /* Page migration works on Vega10 or newer */ - if (kfddev->device_info->asic_family < CHIP_VEGA10) + if (!KFD_IS_SOC15(kfddev)) return -EINVAL; pgmap = &kfddev->pgmap; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index c021519af810..e2825ad4d699 100644 --- 
a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -71,7 +71,7 @@ struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_dev *dev, return NULL; offset = (q->sdma_engine_id * - dev->device_info->num_sdma_queues_per_engine + + dev->device_info.num_sdma_queues_per_engine + q->sdma_queue_id) * dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size; @@ -100,7 +100,7 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm, struct kfd_cu_info cu_info; uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0}; int i, se, sh, cu; - amdgpu_amdkfd_get_cu_info(mm->dev->kgd, &cu_info); + amdgpu_amdkfd_get_cu_info(mm->dev->adev, &cu_info); if (cu_mask_count > cu_info.cu_active_number) cu_mask_count = cu_info.cu_active_number; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 8128f4d312f1..e9a8e21e144e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -171,7 +171,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0); uint32_t wptr_mask = (uint32_t)((p->queue_size / 4) - 1); - return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, + return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, (uint32_t __user *)p->write_ptr, wptr_shift, wptr_mask, mms); } @@ -180,7 +180,7 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, struct queue_properties *p, struct mm_struct *mms) { - return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd, + return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd, (uint32_t __user *)p->write_ptr, mms); } @@ -276,7 +276,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, mqd, type, timeout, + return mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, mqd, type, timeout, pipe_id, queue_id); } @@ -289,7 +289,7 @@ static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout); + return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout); } static bool is_occupied(struct mqd_manager *mm, void *mqd, @@ -297,7 +297,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address, + return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->adev, queue_address, pipe_id, queue_id); } @@ -306,7 +306,7 @@ static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd); + return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd); } /* diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 270160fc401b..d74d8a6ac27a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -148,7 +148,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */ uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 
4 : 0); - r = mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, + r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, (uint32_t __user *)p->write_ptr, wptr_shift, 0, mms); return r; @@ -158,7 +158,7 @@ static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, struct queue_properties *p, struct mm_struct *mms) { - return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id, + return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id, queue_id, p->doorbell_off); } @@ -239,7 +239,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_destroy - (mm->dev->kgd, mqd, type, timeout, + (mm->dev->adev, mqd, type, timeout, pipe_id, queue_id); } @@ -254,7 +254,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_is_occupied( - mm->dev->kgd, queue_address, + mm->dev->adev, queue_address, pipe_id, queue_id); } @@ -320,7 +320,7 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, struct queue_properties *p, struct mm_struct *mms) { - return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd, + return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd, (uint32_t __user *)p->write_ptr, mms); } @@ -363,14 +363,14 @@ static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout); + return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout); } static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd); + return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd); } #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 4e5932f54b5a..326eb2285029 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -108,7 +108,7 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL); if (!mqd_mem_obj) return NULL; - retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd, + retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->adev, ALIGN(q->ctl_stack_size, PAGE_SIZE) + ALIGN(sizeof(struct v9_mqd), PAGE_SIZE), &(mqd_mem_obj->gtt_mem), @@ -199,7 +199,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */ uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 
4 : 0); - return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, + return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, (uint32_t __user *)p->write_ptr, wptr_shift, 0, mms); } @@ -208,7 +208,7 @@ static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, struct queue_properties *p, struct mm_struct *mms) { - return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id, + return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id, queue_id, p->doorbell_off); } @@ -291,7 +291,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_destroy - (mm->dev->kgd, mqd, type, timeout, + (mm->dev->adev, mqd, type, timeout, pipe_id, queue_id); } @@ -301,7 +301,7 @@ static void free_mqd(struct mqd_manager *mm, void *mqd, struct kfd_dev *kfd = mm->dev; if (mqd_mem_obj->gtt_mem) { - amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem); + amdgpu_amdkfd_free_gtt_mem(kfd->adev, mqd_mem_obj->gtt_mem); kfree(mqd_mem_obj); } else { kfd_gtt_sa_free(mm->dev, mqd_mem_obj); @@ -313,7 +313,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_is_occupied( - mm->dev->kgd, queue_address, + mm->dev->adev, queue_address, pipe_id, queue_id); } @@ -375,7 +375,7 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, struct queue_properties *p, struct mm_struct *mms) { - return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd, + return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd, (uint32_t __user *)p->write_ptr, mms); } @@ -418,14 +418,14 @@ static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout); + return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout); } static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd); + return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd); } #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index cd9220eb8a7a..d456e950ce1d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -162,7 +162,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 
4 : 0); uint32_t wptr_mask = (uint32_t)((p->queue_size / 4) - 1); - return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, + return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, (uint32_t __user *)p->write_ptr, wptr_shift, wptr_mask, mms); } @@ -265,7 +265,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_destroy - (mm->dev->kgd, mqd, type, timeout, + (mm->dev->adev, mqd, type, timeout, pipe_id, queue_id); } @@ -280,7 +280,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_is_occupied( - mm->dev->kgd, queue_address, + mm->dev->adev, queue_address, pipe_id, queue_id); } @@ -347,7 +347,7 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, struct queue_properties *p, struct mm_struct *mms) { - return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd, + return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd, (uint32_t __user *)p->write_ptr, mms); } @@ -389,14 +389,14 @@ static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout); + return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout); } static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd); + return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd); } #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index e547f1f8c49f..1439420925a0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -223,7 +223,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm, int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) { - switch (dqm->dev->device_info->asic_family) { + switch (dqm->dev->adev->asic_type) { case CHIP_KAVERI: case CHIP_HAWAII: /* PM4 packet structures on CIK are the same as on VI */ @@ -236,31 +236,16 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) case CHIP_VEGAM: pm->pmf = &kfd_vi_pm_funcs; break; - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - case CHIP_RENOIR: - case CHIP_ARCTURUS: - case CHIP_NAVI10: - case CHIP_NAVI12: - case CHIP_NAVI14: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: - case CHIP_CYAN_SKILLFISH: - pm->pmf = &kfd_v9_pm_funcs; - break; - case CHIP_ALDEBARAN: - pm->pmf = &kfd_aldebaran_pm_funcs; - break; default: - WARN(1, "Unexpected ASIC family %u", - dqm->dev->device_info->asic_family); - return -EINVAL; + if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2)) + pm->pmf = &kfd_aldebaran_pm_funcs; + else if (KFD_GC_VERSION(dqm->dev) >= IP_VERSION(9, 0, 1)) + pm->pmf = &kfd_v9_pm_funcs; + else { + WARN(1, "Unexpected ASIC family %u", + dqm->dev->adev->asic_type); + return -EINVAL; + } } pm->dqm = dqm; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c index 08442e7d9944..3c0658e32e93 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c @@ 
-110,8 +110,8 @@ static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer, return 0; } -int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer, - struct scheduling_resources *res) +static int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer, + struct scheduling_resources *res) { struct pm4_mes_set_resources *packet; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 94e92c0812db..0c3f911e3bf4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -183,7 +183,8 @@ enum cache_policy { cache_policy_noncoherent }; -#define KFD_IS_SOC15(chip) ((chip) >= CHIP_VEGA10) +#define KFD_GC_VERSION(dev) ((dev)->adev->ip_versions[GC_HWIP][0]) +#define KFD_IS_SOC15(dev) ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1))) struct kfd_event_interrupt_class { bool (*interrupt_isr)(struct kfd_dev *dev, @@ -194,8 +195,6 @@ struct kfd_event_interrupt_class { }; struct kfd_device_info { - enum amd_asic_type asic_family; - const char *asic_name; uint32_t gfx_target_version; const struct kfd_event_interrupt_class *event_interrupt_class; unsigned int max_pasid_bits; @@ -208,11 +207,12 @@ struct kfd_device_info { bool needs_iommu_device; bool needs_pci_atomics; uint32_t no_atomic_fw_version; - unsigned int num_sdma_engines; - unsigned int num_xgmi_sdma_engines; unsigned int num_sdma_queues_per_engine; }; +unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev); +unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev); + struct kfd_mem_obj { uint32_t range_start; uint32_t range_end; @@ -228,9 +228,9 @@ struct kfd_vmid_info { }; struct kfd_dev { - struct kgd_dev *kgd; + struct amdgpu_device *adev; - const struct kfd_device_info *device_info; + struct kfd_device_info device_info; struct pci_dev *pdev; struct drm_device *ddev; @@ -766,7 +766,7 @@ struct svm_range_list { struct list_head deferred_range_list; spinlock_t deferred_list_lock; atomic_t evicted_ranges; - bool drain_pagefaults; + atomic_t drain_pagefaults; struct delayed_work restore_work; DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE); struct task_struct *faulting_task; @@ -891,7 +891,7 @@ struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid); struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm); int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id); -int kfd_process_gpuid_from_kgd(struct kfd_process *p, +int kfd_process_gpuid_from_adev(struct kfd_process *p, struct amdgpu_device *adev, uint32_t *gpuid, uint32_t *gpuidx); static inline int kfd_process_gpuid_from_gpuidx(struct kfd_process *p, @@ -984,7 +984,7 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain( struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id); struct kfd_dev *kfd_device_by_id(uint32_t gpu_id); struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev); -struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd); +struct kfd_dev *kfd_device_by_adev(const struct amdgpu_device *adev); int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev); int kfd_numa_node_to_apic_id(int numa_node_id); void kfd_double_confirm_iommu_support(struct kfd_dev *gpu); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index b993011cfa64..f1930ff2c74a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -251,14 +251,13 @@ cleanup: } /** - * @kfd_get_cu_occupancy - Collect number 
of waves in-flight on this device + * kfd_get_cu_occupancy - Collect number of waves in-flight on this device * by current process. Translates acquired wave count into number of compute units * that are occupied. * - * @atr: Handle of attribute that allows reporting of wave count. The attribute + * @attr: Handle of attribute that allows reporting of wave count. The attribute * handle encapsulates GPU device it is associated with, thereby allowing collection * of waves in flight, etc - * * @buffer: Handle of user provided buffer updated with wave count * * Return: Number of bytes written to user buffer or an error value @@ -288,7 +287,7 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer) /* Collect wave count from device if it supports */ wave_cnt = 0; max_waves_per_cu = 0; - dev->kfd2kgd->get_cu_occupancy(dev->kgd, proc->pasid, &wave_cnt, + dev->kfd2kgd->get_cu_occupancy(dev->adev, proc->pasid, &wave_cnt, &max_waves_per_cu); /* Translate wave count to number of compute units */ @@ -692,12 +691,12 @@ static void kfd_process_free_gpuvm(struct kgd_mem *mem, struct kfd_dev *dev = pdd->dev; if (kptr) { - amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(dev->kgd, mem); + amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(dev->adev, mem); kptr = NULL; } - amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->drm_priv); - amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, pdd->drm_priv, + amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv); + amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, mem, pdd->drm_priv, NULL); } @@ -714,24 +713,24 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd, struct kfd_dev *kdev = pdd->dev; int err; - err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size, + err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size, pdd->drm_priv, mem, NULL, flags); if (err) goto err_alloc_mem; - err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, *mem, + err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->adev, *mem, pdd->drm_priv, NULL); if (err) goto err_map_mem; - err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, *mem, true); + err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->adev, *mem, true); if (err) { pr_debug("Sync memory failed, wait interrupted by user signal\n"); goto sync_memory_failed; } if (kptr) { - err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd, + err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->adev, (struct kgd_mem *)*mem, kptr, NULL); if (err) { pr_debug("Map GTT BO to kernel failed\n"); @@ -742,10 +741,10 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd, return err; sync_memory_failed: - amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->kgd, *mem, pdd->drm_priv); + amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->adev, *mem, pdd->drm_priv); err_map_mem: - amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, *mem, pdd->drm_priv, + amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->adev, *mem, pdd->drm_priv, NULL); err_alloc_mem: *mem = NULL; @@ -940,10 +939,10 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd) if (!peer_pdd->drm_priv) continue; amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( - peer_pdd->dev->kgd, mem, peer_pdd->drm_priv); + peer_pdd->dev->adev, mem, peer_pdd->drm_priv); } - amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem, + amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, mem, pdd->drm_priv, NULL); kfd_process_device_remove_obj_handle(pdd, id); } @@ -974,7 +973,7 @@ static void 
kfd_process_kunmap_signal_bo(struct kfd_process *p) if (!mem) goto out; - amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kdev->kgd, mem); + amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kdev->adev, mem); out: mutex_unlock(&p->mutex); @@ -1003,7 +1002,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) if (pdd->drm_file) { amdgpu_amdkfd_gpuvm_release_process_vm( - pdd->dev->kgd, pdd->drm_priv); + pdd->dev->adev, pdd->drm_priv); fput(pdd->drm_file); } @@ -1011,7 +1010,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) free_pages((unsigned long)pdd->qpd.cwsr_kaddr, get_order(KFD_CWSR_TBA_TMA_SIZE)); - kfree(pdd->qpd.doorbell_bitmap); + bitmap_free(pdd->qpd.doorbell_bitmap); idr_destroy(&pdd->alloc_idr); kfd_free_process_doorbells(pdd->dev, pdd->doorbell_index); @@ -1317,14 +1316,13 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported) * support the SVM APIs and don't need to be considered * for the XNACK mode selection. */ - if (dev->device_info->asic_family < CHIP_VEGA10) + if (!KFD_IS_SOC15(dev)) continue; /* Aldebaran can always support XNACK because it can support * per-process XNACK mode selection. But let the dev->noretry * setting still influence the default XNACK mode. */ - if (supported && - dev->device_info->asic_family == CHIP_ALDEBARAN) + if (supported && KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) continue; /* GFXv10 and later GPUs do not support shader preemption @@ -1332,7 +1330,7 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported) * management and memory-manager-related preemptions or * even deadlocks. */ - if (dev->device_info->asic_family >= CHIP_NAVI10) + if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1)) return false; if (dev->noretry) @@ -1431,12 +1429,11 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd, int range_start = dev->shared_resources.non_cp_doorbells_start; int range_end = dev->shared_resources.non_cp_doorbells_end; - if (!KFD_IS_SOC15(dev->device_info->asic_family)) + if (!KFD_IS_SOC15(dev)) return 0; - qpd->doorbell_bitmap = - kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, - BITS_PER_BYTE), GFP_KERNEL); + qpd->doorbell_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, + GFP_KERNEL); if (!qpd->doorbell_bitmap) return -ENOMEM; @@ -1448,9 +1445,9 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd, for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) { if (i >= range_start && i <= range_end) { - set_bit(i, qpd->doorbell_bitmap); - set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET, - qpd->doorbell_bitmap); + __set_bit(i, qpd->doorbell_bitmap); + __set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET, + qpd->doorbell_bitmap); } } @@ -1547,7 +1544,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd, dev = pdd->dev; ret = amdgpu_amdkfd_gpuvm_acquire_process_vm( - dev->kgd, drm_file, p->pasid, + dev->adev, drm_file, p->pasid, &p->kgd_process_info, &p->ef); if (ret) { pr_err("Failed to create process VM object\n"); @@ -1779,14 +1776,13 @@ int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id) } int -kfd_process_gpuid_from_kgd(struct kfd_process *p, struct amdgpu_device *adev, +kfd_process_gpuid_from_adev(struct kfd_process *p, struct amdgpu_device *adev, uint32_t *gpuid, uint32_t *gpuidx) { - struct kgd_dev *kgd = (struct kgd_dev *)adev; int i; for (i = 0; i < p->n_pdds; i++) - if (p->pdds[i] && p->pdds[i]->dev->kgd == kgd) { + if (p->pdds[i] && p->pdds[i]->dev->adev == adev) { *gpuid = p->pdds[i]->dev->id; *gpuidx = i; return 0; @@ 
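
Alongside the adev conversion, kfd_process.c switches its doorbell and queue-slot bitmaps from open-coded kzalloc(DIV_ROUND_UP(nbits, BITS_PER_BYTE)) to bitmap_zalloc()/bitmap_free(), and from atomic set_bit() to __set_bit() — presumably safe because the bitmap is not yet visible to any other thread while it is being initialized. Userspace stand-ins for the helper pair (the kernel versions also take GFP flags):

#include <limits.h>
#include <stdlib.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long *bitmap_zalloc(size_t nbits)
{
	/* rounds up to whole longs, not to bytes as DIV_ROUND_UP(n, 8) did */
	return calloc(BITS_TO_LONGS(nbits), sizeof(unsigned long));
}

static void __set_bit(size_t nr, unsigned long *map)
{
	/* non-atomic variant: fine while the map has a single owner */
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

int main(void)
{
	unsigned long *db = bitmap_zalloc(1024);

	if (!db)
		return 1;
	__set_bit(3, db);
	free(db);	/* kernel side pairs this with bitmap_free() */
	return 0;
}
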
-1951,10 +1947,10 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type) * only happens when the first queue is created. */ if (pdd->qpd.vmid) - amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->kgd, + amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev, pdd->qpd.vmid); } else { - amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->kgd, + amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->adev, pdd->process->pasid, type); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 3627e7ac161b..5e5c84a8e1ef 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -118,7 +118,7 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, return ret; pqn->q->gws = mem; - pdd->qpd.num_gws = gws ? amdgpu_amdkfd_get_num_gws(dev->kgd) : 0; + pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0; return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm, pqn->q, NULL); @@ -135,9 +135,8 @@ void kfd_process_dequeue_from_all_devices(struct kfd_process *p) int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p) { INIT_LIST_HEAD(&pqm->queues); - pqm->queue_slot_bitmap = - kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, - BITS_PER_BYTE), GFP_KERNEL); + pqm->queue_slot_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, + GFP_KERNEL); if (!pqm->queue_slot_bitmap) return -ENOMEM; pqm->process = p; @@ -159,7 +158,7 @@ void pqm_uninit(struct process_queue_manager *pqm) kfree(pqn); } - kfree(pqm->queue_slot_bitmap); + bitmap_free(pqm->queue_slot_bitmap); pqm->queue_slot_bitmap = NULL; } @@ -220,7 +219,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, * Hence we also check the type as well */ if ((pdd->qpd.is_debug) || (type == KFD_QUEUE_TYPE_DIQ)) - max_queues = dev->device_info->max_no_of_hqd/2; + max_queues = dev->device_info.max_no_of_hqd/2; if (pdd->qpd.queue_count >= max_queues) return -ENOSPC; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index ed4bc5f844ce..deae12dc777d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -207,7 +207,6 @@ void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset) void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev, uint64_t throttle_bitmask) { - struct amdgpu_device *adev = (struct amdgpu_device *)dev->kgd; /* * ThermalThrottle msg = throttle_bitmask(8): * thermal_interrupt_count(16): @@ -223,14 +222,13 @@ void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev, len = snprintf(fifo_in, sizeof(fifo_in), "%x %llx:%llx\n", KFD_SMI_EVENT_THERMAL_THROTTLE, throttle_bitmask, - atomic64_read(&adev->smu.throttle_int_counter)); + atomic64_read(&dev->adev->smu.throttle_int_counter)); add_event_to_kfifo(dev, KFD_SMI_EVENT_THERMAL_THROTTLE, fifo_in, len); } void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid) { - struct amdgpu_device *adev = (struct amdgpu_device *)dev->kgd; struct amdgpu_task_info task_info; /* VmFault msg = (hex)uint32_pid(8) + :(1) + task name(16) = 25 */ /* 1 byte event + 1 byte space + 25 bytes msg + 1 byte \n + @@ -243,7 +241,7 @@ void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid) return; memset(&task_info, 0, sizeof(struct amdgpu_task_info)); - amdgpu_vm_get_task_info(adev, pasid, &task_info); + amdgpu_vm_get_task_info(dev->adev, pasid, &task_info); /* Report VM faults from user 
applications, not retry from kernel */ if (!task_info.pid) return; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 16137c4247bb..7e92dcea4ce8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -193,7 +193,6 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap, for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) { struct kfd_process_device *pdd; - struct amdgpu_device *adev; pr_debug("mapping to gpu idx 0x%x\n", gpuidx); pdd = kfd_process_device_from_gpuidx(p, gpuidx); @@ -201,9 +200,8 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap, pr_debug("failed to find device idx %d\n", gpuidx); return -EINVAL; } - adev = (struct amdgpu_device *)pdd->dev->kgd; - r = svm_range_dma_map_dev(adev, prange, offset, npages, + r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages, hmm_pfns, gpuidx); if (r) break; @@ -581,7 +579,7 @@ svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id) return NULL; } - return (struct amdgpu_device *)pdd->dev->kgd; + return pdd->dev->adev; } struct kfd_process_device * @@ -593,7 +591,7 @@ svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev) p = container_of(prange->svms, struct kfd_process, svms); - r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpu_idx); + r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpu_idx); if (r) { pr_debug("failed to get device id by adev %p\n", adev); return NULL; @@ -706,6 +704,61 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange, } } +static bool +svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange, + uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs) +{ + uint32_t i; + int gpuidx; + + for (i = 0; i < nattr; i++) { + switch (attrs[i].type) { + case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: + if (prange->preferred_loc != attrs[i].value) + return false; + break; + case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: + /* Prefetch should always trigger a migration even + * if the value of the attribute didn't change. 
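
(svm_range_is_same_attrs() continues below.) The idea of the new helper: a set-attribute call can skip splitting and updating ranges whose attributes already match the request, with prefetch as the deliberate exception since it must re-trigger migration every time. A much-simplified model of that comparison, covering only a few illustrative attribute types:

#include <stdbool.h>
#include <stdio.h>

enum attr_type { ATTR_PREFERRED_LOC, ATTR_PREFETCH_LOC, ATTR_GRANULARITY };

struct attr { enum attr_type type; unsigned int value; };
struct range { unsigned int preferred_loc, granularity; };

static bool same_attrs(const struct range *r,
		       const struct attr *attrs, int nattr)
{
	for (int i = 0; i < nattr; i++) {
		switch (attrs[i].type) {
		case ATTR_PREFERRED_LOC:
			if (r->preferred_loc != attrs[i].value)
				return false;
			break;
		case ATTR_PREFETCH_LOC:
			return false;	/* prefetch always re-migrates */
		case ATTR_GRANULARITY:
			if (r->granularity != attrs[i].value)
				return false;
			break;
		}
	}
	return true;	/* nothing changed: the update can be skipped */
}

int main(void)
{
	struct range r = { .preferred_loc = 1, .granularity = 9 };
	struct attr a[] = { { ATTR_GRANULARITY, 9 } };

	printf("skip update: %d\n", same_attrs(&r, a, 1));	/* prints 1 */
	return 0;
}
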
+ */ + return false; + case KFD_IOCTL_SVM_ATTR_ACCESS: + case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE: + case KFD_IOCTL_SVM_ATTR_NO_ACCESS: + gpuidx = kfd_process_gpuidx_from_gpuid(p, + attrs[i].value); + if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) { + if (test_bit(gpuidx, prange->bitmap_access) || + test_bit(gpuidx, prange->bitmap_aip)) + return false; + } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) { + if (!test_bit(gpuidx, prange->bitmap_access)) + return false; + } else { + if (!test_bit(gpuidx, prange->bitmap_aip)) + return false; + } + break; + case KFD_IOCTL_SVM_ATTR_SET_FLAGS: + if ((prange->flags & attrs[i].value) != attrs[i].value) + return false; + break; + case KFD_IOCTL_SVM_ATTR_CLR_FLAGS: + if ((prange->flags & attrs[i].value) != 0) + return false; + break; + case KFD_IOCTL_SVM_ATTR_GRANULARITY: + if (prange->granularity != attrs[i].value) + return false; + break; + default: + WARN_ONCE(1, "svm_range_check_attrs wasn't called?"); + } + } + + return true; +} + /** * svm_range_debug_dump - print all range information from svms * @svms: svm range list header @@ -743,14 +796,6 @@ static void svm_range_debug_dump(struct svm_range_list *svms) } } -static bool -svm_range_is_same_attrs(struct svm_range *old, struct svm_range *new) -{ - return (old->prefetch_loc == new->prefetch_loc && - old->flags == new->flags && - old->granularity == new->granularity); -} - static int svm_range_split_array(void *ppnew, void *ppold, size_t size, uint64_t old_start, uint64_t old_n, @@ -943,7 +988,7 @@ svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last, } static int -svm_range_split_tail(struct svm_range *prange, struct svm_range *new, +svm_range_split_tail(struct svm_range *prange, uint64_t new_last, struct list_head *insert_list) { struct svm_range *tail; @@ -955,7 +1000,7 @@ svm_range_split_tail(struct svm_range *prange, struct svm_range *new, } static int -svm_range_split_head(struct svm_range *prange, struct svm_range *new, +svm_range_split_head(struct svm_range *prange, uint64_t new_start, struct list_head *insert_list) { struct svm_range *head; @@ -1053,8 +1098,8 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, if (domain == SVM_RANGE_VRAM_DOMAIN) bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev); - switch (adev->asic_type) { - case CHIP_ARCTURUS: + switch (KFD_GC_VERSION(adev->kfd.dev)) { + case IP_VERSION(9, 4, 1): if (domain == SVM_RANGE_VRAM_DOMAIN) { if (bo_adev == adev) { mapping_flags |= coherent ? @@ -1070,7 +1115,7 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; } break; - case CHIP_ALDEBARAN: + case IP_VERSION(9, 4, 2): if (domain == SVM_RANGE_VRAM_DOMAIN) { if (bo_adev == adev) { mapping_flags |= coherent ? 
@@ -1129,7 +1174,6 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start, DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE); struct kfd_process_device *pdd; struct dma_fence *fence = NULL; - struct amdgpu_device *adev; struct kfd_process *p; uint32_t gpuidx; int r = 0; @@ -1145,9 +1189,9 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start, pr_debug("failed to find device idx %d\n", gpuidx); return -EINVAL; } - adev = (struct amdgpu_device *)pdd->dev->kgd; - r = svm_range_unmap_from_gpu(adev, drm_priv_to_vm(pdd->drm_priv), + r = svm_range_unmap_from_gpu(pdd->dev->adev, + drm_priv_to_vm(pdd->drm_priv), start, last, &fence); if (r) break; @@ -1159,7 +1203,7 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start, if (r) break; } - amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev, + amdgpu_amdkfd_flush_gpu_tlb_pasid(pdd->dev->adev, p->pasid, TLB_FLUSH_HEAVYWEIGHT); } @@ -1172,7 +1216,6 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned long npages, bool readonly, dma_addr_t *dma_addr, struct amdgpu_device *bo_adev, struct dma_fence **fence) { - struct amdgpu_bo_va bo_va; bool table_freed = false; uint64_t pte_flags; unsigned long last_start; @@ -1185,9 +1228,6 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm, pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms, last_start, last_start + npages - 1, readonly); - if (prange->svm_bo && prange->ttm_res) - bo_va.is_xgmi = amdgpu_xgmi_same_hive(adev, bo_adev); - for (i = offset; i < offset + npages; i++) { last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN; dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN; @@ -1243,8 +1283,7 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct kfd_process *p; p = container_of(prange->svms, struct kfd_process, svms); - amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev, - p->pasid, TLB_FLUSH_LEGACY); + amdgpu_amdkfd_flush_gpu_tlb_pasid(adev, p->pasid, TLB_FLUSH_LEGACY); } out: return r; @@ -1257,7 +1296,6 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset, { struct kfd_process_device *pdd; struct amdgpu_device *bo_adev; - struct amdgpu_device *adev; struct kfd_process *p; struct dma_fence *fence = NULL; uint32_t gpuidx; @@ -1276,19 +1314,18 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset, pr_debug("failed to find device idx %d\n", gpuidx); return -EINVAL; } - adev = (struct amdgpu_device *)pdd->dev->kgd; pdd = kfd_bind_process_to_device(pdd->dev, p); if (IS_ERR(pdd)) return -EINVAL; - if (bo_adev && adev != bo_adev && - !amdgpu_xgmi_same_hive(adev, bo_adev)) { + if (bo_adev && pdd->dev->adev != bo_adev && + !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) { pr_debug("cannot map to device idx %d\n", gpuidx); continue; } - r = svm_range_map_to_gpu(adev, drm_priv_to_vm(pdd->drm_priv), + r = svm_range_map_to_gpu(pdd->dev->adev, drm_priv_to_vm(pdd->drm_priv), prange, offset, npages, readonly, prange->dma_addr[gpuidx], bo_adev, wait ? 
&fence : NULL); @@ -1322,7 +1359,6 @@ struct svm_validate_context { static int svm_range_reserve_bos(struct svm_validate_context *ctx) { struct kfd_process_device *pdd; - struct amdgpu_device *adev; struct amdgpu_vm *vm; uint32_t gpuidx; int r; @@ -1334,7 +1370,6 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx) pr_debug("failed to find device idx %d\n", gpuidx); return -EINVAL; } - adev = (struct amdgpu_device *)pdd->dev->kgd; vm = drm_priv_to_vm(pdd->drm_priv); ctx->tv[gpuidx].bo = &vm->root.bo->tbo; @@ -1356,9 +1391,9 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx) r = -EINVAL; goto unreserve_out; } - adev = (struct amdgpu_device *)pdd->dev->kgd; - r = amdgpu_vm_validate_pt_bos(adev, drm_priv_to_vm(pdd->drm_priv), + r = amdgpu_vm_validate_pt_bos(pdd->dev->adev, + drm_priv_to_vm(pdd->drm_priv), svm_range_bo_validate, NULL); if (r) { pr_debug("failed %d validate pt bos\n", r); @@ -1381,12 +1416,10 @@ static void svm_range_unreserve_bos(struct svm_validate_context *ctx) static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx) { struct kfd_process_device *pdd; - struct amdgpu_device *adev; pdd = kfd_process_device_from_gpuidx(p, gpuidx); - adev = (struct amdgpu_device *)pdd->dev->kgd; - return SVM_ADEV_PGMAP_OWNER(adev); + return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev); } /* @@ -1574,7 +1607,6 @@ retry_flush_work: static void svm_range_restore_work(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); - struct amdkfd_process_info *process_info; struct svm_range_list *svms; struct svm_range *prange; struct kfd_process *p; @@ -1594,12 +1626,10 @@ static void svm_range_restore_work(struct work_struct *work) * the lifetime of this thread, kfd_process and mm will be valid. */ p = container_of(svms, struct kfd_process, svms); - process_info = p->kgd_process_info; mm = p->mm; if (!mm) return; - mutex_lock(&process_info->lock); svm_range_list_lock_and_flush_work(svms, mm); mutex_lock(&svms->lock); @@ -1652,7 +1682,6 @@ static void svm_range_restore_work(struct work_struct *work) out_reschedule: mutex_unlock(&svms->lock); mmap_write_unlock(mm); - mutex_unlock(&process_info->lock); /* If validation failed, reschedule another attempt */ if (evicted_ranges) { @@ -1664,6 +1693,10 @@ out_reschedule: /** * svm_range_evict - evict svm range + * @prange: svm range structure + * @mm: current process mm_struct + * @start: starting process queue number + * @last: last process queue number * * Stop all queues of the process to ensure GPU doesn't access the memory, then * return to let CPU evict the buffer and proceed CPU pagetable update. @@ -1768,46 +1801,49 @@ static struct svm_range *svm_range_clone(struct svm_range *old) } /** - * svm_range_handle_overlap - split overlap ranges - * @svms: svm range list header - * @new: range added with this attributes - * @start: range added start address, in pages - * @last: range last address, in pages - * @update_list: output, the ranges attributes are updated. For set_attr, this - * will do validation and map to GPUs. For unmap, this will be - * removed and unmap from GPUs - * @insert_list: output, the ranges will be inserted into svms, attributes are - * not changes. For set_attr, this will add into svms. - * @remove_list:output, the ranges will be removed from svms - * @left: the remaining range after overlap, For set_attr, this will be added - * as new range. 
+ * svm_range_add - add svm range and handle overlap + * @p: the process whose svms the range is added to + * @start: page size aligned + * @size: page size aligned + * @nattr: number of attributes + * @attrs: array of attributes + * @update_list: output, the ranges that need to be validated and their GPU mapping updated + * @insert_list: output, the ranges that need to be inserted into svms + * @remove_list: output, the ranges that are replaced and need to be removed from svms * - * Total have 5 overlap cases. + * Check if the virtual address range has overlap with any existing ranges, + * split partly overlapping ranges and add new ranges in the gaps. All changes + * should be applied to the range_list and interval tree transactionally. If + * any range split or allocation fails, the entire update fails. Therefore any + * existing overlapping svm_ranges are cloned and the original svm_ranges left + * unchanged. * - * This function handles overlap of an address interval with existing - * struct svm_ranges for applying new attributes. This may require - * splitting existing struct svm_ranges. All changes should be applied to - * the range_list and interval tree transactionally. If any split operation - * fails, the entire update fails. Therefore the existing overlapping - * svm_ranges are cloned and the original svm_ranges left unchanged. If the - * transaction succeeds, the modified clones are added and the originals - * freed. Otherwise the clones are removed and the old svm_ranges remain. + * If the transaction succeeds, the caller can update and insert clones and + * new ranges, then free the originals. * - * Context: The caller must hold svms->lock + * Otherwise the caller can free the clones and new ranges, while the old + * svm_ranges remain unchanged. + * + * Context: Process context, caller must hold svms->lock + * + * Return: + * 0 - OK, otherwise error code */ static int -svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new, - unsigned long start, unsigned long last, - struct list_head *update_list, - struct list_head *insert_list, - struct list_head *remove_list, - unsigned long *left) +svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size, + uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs, + struct list_head *update_list, struct list_head *insert_list, + struct list_head *remove_list) { + unsigned long last = start + size - 1UL; + struct svm_range_list *svms = &p->svms; struct interval_tree_node *node; struct svm_range *prange; struct svm_range *tmp; int r = 0; + pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last); + INIT_LIST_HEAD(update_list); INIT_LIST_HEAD(insert_list); INIT_LIST_HEAD(remove_list); @@ -1815,18 +1851,24 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new, node = interval_tree_iter_first(&svms->objects, start, last); while (node) { struct interval_tree_node *next; - struct svm_range *old; unsigned long next_start; pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start, node->last); - old = container_of(node, struct svm_range, it_node); + prange = container_of(node, struct svm_range, it_node); next = interval_tree_iter_next(node, start, last); next_start = min(node->last, last) + 1; - if (node->start < start || node->last > last) { - /* node intersects the updated range, clone+split it */ + if (svm_range_is_same_attrs(p, prange, nattr, attrs)) { + /* nothing to do */ + } else if (node->start < start || node->last > last) { + /* node intersects the update range and its attributes + * will change. 
Clone and split it, apply updates only + * to the overlapping part + */ + struct svm_range *old = prange; + prange = svm_range_clone(old); if (!prange) { r = -ENOMEM; @@ -1835,17 +1877,18 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new, list_add(&old->remove_list, remove_list); list_add(&prange->insert_list, insert_list); + list_add(&prange->update_list, update_list); if (node->start < start) { pr_debug("change old range start\n"); - r = svm_range_split_head(prange, new, start, + r = svm_range_split_head(prange, start, insert_list); if (r) goto out; } if (node->last > last) { pr_debug("change old range last\n"); - r = svm_range_split_tail(prange, new, last, + r = svm_range_split_tail(prange, last, insert_list); if (r) goto out; @@ -1854,16 +1897,12 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new, /* The node is contained within start..last, * just update it */ - prange = old; - } - - if (!svm_range_is_same_attrs(prange, new)) list_add(&prange->update_list, update_list); + } /* insert a new node if needed */ if (node->start > start) { - prange = svm_range_new(prange->svms, start, - node->start - 1); + prange = svm_range_new(svms, start, node->start - 1); if (!prange) { r = -ENOMEM; goto out; @@ -1877,8 +1916,16 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new, start = next_start; } - if (left && start <= last) - *left = last - start + 1; + /* add a final range at the end if needed */ + if (start <= last) { + prange = svm_range_new(svms, start, last); + if (!prange) { + r = -ENOMEM; + goto out; + } + list_add(&prange->insert_list, insert_list); + list_add(&prange->update_list, update_list); + } out: if (r) @@ -1966,23 +2013,30 @@ svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange) static void svm_range_drain_retry_fault(struct svm_range_list *svms) { struct kfd_process_device *pdd; - struct amdgpu_device *adev; struct kfd_process *p; + int drain; uint32_t i; p = container_of(svms, struct kfd_process, svms); +restart: + drain = atomic_read(&svms->drain_pagefaults); + if (!drain) + return; + for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) { pdd = p->pdds[i]; if (!pdd) continue; pr_debug("drain retry fault gpu %d svms %p\n", i, svms); - adev = (struct amdgpu_device *)pdd->dev->kgd; - amdgpu_ih_wait_on_checkpoint_process(adev, &adev->irq.ih1); + amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev, + &pdd->dev->adev->irq.ih1); pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms); } + if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain) + goto restart; } static void svm_range_deferred_list_work(struct work_struct *work) @@ -1990,43 +2044,41 @@ struct svm_range_list *svms; struct svm_range *prange; struct mm_struct *mm; + struct kfd_process *p; svms = container_of(work, struct svm_range_list, deferred_list_work); pr_debug("enter svms 0x%p\n", svms); + p = container_of(svms, struct kfd_process, svms); + /* Avoid the mm going away while we insert the mmu notifier */ + mm = get_task_mm(p->lead_thread); + if (!mm) { + pr_debug("svms 0x%p process mm gone\n", svms); + return; + } +retry: + mmap_write_lock(mm); + + /* Checking for the need to drain retry faults must be done inside + * the mmap write lock to serialize with munmap notifiers. 
+ */ + if (unlikely(atomic_read(&svms->drain_pagefaults))) { + mmap_write_unlock(mm); + svm_range_drain_retry_fault(svms); + goto retry; + } + spin_lock(&svms->deferred_list_lock); while (!list_empty(&svms->deferred_range_list)) { prange = list_first_entry(&svms->deferred_range_list, struct svm_range, deferred_list); + list_del_init(&prange->deferred_list); spin_unlock(&svms->deferred_list_lock); + pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange, prange->start, prange->last, prange->work_item.op); - mm = prange->work_item.mm; -retry: - mmap_write_lock(mm); mutex_lock(&svms->lock); - - /* Checking for the need to drain retry faults must be in - * mmap write lock to serialize with munmap notifiers. - * - * Remove from deferred_list must be inside mmap write lock, - * otherwise, svm_range_list_lock_and_flush_work may hold mmap - * write lock, and continue because deferred_list is empty, then - * deferred_list handle is blocked by mmap write lock. - */ - spin_lock(&svms->deferred_list_lock); - if (unlikely(svms->drain_pagefaults)) { - svms->drain_pagefaults = false; - spin_unlock(&svms->deferred_list_lock); - mutex_unlock(&svms->lock); - mmap_write_unlock(mm); - svm_range_drain_retry_fault(svms); - goto retry; - } - list_del_init(&prange->deferred_list); - spin_unlock(&svms->deferred_list_lock); - mutex_lock(&prange->migrate_mutex); while (!list_empty(&prange->child_list)) { struct svm_range *pchild; @@ -2042,12 +2094,13 @@ retry: svm_range_handle_list_op(svms, prange); mutex_unlock(&svms->lock); - mmap_write_unlock(mm); spin_lock(&svms->deferred_list_lock); } spin_unlock(&svms->deferred_list_lock); + mmap_write_unlock(mm); + mmput(mm); pr_debug("exit svms 0x%p\n", svms); } @@ -2056,12 +2109,6 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange, struct mm_struct *mm, enum svm_work_list_ops op) { spin_lock(&svms->deferred_list_lock); - /* Make sure pending page faults are drained in the deferred worker - * before the range is freed to avoid straggler interrupts on - * unmapped memory causing "phantom faults". - */ - if (op == SVM_OP_UNMAP_RANGE) - svms->drain_pagefaults = true; /* if prange is on the deferred list */ if (!list_empty(&prange->deferred_list)) { pr_debug("update exist prange 0x%p work op %d\n", prange, op); @@ -2140,6 +2187,12 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange, pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms, prange, prange->start, prange->last, start, last); + /* Make sure pending page faults are drained in the deferred worker + * before the range is freed to avoid straggler interrupts on + * unmapped memory causing "phantom faults". + */ + atomic_inc(&svms->drain_pagefaults); + unmap_parent = start <= prange->start && last >= prange->last; list_for_each_entry(pchild, &prange->child_list, child_list) { @@ -2169,6 +2222,9 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange, /** * svm_range_cpu_invalidate_pagetables - interval notifier callback + * @mni: mmu_interval_notifier struct + * @range: mmu_notifier_range struct + * @cur_seq: value to pass to mmu_interval_set_seq() * * If event is MMU_NOTIFY_UNMAP, this is from CPU unmap range, otherwise, it * is from migration, or CPU page invalidation callback. 
@@ -2301,7 +2357,7 @@ svm_range_best_restore_location(struct svm_range *prange, p = container_of(prange->svms, struct kfd_process, svms); - r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, gpuidx); + r = kfd_process_gpuid_from_adev(p, adev, &gpuid, gpuidx); if (r < 0) { pr_debug("failed to get gpuid from kgd\n"); return -1; @@ -2478,7 +2534,7 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev, pr_debug("Failed to create prange in address [0x%llx]\n", addr); return NULL; } - if (kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx)) { + if (kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx)) { pr_debug("failed to get gpuid from kgd\n"); svm_range_free(prange); return NULL; @@ -2545,7 +2601,7 @@ svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p, uint32_t gpuid; int r; - r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx); + r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx); if (r < 0) return; } @@ -2559,20 +2615,13 @@ svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p, } static bool -svm_fault_allowed(struct mm_struct *mm, uint64_t addr, bool write_fault) +svm_fault_allowed(struct vm_area_struct *vma, bool write_fault) { unsigned long requested = VM_READ; - struct vm_area_struct *vma; if (write_fault) requested |= VM_WRITE; - vma = find_vma(mm, addr << PAGE_SHIFT); - if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) { - pr_debug("address 0x%llx VMA is removed\n", addr); - return true; - } - pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested, vma->vm_flags); return (vma->vm_flags & requested) == requested; @@ -2590,6 +2639,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, int32_t best_loc; int32_t gpuidx = MAX_GPU_INSTANCE; bool write_locked = false; + struct vm_area_struct *vma; int r = 0; if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) { @@ -2600,7 +2650,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, p = kfd_lookup_process_by_pasid(pasid); if (!p) { pr_debug("kfd process not found, pasid 0x%x\n", pasid); - return -ESRCH; + return 0; } if (!p->xnack_enabled) { pr_debug("XNACK not enabled for pasid 0x%x\n", pasid); @@ -2611,10 +2661,19 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr); + if (atomic_read(&svms->drain_pagefaults)) { + pr_debug("draining retry fault, drop fault 0x%llx\n", addr); + r = 0; + goto out; + } + + /* p->lead_thread is available as kfd_process_wq_release flushes the work + * before releasing the task ref. + */ mm = get_task_mm(p->lead_thread); if (!mm) { pr_debug("svms 0x%p failed to get mm\n", svms); - r = -ESRCH; + r = 0; goto out; } @@ -2652,6 +2711,7 @@ retry_write_locked: if (svm_range_skip_recover(prange)) { amdgpu_gmc_filter_faults_remove(adev, addr, pasid); + r = 0; goto out_unlock_range; } @@ -2660,10 +2720,21 @@ retry_write_locked: if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) { pr_debug("svms 0x%p [0x%lx %lx] already restored\n", svms, prange->start, prange->last); + r = 0; + goto out_unlock_range; + } + + /* __do_munmap removed VMA, return success as we are handling stale + * retry fault. 
+ */ + vma = find_vma(mm, addr << PAGE_SHIFT); + if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) { + pr_debug("address 0x%llx VMA is removed\n", addr); + r = 0; goto out_unlock_range; } - if (!svm_fault_allowed(mm, addr, write_fault)) { + if (!svm_fault_allowed(vma, write_fault)) { pr_debug("fault addr 0x%llx no %s permission\n", addr, write_fault ? "write" : "read"); r = -EPERM; @@ -2741,6 +2812,14 @@ void svm_range_list_fini(struct kfd_process *p) /* Ensure list work is finished before process is destroyed */ flush_work(&p->svms.deferred_list_work); + /* + * Ensure no retry fault comes in afterwards, as page fault handler will + * not find kfd process and take mm lock to recover fault. + */ + atomic_inc(&p->svms.drain_pagefaults); + svm_range_drain_retry_fault(&p->svms); + + list_for_each_entry_safe(prange, next, &p->svms.list, list) { svm_range_unlink(prange); svm_range_remove_notifier(prange); @@ -2761,6 +2840,7 @@ int svm_range_list_init(struct kfd_process *p) mutex_init(&svms->lock); INIT_LIST_HEAD(&svms->list); atomic_set(&svms->evicted_ranges, 0); + atomic_set(&svms->drain_pagefaults, 0); INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work); INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work); INIT_LIST_HEAD(&svms->deferred_range_list); @@ -2868,59 +2948,6 @@ svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size) } /** - * svm_range_add - add svm range and handle overlap - * @p: the range add to this process svms - * @start: page size aligned - * @size: page size aligned - * @nattr: number of attributes - * @attrs: array of attributes - * @update_list: output, the ranges need validate and update GPU mapping - * @insert_list: output, the ranges need insert to svms - * @remove_list: output, the ranges are replaced and need remove from svms - * - * Check if the virtual address range has overlap with the registered ranges, - * split the overlapped range, copy and adjust pages address and vram nodes in - * old and new ranges. 
- * - * Context: Process context, caller must hold svms->lock - * - * Return: - * 0 - OK, otherwise error code - */ -static int -svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size, - uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs, - struct list_head *update_list, struct list_head *insert_list, - struct list_head *remove_list) -{ - uint64_t last = start + size - 1UL; - struct svm_range_list *svms; - struct svm_range new = {0}; - struct svm_range *prange; - unsigned long left = 0; - int r = 0; - - pr_debug("svms 0x%p [0x%llx 0x%llx]\n", &p->svms, start, last); - - svm_range_apply_attrs(p, &new, nattr, attrs); - - svms = &p->svms; - - r = svm_range_handle_overlap(svms, &new, start, last, update_list, - insert_list, remove_list, &left); - if (r) - return r; - - if (left) { - prange = svm_range_new(svms, last - left + 1, last); - list_add(&prange->insert_list, insert_list); - list_add(&prange->update_list, update_list); - } - - return 0; -} - -/** * svm_range_best_prefetch_location - decide the best prefetch location * @prange: svm range structure * @@ -2953,7 +2980,6 @@ svm_range_best_prefetch_location(struct svm_range *prange) uint32_t best_loc = prange->prefetch_loc; struct kfd_process_device *pdd; struct amdgpu_device *bo_adev; - struct amdgpu_device *adev; struct kfd_process *p; uint32_t gpuidx; @@ -2981,12 +3007,11 @@ svm_range_best_prefetch_location(struct svm_range *prange) pr_debug("failed to get device by idx 0x%x\n", gpuidx); continue; } - adev = (struct amdgpu_device *)pdd->dev->kgd; - if (adev == bo_adev) + if (pdd->dev->adev == bo_adev) continue; - if (!amdgpu_xgmi_same_hive(adev, bo_adev)) { + if (!amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) { best_loc = 0; break; } @@ -3150,7 +3175,6 @@ static int svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size, uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs) { - struct amdkfd_process_info *process_info = p->kgd_process_info; struct mm_struct *mm = current->mm; struct list_head update_list; struct list_head insert_list; @@ -3169,8 +3193,6 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size, svms = &p->svms; - mutex_lock(&process_info->lock); - svm_range_list_lock_and_flush_work(svms, mm); r = svm_range_is_valid(p, start, size); @@ -3246,8 +3268,6 @@ out_unlock_range: mutex_unlock(&svms->lock); mmap_read_unlock(mm); out: - mutex_unlock(&process_info->lock); - pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid, &p->svms, start, start + size - 1, r); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index dd593ad0614a..948fbb39336e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -113,7 +113,7 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev) return device; } -struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd) +struct kfd_dev *kfd_device_by_adev(const struct amdgpu_device *adev) { struct kfd_topology_device *top_dev; struct kfd_dev *device = NULL; @@ -121,7 +121,7 @@ struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd) down_read(&topology_lock); list_for_each_entry(top_dev, &topology_device_list, list) - if (top_dev->gpu && top_dev->gpu->kgd == kgd) { + if (top_dev->gpu && top_dev->gpu->adev == adev) { device = top_dev->gpu; break; } @@ -503,7 +503,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, if (dev->gpu) { log_max_watch_addr = - 
__ilog2_u32(dev->gpu->device_info->num_of_watch_points); + __ilog2_u32(dev->gpu->device_info.num_of_watch_points); if (log_max_watch_addr) { dev->node_props.capability |= @@ -515,7 +515,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, HSA_CAP_WATCH_POINTS_TOTALBITS_MASK); } - if (dev->gpu->device_info->asic_family == CHIP_TONGA) + if (dev->gpu->adev->asic_type == CHIP_TONGA) dev->node_props.capability |= HSA_CAP_AQL_QUEUE_DOUBLE_MAP; @@ -531,7 +531,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version", dev->gpu->sdma_fw_version); sysfs_show_64bit_prop(buffer, offs, "unique_id", - amdgpu_amdkfd_get_unique_id(dev->gpu->kgd)); + dev->gpu->adev->unique_id); } @@ -1106,7 +1106,7 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu) if (!gpu) return 0; - amdgpu_amdkfd_get_local_mem_info(gpu->kgd, &local_mem_info); + amdgpu_amdkfd_get_local_mem_info(gpu->adev, &local_mem_info); local_mem_size = local_mem_info.local_mem_size_private + local_mem_info.local_mem_size_public; @@ -1189,7 +1189,7 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev) * for APUs - If CRAT from ACPI reports more than one bank, then * all the banks will report the same mem_clk_max information */ - amdgpu_amdkfd_get_local_mem_info(dev->gpu->kgd, &local_mem_info); + amdgpu_amdkfd_get_local_mem_info(dev->gpu->adev, &local_mem_info); list_for_each_entry(mem, &dev->mem_props, list) mem->mem_clk_max = local_mem_info.mem_clk_max; @@ -1217,8 +1217,7 @@ static void kfd_set_iolink_no_atomics(struct kfd_topology_device *dev, /* set gpu (dev) flags. */ } else { if (!dev->gpu->pci_atomic_requested || - dev->gpu->device_info->asic_family == - CHIP_HAWAII) + dev->gpu->adev->asic_type == CHIP_HAWAII) link->flags |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT | CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT; } @@ -1239,7 +1238,7 @@ static void kfd_set_iolink_non_coherent(struct kfd_topology_device *to_dev, */ if (inbound_link->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS || (inbound_link->iolink_type == CRAT_IOLINK_TYPE_XGMI && - to_dev->gpu->device_info->asic_family == CHIP_VEGA20)) { + KFD_GC_VERSION(to_dev->gpu) == IP_VERSION(9, 4, 0))) { outbound_link->flags |= CRAT_IOLINK_FLAGS_NON_COHERENT; inbound_link->flags |= CRAT_IOLINK_FLAGS_NON_COHERENT; } @@ -1286,7 +1285,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu) void *crat_image = NULL; size_t image_size = 0; int proximity_domain; - struct amdgpu_device *adev; + int i; + const char *asic_name = amdgpu_asic_name[gpu->adev->asic_type]; INIT_LIST_HEAD(&temp_topology_device_list); @@ -1296,10 +1296,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu) proximity_domain = atomic_inc_return(&topology_crat_proximity_domain); - adev = (struct amdgpu_device *)(gpu->kgd); - /* Include the CPU in xGMI hive if xGMI connected by assigning it the hive ID. 
*/ - if (gpu->hive_id && adev->gmc.xgmi.connected_to_cpu) { + if (gpu->hive_id && gpu->adev->gmc.xgmi.connected_to_cpu) { struct kfd_topology_device *top_dev; down_read(&topology_lock); @@ -1372,45 +1370,48 @@ int kfd_topology_add_device(struct kfd_dev *gpu) * needed for the topology */ - amdgpu_amdkfd_get_cu_info(dev->gpu->kgd, &cu_info); + amdgpu_amdkfd_get_cu_info(dev->gpu->adev, &cu_info); - strncpy(dev->node_props.name, gpu->device_info->asic_name, - KFD_TOPOLOGY_PUBLIC_NAME_SIZE); + for (i = 0; i < KFD_TOPOLOGY_PUBLIC_NAME_SIZE-1; i++) { + dev->node_props.name[i] = __tolower(asic_name[i]); + if (asic_name[i] == '\0') + break; + } + dev->node_props.name[i] = '\0'; dev->node_props.simd_arrays_per_engine = cu_info.num_shader_arrays_per_engine; - dev->node_props.gfx_target_version = gpu->device_info->gfx_target_version; + dev->node_props.gfx_target_version = gpu->device_info.gfx_target_version; dev->node_props.vendor_id = gpu->pdev->vendor; dev->node_props.device_id = gpu->pdev->device; dev->node_props.capability |= - ((amdgpu_amdkfd_get_asic_rev_id(dev->gpu->kgd) << - HSA_CAP_ASIC_REVISION_SHIFT) & + ((dev->gpu->adev->rev_id << HSA_CAP_ASIC_REVISION_SHIFT) & HSA_CAP_ASIC_REVISION_MASK); dev->node_props.location_id = pci_dev_id(gpu->pdev); dev->node_props.domain = pci_domain_nr(gpu->pdev->bus); dev->node_props.max_engine_clk_fcompute = - amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd); + amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->adev); dev->node_props.max_engine_clk_ccompute = cpufreq_quick_get_max(0) / 1000; dev->node_props.drm_render_minor = gpu->shared_resources.drm_render_minor; dev->node_props.hive_id = gpu->hive_id; - dev->node_props.num_sdma_engines = gpu->device_info->num_sdma_engines; + dev->node_props.num_sdma_engines = kfd_get_num_sdma_engines(gpu); dev->node_props.num_sdma_xgmi_engines = - gpu->device_info->num_xgmi_sdma_engines; + kfd_get_num_xgmi_sdma_engines(gpu); dev->node_props.num_sdma_queues_per_engine = - gpu->device_info->num_sdma_queues_per_engine; + gpu->device_info.num_sdma_queues_per_engine; dev->node_props.num_gws = (dev->gpu->gws && dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ? 
- amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0; + dev->gpu->adev->gds.gws_size : 0; dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm); kfd_fill_mem_clk_max_info(dev); kfd_fill_iolink_non_crat_info(dev); - switch (dev->gpu->device_info->asic_family) { + switch (dev->gpu->adev->asic_type) { case CHIP_KAVERI: case CHIP_HAWAII: case CHIP_TONGA: @@ -1429,30 +1430,14 @@ int kfd_topology_add_device(struct kfd_dev *gpu) HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) & HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK); break; - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - case CHIP_RENOIR: - case CHIP_ARCTURUS: - case CHIP_ALDEBARAN: - case CHIP_NAVI10: - case CHIP_NAVI12: - case CHIP_NAVI14: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: - case CHIP_CYAN_SKILLFISH: - dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 << - HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) & - HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK); - break; default: - WARN(1, "Unexpected ASIC family %u", - dev->gpu->device_info->asic_family); + if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(9, 0, 1)) + dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 << + HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) & + HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK); + else + WARN(1, "Unexpected ASIC family %u", + dev->gpu->adev->asic_type); } /* @@ -1469,7 +1454,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu) * because it doesn't consider masked out CUs * max_waves_per_simd: Carrizo reports wrong max_waves_per_simd */ - if (dev->gpu->device_info->asic_family == CHIP_CARRIZO) { + if (dev->gpu->adev->asic_type == CHIP_CARRIZO) { dev->node_props.simd_count = cu_info.simd_per_cu * cu_info.cu_active_number; dev->node_props.max_waves_per_simd = 10; @@ -1477,16 +1462,17 @@ int kfd_topology_add_device(struct kfd_dev *gpu) /* kfd only concerns sram ecc on GFX and HBM ecc on UMC */ dev->node_props.capability |= - ((adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ? + ((dev->gpu->adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ? HSA_CAP_SRAM_EDCSUPPORTED : 0; - dev->node_props.capability |= ((adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ? + dev->node_props.capability |= + ((dev->gpu->adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ? HSA_CAP_MEM_EDCSUPPORTED : 0; - if (adev->asic_type != CHIP_VEGA10) - dev->node_props.capability |= (adev->ras_enabled != 0) ? + if (KFD_GC_VERSION(dev->gpu) != IP_VERSION(9, 0, 1)) + dev->node_props.capability |= (dev->gpu->adev->ras_enabled != 0) ? 
HSA_CAP_RASEVENTNOTIFY : 0; - if (KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) + if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev->kfd.dev)) dev->node_props.capability |= HSA_CAP_SVMAPI_SUPPORTED; kfd_debug_print_topology(); @@ -1592,7 +1578,7 @@ void kfd_double_confirm_iommu_support(struct kfd_dev *gpu) gpu->use_iommu_v2 = false; - if (!gpu->device_info->needs_iommu_device) + if (!gpu->device_info.needs_iommu_device) return; down_read(&topology_lock); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h index a8db017c9b8e..f0cc59d2fd5d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h @@ -25,38 +25,11 @@ #include <linux/types.h> #include <linux/list.h> +#include <linux/kfd_sysfs.h> #include "kfd_crat.h" #define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 32 -#define HSA_CAP_HOT_PLUGGABLE 0x00000001 -#define HSA_CAP_ATS_PRESENT 0x00000002 -#define HSA_CAP_SHARED_WITH_GRAPHICS 0x00000004 -#define HSA_CAP_QUEUE_SIZE_POW2 0x00000008 -#define HSA_CAP_QUEUE_SIZE_32BIT 0x00000010 -#define HSA_CAP_QUEUE_IDLE_EVENT 0x00000020 -#define HSA_CAP_VA_LIMIT 0x00000040 -#define HSA_CAP_WATCH_POINTS_SUPPORTED 0x00000080 -#define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK 0x00000f00 -#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 8 -#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK 0x00003000 -#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT 12 - -#define HSA_CAP_DOORBELL_TYPE_PRE_1_0 0x0 -#define HSA_CAP_DOORBELL_TYPE_1_0 0x1 -#define HSA_CAP_DOORBELL_TYPE_2_0 0x2 -#define HSA_CAP_AQL_QUEUE_DOUBLE_MAP 0x00004000 - -#define HSA_CAP_RESERVED_WAS_SRAM_EDCSUPPORTED 0x00080000 /* Old buggy user mode depends on this being 0 */ -#define HSA_CAP_MEM_EDCSUPPORTED 0x00100000 -#define HSA_CAP_RASEVENTNOTIFY 0x00200000 -#define HSA_CAP_ASIC_REVISION_MASK 0x03c00000 -#define HSA_CAP_ASIC_REVISION_SHIFT 22 -#define HSA_CAP_SRAM_EDCSUPPORTED 0x04000000 -#define HSA_CAP_SVMAPI_SUPPORTED 0x08000000 -#define HSA_CAP_FLAGS_COHERENTHOSTACCESS 0x10000000 -#define HSA_CAP_RESERVED 0xe00f8000 - struct kfd_node_properties { uint64_t hive_id; uint32_t cpu_cores_count; @@ -93,17 +66,6 @@ struct kfd_node_properties { char name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE]; }; -#define HSA_MEM_HEAP_TYPE_SYSTEM 0 -#define HSA_MEM_HEAP_TYPE_FB_PUBLIC 1 -#define HSA_MEM_HEAP_TYPE_FB_PRIVATE 2 -#define HSA_MEM_HEAP_TYPE_GPU_GDS 3 -#define HSA_MEM_HEAP_TYPE_GPU_LDS 4 -#define HSA_MEM_HEAP_TYPE_GPU_SCRATCH 5 - -#define HSA_MEM_FLAGS_HOT_PLUGGABLE 0x00000001 -#define HSA_MEM_FLAGS_NON_VOLATILE 0x00000002 -#define HSA_MEM_FLAGS_RESERVED 0xfffffffc - struct kfd_mem_properties { struct list_head list; uint32_t heap_type; @@ -116,12 +78,6 @@ struct kfd_mem_properties { struct attribute attr; }; -#define HSA_CACHE_TYPE_DATA 0x00000001 -#define HSA_CACHE_TYPE_INSTRUCTION 0x00000002 -#define HSA_CACHE_TYPE_CPU 0x00000004 -#define HSA_CACHE_TYPE_HSACU 0x00000008 -#define HSA_CACHE_TYPE_RESERVED 0xfffffff0 - struct kfd_cache_properties { struct list_head list; uint32_t processor_id_low; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index c911b30de658..2f0b14f8f833 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -51,6 +51,7 @@ #include <drm/drm_hdcp.h> #endif #include "amdgpu_pm.h" +#include "amdgpu_atombios.h" #include "amd_shared.h" #include "amdgpu_dm_irq.h" @@ -623,7 +624,7 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params) #endif /* 
CONFIG_DRM_AMD_SECURE_DISPLAY */ /** - * dmub_aux_setconfig_reply_callback - Callback for AUX or SET_CONFIG command. + * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command. * @adev: amdgpu_device pointer * @notify: dmub notification structure * * Copies dmub notification to DM which is to be read by the AUX command * issuing thread, and also signals the event to wake up the thread. */ -void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify) +static void dmub_aux_setconfig_callback(struct amdgpu_device *adev, + struct dmub_notification *notify) { if (adev->dm.dmub_notify) memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification)); @@ -647,7 +649,8 @@ void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notific * Dmub Hpd interrupt processing callback. Gets the display index through the * link index and calls a helper to do the processing. */ -void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify) +static void dmub_hpd_callback(struct amdgpu_device *adev, + struct dmub_notification *notify) { struct amdgpu_dm_connector *aconnector; struct amdgpu_dm_connector *hpd_aconnector = NULL; @@ -704,8 +707,10 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not * to dmub interrupt handling thread * Return: true if successfully registered, false if there is an existing registration */ -bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type, -dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload) +static bool register_dmub_notify_callback(struct amdgpu_device *adev, + enum dmub_notification_type type, + dmub_notify_interrupt_callback_t callback, + bool dmub_int_thread_offload) { if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) { adev->dm.dmub_callback[type] = callback; @@ -789,8 +794,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) plink = adev->dm.dc->links[notify.link_index]; if (plink) { plink->hpd_status = - notify.hpd_status == - DP_HPD_PLUG ? true : false; + notify.hpd_status == DP_HPD_PLUG; } } queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work); @@ -1050,6 +1054,11 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) return 0; } + /* Reset DMCUB if it was previously running - before we overwrite its memory. */ + status = dmub_srv_hw_reset(dmub_srv); + if (status != DMUB_STATUS_OK) + DRM_WARN("Error resetting DMUB HW: %d\n", status); + hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data; fw_inst_const = dmub_fw->data + @@ -1453,8 +1462,21 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING) init_data.flags.edp_no_power_sequencing = true; +#ifdef CONFIG_DRM_AMD_DC_DCN + if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A) + init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true; + if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0) + init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true; +#endif + init_data.flags.power_down_display_on_boot = true; + if (check_seamless_boot_capability(adev)) { + init_data.flags.power_down_display_on_boot = false; + init_data.flags.allow_seamless_boot_optimization = true; + DRM_INFO("Seamless boot condition check passed\n"); + } + INIT_LIST_HEAD(&adev->dm.da_list); /* Display Core create. 
*/ adev->dm.dc = dc_create(&init_data); @@ -1479,8 +1501,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER) adev->dm.dc->debug.disable_stutter = true; - if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) + if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) { adev->dm.dc->debug.disable_dsc = true; + adev->dm.dc->debug.disable_dsc_edp = true; + } if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING) adev->dm.dc->debug.disable_clock_gate = true; @@ -2303,14 +2327,6 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) goto fail; } - - res = dc_validate_global_state(dc, context, false); - - if (res != DC_OK) { - DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res); - goto fail; - } - res = dc_commit_state(dc, context); fail: @@ -2561,6 +2577,23 @@ static int dm_resume(void *handle) if (amdgpu_in_reset(adev)) { dc_state = dm->cached_dc_state; + /* + * The dc->current_state is backed up into dm->cached_dc_state + * before we commit 0 streams. + * + * DC will clear link encoder assignments on the real state + * but the changes won't propagate over to the copy we made + * before the 0 streams commit. + * + * DC expects that link encoder assignments are *not* valid + * when committing a state, so as a workaround it needs to be + * cleared here. + */ + link_enc_cfg_init(dm->dc, dc_state); + + if (dc_enable_dmub_notifications(adev->dm.dc)) + amdgpu_dm_outbox_init(adev); + r = dm_dmub_hw_init(adev); if (r) DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); @@ -2572,20 +2605,11 @@ static int dm_resume(void *handle) for (i = 0; i < dc_state->stream_count; i++) { dc_state->streams[i]->mode_changed = true; - for (j = 0; j < dc_state->stream_status->plane_count; j++) { - dc_state->stream_status->plane_states[j]->update_flags.raw + for (j = 0; j < dc_state->stream_status[i].plane_count; j++) { + dc_state->stream_status[i].plane_states[j]->update_flags.raw = 0xffffffff; } } -#if defined(CONFIG_DRM_AMD_DC_DCN) - /* - * Resource allocation happens for link encoders for newer ASIC in - * dc_validate_global_state, so we need to revalidate it. - * - * This shouldn't fail (it passed once before), so warn if it does. - */ - WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK); -#endif WARN_ON(!dc_commit_state(dm->dc, dc_state)); @@ -2608,6 +2632,10 @@ static int dm_resume(void *handle) /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ dc_resource_state_construct(dm->dc, dm_state->context); + /* Re-enable outbox interrupts for DPIA. */ + if (dc_enable_dmub_notifications(adev->dm.dc)) + amdgpu_dm_outbox_init(adev); + /* Before powering on DC we need to re-initialize DMUB. 
*/ r = dm_dmub_hw_init(adev); if (r) @@ -2938,13 +2966,12 @@ void amdgpu_dm_update_connector_after_detect( aconnector->edid = (struct edid *)sink->dc_edid.raw_edid; - drm_connector_update_edid_property(connector, - aconnector->edid); if (aconnector->dc_link->aux_mode) drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, aconnector->edid); } + drm_connector_update_edid_property(connector, aconnector->edid); amdgpu_dm_update_freesync_caps(connector, aconnector->edid); update_connector_ext_caps(aconnector); } else { @@ -3012,7 +3039,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) drm_modeset_unlock_all(dev); if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) - drm_kms_helper_hotplug_event(dev); + drm_kms_helper_connector_hotplug_event(connector); } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) { if (new_connection_type == dc_connection_none && @@ -3027,7 +3054,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) drm_modeset_unlock_all(dev); if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) - drm_kms_helper_hotplug_event(dev); + drm_kms_helper_connector_hotplug_event(connector); } mutex_unlock(&aconnector->hpd_lock); @@ -3221,7 +3248,7 @@ out: dm_restore_drm_connector_state(dev, connector); drm_modeset_unlock_all(dev); - drm_kms_helper_hotplug_event(dev); + drm_kms_helper_connector_hotplug_event(connector); } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) { if (aconnector->fake_enable) @@ -3234,7 +3261,7 @@ out: dm_restore_drm_connector_state(dev, connector); drm_modeset_unlock_all(dev); - drm_kms_helper_hotplug_event(dev); + drm_kms_helper_connector_hotplug_event(connector); } } #ifdef CONFIG_DRM_AMD_DC_HDCP @@ -3909,6 +3936,9 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, caps = dm->backlight_caps[bl_idx]; dm->brightness[bl_idx] = user_brightness; + /* update scratch register */ + if (bl_idx == 0) + amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]); brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]); link = (struct dc_link *)dm->backlight_link[bl_idx]; @@ -4242,7 +4272,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } else if (dc_link_detect(link, DETECT_REASON_BOOT)) { amdgpu_dm_update_connector_after_detect(aconnector); register_backlight_device(dm, link); - + if (dm->num_of_edps) + update_connector_ext_caps(aconnector); if (psr_feature_enabled) amdgpu_dm_set_psr_caps(link); } @@ -4250,6 +4281,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } + /* + * Disable vblank IRQs aggressively for power-saving. + * + * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR + * is also supported. + */ + adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled; + /* Software is initialized. Now we can register interrupt handlers. 
*/ switch (adev->asic_type) { #if defined(CONFIG_DRM_AMD_DC_SI) @@ -6035,11 +6074,72 @@ static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, { stream->timing.flags.DSC = 0; - if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { - dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, - aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, - aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, - dsc_caps); + if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || + sink->sink_signal == SIGNAL_TYPE_EDP)) { + if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE || + sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) + dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, + aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, + aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, + dsc_caps); + } +} + +static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, + struct dc_sink *sink, struct dc_stream_state *stream, + struct dsc_dec_dpcd_caps *dsc_caps, + uint32_t max_dsc_target_bpp_limit_override) +{ + const struct dc_link_settings *verified_link_cap = NULL; + uint32_t link_bw_in_kbps; + uint32_t edp_min_bpp_x16, edp_max_bpp_x16; + struct dc *dc = sink->ctx->dc; + struct dc_dsc_bw_range bw_range = {0}; + struct dc_dsc_config dsc_cfg = {0}; + + verified_link_cap = dc_link_get_link_cap(stream->link); + link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap); + edp_min_bpp_x16 = 8 * 16; + edp_max_bpp_x16 = 8 * 16; + + if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel) + edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel; + + if (edp_max_bpp_x16 < edp_min_bpp_x16) + edp_min_bpp_x16 = edp_max_bpp_x16; + + if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0], + dc->debug.dsc_min_slice_height_override, + edp_min_bpp_x16, edp_max_bpp_x16, + dsc_caps, + &stream->timing, + &bw_range)) { + + if (bw_range.max_kbps < link_bw_in_kbps) { + if (dc_dsc_compute_config(dc->res_pool->dscs[0], + dsc_caps, + dc->debug.dsc_min_slice_height_override, + max_dsc_target_bpp_limit_override, + 0, + &stream->timing, + &dsc_cfg)) { + stream->timing.dsc_cfg = dsc_cfg; + stream->timing.flags.DSC = 1; + stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16; + } + return; + } + } + + if (dc_dsc_compute_config(dc->res_pool->dscs[0], + dsc_caps, + dc->debug.dsc_min_slice_height_override, + max_dsc_target_bpp_limit_override, + link_bw_in_kbps, + &stream->timing, + &dsc_cfg)) { + stream->timing.dsc_cfg = dsc_cfg; + stream->timing.flags.DSC = 1; } } @@ -6050,6 +6150,9 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, struct drm_connector *drm_connector = &aconnector->base; uint32_t link_bandwidth_kbps; uint32_t max_dsc_target_bpp_limit_override = 0; + struct dc *dc = sink->ctx->dc; + uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps; + uint32_t dsc_max_supported_bw_in_kbps; link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, dc_link_get_link_cap(aconnector->dc_link)); @@ -6062,17 +6165,43 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, dc_dsc_policy_set_enable_dsc_when_not_needed( aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE); - if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { + if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp && + dc->caps.edp_dsc_support && 
aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) { + + apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override); - if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], + } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { + if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { + if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], dsc_caps, aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override, max_dsc_target_bpp_limit_override, link_bandwidth_kbps, &stream->timing, &stream->timing.dsc_cfg)) { - stream->timing.flags.DSC = 1; - DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name); + stream->timing.flags.DSC = 1; + DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", + __func__, drm_connector->name); + } + } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { + timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing); + max_supported_bw_in_kbps = link_bandwidth_kbps; + dsc_max_supported_bw_in_kbps = link_bandwidth_kbps; + + if (timing_bw_in_kbps > max_supported_bw_in_kbps && + max_supported_bw_in_kbps > 0 && + dsc_max_supported_bw_in_kbps > 0) + if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], + dsc_caps, + aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override, + max_dsc_target_bpp_limit_override, + dsc_max_supported_bw_in_kbps, + &stream->timing, + &stream->timing.dsc_cfg)) { + stream->timing.flags.DSC = 1; + DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n", + __func__, drm_connector->name); + } } } @@ -8216,15 +8345,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, break; case DRM_MODE_CONNECTOR_DisplayPort: aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; - if (link->is_dig_mapping_flexible && - link->dc->res_pool->funcs->link_encs_assign) { - link->link_enc = - link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); - if (!link->link_enc) - link->link_enc = - link_enc_cfg_get_next_avail_link_enc(link->ctx->dc); - } - + link->link_enc = dp_get_link_enc(link); + ASSERT(link->link_enc); if (link->link_enc) aconnector->base.ycbcr_420_allowed = link->link_enc->features.dp_ycbcr420_supported ? true : false; @@ -10627,6 +10749,24 @@ static int dm_update_plane_state(struct dc *dc, return ret; } +static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state, + int *src_w, int *src_h) +{ + switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { + case DRM_MODE_ROTATE_90: + case DRM_MODE_ROTATE_270: + *src_w = plane_state->src_h >> 16; + *src_h = plane_state->src_w >> 16; + break; + case DRM_MODE_ROTATE_0: + case DRM_MODE_ROTATE_180: + default: + *src_w = plane_state->src_w >> 16; + *src_h = plane_state->src_h >> 16; + break; + } +} + static int dm_check_crtc_cursor(struct drm_atomic_state *state, struct drm_crtc *crtc, struct drm_crtc_state *new_crtc_state) @@ -10635,6 +10775,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state, struct drm_plane_state *new_cursor_state, *new_underlying_state; int i; int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h; + int cursor_src_w, cursor_src_h; + int underlying_src_w, underlying_src_h; /* On DCE and DCN there is no dedicated hardware cursor plane. 
We get a * cursor per pipe but it's going to inherit the scaling and @@ -10646,10 +10788,9 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state, return 0; } - cursor_scale_w = new_cursor_state->crtc_w * 1000 / - (new_cursor_state->src_w >> 16); - cursor_scale_h = new_cursor_state->crtc_h * 1000 / - (new_cursor_state->src_h >> 16); + dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h); + cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w; + cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h; for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) { /* Narrow down to non-cursor planes on the same CRTC as the cursor */ @@ -10660,10 +10801,10 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state, if (!new_underlying_state->fb) continue; - underlying_scale_w = new_underlying_state->crtc_w * 1000 / - (new_underlying_state->src_w >> 16); - underlying_scale_h = new_underlying_state->crtc_h * 1000 / - (new_underlying_state->src_h >> 16); + dm_get_oriented_plane_size(new_underlying_state, + &underlying_src_w, &underlying_src_h); + underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w; + underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h; if (cursor_scale_w != underlying_scale_w || cursor_scale_h != underlying_scale_h) { @@ -10758,8 +10899,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, trace_amdgpu_dm_atomic_check_begin(state); ret = drm_atomic_helper_check_modeset(dev, state); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n"); goto fail; + } /* Check connector changes */ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { @@ -10775,6 +10918,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc); if (IS_ERR(new_crtc_state)) { + DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n"); ret = PTR_ERR(new_crtc_state); goto fail; } @@ -10789,8 +10933,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { ret = add_affected_mst_dsc_crtcs(state, crtc); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n"); goto fail; + } } } } @@ -10805,19 +10951,25 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, continue; ret = amdgpu_dm_verify_lut_sizes(new_crtc_state); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n"); goto fail; + } if (!new_crtc_state->enable) continue; ret = drm_atomic_add_affected_connectors(state, crtc); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n"); goto fail; + } ret = drm_atomic_add_affected_planes(state, crtc); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n"); goto fail; + } if (dm_old_crtc_state->dsc_force_changed) new_crtc_state->mode_changed = true; @@ -10854,6 +11006,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (IS_ERR(new_plane_state)) { ret = PTR_ERR(new_plane_state); + DRM_DEBUG_DRIVER("new_plane_state is BAD\n"); goto fail; } } @@ -10866,8 +11019,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, new_plane_state, false, &lock_and_validation_needed); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n"); goto fail; + } } 
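For illustration only, a minimal standalone sketch (not driver code) of the rotation-aware scale comparison that dm_get_oriented_plane_size() enables in dm_check_crtc_cursor() above: for DRM_MODE_ROTATE_90/270 the source width and height swap before the per-axis scale factors are compared. All sizes below are made up; only the arithmetic mirrors the patch.

#include <stdio.h>

struct plane {
	int crtc_w, crtc_h;   /* destination size on the CRTC */
	int src_w, src_h;     /* source size in the framebuffer */
	int rotated_90_270;   /* nonzero if rotated by 90 or 270 degrees */
};

static void oriented_src_size(const struct plane *p, int *w, int *h)
{
	/* The same swap dm_get_oriented_plane_size() performs. */
	*w = p->rotated_90_270 ? p->src_h : p->src_w;
	*h = p->rotated_90_270 ? p->src_w : p->src_h;
}

int main(void)
{
	/* A 1920x1080 plane scanned out 90-degree rotated into 1080x1920. */
	struct plane underlying = { 1080, 1920, 1920, 1080, 1 };
	struct plane cursor = { 64, 64, 64, 64, 0 };
	int uw, uh, cw, ch;

	oriented_src_size(&underlying, &uw, &uh);
	oriented_src_size(&cursor, &cw, &ch);

	/* Scale factors in units of 1/1000, as in dm_check_crtc_cursor(). */
	printf("underlying scale %dx%d, cursor scale %dx%d\n",
	       underlying.crtc_w * 1000 / uw, underlying.crtc_h * 1000 / uh,
	       cursor.crtc_w * 1000 / cw, cursor.crtc_h * 1000 / ch);
	return 0;
}

Without the swap, the rotated underlying plane would compute as 562x1777 against the cursor's 1000x1000 and the check would reject a configuration the hardware actually scans out 1:1.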
/* Disable all crtcs which require disable */ @@ -10877,8 +11032,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, new_crtc_state, false, &lock_and_validation_needed); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n"); goto fail; + } } /* Enable all crtcs which require enable */ @@ -10888,8 +11045,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, new_crtc_state, true, &lock_and_validation_needed); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n"); goto fail; + } } /* Add new/modified planes */ @@ -10899,20 +11058,26 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, new_plane_state, true, &lock_and_validation_needed); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n"); goto fail; + } } /* Run this here since we want to validate the streams we created */ ret = drm_atomic_helper_check_planes(dev, state); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n"); goto fail; + } /* Check cursor planes scaling */ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { ret = dm_check_crtc_cursor(state, crtc, new_crtc_state); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n"); goto fail; + } } if (state->legacy_cursor_update) { @@ -10999,20 +11164,28 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, */ if (lock_and_validation_needed) { ret = dm_atomic_get_state(state, &dm_state); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n"); goto fail; + } ret = do_aquire_global_lock(dev, state); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n"); goto fail; + } #if defined(CONFIG_DRM_AMD_DC_DCN) - if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) + if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) { + DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); goto fail; + } ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n"); goto fail; + } #endif /* @@ -11022,12 +11195,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, * to get stuck in an infinite loop and hang eventually. 
*/ ret = drm_dp_mst_atomic_check(state); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n"); goto fail; - status = dc_validate_global_state(dc, dm_state->context, false); + } + status = dc_validate_global_state(dc, dm_state->context, true); if (status != DC_OK) { - drm_dbg_atomic(dev, - "DC global validation failure: %s (%d)", + DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)", dc_status_to_str(status), status); ret = -EINVAL; goto fail; @@ -11149,7 +11323,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header); input->offset = offset; input->length = length; - input->total_length = total_length; + input->cea_total_length = total_length; memcpy(input->payload, data, length); res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd); @@ -11456,8 +11630,10 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address, return value; } -int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx, - uint8_t status_type, uint32_t *operation_result) +static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, + struct dc_context *ctx, + uint8_t status_type, + uint32_t *operation_result) { struct amdgpu_device *adev = ctx->driver_context; int return_status = -1; @@ -11528,3 +11704,24 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS, (uint32_t *)operation_result); } + +/* + * Check whether seamless boot is supported. + * + * So far we only support seamless boot on CHIP_VANGOGH. + * If everything goes well, we may consider expanding + * seamless boot to other ASICs. + */ +bool check_seamless_boot_capability(struct amdgpu_device *adev) +{ + switch (adev->asic_type) { + case CHIP_VANGOGH: + if (!adev->mman.keep_stolen_vga_memory) + return true; + break; + default: + break; + } + + return false; +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 37e61a88d49e..c98e402eab0c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -50,9 +50,9 @@ #define AMDGPU_DMUB_NOTIFICATION_MAX 5 -/** +/* * DMUB Async to Sync Mechanism Status - **/ + */ #define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1 #define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2 #define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3 @@ -731,4 +731,7 @@ extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs; int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx, unsigned int link_index, void *payload, void *operation_result); + +bool check_seamless_boot_capability(struct amdgpu_device *adev); + #endif /* __AMDGPU_DM_H__ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index a022e5bb30a5..a71177305bcd 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -285,8 +285,12 @@ static int __set_input_tf(struct dc_transfer_func *func, } /** + * amdgpu_dm_verify_lut_sizes + * @crtc_state: the DRM CRTC state + * * Verifies that the Degamma and Gamma LUTs attached to the |crtc_state| are of * the expected size. + * * Returns 0 on success. 
*/ int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index cce062adc439..8a441a22c46e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -314,6 +314,14 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) ret = -EINVAL; goto cleanup; } + + if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) && + (aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) { + DRM_DEBUG_DRIVER("No DP connector available for CRC source\n"); + ret = -EINVAL; + goto cleanup; + } + } #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 9d43ecb1f692..26719efa5396 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -824,6 +824,48 @@ static int dmub_fw_state_show(struct seq_file *m, void *data) return seq_write(m, state_base, state_size); } +/* psr_capability_show() - show eDP panel PSR capability + * + * The read function: psr_capability_show + * Shows if sink has PSR capability or not. + * If yes - the PSR version is appended + * + * cat /sys/kernel/debug/dri/0/eDP-X/psr_capability + * + * Expected output: + * "Sink support: no\n" - if panel doesn't support PSR + * "Sink support: yes [0x01]\n" - if panel supports PSR1 + * "Driver support: no\n" - if driver doesn't support PSR + * "Driver support: yes [0x01]\n" - if driver supports PSR1 + */ +static int psr_capability_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct dc_link *link = aconnector->dc_link; + + if (!link) + return -ENODEV; + + if (link->type == dc_connection_none) + return -ENODEV; + + if (!(link->connector_signal & SIGNAL_TYPE_EDP)) + return -ENODEV; + + seq_printf(m, "Sink support: %s", yesno(link->dpcd_caps.psr_caps.psr_version != 0)); + if (link->dpcd_caps.psr_caps.psr_version) + seq_printf(m, " [0x%02x]", link->dpcd_caps.psr_caps.psr_version); + seq_puts(m, "\n"); + + seq_printf(m, "Driver support: %s", yesno(link->psr_settings.psr_feature_enabled)); + if (link->psr_settings.psr_version) + seq_printf(m, " [0x%02x]", link->psr_settings.psr_version); + seq_puts(m, "\n"); + + return 0; +} + /* * Returns the current and maximum output bpc for the connector.
* Example usage: cat /sys/kernel/debug/dri/0/DP-1/output_bpc @@ -1243,7 +1285,7 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf, dm_restore_drm_connector_state(dev, connector); drm_modeset_unlock_all(dev); - drm_kms_helper_hotplug_event(dev); + drm_kms_helper_connector_hotplug_event(connector); } else if (param[0] == 0) { if (!aconnector->dc_link) goto unlock; @@ -1265,7 +1307,7 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf, dm_restore_drm_connector_state(dev, connector); drm_modeset_unlock_all(dev); - drm_kms_helper_hotplug_event(dev); + drm_kms_helper_connector_hotplug_event(connector); } unlock: @@ -2467,6 +2509,7 @@ DEFINE_SHOW_ATTRIBUTE(dp_lttpr_status); DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability); #endif DEFINE_SHOW_ATTRIBUTE(internal_display); +DEFINE_SHOW_ATTRIBUTE(psr_capability); static const struct file_operations dp_dsc_clock_en_debugfs_fops = { .owner = THIS_MODULE, @@ -2712,6 +2755,138 @@ static const struct { {"internal_display", &internal_display_fops} }; +/* + * Returns supported customized link rates by this eDP panel. + * Example usage: cat /sys/kernel/debug/dri/0/eDP-x/ilr_setting + */ +static int edp_ilr_show(struct seq_file *m, void *unused) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(m->private); + struct dc_link *link = aconnector->dc_link; + uint8_t supported_link_rates[16]; + uint32_t link_rate_in_khz; + uint32_t entry = 0; + uint8_t dpcd_rev; + + memset(supported_link_rates, 0, sizeof(supported_link_rates)); + dm_helpers_dp_read_dpcd(link->ctx, link, DP_SUPPORTED_LINK_RATES, + supported_link_rates, sizeof(supported_link_rates)); + + dpcd_rev = link->dpcd_caps.dpcd_rev.raw; + + if (dpcd_rev >= DP_DPCD_REV_13 && + (supported_link_rates[entry+1] != 0 || supported_link_rates[entry] != 0)) { + + for (entry = 0; entry < 16; entry += 2) { + link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 + + supported_link_rates[entry]) * 200; + seq_printf(m, "[%d] %d kHz\n", entry/2, link_rate_in_khz); + } + } else { + seq_printf(m, "ILR is not supported by this eDP panel.\n"); + } + + return 0; +} + +/* + * Set supported customized link rate to eDP panel. + * + * echo <lane_count> <link_rate option> > ilr_setting + * + * for example, supported ILR : [0] 1620000 kHz [1] 2160000 kHz [2] 2430000 kHz ... 
+ * echo 4 1 > /sys/kernel/debug/dri/0/eDP-x/ilr_setting + * to set 4 lanes and 2.16 GHz + */ +static ssize_t edp_ilr_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_dm_connector *connector = file_inode(f)->i_private; + struct dc_link *link = connector->dc_link; + struct amdgpu_device *adev = drm_to_adev(connector->base.dev); + struct dc *dc = (struct dc *)link->dc; + struct dc_link_settings prefer_link_settings; + char *wr_buf = NULL; + const uint32_t wr_buf_size = 40; + /* 0: lane_count; 1: link_rate */ + int max_param_num = 2; + uint8_t param_nums = 0; + long param[2]; + bool valid_input = true; + + if (size == 0) + return -EINVAL; + + wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); + if (!wr_buf) + return -ENOMEM; + + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, + (long *)param, buf, + max_param_num, + &param_nums)) { + kfree(wr_buf); + return -EINVAL; + } + + if (param_nums <= 0) { + kfree(wr_buf); + return -EINVAL; + } + + switch (param[0]) { + case LANE_COUNT_ONE: + case LANE_COUNT_TWO: + case LANE_COUNT_FOUR: + break; + default: + valid_input = false; + break; + } + + if (param[1] >= link->dpcd_caps.edp_supported_link_rates_count) + valid_input = false; + + if (!valid_input) { + kfree(wr_buf); + DRM_DEBUG_DRIVER("Invalid Input value. No HW will be programmed\n"); + prefer_link_settings.use_link_rate_set = false; + dc_link_set_preferred_training_settings(dc, NULL, NULL, link, true); + return size; + } + + /* save user force lane_count, link_rate to preferred settings + * spread spectrum will not be changed + */ + prefer_link_settings.link_spread = link->cur_link_settings.link_spread; + prefer_link_settings.lane_count = param[0]; + prefer_link_settings.use_link_rate_set = true; + prefer_link_settings.link_rate_set = param[1]; + prefer_link_settings.link_rate = link->dpcd_caps.edp_supported_link_rates[param[1]]; + + mutex_lock(&adev->dm.dc_lock); + dc_link_set_preferred_training_settings(dc, &prefer_link_settings, + NULL, link, false); + mutex_unlock(&adev->dm.dc_lock); + + kfree(wr_buf); + return size; +} + +static int edp_ilr_open(struct inode *inode, struct file *file) +{ + return single_open(file, edp_ilr_show, inode->i_private); +} + +static const struct file_operations edp_ilr_debugfs_fops = { + .owner = THIS_MODULE, + .open = edp_ilr_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = edp_ilr_write +}; + void connector_debugfs_init(struct amdgpu_dm_connector *connector) { int i; @@ -2726,11 +2901,14 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector) } } if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) { + debugfs_create_file_unsafe("psr_capability", 0444, dir, connector, &psr_capability_fops); debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops); debugfs_create_file("amdgpu_current_backlight_pwm", 0444, dir, connector, &current_backlight_fops); debugfs_create_file("amdgpu_target_backlight_pwm", 0444, dir, connector, &target_backlight_fops); + debugfs_create_file("ilr_setting", 0644, dir, connector, + &edp_ilr_debugfs_fops); } for (i = 0; i < ARRAY_SIZE(connector_debugfs_entries); i++) { @@ -2909,10 +3087,13 @@ static int crc_win_update_set(void *data, u64 val) struct amdgpu_device *adev = drm_to_adev(new_crtc->dev); struct crc_rd_work *crc_rd_wrk = adev->dm.crc_rd_wrk; + if (!crc_rd_wrk) + return 0; + if (val) { spin_lock_irq(&adev_to_drm(adev)->event_lock); spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock); - if (crc_rd_wrk &&
crc_rd_wrk->crtc) { + if (crc_rd_wrk->crtc) { old_crtc = crc_rd_wrk->crtc; old_acrtc = to_amdgpu_crtc(old_crtc); } @@ -3190,6 +3371,32 @@ static int disable_hpd_get(void *data, u64 *val) DEFINE_DEBUGFS_ATTRIBUTE(disable_hpd_ops, disable_hpd_get, disable_hpd_set, "%llu\n"); +#if defined(CONFIG_DRM_AMD_DC_DCN) +/* + * Temporary w/a to force sst sequence in M42D DP2 mst receiver + * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dp_set_mst_en_for_sst + */ +static int dp_force_sst_set(void *data, u64 val) +{ + struct amdgpu_device *adev = data; + + adev->dm.dc->debug.set_mst_en_for_sst = val; + + return 0; +} + +static int dp_force_sst_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = data; + + *val = adev->dm.dc->debug.set_mst_en_for_sst; + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(dp_set_mst_en_for_sst_ops, dp_force_sst_get, + dp_force_sst_set, "%llu\n"); +#endif + /* * Sets the DC visual confirm debug option from the given string. * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_visual_confirm @@ -3299,6 +3506,10 @@ void dtn_debugfs_init(struct amdgpu_device *adev) adev, &mst_topo_fops); debugfs_create_file("amdgpu_dm_dtn_log", 0644, root, adev, &dtn_log_fops); +#if defined(CONFIG_DRM_AMD_DC_DCN) + debugfs_create_file("amdgpu_dm_dp_set_mst_en_for_sst", 0644, root, adev, + &dp_set_mst_en_for_sst_ops); +#endif debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev, &visual_confirm_fops); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 8cbeeb7c986d..29f07c26d080 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -83,16 +83,17 @@ static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps) * void * */ enum dc_edid_status dm_helpers_parse_edid_caps( - struct dc_context *ctx, + struct dc_link *link, const struct dc_edid *edid, struct dc_edid_caps *edid_caps) { + struct amdgpu_dm_connector *aconnector = link->priv; + struct drm_connector *connector = &aconnector->base; struct edid *edid_buf = (struct edid *) edid->raw_edid; struct cea_sad *sads; int sad_count = -1; int sadb_count = -1; int i = 0; - int j = 0; uint8_t *sadb = NULL; enum dc_edid_status result = EDID_OK; @@ -111,23 +112,11 @@ enum dc_edid_status dm_helpers_parse_edid_caps( edid_caps->manufacture_week = edid_buf->mfg_week; edid_caps->manufacture_year = edid_buf->mfg_year; - /* One of the four detailed_timings stores the monitor name. It's - * stored in an array of length 13. 
*/ - for (i = 0; i < 4; i++) { - if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) { - while (j < 13 && edid_buf->detailed_timings[i].data.other_data.data.str.str[j]) { - if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n') - break; - - edid_caps->display_name[j] = - edid_buf->detailed_timings[i].data.other_data.data.str.str[j]; - j++; - } - } - } + drm_edid_get_monitor_name(edid_buf, + edid_caps->display_name, + AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS); - edid_caps->edid_hdmi = drm_detect_hdmi_monitor( - (struct edid *) edid->raw_edid); + edid_caps->edid_hdmi = connector->display_info.is_hdmi; sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads); if (sad_count <= 0) @@ -584,9 +573,18 @@ bool dm_helpers_dp_write_dsc_enable( ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1); } - if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT) { - ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); - DC_LOG_DC("Send DSC %s to sst display\n", enable_dsc ? "enable" : "disable"); + if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { +#endif + ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); + DC_LOG_DC("Send DSC %s to SST RX\n", enable_dsc ? "enable" : "disable"); +#if defined(CONFIG_DRM_AMD_DC_DCN) + } else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { + ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); + DC_LOG_DC("Send DSC %s to DP-HDMI PCON\n", enable_dsc ? "enable" : "disable"); + } +#endif } return (ret > 0); @@ -650,14 +648,8 @@ enum dc_edid_status dm_helpers_read_local_edid( /* We don't need the original edid anymore */ kfree(edid); - /* connector->display_info will be parsed from EDID and saved - * into drm_connector->display_info from edid by call stack - * below: - * drm_parse_ycbcr420_deep_color_info - * drm_parse_hdmi_forum_vsdb - * drm_parse_cea_ext - * drm_add_display_info - * drm_connector_update_edid_property + /* connector->display_info is parsed from EDID and saved + * into drm_connector->display_info * * drm_connector->display_info will be used by amdgpu_dm funcs, * like fill_stream_properties_from_drm_display_mode @@ -665,7 +657,7 @@ enum dc_edid_status dm_helpers_read_local_edid( amdgpu_dm_update_connector_after_detect(aconnector); edid_status = dm_helpers_parse_edid_caps( - ctx, + link, &sink->dc_edid, &sink->edid_caps); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 32a5ce09a62a..cc34a35d0bcb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -36,6 +36,8 @@ #include "dm_helpers.h" #include "dc_link_ddc.h" +#include "ddc_service_types.h" +#include "dpcd_defs.h" #include "i2caux_interface.h" #include "dmub_cmd.h" @@ -157,6 +159,16 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { }; #if defined(CONFIG_DRM_AMD_DC_DCN) +static bool needs_dsc_aux_workaround(struct dc_link *link) +{ + if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && + (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) && + link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2) + return true; + + return false; +} + static 
bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector) { struct dc_sink *dc_sink = aconnector->dc_sink; @@ -166,7 +178,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto u8 *dsc_branch_dec_caps = NULL; aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port); -#if defined(CONFIG_HP_HOOK_WORKAROUND) + /* * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs * because it only check the dsc/fec caps of the "port variable" and not the dock @@ -176,10 +188,10 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto * Workaround: explicitly check the use case above and use the mst dock's aux as dsc_aux * */ - - if (!aconnector->dsc_aux && !port->parent->port_parent) + if (!aconnector->dsc_aux && !port->parent->port_parent && + needs_dsc_aux_workaround(aconnector->dc_link)) aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux; -#endif + if (!aconnector->dsc_aux) return false; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index c022e56f9459..c510638b4f99 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -26,6 +26,73 @@ #include "amdgpu_dm_psr.h" #include "dc.h" #include "dm_helpers.h" +#include "amdgpu_dm.h" + +static bool link_get_psr_caps(struct dc_link *link) +{ + uint8_t psr_dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE]; + uint8_t edp_rev_dpcd_data; + + + + if (!dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT, + psr_dpcd_data, sizeof(psr_dpcd_data))) + return false; + + if (!dm_helpers_dp_read_dpcd(NULL, link, DP_EDP_DPCD_REV, + &edp_rev_dpcd_data, sizeof(edp_rev_dpcd_data))) + return false; + + link->dpcd_caps.psr_caps.psr_version = psr_dpcd_data[0]; + link->dpcd_caps.psr_caps.edp_revision = edp_rev_dpcd_data; + +#ifdef CONFIG_DRM_AMD_DC_DCN + if (link->dpcd_caps.psr_caps.psr_version > 0x1) { + uint8_t alpm_dpcd_data; + uint8_t su_granularity_dpcd_data; + + if (!dm_helpers_dp_read_dpcd(NULL, link, DP_RECEIVER_ALPM_CAP, + &alpm_dpcd_data, sizeof(alpm_dpcd_data))) + return false; + + if (!dm_helpers_dp_read_dpcd(NULL, link, DP_PSR2_SU_Y_GRANULARITY, + &su_granularity_dpcd_data, sizeof(su_granularity_dpcd_data))) + return false; + + link->dpcd_caps.psr_caps.y_coordinate_required = psr_dpcd_data[1] & DP_PSR2_SU_Y_COORDINATE_REQUIRED; + link->dpcd_caps.psr_caps.su_granularity_required = psr_dpcd_data[1] & DP_PSR2_SU_GRANULARITY_REQUIRED; + + link->dpcd_caps.psr_caps.alpm_cap = alpm_dpcd_data & DP_ALPM_CAP; + link->dpcd_caps.psr_caps.standby_support = alpm_dpcd_data & (1 << 1); + + link->dpcd_caps.psr_caps.su_y_granularity = su_granularity_dpcd_data; + } +#endif + return true; +} + +#ifdef CONFIG_DRM_AMD_DC_DCN +static bool link_supports_psrsu(struct dc_link *link) +{ + struct dc *dc = link->ctx->dc; + + if (!dc->caps.dmcub_support) + return false; + + if (dc->ctx->dce_version < DCN_VERSION_3_1) + return false; + + if (!link->dpcd_caps.psr_caps.alpm_cap || + !link->dpcd_caps.psr_caps.y_coordinate_required) + return false; + + if (link->dpcd_caps.psr_caps.su_granularity_required && + !link->dpcd_caps.psr_caps.su_y_granularity) + return false; + + return true; +} +#endif /* * amdgpu_dm_set_psr_caps() - set link psr capabilities @@ -34,26 +101,34 @@ */ void amdgpu_dm_set_psr_caps(struct dc_link *link) { - uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE]; - if (!(link->connector_signal & SIGNAL_TYPE_EDP)) return; + if (link->type == dc_connection_none) 
return; - if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT, - dpcd_data, sizeof(dpcd_data))) { - link->dpcd_caps.psr_caps.psr_version = dpcd_data[0]; - - if (dpcd_data[0] == 0) { - link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; - link->psr_settings.psr_feature_enabled = false; - } else { + + if (!link_get_psr_caps(link)) { + DRM_ERROR("amdgpu: Failed to read PSR Caps!\n"); + return; + } + + if (link->dpcd_caps.psr_caps.psr_version == 0) { + link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; + link->psr_settings.psr_feature_enabled = false; + + } else { +#ifdef CONFIG_DRM_AMD_DC_DCN + if (link_supports_psrsu(link)) + link->psr_settings.psr_version = DC_PSR_VERSION_SU_1; + else +#endif link->psr_settings.psr_version = DC_PSR_VERSION_1; - link->psr_settings.psr_feature_enabled = true; - } - DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled); + link->psr_settings.psr_feature_enabled = true; } + + DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled); + } /* diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index a4bef4364afd..1e385d55e7fb 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -2995,7 +2995,7 @@ static bool bios_parser2_construct( &bp->object_info_tbl.revision); if (bp->object_info_tbl.revision.major == 1 - && bp->object_info_tbl.revision.minor >= 4) { + && bp->object_info_tbl.revision.minor == 4) { struct display_object_info_table_v1_4 *tbl_v1_4; tbl_v1_4 = GET_IMAGE(struct display_object_info_table_v1_4, @@ -3004,8 +3004,10 @@ static bool bios_parser2_construct( return false; bp->object_info_tbl.v1_4 = tbl_v1_4; - } else + } else { + ASSERT(0); return false; + } dal_firmware_parser_init_cmd_tbl(bp); dal_bios_parser_init_cmd_tbl_helper2(&bp->cmd_helper, dce_version); diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index 6b248cd2a461..ec19678a0702 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -739,7 +739,9 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v, hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_100hz); } -unsigned int get_highest_allowed_voltage_level(uint32_t chip_family, uint32_t hw_internal_rev, uint32_t pci_revision_id) +static unsigned int get_highest_allowed_voltage_level(uint32_t chip_family, + uint32_t hw_internal_rev, + uint32_t pci_revision_id) { /* for low power RV2 variants, the highest voltage level we want is 0 */ if ((chip_family == FAMILY_RV) && @@ -763,7 +765,7 @@ unsigned int get_highest_allowed_voltage_level(uint32_t chip_family, uint32_t hw return 4; } -bool dcn_validate_bandwidth( +bool dcn10_validate_bandwidth( struct dc *dc, struct dc_state *context, bool fast_validate) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 26f96ee32472..9200c8ce02ba 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -308,8 +308,7 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base) case FAMILY_NV: if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) { dcn3_clk_mgr_destroy(clk_mgr); - } - if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) { + } else if 
(ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) { dcn3_clk_mgr_destroy(clk_mgr); } if (ASICREV_IS_BEIGE_GOBY_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c index 76ec8ec92efd..60761ff3cbf1 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c @@ -34,7 +34,7 @@ #include "rv1_clk_mgr_vbios_smu.h" #include "rv1_clk_mgr_clk.h" -void rv1_init_clocks(struct clk_mgr *clk_mgr) +static void rv1_init_clocks(struct clk_mgr *clk_mgr) { memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c index fe18bb9e19aa..06bab24d8e27 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c @@ -28,6 +28,8 @@ #include "reg_helper.h" #include <linux/delay.h> +#include "rv1_clk_mgr_vbios_smu.h" + #define MAX_INSTANCE 5 #define MAX_SEGMENT 5 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 2108bff49d4e..9f35f2e8f971 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -409,7 +409,7 @@ void dcn2_init_clocks(struct clk_mgr *clk_mgr) clk_mgr->clks.prev_p_state_change_support = true; } -void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base) +static void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct pp_smu_funcs_nv *pp_smu = NULL; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c index db9950244c7b..fbdd0a92d146 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c @@ -74,42 +74,6 @@ static const struct clk_mgr_mask clk_mgr_mask = { CLK_COMMON_MASK_SH_LIST_DCN201_BASE(_MASK) }; -void dcn201_update_clocks_vbios(struct clk_mgr *clk_mgr, - struct dc_state *context, - bool safe_to_lower) -{ - struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; - - bool update_dppclk = false; - bool update_dispclk = false; - - if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->clks.dppclk_khz)) { - clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz; - update_dppclk = true; - } - - if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz)) { - clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz; - update_dispclk = true; - } - - if (update_dppclk || update_dispclk) { - struct bp_set_dce_clock_parameters dce_clk_params; - struct dc_bios *bp = clk_mgr->ctx->dc_bios; - - if (update_dispclk) { - memset(&dce_clk_params, 0, sizeof(dce_clk_params)); - dce_clk_params.target_clock_frequency = new_clocks->dispclk_khz; - dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; - dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK; - bp->funcs->set_dce_clock(bp, &dce_clk_params); - } - /* currently there is no DCECLOCK_TYPE_DPPCLK type defined in VBIOS interface. 
- * vbios program DPPCLK to the same DispCLK limitation - */ - } -} - static void dcn201_init_clocks(struct clk_mgr *clk_mgr) { memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); @@ -126,10 +90,8 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base, struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; struct dc *dc = clk_mgr_base->ctx->dc; - int display_count; bool update_dppclk = false; bool update_dispclk = false; - bool enter_display_off = false; bool dpp_clock_lowered = false; bool force_reset = false; bool p_state_change_support; @@ -145,10 +107,7 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base, dcn2_read_clocks_from_hw_dentist(clk_mgr_base); } - display_count = clk_mgr_helper_get_active_display_cnt(dc, context); - - if (display_count == 0) - enter_display_off = true; + clk_mgr_helper_get_active_display_cnt(dc, context); if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz)) clk_mgr_base->clks.phyclk_khz = new_clocks->phyclk_khz; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index ac2d4c4f04e4..fbda42313bfe 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -56,9 +56,7 @@ /* TODO: evaluate how to lower or disable all dcn clocks in screen off case */ -int rn_get_active_display_cnt_wa( - struct dc *dc, - struct dc_state *context) +static int rn_get_active_display_cnt_wa(struct dc *dc, struct dc_state *context) { int i, display_count; bool tmds_present = false; @@ -77,7 +75,8 @@ int rn_get_active_display_cnt_wa( const struct dc_link *link = dc->links[i]; /* abusing the fact that the dig and phy are coupled to see if the phy is enabled */ - if (link->link_enc->funcs->is_dig_enabled(link->link_enc)) + if (link->link_enc->funcs->is_dig_enabled && + link->link_enc->funcs->is_dig_enabled(link->link_enc)) display_count++; } @@ -88,7 +87,7 @@ int rn_get_active_display_cnt_wa( return display_count; } -void rn_set_low_power_state(struct clk_mgr *clk_mgr_base) +static void rn_set_low_power_state(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); @@ -122,7 +121,7 @@ static void rn_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, } -void rn_update_clocks(struct clk_mgr *clk_mgr_base, +static void rn_update_clocks(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool safe_to_lower) { @@ -437,25 +436,14 @@ static void rn_dump_clk_registers(struct clk_state_registers_and_bypass *regs_an } } -/* This function produce translated logical clk state values*/ -void rn_get_clk_states(struct clk_mgr *clk_mgr_base, struct clk_states *s) -{ - struct clk_state_registers_and_bypass sb = { 0 }; - struct clk_log_info log_info = { 0 }; - - rn_dump_clk_registers(&sb, clk_mgr_base, &log_info); - - s->dprefclk_khz = sb.dprefclk * 1000; -} - -void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base) +static void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); rn_vbios_smu_enable_pme_wa(clk_mgr); } -void rn_init_clocks(struct clk_mgr *clk_mgr) +static void rn_init_clocks(struct clk_mgr *clk_mgr) { memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); // Assumption is that boot state always supports pstate diff --git 
a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c index 9f7eed6688c4..8161a6ae410d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c @@ -33,6 +33,8 @@ #include "mp/mp_12_0_0_offset.h" #include "mp/mp_12_0_0_sh_mask.h" +#include "rn_clk_mgr_vbios_smu.h" + #define REG(reg_name) \ (MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) @@ -86,7 +88,9 @@ static uint32_t rn_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsig } -int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, unsigned int msg_id, unsigned int param) +static int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, + unsigned int msg_id, + unsigned int param) { uint32_t result; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index 1861a147a7fa..f977f29907df 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -252,6 +252,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base, bool update_dispclk = false; bool enter_display_off = false; bool dpp_clock_lowered = false; + bool update_pstate_unsupported_clk = false; struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; bool force_reset = false; bool update_uclk = false; @@ -299,13 +300,28 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base, clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support; total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context); p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0); - if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) { + + // invalidate the current P-State forced min in certain dc_mode_softmax situations + if (dc->clk_mgr->dc_mode_softmax_enabled && safe_to_lower && !p_state_change_support) { + if ((new_clocks->dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) != + (clk_mgr_base->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)) + update_pstate_unsupported_clk = true; + } + + if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support) || + update_pstate_unsupported_clk) { clk_mgr_base->clks.p_state_change_support = p_state_change_support; /* to disable P-State switching, set UCLK min = max */ - if (!clk_mgr_base->clks.p_state_change_support) - dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, + if (!clk_mgr_base->clks.p_state_change_support) { + if (dc->clk_mgr->dc_mode_softmax_enabled && + new_clocks->dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) + dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, + dc->clk_mgr->bw_params->dc_mode_softmax_memclk); + else + dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz); + } } /* Always update saved value, even if new value not set due to P-State switching unsupported */ @@ -421,6 +437,24 @@ static void dcn3_set_hard_max_memclk(struct clk_mgr *clk_mgr_base) clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz); } 
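The dcn3_update_clocks() hunk above boils down to one decision: when P-State switching must be disabled, pin the UCLK hard minimum to the DC-mode softmax rather than the highest DPM level, but only while the requested DRAM clock fits under that softmax. Distilled into a standalone sketch, where the helper name and parameters are illustrative assumptions rather than driver API:

	#include <stdbool.h>

	/*
	 * Hypothetical distillation of the UCLK-floor choice added to
	 * dcn3_update_clocks(); names are illustrative. Units follow the
	 * driver: dramclk in kHz, softmax and DPM entries in MHz.
	 */
	static unsigned int pick_uclk_floor_mhz(bool softmax_enabled,
						unsigned int dramclk_khz,
						unsigned int softmax_mhz,
						unsigned int max_dpm_mhz)
	{
		if (softmax_enabled && dramclk_khz <= softmax_mhz * 1000)
			return softmax_mhz;	/* honor the DC-mode soft limit */

		return max_dpm_mhz;	/* prior behavior: pin to the top DPM level */
	}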
+static void dcn3_set_max_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + if (!clk_mgr->smu_present) + return; + + dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz); +} +static void dcn3_set_min_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + if (!clk_mgr->smu_present) + return; + dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz); +} + /* Get current memclk states, update bounding box */ static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) { @@ -436,6 +470,8 @@ static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) &num_levels); clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1; + clk_mgr_base->bw_params->dc_mode_softmax_memclk = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK); + /* Refresh bounding box */ clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box( clk_mgr->base.ctx->dc, clk_mgr_base->bw_params); @@ -505,6 +541,8 @@ static struct clk_mgr_funcs dcn3_funcs = { .notify_wm_ranges = dcn3_notify_wm_ranges, .set_hard_min_memclk = dcn3_set_hard_min_memclk, .set_hard_max_memclk = dcn3_set_hard_max_memclk, + .set_max_memclk = dcn3_set_max_memclk, + .set_min_memclk = dcn3_set_min_memclk, .get_memclk_states_from_smu = dcn3_get_memclk_states_from_smu, .are_clock_states_equal = dcn3_are_clock_states_equal, .enable_pme_wa = dcn3_enable_pme_wa, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c index 6ea642615854..d9920d91838d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c @@ -88,9 +88,9 @@ static uint32_t dcn301_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, u return res_val; } -int dcn301_smu_send_msg_with_param( - struct clk_mgr_internal *clk_mgr, - unsigned int msg_id, unsigned int param) +static int dcn301_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, + unsigned int msg_id, + unsigned int param) { uint32_t result; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c index 3eee32faa208..8f78e62b28b7 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c @@ -89,9 +89,9 @@ static int vg_get_active_display_cnt_wa( return display_count; } -void vg_update_clocks(struct clk_mgr *clk_mgr_base, - struct dc_state *context, - bool safe_to_lower) +static void vg_update_clocks(struct clk_mgr *clk_mgr_base, + struct dc_state *context, + bool safe_to_lower) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; @@ -367,18 +367,6 @@ static void vg_dump_clk_registers(struct clk_state_registers_and_bypass *regs_an } } -/* This function produce translated logical clk state values*/ -void vg_get_clk_states(struct clk_mgr *clk_mgr_base, struct clk_states *s) -{ - - struct clk_state_registers_and_bypass sb = { 0 }; - struct clk_log_info log_info = { 0 }; - - vg_dump_clk_registers(&sb, clk_mgr_base, &log_info); - - s->dprefclk_khz = sb.dprefclk * 1000; -} - static void vg_enable_pme_wa(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); @@ -386,7 
+374,7 @@ static void vg_enable_pme_wa(struct clk_mgr *clk_mgr_base) dcn301_smu_enable_pme_wa(clk_mgr); } -void vg_init_clocks(struct clk_mgr *clk_mgr) +static void vg_init_clocks(struct clk_mgr *clk_mgr) { memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); // Assumption is that boot state always supports pstate diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index f4c9a458ace8..412cc6a716f7 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -66,7 +66,7 @@ #define TO_CLK_MGR_DCN31(clk_mgr)\ container_of(clk_mgr, struct clk_mgr_dcn31, base) -int dcn31_get_active_display_cnt_wa( +static int dcn31_get_active_display_cnt_wa( struct dc *dc, struct dc_state *context) { @@ -118,7 +118,7 @@ static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable) } } -static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, +void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool safe_to_lower) { @@ -284,7 +284,7 @@ static void dcn31_enable_pme_wa(struct clk_mgr *clk_mgr_base) dcn31_smu_enable_pme_wa(clk_mgr); } -static void dcn31_init_clocks(struct clk_mgr *clk_mgr) +void dcn31_init_clocks(struct clk_mgr *clk_mgr) { memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); // Assumption is that boot state always supports pstate @@ -294,7 +294,7 @@ static void dcn31_init_clocks(struct clk_mgr *clk_mgr) clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN; } -static bool dcn31_are_clock_states_equal(struct dc_clocks *a, +bool dcn31_are_clock_states_equal(struct dc_clocks *a, struct dc_clocks *b) { if (a->dispclk_khz != b->dispclk_khz) @@ -540,10 +540,9 @@ static unsigned int find_clk_for_voltage( return clock; } -void dcn31_clk_mgr_helper_populate_bw_params( - struct clk_mgr_internal *clk_mgr, - struct integrated_info *bios_info, - const DpmClocks_t *clock_table) +static void dcn31_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr, + struct integrated_info *bios_info, + const DpmClocks_t *clock_table) { int i, j; struct clk_bw_params *bw_params = clk_mgr->base.bw_params; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h index f8f100535526..961b10a49486 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h @@ -39,6 +39,13 @@ struct clk_mgr_dcn31 { struct dcn31_smu_watermark_set smu_wm_set; }; +bool dcn31_are_clock_states_equal(struct dc_clocks *a, + struct dc_clocks *b); +void dcn31_init_clocks(struct clk_mgr *clk_mgr); +void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, + struct dc_state *context, + bool safe_to_lower); + void dcn31_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_dcn31 *clk_mgr, struct pp_smu_funcs *pp_smu, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c index 8c2b77eb9459..b7ace235a2d5 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c @@ -95,9 +95,9 @@ static uint32_t dcn31_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un return res_val; } -int dcn31_smu_send_msg_with_param( - struct clk_mgr_internal *clk_mgr, - unsigned int msg_id, unsigned int param) +static int 
dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, + unsigned int msg_id, + unsigned int param) { uint32_t result; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 0ded4decee05..c250f6de5136 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -221,9 +221,9 @@ static bool create_links( link = link_create(&link_init_params); if (link) { - dc->links[dc->link_count] = link; - link->dc = dc; - ++dc->link_count; + dc->links[dc->link_count] = link; + link->dc = dc; + ++dc->link_count; } } @@ -808,6 +808,10 @@ void dc_stream_set_static_screen_params(struct dc *dc, static void dc_destruct(struct dc *dc) { + // reset link encoder assignment table on destruct + if (dc->res_pool && dc->res_pool->funcs->link_encs_assign) + link_enc_cfg_init(dc, dc->current_state); + if (dc->current_state) { dc_release_state(dc->current_state); dc->current_state = NULL; @@ -1016,8 +1020,6 @@ static bool dc_construct(struct dc *dc, goto fail; } - dc_resource_state_construct(dc, dc->current_state); - if (!create_links(dc, init_params->num_virtual_links)) goto fail; @@ -1027,8 +1029,7 @@ static bool dc_construct(struct dc *dc, if (!create_link_encoders(dc)) goto fail; - /* Initialise DIG link encoder resource tracking variables. */ - link_enc_cfg_init(dc, dc->current_state); + dc_resource_state_construct(dc, dc->current_state); return true; @@ -1421,22 +1422,29 @@ static void program_timing_sync( status->timing_sync_info.master = false; } - /* remove any other unblanked pipes as they have already been synced */ - for (j = j + 1; j < group_size; j++) { - bool is_blanked; - if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked) - is_blanked = - pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp); - else - is_blanked = - pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg); - if (!is_blanked) { - group_size--; - pipe_set[j] = pipe_set[group_size]; - j--; + /* remove any other pipes that have already been synced */ + if (dc->config.use_pipe_ctx_sync_logic) { + /* check pipe's syncd to decide which pipe is to be removed */ + for (j = 1; j < group_size; j++) { + if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) { + group_size--; + pipe_set[j] = pipe_set[group_size]; + j--; + } else + /* link slave pipe's syncd with master pipe */ + pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd; } - } + } else { + /* remove any other pipes by checking valid plane */ + for (j = j + 1; j < group_size; j++) { + if (pipe_set[j]->plane_state) { + group_size--; + pipe_set[j] = pipe_set[group_size]; + j--; + } + } + } if (group_size > 1) { if (sync_type == TIMING_SYNCHRONIZABLE) { @@ -1830,6 +1838,19 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context) dc_stream_log(dc, stream); } + /* + * Previous validation was performed with fast_validation = true and + * the full DML state required for hardware programming was skipped. + * + * Re-validate here to calculate these parameters / watermarks.
+ */ + result = dc_validate_global_state(dc, context, false); + if (result != DC_OK) { + DC_LOG_ERROR("DC commit global validation failure: %s (%d)", + dc_status_to_str(result), result); + return result; + } + result = dc_commit_state_no_check(dc, context); return (result == DC_OK); @@ -2870,7 +2891,8 @@ static void commit_planes_for_stream(struct dc *dc, #endif if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) - if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { + if (top_pipe_to_program && + top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { if (should_use_dmub_lock(stream->link)) { union dmub_hw_lock_flags hw_locks = { 0 }; struct dmub_hw_lock_inst_flags inst_flags = { 0 }; @@ -2979,12 +3001,12 @@ static void commit_planes_for_stream(struct dc *dc, #ifdef CONFIG_DRM_AMD_DC_DCN if (dc->debug.validate_dml_output) { for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx cur_pipe = context->res_ctx.pipe_ctx[i]; - if (cur_pipe.stream == NULL) + struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i]; + if (cur_pipe->stream == NULL) continue; - cur_pipe.plane_res.hubp->funcs->validate_dml_output( - cur_pipe.plane_res.hubp, dc->ctx, + cur_pipe->plane_res.hubp->funcs->validate_dml_output( + cur_pipe->plane_res.hubp, dc->ctx, &context->res_ctx.pipe_ctx[i].rq_regs, &context->res_ctx.pipe_ctx[i].dlg_regs, &context->res_ctx.pipe_ctx[i].ttu_regs); @@ -3426,7 +3448,7 @@ struct dc_sink *dc_link_add_remote_sink( goto fail_add_sink; edid_status = dm_helpers_parse_edid_caps( - link->ctx, + link, &dc_sink->dc_edid, &dc_sink->edid_caps); @@ -3583,6 +3605,98 @@ void dc_lock_memory_clock_frequency(struct dc *dc) core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]); } +static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz) +{ + struct dc_state *context = dc->current_state; + struct hubp *hubp; + struct pipe_ctx *pipe; + int i; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->stream != NULL) { + dc->hwss.disable_pixel_data(dc, pipe, true); + + // wait for double buffer + pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE); + pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK); + pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE); + + hubp = pipe->plane_res.hubp; + hubp->funcs->set_blank_regs(hubp, true); + } + } + + dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz); + dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz); + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->stream != NULL) { + dc->hwss.disable_pixel_data(dc, pipe, false); + + hubp = pipe->plane_res.hubp; + hubp->funcs->set_blank_regs(hubp, false); + } + } +} + + +/** + * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode + * @dc: pointer to dc of the dm calling this + * @enable: True = transition to DC mode, false = transition back to AC mode + * + * Some SoCs define additional clock limits when in DC mode, DM should + * invoke this function when the platform undergoes a power source transition + * so DC can apply/unapply the limit. This interface may be disruptive to + * the onscreen content. + * + * Context: Triggered by OS through DM interface, or manually by escape calls. + * Need to hold a dclock when doing so. 
+ * + * Return: none (void function) + * + */ +void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable) +{ + uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; + unsigned int softMax, maxDPM, funcMin; + bool p_state_change_support; + + if (!ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev)) + return; + + softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk; + maxDPM = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz; + funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000; + p_state_change_support = dc->clk_mgr->clks.p_state_change_support; + + if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) { + if (p_state_change_support) { + if (funcMin <= softMax) + dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax); + // else: No-Op + } else { + if (funcMin <= softMax) + blank_and_force_memclk(dc, true, softMax); + // else: No-Op + } + } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) { + if (p_state_change_support) { + if (funcMin <= softMax) + dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM); + // else: No-Op + } else { + if (funcMin <= softMax) + blank_and_force_memclk(dc, true, maxDPM); + // else: No-Op + } + } + dc->clk_mgr->dc_mode_softmax_enabled = enable; +} bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane, struct dc_cursor_attributes *cursor_attr) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 60544788e911..3d75f56a939c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -270,10 +270,10 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type) /* Link may not have physical HPD pin. */ if (link->ep_type != DISPLAY_ENDPOINT_PHY) { - if (link->hpd_status) - *type = dc_connection_single; - else + if (link->is_hpd_pending || !link->hpd_status) *type = dc_connection_none; + else + *type = dc_connection_single; return true; } @@ -758,6 +758,18 @@ static bool detect_dp(struct dc_link *link, dal_ddc_service_set_transaction_type(link->ddc, sink_caps->transaction_type); +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock + * reports DSC support. + */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + link->type == dc_connection_mst_branch && + link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && + link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && + !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around) + link->wa_flags.dpia_mst_dsc_always_on = true; +#endif + #if defined(CONFIG_DRM_AMD_DC_HDCP) /* In case of fallback to SST when topology discovery below fails * HDCP caps will be querried again later by the upper layer (caller @@ -1203,6 +1215,10 @@ static bool dc_link_detect_helper(struct dc_link *link, LINK_INFO("link=%d, mst branch is now Disconnected\n", link->link_index); + /* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks. 
*/ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + link->wa_flags.dpia_mst_dsc_always_on = false; + dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); link->mst_stream_alloc_table.stream_count = 0; @@ -1999,6 +2015,57 @@ static enum dc_status enable_link_dp_mst( return enable_link_dp(state, pipe_ctx); } +void dc_link_blank_all_dp_displays(struct dc *dc) +{ + unsigned int i; + uint8_t dpcd_power_state = '\0'; + enum dc_status status = DC_ERROR_UNEXPECTED; + + for (i = 0; i < dc->link_count; i++) { + if ((dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) || + (dc->links[i]->priv == NULL) || (dc->links[i]->local_sink == NULL)) + continue; + + /* DP 2.0 spec requires that we read LTTPR caps first */ + dp_retrieve_lttpr_cap(dc->links[i]); + /* if any of the displays are lit up turn them off */ + status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, + &dpcd_power_state, sizeof(dpcd_power_state)); + + if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) + dc_link_blank_dp_stream(dc->links[i], true); + } + +} + +void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init) +{ + unsigned int j; + struct dc *dc = link->ctx->dc; + enum signal_type signal = link->connector_signal; + + if ((signal == SIGNAL_TYPE_EDP) || + (signal == SIGNAL_TYPE_DISPLAY_PORT)) { + if (link->ep_type == DISPLAY_ENDPOINT_PHY && + link->link_enc->funcs->get_dig_frontend && + link->link_enc->funcs->is_dig_enabled(link->link_enc)) { + unsigned int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc); + + if (fe != ENGINE_ID_UNKNOWN) + for (j = 0; j < dc->res_pool->stream_enc_count; j++) { + if (fe == dc->res_pool->stream_enc[j]->id) { + dc->res_pool->stream_enc[j]->funcs->dp_blank(link, + dc->res_pool->stream_enc[j]); + break; + } + } + } + + if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init) + dp_receiver_power_ctrl(link, false); + } +} + static bool get_ext_hdmi_settings(struct pipe_ctx *pipe_ctx, enum engine_id eng_id, struct ext_hdmi_settings *settings) @@ -2699,8 +2766,23 @@ static bool dp_active_dongle_validate_timing( return false; } +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter + struct dc_crtc_timing outputTiming = *timing; + + if (timing->flags.DSC && !timing->dsc_cfg.is_frl) + /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */ + outputTiming.flags.DSC = 0; + if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps) + return false; + } else { // DP to HDMI TMDS converter + if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10)) + return false; + } +#else if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10)) return false; +#endif #if defined(CONFIG_DRM_AMD_DC_DCN) } @@ -2946,7 +3028,7 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active link->psr_settings.psr_power_opt = *power_opts; if (psr != NULL && link->psr_settings.psr_feature_enabled && psr->funcs->psr_set_power_opt) - psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt); + psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt, panel_inst); } /* Enable or Disable PSR */ @@ -3334,7 +3416,8 @@ static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_tim /* * Payload allocation/deallocation for SST introduced in DP2.0 */ -enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx, bool allocate) +static 
enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx, + bool allocate) { struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; @@ -3913,9 +3996,6 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp; #if defined(CONFIG_DRM_AMD_DC_DCN) struct link_encoder *link_enc = NULL; - struct dc_state *state = pipe_ctx->stream->ctx->dc->current_state; - struct link_enc_assignment link_enc_assign; - int i; #endif if (cp_psp && cp_psp->funcs.update_stream_config) { @@ -3943,18 +4023,15 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) pipe_ctx->stream->ctx->dc, pipe_ctx->stream); } + ASSERT(link_enc); + // Initialize PHY ID with ABCDE - 01234 mapping except when it is B0 config.phy_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; - //look up the link_enc_assignment for the current pipe_ctx - for (i = 0; i < state->stream_count; i++) { - if (pipe_ctx->stream == state->streams[i]) { - link_enc_assign = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; - } - } // Add flag to guard new A0 DIG mapping - if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true) { - config.dig_be = link_enc_assign.eng_id; + if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true && + pipe_ctx->stream->link->dc->ctx->dce_version == DCN_VERSION_3_1) { + config.dig_be = link_enc->preferred_engine; config.dio_output_type = pipe_ctx->stream->link->ep_type; config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; } else { @@ -3966,10 +4043,8 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true && link_enc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { - link_enc = link_enc_assign.stream->link_enc; - // enum ID 1-4 maps to DPIA PHY ID 0-3 - config.phy_idx = link_enc_assign.ep_id.link_id.enum_id - ENUM_ID_1; + config.phy_idx = pipe_ctx->stream->link->link_id.enum_id - ENUM_ID_1; } else { // for non DPIA mode over B0, ABCDE maps to 01564 switch (link_enc->transmitter) { @@ -4242,7 +4317,8 @@ void core_link_enable_stream( /* eDP lit up by bios already, no need to enable again. */ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && apply_edp_fast_boot_optimization && - !pipe_ctx->stream->timing.flags.DSC) { + !pipe_ctx->stream->timing.flags.DSC && + !pipe_ctx->next_odm_pipe) { pipe_ctx->stream->dpms_off = false; #if defined(CONFIG_DRM_AMD_DC_HDCP) update_psp_stream_config(pipe_ctx, false); @@ -4749,6 +4825,8 @@ bool dc_link_should_enable_fec(const struct dc_link *link) link->local_sink && link->local_sink->edid_caps.panel_patch.disable_fec) || (link->connector_signal == SIGNAL_TYPE_EDP + // enable FEC for EDP if DSC is supported + && link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT == false )) is_fec_disable = true; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index 60539b1f2a80..24dc662ec3e4 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -626,7 +626,7 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc, do { struct aux_payload current_payload; bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >= - payload->length ? true : false; + payload->length; uint32_t payload_length = is_end_of_payload ? 
payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index cb7bf9148904..8a35370da867 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -100,6 +100,7 @@ static const struct dp_lt_fallback_entry dp_lt_fallbacks[] = { #endif static bool decide_fallback_link_setting( + struct dc_link *link, struct dc_link_settings initial_link_settings, struct dc_link_settings *current_link_setting, enum link_training_result training_result); @@ -398,6 +399,223 @@ static uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings) } #endif +static void vendor_specific_lttpr_wa_one_start(struct dc_link *link) +{ + const uint8_t vendor_lttpr_write_data[4] = {0x1, 0x50, 0x63, 0xff}; + const uint8_t offset = dp_convert_to_count( + link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + uint32_t vendor_lttpr_write_address = 0xF004F; + + if (offset != 0xFF) + vendor_lttpr_write_address += + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + + /* W/A for certain LTTPR to reset their lane settings, part one of two */ + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data[0], + sizeof(vendor_lttpr_write_data)); +} + +static void vendor_specific_lttpr_wa_one_end( + struct dc_link *link, + uint8_t retry_count) +{ + const uint8_t vendor_lttpr_write_data[4] = {0x1, 0x50, 0x63, 0x0}; + const uint8_t offset = dp_convert_to_count( + link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + uint32_t vendor_lttpr_write_address = 0xF004F; + + if (!retry_count) { + if (offset != 0xFF) + vendor_lttpr_write_address += + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + + /* W/A for certain LTTPR to reset their lane settings, part two of two */ + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data[0], + sizeof(vendor_lttpr_write_data)); + } +} + +static void vendor_specific_lttpr_wa_one_two( + struct dc_link *link, + const uint8_t rate) +{ + if (link->apply_vendor_specific_lttpr_link_rate_wa) { + uint8_t toggle_rate = 0x0; + + if (rate == 0x6) + toggle_rate = 0xA; + else + toggle_rate = 0x6; + + if (link->vendor_specific_lttpr_link_rate_wa == rate) { + /* W/A for certain LTTPR to reset internal state for link training */ + core_link_write_dpcd( + link, + DP_LINK_BW_SET, + &toggle_rate, + 1); + } + + /* Store the last attempted link rate for this link */ + link->vendor_specific_lttpr_link_rate_wa = rate; + } +} + +static void vendor_specific_lttpr_wa_three( + struct dc_link *link, + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX]) +{ + const uint8_t vendor_lttpr_write_data_vs[3] = {0x0, 0x53, 0x63}; + const uint8_t vendor_lttpr_write_data_pe[3] = {0x0, 0x54, 0x63}; + const uint8_t offset = dp_convert_to_count( + link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + uint32_t vendor_lttpr_write_address = 0xF004F; + uint32_t vendor_lttpr_read_address = 0xF0053; + uint8_t dprx_vs = 0; + uint8_t dprx_pe = 0; + uint8_t lane; + + if (offset != 0xFF) { + vendor_lttpr_write_address += + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + vendor_lttpr_read_address += + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + } + + /* W/A to read lane settings requested by DPRX */ + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_vs[0], + sizeof(vendor_lttpr_write_data_vs)); + core_link_read_dpcd( + link, + 
vendor_lttpr_read_address, + &dprx_vs, + 1); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_pe[0], + sizeof(vendor_lttpr_write_data_pe)); + core_link_read_dpcd( + link, + vendor_lttpr_read_address, + &dprx_pe, + 1); + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_LANE = (dprx_vs >> (2 * lane)) & 0x3; + dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_LANE = (dprx_pe >> (2 * lane)) & 0x3; + } +} + +static void vendor_specific_lttpr_wa_three_dpcd( + struct dc_link *link, + union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX]) +{ + union lane_adjust lane_adjust[LANE_COUNT_DP_MAX]; + uint8_t lane = 0; + + vendor_specific_lttpr_wa_three(link, lane_adjust); + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET = lane_adjust[lane].bits.VOLTAGE_SWING_LANE; + dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET = lane_adjust[lane].bits.PRE_EMPHASIS_LANE; + } +} + +static void vendor_specific_lttpr_wa_four( + struct dc_link *link, + bool apply_wa) +{ + const uint8_t vendor_lttpr_write_data_one[4] = {0x1, 0x55, 0x63, 0x8}; + const uint8_t vendor_lttpr_write_data_two[4] = {0x1, 0x55, 0x63, 0x0}; + const uint8_t offset = dp_convert_to_count( + link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + uint32_t vendor_lttpr_write_address = 0xF004F; +#if defined(CONFIG_DRM_AMD_DC_DP2_0) + uint8_t sink_status = 0; + uint8_t i; +#endif + + if (offset != 0xFF) + vendor_lttpr_write_address += + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + + /* W/A to pass through DPCD write of TPS=0 to DPRX */ + if (apply_wa) { + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_one[0], + sizeof(vendor_lttpr_write_data_one)); + } + + /* clear training pattern set */ + dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE); + + if (apply_wa) { + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_two[0], + sizeof(vendor_lttpr_write_data_two)); + } + +#if defined(CONFIG_DRM_AMD_DC_DP2_0) + /* poll for intra-hop disable */ + for (i = 0; i < 10; i++) { + if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) && + (sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0) + break; + udelay(1000); + } +#endif +} + +static void vendor_specific_lttpr_wa_five( + struct dc_link *link, + const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX], + uint8_t lane_count) +{ + const uint32_t vendor_lttpr_write_address = 0xF004F; + const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; + uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; + uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; + uint8_t lane = 0; + + for (lane = 0; lane < lane_count; lane++) { + vendor_lttpr_write_data_vs[3] |= + dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET << (2 * lane); + vendor_lttpr_write_data_pe[3] |= + dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET << (2 * lane); + } + + /* Force LTTPR to output desired VS and PE */ + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_reset[0], + sizeof(vendor_lttpr_write_data_reset)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_vs[0], + sizeof(vendor_lttpr_write_data_vs)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_pe[0], + sizeof(vendor_lttpr_write_data_pe)); +} + enum dc_status dpcd_set_link_settings( struct 
dc_link *link, const struct link_training_settings *lt_settings) @@ -430,7 +648,7 @@ enum dc_status dpcd_set_link_settings( status = core_link_write_dpcd(link, DP_LANE_COUNT_SET, &lane_count_set.raw, 1); - if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 && + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 && lt_settings->link_settings.use_link_rate_set == true) { rate = 0; /* WA for some MUX chips that will power down with eDP and lose supported @@ -452,6 +670,15 @@ enum dc_status dpcd_set_link_settings( #else rate = (uint8_t) (lt_settings->link_settings.link_rate); #endif + if (link->dc->debug.apply_vendor_specific_lttpr_wa && + (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + link->lttpr_mode == LTTPR_MODE_TRANSPARENT) + vendor_specific_lttpr_wa_one_start(link); + + if (link->dc->debug.apply_vendor_specific_lttpr_wa && + (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)) + vendor_specific_lttpr_wa_one_two(link, rate); + status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); } @@ -1211,6 +1438,12 @@ static enum link_training_result perform_channel_equalization_sequence( dp_translate_training_aux_read_interval( link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]); + if (link->dc->debug.apply_vendor_specific_lttpr_wa && + (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + link->lttpr_mode == LTTPR_MODE_TRANSPARENT) { + wait_time_microsec = 16000; + } + dp_wait_for_training_aux_rd_interval( link, wait_time_microsec); @@ -1314,6 +1547,11 @@ static enum link_training_result perform_clock_recovery_sequence( if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) wait_time_microsec = TRAINING_AUX_RD_INTERVAL; + if (link->dc->debug.apply_vendor_specific_lttpr_wa && + (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)) { + wait_time_microsec = 16000; + } + dp_wait_for_training_aux_rd_interval( link, wait_time_microsec); @@ -1329,6 +1567,13 @@ static enum link_training_result perform_clock_recovery_sequence( dpcd_lane_adjust, offset); + if (link->dc->debug.apply_vendor_specific_lttpr_wa && + (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + link->lttpr_mode == LTTPR_MODE_TRANSPARENT) { + vendor_specific_lttpr_wa_one_end(link, retry_count); + vendor_specific_lttpr_wa_three(link, dpcd_lane_adjust); + } + /* 5. 
check CR done*/ if (dp_is_cr_done(lane_count, dpcd_lane_status)) return LINK_TRAINING_SUCCESS; @@ -2138,7 +2383,7 @@ static enum link_training_result dp_perform_8b_10b_link_training( } for (lane = 0; lane < (uint8_t)lt_settings->link_settings.lane_count; lane++) - lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = VOLTAGE_SWING_LEVEL0; + lt_settings->dpcd_lane_settings[lane].raw = 0; } if (status == LINK_TRAINING_SUCCESS) { @@ -2203,7 +2448,14 @@ enum link_training_result dc_link_dp_perform_link_training( <_settings); /* reset previous training states */ - dpcd_exit_training_mode(link); + if (link->dc->debug.apply_vendor_specific_lttpr_wa && + (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + link->lttpr_mode == LTTPR_MODE_TRANSPARENT) { + link->apply_vendor_specific_lttpr_link_rate_wa = true; + vendor_specific_lttpr_wa_four(link, true); + } else { + dpcd_exit_training_mode(link); + } /* configure link prior to entering training mode */ dpcd_configure_lttpr_mode(link, <_settings); @@ -2223,8 +2475,17 @@ enum link_training_result dc_link_dp_perform_link_training( else ASSERT(0); - /* exit training mode and switch to video idle */ - dpcd_exit_training_mode(link); + /* exit training mode */ + if (link->dc->debug.apply_vendor_specific_lttpr_wa && + (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + link->lttpr_mode == LTTPR_MODE_TRANSPARENT) { + link->apply_vendor_specific_lttpr_link_rate_wa = false; + vendor_specific_lttpr_wa_four(link, (status != LINK_TRAINING_SUCCESS)); + } else { + dpcd_exit_training_mode(link); + } + + /* switch to video idle */ if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) status = dp_transition_to_video_idle(link, <_settings, @@ -2349,7 +2610,7 @@ bool perform_link_training_with_retries( uint32_t req_bw; uint32_t link_bw; - decide_fallback_link_setting(*link_setting, ¤t_setting, status); + decide_fallback_link_setting(link, *link_setting, ¤t_setting, status); /* Fail link training if reduced link bandwidth no longer meets * stream requirements. 
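	 *
	 * A hedged sketch of that guard, using helpers that appear elsewhere in
	 * this file (a paraphrase under assumed local names, not a literal quote
	 * of the surrounding code):
	 *
	 *	req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
	 *	link_bw = dc_link_bandwidth_kbps(link, &current_setting);
	 *	if (req_bw > link_bw)
	 *		break;	// stop retrying: the fallback link is too slow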
*/ @@ -2864,7 +3125,7 @@ bool dp_verify_link_cap( * based on the actual mode we're driving */ dp_disable_link_phy(link, link->connector_signal); - } while (!success && decide_fallback_link_setting( + } while (!success && decide_fallback_link_setting(link, initial_link_settings, cur, status)); /* Link Training failed for all Link Settings @@ -3116,6 +3377,7 @@ static bool decide_fallback_link_setting_max_bw_policy( * and no further fallback could be done */ static bool decide_fallback_link_setting( + struct dc_link *link, struct dc_link_settings initial_link_settings, struct dc_link_settings *current_link_setting, enum link_training_result training_result) @@ -3123,7 +3385,8 @@ static bool decide_fallback_link_setting( if (!current_link_setting) return false; #if defined(CONFIG_DRM_AMD_DC_DCN) - if (dp_get_link_encoding_format(&initial_link_settings) == DP_128b_132b_ENCODING) + if (dp_get_link_encoding_format(&initial_link_settings) == DP_128b_132b_ENCODING || + link->dc->debug.force_dp2_lt_fallback_method) return decide_fallback_link_setting_max_bw_policy(&initial_link_settings, current_link_setting); #endif @@ -3346,6 +3609,148 @@ bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *lin return false; } +static bool decide_edp_link_settings_with_dsc(struct dc_link *link, + struct dc_link_settings *link_setting, + uint32_t req_bw, + enum dc_link_rate max_link_rate) +{ + struct dc_link_settings initial_link_setting; + struct dc_link_settings current_link_setting; + uint32_t link_bw; + + unsigned int policy = 0; + + policy = link->ctx->dc->debug.force_dsc_edp_policy; + if (max_link_rate == LINK_RATE_UNKNOWN) + max_link_rate = link->verified_link_cap.link_rate; + /* + * edp_supported_link_rates_count is only valid for eDP v1.4 or higher. + * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h" + */ + if ((link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 || + link->dpcd_caps.edp_supported_link_rates_count == 0)) { + /* for DSC enabled case, we search for minimum lane count */ + memset(&initial_link_setting, 0, sizeof(initial_link_setting)); + initial_link_setting.lane_count = LANE_COUNT_ONE; + initial_link_setting.link_rate = LINK_RATE_LOW; + initial_link_setting.link_spread = LINK_SPREAD_DISABLED; + initial_link_setting.use_link_rate_set = false; + initial_link_setting.link_rate_set = 0; + current_link_setting = initial_link_setting; + if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap)) + return false; + + /* search for the minimum link setting that: + * 1. is supported according to the link training result + * 2. 
could support the b/w requested by the timing + */ + while (current_link_setting.link_rate <= + max_link_rate) { + link_bw = dc_link_bandwidth_kbps( + link, + ¤t_link_setting); + if (req_bw <= link_bw) { + *link_setting = current_link_setting; + return true; + } + if (policy) { + /* minimize lane */ + if (current_link_setting.link_rate < max_link_rate) { + current_link_setting.link_rate = + increase_link_rate( + current_link_setting.link_rate); + } else { + if (current_link_setting.lane_count < + link->verified_link_cap.lane_count) { + current_link_setting.lane_count = + increase_lane_count( + current_link_setting.lane_count); + current_link_setting.link_rate = initial_link_setting.link_rate; + } else + break; + } + } else { + /* minimize link rate */ + if (current_link_setting.lane_count < + link->verified_link_cap.lane_count) { + current_link_setting.lane_count = + increase_lane_count( + current_link_setting.lane_count); + } else { + current_link_setting.link_rate = + increase_link_rate( + current_link_setting.link_rate); + current_link_setting.lane_count = + initial_link_setting.lane_count; + } + } + } + return false; + } + + /* if optimize edp link is supported */ + memset(&initial_link_setting, 0, sizeof(initial_link_setting)); + initial_link_setting.lane_count = LANE_COUNT_ONE; + initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0]; + initial_link_setting.link_spread = LINK_SPREAD_DISABLED; + initial_link_setting.use_link_rate_set = true; + initial_link_setting.link_rate_set = 0; + current_link_setting = initial_link_setting; + + /* search for the minimum link setting that: + * 1. is supported according to the link training result + * 2. could support the b/w requested by the timing + */ + while (current_link_setting.link_rate <= + max_link_rate) { + link_bw = dc_link_bandwidth_kbps( + link, + ¤t_link_setting); + if (req_bw <= link_bw) { + *link_setting = current_link_setting; + return true; + } + if (policy) { + /* minimize lane */ + if (current_link_setting.link_rate_set < + link->dpcd_caps.edp_supported_link_rates_count + && current_link_setting.link_rate < max_link_rate) { + current_link_setting.link_rate_set++; + current_link_setting.link_rate = + link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; + } else { + if (current_link_setting.lane_count < link->verified_link_cap.lane_count) { + current_link_setting.lane_count = + increase_lane_count( + current_link_setting.lane_count); + current_link_setting.link_rate_set = initial_link_setting.link_rate_set; + current_link_setting.link_rate = + link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; + } else + break; + } + } else { + /* minimize link rate */ + if (current_link_setting.lane_count < + link->verified_link_cap.lane_count) { + current_link_setting.lane_count = + increase_lane_count( + current_link_setting.lane_count); + } else { + if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { + current_link_setting.link_rate_set++; + current_link_setting.link_rate = + link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; + current_link_setting.lane_count = + initial_link_setting.lane_count; + } else + break; + } + } + } + return false; +} + static bool decide_mst_link_settings(const struct dc_link *link, struct dc_link_settings *link_setting) { *link_setting = link->verified_link_cap; @@ -3380,7 +3785,25 @@ void decide_link_settings(struct dc_stream_state *stream, if (decide_mst_link_settings(link, 
link_setting))
			return;
	} else if (link->connector_signal == SIGNAL_TYPE_EDP) {
-		if (decide_edp_link_settings(link, link_setting, req_bw))
+		/* enable edp link optimization for DSC eDP case */
+		if (stream->timing.flags.DSC) {
+			enum dc_link_rate max_link_rate = LINK_RATE_UNKNOWN;
+
+			if (link->ctx->dc->debug.force_dsc_edp_policy) {
+				/* calculate max link rate cap */
+				struct dc_link_settings tmp_link_setting;
+				struct dc_crtc_timing tmp_timing = stream->timing;
+				uint32_t orig_req_bw;
+
+				tmp_link_setting.link_rate = LINK_RATE_UNKNOWN;
+				tmp_timing.flags.DSC = 0;
+				orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing);
+				decide_edp_link_settings(link, &tmp_link_setting, orig_req_bw);
+				max_link_rate = tmp_link_setting.link_rate;
+			}
+			if (decide_edp_link_settings_with_dsc(link, link_setting, req_bw, max_link_rate))
+				return;
+		} else if (decide_edp_link_settings(link, link_setting, req_bw))
 			return;
 	} else if (decide_dp_link_settings(link, link_setting, req_bw))
 		return;
@@ -3421,7 +3844,6 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
 		&psr_configuration.raw,
 		sizeof(psr_configuration.raw));
 
-
 	if (psr_configuration.bits.ENABLE) {
 		unsigned char dpcdbuf[3] = {0};
 		union psr_error_status psr_error_status;
@@ -3453,10 +3875,12 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
 			sizeof(psr_error_status.raw));
 
 		/* PSR error, disable and re-enable PSR */
-		allow_active = false;
-		dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
-		allow_active = true;
-		dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
+		if (link->psr_settings.psr_allow_active) {
+			allow_active = false;
+			dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
+			allow_active = true;
+			dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
+		}
 
 		return true;
 	} else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS ==
@@ -3534,6 +3958,13 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
 		&dpcd_lane_adjustment[0].raw,
 		sizeof(dpcd_lane_adjustment));
 
+	if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
+			(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+			link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+		vendor_specific_lttpr_wa_three_dpcd(
+			link,
+			link_training_settings.dpcd_lane_settings);
+
 	/* get post cursor 2 parameters
 	 * For DP 1.1a or earlier, this DPCD register's value is 0
 	 * For DP 1.2 or later:
@@ -4153,6 +4584,56 @@ static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
 	return -1;
 }
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw)
+{
+	switch (bw) {
+	case 0b001:
+		return 9000000;
+	case 0b010:
+		return 18000000;
+	case 0b011:
+		return 24000000;
+	case 0b100:
+		return 32000000;
+	case 0b101:
+		return 40000000;
+	case 0b110:
+		return 48000000;
+	}
+
+	return 0;
+}
+
+/**
+ * Return PCON's post-FRL link training supported BW if it's non-zero, otherwise return max_supported_frl_bw.
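+ *
+ * Hedged usage sketch, mirroring the call site added below in
+ * get_active_converter_info(); the chosen rates are illustrative only
+ * (all bandwidths in kbps):
+ *
+ *	union hdmi_encoded_link_bw bw = { .raw = 0 };
+ *	uint32_t cap = dc_link_bw_kbps_from_raw_frl_link_rate_data(0b101); // 40 Gbps
+ *
+ *	bw.bits.FRL_MODE = 1;
+ *	bw.bits.BW_24Gbps = 1;
+ *	cap = intersect_frl_link_bw_support(cap, bw); // trained rate wins: 24000000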
+ */ +static uint32_t intersect_frl_link_bw_support( + const uint32_t max_supported_frl_bw_in_kbps, + const union hdmi_encoded_link_bw hdmi_encoded_link_bw) +{ + uint32_t supported_bw_in_kbps = max_supported_frl_bw_in_kbps; + + // HDMI_ENCODED_LINK_BW bits are only valid if HDMI Link Configuration bit is 1 (FRL mode) + if (hdmi_encoded_link_bw.bits.FRL_MODE) { + if (hdmi_encoded_link_bw.bits.BW_48Gbps) + supported_bw_in_kbps = 48000000; + else if (hdmi_encoded_link_bw.bits.BW_40Gbps) + supported_bw_in_kbps = 40000000; + else if (hdmi_encoded_link_bw.bits.BW_32Gbps) + supported_bw_in_kbps = 32000000; + else if (hdmi_encoded_link_bw.bits.BW_24Gbps) + supported_bw_in_kbps = 24000000; + else if (hdmi_encoded_link_bw.bits.BW_18Gbps) + supported_bw_in_kbps = 18000000; + else if (hdmi_encoded_link_bw.bits.BW_9Gbps) + supported_bw_in_kbps = 9000000; + } + + return supported_bw_in_kbps; +} +#endif + static void read_dp_device_vendor_id(struct dc_link *link) { struct dp_device_vendor_id dp_id; @@ -4264,6 +4745,27 @@ static void get_active_converter_info( translate_dpcd_max_bpc( hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (link->dc->caps.hdmi_frl_pcon_support) { + union hdmi_encoded_link_bw hdmi_encoded_link_bw; + + link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = + dc_link_bw_kbps_from_raw_frl_link_rate_data( + hdmi_color_caps.bits.MAX_ENCODED_LINK_BW_SUPPORT); + + // Intersect reported max link bw support with the supported link rate post FRL link training + if (core_link_read_dpcd(link, DP_PCON_HDMI_POST_FRL_STATUS, + &hdmi_encoded_link_bw.raw, sizeof(hdmi_encoded_link_bw)) == DC_OK) { + link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support( + link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps, + hdmi_encoded_link_bw); + } + + if (link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps > 0) + link->dpcd_caps.dongle_caps.extendedCapValid = true; + } +#endif + if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz != 0) link->dpcd_caps.dongle_caps.extendedCapValid = true; } @@ -4454,7 +4956,7 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) lttpr_dpcd_data, sizeof(lttpr_dpcd_data)); if (status != DC_OK) { - dm_error("%s: Read LTTPR caps data failed.\n", __func__); + DC_LOG_DP2("%s: Read LTTPR caps data failed.\n", __func__); return false; } @@ -5240,8 +5742,18 @@ bool dc_link_dp_set_test_pattern( if (is_dp_phy_pattern(test_pattern)) { /* Set DPCD Lane Settings before running test pattern */ if (p_link_settings != NULL) { - dp_set_hw_lane_settings(link, p_link_settings, DPRX); - dpcd_set_lane_settings(link, p_link_settings, DPRX); + if (link->dc->debug.apply_vendor_specific_lttpr_wa && + (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + link->lttpr_mode == LTTPR_MODE_TRANSPARENT) { + dpcd_set_lane_settings(link, p_link_settings, DPRX); + vendor_specific_lttpr_wa_five( + link, + p_link_settings->dpcd_lane_settings, + p_link_settings->link_settings.lane_count); + } else { + dp_set_hw_lane_settings(link, p_link_settings, DPRX); + dpcd_set_lane_settings(link, p_link_settings, DPRX); + } } /* Blank stream if running test pattern */ @@ -5665,6 +6177,23 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) } } +struct link_encoder *dp_get_link_enc(struct dc_link *link) +{ + struct link_encoder *link_enc; + + link_enc = link->link_enc; + if (link->is_dig_mapping_flexible && + link->dc->res_pool->funcs->link_encs_assign) { + link_enc = 
link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, + link); + if (!link->link_enc) + link_enc = link_enc_cfg_get_next_avail_link_enc( + link->ctx->dc); + } + + return link_enc; +} + void dpcd_set_source_specific_data(struct dc_link *link) { if (!link->dc->vendor_signature.is_valid) { @@ -5885,7 +6414,10 @@ bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timin req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing); - decide_edp_link_settings(link, &link_setting, req_bw); + if (!crtc_timing->flags.DSC) + decide_edp_link_settings(link, &link_setting, req_bw); + else + decide_edp_link_settings_with_dsc(link, &link_setting, req_bw, LINK_RATE_UNKNOWN); if (link->dpcd_caps.edp_supported_link_rates[link_rate_set] != link_setting.link_rate || lane_count_set.bits.LANE_COUNT_SET != link_setting.lane_count) { @@ -6126,3 +6658,14 @@ bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx) dc_is_dp_signal(pipe_ctx->stream->signal)); } #endif + +void edp_panel_backlight_power_on(struct dc_link *link) +{ + if (link->connector_signal != SIGNAL_TYPE_EDP) + return; + + link->dc->hwss.edp_power_control(link, true); + link->dc->hwss.edp_wait_for_hpd_ready(link, true); + if (link->dc->hwss.edp_backlight_control) + link->dc->hwss.edp_backlight_control(link, true); +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c index b1c9f77d6bf4..d72122593959 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c @@ -94,17 +94,17 @@ static enum link_training_result dpia_configure_link(struct dc_link *link, lt_settings); status = dpcd_configure_channel_coding(link, lt_settings); - if (status != DC_OK && !link->hpd_status) + if (status != DC_OK && link->is_hpd_pending) return LINK_TRAINING_ABORT; /* Configure lttpr mode */ status = dpcd_configure_lttpr_mode(link, lt_settings); - if (status != DC_OK && !link->hpd_status) + if (status != DC_OK && link->is_hpd_pending) return LINK_TRAINING_ABORT; /* Set link rate, lane count and spread. */ status = dpcd_set_link_settings(link, lt_settings); - if (status != DC_OK && !link->hpd_status) + if (status != DC_OK && link->is_hpd_pending) return LINK_TRAINING_ABORT; if (link->preferred_training_settings.fec_enable) @@ -112,7 +112,7 @@ static enum link_training_result dpia_configure_link(struct dc_link *link, else fec_enable = true; status = dp_set_fec_ready(link, fec_enable); - if (status != DC_OK && !link->hpd_status) + if (status != DC_OK && link->is_hpd_pending) return LINK_TRAINING_ABORT; return LINK_TRAINING_SUCCESS; @@ -388,7 +388,7 @@ static enum link_training_result dpia_training_cr_non_transparent(struct dc_link } /* Abort link training if clock recovery failed due to HPD unplug. */ - if (!link->hpd_status) + if (link->is_hpd_pending) result = LINK_TRAINING_ABORT; DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n" @@ -490,7 +490,7 @@ static enum link_training_result dpia_training_cr_transparent(struct dc_link *li } /* Abort link training if clock recovery failed due to HPD unplug. */ - if (!link->hpd_status) + if (link->is_hpd_pending) result = LINK_TRAINING_ABORT; DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n" @@ -675,7 +675,7 @@ static enum link_training_result dpia_training_eq_non_transparent(struct dc_link } /* Abort link training if equalization failed due to HPD unplug. 
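	 *
	 * Editor's sketch of the polarity change applied throughout this file:
	 * the old test (!link->hpd_status) becomes
	 *
	 *	if (link->is_hpd_pending)
	 *		result = LINK_TRAINING_ABORT;
	 *
	 * so a not-yet-processed HPD event, rather than a deasserted HPD level,
	 * is what aborts DPIA link training.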
*/ - if (!link->hpd_status) + if (link->is_hpd_pending) result = LINK_TRAINING_ABORT; DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n" @@ -758,7 +758,7 @@ static enum link_training_result dpia_training_eq_transparent(struct dc_link *li } /* Abort link training if equalization failed due to HPD unplug. */ - if (!link->hpd_status) + if (link->is_hpd_pending) result = LINK_TRAINING_ABORT; DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n" @@ -892,10 +892,10 @@ static void dpia_training_abort(struct dc_link *link, uint32_t hop) __func__, link->link_id.enum_id - ENUM_ID_1, link->lttpr_mode, - link->hpd_status); + link->is_hpd_pending); /* Abandon clean-up if sink unplugged. */ - if (!link->hpd_status) + if (link->is_hpd_pending) return; if (hop != DPRX) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c index 25e48a8cbb78..a55944da8d53 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c @@ -118,7 +118,10 @@ static void remove_link_enc_assignment( */ if (get_stream_using_link_enc(state, eng_id) == NULL) state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] = eng_id; + stream->link_enc = NULL; + state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id = ENGINE_ID_UNKNOWN; + state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream = NULL; break; } } @@ -148,6 +151,7 @@ static void add_link_enc_assignment( .ep_type = stream->link->ep_type}, .eng_id = eng_id, .stream = stream}; + dc_stream_retain(stream); state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] = ENGINE_ID_UNKNOWN; stream->link_enc = stream->ctx->dc->res_pool->link_encoders[eng_idx]; break; @@ -227,7 +231,7 @@ static struct link_encoder *get_link_enc_used_by_link( .link_id = link->link_id, .ep_type = link->ep_type}; - for (i = 0; i < state->stream_count; i++) { + for (i = 0; i < MAX_PIPES; i++) { struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; if (assignment.valid == true && are_ep_ids_equal(&assignment.ep_id, &ep_id)) @@ -237,28 +241,18 @@ static struct link_encoder *get_link_enc_used_by_link( return link_enc; } /* Clear all link encoder assignments. 
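 *
 * Editorial sketch: the assignment table now owns a reference to each stream
 * (taken via dc_stream_retain() in add_link_enc_assignment()), so clearing an
 * entry must release it; with "a" as assumed shorthand for one table entry:
 *
 *	if (a->stream) {
 *		dc_stream_release(a->stream);
 *		a->stream = NULL;
 *	}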
*/ -static void clear_enc_assignments(struct dc_state *state) +static void clear_enc_assignments(const struct dc *dc, struct dc_state *state) { int i; - enum engine_id eng_id; - struct dc_stream_state *stream; for (i = 0; i < MAX_PIPES; i++) { state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid = false; - eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id; - stream = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream; - if (eng_id != ENGINE_ID_UNKNOWN) - state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_id - ENGINE_ID_DIGA] = eng_id; - if (stream) - stream->link_enc = NULL; + state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id = ENGINE_ID_UNKNOWN; + if (state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream != NULL) { + dc_stream_release(state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream); + state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream = NULL; + } } -} - -void link_enc_cfg_init( - struct dc *dc, - struct dc_state *state) -{ - int i; for (i = 0; i < dc->res_pool->res_cap->num_dig_link_enc; i++) { if (dc->res_pool->link_encoders[i]) @@ -266,8 +260,13 @@ void link_enc_cfg_init( else state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN; } +} - clear_enc_assignments(state); +void link_enc_cfg_init( + const struct dc *dc, + struct dc_state *state) +{ + clear_enc_assignments(dc, state); state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; } @@ -284,12 +283,9 @@ void link_enc_cfg_link_encs_assign( ASSERT(state->stream_count == stream_count); - if (stream_count == 0) - clear_enc_assignments(state); - /* Release DIG link encoder resources before running assignment algorithm. */ - for (i = 0; i < stream_count; i++) - dc->res_pool->funcs->link_enc_unassign(state, streams[i]); + for (i = 0; i < dc->current_state->stream_count; i++) + dc->res_pool->funcs->link_enc_unassign(state, dc->current_state->streams[i]); for (i = 0; i < MAX_PIPES; i++) ASSERT(state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid == false); @@ -544,6 +540,7 @@ bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state) uint8_t dig_stream_count = 0; int matching_stream_ptrs = 0; int eng_ids_per_ep_id[MAX_PIPES] = {0}; + int valid_bitmap = 0; /* (1) No. valid entries same as stream count. 
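	 *
	 * (Editor's note) Each numbered check below feeds one bit of the
	 * valid_bitmap reported via dm_error() on failure; decoding it is:
	 *
	 *	bool entries_ok     = valid_bitmap & BIT(0);
	 *	bool stream_ptrs_ok = valid_bitmap & BIT(1);
	 *	bool uniqueness_ok  = valid_bitmap & BIT(2);
	 *	bool avail_ok       = valid_bitmap & BIT(3);
	 *	bool streams_ok     = valid_bitmap & BIT(4);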
*/ for (i = 0; i < MAX_PIPES; i++) { @@ -625,5 +622,15 @@ bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state) is_valid = valid_entries && valid_stream_ptrs && valid_uniqueness && valid_avail && valid_streams; ASSERT(is_valid); + if (is_valid == false) { + valid_bitmap = + (valid_entries & 0x1) | + ((valid_stream_ptrs & 0x1) << 1) | + ((valid_uniqueness & 0x1) << 2) | + ((valid_avail & 0x1) << 3) | + ((valid_streams & 0x1) << 4); + dm_error("Invalid link encoder assignments: 0x%x\n", valid_bitmap); + } + return is_valid; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index c32fdccd4d92..8b6b035bfa9c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -734,6 +734,10 @@ static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *spli (*split_idx)++; split_pipe = split_pipe->top_pipe; } + + /* MPO window on right side of ODM split */ + if (split_pipe && split_pipe->prev_odm_pipe && !pipe_ctx->prev_odm_pipe) + (*split_idx)++; } else { /*Get odm split index*/ struct pipe_ctx *split_pipe = pipe_ctx->prev_odm_pipe; @@ -780,7 +784,11 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx) /* * Only the leftmost ODM pipe should be offset by a nonzero distance */ - if (!pipe_ctx->prev_odm_pipe || split_idx == split_count) { + if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->prev_odm_pipe && !pipe_ctx->prev_odm_pipe) { + /* MPO window on right side of ODM split */ + data->recout.x = stream->dst.x + (surf_clip.x - stream->dst.width/2) * + stream->dst.width / stream->src.width; + } else if (!pipe_ctx->prev_odm_pipe || split_idx == split_count) { data->recout.x = stream->dst.x; if (stream->src.x < surf_clip.x) data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width @@ -978,6 +986,8 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx) * stream->dst.height / stream->src.height; if (pipe_ctx->prev_odm_pipe && split_idx) ro_lb = data->h_active * split_idx - recout_full_x; + else if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->prev_odm_pipe) + ro_lb = data->h_active * split_idx - recout_full_x + data->recout.x; else ro_lb = data->recout.x - recout_full_x; ro_tb = data->recout.y - recout_full_y; @@ -1076,6 +1086,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) timing->v_border_top + timing->v_border_bottom; if (pipe_ctx->next_odm_pipe || pipe_ctx->prev_odm_pipe) pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx) + 1; + /* ODM + windows MPO, where window is on either right or left ODM half */ + else if (pipe_ctx->top_pipe && (pipe_ctx->top_pipe->next_odm_pipe || pipe_ctx->top_pipe->prev_odm_pipe)) + pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx->top_pipe) + 1; /* depends on h_active */ calculate_recout(pipe_ctx); @@ -1084,11 +1097,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) /* depends on scaling ratios and recout, does not calculate offset yet */ calculate_viewport_size(pipe_ctx); - /* Stopgap for validation of ODM + MPO on one side of screen case */ - if (pipe_ctx->plane_res.scl_data.viewport.height < 1 || - pipe_ctx->plane_res.scl_data.viewport.width < 1) - return false; - /* * LB calculations depend on vp size, h/v_active and scaling ratios * Setting line buffer pixel depth to 24bpp yields banding @@ -1437,23 +1445,54 @@ bool dc_add_plane_to_context( if (head_pipe != free_pipe) { tail_pipe = 
resource_get_tail_pipe(&context->res_ctx, head_pipe); ASSERT(tail_pipe); - free_pipe->stream_res.tg = tail_pipe->stream_res.tg; - free_pipe->stream_res.abm = tail_pipe->stream_res.abm; - free_pipe->stream_res.opp = tail_pipe->stream_res.opp; - free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc; - free_pipe->stream_res.audio = tail_pipe->stream_res.audio; - free_pipe->clock_source = tail_pipe->clock_source; - free_pipe->top_pipe = tail_pipe; - tail_pipe->bottom_pipe = free_pipe; - if (!free_pipe->next_odm_pipe && tail_pipe->next_odm_pipe && tail_pipe->next_odm_pipe->bottom_pipe) { - free_pipe->next_odm_pipe = tail_pipe->next_odm_pipe->bottom_pipe; - tail_pipe->next_odm_pipe->bottom_pipe->prev_odm_pipe = free_pipe; - } - if (!free_pipe->prev_odm_pipe && tail_pipe->prev_odm_pipe && tail_pipe->prev_odm_pipe->bottom_pipe) { - free_pipe->prev_odm_pipe = tail_pipe->prev_odm_pipe->bottom_pipe; - tail_pipe->prev_odm_pipe->bottom_pipe->next_odm_pipe = free_pipe; + + /* ODM + window MPO, where MPO window is on right half only */ + if (free_pipe->plane_state && + (free_pipe->plane_state->clip_rect.x >= free_pipe->stream->src.width/2) && + tail_pipe->next_odm_pipe) { + free_pipe->stream_res.tg = tail_pipe->next_odm_pipe->stream_res.tg; + free_pipe->stream_res.abm = tail_pipe->next_odm_pipe->stream_res.abm; + free_pipe->stream_res.opp = tail_pipe->next_odm_pipe->stream_res.opp; + free_pipe->stream_res.stream_enc = tail_pipe->next_odm_pipe->stream_res.stream_enc; + free_pipe->stream_res.audio = tail_pipe->next_odm_pipe->stream_res.audio; + free_pipe->clock_source = tail_pipe->next_odm_pipe->clock_source; + + free_pipe->top_pipe = tail_pipe->next_odm_pipe; + tail_pipe->next_odm_pipe->bottom_pipe = free_pipe; + } else { + free_pipe->stream_res.tg = tail_pipe->stream_res.tg; + free_pipe->stream_res.abm = tail_pipe->stream_res.abm; + free_pipe->stream_res.opp = tail_pipe->stream_res.opp; + free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc; + free_pipe->stream_res.audio = tail_pipe->stream_res.audio; + free_pipe->clock_source = tail_pipe->clock_source; + + free_pipe->top_pipe = tail_pipe; + tail_pipe->bottom_pipe = free_pipe; + + if (!free_pipe->next_odm_pipe && tail_pipe->next_odm_pipe && tail_pipe->next_odm_pipe->bottom_pipe) { + free_pipe->next_odm_pipe = tail_pipe->next_odm_pipe->bottom_pipe; + tail_pipe->next_odm_pipe->bottom_pipe->prev_odm_pipe = free_pipe; + } + if (!free_pipe->prev_odm_pipe && tail_pipe->prev_odm_pipe && tail_pipe->prev_odm_pipe->bottom_pipe) { + free_pipe->prev_odm_pipe = tail_pipe->prev_odm_pipe->bottom_pipe; + tail_pipe->prev_odm_pipe->bottom_pipe->next_odm_pipe = free_pipe; + } } } + + /* ODM + window MPO, where MPO window is on left half only */ + if (free_pipe->plane_state && + (free_pipe->plane_state->clip_rect.x + free_pipe->plane_state->clip_rect.width <= + free_pipe->stream->src.x + free_pipe->stream->src.width/2)) { + break; + } + /* ODM + window MPO, where MPO window is on right half only */ + if (free_pipe->plane_state && + (free_pipe->plane_state->clip_rect.x >= free_pipe->stream->src.width/2)) { + break; + } + head_pipe = head_pipe->next_odm_pipe; } /* assign new surfaces*/ @@ -1664,6 +1703,10 @@ bool dc_is_stream_unchanged( if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param) return false; + // Only Have Audio left to check whether it is same or not. 
This is a corner case for Tiled sinks + if (old_stream->audio_info.mode_count != stream->audio_info.mode_count) + return false; + return true; } @@ -2078,7 +2121,6 @@ static void mark_seamless_boot_stream( { struct dc_bios *dcb = dc->ctx->dc_bios; - /* TODO: Check Linux */ if (dc->config.allow_seamless_boot_optimization && !dcb->funcs->is_accelerated_mode(dcb)) { if (dc_validate_seamless_boot_timing(dc, stream->sink, &stream->timing)) @@ -2224,6 +2266,9 @@ void dc_resource_state_construct( struct dc_state *dst_ctx) { dst_ctx->clk_mgr = dc->clk_mgr; + + /* Initialise DIG link encoder resource tracking variables. */ + link_enc_cfg_init(dc, dst_ctx); } @@ -2252,16 +2297,6 @@ enum dc_status dc_validate_global_state( if (!new_ctx) return DC_ERROR_UNEXPECTED; -#if defined(CONFIG_DRM_AMD_DC_DCN) - - /* - * Update link encoder to stream assignment. - * TODO: Split out reason allocation from validation. - */ - if (dc->res_pool->funcs->link_encs_assign && fast_validate == false) - dc->res_pool->funcs->link_encs_assign( - dc, new_ctx, new_ctx->streams, new_ctx->stream_count); -#endif if (dc->res_pool->funcs->validate_global) { result = dc->res_pool->funcs->validate_global(dc, new_ctx); @@ -2313,6 +2348,16 @@ enum dc_status dc_validate_global_state( if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate)) result = DC_FAIL_BANDWIDTH_VALIDATE; +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* + * Only update link encoder to stream assignment after bandwidth validation passed. + * TODO: Split out assignment and validation. + */ + if (result == DC_OK && dc->res_pool->funcs->link_encs_assign && fast_validate == false) + dc->res_pool->funcs->link_encs_assign( + dc, new_ctx, new_ctx->streams, new_ctx->stream_count); +#endif + return result; } @@ -2506,17 +2551,7 @@ static void set_avi_info_frame( /* TODO : We should handle YCC quantization */ /* but we do not have matrix calculation */ - if (stream->qy_bit == 1) { - if (color_space == COLOR_SPACE_SRGB || - color_space == COLOR_SPACE_2020_RGB_FULLRANGE) - hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; - else if (color_space == COLOR_SPACE_SRGB_LIMITED || - color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) - hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; - else - hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; - } else - hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; + hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; ///VIC format = stream->timing.timing_3d_format; @@ -3126,3 +3161,57 @@ struct hpo_dp_link_encoder *resource_get_unused_hpo_dp_link_encoder( return enc; } #endif + +void reset_syncd_pipes_from_disabled_pipes(struct dc *dc, + struct dc_state *context) +{ + int i, j; + struct pipe_ctx *pipe_ctx_old, *pipe_ctx, *pipe_ctx_syncd; + + /* If pipe backend is reset, need to reset pipe syncd status */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe_ctx_old = &dc->current_state->res_ctx.pipe_ctx[i]; + pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (!pipe_ctx_old->stream) + continue; + + if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe) + continue; + + if (!pipe_ctx->stream || + pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) { + + /* Reset all the syncd pipes from the disabled pipe */ + for (j = 0; j < dc->res_pool->pipe_count; j++) { + pipe_ctx_syncd = &context->res_ctx.pipe_ctx[j]; + if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_syncd) == pipe_ctx_old->pipe_idx) || + !IS_PIPE_SYNCD_VALID(pipe_ctx_syncd)) + SET_PIPE_SYNCD_TO_PIPE(pipe_ctx_syncd, j); + } + } + } +} + +void 
check_syncd_pipes_for_disabled_master_pipe(struct dc *dc, + struct dc_state *context, + uint8_t disabled_master_pipe_idx) +{ + int i; + struct pipe_ctx *pipe_ctx, *pipe_ctx_check; + + pipe_ctx = &context->res_ctx.pipe_ctx[disabled_master_pipe_idx]; + if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx) != disabled_master_pipe_idx) || + !IS_PIPE_SYNCD_VALID(pipe_ctx)) + SET_PIPE_SYNCD_TO_PIPE(pipe_ctx, disabled_master_pipe_idx); + + /* for the pipe disabled, check if any slave pipe exists and assert */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe_ctx_check = &context->res_ctx.pipe_ctx[i]; + + if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_check) == disabled_master_pipe_idx) && + IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != disabled_master_pipe_idx)) + DC_ERR("DC: Failure: pipe_idx[%d] syncd with disabled master pipe_idx[%d]\n", + i, disabled_master_pipe_idx); + } +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c index a249a0e5edd0..4b5e4d8e7735 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c @@ -33,14 +33,6 @@ * Private functions ******************************************************************************/ -static void dc_sink_destruct(struct dc_sink *sink) -{ - if (sink->dc_container_id) { - kfree(sink->dc_container_id); - sink->dc_container_id = NULL; - } -} - static bool dc_sink_construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params) { @@ -75,7 +67,7 @@ void dc_sink_retain(struct dc_sink *sink) static void dc_sink_free(struct kref *kref) { struct dc_sink *sink = container_of(kref, struct dc_sink, refcount); - dc_sink_destruct(sink); + kfree(sink->dc_container_id); kfree(sink); } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3aac3f4a2852..18e59d635ca2 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -47,7 +47,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.160" +#define DC_VER "3.2.166" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -75,6 +75,16 @@ enum dc_plane_type { DC_PLANE_TYPE_DCN_UNIVERSAL, }; +// Sizes defined as multiples of 64KB +enum det_size { + DET_SIZE_DEFAULT = 0, + DET_SIZE_192KB = 3, + DET_SIZE_256KB = 4, + DET_SIZE_320KB = 5, + DET_SIZE_384KB = 6 +}; + + struct dc_plane_cap { enum dc_plane_type type; uint32_t blends_with_above : 1; @@ -187,7 +197,9 @@ struct dc_caps { struct dc_color_caps color; #if defined(CONFIG_DRM_AMD_DC_DCN) bool dp_hpo; + bool hdmi_frl_pcon_support; #endif + bool edp_dsc_support; bool vbios_lttpr_aware; bool vbios_lttpr_enable; }; @@ -332,6 +344,7 @@ struct dc_config { uint8_t vblank_alignment_max_frame_time_diff; bool is_asymmetric_memory; bool is_single_rank_dimm; + bool use_pipe_ctx_sync_logic; }; enum visual_confirm { @@ -508,7 +521,9 @@ union dpia_debug_options { uint32_t disable_dpia:1; uint32_t force_non_lttpr:1; uint32_t extend_aux_rd_interval:1; - uint32_t reserved:29; + uint32_t disable_mst_dsc_work_around:1; + uint32_t hpd_delay_in_ms:12; + uint32_t reserved:16; } bits; uint32_t raw; }; @@ -573,6 +588,8 @@ struct dc_debug_options { bool native422_support; bool disable_dsc; enum visual_confirm visual_confirm; + int visual_confirm_rect_height; + bool sanity_checks; bool max_disp_clk; bool surface_trace; @@ -667,11 +684,14 @@ struct dc_debug_options { bool validate_dml_output; bool enable_dmcub_surface_flip; bool usbc_combo_phy_reset_wa; + bool disable_dsc_edp; 
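+	/* Hedged editorial note: a non-zero force_dsc_edp_policy below makes
+	 * decide_edp_link_settings_with_dsc() minimize lane count (raising link
+	 * rate first), while zero keeps the default minimum-link-rate search;
+	 * a (hypothetical) debug hook could set:
+	 *
+	 *	dc->debug.force_dsc_edp_policy = 1;
+	 */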
+ unsigned int force_dsc_edp_policy; bool enable_dram_clock_change_one_display_vactive; #if defined(CONFIG_DRM_AMD_DC_DCN) /* TODO - remove once tested */ bool legacy_dp2_lt; bool set_mst_en_for_sst; + bool force_dp2_lt_fallback_method; #endif union mem_low_power_enable_options enable_mem_low_power; union root_clock_optimization_options root_clock_optimization; @@ -684,11 +704,14 @@ struct dc_debug_options { /* FEC/PSR1 sequence enable delay in 100us */ uint8_t fec_enable_delay_in100us; bool enable_driver_sequence_debug; + enum det_size crb_alloc_policy; + int crb_alloc_policy_min_disp_count; #if defined(CONFIG_DRM_AMD_DC_DCN) bool disable_z10; bool enable_sw_cntl_psr; union dpia_debug_options dpia_debug; #endif + bool apply_vendor_specific_lttpr_wa; }; struct gpu_info_soc_bounding_box_v1_0; @@ -1289,6 +1312,11 @@ struct dc_sink_dsc_caps { // 'true' if these are virtual DPCD's DSC caps (immediately upstream of sink in MST topology), // 'false' if they are sink's DSC caps bool is_virtual_dpcd_dsc; +#if defined(CONFIG_DRM_AMD_DC_DCN) + // 'true' if MST topology supports DSC passthrough for sink + // 'false' if MST topology does not support DSC passthrough + bool is_dsc_passthrough_supported; +#endif struct dsc_dec_dpcd_caps dsc_dec_caps; }; @@ -1404,6 +1432,9 @@ void dc_unlock_memory_clock_frequency(struct dc *dc); */ void dc_lock_memory_clock_frequency(struct dc *dc); +/* set soft max for memclk, to be used for AC/DC switching clock limitations */ +void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable); + /* cleanup on driver unload */ void dc_hardware_release(struct dc *dc); diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 360f3199ea6f..541376fabbef 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -115,13 +115,44 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv) } } +void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv) +{ + struct dmub_srv *dmub = dmub_srv->dmub; + struct dc_context *dc_ctx = dmub_srv->ctx; + enum dmub_status status = DMUB_STATUS_OK; + + status = dmub_srv_clear_inbox0_ack(dmub); + if (status != DMUB_STATUS_OK) { + DC_ERROR("Error clearing INBOX0 ack: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dmub_srv); + } +} + +void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv) +{ + struct dmub_srv *dmub = dmub_srv->dmub; + struct dc_context *dc_ctx = dmub_srv->ctx; + enum dmub_status status = DMUB_STATUS_OK; + + status = dmub_srv_wait_for_inbox0_ack(dmub, 100000); + if (status != DMUB_STATUS_OK) { + DC_ERROR("Error waiting for INBOX0 HW Lock Ack\n"); + dc_dmub_srv_log_diagnostic_data(dmub_srv); + } +} + void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_data_register data) { struct dmub_srv *dmub = dmub_srv->dmub; - if (dmub->hw_funcs.send_inbox0_cmd) - dmub->hw_funcs.send_inbox0_cmd(dmub, data); - // TODO: Add wait command -- poll register for ACK + struct dc_context *dc_ctx = dmub_srv->ctx; + enum dmub_status status = DMUB_STATUS_OK; + + status = dmub_srv_send_inbox0_cmd(dmub, data); + if (status != DMUB_STATUS_OK) { + DC_ERROR("Error sending INBOX0 cmd\n"); + dc_dmub_srv_log_diagnostic_data(dmub_srv); + } } bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd) diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index 3e35eee7188c..7e4e2ec5915d 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -68,6 +68,8 @@ bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_bu void dc_dmub_trace_event_control(struct dc *dc, bool enable); +void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv); +void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv); void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_data_register data); bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *dmub_oca); diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index e68e9a86a4d9..353dac420f34 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -378,7 +378,14 @@ enum dpcd_downstream_port_detailed_type { union dwnstream_port_caps_byte2 { struct { uint8_t MAX_BITS_PER_COLOR_COMPONENT:2; +#if defined(CONFIG_DRM_AMD_DC_DCN) + uint8_t MAX_ENCODED_LINK_BW_SUPPORT:3; + uint8_t SOURCE_CONTROL_MODE_SUPPORT:1; + uint8_t CONCURRENT_LINK_BRING_UP_SEQ_SUPPORT:1; + uint8_t RESERVED:1; +#else uint8_t RESERVED:6; +#endif } bits; uint8_t raw; }; @@ -416,6 +423,30 @@ union dwnstream_port_caps_byte3_hdmi { uint8_t raw; }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +union hdmi_sink_encoded_link_bw_support { + struct { + uint8_t HDMI_SINK_ENCODED_LINK_BW_SUPPORT:3; + uint8_t RESERVED:5; + } bits; + uint8_t raw; +}; + +union hdmi_encoded_link_bw { + struct { + uint8_t FRL_MODE:1; // Bit 0 + uint8_t BW_9Gbps:1; + uint8_t BW_18Gbps:1; + uint8_t BW_24Gbps:1; + uint8_t BW_32Gbps:1; + uint8_t BW_40Gbps:1; + uint8_t BW_48Gbps:1; + uint8_t RESERVED:1; // Bit 7 + } bits; + uint8_t raw; +}; +#endif + /*4-byte structure for detailed capabilities of a down-stream port (DP-to-TMDS converter).*/ union dwnstream_portxcaps { @@ -852,6 +883,15 @@ struct psr_caps { unsigned char psr_version; unsigned int psr_rfb_setup_time; bool psr_exit_link_training_required; + unsigned char edp_revision; + unsigned char support_ver; + bool su_granularity_required; + bool y_coordinate_required; + uint8_t su_y_granularity; + bool alpm_cap; + bool standby_support; + uint8_t rate_control_caps; + unsigned int psr_power_opt_flag; }; /* Length of router topology ID read from DPCD in bytes. */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 52355fe6994c..eac34f591a3f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -741,6 +741,9 @@ struct dc_dsc_config { uint32_t version_minor; /* DSC minor version. Full version is formed as 1.version_minor. */ bool ycbcr422_simple; /* Tell DSC engine to convert YCbCr 4:2:2 to 'YCbCr 4:2:2 simple'. */ int32_t rc_buffer_size; /* DSC RC buffer block size in bytes */ +#if defined(CONFIG_DRM_AMD_DC_DCN) + bool is_frl; /* indicate if DSC is applied based on HDMI FRL sink's capability */ +#endif bool is_dp; /* indicate if DSC is applied based on DP's capability */ }; struct dc_crtc_timing { diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 180ecd860296..4ebba641538b 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -113,6 +113,7 @@ struct dc_link { * DIG encoder. */ bool is_dig_mapping_flexible; bool hpd_status; /* HPD status of link without physical HPD pin. 
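	 * For links without a physical pin, detection now also consults
	 * is_hpd_pending (added below); sketch of the updated check in
	 * dc_link_detect_sink():
	 *
	 *	if (link->is_hpd_pending || !link->hpd_status)
	 *		*type = dc_connection_none;
	 *	else
	 *		*type = dc_connection_single;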
*/ + bool is_hpd_pending; /* Indicates a new received hpd */ bool edp_sink_present; @@ -185,12 +186,18 @@ struct dc_link { /* Drive settings read from integrated info table */ struct dc_lane_settings bios_forced_drive_settings; + /* Vendor specific LTTPR workaround variables */ + uint8_t vendor_specific_lttpr_link_rate_wa; + bool apply_vendor_specific_lttpr_link_rate_wa; + /* MST record stream using this link */ struct link_flags { bool dp_keep_receiver_powered; bool dp_skip_DID2; bool dp_skip_reset_segment; bool dp_mot_reset_segment; + /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */ + bool dpia_mst_dsc_always_on; } wa_flags; struct link_mst_stream_allocation_table mst_stream_alloc_table; @@ -224,6 +231,8 @@ static inline void get_edp_links(const struct dc *dc, *edp_num = 0; for (i = 0; i < dc->link_count; i++) { // report any eDP links, even unconnected DDI's + if (!dc->links[i]) + continue; if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) { edp_links[*edp_num] = dc->links[i]; if (++(*edp_num) == MAX_NUM_EDP) @@ -287,6 +296,10 @@ bool dc_link_setup_psr(struct dc_link *dc_link, void dc_link_get_psr_residency(const struct dc_link *link, uint32_t *residency); +void dc_link_blank_all_dp_displays(struct dc *dc); + +void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init); + /* Request DC to detect if there is a Panel connected. * boot - If this call is during initial boot. * Return false for any type of detection failure or MST detection @@ -298,7 +311,7 @@ enum dc_detect_reason { DETECT_REASON_HPD, DETECT_REASON_HPDRX, DETECT_REASON_FALLBACK, - DETECT_REASON_RETRAIN + DETECT_REASON_RETRAIN, }; bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason); @@ -438,6 +451,7 @@ bool dc_link_is_fec_supported(const struct dc_link *link); bool dc_link_should_enable_fec(const struct dc_link *link); #if defined(CONFIG_DRM_AMD_DC_DCN) +uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw); enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link); #endif #endif /* DC_LINK_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 388457ffc0a8..0285a4b38d05 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -430,6 +430,7 @@ struct dc_dongle_caps { uint32_t dp_hdmi_max_bpc; uint32_t dp_hdmi_max_pixel_clk_in_khz; #if defined(CONFIG_DRM_AMD_DC_DCN) + uint32_t dp_hdmi_frl_max_link_bw_in_kbps; struct dc_dongle_dfp_cap_ext dfp_cap_ext; #endif }; @@ -950,6 +951,7 @@ enum dc_gpu_mem_alloc_type { enum dc_psr_version { DC_PSR_VERSION_1 = 0, + DC_PSR_VERSION_SU_1 = 1, DC_PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF, }; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c index 27218ede150a..70eaac017624 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c @@ -67,9 +67,6 @@ static void write_indirect_azalia_reg(struct audio *audio, /* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */ REG_SET(AZALIA_F0_CODEC_ENDPOINT_DATA, 0, AZALIA_ENDPOINT_REG_DATA, reg_data); - - DC_LOG_HW_AUDIO("AUDIO:write_indirect_azalia_reg: index: %u data: %u\n", - reg_index, reg_data); } static uint32_t read_indirect_azalia_reg(struct audio *audio, uint32_t reg_index) @@ -85,9 +82,6 @@ static uint32_t read_indirect_azalia_reg(struct audio *audio, uint32_t reg_index /* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */ value = 
REG_READ(AZALIA_F0_CODEC_ENDPOINT_DATA); - DC_LOG_HW_AUDIO("AUDIO:read_indirect_azalia_reg: index: %u data: %u\n", - reg_index, value); - return value; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h index 5622d5e32d81..dbd2cfed0603 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h @@ -113,6 +113,7 @@ struct dce_audio_shift { uint8_t DCCG_AUDIO_DTO2_USE_512FBR_DTO; uint32_t DCCG_AUDIO_DTO0_USE_512FBR_DTO; uint32_t DCCG_AUDIO_DTO1_USE_512FBR_DTO; + uint32_t CLOCK_GATING_DISABLE; }; struct dce_audio_mask { @@ -132,6 +133,7 @@ struct dce_audio_mask { uint32_t DCCG_AUDIO_DTO2_USE_512FBR_DTO; uint32_t DCCG_AUDIO_DTO0_USE_512FBR_DTO; uint32_t DCCG_AUDIO_DTO1_USE_512FBR_DTO; + uint32_t CLOCK_GATING_DISABLE; }; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index 1e77ffee71b3..f1c61d5aee6c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -788,8 +788,9 @@ static bool dce110_link_encoder_validate_hdmi_output( crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) return false; - if (!enc110->base.features.flags.bits.HDMI_6GB_EN && - adjusted_pix_clk_khz >= 300000) + if ((!enc110->base.features.flags.bits.HDMI_6GB_EN || + enc110->base.ctx->dc->debug.hdmi20_disable) && + adjusted_pix_clk_khz >= 300000) return false; if (enc110->base.ctx->dc->debug.hdmi20_disable && crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c index 9baf8ca0a920..b1b2e3c6f379 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c @@ -56,8 +56,11 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_cmd_lock_hw hw_lock_cmd) { union dmub_inbox0_data_register data = { 0 }; + data.inbox0_cmd_lock_hw = hw_lock_cmd; + dc_dmub_srv_clear_inbox0_ack(dmub_srv); dc_dmub_srv_send_inbox0_cmd(dmub_srv, data); + dc_dmub_srv_wait_for_inbox0_ack(dmub_srv); } bool should_use_dmub_lock(struct dc_link *link) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c index 90eb8eedacf2..87ed48d5530d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -230,7 +230,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_ /** * Set PSR power optimization flags. 
*/ -static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt) +static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt, uint8_t panel_inst) { union dmub_rb_cmd cmd; struct dc_context *dc = dmub->ctx; @@ -239,7 +239,9 @@ static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt cmd.psr_set_power_opt.header.type = DMUB_CMD__PSR; cmd.psr_set_power_opt.header.sub_type = DMUB_CMD__SET_PSR_POWER_OPT; cmd.psr_set_power_opt.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_power_opt_data); + cmd.psr_set_power_opt.psr_set_power_opt_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; cmd.psr_set_power_opt.psr_set_power_opt_data.power_opt = power_opt; + cmd.psr_set_power_opt.psr_set_power_opt_data.panel_inst = panel_inst; dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); dc_dmub_srv_cmd_execute(dc->dmub_srv); @@ -327,6 +329,16 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub, copy_settings_data->fec_enable_delay_in100us = link->dc->debug.fec_enable_delay_in100us; copy_settings_data->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; copy_settings_data->panel_inst = panel_inst; + copy_settings_data->dsc_enable_status = (pipe_ctx->stream->timing.flags.DSC == 1); + + if (link->fec_state == dc_link_fec_enabled && + (!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1, + sizeof(link->dpcd_caps.sink_dev_id_str)) || + !memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_2, + sizeof(link->dpcd_caps.sink_dev_id_str)))) + copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 1; + else + copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 0; dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); dc_dmub_srv_cmd_execute(dc->dmub_srv); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h index 5dbd479660f1..01acc01cc191 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h @@ -46,7 +46,7 @@ struct dmub_psr_funcs { void (*psr_force_static)(struct dmub_psr *dmub, uint8_t panel_inst); void (*psr_get_residency)(struct dmub_psr *dmub, uint32_t *residency, uint8_t panel_inst); - void (*psr_set_power_opt)(struct dmub_psr *dmub, unsigned int power_opt); + void (*psr_set_power_opt)(struct dmub_psr *dmub, unsigned int power_opt, uint8_t panel_inst); }; struct dmub_psr *dmub_psr_create(struct dc_context *ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 24e47df526f6..f1593186e964 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -69,6 +69,8 @@ #include "dcn10/dcn10_hw_sequencer.h" +#include "dce110_hw_sequencer.h" + #define GAMMA_HW_POINTS_NUM 256 /* @@ -1564,6 +1566,10 @@ static enum dc_status apply_single_controller_ctx_to_hw( &pipe_ctx->stream->audio_info); } + /* make sure no pipes syncd to the pipe being enabled */ + if (!pipe_ctx->stream->apply_seamless_boot_optimization && dc->config.use_pipe_ctx_sync_logic) + check_syncd_pipes_for_disabled_master_pipe(dc, context, pipe_ctx->pipe_idx); + #if defined(CONFIG_DRM_AMD_DC_DCN) /* DCN3.1 FPGA Workaround * Need to enable HPO DP Stream Encoder before setting OTG master enable. 
@@ -1602,6 +1608,11 @@ static enum dc_status apply_single_controller_ctx_to_hw( pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.tg->inst); + if (dc_is_dp_signal(pipe_ctx->stream->signal) && + pipe_ctx->stream_res.stream_enc->funcs->reset_fifo) + pipe_ctx->stream_res.stream_enc->funcs->reset_fifo( + pipe_ctx->stream_res.stream_enc); + if (dc_is_dp_signal(pipe_ctx->stream->signal)) dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG); @@ -1655,30 +1666,12 @@ static enum dc_status apply_single_controller_ctx_to_hw( static void power_down_encoders(struct dc *dc) { - int i, j; + int i; for (i = 0; i < dc->link_count; i++) { enum signal_type signal = dc->links[i]->connector_signal; - if ((signal == SIGNAL_TYPE_EDP) || - (signal == SIGNAL_TYPE_DISPLAY_PORT)) { - if (dc->links[i]->link_enc->funcs->get_dig_frontend && - dc->links[i]->link_enc->funcs->is_dig_enabled(dc->links[i]->link_enc)) { - unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend( - dc->links[i]->link_enc); - - for (j = 0; j < dc->res_pool->stream_enc_count; j++) { - if (fe == dc->res_pool->stream_enc[j]->id) { - dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i], - dc->res_pool->stream_enc[j]); - break; - } - } - } - - if (!dc->links[i]->wa_flags.dp_keep_receiver_powered) - dp_receiver_power_ctrl(dc->links[i], false); - } + dc_link_blank_dp_stream(dc->links[i], false); if (signal != SIGNAL_TYPE_EDP) signal = SIGNAL_TYPE_NONE; @@ -1805,7 +1798,6 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) struct dc_stream_state *edp_streams[MAX_NUM_EDP]; struct dc_link *edp_link_with_sink = NULL; struct dc_link *edp_link = NULL; - struct dc_stream_state *edp_stream = NULL; struct dce_hwseq *hws = dc->hwseq; int edp_with_sink_num; int edp_num; @@ -1826,27 +1818,29 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) get_edp_streams(context, edp_streams, &edp_stream_num); // Check fastboot support, disable on DCE8 because of blank screens - if (edp_num && dc->ctx->dce_version != DCE_VERSION_8_0 && + if (edp_num && edp_stream_num && dc->ctx->dce_version != DCE_VERSION_8_0 && dc->ctx->dce_version != DCE_VERSION_8_1 && dc->ctx->dce_version != DCE_VERSION_8_3) { for (i = 0; i < edp_num; i++) { edp_link = edp_links[i]; + if (edp_link != edp_streams[0]->link) + continue; // enable fastboot if backend is enabled on eDP - if (edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc)) { - /* Set optimization flag on eDP stream*/ - if (edp_stream_num && edp_link->link_status.link_active) { - edp_stream = edp_streams[0]; - can_apply_edp_fast_boot = !is_edp_ilr_optimization_required(edp_stream->link, &edp_stream->timing); - edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot; - if (can_apply_edp_fast_boot) - DC_LOG_EVENT_LINK_TRAINING("eDP fast boot disabled to optimize link rate\n"); - - break; - } + if (edp_link->link_enc->funcs->is_dig_enabled && + edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && + edp_link->link_status.link_active) { + struct dc_stream_state *edp_stream = edp_streams[0]; + + can_apply_edp_fast_boot = !is_edp_ilr_optimization_required(edp_stream->link, &edp_stream->timing); + edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot; + if (can_apply_edp_fast_boot) + DC_LOG_EVENT_LINK_TRAINING("eDP fast boot disabled to optimize link rate\n"); + + break; } } // We are trying to enable eDP, don't power down VDD - if (edp_stream_num) + if (can_apply_edp_fast_boot) keep_edp_vdd_on = true; 
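/* Net effect of the fastboot rework above: the loop now only considers
	 * the eDP link that actually drives edp_streams[0], the is_dig_enabled
	 * hook is checked for NULL before being called, link_status.link_active
	 * joins the gating condition, and VDD is kept on only when the fast-boot
	 * optimization is really applied (previously any eDP stream kept it on). */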
} @@ -2307,6 +2301,10 @@ enum dc_status dce110_apply_ctx_to_hw( enum dc_status status; int i; + /* reset syncd pipes from disabled pipes */ + if (dc->config.use_pipe_ctx_sync_logic) + reset_syncd_pipes_from_disabled_pipes(dc, context); + /* Reset old context */ /* look up the targets that have been removed since last commit */ hws->funcs.reset_hw_ctx_wrap(dc, context); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index 91fdfcd8a14e..db7ca4b0cdb9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -119,14 +119,6 @@ void dpp_read_state(struct dpp *dpp_base, } } -/* Program gamut remap in bypass mode */ -void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp) -{ - REG_SET(CM_GAMUT_REMAP_CONTROL, 0, - CM_GAMUT_REMAP_MODE, 0); - /* Gamut remap in bypass */ -} - #define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19)) bool dpp1_get_optimal_number_of_taps( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index 44293d66b46b..f607a0e28f14 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -39,6 +39,10 @@ #define BLACK_OFFSET_RGB_Y 0x0 #define BLACK_OFFSET_CBCR 0x8000 +#define VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT 3 +#define VISUAL_CONFIRM_RECT_HEIGHT_MIN 1 +#define VISUAL_CONFIRM_RECT_HEIGHT_MAX 10 + #define REG(reg)\ dpp->tf_regs->reg @@ -85,51 +89,6 @@ enum dscl_mode_sel { DSCL_MODE_DSCL_BYPASS = 6 }; -static void dpp1_dscl_set_overscan( - struct dcn10_dpp *dpp, - const struct scaler_data *data) -{ - uint32_t left = data->recout.x; - uint32_t top = data->recout.y; - - int right = data->h_active - data->recout.x - data->recout.width; - int bottom = data->v_active - data->recout.y - data->recout.height; - - if (right < 0) { - BREAK_TO_DEBUGGER(); - right = 0; - } - if (bottom < 0) { - BREAK_TO_DEBUGGER(); - bottom = 0; - } - - REG_SET_2(DSCL_EXT_OVERSCAN_LEFT_RIGHT, 0, - EXT_OVERSCAN_LEFT, left, - EXT_OVERSCAN_RIGHT, right); - - REG_SET_2(DSCL_EXT_OVERSCAN_TOP_BOTTOM, 0, - EXT_OVERSCAN_BOTTOM, bottom, - EXT_OVERSCAN_TOP, top); -} - -static void dpp1_dscl_set_otg_blank( - struct dcn10_dpp *dpp, const struct scaler_data *data) -{ - uint32_t h_blank_start = data->h_active; - uint32_t h_blank_end = 0; - uint32_t v_blank_start = data->v_active; - uint32_t v_blank_end = 0; - - REG_SET_2(OTG_H_BLANK, 0, - OTG_H_BLANK_START, h_blank_start, - OTG_H_BLANK_END, h_blank_end); - - REG_SET_2(OTG_V_BLANK, 0, - OTG_V_BLANK_START, v_blank_start, - OTG_V_BLANK_END, v_blank_end); -} - static int dpp1_dscl_get_pixel_depth_val(enum lb_pixel_depth depth) { if (depth == LB_PIXEL_DEPTH_30BPP) @@ -551,58 +510,6 @@ static enum lb_memory_config dpp1_dscl_find_lb_memory_config(struct dcn10_dpp *d return LB_MEMORY_CONFIG_0; } -void dpp1_dscl_set_scaler_auto_scale( - struct dpp *dpp_base, - const struct scaler_data *scl_data) -{ - enum lb_memory_config lb_config; - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - enum dscl_mode_sel dscl_mode = dpp1_dscl_get_dscl_mode( - dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale); - bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN - && scl_data->format <= PIXEL_FORMAT_VIDEO_END; - - dpp1_dscl_set_overscan(dpp, scl_data); - - dpp1_dscl_set_otg_blank(dpp, scl_data); - - REG_UPDATE(SCL_MODE, DSCL_MODE, dscl_mode); - - if (dscl_mode == DSCL_MODE_DSCL_BYPASS) - return; - - lb_config = 
dpp1_dscl_find_lb_memory_config(dpp, scl_data); - dpp1_dscl_set_lb(dpp, &scl_data->lb_params, lb_config); - - if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS) - return; - - /* TODO: v_min */ - REG_SET_3(DSCL_AUTOCAL, 0, - AUTOCAL_MODE, AUTOCAL_MODE_AUTOSCALE, - AUTOCAL_NUM_PIPE, 0, - AUTOCAL_PIPE_ID, 0); - - /* Black offsets */ - if (ycbcr) - REG_SET_2(SCL_BLACK_OFFSET, 0, - SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y, - SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_CBCR); - else - - REG_SET_2(SCL_BLACK_OFFSET, 0, - SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y, - SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_RGB_Y); - - REG_SET_4(SCL_TAP_CONTROL, 0, - SCL_V_NUM_TAPS, scl_data->taps.v_taps - 1, - SCL_H_NUM_TAPS, scl_data->taps.h_taps - 1, - SCL_V_NUM_TAPS_C, scl_data->taps.v_taps_c - 1, - SCL_H_NUM_TAPS_C, scl_data->taps.h_taps_c - 1); - - dpp1_dscl_set_scl_filter(dpp, scl_data, ycbcr); -} - static void dpp1_dscl_set_manual_ratio_init( struct dcn10_dpp *dpp, const struct scaler_data *data) @@ -685,9 +592,17 @@ static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp, const struct rect *recout) { int visual_confirm_on = 0; + unsigned short visual_confirm_rect_height = VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT; + if (dpp->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) visual_confirm_on = 1; + /* Check bounds to ensure the VC bar height was set to a sane value */ + if ((dpp->base.ctx->dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_RECT_HEIGHT_MIN) && + (dpp->base.ctx->dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_RECT_HEIGHT_MAX)) { + visual_confirm_rect_height = dpp->base.ctx->dc->debug.visual_confirm_rect_height; + } + REG_SET_2(RECOUT_START, 0, /* First pixel of RECOUT in the active OTG area */ RECOUT_START_X, recout->x, @@ -699,7 +614,7 @@ static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp, RECOUT_WIDTH, recout->width, /* Number of RECOUT vertical lines */ RECOUT_HEIGHT, recout->height - - visual_confirm_on * 2 * (dpp->base.inst + 1)); + - visual_confirm_on * 2 * (dpp->base.inst + visual_confirm_rect_height)); } /** diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 0b788d794fb3..2d470f524367 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -77,9 +77,9 @@ #define PGFSM_POWER_ON 0 #define PGFSM_POWER_OFF 2 -void print_microsec(struct dc_context *dc_ctx, - struct dc_log_buffer_ctx *log_ctx, - uint32_t ref_cycle) +static void print_microsec(struct dc_context *dc_ctx, + struct dc_log_buffer_ctx *log_ctx, + uint32_t ref_cycle) { const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000; static const unsigned int frac = 1000; @@ -132,7 +132,8 @@ static void log_mpc_crc(struct dc *dc, REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G)); } -void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx) +static void dcn10_log_hubbub_state(struct dc *dc, + struct dc_log_buffer_ctx *log_ctx) { struct dc_context *dc_ctx = dc->ctx; struct dcn_hubbub_wm wm; @@ -1362,11 +1363,48 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context) tg->funcs->tg_init(tg); } + + /* Power gate DSCs */ + if (hws->funcs.dsc_pg_control != NULL) { + uint32_t num_opps = 0; + uint32_t opp_id_src0 = OPP_ID_INVALID; + uint32_t opp_id_src1 = OPP_ID_INVALID; + + // Step 1: To find out which OPTC is running & OPTC DSC is ON + for (i = 0; i < 
dc->res_pool->res_cap->num_timing_generator; i++) { + uint32_t optc_dsc_state = 0; + struct timing_generator *tg = dc->res_pool->timing_generators[i]; + + if (tg->funcs->is_tg_enabled(tg)) { + if (tg->funcs->get_dsc_status) + tg->funcs->get_dsc_status(tg, &optc_dsc_state); + // Only one OPTC with DSC is ON, so if we got one result, we would exit this block. + // non-zero value is DSC enabled + if (optc_dsc_state != 0) { + tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1); + break; + } + } + } + + // Step 2: To power down DSC but skip DSC of running OPTC + for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) { + struct dcn_dsc_state s = {0}; + + dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s); + + if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) && + s.dsc_clock_en && s.dsc_fw_en) + continue; + + hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false); + } + } } void dcn10_init_hw(struct dc *dc) { - int i, j; + int i; struct abm *abm = dc->res_pool->abm; struct dmcu *dmcu = dc->res_pool->dmcu; struct dce_hwseq *hws = dc->hwseq; @@ -1468,43 +1506,8 @@ void dcn10_init_hw(struct dc *dc) dmub_enable_outbox_notification(dc); /* we want to turn off all dp displays before doing detection */ - if (dc->config.power_down_display_on_boot) { - uint8_t dpcd_power_state = '\0'; - enum dc_status status = DC_ERROR_UNEXPECTED; - - for (i = 0; i < dc->link_count; i++) { - if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) - continue; - - /* DP 2.0 requires that LTTPR Caps be read first */ - dp_retrieve_lttpr_cap(dc->links[i]); - - /* - * If any of the displays are lit up turn them off. - * The reason is that some MST hubs cannot be turned off - * completely until we tell them to do so. - * If not turned off, then displays connected to MST hub - * won't light up. 
- */ - status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, - &dpcd_power_state, sizeof(dpcd_power_state)); - if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) { - /* blank dp stream before power off receiver*/ - if (dc->links[i]->link_enc->funcs->get_dig_frontend) { - unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc); - - for (j = 0; j < dc->res_pool->stream_enc_count; j++) { - if (fe == dc->res_pool->stream_enc[j]->id) { - dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i], - dc->res_pool->stream_enc[j]); - break; - } - } - } - dp_receiver_power_ctrl(dc->links[i], false); - } - } - } + if (dc->config.power_down_display_on_boot) + dc_link_blank_all_dp_displays(dc); /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which @@ -1637,7 +1640,7 @@ void dcn10_reset_hw_ctx_wrap( dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); if (hws->funcs.enable_stream_gating) - hws->funcs.enable_stream_gating(dc, pipe_ctx); + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); if (old_clk) old_clk->funcs->cs_power_down(old_clk); } @@ -1970,10 +1973,9 @@ static bool wait_for_reset_trigger_to_occur( return rc; } -uint64_t reduceSizeAndFraction( - uint64_t *numerator, - uint64_t *denominator, - bool checkUint32Bounary) +static uint64_t reduceSizeAndFraction(uint64_t *numerator, + uint64_t *denominator, + bool checkUint32Bounary) { int i; bool ret = checkUint32Bounary == false; @@ -2021,7 +2023,7 @@ uint64_t reduceSizeAndFraction( return ret; } -bool is_low_refresh_rate(struct pipe_ctx *pipe) +static bool is_low_refresh_rate(struct pipe_ctx *pipe) { uint32_t master_pipe_refresh_rate = pipe->stream->timing.pix_clk_100hz * 100 / @@ -2030,7 +2032,8 @@ bool is_low_refresh_rate(struct pipe_ctx *pipe) return master_pipe_refresh_rate <= 30; } -uint8_t get_clock_divider(struct pipe_ctx *pipe, bool account_low_refresh_rate) +static uint8_t get_clock_divider(struct pipe_ctx *pipe, + bool account_low_refresh_rate) { uint32_t clock_divider = 1; uint32_t numpipes = 1; @@ -2050,10 +2053,8 @@ uint8_t get_clock_divider(struct pipe_ctx *pipe, bool account_low_refresh_rate) return clock_divider; } -int dcn10_align_pixel_clocks( - struct dc *dc, - int group_size, - struct pipe_ctx *grouped_pipes[]) +static int dcn10_align_pixel_clocks(struct dc *dc, int group_size, + struct pipe_ctx *grouped_pipes[]) { struct dc_context *dc_ctx = dc->ctx; int i, master = -1, embedded = -1; @@ -2342,7 +2343,7 @@ static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1, } -void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp) +static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp) { struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); struct vm_system_aperture_param apt = {0}; @@ -2624,7 +2625,7 @@ static void dcn10_update_dchubp_dpp( /* new calculated dispclk, dppclk are stored in * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz. - * dcn_validate_bandwidth compute new dispclk, dppclk. + * dcn10_validate_bandwidth compute new dispclk, dppclk. * dispclk will put in use after optimize_bandwidth when * ramp_up_dispclk_with_dpp is called. * there are two places for dppclk be put in use. One location @@ -2638,7 +2639,7 @@ static void dcn10_update_dchubp_dpp( * for example, eDP + external dp, change resolution of DP from * 1920x1080x144hz to 1280x960x60hz. 
* before change: dispclk = 337889 dppclk = 337889 - * change mode, dcn_validate_bandwidth calculate + * change mode, dcn10_validate_bandwidth calculate * dispclk = 143122 dppclk = 143122 * update_dchubp_dpp be executed before dispclk be updated, * dispclk = 337889, but dppclk use new value dispclk /2 = diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c index 2dc4b4e4ba02..f4b34c110eae 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c @@ -646,8 +646,9 @@ static bool dcn10_link_encoder_validate_hdmi_output( crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) return false; - if (!enc10->base.features.flags.bits.HDMI_6GB_EN && - adjusted_pix_clk_100hz >= 3000000) + if ((!enc10->base.features.flags.bits.HDMI_6GB_EN || + enc10->base.ctx->dc->debug.hdmi20_disable) && + adjusted_pix_clk_100hz >= 3000000) return false; if (enc10->base.ctx->dc->debug.hdmi20_disable && crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c index d54d731415d7..2c409356f512 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c @@ -348,36 +348,6 @@ void opp1_program_stereo( */ } -void opp1_program_oppbuf( - struct output_pixel_processor *opp, - struct oppbuf_params *oppbuf) -{ - struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp); - - /* Program the oppbuf active width to be the frame width from mpc */ - REG_UPDATE(OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, oppbuf->active_width); - - /* Specifies the number of segments in multi-segment mode (DP-MSO operation) - * description "In 1/2/4 segment mode, specifies the horizontal active width in pixels of the display panel. - * In 4 segment split left/right mode, specifies the horizontal 1/2 active width in pixels of the display panel. - * Used to determine segment boundaries in multi-segment mode. Used to determine the width of the vertical active space in 3D frame packed modes. - * OPPBUF_ACTIVE_WIDTH must be integer divisible by the total number of segments." - */ - REG_UPDATE(OPPBUF_CONTROL, OPPBUF_DISPLAY_SEGMENTATION, oppbuf->mso_segmentation); - - /* description "Specifies the number of overlap pixels (1-8 overlapping pixels supported), used in multi-segment mode (DP-MSO operation)" */ - REG_UPDATE(OPPBUF_CONTROL, OPPBUF_OVERLAP_PIXEL_NUM, oppbuf->mso_overlap_pixel_num); - - /* description "Specifies the number of times a pixel is replicated (0-15 pixel replications supported). - * A value of 0 disables replication. The total number of times a pixel is output is OPPBUF_PIXEL_REPETITION + 1." 
- */ - REG_UPDATE(OPPBUF_CONTROL, OPPBUF_PIXEL_REPETITION, oppbuf->pixel_repetition); - - /* Controls the number of padded pixels at the end of a segment */ - if (REG(OPPBUF_CONTROL1)) - REG_UPDATE(OPPBUF_CONTROL1, OPPBUF_NUM_SEGMENT_PADDED_PIXELS, oppbuf->num_segment_padded_pixels); -} - void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable) { struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index 3d2a2848857a..b1671b00ce40 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -132,22 +132,6 @@ void optc1_setup_vertical_interrupt2( } /** - * Vupdate keepout can be set to a window to block the update lock for that pipe from changing. - * Start offset begins with vstartup and goes for x number of clocks, - * end offset starts from end of vupdate to x number of clocks. - */ -void optc1_set_vupdate_keepout(struct timing_generator *optc, - struct vupdate_keepout_params *params) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_SET_3(OTG_VUPDATE_KEEPOUT, 0, - MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, params->start_offset, - MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, params->end_offset, - OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, params->enable); -} - -/** * program_timing_generator used by mode timing set * Program CRTC Timing Registers - OTG_H_*, OTG_V_*, Pixel repetition. * Including SYNC. Call BIOS command table to program Timings. @@ -876,7 +860,7 @@ void optc1_set_static_screen_control( OTG_STATIC_SCREEN_FRAME_COUNT, num_frames); } -void optc1_setup_manual_trigger(struct timing_generator *optc) +static void optc1_setup_manual_trigger(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); @@ -894,7 +878,7 @@ void optc1_setup_manual_trigger(struct timing_generator *optc) OTG_TRIGA_CLEAR, 1); } -void optc1_program_manual_trigger(struct timing_generator *optc) +static void optc1_program_manual_trigger(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index f37551e00023..858b72149897 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -686,9 +686,8 @@ static struct output_pixel_processor *dcn10_opp_create( return &opp->base; } -struct dce_aux *dcn10_aux_engine_create( - struct dc_context *ctx, - uint32_t inst) +static struct dce_aux *dcn10_aux_engine_create(struct dc_context *ctx, + uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); @@ -724,9 +723,8 @@ static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) }; -struct dce_i2c_hw *dcn10_i2c_hw_create( - struct dc_context *ctx, - uint32_t inst) +static struct dce_i2c_hw *dcn10_i2c_hw_create(struct dc_context *ctx, + uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); @@ -805,7 +803,7 @@ static const struct encoder_feature_support link_enc_feature = { .flags.bits.IS_TPS4_CAPABLE = true }; -struct link_encoder *dcn10_link_encoder_create( +static struct link_encoder *dcn10_link_encoder_create( const struct encoder_init_data *enc_init_data) { struct dcn10_link_encoder *enc10 = @@ -847,7 +845,7 @@ static struct panel_cntl *dcn10_panel_cntl_create(const 
struct panel_cntl_init_d return &panel_cntl->base; } -struct clock_source *dcn10_clock_source_create( +static struct clock_source *dcn10_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, @@ -945,7 +943,7 @@ static const struct resource_create_funcs res_create_maximus_funcs = { .create_hwseq = dcn10_hwseq_create, }; -void dcn10_clock_source_destroy(struct clock_source **clk_src) +static void dcn10_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); *clk_src = NULL; @@ -978,10 +976,8 @@ static void dcn10_resource_destruct(struct dcn10_resource_pool *pool) pool->base.mpc = NULL; } - if (pool->base.hubbub != NULL) { - kfree(pool->base.hubbub); - pool->base.hubbub = NULL; - } + kfree(pool->base.hubbub); + pool->base.hubbub = NULL; for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.opps[i] != NULL) @@ -1011,14 +1007,10 @@ static void dcn10_resource_destruct(struct dcn10_resource_pool *pool) for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); - if (pool->base.hw_i2cs[i] != NULL) { - kfree(pool->base.hw_i2cs[i]); - pool->base.hw_i2cs[i] = NULL; - } - if (pool->base.sw_i2cs[i] != NULL) { - kfree(pool->base.sw_i2cs[i]); - pool->base.sw_i2cs[i] = NULL; - } + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; } for (i = 0; i < pool->base.audio_count; i++) { @@ -1128,7 +1120,7 @@ static enum dc_status build_mapped_resource( return DC_OK; } -enum dc_status dcn10_add_stream_to_ctx( +static enum dc_status dcn10_add_stream_to_ctx( struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) @@ -1320,7 +1312,7 @@ static const struct resource_funcs dcn10_res_pool_funcs = { .destroy = dcn10_destroy_resource_pool, .link_enc_create = dcn10_link_encoder_create, .panel_cntl_create = dcn10_panel_cntl_create, - .validate_bandwidth = dcn_validate_bandwidth, + .validate_bandwidth = dcn10_validate_bandwidth, .acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer, .validate_plane = dcn10_validate_plane, .validate_global = dcn10_validate_global, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index b0c08ee6bc2c..bf4436d7aaab 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -902,6 +902,19 @@ void enc1_stream_encoder_stop_dp_info_packets( } +void enc1_stream_encoder_reset_fifo( + struct stream_encoder *enc) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + /* set DIG_START to 0x1 to reset FIFO */ + REG_UPDATE(DIG_FE_CNTL, DIG_START, 1); + udelay(100); + + /* write 0 to take the FIFO out of reset */ + REG_UPDATE(DIG_FE_CNTL, DIG_START, 0); +} + void enc1_stream_encoder_dp_blank( struct dc_link *link, struct stream_encoder *enc) @@ -1587,6 +1600,8 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = { enc1_stream_encoder_send_immediate_sdp_message, .stop_dp_info_packets = enc1_stream_encoder_stop_dp_info_packets, + .reset_fifo = + enc1_stream_encoder_reset_fifo, .dp_blank = enc1_stream_encoder_dp_blank, .dp_unblank = diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index 687d7e4bf7ca..a146a41f68e9 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -626,6 +626,9 @@ void enc1_stream_encoder_send_immediate_sdp_message( void enc1_stream_encoder_stop_dp_info_packets( struct stream_encoder *enc); +void enc1_stream_encoder_reset_fifo( + struct stream_encoder *enc); + void enc1_stream_encoder_dp_blank( struct dc_link *link, struct stream_encoder *enc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c index a9e420c7d75a..970b65efeac1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c @@ -251,20 +251,6 @@ static void dpp2_cnv_setup ( } -void dpp2_cnv_set_bias_scale( - struct dpp *dpp_base, - struct dc_bias_and_scale *bias_and_scale) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_UPDATE(FCNV_FP_BIAS_R, FCNV_FP_BIAS_R, bias_and_scale->bias_red); - REG_UPDATE(FCNV_FP_BIAS_G, FCNV_FP_BIAS_G, bias_and_scale->bias_green); - REG_UPDATE(FCNV_FP_BIAS_B, FCNV_FP_BIAS_B, bias_and_scale->bias_blue); - REG_UPDATE(FCNV_FP_SCALE_R, FCNV_FP_SCALE_R, bias_and_scale->scale_red); - REG_UPDATE(FCNV_FP_SCALE_G, FCNV_FP_SCALE_G, bias_and_scale->scale_green); - REG_UPDATE(FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, bias_and_scale->scale_blue); -} - /*compute the maximum number of lines that we can fit in the line buffer*/ void dscl2_calc_lb_num_partitions( const struct scaler_data *scl_data, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index 79b640e202eb..ef5c4c0f4d6c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -162,6 +162,8 @@ static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_ds REG_GET(DSCC_PPS_CONFIG2, PIC_WIDTH, &s->dsc_pic_width); REG_GET(DSCC_PPS_CONFIG2, PIC_HEIGHT, &s->dsc_pic_height); REG_GET(DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, &s->dsc_slice_bpg_offset); + REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &s->dsc_fw_en, + DSCRM_DSC_OPP_PIPE_SOURCE, &s->dsc_opp_source); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c index 880954ac0b02..994fb732a7cb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c @@ -527,7 +527,7 @@ static const uint16_t filter_12tap_16p_183[108] = { 0, 84, 16328, 16032, 416, 1944, 1944, 416, 16032, 16328, 84, 0, }; -const uint16_t *wbscl_get_filter_3tap_16p(struct fixed31_32 ratio) +static const uint16_t *wbscl_get_filter_3tap_16p(struct fixed31_32 ratio) { if (ratio.value < dc_fixpt_one.value) return filter_3tap_16p_upscale; @@ -539,7 +539,7 @@ const uint16_t *wbscl_get_filter_3tap_16p(struct fixed31_32 ratio) return filter_3tap_16p_183; } -const uint16_t *wbscl_get_filter_4tap_16p(struct fixed31_32 ratio) +static const uint16_t *wbscl_get_filter_4tap_16p(struct fixed31_32 ratio) { if (ratio.value < dc_fixpt_one.value) return filter_4tap_16p_upscale; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c index 5adf42a7cc27..dc1752e9f461 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c @@ -192,9 +192,8 @@ void hubp2_vready_at_or_After_vsync(struct hubp *hubp, REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value); } 
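/* Later in this file hubp2_set_blank() is split: the pure register
 * programming moves into hubp2_set_blank_regs(), which is also exported
 * through the new set_blank_regs hook, while the software bookkeeping
 * (clearing mpcc_id/opp_id) stays in hubp2_set_blank(). A hypothetical
 * caller sketch of the resulting pair — the helper name and fallback
 * policy below are illustrative only, not part of this patch:
 */
static void example_blank_pixel_data(struct hubp *hubp, bool blank)
{
	if (hubp->funcs->set_blank_regs)
		/* blank/unblank without dropping the hubp<->mpcc/opp mapping */
		hubp->funcs->set_blank_regs(hubp, blank);
	else
		/* legacy path: blanking also invalidates mpcc_id/opp_id */
		hubp->funcs->set_blank(hubp, blank);
}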
-void hubp2_program_requestor( - struct hubp *hubp, - struct _vcs_dpi_display_rq_regs_st *rq_regs) +static void hubp2_program_requestor(struct hubp *hubp, + struct _vcs_dpi_display_rq_regs_st *rq_regs) { struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); @@ -930,6 +929,16 @@ bool hubp2_is_flip_pending(struct hubp *hubp) void hubp2_set_blank(struct hubp *hubp, bool blank) { + hubp2_set_blank_regs(hubp, blank); + + if (blank) { + hubp->mpcc_id = 0xf; + hubp->opp_id = OPP_ID_INVALID; + } +} + +void hubp2_set_blank_regs(struct hubp *hubp, bool blank) +{ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); uint32_t blank_en = blank ? 1 : 0; @@ -951,9 +960,6 @@ void hubp2_set_blank(struct hubp *hubp, bool blank) HUBP_NO_OUTSTANDING_REQ, 1, 1, 200); } - - hubp->mpcc_id = 0xf; - hubp->opp_id = OPP_ID_INVALID; } } @@ -1285,7 +1291,7 @@ void hubp2_read_state(struct hubp *hubp) } -void hubp2_validate_dml_output(struct hubp *hubp, +static void hubp2_validate_dml_output(struct hubp *hubp, struct dc_context *ctx, struct _vcs_dpi_display_rq_regs_st *dml_rq_regs, struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr, @@ -1603,6 +1609,7 @@ static struct hubp_funcs dcn20_hubp_funcs = { .hubp_setup_interdependent = hubp2_setup_interdependent, .hubp_set_vm_system_aperture_settings = hubp2_set_vm_system_aperture_settings, .set_blank = hubp2_set_blank, + .set_blank_regs = hubp2_set_blank_regs, .dcc_control = hubp2_dcc_control, .mem_program_viewport = min_set_viewport, .set_cursor_attributes = hubp2_cursor_set_attributes, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h index eea2254b15e4..9204c3ef323b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h @@ -330,6 +330,7 @@ void hubp2_program_surface_config( bool hubp2_is_flip_pending(struct hubp *hubp); void hubp2_set_blank(struct hubp *hubp, bool blank); +void hubp2_set_blank_regs(struct hubp *hubp, bool blank); void hubp2_cursor_set_position( struct hubp *hubp, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 4f88376a118f..a17fe8ab2904 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -615,6 +615,11 @@ void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) pipe_ctx->pipe_idx); } +void dcn20_disable_pixel_data(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank) +{ + dcn20_blank_pixel_data(dc, pipe_ctx, blank); +} + static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream, int opp_cnt) { @@ -1080,10 +1085,8 @@ static void dcn20_power_on_plane( } } -void dcn20_enable_plane( - struct dc *dc, - struct pipe_ctx *pipe_ctx, - struct dc_state *context) +static void dcn20_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx, + struct dc_state *context) { //if (dc->debug.sanity_checks) { // dcn10_verify_allow_pstate_change_high(dc); @@ -1842,6 +1845,11 @@ void dcn20_optimize_bandwidth( dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, true); + if (dc->clk_mgr->dc_mode_softmax_enabled) + if (dc->clk_mgr->clks.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 && + context->bw_ctx.bw.dcn.clk.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) + dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->dc_mode_softmax_memclk); + dc->clk_mgr->funcs->update_clocks( dc->clk_mgr, context, @@ -2270,7 +2278,7 @@ void 
dcn20_reset_hw_ctx_wrap( dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); if (hws->funcs.enable_stream_gating) - hws->funcs.enable_stream_gating(dc, pipe_ctx); + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); if (old_clk) old_clk->funcs->cs_power_down(old_clk); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h index 6bba191cd33e..33a36c02b2f8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h @@ -53,6 +53,10 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx); void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings); void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_disable_pixel_data( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool blank); void dcn20_blank_pixel_data( struct dc *dc, struct pipe_ctx *pipe_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c index 5cfd4b0afea5..91e4885b743e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c @@ -27,6 +27,8 @@ #include "dcn10/dcn10_hw_sequencer.h" #include "dcn20_hwseq.h" +#include "dcn20_init.h" + static const struct hw_sequencer_funcs dcn20_funcs = { .program_gamut_remap = dcn10_program_gamut_remap, .init_hw = dcn10_init_hw, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c index 947eb0df3f12..15734db0cdea 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c @@ -400,10 +400,9 @@ static void mpc20_program_ogam_pwl( } -void apply_DEDCN20_305_wa( - struct mpc *mpc, - int mpcc_id, enum dc_lut_mode current_mode, - enum dc_lut_mode next_mode) +static void apply_DEDCN20_305_wa(struct mpc *mpc, int mpcc_id, + enum dc_lut_mode current_mode, + enum dc_lut_mode next_mode) { struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); @@ -525,7 +524,7 @@ static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst) mpcc->sm_cfg.enable = false; } -struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id) +static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id) { struct mpcc *tmp_mpcc = tree->opp_list; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index c90b8516dcc1..0340fdd3f5fb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -73,21 +73,6 @@ bool optc2_enable_crtc(struct timing_generator *optc) } /** - * DRR double buffering control to select buffer point - * for V_TOTAL, H_TOTAL, VTOTAL_MIN, VTOTAL_MAX, VTOTAL_MIN_SEL and VTOTAL_MAX_SEL registers - * Options: anytime, start of frame, dp start of frame (range timing) - */ -void optc2_set_timing_db_mode(struct timing_generator *optc, bool enable) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - uint32_t blank_data_double_buffer_enable = enable ? 
1 : 0; - - REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL, - OTG_RANGE_TIMING_DBUF_UPDATE_MODE, blank_data_double_buffer_enable); -} - -/** *For the below, I'm not sure how your GSL parameters are stored in your env, * so I will assume a gsl_params struct for now */ @@ -110,30 +95,6 @@ void optc2_set_gsl(struct timing_generator *optc, } -/* Use the gsl allow flip as the master update lock */ -void optc2_use_gsl_as_master_update_lock(struct timing_generator *optc, - const struct gsl_params *params) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_UPDATE(OTG_GSL_CONTROL, - OTG_MASTER_UPDATE_LOCK_GSL_EN, params->master_update_lock_gsl_en); -} - -/* You can control the GSL timing by limiting GSL to a window (X,Y) */ -void optc2_set_gsl_window(struct timing_generator *optc, - const struct gsl_params *params) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_SET_2(OTG_GSL_WINDOW_X, 0, - OTG_GSL_WINDOW_START_X, params->gsl_window_start_x, - OTG_GSL_WINDOW_END_X, params->gsl_window_end_x); - REG_SET_2(OTG_GSL_WINDOW_Y, 0, - OTG_GSL_WINDOW_START_Y, params->gsl_window_start_y, - OTG_GSL_WINDOW_END_Y, params->gsl_window_end_y); -} - void optc2_set_gsl_source_select( struct timing_generator *optc, int group_idx, @@ -156,18 +117,6 @@ void optc2_set_gsl_source_select( } } -/* DSC encoder frame start controls: x = h position, line_num = # of lines from vstartup */ -void optc2_set_dsc_encoder_frame_start(struct timing_generator *optc, - int x_position, - int line_num) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_SET_2(OTG_DSC_START_POSITION, 0, - OTG_DSC_START_POSITION_X, x_position, - OTG_DSC_START_POSITION_LINE_NUM, line_num); -} - /* Set DSC-related configuration. * dsc_mode: 0 disables DSC, other values enable DSC in specified format * sc_bytes_per_pixel: Bytes per pixel in u3.28 format @@ -190,6 +139,19 @@ void optc2_set_dsc_config(struct timing_generator *optc, OPTC_DSC_SLICE_WIDTH, dsc_slice_width); } +/* Get DSC-related configuration. 
+ * dsc_mode: 0 disables DSC, other values enable DSC in specified format + */ +void optc2_get_dsc_status(struct timing_generator *optc, + uint32_t *dsc_mode) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_GET(OPTC_DATA_FORMAT_CONTROL, + OPTC_DSC_MODE, dsc_mode); +} + + /*TEMP: Need to figure out inheritance model here.*/ bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing) { @@ -280,8 +242,8 @@ void optc2_get_optc_source(struct timing_generator *optc, *num_of_src_opp = 1; } -void optc2_set_dwb_source(struct timing_generator *optc, - uint32_t dwb_pipe_inst) +static void optc2_set_dwb_source(struct timing_generator *optc, + uint32_t dwb_pipe_inst) { struct optc *optc1 = DCN10TG_FROM_TG(optc); @@ -293,7 +255,7 @@ void optc2_set_dwb_source(struct timing_generator *optc, OPTC_DWB1_SOURCE_SELECT, optc->inst); } -void optc2_align_vblanks( +static void optc2_align_vblanks( struct timing_generator *optc_master, struct timing_generator *optc_slave, uint32_t master_pixel_clock_100Hz, @@ -579,6 +541,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = { .get_crc = optc1_get_crc, .configure_crc = optc2_configure_crc, .set_dsc_config = optc2_set_dsc_config, + .get_dsc_status = optc2_get_dsc_status, .set_dwb_source = optc2_set_dwb_source, .set_odm_bypass = optc2_set_odm_bypass, .set_odm_combine = optc2_set_odm_combine, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h index be19a6885fbf..f7968b9ca16e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h @@ -98,6 +98,9 @@ void optc2_set_dsc_config(struct timing_generator *optc, uint32_t dsc_bytes_per_pixel, uint32_t dsc_slice_width); +void optc2_get_dsc_status(struct timing_generator *optc, + uint32_t *dsc_mode); + void optc2_set_odm_bypass(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 3883f918b3bb..40b122a708ef 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -3796,6 +3796,8 @@ static bool dcn20_resource_construct( dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.hdmi_frl_pcon_support = true; + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) { dc->debug = debug_defaults_drv; } else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c index aab25ca8343a..8a70f92795c2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c @@ -593,6 +593,8 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = { enc1_stream_encoder_send_immediate_sdp_message, .stop_dp_info_packets = enc1_stream_encoder_stop_dp_info_packets, + .reset_fifo = + enc1_stream_encoder_reset_fifo, .dp_blank = enc1_stream_encoder_dp_blank, .dp_unblank = diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c index f5bf04f7da25..9a3402148fde 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c @@ -44,7 +44,8 @@ #define DC_LOGGER \ dccg->ctx->logger -void dccg201_update_dpp_dto(struct dccg *dccg, 
int dpp_inst, int req_dppclk) +static void dccg201_update_dpp_dto(struct dccg *dccg, int dpp_inst, + int req_dppclk) { /* vbios handles it */ } diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c index 6b6f74d4afd1..35dd4bac242a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c @@ -55,7 +55,7 @@ static void hubp201_program_surface_config( hubp1_program_pixel_format(hubp, format); } -void hubp201_program_deadline( +static void hubp201_program_deadline( struct hubp *hubp, struct _vcs_dpi_display_dlg_regs_st *dlg_attr, struct _vcs_dpi_display_ttu_regs_st *ttu_attr) @@ -63,9 +63,8 @@ void hubp201_program_deadline( hubp1_program_deadline(hubp, dlg_attr, ttu_attr); } -void hubp201_program_requestor( - struct hubp *hubp, - struct _vcs_dpi_display_rq_regs_st *rq_regs) +static void hubp201_program_requestor(struct hubp *hubp, + struct _vcs_dpi_display_rq_regs_st *rq_regs) { struct dcn201_hubp *hubp201 = TO_DCN201_HUBP(hubp); diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c index a65e8f7801db..7f9ec59ef443 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c @@ -50,8 +50,8 @@ #define IND_REG(index) \ (enc10->link_regs->index) -void dcn201_link_encoder_get_max_link_cap(struct link_encoder *enc, - struct dc_link_settings *link_settings) +static void dcn201_link_encoder_get_max_link_cap(struct link_encoder *enc, + struct dc_link_settings *link_settings) { uint32_t value1, value2; struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); @@ -66,7 +66,7 @@ void dcn201_link_encoder_get_max_link_cap(struct link_encoder *enc, } } -bool dcn201_link_encoder_is_in_alt_mode(struct link_encoder *enc) +static bool dcn201_link_encoder_is_in_alt_mode(struct link_encoder *enc) { uint32_t value; struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c index 0fa381088d1d..d6acf9a8590a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c @@ -672,9 +672,8 @@ static struct output_pixel_processor *dcn201_opp_create( return &opp->base; } -struct dce_aux *dcn201_aux_engine_create( - struct dc_context *ctx, - uint32_t inst) +static struct dce_aux *dcn201_aux_engine_create(struct dc_context *ctx, + uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC); @@ -706,9 +705,8 @@ static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) }; -struct dce_i2c_hw *dcn201_i2c_hw_create( - struct dc_context *ctx, - uint32_t inst) +static struct dce_i2c_hw *dcn201_i2c_hw_create(struct dc_context *ctx, + uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC); @@ -789,7 +787,7 @@ static const struct encoder_feature_support link_enc_feature = { .flags.bits.IS_TPS4_CAPABLE = true }; -struct link_encoder *dcn201_link_encoder_create( +static struct link_encoder *dcn201_link_encoder_create( const struct encoder_init_data *enc_init_data) { struct dcn20_link_encoder *enc20 = @@ -811,7 +809,7 @@ struct link_encoder *dcn201_link_encoder_create( return &enc10->base; } -struct clock_source *dcn201_clock_source_create( 
+static struct clock_source *dcn201_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, @@ -906,7 +904,7 @@ static const struct resource_create_funcs res_create_maximus_funcs = { .create_hwseq = dcn201_hwseq_create, }; -void dcn201_clock_source_destroy(struct clock_source **clk_src) +static void dcn201_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); *clk_src = NULL; diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c index 36044cb8ec83..c5e200d09038 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c @@ -680,7 +680,7 @@ void hubbub21_wm_read_state(struct hubbub *hubbub, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_chanage); } -void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub) +static void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); uint32_t prog_wm_value; diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index 3de1bcf9b3d8..58e459c7e7d3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -183,7 +183,7 @@ static void hubp21_setup( } -void hubp21_set_viewport( +static void hubp21_set_viewport( struct hubp *hubp, const struct rect *viewport, const struct rect *viewport_c) @@ -225,8 +225,8 @@ void hubp21_set_viewport( SEC_VIEWPORT_Y_START_C, viewport_c->y); } -void hubp21_set_vm_system_aperture_settings(struct hubp *hubp, - struct vm_system_aperture_param *apt) +static void hubp21_set_vm_system_aperture_settings(struct hubp *hubp, + struct vm_system_aperture_param *apt) { struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); @@ -248,7 +248,7 @@ void hubp21_set_vm_system_aperture_settings(struct hubp *hubp, SYSTEM_ACCESS_MODE, 0x3); } -void hubp21_validate_dml_output(struct hubp *hubp, +static void hubp21_validate_dml_output(struct hubp *hubp, struct dc_context *ctx, struct _vcs_dpi_display_rq_regs_st *dml_rq_regs, struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr, @@ -664,7 +664,8 @@ static void program_surface_flip_and_addr(struct hubp *hubp, struct surface_flip flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS); } -void dmcub_PLAT_54186_wa(struct hubp *hubp, struct surface_flip_registers *flip_regs) +static void dmcub_PLAT_54186_wa(struct hubp *hubp, + struct surface_flip_registers *flip_regs) { struct dc_dmub_srv *dmcub = hubp->ctx->dmub_srv; struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); @@ -697,7 +698,7 @@ void dmcub_PLAT_54186_wa(struct hubp *hubp, struct surface_flip_registers *flip_ PERF_TRACE(); // TODO: remove after performance is stable. } -bool hubp21_program_surface_flip_and_addr( +static bool hubp21_program_surface_flip_and_addr( struct hubp *hubp, const struct dc_plane_address *address, bool flip_immediate) @@ -805,7 +806,7 @@ bool hubp21_program_surface_flip_and_addr( return true; } -void hubp21_init(struct hubp *hubp) +static void hubp21_init(struct hubp *hubp) { // DEDCN21-133: Inconsistent row starting line for flip between DPTE and Meta // This is a chicken bit to enable the ECO fix. 
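A recurring cleanup across these files: functions with no external callers become static, and a .c file that defines global entry points includes its own header (as dcn20_init.c did above and dcn21_init.c does in the next hunk), so -Wmissing-prototypes can verify each declaration against its definition. A minimal sketch of the pattern, with hypothetical file and function names:

/* dcn_example_init.h */
struct dc;
void dcn_example_hw_sequencer_construct(struct dc *dc);

/* dcn_example_init.c */
#include "dcn_example_init.h"	/* makes the prototype visible at the definition */

static void local_helper(void)	/* file-local only: declared static */
{
}

void dcn_example_hw_sequencer_construct(struct dc *dc)
{
	local_helper();
}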
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c index 54c11ba550ae..b270f0b194dc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c @@ -28,6 +28,8 @@ #include "dcn20/dcn20_hwseq.h" #include "dcn21_hwseq.h" +#include "dcn21_init.h" + static const struct hw_sequencer_funcs dcn21_funcs = { .program_gamut_remap = dcn10_program_gamut_remap, .init_hw = dcn10_init_hw, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c index aa46c35b05a2..0a1ba6e7081c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c @@ -203,7 +203,7 @@ static bool update_cfg_data( return true; } -bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc) +static bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); int value; @@ -277,7 +277,7 @@ void dcn21_link_encoder_enable_dp_output( } -void dcn21_link_encoder_enable_dp_mst_output( +static void dcn21_link_encoder_enable_dp_mst_output( struct link_encoder *enc, const struct dc_link_settings *link_settings, enum clock_source_id clock_source) @@ -288,9 +288,8 @@ void dcn21_link_encoder_enable_dp_mst_output( dcn10_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source); } -void dcn21_link_encoder_disable_output( - struct link_encoder *enc, - enum signal_type signal) +static void dcn21_link_encoder_disable_output(struct link_encoder *enc, + enum signal_type signal) { dcn10_link_encoder_disable_output(enc, signal); diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index d452a0d1777e..ca1bbc942fd4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -784,9 +784,8 @@ static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) }; -struct dce_i2c_hw *dcn21_i2c_hw_create( - struct dc_context *ctx, - uint32_t inst) +static struct dce_i2c_hw *dcn21_i2c_hw_create(struct dc_context *ctx, + uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); @@ -1093,7 +1092,7 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s } } -void dcn21_calculate_wm( +static void dcn21_calculate_wm( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int *out_pipe_cnt, @@ -1390,7 +1389,7 @@ validate_out: * with DC_FP_START()/DC_FP_END(). Use the same approach as for * dcn20_validate_bandwidth in dcn20_resource.c. 
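 * (DC_FP_START()/DC_FP_END() bracket the only regions in which the
 * kernel permits floating-point math; the watermark and clock
 * calculations involved here use floats, hence the wrapping requirement.)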
*/ -bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context, +static bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate) { bool voltage_supported; @@ -1480,8 +1479,8 @@ static struct hubbub *dcn21_hubbub_create(struct dc_context *ctx) return &hubbub->base; } -struct output_pixel_processor *dcn21_opp_create( - struct dc_context *ctx, uint32_t inst) +static struct output_pixel_processor *dcn21_opp_create(struct dc_context *ctx, + uint32_t inst) { struct dcn20_opp *opp = kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); @@ -1496,9 +1495,8 @@ struct output_pixel_processor *dcn21_opp_create( return &opp->base; } -struct timing_generator *dcn21_timing_generator_create( - struct dc_context *ctx, - uint32_t instance) +static struct timing_generator *dcn21_timing_generator_create(struct dc_context *ctx, + uint32_t instance) { struct optc *tgn10 = kzalloc(sizeof(struct optc), GFP_KERNEL); @@ -1518,7 +1516,7 @@ struct timing_generator *dcn21_timing_generator_create( return &tgn10->base; } -struct mpc *dcn21_mpc_create(struct dc_context *ctx) +static struct mpc *dcn21_mpc_create(struct dc_context *ctx) { struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc), GFP_KERNEL); @@ -1545,8 +1543,8 @@ static void read_dce_straps( } -struct display_stream_compressor *dcn21_dsc_create( - struct dc_context *ctx, uint32_t inst) +static struct display_stream_compressor *dcn21_dsc_create(struct dc_context *ctx, + uint32_t inst) { struct dcn20_dsc *dsc = kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); @@ -1683,9 +1681,8 @@ static struct dc_cap_funcs cap_funcs = { .get_dcc_compression_cap = dcn20_get_dcc_compression_cap }; -struct stream_encoder *dcn21_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) +static struct stream_encoder *dcn21_stream_encoder_create(enum engine_id eng_id, + struct dc_context *ctx) { struct dcn10_stream_encoder *enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); @@ -1917,7 +1914,7 @@ static int dcn21_populate_dml_pipes_from_context( return pipe_cnt; } -enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state) +static enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state) { enum dc_status result = DC_OK; @@ -2028,6 +2025,8 @@ static bool dcn21_resource_construct( dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.hdmi_frl_pcon_support = true; + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c index ebd9c35c914f..8daa12730bc1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c @@ -50,22 +50,6 @@ enc1->base.ctx -void convert_dc_info_packet_to_128( - const struct dc_info_packet *info_packet, - struct dc_info_packet_128 *info_packet_128) -{ - unsigned int i; - - info_packet_128->hb0 = info_packet->hb0; - info_packet_128->hb1 = info_packet->hb1; - info_packet_128->hb2 = info_packet->hb2; - info_packet_128->hb3 = info_packet->hb3; - - for (i = 0; i < 32; i++) { - info_packet_128->sb[i] = info_packet->sb[i]; - } - -} static void enc3_update_hdmi_info_packet( struct dcn10_stream_encoder *enc1, uint32_t packet_index, @@ -489,7 +473,7 @@ static void enc3_dp_set_odm_combine( } /* setup stream encoder 
in dvi mode */ -void enc3_stream_encoder_dvi_set_stream_attribute( +static void enc3_stream_encoder_dvi_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, bool is_dual_link) @@ -805,6 +789,8 @@ static const struct stream_encoder_funcs dcn30_str_enc_funcs = { enc3_stream_encoder_update_dp_info_packets, .stop_dp_info_packets = enc1_stream_encoder_stop_dp_info_packets, + .reset_fifo = + enc1_stream_encoder_reset_fifo, .dp_blank = enc1_stream_encoder_dp_blank, .dp_unblank = diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c index c1d967ed6551..ab3918c0a15b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c @@ -41,8 +41,7 @@ dpp->tf_shift->field_name, dpp->tf_mask->field_name -void dpp30_read_state(struct dpp *dpp_base, - struct dcn_dpp_state *s) +static void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s) { struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); @@ -373,7 +372,7 @@ void dpp3_set_cursor_attributes( } -bool dpp3_get_optimal_number_of_taps( +static bool dpp3_get_optimal_number_of_taps( struct dpp *dpp, struct scaler_data *scl_data, const struct scaling_taps *in_taps) @@ -474,22 +473,7 @@ bool dpp3_get_optimal_number_of_taps( return true; } -void dpp3_cnv_set_bias_scale( - struct dpp *dpp_base, - struct dc_bias_and_scale *bias_and_scale) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_UPDATE(FCNV_FP_BIAS_R, FCNV_FP_BIAS_R, bias_and_scale->bias_red); - REG_UPDATE(FCNV_FP_BIAS_G, FCNV_FP_BIAS_G, bias_and_scale->bias_green); - REG_UPDATE(FCNV_FP_BIAS_B, FCNV_FP_BIAS_B, bias_and_scale->bias_blue); - REG_UPDATE(FCNV_FP_SCALE_R, FCNV_FP_SCALE_R, bias_and_scale->scale_red); - REG_UPDATE(FCNV_FP_SCALE_G, FCNV_FP_SCALE_G, bias_and_scale->scale_green); - REG_UPDATE(FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, bias_and_scale->scale_blue); -} - -void dpp3_deferred_update( - struct dpp *dpp_base) +static void dpp3_deferred_update(struct dpp *dpp_base) { int bypass_state; struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); @@ -751,8 +735,8 @@ static enum dc_lut_mode dpp3_get_blndgam_current(struct dpp *dpp_base) return mode; } -bool dpp3_program_blnd_lut( - struct dpp *dpp_base, const struct pwl_params *params) +static bool dpp3_program_blnd_lut(struct dpp *dpp_base, + const struct pwl_params *params) { enum dc_lut_mode current_mode; enum dc_lut_mode next_mode; @@ -1164,9 +1148,8 @@ static void dpp3_program_shaper_lutb_settings( } -bool dpp3_program_shaper( - struct dpp *dpp_base, - const struct pwl_params *params) +static bool dpp3_program_shaper(struct dpp *dpp_base, + const struct pwl_params *params) { enum dc_lut_mode current_mode; enum dc_lut_mode next_mode; @@ -1355,9 +1338,8 @@ static void dpp3_select_3dlut_ram_mask( REG_SET(CM_3DLUT_INDEX, 0, CM_3DLUT_INDEX, 0); } -bool dpp3_program_3dlut( - struct dpp *dpp_base, - struct tetrahedral_params *params) +static bool dpp3_program_3dlut(struct dpp *dpp_base, + struct tetrahedral_params *params) { enum dc_lut_mode mode; bool is_17x17x17; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c index eac08926b574..6a4dcafb9bba 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c @@ -490,6 +490,7 @@ static struct hubp_funcs dcn30_hubp_funcs = { .hubp_setup_interdependent = hubp2_setup_interdependent, .hubp_set_vm_system_aperture_settings = 
hubp3_set_vm_system_aperture_settings, .set_blank = hubp2_set_blank, + .set_blank_regs = hubp2_set_blank_regs, .dcc_control = hubp3_dcc_control, .mem_program_viewport = min_set_viewport, .set_cursor_attributes = hubp2_cursor_set_attributes, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index df2717116604..1db1ca19411d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -344,6 +344,17 @@ void dcn30_enable_writeback( dwb->funcs->enable(dwb, &wb_info->dwb_params); } +void dcn30_prepare_bandwidth(struct dc *dc, + struct dc_state *context) +{ + if (dc->clk_mgr->dc_mode_softmax_enabled) + if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 && + context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) + dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); + + dcn20_prepare_bandwidth(dc, context); +} + void dcn30_disable_writeback( struct dc *dc, unsigned int dwb_pipe_inst) @@ -437,7 +448,7 @@ void dcn30_init_hw(struct dc *dc) struct dce_hwseq *hws = dc->hwseq; struct dc_bios *dcb = dc->ctx->dc_bios; struct resource_pool *res_pool = dc->res_pool; - int i, j; + int i; int edp_num; uint32_t backlight = MAX_BACKLIGHT_LEVEL; @@ -534,41 +545,8 @@ void dcn30_init_hw(struct dc *dc) hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); /* we want to turn off all dp displays before doing detection */ - if (dc->config.power_down_display_on_boot) { - uint8_t dpcd_power_state = '\0'; - enum dc_status status = DC_ERROR_UNEXPECTED; - - for (i = 0; i < dc->link_count; i++) { - if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) - continue; - /* DP 2.0 states that LTTPR regs must be read first */ - dp_retrieve_lttpr_cap(dc->links[i]); - - /* if any of the displays are lit up turn them off */ - status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, - &dpcd_power_state, sizeof(dpcd_power_state)); - if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) { - /* blank dp stream before power off receiver*/ - if (dc->links[i]->link_enc->funcs->get_dig_frontend) { - unsigned int fe; - - fe = dc->links[i]->link_enc->funcs->get_dig_frontend( - dc->links[i]->link_enc); - if (fe == ENGINE_ID_UNKNOWN) - continue; - - for (j = 0; j < dc->res_pool->stream_enc_count; j++) { - if (fe == dc->res_pool->stream_enc[j]->id) { - dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i], - dc->res_pool->stream_enc[j]); - break; - } - } - } - dp_receiver_power_ctrl(dc->links[i], false); - } - } - } + if (dc->config.power_down_display_on_boot) + dc_link_blank_all_dp_displays(dc); /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h index e9a0005288d3..73e7b690e82c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h @@ -27,7 +27,7 @@ #define __DC_HWSS_DCN30_H__ #include "hw_sequencer_private.h" - +#include "dcn20/dcn20_hwseq.h" struct dc; void dcn30_init_hw(struct dc *dc); @@ -47,6 +47,9 @@ void dcn30_disable_writeback( struct dc *dc, unsigned int dwb_pipe_inst); +void dcn30_prepare_bandwidth(struct dc *dc, + struct 
dc_state *context); + bool dcn30_mmhubbub_warmup( struct dc *dc, unsigned int num_dwb, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c index 93f32a312fee..bb347319de83 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c @@ -29,6 +29,8 @@ #include "dcn21/dcn21_hwseq.h" #include "dcn30_hwseq.h" +#include "dcn30_init.h" + static const struct hw_sequencer_funcs dcn30_funcs = { .program_gamut_remap = dcn10_program_gamut_remap, .init_hw = dcn30_init_hw, @@ -53,6 +55,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = { .enable_audio_stream = dce110_enable_audio_stream, .disable_audio_stream = dce110_disable_audio_stream, .disable_plane = dcn20_disable_plane, + .disable_pixel_data = dcn20_disable_pixel_data, .pipe_control_lock = dcn20_pipe_control_lock, .interdependent_update_lock = dcn10_lock_all_pipes, .cursor_lock = dcn10_cursor_lock, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c index 1c4b171c68ad..7a93eff183d9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c @@ -100,7 +100,7 @@ static void mmhubbub3_warmup_mcif(struct mcif_wb *mcif_wb, REG_UPDATE(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_EN, false); } -void mmhubbub3_config_mcif_buf(struct mcif_wb *mcif_wb, +static void mmhubbub3_config_mcif_buf(struct mcif_wb *mcif_wb, struct mcif_buf_params *params, unsigned int dest_height) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c index 95149734378b..0ce0d6165f43 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c @@ -1362,7 +1362,7 @@ uint32_t mpcc3_acquire_rmu(struct mpc *mpc, int mpcc_id, int rmu_idx) return -1; } -int mpcc3_release_rmu(struct mpc *mpc, int mpcc_id) +static int mpcc3_release_rmu(struct mpc *mpc, int mpcc_id) { struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); int rmu_idx; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c index 5d9e6413d67a..f5e8916601d3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c @@ -332,6 +332,7 @@ static struct timing_generator_funcs dcn30_tg_funcs = { .get_crc = optc1_get_crc, .configure_crc = optc2_configure_crc, .set_dsc_config = optc3_set_dsc_config, + .get_dsc_status = optc2_get_dsc_status, .set_dwb_source = NULL, .set_odm_bypass = optc3_set_odm_bypass, .set_odm_combine = optc3_set_odm_combine, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index 79a66e0c4303..369ceeeddc7e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -816,7 +816,7 @@ static const struct dc_plane_cap plane_cap = { .argb8888 = true, .nv12 = true, .fp16 = true, - .p010 = false, + .p010 = true, .ayuv = false, }, @@ -875,7 +875,7 @@ static const struct dc_debug_options debug_defaults_diags = { .use_max_lb = true }; -void dcn30_dpp_destroy(struct dpp **dpp) +static void dcn30_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN20_DPP(*dpp)); *dpp = NULL; @@ -992,7 +992,7 @@ static struct mpc *dcn30_mpc_create( return &mpc30->base; } -struct hubbub *dcn30_hubbub_create(struct dc_context 
*ctx) +static struct hubbub *dcn30_hubbub_create(struct dc_context *ctx) { int i; @@ -1143,9 +1143,8 @@ static struct afmt *dcn30_afmt_create( return &afmt3->base; } -struct stream_encoder *dcn30_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) +static struct stream_encoder *dcn30_stream_encoder_create(enum engine_id eng_id, + struct dc_context *ctx) { struct dcn10_stream_encoder *enc1; struct vpg *vpg; @@ -1179,8 +1178,7 @@ struct stream_encoder *dcn30_stream_encoder_create( return &enc1->base; } -struct dce_hwseq *dcn30_hwseq_create( - struct dc_context *ctx) +static struct dce_hwseq *dcn30_hwseq_create(struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); @@ -1763,6 +1761,17 @@ static bool dcn30_split_stream_for_mpc_or_odm( int pipe_idx = sec_pipe->pipe_idx; const struct resource_pool *pool = dc->res_pool; + if (pri_pipe->plane_state) { + /* ODM + window MPO, where MPO window is on left half only */ + if (pri_pipe->plane_state->clip_rect.x + pri_pipe->plane_state->clip_rect.width <= + pri_pipe->stream->src.x + pri_pipe->stream->src.width/2) + return true; + + /* ODM + window MPO, where MPO window is on right half only */ + if (pri_pipe->plane_state->clip_rect.x >= pri_pipe->stream->src.width/2) + return true; + } + *sec_pipe = *pri_pipe; sec_pipe->pipe_idx = pipe_idx; @@ -2639,6 +2648,8 @@ static bool dcn30_resource_construct( dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.hdmi_frl_pcon_support = true; + /* read VBIOS LTTPR caps */ { if (ctx->dc_bios->funcs->get_lttpr_caps) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c index e85b695f2351..3d42a1a337ec 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c @@ -30,6 +30,8 @@ #include "dcn30/dcn30_hwseq.h" #include "dcn301_hwseq.h" +#include "dcn301_init.h" + static const struct hw_sequencer_funcs dcn301_funcs = { .program_gamut_remap = dcn10_program_gamut_remap, .init_hw = dcn10_init_hw, diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c index 736bda30abc3..ad0df1a72a90 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c @@ -93,7 +93,7 @@ static unsigned int dcn301_get_16_bit_backlight_from_pwm(struct panel_cntl *pane return (uint32_t)(current_backlight); } -uint32_t dcn301_panel_cntl_hw_init(struct panel_cntl *panel_cntl) +static uint32_t dcn301_panel_cntl_hw_init(struct panel_cntl *panel_cntl) { struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(panel_cntl); uint32_t value; @@ -147,7 +147,7 @@ uint32_t dcn301_panel_cntl_hw_init(struct panel_cntl *panel_cntl) return current_backlight; } -void dcn301_panel_cntl_destroy(struct panel_cntl **panel_cntl) +static void dcn301_panel_cntl_destroy(struct panel_cntl **panel_cntl) { struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(*panel_cntl); @@ -155,7 +155,7 @@ void dcn301_panel_cntl_destroy(struct panel_cntl **panel_cntl) *panel_cntl = NULL; } -bool dcn301_is_panel_backlight_on(struct panel_cntl *panel_cntl) +static bool dcn301_is_panel_backlight_on(struct panel_cntl *panel_cntl) { struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(panel_cntl); uint32_t value; @@ -165,7 +165,7 @@ bool dcn301_is_panel_backlight_on(struct panel_cntl *panel_cntl) 
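The early-out added to dcn30_split_stream_for_mpc_or_odm() in the dcn30_resource.c hunk above can be read as one predicate: with ODM splitting the stream into left and right halves, an MPO plane whose clip rectangle sits entirely inside one half does not need the second pipe. A sketch of that predicate using the same arithmetic as the patch (the helper name is illustrative):

static bool example_mpo_window_fits_one_half(const struct rect *clip,
		int stream_src_x, int stream_src_width)
{
	/* entirely within the left half */
	if (clip->x + clip->width <= stream_src_x + stream_src_width / 2)
		return true;

	/* entirely within the right half */
	if (clip->x >= stream_src_width / 2)
		return true;

	return false;
}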
return value; } -bool dcn301_is_panel_powered_on(struct panel_cntl *panel_cntl) +static bool dcn301_is_panel_powered_on(struct panel_cntl *panel_cntl) { struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(panel_cntl); uint32_t pwr_seq_state, dig_on, dig_on_ovrd; @@ -177,7 +177,7 @@ bool dcn301_is_panel_powered_on(struct panel_cntl *panel_cntl) return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1); } -void dcn301_store_backlight_level(struct panel_cntl *panel_cntl) +static void dcn301_store_backlight_level(struct panel_cntl *panel_cntl) { struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(panel_cntl); diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index fbaa03f26d8b..b4001233867c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -656,7 +656,7 @@ static const struct dc_plane_cap plane_cap = { .argb8888 = true, .nv12 = true, .fp16 = true, - .p010 = false, + .p010 = true, .ayuv = false, }, @@ -686,7 +686,7 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_clock_gate = true, .disable_pplib_clock_request = true, .disable_pplib_wm_range = true, - .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, + .pipe_split_policy = MPC_SPLIT_AVOID, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, @@ -717,15 +717,13 @@ static const struct dc_debug_options debug_defaults_diags = { .use_max_lb = false, }; -void dcn301_dpp_destroy(struct dpp **dpp) +static void dcn301_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN20_DPP(*dpp)); *dpp = NULL; } -struct dpp *dcn301_dpp_create( - struct dc_context *ctx, - uint32_t inst) +static struct dpp *dcn301_dpp_create(struct dc_context *ctx, uint32_t inst) { struct dcn3_dpp *dpp = kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); @@ -741,8 +739,8 @@ struct dpp *dcn301_dpp_create( kfree(dpp); return NULL; } -struct output_pixel_processor *dcn301_opp_create( - struct dc_context *ctx, uint32_t inst) +static struct output_pixel_processor *dcn301_opp_create(struct dc_context *ctx, + uint32_t inst) { struct dcn20_opp *opp = kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); @@ -757,9 +755,7 @@ struct output_pixel_processor *dcn301_opp_create( return &opp->base; } -struct dce_aux *dcn301_aux_engine_create( - struct dc_context *ctx, - uint32_t inst) +static struct dce_aux *dcn301_aux_engine_create(struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); @@ -793,9 +789,7 @@ static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) }; -struct dce_i2c_hw *dcn301_i2c_hw_create( - struct dc_context *ctx, - uint32_t inst) +static struct dce_i2c_hw *dcn301_i2c_hw_create(struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); @@ -829,7 +823,7 @@ static struct mpc *dcn301_mpc_create( return &mpc30->base; } -struct hubbub *dcn301_hubbub_create(struct dc_context *ctx) +static struct hubbub *dcn301_hubbub_create(struct dc_context *ctx) { int i; @@ -860,9 +854,8 @@ struct hubbub *dcn301_hubbub_create(struct dc_context *ctx) return &hubbub3->base; } -struct timing_generator *dcn301_timing_generator_create( - struct dc_context *ctx, - uint32_t instance) +static struct timing_generator *dcn301_timing_generator_create( + struct dc_context *ctx, uint32_t instance) { struct optc *tgn10 = 
kzalloc(sizeof(struct optc), GFP_KERNEL); @@ -894,7 +887,7 @@ static const struct encoder_feature_support link_enc_feature = { .flags.bits.IS_TPS4_CAPABLE = true }; -struct link_encoder *dcn301_link_encoder_create( +static struct link_encoder *dcn301_link_encoder_create( const struct encoder_init_data *enc_init_data) { struct dcn20_link_encoder *enc20 = @@ -915,7 +908,7 @@ struct link_encoder *dcn301_link_encoder_create( return &enc20->enc10.base; } -struct panel_cntl *dcn301_panel_cntl_create(const struct panel_cntl_init_data *init_data) +static struct panel_cntl *dcn301_panel_cntl_create(const struct panel_cntl_init_data *init_data) { struct dcn301_panel_cntl *panel_cntl = kzalloc(sizeof(struct dcn301_panel_cntl), GFP_KERNEL); @@ -997,9 +990,8 @@ static struct afmt *dcn301_afmt_create( return &afmt3->base; } -struct stream_encoder *dcn301_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) +static struct stream_encoder *dcn301_stream_encoder_create(enum engine_id eng_id, + struct dc_context *ctx) { struct dcn10_stream_encoder *enc1; struct vpg *vpg; @@ -1033,8 +1025,7 @@ struct stream_encoder *dcn301_stream_encoder_create( return &enc1->base; } -struct dce_hwseq *dcn301_hwseq_create( - struct dc_context *ctx) +static struct dce_hwseq *dcn301_hwseq_create(struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); @@ -1182,9 +1173,7 @@ static void dcn301_destruct(struct dcn301_resource_pool *pool) dcn_dccg_destroy(&pool->base.dccg); } -struct hubp *dcn301_hubp_create( - struct dc_context *ctx, - uint32_t inst) +static struct hubp *dcn301_hubp_create(struct dc_context *ctx, uint32_t inst) { struct dcn20_hubp *hubp2 = kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); @@ -1201,7 +1190,7 @@ struct hubp *dcn301_hubp_create( return NULL; } -bool dcn301_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) +static bool dcn301_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) { int i; uint32_t pipe_count = pool->res_cap->num_dwb; @@ -1226,7 +1215,7 @@ bool dcn301_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) return true; } -bool dcn301_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) +static bool dcn301_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) { int i; uint32_t pipe_count = pool->res_cap->num_dwb; @@ -1449,9 +1438,7 @@ static bool dcn301_resource_construct( dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.extended_aux_timeout_support = true; -#ifdef CONFIG_DRM_AMD_DC_DMUB dc->caps.dmcub_support = true; -#endif /* Color pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; @@ -1487,6 +1474,23 @@ static bool dcn301_resource_construct( dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + /* read VBIOS LTTPR caps */ + if (ctx->dc_bios->funcs->get_lttpr_caps) { + enum bp_result bp_query_result; + uint8_t is_vbios_lttpr_enable = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); + dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; + } + + if (ctx->dc_bios->funcs->get_lttpr_interop) { + enum bp_result bp_query_result; + uint8_t is_vbios_interop_enabled = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled); + dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; + } + if (dc->ctx->dce_environment == 
DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c index d88b9011c502..eb375f30f5bc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c @@ -29,6 +29,8 @@ #include "dc.h" +#include "dcn302_init.h" + void dcn302_hw_sequencer_construct(struct dc *dc) { dcn30_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c index fcf96cf08c76..003e95368672 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c @@ -276,7 +276,7 @@ static const struct dc_plane_cap plane_cap = { .argb8888 = true, .nv12 = true, .fp16 = true, - .p010 = false, + .p010 = true, .ayuv = false, }, .max_upscale_factor = { @@ -1557,6 +1557,24 @@ static bool dcn302_resource_construct( dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + /* read VBIOS LTTPR caps */ + if (ctx->dc_bios->funcs->get_lttpr_caps) { + enum bp_result bp_query_result; + uint8_t is_vbios_lttpr_enable = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); + dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; + } + + if (ctx->dc_bios->funcs->get_lttpr_interop) { + enum bp_result bp_query_result; + uint8_t is_vbios_interop_enabled = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, + &is_vbios_interop_enabled); + dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; + } + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; else diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c index aa5dbbade2bd..f499f8ab5e47 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c @@ -9,6 +9,8 @@ #include "dcn30/dcn30_init.h" #include "dc.h" +#include "dcn303_init.h" + void dcn303_hw_sequencer_construct(struct dc *dc) { dcn30_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index 4a9b64023675..01ba9d656c72 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -254,7 +254,7 @@ static const struct dc_plane_cap plane_cap = { .argb8888 = true, .nv12 = true, .fp16 = true, - .p010 = false, + .p010 = true, .ayuv = false, }, .max_upscale_factor = { @@ -1500,6 +1500,23 @@ static bool dcn303_resource_construct( dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + /* read VBIOS LTTPR caps */ + if (ctx->dc_bios->funcs->get_lttpr_caps) { + enum bp_result bp_query_result; + uint8_t is_vbios_lttpr_enable = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); + dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; + } + + if (ctx->dc_bios->funcs->get_lttpr_interop) { + enum bp_result bp_query_result; + uint8_t is_vbios_interop_enabled = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled); + 
dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; + } + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; else diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c index 815481a3ef54..ea4f8e06b07c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c @@ -462,7 +462,7 @@ void dccg31_set_physymclk( } /* Controls the generation of pixel valid for OTG in (OTG -> HPO case) */ -void dccg31_set_dtbclk_dto( +static void dccg31_set_dtbclk_dto( struct dccg *dccg, int dtbclk_inst, int req_dtbclk_khz, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c index ee6f13bef377..71c359f9cdd2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c @@ -67,6 +67,39 @@ #define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) #endif +static uint8_t phy_id_from_transmitter(enum transmitter t) +{ + uint8_t phy_id; + + switch (t) { + case TRANSMITTER_UNIPHY_A: + phy_id = 0; + break; + case TRANSMITTER_UNIPHY_B: + phy_id = 1; + break; + case TRANSMITTER_UNIPHY_C: + phy_id = 2; + break; + case TRANSMITTER_UNIPHY_D: + phy_id = 3; + break; + case TRANSMITTER_UNIPHY_E: + phy_id = 4; + break; + case TRANSMITTER_UNIPHY_F: + phy_id = 5; + break; + case TRANSMITTER_UNIPHY_G: + phy_id = 6; + break; + default: + phy_id = 0; + break; + } + return phy_id; +} + void dcn31_link_encoder_set_dio_phy_mux( struct link_encoder *enc, enum encoder_type_select sel, @@ -141,7 +174,7 @@ void dcn31_link_encoder_set_dio_phy_mux( } } -void enc31_hw_init(struct link_encoder *enc) +static void enc31_hw_init(struct link_encoder *enc) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); @@ -536,57 +569,45 @@ void dcn31_link_encoder_disable_output( bool dcn31_link_encoder_is_in_alt_mode(struct link_encoder *enc) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); - uint32_t dp_alt_mode_disable; + struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv; + union dmub_rb_cmd cmd; bool is_usb_c_alt_mode = false; - if (enc->features.flags.bits.DP_IS_USB_C) { - if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) { - // [Note] no need to check hw_internal_rev once phy mux selection is ready - REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); - } else { - /* - * B0 phys use a new set of registers to check whether alt mode is disabled. - * if value == 1 alt mode is disabled, otherwise it is enabled. 
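The register probe being deleted in this comment block gives way, in the lines that follow, to a firmware round trip: fill a DMUB command, send it, and read the reply. A condensed sketch built from the identifiers in this file; example_is_dp_alt_disabled() is illustrative, and payload_bytes is sized here from query_dp_alt.data rather than the panel_cntl.data the patch happens to use:

static bool example_is_dp_alt_disabled(struct dc_dmub_srv *dmub,
		uint8_t phy_id)
{
	union dmub_rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.query_dp_alt.header.type = DMUB_CMD__VBIOS;
	cmd.query_dp_alt.header.sub_type =
			DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT;
	cmd.query_dp_alt.header.payload_bytes = sizeof(cmd.query_dp_alt.data);
	cmd.query_dp_alt.data.phy_id = phy_id;

	/* blocking round trip to DMUB; treat a failed query as disabled */
	if (!dc_dmub_srv_cmd_with_reply_data(dmub, &cmd))
		return true;

	return cmd.query_dp_alt.data.is_dp_alt_disable != 0;
}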
- */ - if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) - || (enc10->base.transmitter == TRANSMITTER_UNIPHY_B) - || (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) { - REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); - } else { - // [Note] need to change TRANSMITTER_UNIPHY_C/D to F/G once phy mux selection is ready - REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); - } - } + if (enc->features.flags.bits.DP_IS_USB_C && dc_dmub_srv) { + memset(&cmd, 0, sizeof(cmd)); + cmd.query_dp_alt.header.type = DMUB_CMD__VBIOS; + cmd.query_dp_alt.header.sub_type = DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT; + cmd.query_dp_alt.header.payload_bytes = sizeof(cmd.panel_cntl.data); + cmd.query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter); - is_usb_c_alt_mode = (dp_alt_mode_disable == 0); + if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, &cmd)) + return false; + + is_usb_c_alt_mode = (cmd.query_dp_alt.data.is_dp_alt_disable == 0); } return is_usb_c_alt_mode; } -void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc, - struct dc_link_settings *link_settings) +void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc, struct dc_link_settings *link_settings) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); - uint32_t is_in_usb_c_dp4_mode = 0; + struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv; + union dmub_rb_cmd cmd; dcn10_link_encoder_get_max_link_cap(enc, link_settings); - /* in usb c dp2 mode, max lane count is 2 */ - if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) { - if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) { - // [Note] no need to check hw_internal_rev once phy mux selection is ready - REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode); - } else { - if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) - || (enc10->base.transmitter == TRANSMITTER_UNIPHY_B) - || (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) { - REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode); - } else { - REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode); - } - } - if (!is_in_usb_c_dp4_mode) + if (enc->features.flags.bits.DP_IS_USB_C && dc_dmub_srv) { + memset(&cmd, 0, sizeof(cmd)); + cmd.query_dp_alt.header.type = DMUB_CMD__VBIOS; + cmd.query_dp_alt.header.sub_type = DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT; + cmd.query_dp_alt.header.payload_bytes = sizeof(cmd.panel_cntl.data); + cmd.query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter); + + if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, &cmd)) + return; + + if (cmd.query_dp_alt.data.is_usb && cmd.query_dp_alt.data.is_dp4 == 0) link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count); } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c index 565f12dd179a..5065904c7833 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c @@ -358,8 +358,8 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute( h_width = hw_crtc_timing.h_border_left + hw_crtc_timing.h_addressable + hw_crtc_timing.h_border_right; v_height = hw_crtc_timing.v_border_top + hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom; - hsp = hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0x80 : 0; - vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 
0x80 : 0; + hsp = hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0 : 0x80; + vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 0 : 0x80; v_freq = hw_crtc_timing.pix_clk_100hz * 100; /* MSA Packet Mapping to 32-bit Link Symbols - DP2 spec, section 2.7.4.1 diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index 5dd1ce9ddb53..4206ce5bf9a9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -112,7 +112,7 @@ void dcn31_init_hw(struct dc *dc) struct dc_bios *dcb = dc->ctx->dc_bios; struct resource_pool *res_pool = dc->res_pool; uint32_t backlight = MAX_BACKLIGHT_LEVEL; - int i, j; + int i; if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); @@ -192,50 +192,13 @@ void dcn31_init_hw(struct dc *dc) link->link_status.link_active = true; } - /* Power gate DSCs */ - for (i = 0; i < res_pool->res_cap->num_dsc; i++) - if (hws->funcs.dsc_pg_control != NULL) - hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); - /* Enables outbox notifications for usb4 dpia */ if (dc->res_pool->usb4_dpia_count) dmub_enable_outbox_notification(dc); /* we want to turn off all dp displays before doing detection */ - if (dc->config.power_down_display_on_boot) { - uint8_t dpcd_power_state = '\0'; - enum dc_status status = DC_ERROR_UNEXPECTED; - - for (i = 0; i < dc->link_count; i++) { - if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) - continue; - - /* if any of the displays are lit up turn them off */ - status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, - &dpcd_power_state, sizeof(dpcd_power_state)); - if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) { - /* blank dp stream before power off receiver*/ - if (dc->links[i]->ep_type == DISPLAY_ENDPOINT_PHY && - dc->links[i]->link_enc->funcs->get_dig_frontend) { - unsigned int fe; - - fe = dc->links[i]->link_enc->funcs->get_dig_frontend( - dc->links[i]->link_enc); - if (fe == ENGINE_ID_UNKNOWN) - continue; - - for (j = 0; j < dc->res_pool->stream_enc_count; j++) { - if (fe == dc->res_pool->stream_enc[j]->id) { - dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i], - dc->res_pool->stream_enc[j]); - break; - } - } - } - dp_receiver_power_ctrl(dc->links[i], false); - } - } - } + if (dc->config.power_down_display_on_boot) + dc_link_blank_all_dp_displays(dc); /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which @@ -602,7 +565,7 @@ void dcn31_reset_hw_ctx_wrap( dcn31_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); if (hws->funcs.enable_stream_gating) - hws->funcs.enable_stream_gating(dc, pipe_ctx); + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); if (old_clk) old_clk->funcs->cs_power_down(old_clk); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c index 05335a8c3c2d..7a7a8c5edabd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c @@ -31,6 +31,8 @@ #include "dcn301/dcn301_hwseq.h" #include "dcn31/dcn31_hwseq.h" +#include "dcn31_init.h" + static const struct hw_sequencer_funcs dcn31_funcs = { .program_gamut_remap = dcn10_program_gamut_remap, .init_hw = dcn31_init_hw, @@ -101,6 +103,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .z10_restore = dcn31_z10_restore, .z10_save_init = dcn31_z10_save_init, 
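Both this dcn31_init_hw() hunk and the dcn30_init_hw() hunk earlier replace an open-coded boot-time loop with a single dc_link_blank_all_dp_displays() call. The deleted lines show what such a helper has to cover; the reconstruction below is illustrative only (the real helper lives elsewhere in the patch and also handles the DIG-frontend blanking summarized here as a comment):

void example_blank_all_dp_displays(struct dc *dc)
{
	uint8_t dpcd_power_state = '\0';
	enum dc_status status;
	int i;

	for (i = 0; i < dc->link_count; i++) {
		if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
			continue;

		/* if the sink is lit up, blank it, then power it down */
		status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
				&dpcd_power_state, sizeof(dpcd_power_state));
		if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
			/* blank the stream via its DIG frontend first */
			dp_receiver_power_ctrl(dc->links[i], false);
		}
	}
}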
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator, + .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, .update_visual_confirm_color = dcn20_update_visual_confirm_color, }; @@ -149,4 +152,9 @@ void dcn31_hw_sequencer_construct(struct dc *dc) dc->hwss.init_hw = dcn20_fpga_init_hw; dc->hwseq->funcs.init_pipes = NULL; } + if (dc->debug.disable_z10) { + /*hw not support z10 or sw disable it*/ + dc->hwss.z10_restore = NULL; + dc->hwss.z10_save_init = NULL; + } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c index a4b1d98f0007..e8562fa11366 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c @@ -256,6 +256,7 @@ static struct timing_generator_funcs dcn31_tg_funcs = { .get_crc = optc1_get_crc, .configure_crc = optc2_configure_crc, .set_dsc_config = optc3_set_dsc_config, + .get_dsc_status = optc2_get_dsc_status, .set_dwb_source = NULL, .set_odm_bypass = optc3_set_odm_bypass, .set_odm_combine = optc31_set_odm_combine, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c index 3b3721386571..83ece02380a8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c @@ -65,7 +65,7 @@ static uint32_t dcn31_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cnt return cmd.panel_cntl.data.current_backlight; } -uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl) +static uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl) { struct dcn31_panel_cntl *dcn31_panel_cntl = TO_DCN31_PANEL_CNTL(panel_cntl); struct dc_dmub_srv *dc_dmub_srv = panel_cntl->ctx->dmub_srv; @@ -96,7 +96,7 @@ uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl) return cmd.panel_cntl.data.current_backlight; } -void dcn31_panel_cntl_destroy(struct panel_cntl **panel_cntl) +static void dcn31_panel_cntl_destroy(struct panel_cntl **panel_cntl) { struct dcn31_panel_cntl *dcn31_panel_cntl = TO_DCN31_PANEL_CNTL(*panel_cntl); @@ -104,7 +104,7 @@ void dcn31_panel_cntl_destroy(struct panel_cntl **panel_cntl) *panel_cntl = NULL; } -bool dcn31_is_panel_backlight_on(struct panel_cntl *panel_cntl) +static bool dcn31_is_panel_backlight_on(struct panel_cntl *panel_cntl) { union dmub_rb_cmd cmd; @@ -114,7 +114,7 @@ bool dcn31_is_panel_backlight_on(struct panel_cntl *panel_cntl) return cmd.panel_cntl.data.is_backlight_on; } -bool dcn31_is_panel_powered_on(struct panel_cntl *panel_cntl) +static bool dcn31_is_panel_powered_on(struct panel_cntl *panel_cntl) { union dmub_rb_cmd cmd; @@ -124,7 +124,7 @@ bool dcn31_is_panel_powered_on(struct panel_cntl *panel_cntl) return cmd.panel_cntl.data.is_powered_on; } -void dcn31_store_backlight_level(struct panel_cntl *panel_cntl) +static void dcn31_store_backlight_level(struct panel_cntl *panel_cntl) { union dmub_rb_cmd cmd; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index 18896294ae12..29ee3c22b0ab 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -485,7 +485,8 @@ static const struct dcn31_apg_mask apg_mask = { SE_DCN3_REG_LIST(id)\ } -static const struct dcn10_stream_enc_registers stream_enc_regs[] = { +/* Some encoders won't be initialized here - but they're logical, not physical. 
*/ +static const struct dcn10_stream_enc_registers stream_enc_regs[ENGINE_ID_COUNT] = { stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2), @@ -968,7 +969,7 @@ static const struct dc_plane_cap plane_cap = { .argb8888 = true, .nv12 = true, .fp16 = true, - .p010 = false, + .p010 = true, .ayuv = false, }, @@ -1023,6 +1024,7 @@ static const struct dc_debug_options debug_defaults_drv = { }, .optimize_edp_link_rate = true, .enable_sw_cntl_psr = true, + .apply_vendor_specific_lttpr_wa = true, }; static const struct dc_debug_options debug_defaults_diags = { @@ -1270,7 +1272,7 @@ static struct link_encoder *dcn31_link_enc_create_minimal( return &enc20->enc10.base; } -struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data) +static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data) { struct dcn31_panel_cntl *panel_cntl = kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL); @@ -1774,6 +1776,7 @@ static int dcn31_populate_dml_pipes_from_context( int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; struct pipe_ctx *pipe; + bool upscaled = false; dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); @@ -1785,6 +1788,11 @@ static int dcn31_populate_dml_pipes_from_context( pipe = &res_ctx->pipe_ctx[i]; timing = &pipe->stream->timing; + if (pipe->plane_state && + (pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height || + pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width)) + upscaled = true; + /* * Immediate flip can be set dynamically after enabling the plane. * We need to require support for immediate flip or underflow can be @@ -1829,6 +1837,11 @@ static int dcn31_populate_dml_pipes_from_context( context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; pipes[0].pipe.src.unbounded_req_mode = true; } + } else if (context->stream_count >= dc->debug.crb_alloc_policy_min_disp_count + && dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) { + context->bw_ctx.dml.ip.det_buffer_size_kbytes = dc->debug.crb_alloc_policy * 64; + } else if (context->stream_count >= 3 && upscaled) { + context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; } return pipe_cnt; @@ -2199,6 +2212,8 @@ static bool dcn31_resource_construct( dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.dp_hpo = true; + dc->caps.hdmi_frl_pcon_support = true; + dc->caps.edp_dsc_support = true; dc->caps.extended_aux_timeout_support = true; dc->caps.dmcub_support = true; dc->caps.is_apu = true; @@ -2237,6 +2252,9 @@ static bool dcn31_resource_construct( dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + /* Use pipe context based otg sync logic */ + dc->config.use_pipe_ctx_sync_logic = true; + /* read VBIOS LTTPR caps */ { if (ctx->dc_bios->funcs->get_lttpr_caps) { diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index 0fe66b080a03..7f94e3f70d7f 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -59,7 +59,7 @@ void dm_helpers_free_gpu_mem( void *pvMem); enum dc_edid_status dm_helpers_parse_edid_caps( - struct dc_context *ctx, + struct dc_link *link, const struct dc_edid *edid, struct dc_edid_caps *edid_caps); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c index 46c433c0bcb0..8bc27de4c104 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c @@ -1711,14 +1711,6 @@ void dml21_rq_dlg_get_dlg_reg( dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx); } -void dml_rq_dlg_get_arb_params(struct display_mode_lib *mode_lib, display_arb_params_st *arb_param) -{ - memset(arb_param, 0, sizeof(*arb_param)); - arb_param->max_req_outstanding = 256; - arb_param->min_req_outstanding = 68; - arb_param->sat_level_us = 60; -} - static void calculate_ttu_cursor( struct display_mode_lib *mode_lib, double *refcyc_per_req_delivery_pre_cur, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index 7e937bdcea00..6feb23432f8d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -422,62 +422,8 @@ static void CalculateUrgentBurstFactor( static void UseMinimumDCFCLK( struct display_mode_lib *mode_lib, - int MaxInterDCNTileRepeaters, int MaxPrefetchMode, - double FinalDRAMClockChangeLatency, - double SREnterPlusExitTime, - int ReturnBusWidth, - int RoundTripPingLatencyCycles, - int ReorderingBytes, - int PixelChunkSizeInKByte, - int MetaChunkSize, - bool GPUVMEnable, - int GPUVMMaxPageTableLevels, - bool HostVMEnable, - int NumberOfActivePlanes, - double HostVMMinPageSize, - int HostVMMaxNonCachedPageTableLevels, - bool DynamicMetadataVMEnabled, - enum immediate_flip_requirement ImmediateFlipRequirement, - bool ProgressiveToInterlaceUnitInOPP, - double MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation, - double PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency, - int VTotal[], - int VActive[], - int DynamicMetadataTransmittedBytes[], - int DynamicMetadataLinesBeforeActiveRequired[], - bool Interlace[], - double RequiredDPPCLK[][2][DC__NUM_DPP__MAX], - double RequiredDISPCLK[][2], - double UrgLatency[], - unsigned int NoOfDPP[][2][DC__NUM_DPP__MAX], - double ProjectedDCFCLKDeepSleep[][2], - double MaximumVStartup[][2][DC__NUM_DPP__MAX], - double TotalVActivePixelBandwidth[][2], - double TotalVActiveCursorBandwidth[][2], - double TotalMetaRowBandwidth[][2], - double TotalDPTERowBandwidth[][2], - unsigned int TotalNumberOfActiveDPP[][2], - unsigned int TotalNumberOfDCCActiveDPP[][2], - int dpte_group_bytes[], - double PrefetchLinesY[][2][DC__NUM_DPP__MAX], - double PrefetchLinesC[][2][DC__NUM_DPP__MAX], - int swath_width_luma_ub_all_states[][2][DC__NUM_DPP__MAX], - int swath_width_chroma_ub_all_states[][2][DC__NUM_DPP__MAX], - int BytePerPixelY[], - int BytePerPixelC[], - int HTotal[], - double PixelClock[], - double PDEAndMetaPTEBytesPerFrame[][2][DC__NUM_DPP__MAX], - double DPTEBytesPerRow[][2][DC__NUM_DPP__MAX], - double MetaRowBytes[][2][DC__NUM_DPP__MAX], - bool DynamicMetadataEnable[], - double VActivePixelBandwidth[][2][DC__NUM_DPP__MAX], - double VActiveCursorBandwidth[][2][DC__NUM_DPP__MAX], - double ReadBandwidthLuma[], - double ReadBandwidthChroma[], - double DCFCLKPerState[], - double DCFCLKState[][2]); + int ReorderingBytes); static void CalculatePixelDeliveryTimes( unsigned int NumberOfActivePlanes, @@ -3949,6 +3895,102 @@ static double TruncToValidBPP( return BPP_INVALID; } +static noinline void CalculatePrefetchSchedulePerPlane( + struct display_mode_lib *mode_lib, + double HostVMInefficiencyFactor, + int i, + unsigned j, + unsigned k) +{ + struct vba_vars_st *v = &mode_lib->vba; + 
Pipe myPipe; + + myPipe.DPPCLK = v->RequiredDPPCLK[i][j][k]; + myPipe.DISPCLK = v->RequiredDISPCLK[i][j]; + myPipe.PixelClock = v->PixelClock[k]; + myPipe.DCFCLKDeepSleep = v->ProjectedDCFCLKDeepSleep[i][j]; + myPipe.DPPPerPlane = v->NoOfDPP[i][j][k]; + myPipe.ScalerEnabled = v->ScalerEnabled[k]; + myPipe.SourceScan = v->SourceScan[k]; + myPipe.BlockWidth256BytesY = v->Read256BlockWidthY[k]; + myPipe.BlockHeight256BytesY = v->Read256BlockHeightY[k]; + myPipe.BlockWidth256BytesC = v->Read256BlockWidthC[k]; + myPipe.BlockHeight256BytesC = v->Read256BlockHeightC[k]; + myPipe.InterlaceEnable = v->Interlace[k]; + myPipe.NumberOfCursors = v->NumberOfCursors[k]; + myPipe.VBlank = v->VTotal[k] - v->VActive[k]; + myPipe.HTotal = v->HTotal[k]; + myPipe.DCCEnable = v->DCCEnable[k]; + myPipe.ODMCombineIsEnabled = v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1 + || v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1; + myPipe.SourcePixelFormat = v->SourcePixelFormat[k]; + myPipe.BytePerPixelY = v->BytePerPixelY[k]; + myPipe.BytePerPixelC = v->BytePerPixelC[k]; + myPipe.ProgressiveToInterlaceUnitInOPP = v->ProgressiveToInterlaceUnitInOPP; + v->NoTimeForPrefetch[i][j][k] = CalculatePrefetchSchedule( + mode_lib, + HostVMInefficiencyFactor, + &myPipe, + v->DSCDelayPerState[i][k], + v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater, + v->DPPCLKDelaySCL, + v->DPPCLKDelaySCLLBOnly, + v->DPPCLKDelayCNVCCursor, + v->DISPCLKDelaySubtotal, + v->SwathWidthYThisState[k] / v->HRatio[k], + v->OutputFormat[k], + v->MaxInterDCNTileRepeaters, + dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]), + v->MaximumVStartup[i][j][k], + v->GPUVMMaxPageTableLevels, + v->GPUVMEnable, + v->HostVMEnable, + v->HostVMMaxNonCachedPageTableLevels, + v->HostVMMinPageSize, + v->DynamicMetadataEnable[k], + v->DynamicMetadataVMEnabled, + v->DynamicMetadataLinesBeforeActiveRequired[k], + v->DynamicMetadataTransmittedBytes[k], + v->UrgLatency[i], + v->ExtraLatency, + v->TimeCalc, + v->PDEAndMetaPTEBytesPerFrame[i][j][k], + v->MetaRowBytes[i][j][k], + v->DPTEBytesPerRow[i][j][k], + v->PrefetchLinesY[i][j][k], + v->SwathWidthYThisState[k], + v->PrefillY[k], + v->MaxNumSwY[k], + v->PrefetchLinesC[i][j][k], + v->SwathWidthCThisState[k], + v->PrefillC[k], + v->MaxNumSwC[k], + v->swath_width_luma_ub_this_state[k], + v->swath_width_chroma_ub_this_state[k], + v->SwathHeightYThisState[k], + v->SwathHeightCThisState[k], + v->TWait, + &v->DSTXAfterScaler[k], + &v->DSTYAfterScaler[k], + &v->LineTimesForPrefetch[k], + &v->PrefetchBW[k], + &v->LinesForMetaPTE[k], + &v->LinesForMetaAndDPTERow[k], + &v->VRatioPreY[i][j][k], + &v->VRatioPreC[i][j][k], + &v->RequiredPrefetchPixelDataBWLuma[i][j][k], + &v->RequiredPrefetchPixelDataBWChroma[i][j][k], + &v->NoTimeForDynamicMetadata[i][j][k], + &v->Tno_bw[k], + &v->prefetch_vmrow_bw[k], + &v->dummy7[k], + &v->dummy8[k], + &v->dummy13[k], + &v->VUpdateOffsetPix[k], + &v->VUpdateWidthPix[k], + &v->VReadyOffsetPix[k]); +} + void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib) { struct vba_vars_st *v = &mode_lib->vba; @@ -5079,66 +5121,8 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } - if (v->UseMinimumRequiredDCFCLK == true) { - UseMinimumDCFCLK( - mode_lib, - v->MaxInterDCNTileRepeaters, - MaxPrefetchMode, - v->DRAMClockChangeLatency, - v->SREnterPlusExitTime, - v->ReturnBusWidth, - v->RoundTripPingLatencyCycles, - ReorderingBytes, - v->PixelChunkSizeInKByte, - v->MetaChunkSize, - v->GPUVMEnable, - 
v->GPUVMMaxPageTableLevels, - v->HostVMEnable, - v->NumberOfActivePlanes, - v->HostVMMinPageSize, - v->HostVMMaxNonCachedPageTableLevels, - v->DynamicMetadataVMEnabled, - v->ImmediateFlipRequirement[0], - v->ProgressiveToInterlaceUnitInOPP, - v->MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation, - v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency, - v->VTotal, - v->VActive, - v->DynamicMetadataTransmittedBytes, - v->DynamicMetadataLinesBeforeActiveRequired, - v->Interlace, - v->RequiredDPPCLK, - v->RequiredDISPCLK, - v->UrgLatency, - v->NoOfDPP, - v->ProjectedDCFCLKDeepSleep, - v->MaximumVStartup, - v->TotalVActivePixelBandwidth, - v->TotalVActiveCursorBandwidth, - v->TotalMetaRowBandwidth, - v->TotalDPTERowBandwidth, - v->TotalNumberOfActiveDPP, - v->TotalNumberOfDCCActiveDPP, - v->dpte_group_bytes, - v->PrefetchLinesY, - v->PrefetchLinesC, - v->swath_width_luma_ub_all_states, - v->swath_width_chroma_ub_all_states, - v->BytePerPixelY, - v->BytePerPixelC, - v->HTotal, - v->PixelClock, - v->PDEAndMetaPTEBytesPerFrame, - v->DPTEBytesPerRow, - v->MetaRowBytes, - v->DynamicMetadataEnable, - v->VActivePixelBandwidth, - v->VActiveCursorBandwidth, - v->ReadBandwidthLuma, - v->ReadBandwidthChroma, - v->DCFCLKPerState, - v->DCFCLKState); - } + if (v->UseMinimumRequiredDCFCLK == true) + UseMinimumDCFCLK(mode_lib, MaxPrefetchMode, ReorderingBytes); for (i = 0; i < v->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { @@ -5276,92 +5260,9 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->SREnterPlusExitTime); for (k = 0; k < v->NumberOfActivePlanes; k++) { - Pipe myPipe; - - myPipe.DPPCLK = v->RequiredDPPCLK[i][j][k]; - myPipe.DISPCLK = v->RequiredDISPCLK[i][j]; - myPipe.PixelClock = v->PixelClock[k]; - myPipe.DCFCLKDeepSleep = v->ProjectedDCFCLKDeepSleep[i][j]; - myPipe.DPPPerPlane = v->NoOfDPP[i][j][k]; - myPipe.ScalerEnabled = v->ScalerEnabled[k]; - myPipe.SourceScan = v->SourceScan[k]; - myPipe.BlockWidth256BytesY = v->Read256BlockWidthY[k]; - myPipe.BlockHeight256BytesY = v->Read256BlockHeightY[k]; - myPipe.BlockWidth256BytesC = v->Read256BlockWidthC[k]; - myPipe.BlockHeight256BytesC = v->Read256BlockHeightC[k]; - myPipe.InterlaceEnable = v->Interlace[k]; - myPipe.NumberOfCursors = v->NumberOfCursors[k]; - myPipe.VBlank = v->VTotal[k] - v->VActive[k]; - myPipe.HTotal = v->HTotal[k]; - myPipe.DCCEnable = v->DCCEnable[k]; - myPipe.ODMCombineIsEnabled = v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1 - || v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1; - myPipe.SourcePixelFormat = v->SourcePixelFormat[k]; - myPipe.BytePerPixelY = v->BytePerPixelY[k]; - myPipe.BytePerPixelC = v->BytePerPixelC[k]; - myPipe.ProgressiveToInterlaceUnitInOPP = v->ProgressiveToInterlaceUnitInOPP; - v->NoTimeForPrefetch[i][j][k] = CalculatePrefetchSchedule( - mode_lib, - HostVMInefficiencyFactor, - &myPipe, - v->DSCDelayPerState[i][k], - v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater, - v->DPPCLKDelaySCL, - v->DPPCLKDelaySCLLBOnly, - v->DPPCLKDelayCNVCCursor, - v->DISPCLKDelaySubtotal, - v->SwathWidthYThisState[k] / v->HRatio[k], - v->OutputFormat[k], - v->MaxInterDCNTileRepeaters, - dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]), - v->MaximumVStartup[i][j][k], - v->GPUVMMaxPageTableLevels, - v->GPUVMEnable, - v->HostVMEnable, - v->HostVMMaxNonCachedPageTableLevels, - v->HostVMMinPageSize, - v->DynamicMetadataEnable[k], - v->DynamicMetadataVMEnabled, - v->DynamicMetadataLinesBeforeActiveRequired[k], - 
v->DynamicMetadataTransmittedBytes[k], - v->UrgLatency[i], - v->ExtraLatency, - v->TimeCalc, - v->PDEAndMetaPTEBytesPerFrame[i][j][k], - v->MetaRowBytes[i][j][k], - v->DPTEBytesPerRow[i][j][k], - v->PrefetchLinesY[i][j][k], - v->SwathWidthYThisState[k], - v->PrefillY[k], - v->MaxNumSwY[k], - v->PrefetchLinesC[i][j][k], - v->SwathWidthCThisState[k], - v->PrefillC[k], - v->MaxNumSwC[k], - v->swath_width_luma_ub_this_state[k], - v->swath_width_chroma_ub_this_state[k], - v->SwathHeightYThisState[k], - v->SwathHeightCThisState[k], - v->TWait, - &v->DSTXAfterScaler[k], - &v->DSTYAfterScaler[k], - &v->LineTimesForPrefetch[k], - &v->PrefetchBW[k], - &v->LinesForMetaPTE[k], - &v->LinesForMetaAndDPTERow[k], - &v->VRatioPreY[i][j][k], - &v->VRatioPreC[i][j][k], - &v->RequiredPrefetchPixelDataBWLuma[i][j][k], - &v->RequiredPrefetchPixelDataBWChroma[i][j][k], - &v->NoTimeForDynamicMetadata[i][j][k], - &v->Tno_bw[k], - &v->prefetch_vmrow_bw[k], - &v->dummy7[k], - &v->dummy8[k], - &v->dummy13[k], - &v->VUpdateOffsetPix[k], - &v->VUpdateWidthPix[k], - &v->VReadyOffsetPix[k]); + CalculatePrefetchSchedulePerPlane(mode_lib, + HostVMInefficiencyFactor, + i, j, k); } for (k = 0; k < v->NumberOfActivePlanes; k++) { @@ -7249,69 +7150,15 @@ static double CalculateUrgentLatency( static void UseMinimumDCFCLK( struct display_mode_lib *mode_lib, - int MaxInterDCNTileRepeaters, int MaxPrefetchMode, - double FinalDRAMClockChangeLatency, - double SREnterPlusExitTime, - int ReturnBusWidth, - int RoundTripPingLatencyCycles, - int ReorderingBytes, - int PixelChunkSizeInKByte, - int MetaChunkSize, - bool GPUVMEnable, - int GPUVMMaxPageTableLevels, - bool HostVMEnable, - int NumberOfActivePlanes, - double HostVMMinPageSize, - int HostVMMaxNonCachedPageTableLevels, - bool DynamicMetadataVMEnabled, - enum immediate_flip_requirement ImmediateFlipRequirement, - bool ProgressiveToInterlaceUnitInOPP, - double MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation, - double PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency, - int VTotal[], - int VActive[], - int DynamicMetadataTransmittedBytes[], - int DynamicMetadataLinesBeforeActiveRequired[], - bool Interlace[], - double RequiredDPPCLK[][2][DC__NUM_DPP__MAX], - double RequiredDISPCLK[][2], - double UrgLatency[], - unsigned int NoOfDPP[][2][DC__NUM_DPP__MAX], - double ProjectedDCFCLKDeepSleep[][2], - double MaximumVStartup[][2][DC__NUM_DPP__MAX], - double TotalVActivePixelBandwidth[][2], - double TotalVActiveCursorBandwidth[][2], - double TotalMetaRowBandwidth[][2], - double TotalDPTERowBandwidth[][2], - unsigned int TotalNumberOfActiveDPP[][2], - unsigned int TotalNumberOfDCCActiveDPP[][2], - int dpte_group_bytes[], - double PrefetchLinesY[][2][DC__NUM_DPP__MAX], - double PrefetchLinesC[][2][DC__NUM_DPP__MAX], - int swath_width_luma_ub_all_states[][2][DC__NUM_DPP__MAX], - int swath_width_chroma_ub_all_states[][2][DC__NUM_DPP__MAX], - int BytePerPixelY[], - int BytePerPixelC[], - int HTotal[], - double PixelClock[], - double PDEAndMetaPTEBytesPerFrame[][2][DC__NUM_DPP__MAX], - double DPTEBytesPerRow[][2][DC__NUM_DPP__MAX], - double MetaRowBytes[][2][DC__NUM_DPP__MAX], - bool DynamicMetadataEnable[], - double VActivePixelBandwidth[][2][DC__NUM_DPP__MAX], - double VActiveCursorBandwidth[][2][DC__NUM_DPP__MAX], - double ReadBandwidthLuma[], - double ReadBandwidthChroma[], - double DCFCLKPerState[], - double DCFCLKState[][2]) + int ReorderingBytes) { struct vba_vars_st *v = &mode_lib->vba; int dummy1, i, j, k; double NormalEfficiency, dummy2, dummy3; 
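UseMinimumDCFCLK() gets the same treatment here that CalculatePrefetchSchedulePerPlane() got above: the multi-dozen-argument signature collapses to the mode_lib pointer plus the few values not already cached in vba_vars_st, and the body reads everything else through v->. In miniature, with hypothetical names:

/* before: every input threaded through the call */
static void example_before(struct display_mode_lib *mode_lib,
		int ReturnBusWidth, double UrgLatency[], int HTotal[]
		/* ...dozens more... */);

/* after: fetch the shared state once, read fields on demand */
static void example_after(struct display_mode_lib *mode_lib)
{
	struct vba_vars_st *v = &mode_lib->vba;

	/* use v->ReturnBusWidth, v->UrgLatency[i], v->HTotal[k], ... */
}

Besides shrinking the call sites, this keeps the prototype stable when DML grows a new per-state input.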
double TotalMaxPrefetchFlipDPTERowBandwidth[DC__VOLTAGE_STATES][2]; - NormalEfficiency = PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency / 100.0; + NormalEfficiency = v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency / 100.0; for (i = 0; i < v->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { double PixelDCFCLKCyclesRequiredInPrefetch[DC__NUM_DPP__MAX]; @@ -7329,61 +7176,61 @@ static void UseMinimumDCFCLK( double MinimumTvmPlus2Tr0; TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = 0; - for (k = 0; k < NumberOfActivePlanes; ++k) { + for (k = 0; k < v->NumberOfActivePlanes; ++k) { TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = TotalMaxPrefetchFlipDPTERowBandwidth[i][j] - + NoOfDPP[i][j][k] * DPTEBytesPerRow[i][j][k] / (15.75 * HTotal[k] / PixelClock[k]); + + v->NoOfDPP[i][j][k] * v->DPTEBytesPerRow[i][j][k] / (15.75 * v->HTotal[k] / v->PixelClock[k]); } - for (k = 0; k <= NumberOfActivePlanes - 1; ++k) { - NoOfDPPState[k] = NoOfDPP[i][j][k]; + for (k = 0; k <= v->NumberOfActivePlanes - 1; ++k) { + NoOfDPPState[k] = v->NoOfDPP[i][j][k]; } - MinimumTWait = CalculateTWait(MaxPrefetchMode, FinalDRAMClockChangeLatency, UrgLatency[i], SREnterPlusExitTime); - NonDPTEBandwidth = TotalVActivePixelBandwidth[i][j] + TotalVActiveCursorBandwidth[i][j] + TotalMetaRowBandwidth[i][j]; - DPTEBandwidth = (HostVMEnable == true || ImmediateFlipRequirement == dm_immediate_flip_required) ? - TotalMaxPrefetchFlipDPTERowBandwidth[i][j] : TotalDPTERowBandwidth[i][j]; + MinimumTWait = CalculateTWait(MaxPrefetchMode, v->FinalDRAMClockChangeLatency, v->UrgLatency[i], v->SREnterPlusExitTime); + NonDPTEBandwidth = v->TotalVActivePixelBandwidth[i][j] + v->TotalVActiveCursorBandwidth[i][j] + v->TotalMetaRowBandwidth[i][j]; + DPTEBandwidth = (v->HostVMEnable == true || v->ImmediateFlipRequirement[0] == dm_immediate_flip_required) ? 
+ TotalMaxPrefetchFlipDPTERowBandwidth[i][j] : v->TotalDPTERowBandwidth[i][j]; DCFCLKRequiredForAverageBandwidth = dml_max3( - ProjectedDCFCLKDeepSleep[i][j], - (NonDPTEBandwidth + TotalDPTERowBandwidth[i][j]) / ReturnBusWidth - / (MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation / 100), - (NonDPTEBandwidth + DPTEBandwidth / NormalEfficiency) / NormalEfficiency / ReturnBusWidth); + v->ProjectedDCFCLKDeepSleep[i][j], + (NonDPTEBandwidth + v->TotalDPTERowBandwidth[i][j]) / v->ReturnBusWidth + / (v->MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation / 100), + (NonDPTEBandwidth + DPTEBandwidth / NormalEfficiency) / NormalEfficiency / v->ReturnBusWidth); ExtraLatencyBytes = CalculateExtraLatencyBytes( ReorderingBytes, - TotalNumberOfActiveDPP[i][j], - PixelChunkSizeInKByte, - TotalNumberOfDCCActiveDPP[i][j], - MetaChunkSize, - GPUVMEnable, - HostVMEnable, - NumberOfActivePlanes, + v->TotalNumberOfActiveDPP[i][j], + v->PixelChunkSizeInKByte, + v->TotalNumberOfDCCActiveDPP[i][j], + v->MetaChunkSize, + v->GPUVMEnable, + v->HostVMEnable, + v->NumberOfActivePlanes, NoOfDPPState, - dpte_group_bytes, + v->dpte_group_bytes, 1, - HostVMMinPageSize, - HostVMMaxNonCachedPageTableLevels); - ExtraLatencyCycles = RoundTripPingLatencyCycles + __DML_ARB_TO_RET_DELAY__ + ExtraLatencyBytes / NormalEfficiency / ReturnBusWidth; - for (k = 0; k < NumberOfActivePlanes; ++k) { + v->HostVMMinPageSize, + v->HostVMMaxNonCachedPageTableLevels); + ExtraLatencyCycles = v->RoundTripPingLatencyCycles + __DML_ARB_TO_RET_DELAY__ + ExtraLatencyBytes / NormalEfficiency / v->ReturnBusWidth; + for (k = 0; k < v->NumberOfActivePlanes; ++k) { double DCFCLKCyclesRequiredInPrefetch; double ExpectedPrefetchBWAcceleration; double PrefetchTime; - PixelDCFCLKCyclesRequiredInPrefetch[k] = (PrefetchLinesY[i][j][k] * swath_width_luma_ub_all_states[i][j][k] * BytePerPixelY[k] - + PrefetchLinesC[i][j][k] * swath_width_chroma_ub_all_states[i][j][k] * BytePerPixelC[k]) / NormalEfficiency / ReturnBusWidth; + PixelDCFCLKCyclesRequiredInPrefetch[k] = (v->PrefetchLinesY[i][j][k] * v->swath_width_luma_ub_all_states[i][j][k] * v->BytePerPixelY[k] + + v->PrefetchLinesC[i][j][k] * v->swath_width_chroma_ub_all_states[i][j][k] * v->BytePerPixelC[k]) / NormalEfficiency / v->ReturnBusWidth; DCFCLKCyclesRequiredInPrefetch = 2 * ExtraLatencyCycles / NoOfDPPState[k] - + PDEAndMetaPTEBytesPerFrame[i][j][k] / NormalEfficiency / NormalEfficiency / ReturnBusWidth * (GPUVMMaxPageTableLevels > 2 ? 1 : 0) - + 2 * DPTEBytesPerRow[i][j][k] / NormalEfficiency / NormalEfficiency / ReturnBusWidth - + 2 * MetaRowBytes[i][j][k] / NormalEfficiency / ReturnBusWidth + PixelDCFCLKCyclesRequiredInPrefetch[k]; - PrefetchPixelLinesTime[k] = dml_max(PrefetchLinesY[i][j][k], PrefetchLinesC[i][j][k]) * HTotal[k] / PixelClock[k]; - ExpectedPrefetchBWAcceleration = (VActivePixelBandwidth[i][j][k] + VActiveCursorBandwidth[i][j][k]) - / (ReadBandwidthLuma[k] + ReadBandwidthChroma[k]); + + v->PDEAndMetaPTEBytesPerFrame[i][j][k] / NormalEfficiency / NormalEfficiency / v->ReturnBusWidth * (v->GPUVMMaxPageTableLevels > 2 ? 
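/*
 * Shape of the cycle accounting in this loop, as a worked example with
 * made-up numbers: prefetch bytes become DCFCLK cycles by dividing by the
 * return-bus width and derating by NormalEfficiency.
 *
 *     double eff    = 64.0 / 100.0;        // hypothetical 64% efficiency
 *     int    bus    = 64;                  // ReturnBusWidth, bytes per cycle
 *     double bytes  = 1000000.0;           // luma + chroma prefetch bytes
 *     double cycles = bytes / eff / bus;   // ~24414 cycles
 *     // at a 600 MHz DCFCLK this is roughly 40.7 us of fetch time
 */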
1 : 0) + + 2 * v->DPTEBytesPerRow[i][j][k] / NormalEfficiency / NormalEfficiency / v->ReturnBusWidth + + 2 * v->MetaRowBytes[i][j][k] / NormalEfficiency / v->ReturnBusWidth + PixelDCFCLKCyclesRequiredInPrefetch[k]; + PrefetchPixelLinesTime[k] = dml_max(v->PrefetchLinesY[i][j][k], v->PrefetchLinesC[i][j][k]) * v->HTotal[k] / v->PixelClock[k]; + ExpectedPrefetchBWAcceleration = (v->VActivePixelBandwidth[i][j][k] + v->VActiveCursorBandwidth[i][j][k]) + / (v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k]); DynamicMetadataVMExtraLatency[k] = - (GPUVMEnable == true && DynamicMetadataEnable[k] == true && DynamicMetadataVMEnabled == true) ? - UrgLatency[i] * GPUVMMaxPageTableLevels * (HostVMEnable == true ? HostVMMaxNonCachedPageTableLevels + 1 : 1) : 0; - PrefetchTime = (MaximumVStartup[i][j][k] - 1) * HTotal[k] / PixelClock[k] - MinimumTWait - - UrgLatency[i] - * ((GPUVMMaxPageTableLevels <= 2 ? GPUVMMaxPageTableLevels : GPUVMMaxPageTableLevels - 2) - * (HostVMEnable == true ? HostVMMaxNonCachedPageTableLevels + 1 : 1) - 1) + (v->GPUVMEnable == true && v->DynamicMetadataEnable[k] == true && v->DynamicMetadataVMEnabled == true) ? + v->UrgLatency[i] * v->GPUVMMaxPageTableLevels * (v->HostVMEnable == true ? v->HostVMMaxNonCachedPageTableLevels + 1 : 1) : 0; + PrefetchTime = (v->MaximumVStartup[i][j][k] - 1) * v->HTotal[k] / v->PixelClock[k] - MinimumTWait + - v->UrgLatency[i] + * ((v->GPUVMMaxPageTableLevels <= 2 ? v->GPUVMMaxPageTableLevels : v->GPUVMMaxPageTableLevels - 2) + * (v->HostVMEnable == true ? v->HostVMMaxNonCachedPageTableLevels + 1 : 1) - 1) - DynamicMetadataVMExtraLatency[k]; if (PrefetchTime > 0) { @@ -7392,14 +7239,14 @@ static void UseMinimumDCFCLK( / (PrefetchTime * PixelDCFCLKCyclesRequiredInPrefetch[k] / DCFCLKCyclesRequiredInPrefetch); DCFCLKRequiredForPeakBandwidthPerPlane[k] = NoOfDPPState[k] * PixelDCFCLKCyclesRequiredInPrefetch[k] / PrefetchPixelLinesTime[k] * dml_max(1.0, ExpectedVRatioPrefetch) * dml_max(1.0, ExpectedVRatioPrefetch / 4) * ExpectedPrefetchBWAcceleration; - if (HostVMEnable == true || ImmediateFlipRequirement == dm_immediate_flip_required) { + if (v->HostVMEnable == true || v->ImmediateFlipRequirement[0] == dm_immediate_flip_required) { DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKRequiredForPeakBandwidthPerPlane[k] - + NoOfDPPState[k] * DPTEBandwidth / NormalEfficiency / NormalEfficiency / ReturnBusWidth; + + NoOfDPPState[k] * DPTEBandwidth / NormalEfficiency / NormalEfficiency / v->ReturnBusWidth; } } else { - DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKPerState[i]; + DCFCLKRequiredForPeakBandwidthPerPlane[k] = v->DCFCLKPerState[i]; } - if (DynamicMetadataEnable[k] == true) { + if (v->DynamicMetadataEnable[k] == true) { double TSetupPipe; double TdmbfPipe; double TdmsksPipe; @@ -7407,17 +7254,17 @@ static void UseMinimumDCFCLK( double AllowedTimeForUrgentExtraLatency; CalculateVupdateAndDynamicMetadataParameters( - MaxInterDCNTileRepeaters, - RequiredDPPCLK[i][j][k], - RequiredDISPCLK[i][j], - ProjectedDCFCLKDeepSleep[i][j], - PixelClock[k], - HTotal[k], - VTotal[k] - VActive[k], - DynamicMetadataTransmittedBytes[k], - DynamicMetadataLinesBeforeActiveRequired[k], - Interlace[k], - ProgressiveToInterlaceUnitInOPP, + v->MaxInterDCNTileRepeaters, + v->RequiredDPPCLK[i][j][k], + v->RequiredDISPCLK[i][j], + v->ProjectedDCFCLKDeepSleep[i][j], + v->PixelClock[k], + v->HTotal[k], + v->VTotal[k] - v->VActive[k], + v->DynamicMetadataTransmittedBytes[k], + v->DynamicMetadataLinesBeforeActiveRequired[k], + v->Interlace[k], + 
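/*
 * The surrounding logic reduces to: per plane, find the DCFCLK that lets the
 * prefetch cycles fit inside PrefetchTime; sum the per-plane requirements;
 * then clamp against the state's maximum with a 5% margin. A compact
 * restatement (names shortened, purely illustrative):
 *
 *     double peak = 0.0;
 *     for (k = 0; k < nplanes; ++k)
 *             peak += cycles_in_prefetch[k] / prefetch_time[k];
 *     dcfclk = dml_min(dcfclk_per_state, 1.05 * dml_max(avg, peak));
 *
 * which mirrors the final assignment to v->DCFCLKState[i][j] further below.
 */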
v->ProgressiveToInterlaceUnitInOPP, &TSetupPipe, &TdmbfPipe, &TdmecPipe, @@ -7425,31 +7272,31 @@ static void UseMinimumDCFCLK( &dummy1, &dummy2, &dummy3); - AllowedTimeForUrgentExtraLatency = MaximumVStartup[i][j][k] * HTotal[k] / PixelClock[k] - MinimumTWait - TSetupPipe - TdmbfPipe - TdmecPipe + AllowedTimeForUrgentExtraLatency = v->MaximumVStartup[i][j][k] * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - TSetupPipe - TdmbfPipe - TdmecPipe - TdmsksPipe - DynamicMetadataVMExtraLatency[k]; if (AllowedTimeForUrgentExtraLatency > 0) { DCFCLKRequiredForPeakBandwidthPerPlane[k] = dml_max( DCFCLKRequiredForPeakBandwidthPerPlane[k], ExtraLatencyCycles / AllowedTimeForUrgentExtraLatency); } else { - DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKPerState[i]; + DCFCLKRequiredForPeakBandwidthPerPlane[k] = v->DCFCLKPerState[i]; } } } DCFCLKRequiredForPeakBandwidth = 0; - for (k = 0; k <= NumberOfActivePlanes - 1; ++k) { + for (k = 0; k <= v->NumberOfActivePlanes - 1; ++k) { DCFCLKRequiredForPeakBandwidth = DCFCLKRequiredForPeakBandwidth + DCFCLKRequiredForPeakBandwidthPerPlane[k]; } - MinimumTvmPlus2Tr0 = UrgLatency[i] - * (GPUVMEnable == true ? - (HostVMEnable == true ? - (GPUVMMaxPageTableLevels + 2) * (HostVMMaxNonCachedPageTableLevels + 1) - 1 : GPUVMMaxPageTableLevels + 1) : + MinimumTvmPlus2Tr0 = v->UrgLatency[i] + * (v->GPUVMEnable == true ? + (v->HostVMEnable == true ? + (v->GPUVMMaxPageTableLevels + 2) * (v->HostVMMaxNonCachedPageTableLevels + 1) - 1 : v->GPUVMMaxPageTableLevels + 1) : 0); - for (k = 0; k < NumberOfActivePlanes; ++k) { + for (k = 0; k < v->NumberOfActivePlanes; ++k) { double MaximumTvmPlus2Tr0PlusTsw; - MaximumTvmPlus2Tr0PlusTsw = (MaximumVStartup[i][j][k] - 2) * HTotal[k] / PixelClock[k] - MinimumTWait - DynamicMetadataVMExtraLatency[k]; + MaximumTvmPlus2Tr0PlusTsw = (v->MaximumVStartup[i][j][k] - 2) * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - DynamicMetadataVMExtraLatency[k]; if (MaximumTvmPlus2Tr0PlusTsw <= MinimumTvmPlus2Tr0 + PrefetchPixelLinesTime[k] / 4) { - DCFCLKRequiredForPeakBandwidth = DCFCLKPerState[i]; + DCFCLKRequiredForPeakBandwidth = v->DCFCLKPerState[i]; } else { DCFCLKRequiredForPeakBandwidth = dml_max3( DCFCLKRequiredForPeakBandwidth, @@ -7457,7 +7304,7 @@ static void UseMinimumDCFCLK( (2 * ExtraLatencyCycles + PixelDCFCLKCyclesRequiredInPrefetch[k]) / (MaximumTvmPlus2Tr0PlusTsw - MinimumTvmPlus2Tr0)); } } - DCFCLKState[i][j] = dml_min(DCFCLKPerState[i], 1.05 * dml_max(DCFCLKRequiredForAverageBandwidth, DCFCLKRequiredForPeakBandwidth)); + v->DCFCLKState[i][j] = dml_min(v->DCFCLKPerState[i], 1.05 * dml_max(DCFCLKRequiredForAverageBandwidth, DCFCLKRequiredForPeakBandwidth)); } } } diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h index 6905ef1e75a6..d76251fd1566 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h @@ -73,6 +73,7 @@ struct display_mode_lib { struct vba_vars_st vba; struct dal_logger *logger; struct dml_funcs funcs; + struct _vcs_dpi_display_e2e_pipe_params_st dml_pipe_state[6]; }; void dml_init_instance(struct display_mode_lib *lib, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c new file mode 100644 index 000000000000..91810aaee5a3 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c @@ -0,0 +1,1889 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dml_wrapper.h" +#include "resource.h" +#include "core_types.h" +#include "dsc.h" +#include "clk_mgr.h" + +#ifndef DC_LOGGER_INIT +#define DC_LOGGER_INIT +#undef DC_LOG_WARNING +#define DC_LOG_WARNING +#endif + +#define DML_WRAPPER_TRANSLATION_ +#include "dml_wrapper_translation.c" +#undef DML_WRAPPER_TRANSLATION_ + +static bool is_dual_plane(enum surface_pixel_format format) +{ + return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; +} + +static void build_clamping_params(struct dc_stream_state *stream) +{ + stream->clamping.clamping_level = CLAMPING_FULL_RANGE; + stream->clamping.c_depth = stream->timing.display_color_depth; + stream->clamping.pixel_encoding = stream->timing.pixel_encoding; +} + +static void get_pixel_clock_parameters( + const struct pipe_ctx *pipe_ctx, + struct pixel_clk_params *pixel_clk_params) +{ + const struct dc_stream_state *stream = pipe_ctx->stream; + + /*TODO: is this halved for YCbCr 420? 
in that case we might want to move + * the pixel clock normalization for hdmi up to here instead of doing it + * in pll_adjust_pix_clk + */ + pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; + pixel_clk_params->encoder_object_id = stream->link->link_enc->id; + pixel_clk_params->signal_type = pipe_ctx->stream->signal; + pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1; + /* TODO: un-hardcode*/ + pixel_clk_params->requested_sym_clk = LINK_RATE_LOW * + LINK_RATE_REF_FREQ_IN_KHZ; + pixel_clk_params->flags.ENABLE_SS = 0; + pixel_clk_params->color_depth = + stream->timing.display_color_depth; + pixel_clk_params->flags.DISPLAY_BLANKED = 1; + pixel_clk_params->flags.SUPPORT_YCBCR420 = (stream->timing.pixel_encoding == + PIXEL_ENCODING_YCBCR420); + pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding; + if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) { + pixel_clk_params->color_depth = COLOR_DEPTH_888; + } + if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) { + pixel_clk_params->requested_pix_clk_100hz = pixel_clk_params->requested_pix_clk_100hz / 2; + } + if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) + pixel_clk_params->requested_pix_clk_100hz *= 2; + +} + +static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx) +{ + get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params); + + if (pipe_ctx->clock_source) + pipe_ctx->clock_source->funcs->get_pix_clk_dividers( + pipe_ctx->clock_source, + &pipe_ctx->stream_res.pix_clk_params, + &pipe_ctx->pll_settings); + + pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding; + + resource_build_bit_depth_reduction_params(pipe_ctx->stream, + &pipe_ctx->stream->bit_depth_params); + build_clamping_params(pipe_ctx->stream); + + return DC_OK; +} + +static void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream, + struct bit_depth_reduction_params *fmt_bit_depth) +{ + enum dc_dither_option option = stream->dither_option; + enum dc_pixel_encoding pixel_encoding = + stream->timing.pixel_encoding; + + memset(fmt_bit_depth, 0, sizeof(*fmt_bit_depth)); + + if (option == DITHER_OPTION_DEFAULT) { + switch (stream->timing.display_color_depth) { + case COLOR_DEPTH_666: + option = DITHER_OPTION_SPATIAL6; + break; + case COLOR_DEPTH_888: + option = DITHER_OPTION_SPATIAL8; + break; + case COLOR_DEPTH_101010: + option = DITHER_OPTION_SPATIAL10; + break; + default: + option = DITHER_OPTION_DISABLE; + } + } + + if (option == DITHER_OPTION_DISABLE) + return; + + if (option == DITHER_OPTION_TRUN6) { + fmt_bit_depth->flags.TRUNCATE_ENABLED = 1; + fmt_bit_depth->flags.TRUNCATE_DEPTH = 0; + } else if (option == DITHER_OPTION_TRUN8 || + option == DITHER_OPTION_TRUN8_SPATIAL6 || + option == DITHER_OPTION_TRUN8_FM6) { + fmt_bit_depth->flags.TRUNCATE_ENABLED = 1; + fmt_bit_depth->flags.TRUNCATE_DEPTH = 1; + } else if (option == DITHER_OPTION_TRUN10 || + option == DITHER_OPTION_TRUN10_SPATIAL6 || + option == DITHER_OPTION_TRUN10_SPATIAL8 || + option == DITHER_OPTION_TRUN10_FM8 || + option == DITHER_OPTION_TRUN10_FM6 || + option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) { + fmt_bit_depth->flags.TRUNCATE_ENABLED = 1; + fmt_bit_depth->flags.TRUNCATE_DEPTH = 2; + } + + /* special case - Formatter can only reduce by 4 bits at most. 
+ * When reducing from 12 to 6 bits, + * HW recommends we use trunc with round mode + * (if we did nothing, trunc to 10 bits would be used) + * note that any 12->10 bit reduction is ignored prior to DCE8, + * as the input was 10 bits. + */ + if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM || + option == DITHER_OPTION_SPATIAL6 || + option == DITHER_OPTION_FM6) { + fmt_bit_depth->flags.TRUNCATE_ENABLED = 1; + fmt_bit_depth->flags.TRUNCATE_DEPTH = 2; + fmt_bit_depth->flags.TRUNCATE_MODE = 1; + } + + /* spatial dither + * note that spatial modes 1-3 are never used + */ + if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM || + option == DITHER_OPTION_SPATIAL6 || + option == DITHER_OPTION_TRUN10_SPATIAL6 || + option == DITHER_OPTION_TRUN8_SPATIAL6) { + fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1; + fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 0; + fmt_bit_depth->flags.HIGHPASS_RANDOM = 1; + fmt_bit_depth->flags.RGB_RANDOM = + (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0; + } else if (option == DITHER_OPTION_SPATIAL8_FRAME_RANDOM || + option == DITHER_OPTION_SPATIAL8 || + option == DITHER_OPTION_SPATIAL8_FM6 || + option == DITHER_OPTION_TRUN10_SPATIAL8 || + option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) { + fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1; + fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 1; + fmt_bit_depth->flags.HIGHPASS_RANDOM = 1; + fmt_bit_depth->flags.RGB_RANDOM = + (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0; + } else if (option == DITHER_OPTION_SPATIAL10_FRAME_RANDOM || + option == DITHER_OPTION_SPATIAL10 || + option == DITHER_OPTION_SPATIAL10_FM8 || + option == DITHER_OPTION_SPATIAL10_FM6) { + fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1; + fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 2; + fmt_bit_depth->flags.HIGHPASS_RANDOM = 1; + fmt_bit_depth->flags.RGB_RANDOM = + (pixel_encoding == PIXEL_ENCODING_RGB) ? 
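/*
 * The depth flags in this function are encodings rather than bit counts:
 * from the cases above and below, TRUNCATE_DEPTH, SPATIAL_DITHER_DEPTH and
 * FRAME_MODULATION_DEPTH values 0, 1, 2 select 6-, 8- and 10-bit targets
 * respectively (with the 12->6 special case forcing depth 2 plus round
 * mode). As a lookup-table restatement:
 *
 *     static const int target_bpc[] = { 6, 8, 10 };   // index = depth field
 *     int bpc = target_bpc[fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH];
 */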
1 : 0; + } + + if (option == DITHER_OPTION_SPATIAL6 || + option == DITHER_OPTION_SPATIAL8 || + option == DITHER_OPTION_SPATIAL10) { + fmt_bit_depth->flags.FRAME_RANDOM = 0; + } else { + fmt_bit_depth->flags.FRAME_RANDOM = 1; + } + + ////////////////////// + //// temporal dither + ////////////////////// + if (option == DITHER_OPTION_FM6 || + option == DITHER_OPTION_SPATIAL8_FM6 || + option == DITHER_OPTION_SPATIAL10_FM6 || + option == DITHER_OPTION_TRUN10_FM6 || + option == DITHER_OPTION_TRUN8_FM6 || + option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) { + fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1; + fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 0; + } else if (option == DITHER_OPTION_FM8 || + option == DITHER_OPTION_SPATIAL10_FM8 || + option == DITHER_OPTION_TRUN10_FM8) { + fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1; + fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 1; + } else if (option == DITHER_OPTION_FM10) { + fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1; + fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 2; + } + + fmt_bit_depth->pixel_encoding = pixel_encoding; +} + +bool dml_validate_dsc(struct dc *dc, struct dc_state *new_ctx) +{ + int i; + + /* Validate DSC config, dsc count validation is already done */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i]; + struct dc_stream_state *stream = pipe_ctx->stream; + struct dsc_config dsc_cfg; + struct pipe_ctx *odm_pipe; + int opp_cnt = 1; + + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) + opp_cnt++; + + /* Only need to validate top pipe */ + if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe || !stream || !stream->timing.flags.DSC) + continue; + + dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + + stream->timing.h_border_right) / opp_cnt; + dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + + stream->timing.v_border_bottom; + dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; + dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? 
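/*
 * Worked example of the ODM adjustment in dml_validate_dsc(), hypothetical
 * numbers: a 7680-wide DSC stream driven through 2:1 ODM combine
 * (opp_cnt == 2) is validated as two 3840-wide pictures, each carrying half
 * of the horizontal slices.
 *
 *     int opp_cnt = 2;                                  // 2:1 ODM combine
 *     dsc_cfg.pic_width = 7680 / opp_cnt;               // 3840 per OPP
 *     dsc_cfg.dc_dsc_cfg.num_slices_h = 8 / opp_cnt;    // 4 slices per OPP
 */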
true : false;
+		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+		dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+
+		if (pipe_ctx->stream_res.dsc && !pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg))
+			return false;
+	}
+	return true;
+}
+
+enum dc_status dml_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream)
+{
+	enum dc_status status = DC_OK;
+	struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+
+	if (!pipe_ctx)
+		return DC_ERROR_UNEXPECTED;
+
+
+	status = build_pipe_hw_param(pipe_ctx);
+
+	return status;
+}
+
+void dml_acquire_dsc(const struct dc *dc,
+			struct resource_context *res_ctx,
+			struct display_stream_compressor **dsc,
+			int pipe_idx)
+{
+	int i;
+	const struct resource_pool *pool = dc->res_pool;
+	struct display_stream_compressor *dsc_old = dc->current_state->res_ctx.pipe_ctx[pipe_idx].stream_res.dsc;
+
+	ASSERT(*dsc == NULL); /* If this ASSERT fails, dsc was not released properly */
+	*dsc = NULL;
+
+	/* Always do 1-to-1 mapping when number of DSCs is same as number of pipes */
+	if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
+		*dsc = pool->dscs[pipe_idx];
+		res_ctx->is_dsc_acquired[pipe_idx] = true;
+		return;
+	}
+
+	/* Return old DSC to avoid the need to redo it */
+	if (dsc_old && !res_ctx->is_dsc_acquired[dsc_old->inst]) {
+		*dsc = dsc_old;
+		res_ctx->is_dsc_acquired[dsc_old->inst] = true;
+		return;
+	}
+
+	/* Find first free DSC */
+	for (i = 0; i < pool->res_cap->num_dsc; i++)
+		if (!res_ctx->is_dsc_acquired[i]) {
+			*dsc = pool->dscs[i];
+			res_ctx->is_dsc_acquired[i] = true;
+			break;
+		}
+}
+
+static bool dml_split_stream_for_mpc_or_odm(
+		const struct dc *dc,
+		struct resource_context *res_ctx,
+		struct pipe_ctx *pri_pipe,
+		struct pipe_ctx *sec_pipe,
+		bool odm)
+{
+	int pipe_idx = sec_pipe->pipe_idx;
+	const struct resource_pool *pool = dc->res_pool;
+
+	*sec_pipe = *pri_pipe;
+
+	sec_pipe->pipe_idx = pipe_idx;
+	sec_pipe->plane_res.mi = pool->mis[pipe_idx];
+	sec_pipe->plane_res.hubp = pool->hubps[pipe_idx];
+	sec_pipe->plane_res.ipp = pool->ipps[pipe_idx];
+	sec_pipe->plane_res.xfm = pool->transforms[pipe_idx];
+	sec_pipe->plane_res.dpp = pool->dpps[pipe_idx];
+	sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst;
+	sec_pipe->stream_res.dsc = NULL;
+	if (odm) {
+		if (pri_pipe->next_odm_pipe) {
+			ASSERT(pri_pipe->next_odm_pipe != sec_pipe);
+			sec_pipe->next_odm_pipe = pri_pipe->next_odm_pipe;
+			sec_pipe->next_odm_pipe->prev_odm_pipe = sec_pipe;
+		}
+		if (pri_pipe->top_pipe && pri_pipe->top_pipe->next_odm_pipe) {
+			pri_pipe->top_pipe->next_odm_pipe->bottom_pipe = sec_pipe;
+			sec_pipe->top_pipe = pri_pipe->top_pipe->next_odm_pipe;
+		}
+		if (pri_pipe->bottom_pipe && pri_pipe->bottom_pipe->next_odm_pipe) {
+			pri_pipe->bottom_pipe->next_odm_pipe->top_pipe = sec_pipe;
+			sec_pipe->bottom_pipe = pri_pipe->bottom_pipe->next_odm_pipe;
+		}
+		pri_pipe->next_odm_pipe = sec_pipe;
+		sec_pipe->prev_odm_pipe = pri_pipe;
+		ASSERT(sec_pipe->top_pipe == NULL);
+
+		if (!sec_pipe->top_pipe)
+			sec_pipe->stream_res.opp = pool->opps[pipe_idx];
+		else
+			sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
+		if (sec_pipe->stream->timing.flags.DSC == 1) {
+			dml_acquire_dsc(dc, res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
+			ASSERT(sec_pipe->stream_res.dsc);
+			if (sec_pipe->stream_res.dsc == NULL)
+				return false;
+		}
+	} else {
+		if (pri_pipe->bottom_pipe) {
+			ASSERT(pri_pipe->bottom_pipe != sec_pipe);
+			sec_pipe->bottom_pipe =
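/*
 * dml_split_stream_for_mpc_or_odm() maintains two doubly linked lists over
 * the same pipe_ctx array: ODM splits chain via next_odm_pipe/prev_odm_pipe
 * (one stream, several OPPs), MPC splits chain via bottom_pipe/top_pipe
 * (one plane, several DPPs). Schematically:
 *
 *     pri --next_odm_pipe--> sec      // ODM split
 *     pri --bottom_pipe----> sec      // MPC split
 *
 * so the MPC branch here only needs to splice sec_pipe into the bottom_pipe
 * chain; everything else was inherited by the *sec_pipe = *pri_pipe copy.
 */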
pri_pipe->bottom_pipe; + sec_pipe->bottom_pipe->top_pipe = sec_pipe; + } + pri_pipe->bottom_pipe = sec_pipe; + sec_pipe->top_pipe = pri_pipe; + + ASSERT(pri_pipe->plane_state); + } + + return true; +} + +static struct pipe_ctx *dml_find_split_pipe( + struct dc *dc, + struct dc_state *context, + int old_index) +{ + struct pipe_ctx *pipe = NULL; + int i; + + if (old_index >= 0 && context->res_ctx.pipe_ctx[old_index].stream == NULL) { + pipe = &context->res_ctx.pipe_ctx[old_index]; + pipe->pipe_idx = old_index; + } + + if (!pipe) + for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { + if (dc->current_state->res_ctx.pipe_ctx[i].top_pipe == NULL + && dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) { + if (context->res_ctx.pipe_ctx[i].stream == NULL) { + pipe = &context->res_ctx.pipe_ctx[i]; + pipe->pipe_idx = i; + break; + } + } + } + + /* + * May need to fix pipes getting tossed from 1 opp to another on flip + * Add for debugging transient underflow during topology updates: + * ASSERT(pipe); + */ + if (!pipe) + for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { + if (context->res_ctx.pipe_ctx[i].stream == NULL) { + pipe = &context->res_ctx.pipe_ctx[i]; + pipe->pipe_idx = i; + break; + } + } + + return pipe; +} + +static void dml_release_dsc(struct resource_context *res_ctx, + const struct resource_pool *pool, + struct display_stream_compressor **dsc) +{ + int i; + + for (i = 0; i < pool->res_cap->num_dsc; i++) + if (pool->dscs[i] == *dsc) { + res_ctx->is_dsc_acquired[i] = false; + *dsc = NULL; + break; + } +} + +static int dml_get_num_mpc_splits(struct pipe_ctx *pipe) +{ + int mpc_split_count = 0; + struct pipe_ctx *other_pipe = pipe->bottom_pipe; + + while (other_pipe && other_pipe->plane_state == pipe->plane_state) { + mpc_split_count++; + other_pipe = other_pipe->bottom_pipe; + } + other_pipe = pipe->top_pipe; + while (other_pipe && other_pipe->plane_state == pipe->plane_state) { + mpc_split_count++; + other_pipe = other_pipe->top_pipe; + } + + return mpc_split_count; +} + +static bool dml_enough_pipes_for_subvp(struct dc *dc, + struct dc_state *context) +{ + int i = 0; + int num_pipes = 0; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->stream && pipe->plane_state) + num_pipes++; + } + + // Sub-VP only possible if the number of "real" pipes is + // less than or equal to half the number of available pipes + if (num_pipes * 2 > dc->res_pool->pipe_count) + return false; + + return true; +} + +static int dml_validate_apply_pipe_split_flags( + struct dc *dc, + struct dc_state *context, + int vlevel, + int *split, + bool *merge) +{ + int i, pipe_idx, vlevel_split; + int plane_count = 0; + bool force_split = false; + bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID; + struct vba_vars_st *v = &context->bw_ctx.dml.vba; + int max_mpc_comb = v->maxMpcComb; + + if (context->stream_count > 1) { + if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) + avoid_split = true; + } else if (dc->debug.force_single_disp_pipe_split) + force_split = true; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + /** + * Workaround for avoiding pipe-split in cases where we'd split + * planes that are too small, resulting in splits that aren't + * valid for the scaler. 
+ */ + if (pipe->plane_state && + (pipe->plane_state->dst_rect.width <= 16 || + pipe->plane_state->dst_rect.height <= 16 || + pipe->plane_state->src_rect.width <= 16 || + pipe->plane_state->src_rect.height <= 16)) + avoid_split = true; + + /* TODO: fix dc bugs and remove this split threshold thing */ + if (pipe->stream && !pipe->prev_odm_pipe && + (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state)) + ++plane_count; + } + if (plane_count > dc->res_pool->pipe_count / 2) + avoid_split = true; + + /* W/A: Mode timing with borders may not work well with pipe split, avoid for this corner case */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct dc_crtc_timing timing; + + if (!pipe->stream) + continue; + else { + timing = pipe->stream->timing; + if (timing.h_border_left + timing.h_border_right + + timing.v_border_top + timing.v_border_bottom > 0) { + avoid_split = true; + break; + } + } + } + + /* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */ + if (avoid_split) { + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++) + if (v->NoOfDPP[vlevel][0][pipe_idx] == 1 && + v->ModeSupport[vlevel][0]) + break; + /* Impossible to not split this pipe */ + if (vlevel > context->bw_ctx.dml.soc.num_states) + vlevel = vlevel_split; + else + max_mpc_comb = 0; + pipe_idx++; + } + v->maxMpcComb = max_mpc_comb; + } + + /* Split loop sets which pipe should be split based on dml outputs and dc flags */ + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + int pipe_plane = v->pipe_plane[pipe_idx]; + bool split4mpc = context->stream_count == 1 && plane_count == 1 + && dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4; + + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + if (split4mpc || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 4) + split[i] = 4; + else if (force_split || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 2) + split[i] = 2; + + if ((pipe->stream->view_format == + VIEW_3D_FORMAT_SIDE_BY_SIDE || + pipe->stream->view_format == + VIEW_3D_FORMAT_TOP_AND_BOTTOM) && + (pipe->stream->timing.timing_3d_format == + TIMING_3D_FORMAT_TOP_AND_BOTTOM || + pipe->stream->timing.timing_3d_format == + TIMING_3D_FORMAT_SIDE_BY_SIDE)) + split[i] = 2; + if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) { + split[i] = 2; + v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1; + } + if (dc->debug.force_odm_combine_4to1 & (1 << pipe->stream_res.tg->inst)) { + split[i] = 4; + v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_4to1; + } + /*420 format workaround*/ + if (pipe->stream->timing.h_addressable > 7680 && + pipe->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) { + split[i] = 4; + } + + v->ODMCombineEnabled[pipe_plane] = + v->ODMCombineEnablePerState[vlevel][pipe_plane]; + + if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) { + if (dml_get_num_mpc_splits(pipe) == 1) { + /*If need split for mpc but 2 way split already*/ + if (split[i] == 4) + split[i] = 2; /* 2 -> 4 MPC */ + else if (split[i] == 2) + split[i] = 0; /* 2 -> 2 MPC */ + else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) + merge[i] = true; /* 2 -> 1 MPC */ + } else if 
(dml_get_num_mpc_splits(pipe) == 3) { + /*If need split for mpc but 4 way split already*/ + if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe) + || !pipe->bottom_pipe)) { + merge[i] = true; /* 4 -> 2 MPC */ + } else if (split[i] == 0 && pipe->top_pipe && + pipe->top_pipe->plane_state == pipe->plane_state) + merge[i] = true; /* 4 -> 1 MPC */ + split[i] = 0; + } else if (dml_get_num_mpc_splits(pipe)) { + /* ODM -> MPC transition */ + if (pipe->prev_odm_pipe) { + split[i] = 0; + merge[i] = true; + } + } + } else { + if (dml_get_num_mpc_splits(pipe) == 1) { + /*If need split for odm but 2 way split already*/ + if (split[i] == 4) + split[i] = 2; /* 2 -> 4 ODM */ + else if (split[i] == 2) + split[i] = 0; /* 2 -> 2 ODM */ + else if (pipe->prev_odm_pipe) { + ASSERT(0); /* NOT expected yet */ + merge[i] = true; /* exit ODM */ + } + } else if (dml_get_num_mpc_splits(pipe) == 3) { + /*If need split for odm but 4 way split already*/ + if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe) + || !pipe->next_odm_pipe)) { + ASSERT(0); /* NOT expected yet */ + merge[i] = true; /* 4 -> 2 ODM */ + } else if (split[i] == 0 && pipe->prev_odm_pipe) { + ASSERT(0); /* NOT expected yet */ + merge[i] = true; /* exit ODM */ + } + split[i] = 0; + } else if (dml_get_num_mpc_splits(pipe)) { + /* MPC -> ODM transition */ + ASSERT(0); /* NOT expected yet */ + if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) { + split[i] = 0; + merge[i] = true; + } + } + } + + /* Adjust dppclk when split is forced, do not bother with dispclk */ + if (split[i] != 0 && v->NoOfDPP[vlevel][max_mpc_comb][pipe_idx] == 1) + v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2; + pipe_idx++; + } + + return vlevel; +} + +static void dml_set_phantom_stream_timing(struct dc *dc, + struct dc_state *context, + struct pipe_ctx *ref_pipe, + struct dc_stream_state *phantom_stream) +{ + // phantom_vactive = blackout (latency + margin) + fw_processing_delays + pstate allow width + uint32_t phantom_vactive_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us + 60 + + dc->caps.subvp_fw_processing_delay_us + + dc->caps.subvp_pstate_allow_width_us; + uint32_t phantom_vactive = ((double)phantom_vactive_us/1000000) * + (ref_pipe->stream->timing.pix_clk_100hz * 100) / + (double)ref_pipe->stream->timing.h_total; + uint32_t phantom_bp = ref_pipe->pipe_dlg_param.vstartup_start; + + phantom_stream->dst.y = 0; + phantom_stream->dst.height = phantom_vactive; + phantom_stream->src.y = 0; + phantom_stream->src.height = phantom_vactive; + + phantom_stream->timing.v_addressable = phantom_vactive; + phantom_stream->timing.v_front_porch = 1; + phantom_stream->timing.v_total = phantom_stream->timing.v_addressable + + phantom_stream->timing.v_front_porch + + phantom_stream->timing.v_sync_width + + phantom_bp; +} + +static struct dc_stream_state *dml_enable_phantom_stream(struct dc *dc, + struct dc_state *context, + struct pipe_ctx *ref_pipe) +{ + struct dc_stream_state *phantom_stream = NULL; + + phantom_stream = dc_create_stream_for_sink(ref_pipe->stream->sink); + phantom_stream->signal = SIGNAL_TYPE_VIRTUAL; + phantom_stream->dpms_off = true; + phantom_stream->mall_stream_config.type = SUBVP_PHANTOM; + phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream; + ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN; + ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream; + + /* stream has limited viewport and small timing */ + memcpy(&phantom_stream->timing, 
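/*
 * Worked example for the phantom-vactive sizing in
 * dml_set_phantom_stream_timing() above, with hypothetical numbers: a
 * microsecond blackout budget is converted into lines by multiplying by the
 * pixel clock and dividing by the line length in pixels.
 *
 *     uint32_t us      = 250 + 60 + 10 + 30;  // latency + margin + fw + allow
 *     double   pixclk  = 594000000.0;         // 594 MHz
 *     uint32_t h_total = 4400;
 *     uint32_t lines   = ((double)us / 1000000) * pixclk / h_total;  // ~47
 */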
&ref_pipe->stream->timing, sizeof(phantom_stream->timing)); + memcpy(&phantom_stream->src, &ref_pipe->stream->src, sizeof(phantom_stream->src)); + memcpy(&phantom_stream->dst, &ref_pipe->stream->dst, sizeof(phantom_stream->dst)); + dml_set_phantom_stream_timing(dc, context, ref_pipe, phantom_stream); + + dc_add_stream_to_ctx(dc, context, phantom_stream); + dc->hwss.apply_ctx_to_hw(dc, context); + return phantom_stream; +} + +static void dml_enable_phantom_plane(struct dc *dc, + struct dc_state *context, + struct dc_stream_state *phantom_stream, + struct pipe_ctx *main_pipe) +{ + struct dc_plane_state *phantom_plane = NULL; + struct dc_plane_state *prev_phantom_plane = NULL; + struct pipe_ctx *curr_pipe = main_pipe; + + while (curr_pipe) { + if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state) + phantom_plane = prev_phantom_plane; + else + phantom_plane = dc_create_plane_state(dc); + + memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address)); + memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality, + sizeof(phantom_plane->scaling_quality)); + memcpy(&phantom_plane->src_rect, &curr_pipe->plane_state->src_rect, sizeof(phantom_plane->src_rect)); + memcpy(&phantom_plane->dst_rect, &curr_pipe->plane_state->dst_rect, sizeof(phantom_plane->dst_rect)); + memcpy(&phantom_plane->clip_rect, &curr_pipe->plane_state->clip_rect, sizeof(phantom_plane->clip_rect)); + memcpy(&phantom_plane->plane_size, &curr_pipe->plane_state->plane_size, + sizeof(phantom_plane->plane_size)); + memcpy(&phantom_plane->tiling_info, &curr_pipe->plane_state->tiling_info, + sizeof(phantom_plane->tiling_info)); + memcpy(&phantom_plane->dcc, &curr_pipe->plane_state->dcc, sizeof(phantom_plane->dcc)); + /* Currently compat_level is undefined in dc_state + * phantom_plane->compat_level = curr_pipe->plane_state->compat_level; + */ + phantom_plane->format = curr_pipe->plane_state->format; + phantom_plane->rotation = curr_pipe->plane_state->rotation; + phantom_plane->visible = curr_pipe->plane_state->visible; + + /* Shadow pipe has small viewport. 
*/
+	phantom_plane->clip_rect.y = 0;
+	phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable;
+
+	dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context);
+
+	curr_pipe = curr_pipe->bottom_pipe;
+	prev_phantom_plane = phantom_plane;
+	}
+}
+
+static void dml_add_phantom_pipes(struct dc *dc, struct dc_state *context)
+{
+	int i = 0;
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+		struct dc_stream_state *ref_stream = pipe->stream;
+		// Only construct phantom stream for top pipes that have plane enabled
+		if (!pipe->top_pipe && pipe->plane_state && pipe->stream &&
+				pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+			struct dc_stream_state *phantom_stream = NULL;
+
+			phantom_stream = dml_enable_phantom_stream(dc, context, pipe);
+			dml_enable_phantom_plane(dc, context, phantom_stream, pipe);
+		}
+	}
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+		if (pipe->plane_state && pipe->stream &&
+				pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+			pipe->stream->use_dynamic_meta = false;
+			pipe->plane_state->flip_immediate = false;
+			if (!resource_build_scaling_params(pipe)) {
+				// Log / remove phantom pipes since we failed to build scaling params
+			}
+		}
+	}
+}
+
+static void dml_remove_phantom_pipes(struct dc *dc, struct dc_state *context)
+{
+	int i;
+	bool removed_pipe = false;
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+		// Remove the planes and stream for any phantom pipe
+		if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+			dc_rem_all_planes_for_stream(dc, pipe->stream, context);
+			dc_remove_stream_from_ctx(dc, context, pipe->stream);
+			removed_pipe = true;
+		}
+
+		// Clear all phantom stream info
+		if (pipe->stream) {
+			pipe->stream->mall_stream_config.type = SUBVP_NONE;
+			pipe->stream->mall_stream_config.paired_stream = NULL;
+		}
+	}
+	if (removed_pipe)
+		dc->hwss.apply_ctx_to_hw(dc, context);
+}
+
+/*
+ * If the input state contains no upstream planes for a particular pipe (i.e.
only timing) + * we need to populate some "conservative" plane information as DML cannot handle "no planes" + */ +static void populate_default_plane_from_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_pipe_params_st *pipe) +{ + pipe->src.is_hsplit = pipe->dest.odm_combine != dm_odm_combine_mode_disabled; + pipe->src.source_scan = dm_horz; + pipe->src.sw_mode = dm_sw_4kb_s; + pipe->src.macro_tile_size = dm_64k_tile; + pipe->src.viewport_width = timing->h_addressable; + if (pipe->src.viewport_width > 1920) + pipe->src.viewport_width = 1920; + pipe->src.viewport_height = timing->v_addressable; + if (pipe->src.viewport_height > 1080) + pipe->src.viewport_height = 1080; + pipe->src.surface_height_y = pipe->src.viewport_height; + pipe->src.surface_width_y = pipe->src.viewport_width; + pipe->src.surface_height_c = pipe->src.viewport_height; + pipe->src.surface_width_c = pipe->src.viewport_width; + pipe->src.data_pitch = ((pipe->src.viewport_width + 255) / 256) * 256; + pipe->src.source_format = dm_444_32; + pipe->dest.recout_width = pipe->src.viewport_width; + pipe->dest.recout_height = pipe->src.viewport_height; + pipe->dest.full_recout_width = pipe->dest.recout_width; + pipe->dest.full_recout_height = pipe->dest.recout_height; + pipe->scale_ratio_depth.lb_depth = dm_lb_16; + pipe->scale_ratio_depth.hscl_ratio = 1.0; + pipe->scale_ratio_depth.vscl_ratio = 1.0; + pipe->scale_ratio_depth.scl_enable = 0; + pipe->scale_taps.htaps = 1; + pipe->scale_taps.vtaps = 1; + pipe->dest.vtotal_min = timing->v_total; + pipe->dest.vtotal_max = timing->v_total; + + if (pipe->dest.odm_combine == dm_odm_combine_mode_2to1) { + pipe->src.viewport_width /= 2; + pipe->dest.recout_width /= 2; + } else if (pipe->dest.odm_combine == dm_odm_combine_mode_4to1) { + pipe->src.viewport_width /= 4; + pipe->dest.recout_width /= 4; + } + + pipe->src.dcc = false; + pipe->src.dcc_rate = 1; +} + +/* + * If the pipe is not blending (i.e. 
pipe_ctx->top pipe == null) then its + * hsplit group is equal to its own pipe ID + * Otherwise, all pipes part of the same blending tree have the same hsplit group + * ID as the top most pipe + * + * If the pipe ctx is ODM combined, then similar logic follows + */ +static void populate_hsplit_group_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe) +{ + e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx; + + if (dc_pipe_ctx->top_pipe && dc_pipe_ctx->top_pipe->plane_state + == dc_pipe_ctx->plane_state) { + struct pipe_ctx *first_pipe = dc_pipe_ctx->top_pipe; + int split_idx = 0; + + while (first_pipe->top_pipe && first_pipe->top_pipe->plane_state + == dc_pipe_ctx->plane_state) { + first_pipe = first_pipe->top_pipe; + split_idx++; + } + + /* Treat 4to1 mpc combine as an mpo of 2 2-to-1 combines */ + if (split_idx == 0) + e2e_pipe->pipe.src.hsplit_grp = first_pipe->pipe_idx; + else if (split_idx == 1) + e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx; + else if (split_idx == 2) + e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->top_pipe->pipe_idx; + + } else if (dc_pipe_ctx->prev_odm_pipe) { + struct pipe_ctx *first_pipe = dc_pipe_ctx->prev_odm_pipe; + + while (first_pipe->prev_odm_pipe) + first_pipe = first_pipe->prev_odm_pipe; + e2e_pipe->pipe.src.hsplit_grp = first_pipe->pipe_idx; + } +} + +static void populate_dml_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe, int always_scale) +{ + const struct dc_plane_state *pln = dc_pipe_ctx->plane_state; + const struct scaler_data *scl = &dc_pipe_ctx->plane_res.scl_data; + + e2e_pipe->pipe.src.immediate_flip = pln->flip_immediate; + e2e_pipe->pipe.src.is_hsplit = (dc_pipe_ctx->bottom_pipe && dc_pipe_ctx->bottom_pipe->plane_state == pln) + || (dc_pipe_ctx->top_pipe && dc_pipe_ctx->top_pipe->plane_state == pln) + || e2e_pipe->pipe.dest.odm_combine != dm_odm_combine_mode_disabled; + + /* stereo is not split */ + if (pln->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE || + pln->stereo_format == PLANE_STEREO_FORMAT_TOP_AND_BOTTOM) { + e2e_pipe->pipe.src.is_hsplit = false; + e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx; + } + + e2e_pipe->pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90 + || pln->rotation == ROTATION_ANGLE_270 ? 
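/*
 * Summary of the hsplit_grp rule implemented above: walk to the head of
 * whichever chain the pipe sits on and reuse that pipe's index, so every
 * pipe cooperating on one plane (or one ODM-combined stream) reports the
 * same group id. Roughly:
 *
 *     grp = pipe_idx;                        // unsplit
 *     grp = first_odm_pipe->pipe_idx;        // ODM combined
 *     grp = top_most_same_plane->pipe_idx;   // MPC combined
 *
 * with the split_idx == 1/2 cases above carving a 4-to-1 MPC combine into
 * two 2-to-1 groups, as the comment there notes.
 */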
dm_vert : dm_horz; + e2e_pipe->pipe.src.viewport_y_y = scl->viewport.y; + e2e_pipe->pipe.src.viewport_y_c = scl->viewport_c.y; + e2e_pipe->pipe.src.viewport_width = scl->viewport.width; + e2e_pipe->pipe.src.viewport_width_c = scl->viewport_c.width; + e2e_pipe->pipe.src.viewport_height = scl->viewport.height; + e2e_pipe->pipe.src.viewport_height_c = scl->viewport_c.height; + e2e_pipe->pipe.src.viewport_width_max = pln->src_rect.width; + e2e_pipe->pipe.src.viewport_height_max = pln->src_rect.height; + e2e_pipe->pipe.src.surface_width_y = pln->plane_size.surface_size.width; + e2e_pipe->pipe.src.surface_height_y = pln->plane_size.surface_size.height; + e2e_pipe->pipe.src.surface_width_c = pln->plane_size.chroma_size.width; + e2e_pipe->pipe.src.surface_height_c = pln->plane_size.chroma_size.height; + + if (pln->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA + || pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { + e2e_pipe->pipe.src.data_pitch = pln->plane_size.surface_pitch; + e2e_pipe->pipe.src.data_pitch_c = pln->plane_size.chroma_pitch; + e2e_pipe->pipe.src.meta_pitch = pln->dcc.meta_pitch; + e2e_pipe->pipe.src.meta_pitch_c = pln->dcc.meta_pitch_c; + } else { + e2e_pipe->pipe.src.data_pitch = pln->plane_size.surface_pitch; + e2e_pipe->pipe.src.meta_pitch = pln->dcc.meta_pitch; + } + e2e_pipe->pipe.src.dcc = pln->dcc.enable; + e2e_pipe->pipe.src.dcc_rate = 1; + e2e_pipe->pipe.dest.recout_width = scl->recout.width; + e2e_pipe->pipe.dest.recout_height = scl->recout.height; + e2e_pipe->pipe.dest.full_recout_height = scl->recout.height; + e2e_pipe->pipe.dest.full_recout_width = scl->recout.width; + if (e2e_pipe->pipe.dest.odm_combine == dm_odm_combine_mode_2to1) + e2e_pipe->pipe.dest.full_recout_width *= 2; + else if (e2e_pipe->pipe.dest.odm_combine == dm_odm_combine_mode_4to1) + e2e_pipe->pipe.dest.full_recout_width *= 4; + else { + struct pipe_ctx *split_pipe = dc_pipe_ctx->bottom_pipe; + + while (split_pipe && split_pipe->plane_state == pln) { + e2e_pipe->pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width; + split_pipe = split_pipe->bottom_pipe; + } + split_pipe = dc_pipe_ctx->top_pipe; + while (split_pipe && split_pipe->plane_state == pln) { + e2e_pipe->pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width; + split_pipe = split_pipe->top_pipe; + } + } + + e2e_pipe->pipe.scale_ratio_depth.lb_depth = dm_lb_16; + e2e_pipe->pipe.scale_ratio_depth.hscl_ratio = (double) scl->ratios.horz.value / (1ULL<<32); + e2e_pipe->pipe.scale_ratio_depth.hscl_ratio_c = (double) scl->ratios.horz_c.value / (1ULL<<32); + e2e_pipe->pipe.scale_ratio_depth.vscl_ratio = (double) scl->ratios.vert.value / (1ULL<<32); + e2e_pipe->pipe.scale_ratio_depth.vscl_ratio_c = (double) scl->ratios.vert_c.value / (1ULL<<32); + e2e_pipe->pipe.scale_ratio_depth.scl_enable = + scl->ratios.vert.value != dc_fixpt_one.value + || scl->ratios.horz.value != dc_fixpt_one.value + || scl->ratios.vert_c.value != dc_fixpt_one.value + || scl->ratios.horz_c.value != dc_fixpt_one.value /*Lb only or Full scl*/ + || always_scale; /*support always scale*/ + e2e_pipe->pipe.scale_taps.htaps = scl->taps.h_taps; + e2e_pipe->pipe.scale_taps.htaps_c = scl->taps.h_taps_c; + e2e_pipe->pipe.scale_taps.vtaps = scl->taps.v_taps; + e2e_pipe->pipe.scale_taps.vtaps_c = scl->taps.v_taps_c; + + /* Currently compat_level is not defined. 
Commenting it until further resolution + * if (pln->compat_level == DC_LEGACY_TILING_ADDR_GEN_TWO) { + swizzle_to_dml_params(pln->tiling_info.gfx9.swizzle, + &e2e_pipe->pipe.src.sw_mode); + e2e_pipe->pipe.src.macro_tile_size = + swizzle_mode_to_macro_tile_size(pln->tiling_info.gfx9.swizzle); + } else { + gfx10array_mode_to_dml_params(pln->tiling_info.gfx10compatible.array_mode, + pln->compat_level, + &e2e_pipe->pipe.src.sw_mode); + e2e_pipe->pipe.src.macro_tile_size = dm_4k_tile; + }*/ + + e2e_pipe->pipe.src.source_format = dc_source_format_to_dml_source_format(pln->format); +} + +static void populate_dml_cursor_parameters_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe) +{ + /* + * For graphic plane, cursor number is 1, nv12 is 0 + * bw calculations due to cursor on/off + */ + if (dc_pipe_ctx->plane_state && + (dc_pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE || + dc_pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM)) + e2e_pipe->pipe.src.num_cursors = 0; + else + e2e_pipe->pipe.src.num_cursors = 1; + + e2e_pipe->pipe.src.cur0_src_width = 256; + e2e_pipe->pipe.src.cur0_bpp = dm_cur_32bit; +} + +static int populate_dml_pipes_from_context_base( + struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + bool fast_validate) +{ + int pipe_cnt, i; + bool synchronized_vblank = true; + struct resource_context *res_ctx = &context->res_ctx; + + for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) { + if (!res_ctx->pipe_ctx[i].stream) + continue; + + if (pipe_cnt < 0) { + pipe_cnt = i; + continue; + } + + if (res_ctx->pipe_ctx[pipe_cnt].stream == res_ctx->pipe_ctx[i].stream) + continue; + + if (dc->debug.disable_timing_sync || + (!resource_are_streams_timing_synchronizable( + res_ctx->pipe_ctx[pipe_cnt].stream, + res_ctx->pipe_ctx[i].stream) && + !resource_are_vblanks_synchronizable( + res_ctx->pipe_ctx[pipe_cnt].stream, + res_ctx->pipe_ctx[i].stream))) { + synchronized_vblank = false; + break; + } + } + + for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { + struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing; + + struct audio_check aud_check = {0}; + if (!res_ctx->pipe_ctx[i].stream) + continue; + + /* todo: + pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0; + pipes[pipe_cnt].pipe.src.dcc = 0; + pipes[pipe_cnt].pipe.src.vm = 0;*/ + + pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; + + pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC; + /* todo: rotation?*/ + pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h; + if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) { + pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true; + /* 1/2 vblank */ + pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active = + (timing->v_total - timing->v_addressable + - timing->v_border_top - timing->v_border_bottom) / 2; + /* 36 bytes dp, 32 hdmi */ + pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes = + dc_is_dp_signal(res_ctx->pipe_ctx[i].stream->signal) ? 
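/*
 * On the scale-ratio conversion a few lines up: dc_fixpt values carry 32
 * fractional bits, so dividing the raw value by 2^32 recovers the real
 * ratio. Worked example for a hypothetical 1.5x scale factor:
 *
 *     long long raw = 0x180000000LL;              // 1.5 in fixed point
 *     double ratio  = (double)raw / (1ULL << 32); // == 1.5
 */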
36 : 32; + } + pipes[pipe_cnt].pipe.dest.synchronized_vblank_all_planes = synchronized_vblank; + + dc_timing_to_dml_timing(timing, &pipes[pipe_cnt].pipe.dest); + pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min; + pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max; + + pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst; + + pipes[pipe_cnt].pipe.dest.odm_combine = get_dml_odm_combine(&res_ctx->pipe_ctx[i]); + + populate_hsplit_group_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt]); + + pipes[pipe_cnt].dout.dp_lanes = 4; + pipes[pipe_cnt].dout.is_virtual = 0; + pipes[pipe_cnt].dout.output_type = get_dml_output_type(res_ctx->pipe_ctx[i].stream->signal); + if (pipes[pipe_cnt].dout.output_type < 0) { + pipes[pipe_cnt].dout.output_type = dm_dp; + pipes[pipe_cnt].dout.is_virtual = 1; + } + + populate_color_depth_and_encoding_from_timing(&res_ctx->pipe_ctx[i].stream->timing, &pipes[pipe_cnt].dout); + + if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC) + pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0; + + /* todo: default max for now, until there is logic reflecting this in dc*/ + pipes[pipe_cnt].dout.dsc_input_bpc = 12; + /*fill up the audio sample rate (unit in kHz)*/ + get_audio_check(&res_ctx->pipe_ctx[i].stream->audio_info, &aud_check); + pipes[pipe_cnt].dout.max_audio_sample_rate = aud_check.max_audiosample_rate / 1000; + + populate_dml_cursor_parameters_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt]); + + if (!res_ctx->pipe_ctx[i].plane_state) { + populate_default_plane_from_timing(timing, &pipes[pipe_cnt].pipe); + } else { + populate_dml_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt], dc->debug.always_scale); + } + + pipe_cnt++; + } + + /* populate writeback information */ + if (dc->res_pool) + dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes); + + return pipe_cnt; +} + +static int dml_populate_dml_pipes_from_context( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + bool fast_validate) +{ + int i, pipe_cnt; + struct resource_context *res_ctx = &context->res_ctx; + struct pipe_ctx *pipe; + + populate_dml_pipes_from_context_base(dc, context, pipes, fast_validate); + + for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { + struct dc_crtc_timing *timing; + + if (!res_ctx->pipe_ctx[i].stream) + continue; + pipe = &res_ctx->pipe_ctx[i]; + timing = &pipe->stream->timing; + + pipes[pipe_cnt].pipe.src.gpuvm = true; + pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0; + pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0; + pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; + + pipes[pipe_cnt].dout.dsc_input_bpc = 0; + if (pipes[pipe_cnt].dout.dsc_enable) { + switch (timing->display_color_depth) { + case COLOR_DEPTH_888: + pipes[pipe_cnt].dout.dsc_input_bpc = 8; + break; + case COLOR_DEPTH_101010: + pipes[pipe_cnt].dout.dsc_input_bpc = 10; + break; + case COLOR_DEPTH_121212: + pipes[pipe_cnt].dout.dsc_input_bpc = 12; + break; + default: + ASSERT(0); + break; + } + } + pipe_cnt++; + } + dc->config.enable_4to1MPC = false; + if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) { + if (is_dual_plane(pipe->plane_state->format) + && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) { + dc->config.enable_4to1MPC = true; + } else if (!is_dual_plane(pipe->plane_state->format)) { 
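/*
 * Decision table for the single-pipe special case here, as read from the
 * branches: with exactly one enabled pipe and z9 MPC not disabled,
 *
 *     dual-plane (video) and src <= 1920x1080 -> enable_4to1MPC = true
 *     single-plane                            -> 192KB DET buffer and
 *                                                unbounded request mode
 *
 * i.e. small video surfaces get extra DPP parallelism, while single-plane
 * surfaces get the larger detile buffer instead.
 */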
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; + pipes[0].pipe.src.unbounded_req_mode = true; + } + } + + return pipe_cnt; +} + +static void dml_full_validate_bw_helper(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *vlevel, + int *split, + bool *merge, + int *pipe_cnt) +{ + struct vba_vars_st *vba = &context->bw_ctx.dml.vba; + + /* + * DML favors voltage over p-state, but we're more interested in + * supporting p-state over voltage. We can't support p-state in + * prefetch mode > 0 so try capping the prefetch mode to start. + */ + context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank = + dm_allow_self_refresh_and_mclk_switch; + *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt); + /* This may adjust vlevel and maxMpcComb */ + if (*vlevel < context->bw_ctx.dml.soc.num_states) + *vlevel = dml_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge); + + /* Conditions for setting up phantom pipes for SubVP: + * 1. Not force disable SubVP + * 2. Full update (i.e. !fast_validate) + * 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?) + * 4. Display configuration passes validation + * 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch) + */ + if (!dc->debug.force_disable_subvp && + dml_enough_pipes_for_subvp(dc, context) && + *vlevel < context->bw_ctx.dml.soc.num_states && + (vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported || + dc->debug.force_subvp_mclk_switch)) { + + dml_add_phantom_pipes(dc, context); + + /* Create input to DML based on new context which includes phantom pipes + * TODO: Input to DML should mark which pipes are phantom + */ + *pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, false); + *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt); + if (*vlevel < context->bw_ctx.dml.soc.num_states) { + memset(split, 0, MAX_PIPES * sizeof(*split)); + memset(merge, 0, MAX_PIPES * sizeof(*merge)); + *vlevel = dml_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge); + } + + // If SubVP pipe config is unsupported (or cannot be used for UCLK switching) + // remove phantom pipes and repopulate dml pipes + if (*vlevel == context->bw_ctx.dml.soc.num_states || + vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) { + dml_remove_phantom_pipes(dc, context); + *pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, false); + } + } +} + +static void dcn20_adjust_adaptive_sync_v_startup( + const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start) +{ + struct dc_crtc_timing patched_crtc_timing; + uint32_t asic_blank_end = 0; + uint32_t asic_blank_start = 0; + uint32_t newVstartup = 0; + + patched_crtc_timing = *dc_crtc_timing; + + if (patched_crtc_timing.flags.INTERLACE == 1) { + if (patched_crtc_timing.v_front_porch < 2) + patched_crtc_timing.v_front_porch = 2; + } else { + if (patched_crtc_timing.v_front_porch < 1) + patched_crtc_timing.v_front_porch = 1; + } + + /* blank_start = frame end - front porch */ + asic_blank_start = patched_crtc_timing.v_total - + patched_crtc_timing.v_front_porch; + + /* blank_end = blank_start - active */ + asic_blank_end = asic_blank_start - + patched_crtc_timing.v_border_bottom - + patched_crtc_timing.v_addressable - + patched_crtc_timing.v_border_top; + + newVstartup = asic_blank_end + (patched_crtc_timing.v_total - 
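/*
 * Worked example for the blanking arithmetic above, using a hypothetical
 * CEA 1080p timing (v_total 1125, v_addressable 1080, front porch 4, no
 * borders):
 *
 *     blank_start = 1125 - 4;             // 1121
 *     blank_end   = 1121 - 0 - 1080 - 0;  // 41
 *     newVstartup = 41 + (1125 - 1121);   // 45 lines before active
 *
 * *vstartup_start then only ever grows, taking the max of old and new.
 */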
asic_blank_start); + + *vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start); +} + +static bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx) +{ + return (pipe_ctx->stream_res.hpo_dp_stream_enc && + pipe_ctx->stream->link->hpo_dp_link_enc && + dc_is_dp_signal(pipe_ctx->stream->signal)); +} + +static bool is_dtbclk_required(struct dc *dc, struct dc_state *context) +{ + int i; + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; +#if defined (CONFIG_DRM_AMD_DC_DP2_0) + if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i])) + return true; +#endif + } + return false; +} + +static void dml_update_soc_for_wm_a(struct dc *dc, struct dc_state *context) +{ + if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) { + context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; + context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us; + context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us; + } +} + +static bool dml_internal_validate( + struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *pipe_cnt_out, + int *vlevel_out, + bool fast_validate) +{ + bool out = false; + bool repopulate_pipes = false; + int split[MAX_PIPES] = { 0 }; + bool merge[MAX_PIPES] = { false }; + bool newly_split[MAX_PIPES] = { false }; + int pipe_cnt, i, pipe_idx, vlevel; + struct vba_vars_st *vba = &context->bw_ctx.dml.vba; + + ASSERT(pipes); + if (!pipes) + return false; + + // For each full update, remove all existing phantom pipes first + dml_remove_phantom_pipes(dc, context); + + dml_update_soc_for_wm_a(dc, context); + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->plane_state) { + // On initial pass through DML, we intend to use MALL for SS on all + // (non-PSR) surfaces with none using MALL for P-State + // 'mall_plane_config': is not a member of 'dc_plane_state' - commenting it out till mall_plane_config gets supported in dc_plant_state + //if (pipe->stream && pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) + // pipe->plane_state->mall_plane_config.use_mall_for_ss = true; + } + } + pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + + if (!pipe_cnt) { + out = true; + goto validate_out; + } + + dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt); + + if (!fast_validate) { + dml_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt); + } + + if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states || + vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) { + /* + * If mode is unsupported or there's still no p-state support then + * fall back to favoring voltage. + * + * We don't actually support prefetch mode 2, so require that we + * at least support prefetch mode 1. 
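+		 *
+		 * In other words the ladder is: dml_full_validate_bw_helper()
+		 * above first tried dm_allow_self_refresh_and_mclk_switch;
+		 * this retry allows self refresh only, and the mode is
+		 * rejected below if vlevel still comes back equal to
+		 * soc.num_states.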
+ */ + context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank = + dm_allow_self_refresh; + + vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); + if (vlevel < context->bw_ctx.dml.soc.num_states) { + memset(split, 0, sizeof(split)); + memset(merge, 0, sizeof(merge)); + vlevel = dml_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge); + } + } + + dml_log_mode_support_params(&context->bw_ctx.dml); + + if (vlevel == context->bw_ctx.dml.soc.num_states) + goto validate_fail; + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *mpo_pipe = pipe->bottom_pipe; + + if (!pipe->stream) + continue; + + /* We only support full screen mpo with ODM */ + if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled + && pipe->plane_state && mpo_pipe + && memcmp(&mpo_pipe->plane_res.scl_data.recout, + &pipe->plane_res.scl_data.recout, + sizeof(struct rect)) != 0) { + ASSERT(mpo_pipe->plane_state != pipe->plane_state); + goto validate_fail; + } + pipe_idx++; + } + + /* merge pipes if necessary */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + /*skip pipes that don't need merging*/ + if (!merge[i]) + continue; + + /* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */ + if (pipe->prev_odm_pipe) { + /*split off odm pipe*/ + pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe; + if (pipe->next_odm_pipe) + pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe; + + pipe->bottom_pipe = NULL; + pipe->next_odm_pipe = NULL; + pipe->plane_state = NULL; + pipe->stream = NULL; + pipe->top_pipe = NULL; + pipe->prev_odm_pipe = NULL; + if (pipe->stream_res.dsc) + dml_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc); + memset(&pipe->plane_res, 0, sizeof(pipe->plane_res)); + memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); + repopulate_pipes = true; + } else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) { + struct pipe_ctx *top_pipe = pipe->top_pipe; + struct pipe_ctx *bottom_pipe = pipe->bottom_pipe; + + top_pipe->bottom_pipe = bottom_pipe; + if (bottom_pipe) + bottom_pipe->top_pipe = top_pipe; + + pipe->top_pipe = NULL; + pipe->bottom_pipe = NULL; + pipe->plane_state = NULL; + pipe->stream = NULL; + memset(&pipe->plane_res, 0, sizeof(pipe->plane_res)); + memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); + repopulate_pipes = true; + } else + ASSERT(0); /* Should never try to merge master pipe */ + + } + + for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *hsplit_pipe = NULL; + bool odm; + int old_index = -1; + + if (!pipe->stream || newly_split[i]) + continue; + + pipe_idx++; + odm = vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled; + + if (!pipe->plane_state && !odm) + continue; + + if (split[i]) { + if (odm) { + if (split[i] == 4 && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe) + old_index = old_pipe->next_odm_pipe->next_odm_pipe->pipe_idx; + else if (old_pipe->next_odm_pipe) + old_index = old_pipe->next_odm_pipe->pipe_idx; + } else { + if (split[i] == 4 && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe && + old_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state) + 
old_index = old_pipe->bottom_pipe->bottom_pipe->pipe_idx; + else if (old_pipe->bottom_pipe && + old_pipe->bottom_pipe->plane_state == old_pipe->plane_state) + old_index = old_pipe->bottom_pipe->pipe_idx; + } + hsplit_pipe = dml_find_split_pipe(dc, context, old_index); + ASSERT(hsplit_pipe); + if (!hsplit_pipe) + goto validate_fail; + + if (!dml_split_stream_for_mpc_or_odm( + dc, &context->res_ctx, + pipe, hsplit_pipe, odm)) + goto validate_fail; + + newly_split[hsplit_pipe->pipe_idx] = true; + repopulate_pipes = true; + } + if (split[i] == 4) { + struct pipe_ctx *pipe_4to1; + + if (odm && old_pipe->next_odm_pipe) + old_index = old_pipe->next_odm_pipe->pipe_idx; + else if (!odm && old_pipe->bottom_pipe && + old_pipe->bottom_pipe->plane_state == old_pipe->plane_state) + old_index = old_pipe->bottom_pipe->pipe_idx; + else + old_index = -1; + pipe_4to1 = dml_find_split_pipe(dc, context, old_index); + ASSERT(pipe_4to1); + if (!pipe_4to1) + goto validate_fail; + if (!dml_split_stream_for_mpc_or_odm( + dc, &context->res_ctx, + pipe, pipe_4to1, odm)) + goto validate_fail; + newly_split[pipe_4to1->pipe_idx] = true; + + if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe + && old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe) + old_index = old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe->pipe_idx; + else if (!odm && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe && + old_pipe->bottom_pipe->bottom_pipe->bottom_pipe && + old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state) + old_index = old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx; + else + old_index = -1; + pipe_4to1 = dml_find_split_pipe(dc, context, old_index); + ASSERT(pipe_4to1); + if (!pipe_4to1) + goto validate_fail; + if (!dml_split_stream_for_mpc_or_odm( + dc, &context->res_ctx, + hsplit_pipe, pipe_4to1, odm)) + goto validate_fail; + newly_split[pipe_4to1->pipe_idx] = true; + } + if (odm) + dml_build_mapped_resource(dc, context, pipe->stream); + } + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->plane_state) { + if (!resource_build_scaling_params(pipe)) + goto validate_fail; + } + } + + /* Actual dsc count per stream dsc validation*/ + if (!dml_validate_dsc(dc, context)) { + vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE; + goto validate_fail; + } + + if (repopulate_pipes) + pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + *vlevel_out = vlevel; + *pipe_cnt_out = pipe_cnt; + + out = true; + goto validate_out; + +validate_fail: + out = false; + +validate_out: + return out; +} + +static void dml_calculate_dlg_params( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel) +{ + int i, pipe_idx; + int plane_count; + + /* Writeback MCIF_WB arbitration parameters */ + if (dc->res_pool) + dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt); + + context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000; + context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000; + context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000; + context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16; + context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000; + context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 
1000; + context->bw_ctx.bw.dcn.clk.p_state_change_support = + context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] + != dm_dram_clock_change_unsupported; + + context->bw_ctx.bw.dcn.clk.dppclk_khz = 0; + /* 'z9_support': is not a member of 'dc_clocks' - Commenting out till we have this support in dc_clocks + * context->bw_ctx.bw.dcn.clk.z9_support = (context->bw_ctx.dml.vba.StutterPeriod > 5000.0) ? + DCN_Z9_SUPPORT_ALLOW : DCN_Z9_SUPPORT_DISALLOW; + */ + plane_count = 0; + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (context->res_ctx.pipe_ctx[i].plane_state) + plane_count++; + } + + /* Commented out as per above error for now. + if (plane_count == 0) + context->bw_ctx.bw.dcn.clk.z9_support = DCN_Z9_SUPPORT_ALLOW; + */ + context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context); + /* TODO : Uncomment the below line and make changes + * as per DML nomenclature once it is available. + * context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = context->bw_ctx.dml.vba.fclk_pstate_support; + */ + + if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz) + context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz; + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) { + // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests + context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0; + context->res_ctx.pipe_ctx[i].unbounded_req = false; + } else { + context->res_ctx.pipe_ctx[i].det_buffer_size_kb = context->bw_ctx.dml.ip.det_buffer_size_kbytes; + context->res_ctx.pipe_ctx[i].unbounded_req = pipes[pipe_idx].pipe.src.unbounded_req_mode; + } + + if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) + context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; + context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = + pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; + context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest; + pipe_idx++; + } + /*save a original dppclock copy*/ + context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz; + context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz; + context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz * 1000; + context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz * 1000; + context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes + - context->bw_ctx.dml.ip.det_buffer_size_kbytes * pipe_idx; + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + bool cstate_en = context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2; + + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml, + 
&context->res_ctx.pipe_ctx[i].dlg_regs, + &context->res_ctx.pipe_ctx[i].ttu_regs, + pipes, + pipe_cnt, + pipe_idx, + cstate_en, + context->bw_ctx.bw.dcn.clk.p_state_change_support, + false, false, true); + + context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml, + &context->res_ctx.pipe_ctx[i].rq_regs, + &pipes[pipe_idx].pipe); + pipe_idx++; + } +} + +static void dml_calculate_wm_and_dlg( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel) +{ + int i, pipe_idx, vlevel_temp = 0; + + double dcfclk = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz; + double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; + unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed; + bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != + dm_dram_clock_change_unsupported; + + /* Set B: + * For Set B calculations use clocks from clock_limits[2] when available i.e. when SMU is present, + * otherwise use arbitrary low value from spreadsheet for DCFCLK as lower is safer for watermark + * calculations to cover bootup clocks. + * DCFCLK: soc.clock_limits[2] when available + * UCLK: soc.clock_limits[2] when available + */ + if (context->bw_ctx.dml.soc.num_states > 2) { + vlevel_temp = 2; + dcfclk = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz; + } else + dcfclk = 615; //DCFCLK Vmin_lv + + pipes[0].clks_cfg.voltage = vlevel_temp; + pipes[0].clks_cfg.dcfclk_mhz = dcfclk; + pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz; + + if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) { + context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us; + context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us; + context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us; + } + context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + //context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.fclk_pstate_change_ns = get_wm_fclk_pstate(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + //context->bw_ctx.bw.dcn.watermarks.b.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + + /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns 
wm values until DML is implemented */ + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.fclk_pstate_change_ns = context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns / 4; + context->bw_ctx.bw.dcn.watermarks.b.usr_retraining_ns = context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns / 8; + + /* Set D: + * All clocks min. + * DCFCLK: Min, as reported by PM FW when available + * UCLK : Min, as reported by PM FW when available + * sr_enter_exit/sr_exit should be lower than used for DRAM (TBD after bringup or later, use as decided in Clk Mgr) + */ + + if (context->bw_ctx.dml.soc.num_states > 2) { + vlevel_temp = 0; + dcfclk = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz; + } else + dcfclk = 615; //DCFCLK Vmin_lv + + pipes[0].clks_cfg.voltage = vlevel_temp; + pipes[0].clks_cfg.dcfclk_mhz = dcfclk; + pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz; + + if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) { + context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us; + context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us; + context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us; + } + context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + //context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = get_wm_fclk_pstate(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + //context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + + /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */ + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns / 4; + context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns / 8; + + /* Set C, for Dummy P-State: + * All clocks min. 
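+	 * (Set C exists so that, when the configuration cannot support real
+	 *  p-state switching, DLG can still be derived from the dummy
+	 *  p-state latency picked out of dummy_pstate_table below.)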
+ * DCFCLK: Min, as reported by PM FW, when available + * UCLK : Min, as reported by PM FW, when available + * pstate latency as per UCLK state dummy pstate latency + */ + if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) { + unsigned int min_dram_speed_mts_margin = 160; + + if ((!pstate_en)) + min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16; + + /* find largest table entry that is lower than dram speed, but lower than DPM0 still uses DPM0 */ + for (i = 3; i > 0; i--) + if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts) + break; + + context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us; + context->bw_ctx.dml.soc.dummy_pstate_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us; + context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us; + context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us; + } + context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + //context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = get_wm_fclk_pstate(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + //context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + + /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */ + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns / 4; + context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns / 8; + + if ((!pstate_en) && (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid)) { + /* The only difference between A and C is p-state latency, if p-state is not supported + * with full p-state latency we want to calculate DLG based on dummy p-state latency, + * Set A p-state watermark set to 0 previously, when p-state unsupported, for now keep as previous implementation. + */ + context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0; + } else { + /* Set A: + * All clocks min. 
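+		 * (Only reached when p-state switching validated; otherwise the
+		 *  branch above copied set A from set C with pstate_change_ns
+		 *  forced to 0.)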
+ * DCFCLK: Min, as reported by PM FW, when available + * UCLK: Min, as reported by PM FW, when available + */ + dml_update_soc_for_wm_a(dc, context); + context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + } + + pipes[0].clks_cfg.voltage = vlevel; + pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_validation; + pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz; + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt); + pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + + if (dc->config.forced_clocks) { + pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; + pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; + } + if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000) + pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0; + if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) + pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0; + + pipe_idx++; + } + + context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod; + + dml_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); + + if (!pstate_en) + /* Restore full p-state latency */ + context->bw_ctx.dml.soc.dram_clock_change_latency_us = + dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; +} + +bool dml_validate(struct dc *dc, + struct dc_state *context, + bool fast_validate) +{ + bool out = false; + + BW_VAL_TRACE_SETUP(); + + int vlevel = 0; + int pipe_cnt = 0; + display_e2e_pipe_params_st *pipes = context->bw_ctx.dml.dml_pipe_state; + DC_LOGGER_INIT(dc->ctx->logger); + + BW_VAL_TRACE_COUNT(); + + out = dml_internal_validate(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate); + + if (pipe_cnt == 0) + goto validate_out; + + if (!out) + goto validate_fail; + + BW_VAL_TRACE_END_VOLTAGE_LEVEL(); + + if (fast_validate) { + BW_VAL_TRACE_SKIP(fast); + goto validate_out; + } + + dml_calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); + + BW_VAL_TRACE_END_WATERMARKS(); + + goto validate_out; + +validate_fail: + DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n", + 
dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states])); + + BW_VAL_TRACE_SKIP(fail); + out = false; + +validate_out: + BW_VAL_TRACE_FINISH(); + + return out; +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c new file mode 100644 index 000000000000..4ec5310a2962 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c @@ -0,0 +1,284 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifdef DML_WRAPPER_TRANSLATION_ + +static void gfx10array_mode_to_dml_params( + enum array_mode_values array_mode, + enum legacy_tiling_compat_level compat_level, + unsigned int *sw_mode) +{ + switch (array_mode) { + case DC_ARRAY_LINEAR_ALLIGNED: + case DC_ARRAY_LINEAR_GENERAL: + *sw_mode = dm_sw_linear; + break; + case DC_ARRAY_2D_TILED_THIN1: +// DC_LEGACY_TILING_ADDR_GEN_ZERO - undefined as per current code hence removed +#if 0 + if (compat_level == DC_LEGACY_TILING_ADDR_GEN_ZERO) + *sw_mode = dm_sw_gfx7_2d_thin_l_vp; + else + *sw_mode = dm_sw_gfx7_2d_thin_gl; +#endif + break; + default: + ASSERT(0); /* Not supported */ + break; + } +} + +static void swizzle_to_dml_params( + enum swizzle_mode_values swizzle, + unsigned int *sw_mode) +{ + switch (swizzle) { + case DC_SW_LINEAR: + *sw_mode = dm_sw_linear; + break; + case DC_SW_4KB_S: + *sw_mode = dm_sw_4kb_s; + break; + case DC_SW_4KB_S_X: + *sw_mode = dm_sw_4kb_s_x; + break; + case DC_SW_4KB_D: + *sw_mode = dm_sw_4kb_d; + break; + case DC_SW_4KB_D_X: + *sw_mode = dm_sw_4kb_d_x; + break; + case DC_SW_64KB_S: + *sw_mode = dm_sw_64kb_s; + break; + case DC_SW_64KB_S_X: + *sw_mode = dm_sw_64kb_s_x; + break; + case DC_SW_64KB_S_T: + *sw_mode = dm_sw_64kb_s_t; + break; + case DC_SW_64KB_D: + *sw_mode = dm_sw_64kb_d; + break; + case DC_SW_64KB_D_X: + *sw_mode = dm_sw_64kb_d_x; + break; + case DC_SW_64KB_D_T: + *sw_mode = dm_sw_64kb_d_t; + break; + case DC_SW_64KB_R_X: + *sw_mode = dm_sw_64kb_r_x; + break; + case DC_SW_VAR_S: + *sw_mode = dm_sw_var_s; + break; + case DC_SW_VAR_S_X: + *sw_mode = dm_sw_var_s_x; + break; + case DC_SW_VAR_D: + *sw_mode = dm_sw_var_d; + break; + case DC_SW_VAR_D_X: + *sw_mode = dm_sw_var_d_x; + break; + + default: + ASSERT(0); /* Not supported */ + break; + } +} + +static void dc_timing_to_dml_timing(const struct dc_crtc_timing *timing, struct 
_vcs_dpi_display_pipe_dest_params_st *dest) +{ + dest->hblank_start = timing->h_total - timing->h_front_porch; + dest->hblank_end = dest->hblank_start + - timing->h_addressable + - timing->h_border_left + - timing->h_border_right; + dest->vblank_start = timing->v_total - timing->v_front_porch; + dest->vblank_end = dest->vblank_start + - timing->v_addressable + - timing->v_border_top + - timing->v_border_bottom; + dest->htotal = timing->h_total; + dest->vtotal = timing->v_total; + dest->hactive = timing->h_addressable; + dest->vactive = timing->v_addressable; + dest->interlaced = timing->flags.INTERLACE; + dest->pixel_rate_mhz = timing->pix_clk_100hz/10000.0; + if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) + dest->pixel_rate_mhz *= 2; +} + +static enum odm_combine_mode get_dml_odm_combine(const struct pipe_ctx *pipe) +{ + int odm_split_count = 0; + enum odm_combine_mode combine_mode = dm_odm_combine_mode_disabled; + struct pipe_ctx *next_pipe = pipe->next_odm_pipe; + + // Traverse pipe tree to determine odm split count + while (next_pipe) { + odm_split_count++; + next_pipe = next_pipe->next_odm_pipe; + } + pipe = pipe->prev_odm_pipe; + while (pipe) { + odm_split_count++; + pipe = pipe->prev_odm_pipe; + } + + // Translate split to DML odm combine factor + switch (odm_split_count) { + case 1: + combine_mode = dm_odm_combine_mode_2to1; + break; + case 3: + combine_mode = dm_odm_combine_mode_4to1; + break; + default: + combine_mode = dm_odm_combine_mode_disabled; + } + + return combine_mode; +} + +static int get_dml_output_type(enum signal_type dc_signal) +{ + int dml_output_type = -1; + + switch (dc_signal) { + case SIGNAL_TYPE_DISPLAY_PORT_MST: + case SIGNAL_TYPE_DISPLAY_PORT: + dml_output_type = dm_dp; + break; + case SIGNAL_TYPE_EDP: + dml_output_type = dm_edp; + break; + case SIGNAL_TYPE_HDMI_TYPE_A: + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + dml_output_type = dm_hdmi; + break; + default: + break; + } + + return dml_output_type; +} + +static void populate_color_depth_and_encoding_from_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_output_params_st *dout) +{ + int output_bpc = 0; + + switch (timing->display_color_depth) { + case COLOR_DEPTH_666: + output_bpc = 6; + break; + case COLOR_DEPTH_888: + output_bpc = 8; + break; + case COLOR_DEPTH_101010: + output_bpc = 10; + break; + case COLOR_DEPTH_121212: + output_bpc = 12; + break; + case COLOR_DEPTH_141414: + output_bpc = 14; + break; + case COLOR_DEPTH_161616: + output_bpc = 16; + break; + case COLOR_DEPTH_999: + output_bpc = 9; + break; + case COLOR_DEPTH_111111: + output_bpc = 11; + break; + default: + output_bpc = 8; + break; + } + + switch (timing->pixel_encoding) { + case PIXEL_ENCODING_RGB: + case PIXEL_ENCODING_YCBCR444: + dout->output_format = dm_444; + dout->output_bpp = output_bpc * 3; + break; + case PIXEL_ENCODING_YCBCR420: + dout->output_format = dm_420; + dout->output_bpp = (output_bpc * 3.0) / 2; + break; + case PIXEL_ENCODING_YCBCR422: + if (timing->flags.DSC && !timing->dsc_cfg.ycbcr422_simple) + dout->output_format = dm_n422; + else + dout->output_format = dm_s422; + dout->output_bpp = output_bpc * 2; + break; + default: + dout->output_format = dm_444; + dout->output_bpp = output_bpc * 3; + } +} + +static enum source_format_class dc_source_format_to_dml_source_format(enum surface_pixel_format dc_format) +{ + enum source_format_class dml_format = dm_444_32; + + switch (dc_format) { + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + case 
SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: + dml_format = dm_420_8; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: + dml_format = dm_420_10; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: + dml_format = dm_444_64; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + dml_format = dm_444_16; + break; + case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS: + dml_format = dm_444_8; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA: + dml_format = dm_rgbe_alpha; + break; + default: + dml_format = dm_444_32; + break; + } + + return dml_format; +} + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c index 3ee858f311d1..ec636d06e18c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c @@ -61,16 +61,6 @@ static double dsc_roundf(double num) return (int)(num); } -static double dsc_ceil(double num) -{ - double retval = (int)num; - - if (retval != num && num > 0) - retval = num + 1; - - return (int)retval; -} - static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc, enum max_min max_min, float bpp) { @@ -103,7 +93,7 @@ static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc, TABLE_CASE(420, 12, min); } - if (table == 0) + if (!table) return; index = (bpp - table[0].bpp) * 2; @@ -268,24 +258,3 @@ void _do_calc_rc_params(struct rc_params *rc, rc->rc_buf_thresh[13] = 8064; } -u32 _do_bytes_per_pixel_calc(int slice_width, - u16 drm_bpp, - bool is_navite_422_or_420) -{ - float bpp; - u32 bytes_per_pixel; - double d_bytes_per_pixel; - - dc_assert_fp_enabled(); - - bpp = ((float)drm_bpp / 16.0); - d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width; - // TODO: Make sure the formula for calculating this is precise (ceiling - // vs. 
floor, and at what point they should be applied) - if (is_navite_422_or_420) - d_bytes_per_pixel /= 2; - - bytes_per_pixel = (u32)dsc_ceil(d_bytes_per_pixel * 0x10000000); - - return bytes_per_pixel; -} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h index b93b95409fbe..cad244c023cd 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h @@ -78,10 +78,6 @@ struct qp_entry { typedef struct qp_entry qp_table[]; -u32 _do_bytes_per_pixel_calc(int slice_width, - u16 drm_bpp, - bool is_navite_422_or_420); - void _do_calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_comp bpc, diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 0321b4446e05..9c74564cbd8d 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -455,6 +455,7 @@ static bool intersect_dsc_caps( if (pixel_encoding == PIXEL_ENCODING_YCBCR422 || pixel_encoding == PIXEL_ENCODING_YCBCR420) dsc_common_caps->bpp_increment_div = min(dsc_common_caps->bpp_increment_div, (uint32_t)8); + dsc_common_caps->edp_sink_max_bits_per_pixel = dsc_sink_caps->edp_max_bits_per_pixel; dsc_common_caps->is_dp = dsc_sink_caps->is_dp; return true; } @@ -513,6 +514,13 @@ static bool decide_dsc_bandwidth_range( range->min_target_bpp_x16 = preferred_bpp_x16; } } + /* TODO - make this value generic to all signal types */ + else if (dsc_caps->edp_sink_max_bits_per_pixel) { + /* apply max bpp limitation from edp sink */ + range->max_target_bpp_x16 = MIN(dsc_caps->edp_sink_max_bits_per_pixel, + max_bpp_x16); + range->min_target_bpp_x16 = min_bpp_x16; + } else { range->max_target_bpp_x16 = max_bpp_x16; range->min_target_bpp_x16 = min_bpp_x16; @@ -574,7 +582,7 @@ static bool decide_dsc_target_bpp_x16( return *target_bpp_x16 != 0; } -#define MIN_AVAILABLE_SLICES_SIZE 4 +#define MIN_AVAILABLE_SLICES_SIZE 6 static int get_available_dsc_slices(union dsc_enc_slice_caps slice_caps, int *available_slices) { @@ -860,6 +868,10 @@ static bool setup_dsc_config( min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first? is_dsc_possible = (min_slices_h <= max_slices_h); + + if (min_slices_h == 0 && max_slices_h == 0) + is_dsc_possible = false; + if (!is_dsc_possible) goto done; diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c index b19d3aeb5962..e97cf09be9d5 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c @@ -60,31 +60,3 @@ void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps) pps->dsc_version_minor); DC_FP_END(); } - -/** - * calc_dsc_bytes_per_pixel - calculate bytes per pixel - * @pps: DRM struct with all required DSC values - * - * Based on the information inside drm_dsc_config, this function calculates the - * total of bytes per pixel. - * - * @note This calculation requires float point operation, most of it executes - * under kernel_fpu_{begin,end}. 
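The floating-point path being removed here (dsc_ceil() and _do_bytes_per_pixel_calc(), which had to be bracketed by DC_FP_START()/DC_FP_END()) is replaced further down, in dscc_compute_dsc_parameters(), by a pure integer round-up division. A minimal sketch of that rounding, under the assumption that bytes_per_pixel keeps the 2^28 (0x10000000) fixed-point scale the removed code used; the helper name is made up for illustration:

	#include <stdint.h>

	/* ceil((slice_chunk_size << 28) / slice_width), using the usual
	 * ceil(a/b) == (a + b - 1) / b identity -- no FPU required.
	 */
	static uint32_t dsc_bytes_per_pixel_fixed(uint32_t slice_chunk_size,
						  uint32_t slice_width)
	{
		uint64_t tmp = (uint64_t)slice_chunk_size * 0x10000000ULL
			     + (slice_width - 1);

		return (uint32_t)(tmp / slice_width);
	}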
- * - * Return: - * Return the number of bytes per pixel - */ -u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps) - -{ - u32 ret; - u16 drm_bpp = pps->bits_per_pixel; - int slice_width = pps->slice_width; - bool is_navite_422_or_420 = pps->native_422 || pps->native_420; - - DC_FP_START(); - ret = _do_bytes_per_pixel_calc(slice_width, drm_bpp, - is_navite_422_or_420); - DC_FP_END(); - return ret; -} diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h index c2340e001b57..80921c1c0d53 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h @@ -30,7 +30,6 @@ #include "dml/dsc/rc_calc_fpu.h" void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps); -u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c index 1e19dd674e5a..7e306aa3e2b9 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c @@ -100,8 +100,7 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par int ret; struct rc_params rc; struct drm_dsc_config dsc_cfg; - - dsc_params->bytes_per_pixel = calc_dsc_bytes_per_pixel(pps); + unsigned long long tmp; calc_rc_params(&rc, pps); dsc_params->pps = *pps; @@ -113,6 +112,9 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par dsc_cfg.mux_word_size = dsc_params->pps.bits_per_component <= 10 ? 48 : 64; ret = drm_dsc_compute_rc_parameters(&dsc_cfg); + tmp = (unsigned long long)dsc_cfg.slice_chunk_size * 0x10000000 + (dsc_cfg.slice_width - 1); + do_div(tmp, (uint32_t)dsc_cfg.slice_width); //ROUND-UP + dsc_params->bytes_per_pixel = (uint32_t)tmp; copy_pps_fields(&dsc_params->pps, &dsc_cfg); dsc_params->rc_buffer_model_size = dsc_cfg.rc_bits; diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 6fc6488c54c0..f3c0e70073da 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -367,6 +367,7 @@ struct pipe_ctx { struct pll_settings pll_settings; uint8_t pipe_idx; + uint8_t pipe_idx_syncd; struct pipe_ctx *top_pipe; struct pipe_ctx *bottom_pipe; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index a6d3d859754a..52bdfea7897b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -170,6 +170,7 @@ uint8_t dc_dp_initialize_scrambling_data_symbols( enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready); void dp_set_fec_enable(struct dc_link *link, bool enable); +struct link_encoder *dp_get_link_enc(struct dc_link *link); bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable); bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update); void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable); @@ -217,4 +218,5 @@ bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx); void reset_dp_hpo_stream_encoders_for_link(struct dc_link *link); bool dp_retrieve_lttpr_cap(struct dc_link *link); +void edp_panel_backlight_power_on(struct dc_link *link); #endif /* __DC_LINK_DP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h index 806f3041db14..337c0161e72d 100644 --- 
a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h @@ -619,7 +619,7 @@ struct dcn_ip_params { }; extern const struct dcn_ip_params dcn10_ip_defaults; -bool dcn_validate_bandwidth( +bool dcn10_validate_bandwidth( struct dc *dc, struct dc_state *context, bool fast_validate); diff --git a/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h b/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h new file mode 100644 index 000000000000..5dcfbd8e2697 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h @@ -0,0 +1,34 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef DML_WRAPPER_H_ +#define DML_WRAPPER_H_ + +#include "dc.h" +#include "dml/display_mode_vba.h" + +bool dml_validate(struct dc *dc, struct dc_state *context, bool fast_validate); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index a17e5de3b100..c920c4b6077d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -211,6 +211,8 @@ struct dummy_pstate_entry { struct clk_bw_params { unsigned int vram_type; unsigned int num_channels; + unsigned int dispclk_vco_khz; + unsigned int dc_mode_softmax_memclk; struct clk_limit_table clk_table; struct wm_table wm_table; struct dummy_pstate_entry dummy_pstate_table[4]; @@ -261,6 +263,10 @@ struct clk_mgr_funcs { /* Send message to PMFW to set hard max memclk frequency to highest DPM */ void (*set_hard_max_memclk)(struct clk_mgr *clk_mgr); + /* Custom set a memclk freq range*/ + void (*set_max_memclk)(struct clk_mgr *clk_mgr, unsigned int memclk_mhz); + void (*set_min_memclk)(struct clk_mgr *clk_mgr, unsigned int memclk_mhz); + /* Get current memclk states from PMFW, update relevant structures */ void (*get_memclk_states_from_smu)(struct clk_mgr *clk_mgr); @@ -274,6 +280,7 @@ struct clk_mgr { struct dc_clocks clks; bool psr_allow_active_cache; bool force_smu_not_present; + bool dc_mode_softmax_enabled; int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figureout where this goes int dentist_vco_freq_khz; struct clk_state_registers_and_bypass boot_snapshot; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h index f94135c6e3c2..346f0ba73e86 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h +++ 
b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h @@ -61,6 +61,8 @@ struct dcn_dsc_state { uint32_t dsc_pic_height; uint32_t dsc_slice_bpg_offset; uint32_t dsc_chunk_size; + uint32_t dsc_fw_en; + uint32_t dsc_opp_source; }; @@ -88,6 +90,7 @@ struct dsc_enc_caps { int32_t max_total_throughput_mps; /* Maximum total throughput with all the slices combined */ int32_t max_slice_width; uint32_t bpp_increment_div; /* bpp increment divisor, e.g. if 16, it's 1/16th of a bit */ + uint32_t edp_sink_max_bits_per_pixel; bool is_dp; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 80e1a32bc63d..2c031586f4e6 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -139,6 +139,7 @@ struct hubp_funcs { bool (*hubp_is_flip_pending)(struct hubp *hubp); void (*set_blank)(struct hubp *hubp, bool blank); + void (*set_blank_regs)(struct hubp *hubp, bool blank); void (*set_hubp_blank_en)(struct hubp *hubp, bool blank); void (*set_cursor_attributes)( diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index c88e113b94d1..073f8b667eff 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -164,6 +164,10 @@ struct stream_encoder_funcs { void (*stop_dp_info_packets)( struct stream_encoder *enc); + void (*reset_fifo)( + struct stream_encoder *enc + ); + void (*dp_blank)( struct dc_link *link, struct stream_encoder *enc); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 7390baf916b5..c29320b3855d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -290,6 +290,8 @@ struct timing_generator_funcs { enum optc_dsc_mode dsc_mode, uint32_t dsc_bytes_per_pixel, uint32_t dsc_slice_width); + void (*get_dsc_status)(struct timing_generator *optc, + uint32_t *dsc_mode); void (*set_odm_bypass)(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); void (*set_odm_combine)(struct timing_generator *optc, int *opp_id, int opp_cnt, struct dc_crtc_timing *timing); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index d50f4bd06b5d..05053f3b4ab7 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -64,6 +64,7 @@ struct hw_sequencer_funcs { enum dc_status (*apply_ctx_to_hw)(struct dc *dc, struct dc_state *context); void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*disable_pixel_data)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank); void (*apply_ctx_for_surface)(struct dc *dc, const struct dc_stream_state *stream, int num_planes, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h index 10dcf6a5e9b1..a4e43b4826e0 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h @@ -36,7 +36,7 @@ * Initialise link encoder resource tracking. 
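 * (The function only reads from dc; the const qualifier added just below
 *  makes that explicit.)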
*/ void link_enc_cfg_init( - struct dc *dc, + const struct dc *dc, struct dc_state *state); /* diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 372c0898facd..c208925f8247 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -34,6 +34,10 @@ #define MEMORY_TYPE_HBM 2 +#define IS_PIPE_SYNCD_VALID(pipe) ((((pipe)->pipe_idx_syncd) & 0x80)?1:0) +#define GET_PIPE_SYNCD_FROM_PIPE(pipe) ((pipe)->pipe_idx_syncd & 0x7F) +#define SET_PIPE_SYNCD_TO_PIPE(pipe, pipe_syncd) ((pipe)->pipe_idx_syncd = (0x80 | pipe_syncd)) + enum dce_version resource_parse_asic_id( struct hw_asic_id asic_id); @@ -206,4 +210,11 @@ struct hpo_dp_link_encoder *resource_get_unused_hpo_dp_link_encoder( const struct resource_pool *pool); #endif +void reset_syncd_pipes_from_disabled_pipes(struct dc *dc, + struct dc_state *context); + +void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc, + struct dc_state *context, + uint8_t disabled_master_pipe_idx); + #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c index 378cc11aa047..6b5fedd9ace0 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c @@ -185,16 +185,18 @@ bool dal_irq_service_dummy_set(struct irq_service *irq_service, const struct irq_source_info *info, bool enable) { - DC_LOG_ERROR("%s: called for non-implemented irq source\n", - __func__); + DC_LOG_ERROR("%s: called for non-implemented irq source, src_id=%u, ext_id=%u\n", + __func__, info->src_id, info->ext_id); + return false; } bool dal_irq_service_dummy_ack(struct irq_service *irq_service, const struct irq_source_info *info) { - DC_LOG_ERROR("%s: called for non-implemented irq source\n", - __func__); + DC_LOG_ERROR("%s: called for non-implemented irq source, src_id=%u, ext_id=%u\n", + __func__, info->src_id, info->ext_id); + return false; } diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c index 34f43cb650f8..cf072e2347d3 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c @@ -40,10 +40,9 @@ #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" -enum dc_irq_source to_dal_irq_source_dcn10( - struct irq_service *irq_service, - uint32_t src_id, - uint32_t ext_id) +static enum dc_irq_source to_dal_irq_source_dcn10(struct irq_service *irq_service, + uint32_t src_id, + uint32_t ext_id) { switch (src_id) { case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c index a47f68634fc3..aa708b61142f 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c @@ -39,10 +39,9 @@ #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" -enum dc_irq_source to_dal_irq_source_dcn201( - struct irq_service *irq_service, - uint32_t src_id, - uint32_t ext_id) +static enum dc_irq_source to_dal_irq_source_dcn201(struct irq_service *irq_service, + uint32_t src_id, + uint32_t ext_id) { switch (src_id) { case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c 
b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index 78940cb20e10..235294534c43 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -40,10 +40,9 @@ #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" -enum dc_irq_source to_dal_irq_source_dcn21( - struct irq_service *irq_service, - uint32_t src_id, - uint32_t ext_id) +static enum dc_irq_source to_dal_irq_source_dcn21(struct irq_service *irq_service, + uint32_t src_id, + uint32_t ext_id) { switch (src_id) { case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c index 38e0ade60c7b..1b88e4e627fd 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c @@ -36,10 +36,9 @@ #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" -enum dc_irq_source to_dal_irq_source_dcn31( - struct irq_service *irq_service, - uint32_t src_id, - uint32_t ext_id) +static enum dc_irq_source to_dal_irq_source_dcn31(struct irq_service *irq_service, + uint32_t src_id, + uint32_t ext_id) { switch (src_id) { case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index cd204eef073b..83855b8a32e9 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -360,6 +360,8 @@ struct dmub_srv_hw_funcs { uint32_t (*get_gpint_dataout)(struct dmub_srv *dmub); + void (*clear_inbox0_ack_register)(struct dmub_srv *dmub); + uint32_t (*read_inbox0_ack_register)(struct dmub_srv *dmub); void (*send_inbox0_cmd)(struct dmub_srv *dmub, union dmub_inbox0_data_register data); uint32_t (*get_current_time)(struct dmub_srv *dmub); @@ -409,6 +411,7 @@ struct dmub_srv { struct dmub_srv_base_funcs funcs; struct dmub_srv_hw_funcs hw_funcs; struct dmub_rb inbox1_rb; + uint32_t inbox1_last_wptr; /** * outbox1_rb is accessed without locks (dal & dc) * and to be used only in dmub_srv_stat_get_notification() @@ -735,6 +738,45 @@ bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_ bool dmub_srv_should_detect(struct dmub_srv *dmub); +/** + * dmub_srv_send_inbox0_cmd() - Send command to DMUB using INBOX0 + * @dmub: the dmub service + * @data: the data to be sent in the INBOX0 command + * + * Send command by writing directly to INBOX0 WPTR + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_INVALID - hw_init false or hw function does not exist + */ +enum dmub_status dmub_srv_send_inbox0_cmd(struct dmub_srv *dmub, union dmub_inbox0_data_register data); + +/** + * dmub_srv_wait_for_inbox0_ack() - wait for DMUB to ACK INBOX0 command + * @dmub: the dmub service + * @timeout_us: the maximum number of microseconds to wait + * + * Wait for DMUB to ACK the INBOX0 message + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_INVALID - hw_init false or hw function does not exist + * DMUB_STATUS_TIMEOUT - wait for ack timed out + */ +enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t timeout_us); + +/** + * dmub_srv_wait_for_inbox0_ack() - clear ACK register for INBOX0 + * @dmub: the dmub service + * + * Clear ACK register for INBOX0 + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_INVALID - hw_init false or hw function does not exist + */ +enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub); + #if 
defined(__cplusplus) } #endif diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index c29a67ccef17..afe9bc839b45 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -46,10 +46,10 @@ /* Firmware versioning. */ #ifdef DMUB_EXPOSE_VERSION -#define DMUB_FW_VERSION_GIT_HASH 0x1d82d23e +#define DMUB_FW_VERSION_GIT_HASH 0xc99a4517 #define DMUB_FW_VERSION_MAJOR 0 #define DMUB_FW_VERSION_MINOR 0 -#define DMUB_FW_VERSION_REVISION 91 +#define DMUB_FW_VERSION_REVISION 97 #define DMUB_FW_VERSION_TEST 0 #define DMUB_FW_VERSION_VBIOS 0 #define DMUB_FW_VERSION_HOTFIX 0 @@ -173,13 +173,6 @@ extern "C" { #endif /** - * Number of nanoseconds per DMUB tick. - * DMCUB_TIMER_CURRENT increments in DMUB ticks, which are 10ns by default. - * If DMCUB_TIMER_WINDOW is non-zero this will no longer be true. - */ -#define NS_PER_DMUB_TICK 10 - -/** * union dmub_addr - DMUB physical/virtual 64-bit address. */ union dmub_addr { @@ -208,10 +201,9 @@ union dmub_psr_debug_flags { uint32_t use_hw_lock_mgr : 1; /** - * Unused. - * TODO: Remove. + * Use TPS3 signal when restoring the main link. */ - uint32_t log_line_nums : 1; + uint32_t force_wakeup_by_tps3 : 1; } bitfields; /** @@ -416,7 +408,14 @@ enum dmub_cmd_vbios_type { * Enables or disables power gating. */ DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING = 3, + /** + * Controls embedded panels. + */ DMUB_CMD__VBIOS_LVTMA_CONTROL = 15, + /** + * Query DP alt status on a transmitter. + */ + DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT = 26, }; //============================================================================== @@ -1550,10 +1549,14 @@ struct dmub_cmd_psr_copy_settings_data { * Currently the support is only for 0 or 1 */ uint8_t panel_inst; + /* + * DSC enable status in driver + */ + uint8_t dsc_enable_status; /** - * Explicit padding to 4 byte boundary. + * Explicit 3-byte padding to 4 byte boundary. */ - uint8_t pad3[4]; + uint8_t pad3[3]; }; /** @@ -2398,6 +2401,24 @@ struct dmub_rb_cmd_lvtma_control { }; /** + * Data passed in/out in a DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT command. + */ +struct dmub_rb_cmd_transmitter_query_dp_alt_data { + uint8_t phy_id; /**< 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4=UNIPHYE, 5=UNIPHYF */ + uint8_t is_usb; /**< whether the phy is usb */ + uint8_t is_dp_alt_disable; /**< whether dp alt mode is disabled */ + uint8_t is_dp4; /**< whether dp is in 4-lane mode */ +}; + +/** + * Definition of a DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT command. + */ +struct dmub_rb_cmd_transmitter_query_dp_alt { + struct dmub_cmd_header header; /**< header */ + struct dmub_rb_cmd_transmitter_query_dp_alt_data data; /**< payload */ +}; + +/** * Maximum number of bytes a chunk sent to DMUB for parsing */ #define DMUB_EDID_CEA_DATA_CHUNK_BYTES 8 @@ -2408,7 +2429,7 @@ struct dmub_rb_cmd_lvtma_control { struct dmub_cmd_send_edid_cea { uint16_t offset; /**< offset into the CEA block */ uint8_t length; /**< number of bytes in payload to copy as part of CEA block */ - uint16_t total_length; /**< total length of the CEA block */ + uint16_t cea_total_length; /**< total length of the CEA block */ uint8_t payload[DMUB_EDID_CEA_DATA_CHUNK_BYTES]; /**< data chunk of the CEA block */ uint8_t pad[3]; /**< padding and for future expansion */ }; @@ -2605,6 +2626,10 @@ union dmub_rb_cmd { */ struct dmub_rb_cmd_lvtma_control lvtma_control; /** + * Definition of a DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT command. 
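+ * A minimal driver-side sketch of issuing this query (the surrounding queue/execute plumbing is omitted): + * union dmub_rb_cmd cmd = { 0 }; + * cmd.query_dp_alt.header.type = DMUB_CMD__VBIOS; + * cmd.query_dp_alt.header.sub_type = DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT; + * cmd.query_dp_alt.header.payload_bytes = sizeof(cmd.query_dp_alt.data); + * cmd.query_dp_alt.data.phy_id = phy_id; + * The firmware replies in place through the same data struct (is_usb, is_dp_alt_disable, is_dp4).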
+ */ + struct dmub_rb_cmd_transmitter_query_dp_alt query_dp_alt; + /** * Definition of a DMUB_CMD__DPIA_DIG1_CONTROL command. */ struct dmub_rb_cmd_dig1_dpia_control dig1_dpia_control; @@ -2722,7 +2747,7 @@ static inline bool dmub_rb_full(struct dmub_rb *rb) static inline bool dmub_rb_push_front(struct dmub_rb *rb, const union dmub_rb_cmd *cmd) { - uint64_t volatile *dst = (uint64_t volatile *)(rb->base_address) + rb->wrpt / sizeof(uint64_t); + uint64_t volatile *dst = (uint64_t volatile *)((uint8_t *)(rb->base_address) + rb->wrpt); const uint64_t *src = (const uint64_t *)cmd; uint8_t i; @@ -2840,7 +2865,7 @@ static inline bool dmub_rb_peek_offset(struct dmub_rb *rb, static inline bool dmub_rb_out_front(struct dmub_rb *rb, union dmub_rb_out_cmd *cmd) { - const uint64_t volatile *src = (const uint64_t volatile *)(rb->base_address) + rb->rptr / sizeof(uint64_t); + const uint64_t volatile *src = (const uint64_t volatile *)((uint8_t *)(rb->base_address) + rb->rptr); uint64_t *dst = (uint64_t *)cmd; uint8_t i; @@ -2888,7 +2913,7 @@ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb) uint32_t wptr = rb->wrpt; while (rptr != wptr) { - uint64_t volatile *data = (uint64_t volatile *)rb->base_address + rptr / sizeof(uint64_t); + uint64_t volatile *data = (uint64_t volatile *)((uint8_t *)(rb->base_address) + rptr); //uint64_t volatile *p = (uint64_t volatile *)data; uint64_t temp; uint8_t i; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 56d400ffa7ac..f673a1c1777a 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -100,24 +100,9 @@ void dmub_flush_buffer_mem(const struct dmub_fb *fb) } static const struct dmub_fw_meta_info * -dmub_get_fw_meta_info(const struct dmub_srv_region_params *params) +dmub_get_fw_meta_info_from_blob(const uint8_t *blob, uint32_t blob_size, uint32_t meta_offset) { const union dmub_fw_meta *meta; - const uint8_t *blob = NULL; - uint32_t blob_size = 0; - uint32_t meta_offset = 0; - - if (params->fw_bss_data && params->bss_data_size) { - /* Legacy metadata region. */ - blob = params->fw_bss_data; - blob_size = params->bss_data_size; - meta_offset = DMUB_FW_META_OFFSET; - } else if (params->fw_inst_const && params->inst_const_size) { - /* Combined metadata region. */ - blob = params->fw_inst_const; - blob_size = params->inst_const_size; - meta_offset = 0; - } if (!blob || !blob_size) return NULL; @@ -134,6 +119,32 @@ dmub_get_fw_meta_info(const struct dmub_srv_region_params *params) return &meta->info; } +static const struct dmub_fw_meta_info * +dmub_get_fw_meta_info(const struct dmub_srv_region_params *params) +{ + const struct dmub_fw_meta_info *info = NULL; + + if (params->fw_bss_data && params->bss_data_size) { + /* Legacy metadata region. */ + info = dmub_get_fw_meta_info_from_blob(params->fw_bss_data, + params->bss_data_size, + DMUB_FW_META_OFFSET); + } else if (params->fw_inst_const && params->inst_const_size) { + /* Combined metadata region - can be aligned to 16-bytes. 
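+ * The marker is therefore probed at each of the first 16 byte offsets: dmub_get_fw_meta_info_from_blob() returns NULL for an offset without a valid metadata block, so the loop below simply stops at the first offset that yields one.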
*/ + uint32_t i; + + for (i = 0; i < 16; ++i) { + info = dmub_get_fw_meta_info_from_blob( + params->fw_inst_const, params->inst_const_size, i); + + if (info) + break; + } + } + + return info; +} + static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) { struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs; @@ -598,6 +609,8 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub, enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub) { + struct dmub_rb flush_rb; + if (!dmub->hw_init) return DMUB_STATUS_INVALID; @@ -606,9 +619,14 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub) * been flushed to framebuffer memory. Otherwise DMCUB might * read back stale, fully invalid or partially invalid data. */ - dmub_rb_flush_pending(&dmub->inbox1_rb); + flush_rb = dmub->inbox1_rb; + flush_rb.rptr = dmub->inbox1_last_wptr; + dmub_rb_flush_pending(&flush_rb); + + dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt); + + dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt; - dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt); return DMUB_STATUS_OK; } @@ -831,3 +849,38 @@ bool dmub_srv_should_detect(struct dmub_srv *dmub) return dmub->hw_funcs.should_detect(dmub); } + +enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub) +{ + if (!dmub->hw_init || !dmub->hw_funcs.clear_inbox0_ack_register) + return DMUB_STATUS_INVALID; + + dmub->hw_funcs.clear_inbox0_ack_register(dmub); + return DMUB_STATUS_OK; +} + +enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t timeout_us) +{ + uint32_t i = 0; + uint32_t ack = 0; + + if (!dmub->hw_init || !dmub->hw_funcs.read_inbox0_ack_register) + return DMUB_STATUS_INVALID; + + for (i = 0; i <= timeout_us; i++) { + ack = dmub->hw_funcs.read_inbox0_ack_register(dmub); + if (ack) + return DMUB_STATUS_OK; + } + return DMUB_STATUS_TIMEOUT; +} + +enum dmub_status dmub_srv_send_inbox0_cmd(struct dmub_srv *dmub, + union dmub_inbox0_data_register data) +{ + if (!dmub->hw_init || !dmub->hw_funcs.send_inbox0_cmd) + return DMUB_STATUS_INVALID; + + dmub->hw_funcs.send_inbox0_cmd(dmub, data); + return DMUB_STATUS_OK; +} diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h index 4de59b66bb1a..a2b80514d83e 100644 --- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h +++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h @@ -35,6 +35,7 @@ #define DP_BRANCH_DEVICE_ID_00E04C 0x00E04C #define DP_BRANCH_DEVICE_ID_006037 0x006037 +#define DP_DEVICE_ID_38EC11 0x38EC11 enum ddc_result { DDC_RESULT_UNKNOWN = 0, DDC_RESULT_SUCESSFULL, @@ -117,4 +118,7 @@ struct av_sync_data { uint8_t aud_del_ins3;/* DPCD 0002Dh */ }; +static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3, 0}; +static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5, 0}; + #endif /* __DAL_DDC_SERVICE_TYPES_H__ */ diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h index 370fad883e33..f093b49c5e6e 100644 --- a/drivers/gpu/drm/amd/display/include/logger_types.h +++ b/drivers/gpu/drm/amd/display/include/logger_types.h @@ -72,9 +72,7 @@ #define DC_LOG_DSC(...) DRM_DEBUG_KMS(__VA_ARGS__) #define DC_LOG_SMU(...) pr_debug("[SMU_MSG]:"__VA_ARGS__) #define DC_LOG_DWB(...) DRM_DEBUG_KMS(__VA_ARGS__) -#if defined(CONFIG_DRM_AMD_DC_DCN) #define DC_LOG_DP2(...) 
DRM_DEBUG_KMS(__VA_ARGS__) -#endif struct dal_logger; @@ -126,9 +124,7 @@ enum dc_log_type { LOG_MAX_HW_POINTS, LOG_ALL_TF_CHANNELS, LOG_SAMPLE_1DLUT, -#if defined(CONFIG_DRM_AMD_DC_DCN) LOG_DP2, -#endif LOG_SECTION_TOTAL_COUNT }; diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index f1a46d16f7ea..f57a1478f0fe 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -98,7 +98,8 @@ enum amd_ip_block_type { AMD_IP_BLOCK_TYPE_ACP, AMD_IP_BLOCK_TYPE_VCN, AMD_IP_BLOCK_TYPE_MES, - AMD_IP_BLOCK_TYPE_JPEG + AMD_IP_BLOCK_TYPE_JPEG, + AMD_IP_BLOCK_TYPE_NUM, }; enum amd_clockgating_state { @@ -230,6 +231,8 @@ enum DC_FEATURE_MASK { DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2), //0x4, disabled by default DC_PSR_MASK = (1 << 3), //0x8, disabled by default for dcn < 3.1 DC_EDP_NO_POWER_SEQUENCING = (1 << 4), //0x10, disabled by default + DC_DISABLE_LTTPR_DP1_4A = (1 << 5), //0x20, disabled by default + DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default }; enum DC_DEBUG_MASK { diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h index 6d0052ce6bed..da6d380c948b 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h @@ -354,5 +354,12 @@ #define mmMP1_SMN_EXT_SCRATCH7 0x03c7 #define mmMP1_SMN_EXT_SCRATCH7_BASE_IDX 0 +/* + * addressBlock: mp_SmuMp1Pub_MmuDec + * base address: 0x0 + */ +#define smnMP1_PMI_3_START 0x3030204 +#define smnMP1_PMI_3_FIFO 0x3030208 +#define smnMP1_PMI_3 0x3030600 #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h index 136fb5de6a4c..a5ae2a801254 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h @@ -959,5 +959,17 @@ #define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0 #define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL +// MP1_PMI_3_START +#define MP1_PMI_3_START__ENABLE_MASK 0x80000000L +// MP1_PMI_3_FIFO +#define MP1_PMI_3_FIFO__DEPTH_MASK 0x00000fffL + +// MP1_PMI_3_START +#define MP1_PMI_3_START__ENABLE__SHIFT 0x0000001f +// MP1_PMI_3_FIFO +#define MP1_PMI_3_FIFO__DEPTH__SHIFT 0x00000000 + + + #endif diff --git a/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h b/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h index 9cb5f3631c60..ce79e5de8ce3 100644 --- a/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h +++ b/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h @@ -25,15 +25,15 @@ #define MAX_SEGMENT 5 -struct IP_BASE_INSTANCE +struct IP_BASE_INSTANCE { unsigned int segment[MAX_SEGMENT]; -}; - -struct IP_BASE +} __maybe_unused; + +struct IP_BASE { struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; -}; +} __maybe_unused; static const struct IP_BASE ATHUB_BASE ={ { { { 0x00000C00, 0, 0, 0, 0 } }, diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index c84bd7b2cf59..ac941f62cbed 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -33,12 +33,11 @@ #include <linux/dma-fence.h> struct pci_dev; +struct amdgpu_device; #define KGD_MAX_QUEUES 128 struct kfd_dev; -struct kgd_dev; - struct kgd_mem; enum kfd_preempt_type { @@ -228,61 +227,61 @@ struct tile_config { */ struct kfd2kgd_calls { /* Register access functions */ - void 
(*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid, + void (*program_sh_mem_settings)(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases); - int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, u32 pasid, + int (*set_pasid_vmid_mapping)(struct amdgpu_device *adev, u32 pasid, unsigned int vmid); - int (*init_interrupts)(struct kgd_dev *kgd, uint32_t pipe_id); + int (*init_interrupts)(struct amdgpu_device *adev, uint32_t pipe_id); - int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, + int (*hqd_load)(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, uint32_t wptr_mask, struct mm_struct *mm); - int (*hiq_mqd_load)(struct kgd_dev *kgd, void *mqd, + int (*hiq_mqd_load)(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t doorbell_off); - int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd, + int (*hqd_sdma_load)(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm); - int (*hqd_dump)(struct kgd_dev *kgd, + int (*hqd_dump)(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs); - int (*hqd_sdma_dump)(struct kgd_dev *kgd, + int (*hqd_sdma_dump)(struct amdgpu_device *adev, uint32_t engine_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs); - bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id); - - int (*hqd_destroy)(struct kgd_dev *kgd, void *mqd, uint32_t reset_type, - unsigned int timeout, uint32_t pipe_id, + bool (*hqd_is_occupied)(struct amdgpu_device *adev, + uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id); - bool (*hqd_sdma_is_occupied)(struct kgd_dev *kgd, void *mqd); + int (*hqd_destroy)(struct amdgpu_device *adev, void *mqd, + uint32_t reset_type, unsigned int timeout, + uint32_t pipe_id, uint32_t queue_id); + + bool (*hqd_sdma_is_occupied)(struct amdgpu_device *adev, void *mqd); - int (*hqd_sdma_destroy)(struct kgd_dev *kgd, void *mqd, + int (*hqd_sdma_destroy)(struct amdgpu_device *adev, void *mqd, unsigned int timeout); - int (*address_watch_disable)(struct kgd_dev *kgd); - int (*address_watch_execute)(struct kgd_dev *kgd, + int (*address_watch_disable)(struct amdgpu_device *adev); + int (*address_watch_execute)(struct amdgpu_device *adev, unsigned int watch_point_id, uint32_t cntl_val, uint32_t addr_hi, uint32_t addr_lo); - int (*wave_control_execute)(struct kgd_dev *kgd, + int (*wave_control_execute)(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd); - uint32_t (*address_watch_get_offset)(struct kgd_dev *kgd, + uint32_t (*address_watch_get_offset)(struct amdgpu_device *adev, unsigned int watch_point_id, unsigned int reg_offset); - bool (*get_atc_vmid_pasid_mapping_info)( - struct kgd_dev *kgd, + bool (*get_atc_vmid_pasid_mapping_info)(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid); @@ -290,16 +289,16 @@ struct kfd2kgd_calls { * passed to the shader by the CP. It's the user mode driver's * responsibility. 
*/ - void (*set_scratch_backing_va)(struct kgd_dev *kgd, + void (*set_scratch_backing_va)(struct amdgpu_device *adev, uint64_t va, uint32_t vmid); - void (*set_vm_context_page_table_base)(struct kgd_dev *kgd, + void (*set_vm_context_page_table_base)(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base); - uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd); + uint32_t (*read_vmid_from_vmfault_reg)(struct amdgpu_device *adev); - void (*get_cu_occupancy)(struct kgd_dev *kgd, int pasid, int *wave_cnt, - int *max_waves_per_cu); - void (*program_trap_handler_settings)(struct kgd_dev *kgd, + void (*get_cu_occupancy)(struct amdgpu_device *adev, int pasid, + int *wave_cnt, int *max_waves_per_cu); + void (*program_trap_handler_settings)(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr); }; diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index bac15c466733..5c0867ebcfce 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -153,6 +153,10 @@ enum PP_SMC_POWER_PROFILE { PP_SMC_POWER_PROFILE_COUNT, }; +extern const char * const amdgpu_pp_profile_name[PP_SMC_POWER_PROFILE_COUNT]; + + + enum { PP_GROUP_UNKNOWN = 0, PP_GROUP_GFX = 1, diff --git a/drivers/gpu/drm/amd/include/yellow_carp_offset.h b/drivers/gpu/drm/amd/include/yellow_carp_offset.h index 76b9eb3f441d..28a56b56bcaf 100644 --- a/drivers/gpu/drm/amd/include/yellow_carp_offset.h +++ b/drivers/gpu/drm/amd/include/yellow_carp_offset.h @@ -9,12 +9,12 @@ struct IP_BASE_INSTANCE { unsigned int segment[MAX_SEGMENT]; -}; +} __maybe_unused; struct IP_BASE { struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; -}; +} __maybe_unused; static const struct IP_BASE ACP_BASE = { { { { 0x02403800, 0x00480000, 0, 0, 0, 0 } }, diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 03581d5b1836..08362d506534 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -927,6 +927,13 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block { int ret = 0; const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON; + + if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) { + dev_dbg(adev->dev, "IP block%d already in the target %s state!", + block_type, gate ? 
"gate" : "ungate"); + return 0; + } switch (block_type) { case AMD_IP_BLOCK_TYPE_UVD: @@ -979,6 +986,9 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block break; } + if (!ret) + atomic_set(&adev->pm.pwr_state[block_type], pwr_state); + return ret; } diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 41472ed99253..082539c70fd4 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -82,6 +82,16 @@ static const struct hwmon_temp_label { {PP_TEMP_MEM, "mem"}, }; +const char * const amdgpu_pp_profile_name[] = { + "BOOTUP_DEFAULT", + "3D_FULL_SCREEN", + "POWER_SAVING", + "VIDEO", + "VR", + "COMPUTE", + "CUSTOM" +}; + /** * DOC: power_dpm_state * @@ -3759,5 +3769,7 @@ void amdgpu_debugfs_pm_init(struct amdgpu_device *adev) adev, &amdgpu_debugfs_pm_prv_buffer_fops, adev->pm.smu_prv_buffer_size); + + amdgpu_smu_stb_debug_fs_init(adev); #endif } diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index 98f1b3d8c1d5..c464a045000d 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -417,6 +417,15 @@ struct amdgpu_dpm { enum amd_dpm_forced_level forced_level; }; +enum ip_power_state { + POWER_STATE_UNKNOWN, + POWER_STATE_ON, + POWER_STATE_OFF, +}; + +/* Used to mask smu debug modes */ +#define SMU_DEBUG_HALT_ON_ERROR 0x1 + struct amdgpu_pm { struct mutex mutex; u32 current_sclk; @@ -452,6 +461,13 @@ struct amdgpu_pm { struct i2c_adapter smu_i2c; struct mutex smu_i2c_mutex; struct list_head pm_attr_list; + + atomic_t pwr_state[AMD_IP_BLOCK_TYPE_NUM]; + + /* + * 0 = disabled (default), otherwise enable corresponding debug mode + */ + uint32_t smu_debug_mask; }; #define R600_SSTU_DFLT 0 diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h index 3557f4e7fc30..2b9b9a7ba97a 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h @@ -324,6 +324,7 @@ enum smu_table_id SMU_TABLE_OVERDRIVE, SMU_TABLE_I2C_COMMANDS, SMU_TABLE_PACE, + SMU_TABLE_ECCINFO, SMU_TABLE_COUNT, }; @@ -340,6 +341,7 @@ struct smu_table_context void *max_sustainable_clocks; struct smu_bios_boot_up_values boot_values; void *driver_pptable; + void *ecc_table; struct smu_table tables[SMU_TABLE_COUNT]; /* * The driver table is just a staging buffer for @@ -472,7 +474,14 @@ struct cmn2asic_mapping { int map_to; }; +struct stb_context { + uint32_t stb_buf_size; + bool enabled; + spinlock_t lock; +}; + #define WORKLOAD_POLICY_MAX 7 + struct smu_context { struct amdgpu_device *adev; @@ -559,6 +568,8 @@ struct smu_context uint16_t cpu_core_num; struct smu_user_dpm_profile user_dpm_profile; + + struct stb_context stb_context; }; struct i2c_adapter; @@ -1261,6 +1272,17 @@ struct pptable_funcs { * of SMUBUS table. */ int (*send_hbm_bad_pages_num)(struct smu_context *smu, uint32_t size); + + /** + * @get_ecc_table: message SMU to get ECC INFO table. + */ + ssize_t (*get_ecc_info)(struct smu_context *smu, void *table); + + + /** + * @stb_collect_info: Collects Smart Trace Buffers data. 
+ */ + int (*stb_collect_info)(struct smu_context *smu, void *buf, uint32_t size); }; typedef enum { @@ -1397,6 +1419,9 @@ int smu_set_light_sbr(struct smu_context *smu, bool enable); int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event, uint64_t event_arg); +int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc); +int smu_stb_collect_info(struct smu_context *smu, void *buff, uint32_t size); +void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev); #endif #endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h b/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h index a017983ff1fa..0f67c56c2863 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h +++ b/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h @@ -140,6 +140,8 @@ #define MAX_SW_I2C_COMMANDS 24 +#define ALDEBARAN_UMC_CHANNEL_NUM 32 + typedef enum { I2C_CONTROLLER_PORT_0, //CKSVII2C0 I2C_CONTROLLER_PORT_1, //CKSVII2C1 @@ -507,6 +509,19 @@ typedef struct { uint32_t MmHubPadding[8]; // SMU internal use } AvfsDebugTable_t; +typedef struct { + uint64_t mca_umc_status; + uint64_t mca_umc_addr; + uint16_t ce_count_lo_chip; + uint16_t ce_count_hi_chip; + + uint32_t eccPadding; +} EccInfo_t; + +typedef struct { + EccInfo_t EccInfo[ALDEBARAN_UMC_CHANNEL_NUM]; +} EccInfoTable_t; + // These defines are used with the following messages: // SMC_MSG_TransferTableDram2Smu // SMC_MSG_TransferTableSmu2Dram @@ -517,6 +532,7 @@ typedef struct { #define TABLE_SMU_METRICS 4 #define TABLE_DRIVER_SMU_CONFIG 5 #define TABLE_I2C_COMMANDS 6 -#define TABLE_COUNT 7 +#define TABLE_ECCINFO 7 +#define TABLE_COUNT 8 #endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h index e5d3b0d1a032..44af23ae059e 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h @@ -27,7 +27,9 @@ #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF #define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04 -#define SMU13_DRIVER_IF_VERSION_ALDE 0x07 +#define SMU13_DRIVER_IF_VERSION_ALDE 0x08 + +#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms /* MP Apertures */ #define MP0_Public 0x03800000 @@ -216,7 +218,6 @@ int smu_v13_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) int smu_v13_0_baco_enter(struct smu_context *smu); int smu_v13_0_baco_exit(struct smu_context *smu); -int smu_v13_0_mode1_reset(struct smu_context *smu); int smu_v13_0_mode2_reset(struct smu_context *smu); int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 8d796ed3b7d1..3ab67b232cd4 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -1328,7 +1328,12 @@ static int pp_set_powergating_by_smu(void *handle, pp_dpm_powergate_vce(handle, gate); break; case AMD_IP_BLOCK_TYPE_GMC: - pp_dpm_powergate_mmhub(handle); + /* + * For now, this is only used on PICASSO. + * And only "gate" operation is supported. 
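+ * An "ungate" request is accepted but intentionally ignored; the break below still returns success, so callers need no special casing.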
+ */ + if (gate) + pp_dpm_powergate_mmhub(handle); break; case AMD_IP_BLOCK_TYPE_GFX: ret = pp_dpm_powergate_gfx(handle, gate); @@ -1551,7 +1556,7 @@ static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks) static int pp_asic_reset_mode_2(void *handle) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; + int ret = 0; if (!hwmgr || !hwmgr->pm_en) return -EINVAL; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c index 258c573acc97..9ddd8491ff00 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c @@ -1024,8 +1024,6 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, uint32_t min_freq, max_freq = 0; uint32_t ret = 0; - phm_get_sysfs_buf(&buf, &size); - switch (type) { case PP_SCLK: smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now); @@ -1038,13 +1036,13 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, else i = 1; - size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", + size += sprintf(buf + size, "0: %uMhz %s\n", data->gfx_min_freq_limit/100, i == 0 ? "*" : ""); - size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", + size += sprintf(buf + size, "1: %uMhz %s\n", i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK, i == 1 ? "*" : ""); - size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", + size += sprintf(buf + size, "2: %uMhz %s\n", data->gfx_max_freq_limit/100, i == 2 ? "*" : ""); break; @@ -1052,7 +1050,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now); for (i = 0; i < mclk_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, mclk_table->entries[i].clk / 100, ((mclk_table->entries[i].clk / 100) @@ -1067,10 +1065,10 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, if (ret) return ret; - size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); - size += sysfs_emit_at(buf, size, "0: %10uMhz\n", + size += sprintf(buf + size, "%s:\n", "OD_SCLK"); + size += sprintf(buf + size, "0: %10uMhz\n", (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq); - size += sysfs_emit_at(buf, size, "1: %10uMhz\n", + size += sprintf(buf + size, "1: %10uMhz\n", (data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq); } break; @@ -1083,8 +1081,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, if (ret) return ret; - size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); - size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", + size += sprintf(buf + size, "%s:\n", "OD_RANGE"); + size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", min_freq, max_freq); } break; @@ -1441,13 +1439,6 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) {70, 90, 0, 0,}, {30, 60, 0, 6,}, }; - static const char *profile_name[6] = { - "BOOTUP_DEFAULT", - "3D_FULL_SCREEN", - "POWER_SAVING", - "VIDEO", - "VR", - "COMPUTE"}; static const char *title[6] = {"NUM", "MODE_NAME", "BUSY_SET_POINT", @@ -1465,7 +1456,7 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) for (i = 0; i <= PP_SMC_POWER_PROFILE_COMPUTE; i++) size += sysfs_emit_at(buf, size, "%3d %14s%s: %14d %3d %10d %14d\n", - i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ", + i, amdgpu_pp_profile_name[i], (i == hwmgr->power_profile_mode) ? 
"*" : " ", profile_mode_setting[i][0], profile_mode_setting[i][1], profile_mode_setting[i][2], profile_mode_setting[i][3]); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index aceebf584225..cd99db0dc2be 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -4914,8 +4914,6 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, int size = 0; uint32_t i, now, clock, pcie_speed; - phm_get_sysfs_buf(&buf, &size); - switch (type) { case PP_SCLK: smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock); @@ -4928,7 +4926,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, now = i; for (i = 0; i < sclk_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, sclk_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); break; @@ -4943,7 +4941,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, now = i; for (i = 0; i < mclk_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, mclk_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); break; @@ -4957,7 +4955,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, now = i; for (i = 0; i < pcie_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %s %s\n", i, + size += sprintf(buf + size, "%d: %s %s\n", i, (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" : (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" : (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "", @@ -4965,32 +4963,32 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, break; case OD_SCLK: if (hwmgr->od_enabled) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); + size += sprintf(buf + size, "%s:\n", "OD_SCLK"); for (i = 0; i < odn_sclk_table->num_of_pl; i++) - size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n", + size += sprintf(buf + size, "%d: %10uMHz %10umV\n", i, odn_sclk_table->entries[i].clock/100, odn_sclk_table->entries[i].vddc); } break; case OD_MCLK: if (hwmgr->od_enabled) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK"); + size += sprintf(buf + size, "%s:\n", "OD_MCLK"); for (i = 0; i < odn_mclk_table->num_of_pl; i++) - size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n", + size += sprintf(buf + size, "%d: %10uMHz %10umV\n", i, odn_mclk_table->entries[i].clock/100, odn_mclk_table->entries[i].vddc); } break; case OD_RANGE: if (hwmgr->od_enabled) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); - size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", + size += sprintf(buf + size, "%s:\n", "OD_RANGE"); + size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", data->golden_dpm_table.sclk_table.dpm_levels[0].value/100, hwmgr->platform_descriptor.overdriveLimit.engineClock/100); - size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n", + size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n", data->golden_dpm_table.mclk_table.dpm_levels[0].value/100, hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); - size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n", + size += sprintf(buf + size, "VDDC: %7umV %11umV\n", data->odn_dpm_table.min_vddc, data->odn_dpm_table.max_vddc); } @@ -5500,14 +5498,6 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) uint32_t i, size = 0; uint32_t len; - static const char *profile_name[7] = {"BOOTUP_DEFAULT", - 
"3D_FULL_SCREEN", - "POWER_SAVING", - "VIDEO", - "VR", - "COMPUTE", - "CUSTOM"}; - static const char *title[8] = {"NUM", "MODE_NAME", "SCLK_UP_HYST", @@ -5531,7 +5521,7 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) for (i = 0; i < len; i++) { if (i == hwmgr->power_profile_mode) { size += sysfs_emit_at(buf, size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n", - i, profile_name[i], "*", + i, amdgpu_pp_profile_name[i], "*", data->current_profile_setting.sclk_up_hyst, data->current_profile_setting.sclk_down_hyst, data->current_profile_setting.sclk_activity, @@ -5542,12 +5532,12 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) } if (smu7_profiling[i].bupdate_sclk) size += sysfs_emit_at(buf, size, "%3d %16s: %8d %16d %16d ", - i, profile_name[i], smu7_profiling[i].sclk_up_hyst, + i, amdgpu_pp_profile_name[i], smu7_profiling[i].sclk_up_hyst, smu7_profiling[i].sclk_down_hyst, smu7_profiling[i].sclk_activity); else size += sysfs_emit_at(buf, size, "%3d %16s: %8s %16s %16s ", - i, profile_name[i], "-", "-", "-"); + i, amdgpu_pp_profile_name[i], "-", "-", "-"); if (smu7_profiling[i].bupdate_mclk) size += sysfs_emit_at(buf, size, "%16d %16d %16d\n", diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c index 8e28a8eecefc..03bf8f069222 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c @@ -1550,8 +1550,6 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr, uint32_t i, now; int size = 0; - phm_get_sysfs_buf(&buf, &size); - switch (type) { case PP_SCLK: now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, @@ -1561,7 +1559,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr, CURR_SCLK_INDEX); for (i = 0; i < sclk_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, sclk_table->entries[i].clk / 100, (i == now) ? "*" : ""); break; @@ -1573,7 +1571,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr, CURR_MCLK_INDEX); for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100, (SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : ""); break; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c index c981fc2882f0..3f040be0d158 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c @@ -4639,8 +4639,6 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, int i, now, size = 0, count = 0; - phm_get_sysfs_buf(&buf, &size); - switch (type) { case PP_SCLK: if (data->registry_data.sclk_dpm_key_disabled) @@ -4654,7 +4652,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, else count = sclk_table->count; for (i = 0; i < count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, sclk_table->dpm_levels[i].value / 100, (i == now) ? 
"*" : ""); break; @@ -4665,7 +4663,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now); for (i = 0; i < mclk_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, mclk_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); break; @@ -4676,7 +4674,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now); for (i = 0; i < soc_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, soc_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); break; @@ -4688,7 +4686,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now); for (i = 0; i < dcef_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, dcef_table->dpm_levels[i].value / 100, (dcef_table->dpm_levels[i].value / 100 == now) ? "*" : ""); @@ -4702,7 +4700,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, gen_speed = pptable->PcieGenSpeed[i]; lane_width = pptable->PcieLaneCount[i]; - size += sysfs_emit_at(buf, size, "%d: %s %s %s\n", i, + size += sprintf(buf + size, "%d: %s %s %s\n", i, (gen_speed == 0) ? "2.5GT/s," : (gen_speed == 1) ? "5.0GT/s," : (gen_speed == 2) ? "8.0GT/s," : @@ -4721,34 +4719,34 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, case OD_SCLK: if (hwmgr->od_enabled) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); + size += sprintf(buf + size, "%s:\n", "OD_SCLK"); podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk; for (i = 0; i < podn_vdd_dep->count; i++) - size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n", + size += sprintf(buf + size, "%d: %10uMhz %10umV\n", i, podn_vdd_dep->entries[i].clk / 100, podn_vdd_dep->entries[i].vddc); } break; case OD_MCLK: if (hwmgr->od_enabled) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK"); + size += sprintf(buf + size, "%s:\n", "OD_MCLK"); podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk; for (i = 0; i < podn_vdd_dep->count; i++) - size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n", + size += sprintf(buf + size, "%d: %10uMhz %10umV\n", i, podn_vdd_dep->entries[i].clk/100, podn_vdd_dep->entries[i].vddc); } break; case OD_RANGE: if (hwmgr->od_enabled) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); - size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", + size += sprintf(buf + size, "%s:\n", "OD_RANGE"); + size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", data->golden_dpm_table.gfx_table.dpm_levels[0].value/100, hwmgr->platform_descriptor.overdriveLimit.engineClock/100); - size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n", + size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n", data->golden_dpm_table.mem_table.dpm_levels[0].value/100, hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); - size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n", + size += sprintf(buf + size, "VDDC: %7umV %11umV\n", data->odn_dpm_table.min_vddc, data->odn_dpm_table.max_vddc); } @@ -5099,13 +5097,6 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) {70, 90, 0, 0,}, {30, 60, 0, 6,}, }; - static const char *profile_name[7] = {"BOOTUP_DEFAULT", - "3D_FULL_SCREEN", - "POWER_SAVING", - "VIDEO", - "VR", - "COMPUTE", - "CUSTOM"}; static const char *title[6] = {"NUM", "MODE_NAME", 
"BUSY_SET_POINT", @@ -5123,11 +5114,12 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) for (i = 0; i < PP_SMC_POWER_PROFILE_CUSTOM; i++) size += sysfs_emit_at(buf, size, "%3d %14s%s: %14d %3d %10d %14d\n", - i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ", + i, amdgpu_pp_profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ", profile_mode_setting[i][0], profile_mode_setting[i][1], profile_mode_setting[i][2], profile_mode_setting[i][3]); + size += sysfs_emit_at(buf, size, "%3d %14s%s: %14d %3d %10d %14d\n", i, - profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ", + amdgpu_pp_profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ", data->custom_profile_mode[0], data->custom_profile_mode[1], data->custom_profile_mode[2], data->custom_profile_mode[3]); return size; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c index f7e783e1c888..a2f4d6773d45 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c @@ -2246,8 +2246,6 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, int i, now, size = 0; struct pp_clock_levels_with_latency clocks; - phm_get_sysfs_buf(&buf, &size); - switch (type) { case PP_SCLK: PP_ASSERT_WITH_CODE( @@ -2260,7 +2258,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get gfx clk levels Failed!", return -1); for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : ""); break; @@ -2276,7 +2274,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get memory clk levels Failed!", return -1); for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : ""); break; @@ -2294,7 +2292,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get soc clk levels Failed!", return -1); for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : ""); break; @@ -2312,7 +2310,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get dcef clk levels Failed!", return -1); for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz / 1000 == now) ? 
"*" : ""); break; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c index 03e63be4ee27..97b3ad369046 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c @@ -3366,8 +3366,6 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, int ret = 0; uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width; - phm_get_sysfs_buf(&buf, &size); - switch (type) { case PP_SCLK: ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now); @@ -3376,13 +3374,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); if (vega20_get_sclks(hwmgr, &clocks)) { - size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n", + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", now / 100); break; } for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); break; @@ -3394,13 +3392,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); if (vega20_get_memclocks(hwmgr, &clocks)) { - size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n", + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", now / 100); break; } for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); break; @@ -3412,13 +3410,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); if (vega20_get_socclocks(hwmgr, &clocks)) { - size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n", + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", now / 100); break; } for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); break; @@ -3430,7 +3428,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); for (i = 0; i < fclk_dpm_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, fclk_dpm_table->dpm_levels[i].value, fclk_dpm_table->dpm_levels[i].value == (now / 100) ? "*" : ""); break; @@ -3442,13 +3440,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); if (vega20_get_dcefclocks(hwmgr, &clocks)) { - size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n", + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", now / 100); break; } for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); break; @@ -3462,7 +3460,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, gen_speed = pptable->PcieGenSpeed[i]; lane_width = pptable->PcieLaneCount[i]; - size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i, + size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i, (gen_speed == 0) ? "2.5GT/s," : (gen_speed == 1) ? "5.0GT/s," : (gen_speed == 2) ? 
"8.0GT/s," : @@ -3483,18 +3481,18 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, case OD_SCLK: if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); - size += sysfs_emit_at(buf, size, "0: %10uMhz\n", + size += sprintf(buf + size, "%s:\n", "OD_SCLK"); + size += sprintf(buf + size, "0: %10uMhz\n", od_table->GfxclkFmin); - size += sysfs_emit_at(buf, size, "1: %10uMhz\n", + size += sprintf(buf + size, "1: %10uMhz\n", od_table->GfxclkFmax); } break; case OD_MCLK: if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK"); - size += sysfs_emit_at(buf, size, "1: %10uMhz\n", + size += sprintf(buf + size, "%s:\n", "OD_MCLK"); + size += sprintf(buf + size, "1: %10uMhz\n", od_table->UclkFmax); } @@ -3507,14 +3505,14 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_VDDC_CURVE"); - size += sysfs_emit_at(buf, size, "0: %10uMhz %10dmV\n", + size += sprintf(buf + size, "%s:\n", "OD_VDDC_CURVE"); + size += sprintf(buf + size, "0: %10uMhz %10dmV\n", od_table->GfxclkFreq1, od_table->GfxclkVolt1 / VOLTAGE_SCALE); - size += sysfs_emit_at(buf, size, "1: %10uMhz %10dmV\n", + size += sprintf(buf + size, "1: %10uMhz %10dmV\n", od_table->GfxclkFreq2, od_table->GfxclkVolt2 / VOLTAGE_SCALE); - size += sysfs_emit_at(buf, size, "2: %10uMhz %10dmV\n", + size += sprintf(buf + size, "2: %10uMhz %10dmV\n", od_table->GfxclkFreq3, od_table->GfxclkVolt3 / VOLTAGE_SCALE); } @@ -3522,17 +3520,17 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, break; case OD_RANGE: - size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + size += sprintf(buf + size, "%s:\n", "OD_RANGE"); if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) { - size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", + size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value, od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value); } if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { - size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n", + size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_UCLK_FMAX].min_value, od8_settings[OD8_SETTING_UCLK_FMAX].max_value); } @@ -3543,22 +3541,22 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) { - size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n", + size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value, od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n", + size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n", od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n", + size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value, 
od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n", + size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n", od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n", + size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value, od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n", + size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n", od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value); } @@ -3982,14 +3980,6 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) DpmActivityMonitorCoeffInt_t activity_monitor; uint32_t i, size = 0; uint16_t workload_type = 0; - static const char *profile_name[] = { - "BOOTUP_DEFAULT", - "3D_FULL_SCREEN", - "POWER_SAVING", - "VIDEO", - "VR", - "COMPUTE", - "CUSTOM"}; static const char *title[] = { "PROFILE_INDEX(NAME)", "CLOCK_TYPE(NAME)", @@ -4023,7 +4013,7 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) return result); size += sysfs_emit_at(buf, size, "%2d %14s%s:\n", - i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " "); + i, amdgpu_pp_profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " "); size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", " ", diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 01168b8955bf..2d718c30c8eb 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -277,8 +277,12 @@ static int smu_dpm_set_power_gate(void *handle, struct smu_context *smu = handle; int ret = 0; - if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) { + dev_WARN(smu->adev->dev, + "SMU uninitialized but power %s requested for %u!\n", + gate ? 
"gate" : "ungate", block_type); return -EOPNOTSUPP; + } switch (block_type) { /* @@ -1153,6 +1157,8 @@ static int smu_smc_hw_setup(struct smu_context *smu) case IP_VERSION(11, 5, 0): case IP_VERSION(11, 0, 12): ret = smu_system_features_control(smu, true); + if (ret) + dev_err(adev->dev, "Failed system features control!\n"); break; default: break; @@ -1277,8 +1283,10 @@ static int smu_smc_hw_setup(struct smu_context *smu) } ret = smu_notify_display_change(smu); - if (ret) + if (ret) { + dev_err(adev->dev, "Failed to notify display change!\n"); return ret; + } /* * Set min deep sleep dce fclk with bootup value from vbios via @@ -1286,8 +1294,6 @@ static int smu_smc_hw_setup(struct smu_context *smu) */ ret = smu_set_min_dcef_deep_sleep(smu, smu->smu_table.boot_values.dcefclk / 100); - if (ret) - return ret; return ret; } @@ -1344,7 +1350,6 @@ static int smu_hw_init(void *handle) } if (smu->is_apu) { - smu_powergate_sdma(&adev->smu, false); smu_dpm_set_vcn_enable(smu, true); smu_dpm_set_jpeg_enable(smu, true); smu_set_gfx_cgpg(&adev->smu, true); @@ -1468,7 +1473,7 @@ static int smu_disable_dpms(struct smu_context *smu) dev_err(adev->dev, "Failed to disable smu features.\n"); } - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0) && + if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) && adev->gfx.rlc.funcs->stop) adev->gfx.rlc.funcs->stop(adev); @@ -1506,10 +1511,6 @@ static int smu_hw_fini(void *handle) if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) return 0; - if (smu->is_apu) { - smu_powergate_sdma(&adev->smu, true); - } - smu_dpm_set_vcn_enable(smu, false); smu_dpm_set_jpeg_enable(smu, false); @@ -3072,6 +3073,20 @@ int smu_set_light_sbr(struct smu_context *smu, bool enable) return ret; } +int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc) +{ + int ret = -EOPNOTSUPP; + + mutex_lock(&smu->mutex); + if (smu->ppt_funcs && + smu->ppt_funcs->get_ecc_info) + ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc); + mutex_unlock(&smu->mutex); + + return ret; + +} + static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size) { struct smu_context *smu = handle; @@ -3161,3 +3176,107 @@ int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event, return ret; } + +int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size) +{ + + if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled) + return -EOPNOTSUPP; + + /* Confirm the buffer allocated is of correct size */ + if (size != smu->stb_context.stb_buf_size) + return -EINVAL; + + /* + * No need to lock smu mutex as we access STB directly through MMIO + * and not going through SMU messaging route (for now at least). + * For registers access rely on implementation internal locking. 
+ */ + return smu->ppt_funcs->stb_collect_info(smu, buf, size); +} + +#if defined(CONFIG_DEBUG_FS) + +static int smu_stb_debugfs_open(struct inode *inode, struct file *filp) +{ + struct amdgpu_device *adev = filp->f_inode->i_private; + struct smu_context *smu = &adev->smu; + unsigned char *buf; + int r; + + buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size); + if (r) + goto out; + + filp->private_data = buf; + + return 0; + +out: + kvfree(buf); + return r; +} + +static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size, + loff_t *pos) +{ + struct amdgpu_device *adev = filp->f_inode->i_private; + struct smu_context *smu = &adev->smu; + + if (!filp->private_data) + return -EINVAL; + + return simple_read_from_buffer(buf, + size, + pos, filp->private_data, + smu->stb_context.stb_buf_size); +} + +static int smu_stb_debugfs_release(struct inode *inode, struct file *filp) +{ + kvfree(filp->private_data); + filp->private_data = NULL; + + return 0; +} + +/* + * We have to define not only the read method but also + * open and release because .read returns at most PAGE_SIZE + * of data per call and so is invoked multiple times. + * We allocate the STB buffer in .open and release it + * in .release + */ +static const struct file_operations smu_stb_debugfs_fops = { + .owner = THIS_MODULE, + .open = smu_stb_debugfs_open, + .read = smu_stb_debugfs_read, + .release = smu_stb_debugfs_release, + .llseek = default_llseek, +}; + +#endif + +void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DEBUG_FS) + + struct smu_context *smu = &adev->smu; + + if (!smu->stb_context.stb_buf_size) + return; + + debugfs_create_file_size("amdgpu_smu_stb_dump", + S_IRUSR, + adev_to_drm(adev)->primary->debugfs_root, + adev, + &smu_stb_debugfs_fops, + smu->stb_context.stb_buf_size); +#endif + +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index fd1d30a93db5..58bc387fb279 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -295,16 +295,6 @@ static int arcturus_allocate_dpm_context(struct smu_context *smu) return -ENOMEM; smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context); - smu_dpm->dpm_current_power_state = kzalloc(sizeof(struct smu_power_state), GFP_KERNEL); - if (!smu_dpm->dpm_current_power_state) - return -ENOMEM; - - smu_dpm->dpm_request_power_state = kzalloc(sizeof(struct smu_power_state), GFP_KERNEL); - if (!smu_dpm->dpm_request_power_state) - return -ENOMEM; - return 0; } @@ -1389,14 +1379,6 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu, char *buf) { DpmActivityMonitorCoeffInt_t activity_monitor; - static const char *profile_name[] = { - "BOOTUP_DEFAULT", - "3D_FULL_SCREEN", - "POWER_SAVING", - "VIDEO", - "VR", - "COMPUTE", - "CUSTOM"}; static const char *title[] = { "PROFILE_INDEX(NAME)", "CLOCK_TYPE(NAME)", @@ -1453,7 +1435,7 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu, } size += sysfs_emit_at(buf, size, "%2d %14s%s\n", - i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); + i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? 
"*" : " "); if (smu_version >= 0x360d00) { size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c index cbc3f99e8573..2238ee19c222 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c @@ -309,6 +309,7 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu, { int ret = 0, size = 0; uint32_t cur_value = 0; + int i; smu_cmn_get_sysfs_buf(&buf, &size); @@ -334,8 +335,6 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu, size += sysfs_emit_at(buf, size, "VDDC: %7umV %10umV\n", CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX); break; - case SMU_GFXCLK: - case SMU_SCLK: case SMU_FCLK: case SMU_MCLK: case SMU_SOCCLK: @@ -346,6 +345,25 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu, return ret; size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value); break; + case SMU_SCLK: + case SMU_GFXCLK: + ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &cur_value); + if (ret) + return ret; + if (cur_value == CYAN_SKILLFISH_SCLK_MAX) + i = 2; + else if (cur_value == CYAN_SKILLFISH_SCLK_MIN) + i = 0; + else + i = 1; + size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", CYAN_SKILLFISH_SCLK_MIN, + i == 0 ? "*" : ""); + size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", + i == 1 ? cur_value : cyan_skillfish_sclk_default, + i == 1 ? "*" : ""); + size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", CYAN_SKILLFISH_SCLK_MAX, + i == 2 ? "*" : ""); + break; default: dev_warn(smu->adev->dev, "Unsupported clock type\n"); return ret; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 71161f6b78fe..2bb7816b245a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -1265,7 +1265,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { uint16_t *curve_settings; - int i, size = 0, ret = 0; + int i, levels, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t freq_values[3] = {0}; uint32_t mark_index = 0; @@ -1319,14 +1319,17 @@ static int navi10_print_clk_levels(struct smu_context *smu, freq_values[1] = cur_value; mark_index = cur_value == freq_values[0] ? 0 : cur_value == freq_values[2] ? 2 : 1; - if (mark_index != 1) - freq_values[1] = (freq_values[0] + freq_values[2]) / 2; - for (i = 0; i < 3; i++) { + levels = 3; + if (mark_index != 1) { + levels = 2; + freq_values[1] = freq_values[2]; + } + + for (i = 0; i < levels; i++) { size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, freq_values[i], i == mark_index ? "*" : ""); } - } break; case SMU_PCIE: @@ -1710,14 +1713,6 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf) DpmActivityMonitorCoeffInt_t activity_monitor; uint32_t i, size = 0; int16_t workload_type = 0; - static const char *profile_name[] = { - "BOOTUP_DEFAULT", - "3D_FULL_SCREEN", - "POWER_SAVING", - "VIDEO", - "VR", - "COMPUTE", - "CUSTOM"}; static const char *title[] = { "PROFILE_INDEX(NAME)", "CLOCK_TYPE(NAME)", @@ -1756,7 +1751,7 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf) } size += sysfs_emit_at(buf, size, "%2d %14s%s:\n", - i, profile_name[i], (i == smu->power_profile_mode) ? 
"*" : " "); + i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", " ", diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index a4108025fe29..777f717c37ae 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -80,6 +80,9 @@ (*member) = (smu->smu_table.driver_pptable + offsetof(PPTable_t, field));\ } while(0) +/* STB FIFO depth is in 64bit units */ +#define SIENNA_CICHLID_STB_DEPTH_UNIT_BYTES 8 + static int get_table_size(struct smu_context *smu) { if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13)) @@ -650,6 +653,8 @@ static int sienna_cichlid_allocate_dpm_context(struct smu_context *smu) return 0; } +static void sienna_cichlid_stb_init(struct smu_context *smu); + static int sienna_cichlid_init_smc_tables(struct smu_context *smu) { int ret = 0; @@ -662,6 +667,8 @@ static int sienna_cichlid_init_smc_tables(struct smu_context *smu) if (ret) return ret; + sienna_cichlid_stb_init(smu); + return smu_v11_0_init_smc_tables(smu); } @@ -1171,7 +1178,7 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t mask) { struct amdgpu_device *adev = smu->adev; - int ret = 0, size = 0; + int ret = 0; uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0; soft_min_level = mask ? (ffs(mask) - 1) : 0; @@ -1216,7 +1223,7 @@ forec_level_out: if ((clk_type == SMU_GFXCLK) || (clk_type == SMU_SCLK)) amdgpu_gfx_off_ctrl(adev, true); - return size; + return 0; } static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu) @@ -1342,14 +1349,6 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char * &(activity_monitor_external.DpmActivityMonitorCoeffInt); uint32_t i, size = 0; int16_t workload_type = 0; - static const char *profile_name[] = { - "BOOTUP_DEFAULT", - "3D_FULL_SCREEN", - "POWER_SAVING", - "VIDEO", - "VR", - "COMPUTE", - "CUSTOM"}; static const char *title[] = { "PROFILE_INDEX(NAME)", "CLOCK_TYPE(NAME)", @@ -1388,7 +1387,7 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char * } size += sysfs_emit_at(buf, size, "%2d %14s%s:\n", - i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); + i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", " ", @@ -2135,7 +2134,13 @@ static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu, static int sienna_cichlid_run_btc(struct smu_context *smu) { - return smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL); + int res; + + res = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL); + if (res) + dev_err(smu->adev->dev, "RunDcBtc failed!\n"); + + return res; } static int sienna_cichlid_baco_enter(struct smu_context *smu) @@ -3619,6 +3624,16 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu, gpu_metrics->energy_accumulator = use_metrics_v2 ? 
metrics_v2->EnergyAccumulator : metrics->EnergyAccumulator; + if (metrics->CurrGfxVoltageOffset) + gpu_metrics->voltage_gfx = + (155000 - 625 * metrics->CurrGfxVoltageOffset) / 100; + if (metrics->CurrMemVidOffset) + gpu_metrics->voltage_mem = + (155000 - 625 * metrics->CurrMemVidOffset) / 100; + if (metrics->CurrSocVoltageOffset) + gpu_metrics->voltage_soc = + (155000 - 625 * metrics->CurrSocVoltageOffset) / 100; + average_gfx_activity = use_metrics_v2 ? metrics_v2->AverageGfxActivity : metrics->AverageGfxActivity; if (average_gfx_activity <= SMU_11_0_7_GFX_BUSY_THRESHOLD) gpu_metrics->average_gfxclk_frequency = @@ -3793,6 +3808,53 @@ static int sienna_cichlid_set_mp1_state(struct smu_context *smu, return ret; } +static void sienna_cichlid_stb_init(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t reg; + + reg = RREG32_PCIE(MP1_Public | smnMP1_PMI_3_START); + smu->stb_context.enabled = REG_GET_FIELD(reg, MP1_PMI_3_START, ENABLE); + + /* STB is disabled */ + if (!smu->stb_context.enabled) + return; + + spin_lock_init(&smu->stb_context.lock); + + /* STB buffer size in bytes as a function of FIFO depth */ + reg = RREG32_PCIE(MP1_Public | smnMP1_PMI_3_FIFO); + smu->stb_context.stb_buf_size = 1 << REG_GET_FIELD(reg, MP1_PMI_3_FIFO, DEPTH); + smu->stb_context.stb_buf_size *= SIENNA_CICHLID_STB_DEPTH_UNIT_BYTES; + + dev_info(smu->adev->dev, "STB initialized to %d entries\n", + smu->stb_context.stb_buf_size / SIENNA_CICHLID_STB_DEPTH_UNIT_BYTES); + +} + +int sienna_cichlid_stb_get_data_direct(struct smu_context *smu, + void *buf, + uint32_t size) +{ + uint32_t *p = buf; + struct amdgpu_device *adev = smu->adev; + + /* No need to disable interrupts for now, as we don't take this lock from the ISR yet */ + spin_lock(&smu->stb_context.lock); + + /* + * Read the STB FIFO in 32-bit units, since that is the accessor window + * (register width) we have. 
+ */ + buf = ((char *) buf) + size; + while ((void *)p < buf) + *p++ = cpu_to_le32(RREG32_PCIE(MP1_Public | smnMP1_PMI_3)); + + spin_unlock(&smu->stb_context.lock); + + return 0; +} + static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask, .set_default_dpm_table = sienna_cichlid_set_default_dpm_table, @@ -3882,6 +3944,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .interrupt_work = smu_v11_0_interrupt_work, .gpo_control = sienna_cichlid_gpo_control, .set_mp1_state = sienna_cichlid_set_mp1_state, + .stb_collect_info = sienna_cichlid_stb_get_data_direct, }; void sienna_cichlid_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 421f38e8dada..5cb07ed227fb 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -683,6 +683,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, int i, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; + uint32_t min, max; memset(&metrics, 0, sizeof(metrics)); @@ -743,6 +744,13 @@ static int vangogh_print_clk_levels(struct smu_context *smu, if (ret) return ret; break; + case SMU_GFXCLK: + case SMU_SCLK: + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetGfxclkFrequency, 0, &cur_value); + if (ret) { + return ret; + } + break; default: break; } @@ -768,6 +776,24 @@ static int vangogh_print_clk_levels(struct smu_context *smu, if (!cur_value_match_level) size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value); break; + case SMU_GFXCLK: + case SMU_SCLK: + min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; + max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; + if (cur_value == max) + i = 2; + else if (cur_value == min) + i = 0; + else + i = 1; + size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min, + i == 0 ? "*" : ""); + size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", + i == 1 ? cur_value : VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, + i == 1 ? "*" : ""); + size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, + i == 2 ? "*" : ""); + break; default: break; } @@ -1013,14 +1039,6 @@ failed: static int vangogh_get_power_profile_mode(struct smu_context *smu, char *buf) { - static const char *profile_name[] = { - "BOOTUP_DEFAULT", - "3D_FULL_SCREEN", - "POWER_SAVING", - "VIDEO", - "VR", - "COMPUTE", - "CUSTOM"}; uint32_t i, size = 0; int16_t workload_type = 0; @@ -1040,7 +1058,7 @@ static int vangogh_get_power_profile_mode(struct smu_context *smu, continue; size += sysfs_emit_at(buf, size, "%2d %14s%s\n", - i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); + i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? 
"*" : " "); } return size; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 145f13b8c977..25c4b135f830 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -1095,14 +1095,6 @@ static int renoir_set_watermarks_table( static int renoir_get_power_profile_mode(struct smu_context *smu, char *buf) { - static const char *profile_name[] = { - "BOOTUP_DEFAULT", - "3D_FULL_SCREEN", - "POWER_SAVING", - "VIDEO", - "VR", - "COMPUTE", - "CUSTOM"}; uint32_t i, size = 0; int16_t workload_type = 0; @@ -1121,7 +1113,7 @@ static int renoir_get_power_profile_mode(struct smu_context *smu, continue; size += sysfs_emit_at(buf, size, "%2d %14s%s\n", - i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); + i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); } return size; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c index d60b8c5e8715..43028f2cd28b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c @@ -191,6 +191,9 @@ int smu_v12_0_fini_smc_tables(struct smu_context *smu) kfree(smu_table->watermarks_table); smu_table->watermarks_table = NULL; + kfree(smu_table->gpu_metrics_table); + smu_table->gpu_metrics_table = NULL; + return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 59a7d276541d..0907da022197 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -78,6 +78,12 @@ #define smnPCIE_ESM_CTRL 0x111003D0 +/* + * SMU support ECCTABLE since version 68.42.0, + * use this to check ECCTALE feature whether support + */ +#define SUPPORT_ECCTABLE_SMU_VERSION 0x00442a00 + static const struct smu_temperature_range smu13_thermal_policy[] = { {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, @@ -190,6 +196,7 @@ static const struct cmn2asic_mapping aldebaran_table_map[SMU_TABLE_COUNT] = { TAB_MAP(SMU_METRICS), TAB_MAP(DRIVER_SMU_CONFIG), TAB_MAP(I2C_COMMANDS), + TAB_MAP(ECCINFO), }; static const uint8_t aldebaran_throttler_map[] = { @@ -223,6 +230,9 @@ static int aldebaran_tables_init(struct smu_context *smu) SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); if (!smu_table->metrics_table) return -ENOMEM; @@ -235,6 +245,10 @@ static int aldebaran_tables_init(struct smu_context *smu) return -ENOMEM; } + smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL); + if (!smu_table->ecc_table) + return -ENOMEM; + return 0; } @@ -248,16 +262,6 @@ static int aldebaran_allocate_dpm_context(struct smu_context *smu) return -ENOMEM; smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context); - smu_dpm->dpm_current_power_state = kzalloc(sizeof(struct smu_power_state), - GFP_KERNEL); - if (!smu_dpm->dpm_current_power_state) - return -ENOMEM; - - smu_dpm->dpm_request_power_state = kzalloc(sizeof(struct smu_power_state), - GFP_KERNEL); - if (!smu_dpm->dpm_request_power_state) - return -ENOMEM; - return 0; } @@ -1765,6 +1769,98 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu, return sizeof(struct gpu_metrics_v1_3); 
} +static int aldebaran_check_ecc_table_support(struct smu_context *smu) +{ + uint32_t if_version = 0xff, smu_version = 0xff; + int ret = 0; + + ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); + if (ret) { + /* return not support if failed get smu_version */ + ret = -EOPNOTSUPP; + } + + if (smu_version < SUPPORT_ECCTABLE_SMU_VERSION) + ret = -EOPNOTSUPP; + + return ret; +} + +static ssize_t aldebaran_get_ecc_info(struct smu_context *smu, + void *table) +{ + struct smu_table_context *smu_table = &smu->smu_table; + EccInfoTable_t *ecc_table = NULL; + struct ecc_info_per_ch *ecc_info_per_channel = NULL; + int i, ret = 0; + struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table; + + ret = aldebaran_check_ecc_table_support(smu); + if (ret) + return ret; + + ret = smu_cmn_update_table(smu, + SMU_TABLE_ECCINFO, + 0, + smu_table->ecc_table, + false); + if (ret) { + dev_info(smu->adev->dev, "Failed to export SMU ecc table!\n"); + return ret; + } + + ecc_table = (EccInfoTable_t *)smu_table->ecc_table; + + for (i = 0; i < ALDEBARAN_UMC_CHANNEL_NUM; i++) { + ecc_info_per_channel = &(eccinfo->ecc[i]); + ecc_info_per_channel->ce_count_lo_chip = + ecc_table->EccInfo[i].ce_count_lo_chip; + ecc_info_per_channel->ce_count_hi_chip = + ecc_table->EccInfo[i].ce_count_hi_chip; + ecc_info_per_channel->mca_umc_status = + ecc_table->EccInfo[i].mca_umc_status; + ecc_info_per_channel->mca_umc_addr = + ecc_table->EccInfo[i].mca_umc_addr; + } + + return ret; +} + +static int aldebaran_mode1_reset(struct smu_context *smu) +{ + u32 smu_version, fatal_err, param; + int ret = 0; + struct amdgpu_device *adev = smu->adev; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + fatal_err = 0; + param = SMU_RESET_MODE_1; + + /* + * PM FW support SMU_MSG_GfxDeviceDriverReset from 68.07 + */ + smu_cmn_get_smc_version(smu, NULL, &smu_version); + if (smu_version < 0x00440700) { + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL); + } + else { + /* fatal error triggered by ras, PMFW supports the flag + from 68.44.0 */ + if ((smu_version >= 0x00442c00) && ras && + atomic_read(&ras->in_recovery)) + fatal_err = 1; + + param |= (fatal_err << 16); + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GfxDeviceDriverReset, param, NULL); + } + + if (!ret) + msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS); + + return ret; +} + static int aldebaran_mode2_reset(struct smu_context *smu) { u32 smu_version; @@ -1925,13 +2021,14 @@ static const struct pptable_funcs aldebaran_ppt_funcs = { .get_gpu_metrics = aldebaran_get_gpu_metrics, .mode1_reset_is_support = aldebaran_is_mode1_reset_supported, .mode2_reset_is_support = aldebaran_is_mode2_reset_supported, - .mode1_reset = smu_v13_0_mode1_reset, + .mode1_reset = aldebaran_mode1_reset, .set_mp1_state = aldebaran_set_mp1_state, .mode2_reset = aldebaran_mode2_reset, .wait_for_event = smu_v13_0_wait_for_event, .i2c_init = aldebaran_i2c_control_init, .i2c_fini = aldebaran_i2c_control_fini, .send_hbm_bad_pages_num = aldebaran_smu_send_hbm_bad_page_num, + .get_ecc_info = aldebaran_get_ecc_info, }; void aldebaran_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 35145db6eedf..677a246212f9 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -60,8 +60,6 @@ MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin"); #define SMU13_VOLTAGE_SCALE 4 -#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms - #define LINK_WIDTH_MAX 6 
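/*
 * Illustrative aside: aldebaran_check_ecc_table_support() above compares
 * the PMFW version against SUPPORT_ECCTABLE_SMU_VERSION (68.42.0, i.e.
 * 0x00442a00), and aldebaran_mode1_reset() keys off 68.07 (0x00440700)
 * and 68.44.0 (0x00442c00). smu_v13_0_check_fw_version() below splits the
 * version word as major 31:16, minor 15:8, debug 7:0; this self-contained
 * check confirms the packing behind those magic numbers.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t smu_pack_version(uint16_t major, uint8_t minor, uint8_t debug)
{
    return ((uint32_t)major << 16) | ((uint32_t)minor << 8) | debug;
}

int main(void)
{
    assert(smu_pack_version(68,  7, 0) == 0x00440700); /* GfxDeviceDriverReset message */
    assert(smu_pack_version(68, 42, 0) == 0x00442a00); /* ECCTABLE support */
    assert(smu_pack_version(68, 44, 0) == 0x00442c00); /* RAS fatal-error flag */
    printf("PMFW version gates are consistent\n");
    return 0;
}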
#define LINK_SPEED_MAX 3 @@ -198,6 +196,7 @@ int smu_v13_0_check_fw_status(struct smu_context *smu) int smu_v13_0_check_fw_version(struct smu_context *smu) { + struct amdgpu_device *adev = smu->adev; uint32_t if_version = 0xff, smu_version = 0xff; uint16_t smu_major; uint8_t smu_minor, smu_debug; @@ -210,6 +209,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu) smu_major = (smu_version >> 16) & 0xffff; smu_minor = (smu_version >> 8) & 0xff; smu_debug = (smu_version >> 0) & 0xff; + if (smu->is_apu) + adev->pm.fw_version = smu_version; switch (smu->adev->ip_versions[MP1_HWIP][0]) { case IP_VERSION(13, 0, 2): @@ -226,9 +227,6 @@ int smu_v13_0_check_fw_version(struct smu_context *smu) break; } - dev_info(smu->adev->dev, "smu fw reported version = 0x%08x (%d.%d.%d)\n", - smu_version, smu_major, smu_minor, smu_debug); - /* * 1. if_version mismatch is not critical as our fw is designed * to be backward compatible. @@ -430,8 +428,10 @@ int smu_v13_0_fini_smc_tables(struct smu_context *smu) kfree(smu_table->hardcode_pptable); smu_table->hardcode_pptable = NULL; + kfree(smu_table->ecc_table); kfree(smu_table->metrics_table); kfree(smu_table->watermarks_table); + smu_table->ecc_table = NULL; smu_table->metrics_table = NULL; smu_table->watermarks_table = NULL; smu_table->metrics_time = 0; @@ -1424,25 +1424,6 @@ int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu) return ret; } -int smu_v13_0_mode1_reset(struct smu_context *smu) -{ - u32 smu_version; - int ret = 0; - /* - * PM FW support SMU_MSG_GfxDeviceDriverReset from 68.07 - */ - smu_cmn_get_smc_version(smu, NULL, &smu_version); - if (smu_version < 0x00440700) - ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL); - else - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_1, NULL); - - if (!ret) - msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS); - - return ret; -} - static int smu_v13_0_wait_for_reset_complete(struct smu_context *smu, uint64_t event_arg) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index 8215bbf5ed7c..caf1775d48ef 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -697,6 +697,11 @@ static int yellow_carp_get_current_clk_freq(struct smu_context *smu, case SMU_FCLK: return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, value); + case SMU_GFXCLK: + case SMU_SCLK: + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetGfxclkFrequency, 0, value); + break; default: return -EINVAL; } @@ -967,6 +972,7 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu, { int i, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; + uint32_t min, max; smu_cmn_get_sysfs_buf(&buf, &size); @@ -1005,6 +1011,27 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu, cur_value == value ? "*" : ""); } break; + case SMU_GFXCLK: + case SMU_SCLK: + ret = yellow_carp_get_current_clk_freq(smu, clk_type, &cur_value); + if (ret) + goto print_clk_out; + min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; + max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; + if (cur_value == max) + i = 2; + else if (cur_value == min) + i = 0; + else + i = 1; + size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min, + i == 0 ? "*" : ""); + size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", + i == 1 ? 
cur_value : YELLOW_CARP_UMD_PSTATE_GFXCLK, + i == 1 ? "*" : ""); + size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, + i == 2 ? "*" : ""); + break; default: break; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h index b3ad8352c68a..a9205a8ea3ad 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h @@ -24,5 +24,6 @@ #define __YELLOW_CARP_PPT_H__ extern void yellow_carp_set_ppt_funcs(struct smu_context *smu); +#define YELLOW_CARP_UMD_PSTATE_GFXCLK 1100 #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 843d2cbfc71d..ee1a312fd497 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -94,10 +94,10 @@ static void smu_cmn_read_arg(struct smu_context *smu, /** * __smu_cmn_poll_stat -- poll for a status from the SMU - * smu: a pointer to SMU context + * @smu: a pointer to SMU context * * Returns the status of the SMU, which could be, - * 0, the SMU is busy with your previous command; + * 0, the SMU is busy with your command; * 1, execution status: success, execution result: success; * 0xFF, execution status: success, execution result: failure; * 0xFE, unknown command; @@ -139,9 +139,13 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu, const char *message = smu_get_message_name(smu, msg); switch (reg_c2pmsg_90) { - case SMU_RESP_NONE: + case SMU_RESP_NONE: { + u32 msg_idx = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66); + u32 prm = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82); dev_err_ratelimited(adev->dev, - "SMU: I'm not done with your previous command!"); + "SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X", + msg_idx, prm); + } break; case SMU_RESP_OK: /* The SMU executed the command. 
It completed with a @@ -253,10 +257,11 @@ int smu_cmn_send_msg_without_waiting(struct smu_context *smu, uint16_t msg_index, uint32_t param) { + struct amdgpu_device *adev = smu->adev; u32 reg; int res; - if (smu->adev->no_hw_access) + if (adev->no_hw_access) return 0; reg = __smu_cmn_poll_stat(smu); @@ -268,6 +273,12 @@ int smu_cmn_send_msg_without_waiting(struct smu_context *smu, __smu_cmn_send_msg(smu, msg_index, param); res = 0; Out: + if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && + res && (res != -ETIME)) { + amdgpu_device_halt(adev); + WARN_ON(1); + } + return res; } @@ -284,9 +295,18 @@ Out: int smu_cmn_wait_for_response(struct smu_context *smu) { u32 reg; + int res; reg = __smu_cmn_poll_stat(smu); - return __smu_cmn_reg2errno(smu, reg); + res = __smu_cmn_reg2errno(smu, reg); + + if (unlikely(smu->adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && + res && (res != -ETIME)) { + amdgpu_device_halt(smu->adev); + WARN_ON(1); + } + + return res; } /** @@ -324,10 +344,11 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu, uint32_t param, uint32_t *read_arg) { + struct amdgpu_device *adev = smu->adev; int res, index; u32 reg; - if (smu->adev->no_hw_access) + if (adev->no_hw_access) return 0; index = smu_cmn_to_asic_specific_index(smu, @@ -348,11 +369,16 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu, __smu_cmn_send_msg(smu, (uint16_t) index, param); reg = __smu_cmn_poll_stat(smu); res = __smu_cmn_reg2errno(smu, reg); - if (res == -EREMOTEIO) + if (res != 0) __smu_cmn_reg_print_error(smu, reg, index, param, msg); if (read_arg) smu_cmn_read_arg(smu, read_arg); Out: + if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) { + amdgpu_device_halt(adev); + WARN_ON(1); + } + mutex_unlock(&smu->message_lock); return res; } diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig index 3a9e966e0e78..58a242871b28 100644 --- a/drivers/gpu/drm/arm/Kconfig +++ b/drivers/gpu/drm/arm/Kconfig @@ -6,7 +6,6 @@ config DRM_HDLCD depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST) depends on COMMON_CLK select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER help Choose this option if you have an ARM High Definition Colour LCD controller. 
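The kernel-doc fixed above enumerates the SMU response-register values (0: still busy, 1: success, 0xFF: execution failure, 0xFE: unknown command), and the new SMU_DEBUG_HALT_ON_ERROR paths deliberately exempt -ETIME, the no-response case. Below is a minimal sketch of a status-to-errno translation consistent with that table; the authoritative mapping is __smu_cmn_reg2errno(), which this patch does not show, so the macro names for the 0xFF/0xFE codes and the exact errno choices are assumptions.

#include <errno.h>
#include <stdint.h>

#define SMU_RESP_NONE        0x0  /* still busy with the previous command */
#define SMU_RESP_OK          0x1  /* executed, result: success */
#define SMU_RESP_CMD_FAIL    0xFF /* executed, result: failure (assumed name) */
#define SMU_RESP_CMD_UNKNOWN 0xFE /* unknown command (assumed name) */

/* Sketch only: -ETIME marks "no response yet", which the
 * halt-on-error checks above explicitly skip over. */
int smu_resp_to_errno(uint32_t reg)
{
    switch (reg) {
    case SMU_RESP_NONE:
        return -ETIME;
    case SMU_RESP_OK:
        return 0;
    case SMU_RESP_CMD_FAIL:
        return -EIO;
    case SMU_RESP_CMD_UNKNOWN:
        return -EOPNOTSUPP;
    default:
        return -EIO;
    }
}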
@@ -27,7 +26,6 @@ config DRM_MALI_DISPLAY depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST) depends on COMMON_CLK select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER select VIDEOMODE_HELPERS help diff --git a/drivers/gpu/drm/arm/display/Kconfig b/drivers/gpu/drm/arm/display/Kconfig index cec0639e3aa1..e91598b60781 100644 --- a/drivers/gpu/drm/arm/display/Kconfig +++ b/drivers/gpu/drm/arm/display/Kconfig @@ -4,7 +4,6 @@ config DRM_KOMEDA depends on DRM && OF depends on COMMON_CLK select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER select VIDEOMODE_HELPERS help diff --git a/drivers/gpu/drm/aspeed/Kconfig b/drivers/gpu/drm/aspeed/Kconfig index 5e95bcea43e9..024ccab14f88 100644 --- a/drivers/gpu/drm/aspeed/Kconfig +++ b/drivers/gpu/drm/aspeed/Kconfig @@ -5,7 +5,7 @@ config DRM_ASPEED_GFX depends on (COMPILE_TEST || ARCH_ASPEED) depends on MMU select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER select DMA_CMA if HAVE_DMA_CONTIGUOUS select CMA if HAVE_DMA_CONTIGUOUS select MFD_SYSCON diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index b53fee6f1c17..65f172807a0d 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -291,7 +291,7 @@ vga_pw_show(struct device *dev, struct device_attribute *attr, char *buf) if (rc) return rc; - return sprintf(buf, "%u\n", reg & 1); + return sprintf(buf, "%u\n", reg); } static DEVICE_ATTR_RO(vga_pw); diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile index 438a2d05b115..21f71160bc3e 100644 --- a/drivers/gpu/drm/ast/Makefile +++ b/drivers/gpu/drm/ast/Makefile @@ -3,6 +3,6 @@ # Makefile for the drm device driver. This driver provides support for the # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 
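The aspeed hunk above makes vga_pw report the whole scratch-register value instead of masking it down to bit 0, so userspace that only wants the VGA password state now extracts the low bit itself. A minimal reader follows; the sysfs path is a placeholder, since the real location depends on the platform device name.

#include <stdio.h>

int main(void)
{
    /* Placeholder path; the attribute lives under the aspeed-gfx
     * platform device's sysfs directory. */
    const char *path = "/sys/devices/platform/gfx/vga_pw";
    unsigned long reg;
    FILE *f = fopen(path, "r");

    if (!f) {
        perror(path);
        return 1;
    }
    if (fscanf(f, "%lu", &reg) != 1) {
        fclose(f);
        fprintf(stderr, "failed to parse %s\n", path);
        return 1;
    }
    fclose(f);

    /* The attribute now prints the full register; bit 0 still
     * carries the VGA password state. */
    printf("raw=%lu vga_pw=%lu\n", reg, reg & 1);
    return 0;
}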
-ast-y := ast_drv.o ast_main.o ast_mm.o ast_mode.o ast_post.o ast_dp501.o +ast-y := ast_drv.o ast_i2c.o ast_main.o ast_mm.o ast_mode.o ast_post.o ast_dp501.o obj-$(CONFIG_DRM_AST) := ast.o diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 86d5cd7b6318..6d8613f6fe1c 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -26,7 +26,6 @@ * Authors: Dave Airlie <airlied@redhat.com> */ -#include <linux/console.h> #include <linux/module.h> #include <linux/pci.h> @@ -233,7 +232,7 @@ static struct pci_driver ast_pci_driver = { static int __init ast_init(void) { - if (vgacon_text_force() && ast_modeset == -1) + if (drm_firmware_drivers_only() && ast_modeset == -1) return -EINVAL; if (ast_modeset == 0) diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 2cfce7dc95af..00bfa41ff7cb 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -357,4 +357,7 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata); u8 ast_get_dp501_max_clk(struct drm_device *dev); void ast_init_3rdtx(struct drm_device *dev); +/* ast_i2c.c */ +struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev); + #endif diff --git a/drivers/gpu/drm/ast/ast_i2c.c b/drivers/gpu/drm/ast/ast_i2c.c new file mode 100644 index 000000000000..93e91c36d649 --- /dev/null +++ b/drivers/gpu/drm/ast/ast_i2c.c @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: MIT +/* + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + */ + +#include <drm/drm_managed.h> +#include <drm/drm_print.h> + +#include "ast_drv.h" + +static void ast_i2c_setsda(void *i2c_priv, int data) +{ + struct ast_i2c_chan *i2c = i2c_priv; + struct ast_private *ast = to_ast_private(i2c->dev); + int i; + u8 ujcrb7, jtemp; + + for (i = 0; i < 0x10000; i++) { + ujcrb7 = ((data & 0x01) ? 0 : 1) << 2; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, ujcrb7); + jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04); + if (ujcrb7 == jtemp) + break; + } +} + +static void ast_i2c_setscl(void *i2c_priv, int clock) +{ + struct ast_i2c_chan *i2c = i2c_priv; + struct ast_private *ast = to_ast_private(i2c->dev); + int i; + u8 ujcrb7, jtemp; + + for (i = 0; i < 0x10000; i++) { + ujcrb7 = ((clock & 0x01) ? 
0 : 1); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, ujcrb7); + jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01); + if (ujcrb7 == jtemp) + break; + } +} + +static int ast_i2c_getsda(void *i2c_priv) +{ + struct ast_i2c_chan *i2c = i2c_priv; + struct ast_private *ast = to_ast_private(i2c->dev); + uint32_t val, val2, count, pass; + + count = 0; + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01; + do { + val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01; + if (val == val2) { + pass++; + } else { + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01; + } + } while ((pass < 5) && (count++ < 0x10000)); + + return val & 1 ? 1 : 0; +} + +static int ast_i2c_getscl(void *i2c_priv) +{ + struct ast_i2c_chan *i2c = i2c_priv; + struct ast_private *ast = to_ast_private(i2c->dev); + uint32_t val, val2, count, pass; + + count = 0; + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01; + do { + val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01; + if (val == val2) { + pass++; + } else { + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01; + } + } while ((pass < 5) && (count++ < 0x10000)); + + return val & 1 ? 1 : 0; +} + +static void ast_i2c_release(struct drm_device *dev, void *res) +{ + struct ast_i2c_chan *i2c = res; + + i2c_del_adapter(&i2c->adapter); + kfree(i2c); +} + +struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev) +{ + struct ast_i2c_chan *i2c; + int ret; + + i2c = kzalloc(sizeof(struct ast_i2c_chan), GFP_KERNEL); + if (!i2c) + return NULL; + + i2c->adapter.owner = THIS_MODULE; + i2c->adapter.class = I2C_CLASS_DDC; + i2c->adapter.dev.parent = dev->dev; + i2c->dev = dev; + i2c_set_adapdata(&i2c->adapter, i2c); + snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), + "AST i2c bit bus"); + i2c->adapter.algo_data = &i2c->bit; + + i2c->bit.udelay = 20; + i2c->bit.timeout = 2; + i2c->bit.data = i2c; + i2c->bit.setsda = ast_i2c_setsda; + i2c->bit.setscl = ast_i2c_setscl; + i2c->bit.getsda = ast_i2c_getsda; + i2c->bit.getscl = ast_i2c_getscl; + ret = i2c_bit_add_bus(&i2c->adapter); + if (ret) { + drm_err(dev, "Failed to register bit i2c\n"); + goto out_kfree; + } + + ret = drmm_add_action_or_reset(dev, ast_i2c_release, i2c); + if (ret) + return NULL; + return i2c; + +out_kfree: + kfree(i2c); + return NULL; +} diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 1e30eaeb0e1b..44c2aafcb7c2 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -47,9 +47,6 @@ #include "ast_drv.h" #include "ast_tables.h" -static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev); -static void ast_i2c_destroy(struct ast_i2c_chan *i2c); - static inline void ast_load_palette_index(struct ast_private *ast, u8 index, u8 red, u8 green, u8 blue) @@ -1210,9 +1207,9 @@ static int ast_get_modes(struct drm_connector *connector) { struct ast_connector *ast_connector = to_ast_connector(connector); struct ast_private *ast = to_ast_private(connector->dev); - struct edid *edid; - int ret; + struct edid *edid = NULL; bool flags = false; + int ret; if (ast->tx_chip_type == AST_TX_DP501) { ast->dp501_maxclk = 0xff; @@ -1226,7 +1223,7 @@ static int ast_get_modes(struct drm_connector *connector) else kfree(edid); } - if (!flags) + if (!flags && ast_connector->i2c) edid = drm_get_edid(connector, &ast_connector->i2c->adapter); 
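/*
 * Illustrative aside: ast_i2c_getsda() and ast_i2c_getscl(), moved
 * verbatim into ast_i2c.c above, only accept a sampled DDC line level
 * after five identical consecutive reads, bounded at 0x10000 attempts,
 * to filter glitches on the bit-banged bus. The debounce in isolation,
 * with the CRTC register read replaced by a stub:
 */
#include <stdint.h>

static int read_line(void)
{
    return 1; /* stub standing in for ast_get_index_reg_mask() */
}

static int debounced_read_line(void)
{
    uint32_t val, val2, count = 0, pass = 0;

    val = read_line();
    do {
        val2 = read_line();
        if (val == val2) {
            pass++;
        } else {
            pass = 0;          /* glitch: restart the streak */
            val = read_line();
        }
    } while ((pass < 5) && (count++ < 0x10000));

    return val & 1;
}

int main(void)
{
    return debounced_read_line() ? 0 : 1;
}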
if (edid) { drm_connector_update_edid_property(&ast_connector->base, edid); @@ -1300,14 +1297,6 @@ static enum drm_mode_status ast_mode_valid(struct drm_connector *connector, return flags; } -static void ast_connector_destroy(struct drm_connector *connector) -{ - struct ast_connector *ast_connector = to_ast_connector(connector); - - ast_i2c_destroy(ast_connector->i2c); - drm_connector_cleanup(connector); -} - static const struct drm_connector_helper_funcs ast_connector_helper_funcs = { .get_modes = ast_get_modes, .mode_valid = ast_mode_valid, @@ -1316,7 +1305,7 @@ static const struct drm_connector_helper_funcs ast_connector_helper_funcs = { static const struct drm_connector_funcs ast_connector_funcs = { .reset = drm_atomic_helper_connector_reset, .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = ast_connector_destroy, + .destroy = drm_connector_cleanup, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; @@ -1332,10 +1321,13 @@ static int ast_connector_init(struct drm_device *dev) if (!ast_connector->i2c) drm_err(dev, "failed to add ddc bus for connector\n"); - drm_connector_init_with_ddc(dev, connector, - &ast_connector_funcs, - DRM_MODE_CONNECTOR_VGA, - &ast_connector->i2c->adapter); + if (ast_connector->i2c) + drm_connector_init_with_ddc(dev, connector, &ast_connector_funcs, + DRM_MODE_CONNECTOR_VGA, + &ast_connector->i2c->adapter); + else + drm_connector_init(dev, connector, &ast_connector_funcs, + DRM_MODE_CONNECTOR_VGA); drm_connector_helper_add(connector, &ast_connector_helper_funcs); @@ -1413,124 +1405,3 @@ int ast_mode_config_init(struct ast_private *ast) return 0; } - -static int get_clock(void *i2c_priv) -{ - struct ast_i2c_chan *i2c = i2c_priv; - struct ast_private *ast = to_ast_private(i2c->dev); - uint32_t val, val2, count, pass; - - count = 0; - pass = 0; - val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01; - do { - val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01; - if (val == val2) { - pass++; - } else { - pass = 0; - val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01; - } - } while ((pass < 5) && (count++ < 0x10000)); - - return val & 1 ? 1 : 0; -} - -static int get_data(void *i2c_priv) -{ - struct ast_i2c_chan *i2c = i2c_priv; - struct ast_private *ast = to_ast_private(i2c->dev); - uint32_t val, val2, count, pass; - - count = 0; - pass = 0; - val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01; - do { - val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01; - if (val == val2) { - pass++; - } else { - pass = 0; - val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01; - } - } while ((pass < 5) && (count++ < 0x10000)); - - return val & 1 ? 1 : 0; -} - -static void set_clock(void *i2c_priv, int clock) -{ - struct ast_i2c_chan *i2c = i2c_priv; - struct ast_private *ast = to_ast_private(i2c->dev); - int i; - u8 ujcrb7, jtemp; - - for (i = 0; i < 0x10000; i++) { - ujcrb7 = ((clock & 0x01) ? 0 : 1); - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, ujcrb7); - jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01); - if (ujcrb7 == jtemp) - break; - } -} - -static void set_data(void *i2c_priv, int data) -{ - struct ast_i2c_chan *i2c = i2c_priv; - struct ast_private *ast = to_ast_private(i2c->dev); - int i; - u8 ujcrb7, jtemp; - - for (i = 0; i < 0x10000; i++) { - ujcrb7 = ((data & 0x01) ? 
0 : 1) << 2; - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, ujcrb7); - jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04); - if (ujcrb7 == jtemp) - break; - } -} - -static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev) -{ - struct ast_i2c_chan *i2c; - int ret; - - i2c = kzalloc(sizeof(struct ast_i2c_chan), GFP_KERNEL); - if (!i2c) - return NULL; - - i2c->adapter.owner = THIS_MODULE; - i2c->adapter.class = I2C_CLASS_DDC; - i2c->adapter.dev.parent = dev->dev; - i2c->dev = dev; - i2c_set_adapdata(&i2c->adapter, i2c); - snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), - "AST i2c bit bus"); - i2c->adapter.algo_data = &i2c->bit; - - i2c->bit.udelay = 20; - i2c->bit.timeout = 2; - i2c->bit.data = i2c; - i2c->bit.setsda = set_data; - i2c->bit.setscl = set_clock; - i2c->bit.getsda = get_data; - i2c->bit.getscl = get_clock; - ret = i2c_bit_add_bus(&i2c->adapter); - if (ret) { - drm_err(dev, "Failed to register bit i2c\n"); - goto out_free; - } - - return i2c; -out_free: - kfree(i2c); - return NULL; -} - -static void ast_i2c_destroy(struct ast_i2c_chan *i2c) -{ - if (!i2c) - return; - i2c_del_adapter(&i2c->adapter); - kfree(i2c); -} diff --git a/drivers/gpu/drm/atmel-hlcdc/Kconfig b/drivers/gpu/drm/atmel-hlcdc/Kconfig index 5f67f001553b..8ae679f1a518 100644 --- a/drivers/gpu/drm/atmel-hlcdc/Kconfig +++ b/drivers/gpu/drm/atmel-hlcdc/Kconfig @@ -4,7 +4,6 @@ config DRM_ATMEL_HLCDC depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC && ARM select DRM_GEM_CMA_HELPER select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_PANEL help Choose this option if you have an ATMEL SoC with an HLCDC display diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 431b6e12a81f..61db5a66b493 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -182,6 +182,7 @@ config DRM_PARADE_PS8622 config DRM_PARADE_PS8640 tristate "Parade PS8640 MIPI DSI to eDP Converter" depends on OF + select DRM_DP_AUX_BUS select DRM_KMS_HELPER select DRM_MIPI_DSI select DRM_PANEL diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h index 05e3abb5a0c9..592ecfcf00ca 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511.h +++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h @@ -401,7 +401,6 @@ void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode); int adv7533_patch_registers(struct adv7511 *adv); int adv7533_patch_cec_registers(struct adv7511 *adv); int adv7533_attach_dsi(struct adv7511 *adv); -void adv7533_detach_dsi(struct adv7511 *adv); int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv); #ifdef CONFIG_DRM_I2C_ADV7511_AUDIO diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 76555ae64e9c..f8e5da148599 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -910,9 +910,6 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge, return ret; } - if (adv->type == ADV7533 || adv->type == ADV7535) - ret = adv7533_attach_dsi(adv); - if (adv->i2c_main->irq) regmap_write(adv->regmap, ADV7511_REG_INT_ENABLE(0), ADV7511_INT0_HPD); @@ -1288,8 +1285,18 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) drm_bridge_add(&adv7511->bridge); adv7511_audio_init(dev, adv7511); + + if (adv7511->type == ADV7533 || adv7511->type == ADV7535) { + ret = adv7533_attach_dsi(adv7511); + if (ret) + goto err_unregister_audio; + } + return 0; 
+err_unregister_audio: + adv7511_audio_exit(adv7511); + drm_bridge_remove(&adv7511->bridge); err_unregister_cec: i2c_unregister_device(adv7511->i2c_cec); clk_disable_unprepare(adv7511->cec_clk); @@ -1307,8 +1314,6 @@ static int adv7511_remove(struct i2c_client *i2c) { struct adv7511 *adv7511 = i2c_get_clientdata(i2c); - if (adv7511->type == ADV7533 || adv7511->type == ADV7535) - adv7533_detach_dsi(adv7511); i2c_unregister_device(adv7511->i2c_cec); clk_disable_unprepare(adv7511->cec_clk); diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c index 59d718bde8c4..eb7579dec40a 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7533.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c @@ -153,11 +153,10 @@ int adv7533_attach_dsi(struct adv7511 *adv) return -EPROBE_DEFER; } - dsi = mipi_dsi_device_register_full(host, &info); + dsi = devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) { dev_err(dev, "failed to create dsi device\n"); - ret = PTR_ERR(dsi); - goto err_dsi_device; + return PTR_ERR(dsi); } adv->dsi = dsi; @@ -167,24 +166,13 @@ int adv7533_attach_dsi(struct adv7511 *adv) dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_VIDEO_HSE; - ret = mipi_dsi_attach(dsi); + ret = devm_mipi_dsi_attach(dev, dsi); if (ret < 0) { dev_err(dev, "failed to attach dsi to host\n"); - goto err_dsi_attach; + return ret; } return 0; - -err_dsi_attach: - mipi_dsi_device_unregister(dsi); -err_dsi_device: - return ret; -} - -void adv7533_detach_dsi(struct adv7511 *adv) -{ - mipi_dsi_detach(adv->dsi); - mipi_dsi_device_unregister(adv->dsi); } int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv) diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c index cab6c8b92efd..6a4f20fccf84 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c @@ -998,11 +998,21 @@ int analogix_dp_send_psr_spd(struct analogix_dp_device *dp, if (!blocking) return 0; + /* + * db[1]!=0: entering PSR, wait for fully active remote frame buffer. + * db[1]==0: exiting PSR, wait for either + * (a) ACTIVE_RESYNC - the sink "must display the + * incoming active frames from the Source device with no visible + * glitches and/or artifacts", even though timings may still be + * re-synchronizing; or + * (b) INACTIVE - the transition is fully complete. 
+ */ ret = readx_poll_timeout(analogix_dp_get_psr_status, dp, psr_status, psr_status >= 0 && ((vsc->db[1] && psr_status == DP_PSR_SINK_ACTIVE_RFB) || - (!vsc->db[1] && psr_status == DP_PSR_SINK_INACTIVE)), 1500, - DP_TIMEOUT_PSR_LOOP_MS * 1000); + (!vsc->db[1] && (psr_status == DP_PSR_SINK_ACTIVE_RESYNC || + psr_status == DP_PSR_SINK_INACTIVE))), + 1500, DP_TIMEOUT_PSR_LOOP_MS * 1000); if (ret) { dev_warn(dp->dev, "Failed to apply PSR %d\n", ret); return ret; diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index 1a871f6b6822..2346dbcc505f 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -32,6 +32,8 @@ #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#include <media/v4l2-fwnode.h> +#include <sound/hdmi-codec.h> #include <video/display_timing.h> #include "anx7625.h" @@ -166,6 +168,20 @@ static int anx7625_write_and_or(struct anx7625_data *ctx, offset, (val & and_mask) | (or_mask)); } +static int anx7625_config_bit_matrix(struct anx7625_data *ctx) +{ + int i, ret; + + ret = anx7625_write_or(ctx, ctx->i2c.tx_p2_client, + AUDIO_CONTROL_REGISTER, 0x80); + for (i = 0; i < 13; i++) + ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client, + VIDEO_BIT_MATRIX_12 + i, + 0x18 + i); + + return ret; +} + static int anx7625_read_ctrl_status_p0(struct anx7625_data *ctx) { return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, AP_AUX_CTRL_STATUS); @@ -191,10 +207,10 @@ static int wait_aux_op_finish(struct anx7625_data *ctx) AP_AUX_CTRL_STATUS); if (val < 0 || (val & 0x0F)) { DRM_DEV_ERROR(dev, "aux status %02x\n", val); - val = -EIO; + return -EIO; } - return val; + return 0; } static int anx7625_video_mute_control(struct anx7625_data *ctx, @@ -221,38 +237,6 @@ static int anx7625_video_mute_control(struct anx7625_data *ctx, return ret; } -static int anx7625_config_audio_input(struct anx7625_data *ctx) -{ - struct device *dev = &ctx->client->dev; - int ret; - - /* Channel num */ - ret = anx7625_reg_write(ctx, ctx->i2c.tx_p2_client, - AUDIO_CHANNEL_STATUS_6, I2S_CH_2 << 5); - - /* FS */ - ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client, - AUDIO_CHANNEL_STATUS_4, - 0xf0, AUDIO_FS_48K); - /* Word length */ - ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client, - AUDIO_CHANNEL_STATUS_5, - 0xf0, AUDIO_W_LEN_24_24MAX); - /* I2S */ - ret |= anx7625_write_or(ctx, ctx->i2c.tx_p2_client, - AUDIO_CHANNEL_STATUS_6, I2S_SLAVE_MODE); - ret |= anx7625_write_and(ctx, ctx->i2c.tx_p2_client, - AUDIO_CONTROL_REGISTER, ~TDM_TIMING_MODE); - /* Audio change flag */ - ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client, - AP_AV_STATUS, AP_AUDIO_CHG); - - if (ret < 0) - DRM_DEV_ERROR(dev, "fail to config audio.\n"); - - return ret; -} - /* Reduction of fraction a/b */ static void anx7625_reduction_of_a_fraction(unsigned long *a, unsigned long *b) { @@ -431,7 +415,7 @@ static int anx7625_dsi_video_timing_config(struct anx7625_data *ctx) ret |= anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_LANE_CTRL_0, 0xfc); ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, - MIPI_LANE_CTRL_0, 3); + MIPI_LANE_CTRL_0, ctx->pdata.mipi_lanes - 1); /* Htotal */ htotal = ctx->dt.hactive.min + ctx->dt.hfront_porch.min + @@ -615,6 +599,76 @@ static int anx7625_dsi_config(struct anx7625_data *ctx) return ret; } +static int anx7625_api_dpi_config(struct anx7625_data *ctx) +{ + struct device *dev = &ctx->client->dev; + u16 freq = ctx->dt.pixelclock.min / 1000; + int ret; + + /* configure pixel clock */ + ret = 
anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, + PIXEL_CLOCK_L, freq & 0xFF); + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, + PIXEL_CLOCK_H, (freq >> 8)); + + /* set DPI mode */ + /* set to DPI PLL module sel */ + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, + MIPI_DIGITAL_PLL_9, 0x20); + /* power down MIPI */ + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, + MIPI_LANE_CTRL_10, 0x08); + /* enable DPI mode */ + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, + MIPI_DIGITAL_PLL_18, 0x1C); + /* set first edge */ + ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client, + VIDEO_CONTROL_0, 0x06); + if (ret < 0) + DRM_DEV_ERROR(dev, "IO error : dpi phy set failed.\n"); + + return ret; +} + +static int anx7625_dpi_config(struct anx7625_data *ctx) +{ + struct device *dev = &ctx->client->dev; + int ret; + + DRM_DEV_DEBUG_DRIVER(dev, "config dpi\n"); + + /* DSC disable */ + ret = anx7625_write_and(ctx, ctx->i2c.rx_p0_client, + R_DSC_CTRL_0, ~DSC_EN); + if (ret < 0) { + DRM_DEV_ERROR(dev, "IO error : disable dsc failed.\n"); + return ret; + } + + ret = anx7625_config_bit_matrix(ctx); + if (ret < 0) { + DRM_DEV_ERROR(dev, "config bit matrix failed.\n"); + return ret; + } + + ret = anx7625_api_dpi_config(ctx); + if (ret < 0) { + DRM_DEV_ERROR(dev, "mipi phy(dpi) setup failed.\n"); + return ret; + } + + /* set MIPI RX EN */ + ret = anx7625_write_or(ctx, ctx->i2c.rx_p0_client, + AP_AV_STATUS, AP_MIPI_RX_EN); + /* clear mute flag */ + ret |= anx7625_write_and(ctx, ctx->i2c.rx_p0_client, + AP_AV_STATUS, (u8)~AP_MIPI_MUTE); + if (ret < 0) + DRM_DEV_ERROR(dev, "IO error : enable mipi rx failed.\n"); + + return ret; +} + static void anx7625_dp_start(struct anx7625_data *ctx) { int ret; @@ -625,9 +679,10 @@ static void anx7625_dp_start(struct anx7625_data *ctx) return; } - anx7625_config_audio_input(ctx); - - ret = anx7625_dsi_config(ctx); + if (ctx->pdata.is_dpi) + ret = anx7625_dpi_config(ctx); + else + ret = anx7625_dsi_config(ctx); if (ret < 0) DRM_DEV_ERROR(dev, "MIPI phy setup error.\n"); @@ -795,7 +850,7 @@ static int sp_tx_edid_read(struct anx7625_data *ctx, int count, blocks_num; u8 pblock_buf[MAX_DPCD_BUFFER_SIZE]; u8 i, j; - u8 g_edid_break = 0; + int g_edid_break = 0; int ret; struct device *dev = &ctx->client->dev; @@ -826,7 +881,7 @@ static int sp_tx_edid_read(struct anx7625_data *ctx, g_edid_break = edid_read(ctx, offset, pblock_buf); - if (g_edid_break) + if (g_edid_break < 0) break; memcpy(&pedid_blocks_buf[offset], @@ -1075,6 +1130,7 @@ static void anx7625_start_dp_work(struct anx7625_data *ctx) return; } + ctx->hpd_status = 1; ctx->hpd_high_cnt++; /* Not support HDCP */ @@ -1084,8 +1140,10 @@ static void anx7625_start_dp_work(struct anx7625_data *ctx) ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xec, 0x10); /* Interrupt for DRM */ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xff, 0x01); - if (ret < 0) + if (ret < 0) { + DRM_DEV_ERROR(dev, "fail to setting HDCP/auth\n"); return; + } ret = anx7625_reg_read(ctx, ctx->i2c.rx_p1_client, 0x86); if (ret < 0) @@ -1104,6 +1162,10 @@ static void anx7625_hpd_polling(struct anx7625_data *ctx) int ret, val; struct device *dev = &ctx->client->dev; + /* Interrupt mode, no need poll HPD status, just return */ + if (ctx->pdata.intp_irq) + return; + ret = readx_poll_timeout(anx7625_read_hpd_status_p0, ctx, val, ((val & HPD_STATUS) || (val < 0)), @@ -1131,6 +1193,21 @@ static void anx7625_remove_edid(struct anx7625_data *ctx) ctx->slimport_edid_p.edid_block_num = -1; } +static void anx7625_dp_adjust_swing(struct 
anx7625_data *ctx) +{ + int i; + + for (i = 0; i < ctx->pdata.dp_lane0_swing_reg_cnt; i++) + anx7625_reg_write(ctx, ctx->i2c.tx_p1_client, + DP_TX_LANE0_SWING_REG0 + i, + ctx->pdata.lane0_reg_data[i] & 0xFF); + + for (i = 0; i < ctx->pdata.dp_lane1_swing_reg_cnt; i++) + anx7625_reg_write(ctx, ctx->i2c.tx_p1_client, + DP_TX_LANE1_SWING_REG0 + i, + ctx->pdata.lane1_reg_data[i] & 0xFF); +} + static void dp_hpd_change_handler(struct anx7625_data *ctx, bool on) { struct device *dev = &ctx->client->dev; @@ -1146,9 +1223,8 @@ static void dp_hpd_change_handler(struct anx7625_data *ctx, bool on) } else { DRM_DEV_DEBUG_DRIVER(dev, " HPD high\n"); anx7625_start_dp_work(ctx); + anx7625_dp_adjust_swing(ctx); } - - ctx->hpd_status = 1; } static int anx7625_hpd_change_detect(struct anx7625_data *ctx) @@ -1225,20 +1301,75 @@ static irqreturn_t anx7625_intr_hpd_isr(int irq, void *data) return IRQ_HANDLED; } +static int anx7625_get_swing_setting(struct device *dev, + struct anx7625_platform_data *pdata) +{ + int num_regs; + + if (of_get_property(dev->of_node, + "analogix,lane0-swing", &num_regs)) { + if (num_regs > DP_TX_SWING_REG_CNT) + num_regs = DP_TX_SWING_REG_CNT; + + pdata->dp_lane0_swing_reg_cnt = num_regs; + of_property_read_u32_array(dev->of_node, "analogix,lane0-swing", + pdata->lane0_reg_data, num_regs); + } + + if (of_get_property(dev->of_node, + "analogix,lane1-swing", &num_regs)) { + if (num_regs > DP_TX_SWING_REG_CNT) + num_regs = DP_TX_SWING_REG_CNT; + + pdata->dp_lane1_swing_reg_cnt = num_regs; + of_property_read_u32_array(dev->of_node, "analogix,lane1-swing", + pdata->lane1_reg_data, num_regs); + } + + return 0; +} + static int anx7625_parse_dt(struct device *dev, struct anx7625_platform_data *pdata) { - struct device_node *np = dev->of_node; + struct device_node *np = dev->of_node, *ep0; struct drm_panel *panel; int ret; + int bus_type, mipi_lanes; + + anx7625_get_swing_setting(dev, pdata); + pdata->is_dpi = 1; /* default dpi mode */ pdata->mipi_host_node = of_graph_get_remote_node(np, 0, 0); if (!pdata->mipi_host_node) { DRM_DEV_ERROR(dev, "fail to get internal panel.\n"); return -ENODEV; } - DRM_DEV_DEBUG_DRIVER(dev, "found dsi host node.\n"); + bus_type = V4L2_FWNODE_BUS_TYPE_PARALLEL; + mipi_lanes = MAX_LANES_SUPPORT; + ep0 = of_graph_get_endpoint_by_regs(np, 0, 0); + if (ep0) { + if (of_property_read_u32(ep0, "bus-type", &bus_type)) + bus_type = 0; + + mipi_lanes = of_property_count_u32_elems(ep0, "data-lanes"); + } + + if (bus_type == V4L2_FWNODE_BUS_TYPE_PARALLEL) /* bus type is Parallel(DSI) */ + pdata->is_dpi = 0; + + pdata->mipi_lanes = mipi_lanes; + if (pdata->mipi_lanes > MAX_LANES_SUPPORT || pdata->mipi_lanes <= 0) + pdata->mipi_lanes = MAX_LANES_SUPPORT; + + if (pdata->is_dpi) + DRM_DEV_DEBUG_DRIVER(dev, "found MIPI DPI host node.\n"); + else + DRM_DEV_DEBUG_DRIVER(dev, "found MIPI DSI host node.\n"); + + if (of_property_read_bool(np, "analogix,audio-enable")) + pdata->audio_en = 1; ret = drm_of_find_panel_or_bridge(np, 1, 0, &panel, NULL); if (ret < 0) { @@ -1301,9 +1432,215 @@ static enum drm_connector_status anx7625_sink_detect(struct anx7625_data *ctx) { struct device *dev = &ctx->client->dev; - DRM_DEV_DEBUG_DRIVER(dev, "sink detect, return connected\n"); + DRM_DEV_DEBUG_DRIVER(dev, "sink detect\n"); - return connector_status_connected; + if (ctx->pdata.panel_bridge) + return connector_status_connected; + + return ctx->hpd_status ? 
connector_status_connected : + connector_status_disconnected; +} + +static int anx7625_audio_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *fmt, + struct hdmi_codec_params *params) +{ + struct anx7625_data *ctx = dev_get_drvdata(dev); + int wl, ch, rate; + int ret = 0; + + if (fmt->fmt != HDMI_DSP_A) { + DRM_DEV_ERROR(dev, "only supports DSP_A\n"); + return -EINVAL; + } + + DRM_DEV_DEBUG_DRIVER(dev, "setting %d Hz, %d bit, %d channels\n", + params->sample_rate, params->sample_width, + params->cea.channels); + + ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client, + AUDIO_CHANNEL_STATUS_6, + ~I2S_SLAVE_MODE, + TDM_SLAVE_MODE); + + /* Word length */ + switch (params->sample_width) { + case 16: + wl = AUDIO_W_LEN_16_20MAX; + break; + case 18: + wl = AUDIO_W_LEN_18_20MAX; + break; + case 20: + wl = AUDIO_W_LEN_20_20MAX; + break; + case 24: + wl = AUDIO_W_LEN_24_24MAX; + break; + default: + DRM_DEV_DEBUG_DRIVER(dev, "wordlength: %d bit not support", + params->sample_width); + return -EINVAL; + } + ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client, + AUDIO_CHANNEL_STATUS_5, + 0xf0, wl); + + /* Channel num */ + switch (params->cea.channels) { + case 2: + ch = I2S_CH_2; + break; + case 4: + ch = TDM_CH_4; + break; + case 6: + ch = TDM_CH_6; + break; + case 8: + ch = TDM_CH_8; + break; + default: + DRM_DEV_DEBUG_DRIVER(dev, "channel number: %d not support", + params->cea.channels); + return -EINVAL; + } + ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client, + AUDIO_CHANNEL_STATUS_6, 0x1f, ch << 5); + if (ch > I2S_CH_2) + ret |= anx7625_write_or(ctx, ctx->i2c.tx_p2_client, + AUDIO_CHANNEL_STATUS_6, AUDIO_LAYOUT); + else + ret |= anx7625_write_and(ctx, ctx->i2c.tx_p2_client, + AUDIO_CHANNEL_STATUS_6, ~AUDIO_LAYOUT); + + /* FS */ + switch (params->sample_rate) { + case 32000: + rate = AUDIO_FS_32K; + break; + case 44100: + rate = AUDIO_FS_441K; + break; + case 48000: + rate = AUDIO_FS_48K; + break; + case 88200: + rate = AUDIO_FS_882K; + break; + case 96000: + rate = AUDIO_FS_96K; + break; + case 176400: + rate = AUDIO_FS_1764K; + break; + case 192000: + rate = AUDIO_FS_192K; + break; + default: + DRM_DEV_DEBUG_DRIVER(dev, "sample rate: %d not support", + params->sample_rate); + return -EINVAL; + } + ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client, + AUDIO_CHANNEL_STATUS_4, + 0xf0, rate); + ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client, + AP_AV_STATUS, AP_AUDIO_CHG); + if (ret < 0) { + DRM_DEV_ERROR(dev, "IO error : config audio.\n"); + return -EIO; + } + + return 0; +} + +static void anx7625_audio_shutdown(struct device *dev, void *data) +{ + DRM_DEV_DEBUG_DRIVER(dev, "stop audio\n"); +} + +static int anx7625_hdmi_i2s_get_dai_id(struct snd_soc_component *component, + struct device_node *endpoint) +{ + struct of_endpoint of_ep; + int ret; + + ret = of_graph_parse_endpoint(endpoint, &of_ep); + if (ret < 0) + return ret; + + /* + * HDMI sound should be located at external DPI port + * Didn't have good way to check where is internal(DSI) + * or external(DPI) bridge + */ + return 0; +} + +static void +anx7625_audio_update_connector_status(struct anx7625_data *ctx, + enum drm_connector_status status) +{ + if (ctx->plugged_cb && ctx->codec_dev) { + ctx->plugged_cb(ctx->codec_dev, + status == connector_status_connected); + } +} + +static int anx7625_audio_hook_plugged_cb(struct device *dev, void *data, + hdmi_codec_plugged_cb fn, + struct device *codec_dev) +{ + struct anx7625_data *ctx = data; + + ctx->plugged_cb = fn; + ctx->codec_dev = codec_dev; + 
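/*
 * Illustrative aside: the plugged_cb/codec_dev pair stored in this
 * function is how the hdmi-codec ASoC driver learns about HPD changes.
 * A self-contained model of that handshake; the toy types are stand-ins
 * for the hdmi_codec_plugged_cb machinery, not kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

typedef void (*plugged_cb_t)(void *codec_dev, bool plugged);

struct toy_bridge {
    plugged_cb_t plugged_cb;
    void *codec_dev;
    bool connected;
};

/* Codec side: what hook_plugged_cb() models. It records the callback and
 * immediately reports the current state, as the code just below does via
 * anx7625_audio_update_connector_status(). */
static void toy_hook_plugged_cb(struct toy_bridge *b, plugged_cb_t fn, void *codec)
{
    b->plugged_cb = fn;
    b->codec_dev = codec;
    if (b->plugged_cb)
        b->plugged_cb(b->codec_dev, b->connected);
}

/* Bridge side: invoked from the HPD interrupt/work path. */
static void toy_hpd_event(struct toy_bridge *b, bool connected)
{
    b->connected = connected;
    if (b->plugged_cb && b->codec_dev)
        b->plugged_cb(b->codec_dev, connected);
}

static void codec_notify(void *codec, bool plugged)
{
    printf("%s: jack %s\n", (const char *)codec, plugged ? "plugged" : "unplugged");
}

int main(void)
{
    struct toy_bridge b = { 0 };

    toy_hook_plugged_cb(&b, codec_notify, (void *)"hdmi-codec");
    toy_hpd_event(&b, true);
    toy_hpd_event(&b, false);
    return 0;
}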
anx7625_audio_update_connector_status(ctx, anx7625_sink_detect(ctx)); + + return 0; +} + +static const struct hdmi_codec_ops anx7625_codec_ops = { + .hw_params = anx7625_audio_hw_params, + .audio_shutdown = anx7625_audio_shutdown, + .get_dai_id = anx7625_hdmi_i2s_get_dai_id, + .hook_plugged_cb = anx7625_audio_hook_plugged_cb, +}; + +static void anx7625_unregister_audio(struct anx7625_data *ctx) +{ + struct device *dev = &ctx->client->dev; + + if (ctx->audio_pdev) { + platform_device_unregister(ctx->audio_pdev); + ctx->audio_pdev = NULL; + } + + DRM_DEV_DEBUG_DRIVER(dev, "unbound from %s", HDMI_CODEC_DRV_NAME); +} + +static int anx7625_register_audio(struct device *dev, struct anx7625_data *ctx) +{ + struct hdmi_codec_pdata codec_data = { + .ops = &anx7625_codec_ops, + .max_i2s_channels = 8, + .i2s = 1, + .data = ctx, + }; + + ctx->audio_pdev = platform_device_register_data(dev, + HDMI_CODEC_DRV_NAME, + PLATFORM_DEVID_AUTO, + &codec_data, + sizeof(codec_data)); + + if (IS_ERR(ctx->audio_pdev)) + return PTR_ERR(ctx->audio_pdev); + + DRM_DEV_DEBUG_DRIVER(dev, "bound to %s", HDMI_CODEC_DRV_NAME); + + return 0; } static int anx7625_attach_dsi(struct anx7625_data *ctx) @@ -1316,6 +1653,7 @@ static int anx7625_attach_dsi(struct anx7625_data *ctx) .channel = 0, .node = NULL, }; + int ret; DRM_DEV_DEBUG_DRIVER(dev, "attach dsi\n"); @@ -1325,22 +1663,22 @@ static int anx7625_attach_dsi(struct anx7625_data *ctx) return -EINVAL; } - dsi = mipi_dsi_device_register_full(host, &info); + dsi = devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) { DRM_DEV_ERROR(dev, "fail to create dsi device.\n"); return -EINVAL; } - dsi->lanes = 4; + dsi->lanes = ctx->pdata.mipi_lanes; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_VIDEO_HSE; - if (mipi_dsi_attach(dsi) < 0) { + ret = devm_mipi_dsi_attach(dev, dsi); + if (ret) { DRM_DEV_ERROR(dev, "fail to attach dsi to host.\n"); - mipi_dsi_device_unregister(dsi); - return -EINVAL; + return ret; } ctx->dsi = dsi; @@ -1350,16 +1688,6 @@ static int anx7625_attach_dsi(struct anx7625_data *ctx) return 0; } -static void anx7625_bridge_detach(struct drm_bridge *bridge) -{ - struct anx7625_data *ctx = bridge_to_anx7625(bridge); - - if (ctx->dsi) { - mipi_dsi_detach(ctx->dsi); - mipi_dsi_device_unregister(ctx->dsi); - } -} - static int anx7625_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { @@ -1376,12 +1704,6 @@ static int anx7625_bridge_attach(struct drm_bridge *bridge, return -ENODEV; } - err = anx7625_attach_dsi(ctx); - if (err) { - DRM_DEV_ERROR(dev, "Fail to attach to dsi : %d\n", err); - return err; - } - if (ctx->pdata.panel_bridge) { err = drm_bridge_attach(bridge->encoder, ctx->pdata.panel_bridge, @@ -1475,6 +1797,10 @@ static bool anx7625_bridge_mode_fixup(struct drm_bridge *bridge, DRM_DEV_DEBUG_DRIVER(dev, "drm mode fixup set\n"); + /* No need to fix up the mode for an external monitor */ + if (!ctx->pdata.panel_bridge) + return true; + hsync = mode->hsync_end - mode->hsync_start; hfp = mode->hsync_start - mode->hdisplay; hbp = mode->htotal - mode->hsync_end; @@ -1624,7 +1950,6 @@ static struct edid *anx7625_bridge_get_edid(struct drm_bridge *bridge, static const struct drm_bridge_funcs anx7625_bridge_funcs = { .attach = anx7625_bridge_attach, - .detach = anx7625_bridge_detach, .disable = anx7625_bridge_disable, .mode_valid = anx7625_bridge_mode_valid, .mode_set = anx7625_bridge_mode_set, @@ -1851,14 +2176,39 @@ static int anx7625_i2c_probe(struct i2c_client
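anx7625_audio_hook_plugged_cb() stores the codec's callback and immediately replays the current sink state, so an hdmi-codec that binds after HPD has already fired still learns that it is plugged. A compact sketch of that contract using a hypothetical driver struct (the hdmi_codec_plugged_cb typedef and the hook signature come from <sound/hdmi-codec.h>):

        #include <linux/platform_device.h>
        #include <sound/hdmi-codec.h>

        struct my_bridge {                      /* hypothetical driver state */
                struct platform_device *audio_pdev;
                hdmi_codec_plugged_cb plugged_cb;
                struct device *codec_dev;
                bool connected;
        };

        /* Called from the HPD path and from the hook below. */
        static void my_report_plug(struct my_bridge *ctx)
        {
                if (ctx->plugged_cb && ctx->codec_dev)
                        ctx->plugged_cb(ctx->codec_dev, ctx->connected);
        }

        static int my_hook_plugged_cb(struct device *dev, void *data,
                                      hdmi_codec_plugged_cb fn,
                                      struct device *codec_dev)
        {
                struct my_bridge *ctx = data;

                ctx->plugged_cb = fn;
                ctx->codec_dev = codec_dev;
                my_report_plug(ctx);    /* replay state for late binders */
                return 0;
        }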
*client, platform->bridge.funcs = &anx7625_bridge_funcs; platform->bridge.of_node = client->dev.of_node; - platform->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD; - platform->bridge.type = DRM_MODE_CONNECTOR_eDP; + platform->bridge.ops = DRM_BRIDGE_OP_EDID; + if (!platform->pdata.panel_bridge) + platform->bridge.ops |= DRM_BRIDGE_OP_HPD | + DRM_BRIDGE_OP_DETECT; + platform->bridge.type = platform->pdata.panel_bridge ? + DRM_MODE_CONNECTOR_eDP : + DRM_MODE_CONNECTOR_DisplayPort; + drm_bridge_add(&platform->bridge); + if (!platform->pdata.is_dpi) { + ret = anx7625_attach_dsi(platform); + if (ret) { + DRM_DEV_ERROR(dev, "Fail to attach to dsi : %d\n", ret); + goto unregister_bridge; + } + } + + if (platform->pdata.audio_en) + anx7625_register_audio(dev, platform); + DRM_DEV_DEBUG_DRIVER(dev, "probe done\n"); return 0; +unregister_bridge: + drm_bridge_remove(&platform->bridge); + + if (!platform->pdata.low_power_mode) + pm_runtime_put_sync_suspend(&client->dev); + + anx7625_unregister_i2c_dummy_clients(platform); + free_wq: if (platform->workqueue) destroy_workqueue(platform->workqueue); @@ -1883,6 +2233,9 @@ static int anx7625_i2c_remove(struct i2c_client *client) anx7625_unregister_i2c_dummy_clients(platform); + if (platform->pdata.audio_en) + anx7625_unregister_audio(platform); + kfree(platform); return 0; } diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h index 6dcf64c703f9..3d79b6fb13c8 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.h +++ b/drivers/gpu/drm/bridge/analogix/anx7625.h @@ -111,6 +111,7 @@ #define AUDIO_CHANNEL_STATUS_6 0xd5 #define TDM_SLAVE_MODE 0x10 #define I2S_SLAVE_MODE 0x08 +#define AUDIO_LAYOUT 0x01 #define AUDIO_CONTROL_REGISTER 0xe6 #define TDM_TIMING_MODE 0x08 @@ -141,12 +142,20 @@ #define HORIZONTAL_BACK_PORCH_H 0x22 /* Bit[7:4] are reserved */ /******** END of I2C Address 0x72 *********/ + +/***************************************************************/ +/* Register definition of device address 0x7a */ +#define DP_TX_SWING_REG_CNT 0x14 +#define DP_TX_LANE0_SWING_REG0 0x00 +#define DP_TX_LANE1_SWING_REG0 0x14 +/******** END of I2C Address 0x7a *********/ + /***************************************************************/ /* Register definition of device address 0x7e */ #define I2C_ADDR_7E_FLASH_CONTROLLER 0x7E -#define FLASH_LOAD_STA 0x05 +#define FLASH_LOAD_STA 0x05 #define FLASH_LOAD_STA_CHK BIT(7) #define XTAL_FRQ_SEL 0x3F @@ -349,12 +358,21 @@ struct s_edid_data { /***************** Display End *****************/ +#define MAX_LANES_SUPPORT 4 + struct anx7625_platform_data { struct gpio_desc *gpio_p_on; struct gpio_desc *gpio_reset; struct regulator_bulk_data supplies[3]; struct drm_bridge *panel_bridge; int intp_irq; + int is_dpi; + int mipi_lanes; + int audio_en; + int dp_lane0_swing_reg_cnt; + int lane0_reg_data[DP_TX_SWING_REG_CNT]; + int dp_lane1_swing_reg_cnt; + int lane1_reg_data[DP_TX_SWING_REG_CNT]; u32 low_power_mode; struct device_node *mipi_host_node; }; @@ -371,6 +389,7 @@ struct anx7625_i2c_client { struct anx7625_data { struct anx7625_platform_data pdata; + struct platform_device *audio_pdev; int hpd_status; int hpd_high_cnt; /* Lock for work queue */ @@ -379,6 +398,8 @@ struct anx7625_data { struct anx7625_i2c_client i2c; struct i2c_client *last_client; struct s_edid_data slimport_edid_p; + struct device *codec_dev; + hdmi_codec_plugged_cb plugged_cb; struct work_struct work; struct workqueue_struct *workqueue; char edid_block; diff --git a/drivers/gpu/drm/bridge/display-connector.c 
b/drivers/gpu/drm/bridge/display-connector.c index 05eb759da6fc..d24f5b90feab 100644 --- a/drivers/gpu/drm/bridge/display-connector.c +++ b/drivers/gpu/drm/bridge/display-connector.c @@ -13,6 +13,7 @@ #include <linux/platform_device.h> #include <linux/regulator/consumer.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_edid.h> @@ -87,10 +88,95 @@ static struct edid *display_connector_get_edid(struct drm_bridge *bridge, return drm_get_edid(connector, conn->bridge.ddc); } +/* + * Since this bridge is tied to the connector, it acts like a passthrough, + * so concerning the output bus formats, either pass the bus formats from the + * previous bridge or return fallback data like done in the bridge function: + * drm_atomic_bridge_chain_select_bus_fmts(). + * This supports negotiation if the bridge chain has all bits in place. + */ +static u32 *display_connector_get_output_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + unsigned int *num_output_fmts) +{ + struct drm_bridge *prev_bridge = drm_bridge_get_prev_bridge(bridge); + struct drm_bridge_state *prev_bridge_state; + + if (!prev_bridge || !prev_bridge->funcs->atomic_get_output_bus_fmts) { + struct drm_connector *conn = conn_state->connector; + u32 *out_bus_fmts; + + *num_output_fmts = 1; + out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL); + if (!out_bus_fmts) + return NULL; + + if (conn->display_info.num_bus_formats && + conn->display_info.bus_formats) + out_bus_fmts[0] = conn->display_info.bus_formats[0]; + else + out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED; + + return out_bus_fmts; + } + + prev_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state, + prev_bridge); + + return prev_bridge->funcs->atomic_get_output_bus_fmts(prev_bridge, prev_bridge_state, + crtc_state, conn_state, + num_output_fmts); +} + +/* + * Since this bridge is tied to the connector, it acts like a passthrough, + * so concerning the input bus formats, either pass the bus formats from the + * previous bridge or MEDIA_BUS_FMT_FIXED (like select_bus_fmt_recursive()) + * when atomic_get_input_bus_fmts is not supported. + * This supports negotiation if the bridge chain has all bits in place. 
+ */ +static u32 *display_connector_get_input_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts) +{ + struct drm_bridge *prev_bridge = drm_bridge_get_prev_bridge(bridge); + struct drm_bridge_state *prev_bridge_state; + + if (!prev_bridge || !prev_bridge->funcs->atomic_get_input_bus_fmts) { + u32 *in_bus_fmts; + + *num_input_fmts = 1; + in_bus_fmts = kmalloc(sizeof(*in_bus_fmts), GFP_KERNEL); + if (!in_bus_fmts) + return NULL; + + in_bus_fmts[0] = MEDIA_BUS_FMT_FIXED; + + return in_bus_fmts; + } + + prev_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state, + prev_bridge); + + return prev_bridge->funcs->atomic_get_input_bus_fmts(prev_bridge, prev_bridge_state, + crtc_state, conn_state, output_fmt, + num_input_fmts); +} + static const struct drm_bridge_funcs display_connector_bridge_funcs = { .attach = display_connector_attach, .detect = display_connector_detect, .get_edid = display_connector_get_edid, + .atomic_get_output_bus_fmts = display_connector_get_output_bus_fmts, + .atomic_get_input_bus_fmts = display_connector_get_input_bus_fmts, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, }; static irqreturn_t display_connector_hpd_irq(int irq, void *arg) @@ -107,7 +193,7 @@ static int display_connector_probe(struct platform_device *pdev) { struct display_connector *conn; unsigned int type; - const char *label; + const char *label = NULL; int ret; conn = devm_kzalloc(&pdev->dev, sizeof(*conn), GFP_KERNEL); diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c index 1b0c7eaf6c84..c642d1e02b2f 100644 --- a/drivers/gpu/drm/bridge/lontium-lt8912b.c +++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c @@ -472,11 +472,11 @@ static int lt8912_attach_dsi(struct lt8912 *lt) return -EPROBE_DEFER; } - dsi = mipi_dsi_device_register_full(host, &info); + dsi = devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) { ret = PTR_ERR(dsi); dev_err(dev, "failed to create dsi device (%d)\n", ret); - goto err_dsi_device; + return ret; } lt->dsi = dsi; @@ -489,24 +489,13 @@ static int lt8912_attach_dsi(struct lt8912 *lt) MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET; - ret = mipi_dsi_attach(dsi); + ret = devm_mipi_dsi_attach(dev, dsi); if (ret < 0) { dev_err(dev, "failed to attach dsi to host\n"); - goto err_dsi_attach; + return ret; } return 0; - -err_dsi_attach: - mipi_dsi_device_unregister(dsi); -err_dsi_device: - return ret; -} - -static void lt8912_detach_dsi(struct lt8912 *lt) -{ - mipi_dsi_detach(lt->dsi); - mipi_dsi_device_unregister(lt->dsi); } static int lt8912_bridge_connector_init(struct drm_bridge *bridge) @@ -555,10 +544,6 @@ static int lt8912_bridge_attach(struct drm_bridge *bridge, if (ret) goto error; - ret = lt8912_attach_dsi(lt); - if (ret) - goto error; - lt->is_attached = true; return 0; @@ -573,7 +558,6 @@ static void lt8912_bridge_detach(struct drm_bridge *bridge) struct lt8912 *lt = bridge_to_lt8912(bridge); if (lt->is_attached) { - lt8912_detach_dsi(lt); lt8912_hard_power_off(lt); drm_connector_unregister(<->connector); drm_connector_cleanup(<->connector); @@ -718,8 +702,15 @@ static int lt8912_probe(struct i2c_client *client, drm_bridge_add(<->bridge); + ret = lt8912_attach_dsi(lt); + if (ret) + goto err_attach; + return 0; 
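The lt8912b conversion above (and the anx7625 one earlier) follows the recurring shape of this series: devm_mipi_dsi_device_register_full() and devm_mipi_dsi_attach() tie the DSI device to the bridge driver's struct device, so the manual mipi_dsi_detach()/mipi_dsi_device_unregister() unwind paths disappear. A minimal sketch of the pattern (device name and mode flags are illustrative):

        #include <drm/drm_mipi_dsi.h>

        static int my_attach_dsi(struct device *dev, struct device_node *host_node)
        {
                const struct mipi_dsi_device_info info = {
                        .type = "my-bridge", .channel = 0, .node = NULL,
                };
                struct mipi_dsi_host *host;
                struct mipi_dsi_device *dsi;

                host = of_find_mipi_dsi_host_by_node(host_node);
                if (!host)
                        return -EPROBE_DEFER;   /* host driver not bound yet */

                dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
                if (IS_ERR(dsi))
                        return PTR_ERR(dsi);

                dsi->lanes = 4;
                dsi->format = MIPI_DSI_FMT_RGB888;
                dsi->mode_flags = MIPI_DSI_MODE_VIDEO;

                /* Both registration and attach unwind when 'dev' is unbound. */
                return devm_mipi_dsi_attach(dev, dsi);
        }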
+err_attach: + drm_bridge_remove(<->bridge); + lt8912_free_i2c(lt); err_i2c: lt8912_put_dt(lt); err_dt_parse: diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c index 29b1ce2140ab..dafb1b47c15f 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611.c @@ -586,7 +586,7 @@ lt9611_connector_detect(struct drm_connector *connector, bool force) int connected = 0; regmap_read(lt9611->regmap, 0x825e, ®_val); - connected = (reg_val & BIT(2)); + connected = (reg_val & BIT(0)); lt9611->status = connected ? connector_status_connected : connector_status_disconnected; @@ -760,6 +760,7 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611, const struct mipi_dsi_device_info info = { "lt9611", 0, NULL }; struct mipi_dsi_device *dsi; struct mipi_dsi_host *host; + struct device *dev = lt9611->dev; int ret; host = of_find_mipi_dsi_host_by_node(dsi_node); @@ -768,7 +769,7 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611, return ERR_PTR(-EPROBE_DEFER); } - dsi = mipi_dsi_device_register_full(host, &info); + dsi = devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) { dev_err(lt9611->dev, "failed to create dsi device\n"); return dsi; @@ -779,29 +780,15 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611, dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_VIDEO_HSE; - ret = mipi_dsi_attach(dsi); + ret = devm_mipi_dsi_attach(dev, dsi); if (ret < 0) { - dev_err(lt9611->dev, "failed to attach dsi to host\n"); - mipi_dsi_device_unregister(dsi); + dev_err(dev, "failed to attach dsi to host\n"); return ERR_PTR(ret); } return dsi; } -static void lt9611_bridge_detach(struct drm_bridge *bridge) -{ - struct lt9611 *lt9611 = bridge_to_lt9611(bridge); - - if (lt9611->dsi1) { - mipi_dsi_detach(lt9611->dsi1); - mipi_dsi_device_unregister(lt9611->dsi1); - } - - mipi_dsi_detach(lt9611->dsi0); - mipi_dsi_device_unregister(lt9611->dsi0); -} - static int lt9611_connector_init(struct drm_bridge *bridge, struct lt9611 *lt9611) { int ret; @@ -838,28 +825,7 @@ static int lt9611_bridge_attach(struct drm_bridge *bridge, return ret; } - /* Attach primary DSI */ - lt9611->dsi0 = lt9611_attach_dsi(lt9611, lt9611->dsi0_node); - if (IS_ERR(lt9611->dsi0)) - return PTR_ERR(lt9611->dsi0); - - /* Attach secondary DSI, if specified */ - if (lt9611->dsi1_node) { - lt9611->dsi1 = lt9611_attach_dsi(lt9611, lt9611->dsi1_node); - if (IS_ERR(lt9611->dsi1)) { - ret = PTR_ERR(lt9611->dsi1); - goto err_unregister_dsi0; - } - } - return 0; - -err_unregister_dsi0: - lt9611_bridge_detach(bridge); - drm_connector_cleanup(<9611->connector); - mipi_dsi_device_unregister(lt9611->dsi0); - - return ret; } static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge, @@ -926,7 +892,7 @@ static enum drm_connector_status lt9611_bridge_detect(struct drm_bridge *bridge) int connected; regmap_read(lt9611->regmap, 0x825e, ®_val); - connected = reg_val & BIT(2); + connected = reg_val & BIT(0); lt9611->status = connected ? 
connector_status_connected : connector_status_disconnected; @@ -952,7 +918,6 @@ static void lt9611_bridge_hpd_enable(struct drm_bridge *bridge) static const struct drm_bridge_funcs lt9611_bridge_funcs = { .attach = lt9611_bridge_attach, - .detach = lt9611_bridge_detach, .mode_valid = lt9611_bridge_mode_valid, .enable = lt9611_bridge_enable, .disable = lt9611_bridge_disable, @@ -1181,10 +1146,29 @@ static int lt9611_probe(struct i2c_client *client, drm_bridge_add(<9611->bridge); + /* Attach primary DSI */ + lt9611->dsi0 = lt9611_attach_dsi(lt9611, lt9611->dsi0_node); + if (IS_ERR(lt9611->dsi0)) { + ret = PTR_ERR(lt9611->dsi0); + goto err_remove_bridge; + } + + /* Attach secondary DSI, if specified */ + if (lt9611->dsi1_node) { + lt9611->dsi1 = lt9611_attach_dsi(lt9611, lt9611->dsi1_node); + if (IS_ERR(lt9611->dsi1)) { + ret = PTR_ERR(lt9611->dsi1); + goto err_remove_bridge; + } + } + lt9611_enable_hpd_interrupts(lt9611); return lt9611_audio_init(dev, lt9611); +err_remove_bridge: + drm_bridge_remove(<9611->bridge); + err_disable_regulators: regulator_bulk_disable(ARRAY_SIZE(lt9611->supplies), lt9611->supplies); diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c index 010657ea7af7..33f9716da0ee 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c @@ -258,17 +258,18 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc, const struct mipi_dsi_device_info info = { "lt9611uxc", 0, NULL }; struct mipi_dsi_device *dsi; struct mipi_dsi_host *host; + struct device *dev = lt9611uxc->dev; int ret; host = of_find_mipi_dsi_host_by_node(dsi_node); if (!host) { - dev_err(lt9611uxc->dev, "failed to find dsi host\n"); + dev_err(dev, "failed to find dsi host\n"); return ERR_PTR(-EPROBE_DEFER); } - dsi = mipi_dsi_device_register_full(host, &info); + dsi = devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) { - dev_err(lt9611uxc->dev, "failed to create dsi device\n"); + dev_err(dev, "failed to create dsi device\n"); return dsi; } @@ -277,10 +278,9 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc, dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_VIDEO_HSE; - ret = mipi_dsi_attach(dsi); + ret = devm_mipi_dsi_attach(dev, dsi); if (ret < 0) { - dev_err(lt9611uxc->dev, "failed to attach dsi to host\n"); - mipi_dsi_device_unregister(dsi); + dev_err(dev, "failed to attach dsi to host\n"); return ERR_PTR(ret); } @@ -355,19 +355,6 @@ static int lt9611uxc_connector_init(struct drm_bridge *bridge, struct lt9611uxc return drm_connector_attach_encoder(<9611uxc->connector, bridge->encoder); } -static void lt9611uxc_bridge_detach(struct drm_bridge *bridge) -{ - struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); - - if (lt9611uxc->dsi1) { - mipi_dsi_detach(lt9611uxc->dsi1); - mipi_dsi_device_unregister(lt9611uxc->dsi1); - } - - mipi_dsi_detach(lt9611uxc->dsi0); - mipi_dsi_device_unregister(lt9611uxc->dsi0); -} - static int lt9611uxc_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { @@ -380,27 +367,7 @@ static int lt9611uxc_bridge_attach(struct drm_bridge *bridge, return ret; } - /* Attach primary DSI */ - lt9611uxc->dsi0 = lt9611uxc_attach_dsi(lt9611uxc, lt9611uxc->dsi0_node); - if (IS_ERR(lt9611uxc->dsi0)) - return PTR_ERR(lt9611uxc->dsi0); - - /* Attach secondary DSI, if specified */ - if (lt9611uxc->dsi1_node) { - lt9611uxc->dsi1 = lt9611uxc_attach_dsi(lt9611uxc, 
lt9611uxc->dsi1_node); - if (IS_ERR(lt9611uxc->dsi1)) { - ret = PTR_ERR(lt9611uxc->dsi1); - goto err_unregister_dsi0; - } - } - return 0; - -err_unregister_dsi0: - mipi_dsi_detach(lt9611uxc->dsi0); - mipi_dsi_device_unregister(lt9611uxc->dsi0); - - return ret; } static enum drm_mode_status @@ -544,7 +511,6 @@ static struct edid *lt9611uxc_bridge_get_edid(struct drm_bridge *bridge, static const struct drm_bridge_funcs lt9611uxc_bridge_funcs = { .attach = lt9611uxc_bridge_attach, - .detach = lt9611uxc_bridge_detach, .mode_valid = lt9611uxc_bridge_mode_valid, .mode_set = lt9611uxc_bridge_mode_set, .detect = lt9611uxc_bridge_detect, @@ -980,8 +946,27 @@ retry: drm_bridge_add(<9611uxc->bridge); + /* Attach primary DSI */ + lt9611uxc->dsi0 = lt9611uxc_attach_dsi(lt9611uxc, lt9611uxc->dsi0_node); + if (IS_ERR(lt9611uxc->dsi0)) { + ret = PTR_ERR(lt9611uxc->dsi0); + goto err_remove_bridge; + } + + /* Attach secondary DSI, if specified */ + if (lt9611uxc->dsi1_node) { + lt9611uxc->dsi1 = lt9611uxc_attach_dsi(lt9611uxc, lt9611uxc->dsi1_node); + if (IS_ERR(lt9611uxc->dsi1)) { + ret = PTR_ERR(lt9611uxc->dsi1); + goto err_remove_bridge; + } + } + return lt9611uxc_audio_init(dev, lt9611uxc); +err_remove_bridge: + drm_bridge_remove(<9611uxc->bridge); + err_disable_regulators: regulator_bulk_disable(ARRAY_SIZE(lt9611uxc->supplies), lt9611uxc->supplies); diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c index ad460b96c0a3..702ea803a743 100644 --- a/drivers/gpu/drm/bridge/lvds-codec.c +++ b/drivers/gpu/drm/bridge/lvds-codec.c @@ -14,12 +14,14 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> +#include <drm/drm_of.h> #include <drm/drm_panel.h> struct lvds_codec { struct device *dev; struct drm_bridge bridge; struct drm_bridge *panel_bridge; + struct drm_bridge_timings timings; struct regulator *vcc; struct gpio_desc *powerdown_gpio; u32 connector_type; @@ -118,7 +120,7 @@ static int lvds_codec_probe(struct platform_device *pdev) struct device_node *bus_node; struct drm_panel *panel; struct lvds_codec *lvds_codec; - const char *mapping; + u32 val; int ret; lvds_codec = devm_kzalloc(dev, sizeof(*lvds_codec), GFP_KERNEL); @@ -174,32 +176,38 @@ static int lvds_codec_probe(struct platform_device *pdev) return -ENXIO; } - ret = of_property_read_string(bus_node, "data-mapping", - &mapping); + ret = drm_of_lvds_get_data_mapping(bus_node); of_node_put(bus_node); - if (ret < 0) { + if (ret == -ENODEV) { dev_warn(dev, "missing 'data-mapping' DT property\n"); + } else if (ret) { + dev_err(dev, "invalid 'data-mapping' DT property\n"); + return ret; } else { - if (!strcmp(mapping, "jeida-18")) { - lvds_codec->bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG; - } else if (!strcmp(mapping, "jeida-24")) { - lvds_codec->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA; - } else if (!strcmp(mapping, "vesa-24")) { - lvds_codec->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG; - } else { - dev_err(dev, "invalid 'data-mapping' DT property\n"); - return -EINVAL; - } + lvds_codec->bus_format = ret; lvds_codec->bridge.funcs = &funcs_decoder; } } /* + * Encoder might sample data on different clock edge than the display, + * for example OnSemi FIN3385 has a dedicated strapping pin to select + * the sampling edge. + */ + if (lvds_codec->connector_type == DRM_MODE_CONNECTOR_LVDS && + !of_property_read_u32(dev->of_node, "pclk-sample", &val)) { + lvds_codec->timings.input_bus_flags = val ? 
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE : + DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE; + } + + /* * The panel_bridge bridge is attached to the panel's of_node, * but we need a bridge attached to our of_node for our user * to look up. */ lvds_codec->bridge.of_node = dev->of_node; + lvds_codec->bridge.timings = &lvds_codec->timings; drm_bridge_add(&lvds_codec->bridge); platform_set_drvdata(pdev, lvds_codec); diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c index d2808c4a6fb1..cce98bf2a4e7 100644 --- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c +++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c @@ -306,19 +306,10 @@ out: mutex_unlock(&ge_b850v3_lvds_dev_mutex); } -static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c, - const struct i2c_device_id *id) +static int ge_b850v3_register(void) { + struct i2c_client *stdp4028_i2c = ge_b850v3_lvds_ptr->stdp4028_i2c; struct device *dev = &stdp4028_i2c->dev; - int ret; - - ret = ge_b850v3_lvds_init(dev); - - if (ret) - return ret; - - ge_b850v3_lvds_ptr->stdp4028_i2c = stdp4028_i2c; - i2c_set_clientdata(stdp4028_i2c, ge_b850v3_lvds_ptr); /* drm bridge initialization */ ge_b850v3_lvds_ptr->bridge.funcs = &ge_b850v3_lvds_funcs; @@ -343,6 +334,27 @@ static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c, "ge-b850v3-lvds-dp", ge_b850v3_lvds_ptr); } +static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c, + const struct i2c_device_id *id) +{ + struct device *dev = &stdp4028_i2c->dev; + int ret; + + ret = ge_b850v3_lvds_init(dev); + + if (ret) + return ret; + + ge_b850v3_lvds_ptr->stdp4028_i2c = stdp4028_i2c; + i2c_set_clientdata(stdp4028_i2c, ge_b850v3_lvds_ptr); + + /* Only register after both bridges are probed */ + if (!ge_b850v3_lvds_ptr->stdp2690_i2c) + return 0; + + return ge_b850v3_register(); +} + static int stdp4028_ge_b850v3_fw_remove(struct i2c_client *stdp4028_i2c) { ge_b850v3_lvds_remove(); @@ -386,7 +398,11 @@ static int stdp2690_ge_b850v3_fw_probe(struct i2c_client *stdp2690_i2c, ge_b850v3_lvds_ptr->stdp2690_i2c = stdp2690_i2c; i2c_set_clientdata(stdp2690_i2c, ge_b850v3_lvds_ptr); - return 0; + /* Only register after both bridges are probed */ + if (!ge_b850v3_lvds_ptr->stdp4028_i2c) + return 0; + + return ge_b850v3_register(); } static int stdp2690_ge_b850v3_fw_remove(struct i2c_client *stdp2690_i2c) diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c index 3aaa90913bf8..818704bf5e86 100644 --- a/drivers/gpu/drm/bridge/parade-ps8640.c +++ b/drivers/gpu/drm/bridge/parade-ps8640.c @@ -9,10 +9,12 @@ #include <linux/i2c.h> #include <linux/module.h> #include <linux/of_graph.h> +#include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <drm/drm_bridge.h> +#include <drm/drm_dp_aux_bus.h> #include <drm/drm_dp_helper.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_of.h> @@ -100,7 +102,7 @@ struct ps8640 { struct regulator_bulk_data supplies[2]; struct gpio_desc *gpio_reset; struct gpio_desc *gpio_powerdown; - bool powered; + bool pre_enabled; }; static const struct regmap_config ps8640_regmap_config[] = { @@ -148,8 +150,46 @@ static inline struct ps8640 *aux_to_ps8640(struct drm_dp_aux *aux) return container_of(aux, struct ps8640, aux); } -static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux, - struct drm_dp_aux_msg *msg) +static bool ps8640_of_panel_on_aux_bus(struct device *dev) +{ + struct device_node *bus, 
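The stdp4028/stdp2690 change above defers DRM registration until whichever of the two I2C halves probes last, since either may bind first. The rendezvous reduces to a shared guard in both probe paths; a distilled sketch with hypothetical names:

        #include <linux/i2c.h>

        /* Shared state for a device split across two I2C clients. */
        struct my_duo {
                struct i2c_client *a;   /* e.g. stdp4028 */
                struct i2c_client *b;   /* e.g. stdp2690 */
        };

        static int my_register(struct my_duo *duo)
        {
                /* ... drm_bridge_add(), IRQ setup: both halves now exist ... */
                return 0;
        }

        static int my_probe_a(struct my_duo *duo, struct i2c_client *client)
        {
                duo->a = client;
                if (!duo->b)
                        return 0;       /* wait for the other half */
                return my_register(duo);
        }

        static int my_probe_b(struct my_duo *duo, struct i2c_client *client)
        {
                duo->b = client;
                if (!duo->a)
                        return 0;
                return my_register(duo);
        }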
*panel; + + bus = of_get_child_by_name(dev->of_node, "aux-bus"); + if (!bus) + return false; + + panel = of_get_child_by_name(bus, "panel"); + of_node_put(bus); + if (!panel) + return false; + of_node_put(panel); + + return true; +} + +static int ps8640_ensure_hpd(struct ps8640 *ps_bridge) +{ + struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL]; + struct device *dev = &ps_bridge->page[PAGE2_TOP_CNTL]->dev; + int status; + int ret; + + /* + * Apparently something about the firmware in the chip signals that + * HPD goes high by reporting GPIO9 as high (even though HPD isn't + * actually connected to GPIO9). + */ + ret = regmap_read_poll_timeout(map, PAGE2_GPIO_H, status, + status & PS_GPIO9, 20 * 1000, 200 * 1000); + + if (ret < 0) + dev_warn(dev, "HPD didn't go high: %d\n", ret); + + return ret; +} + +static ssize_t ps8640_aux_transfer_msg(struct drm_dp_aux *aux, + struct drm_dp_aux_msg *msg) { struct ps8640 *ps_bridge = aux_to_ps8640(aux); struct regmap *map = ps_bridge->regmap[PAGE0_DP_CNTL]; @@ -274,38 +314,49 @@ static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux, return len; } -static int ps8640_bridge_vdo_control(struct ps8640 *ps_bridge, - const enum ps8640_vdo_control ctrl) +static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux, + struct drm_dp_aux_msg *msg) +{ + struct ps8640 *ps_bridge = aux_to_ps8640(aux); + struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev; + int ret; + + pm_runtime_get_sync(dev); + ret = ps8640_ensure_hpd(ps_bridge); + if (!ret) + ret = ps8640_aux_transfer_msg(aux, msg); + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return ret; +} + +static void ps8640_bridge_vdo_control(struct ps8640 *ps_bridge, + const enum ps8640_vdo_control ctrl) { struct regmap *map = ps_bridge->regmap[PAGE3_DSI_CNTL1]; + struct device *dev = &ps_bridge->page[PAGE3_DSI_CNTL1]->dev; u8 vdo_ctrl_buf[] = { VDO_CTL_ADD, ctrl }; int ret; ret = regmap_bulk_write(map, PAGE3_SET_ADD, vdo_ctrl_buf, sizeof(vdo_ctrl_buf)); - if (ret < 0) { - DRM_ERROR("failed to %sable VDO: %d\n", - ctrl == ENABLE ? "en" : "dis", ret); - return ret; - } - - return 0; + if (ret < 0) + dev_err(dev, "failed to %sable VDO: %d\n", + ctrl == ENABLE ? "en" : "dis", ret); } -static void ps8640_bridge_poweron(struct ps8640 *ps_bridge) +static int __maybe_unused ps8640_resume(struct device *dev) { - struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL]; - int ret, status; - - if (ps_bridge->powered) - return; + struct ps8640 *ps_bridge = dev_get_drvdata(dev); + int ret; ret = regulator_bulk_enable(ARRAY_SIZE(ps_bridge->supplies), ps_bridge->supplies); if (ret < 0) { - DRM_ERROR("cannot enable regulators %d\n", ret); - return; + dev_err(dev, "cannot enable regulators %d\n", ret); + return ret; } gpiod_set_value(ps_bridge->gpio_powerdown, 0); @@ -314,86 +365,78 @@ static void ps8640_bridge_poweron(struct ps8640 *ps_bridge) gpiod_set_value(ps_bridge->gpio_reset, 0); /* - * Wait for the ps8640 embedded MCU to be ready - * First wait 200ms and then check the MCU ready flag every 20ms + * Mystery 200 ms delay for the "MCU to be ready". It's unclear if + * this is truly necessary since the MCU will already signal that + * things are "good to go" by signaling HPD on "gpio 9". See + * ps8640_ensure_hpd(). For now we'll keep this mystery delay just in + * case. 
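ps8640_aux_transfer() above brackets each transfer with a runtime-PM get/put pair and re-checks HPD first, which lets the chip autosuspend between bursts of AUX traffic. The wrapper pattern in isolation (register address and ready bit below are placeholders, not the real ps8640 values):

        #include <linux/bits.h>
        #include <linux/pm_runtime.h>
        #include <linux/regmap.h>

        #define MY_STATUS_REG   0x0e            /* placeholder address */
        #define MY_READY_BIT    BIT(1)          /* placeholder bit */

        static int my_do_aux_work(struct device *dev, struct regmap *map)
        {
                unsigned int status;
                int ret;

                pm_runtime_get_sync(dev);

                /* Wait up to 200 ms for the ready bit, sampling every 20 ms. */
                ret = regmap_read_poll_timeout(map, MY_STATUS_REG, status,
                                               status & MY_READY_BIT,
                                               20 * 1000, 200 * 1000);
                if (!ret) {
                        /* ... perform the actual AUX transfer here ... */
                }

                /* Arm the autosuspend timer instead of suspending at once. */
                pm_runtime_mark_last_busy(dev);
                pm_runtime_put_autosuspend(dev);

                return ret;
        }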
*/ msleep(200); - ret = regmap_read_poll_timeout(map, PAGE2_GPIO_H, status, - status & PS_GPIO9, 20 * 1000, 200 * 1000); - - if (ret < 0) { - DRM_ERROR("failed read PAGE2_GPIO_H: %d\n", ret); - goto err_regulators_disable; - } - - msleep(50); - - /* - * The Manufacturer Command Set (MCS) is a device dependent interface - * intended for factory programming of the display module default - * parameters. Once the display module is configured, the MCS shall be - * disabled by the manufacturer. Once disabled, all MCS commands are - * ignored by the display interface. - */ - - ret = regmap_update_bits(map, PAGE2_MCS_EN, MCS_EN, 0); - if (ret < 0) { - DRM_ERROR("failed write PAGE2_MCS_EN: %d\n", ret); - goto err_regulators_disable; - } - - /* Switch access edp panel's edid through i2c */ - ret = regmap_write(map, PAGE2_I2C_BYPASS, I2C_BYPASS_EN); - if (ret < 0) { - DRM_ERROR("failed write PAGE2_I2C_BYPASS: %d\n", ret); - goto err_regulators_disable; - } - - ps_bridge->powered = true; - - return; - -err_regulators_disable: - regulator_bulk_disable(ARRAY_SIZE(ps_bridge->supplies), - ps_bridge->supplies); + return 0; } -static void ps8640_bridge_poweroff(struct ps8640 *ps_bridge) +static int __maybe_unused ps8640_suspend(struct device *dev) { + struct ps8640 *ps_bridge = dev_get_drvdata(dev); int ret; - if (!ps_bridge->powered) - return; - gpiod_set_value(ps_bridge->gpio_reset, 1); gpiod_set_value(ps_bridge->gpio_powerdown, 1); ret = regulator_bulk_disable(ARRAY_SIZE(ps_bridge->supplies), ps_bridge->supplies); if (ret < 0) - DRM_ERROR("cannot disable regulators %d\n", ret); + dev_err(dev, "cannot disable regulators %d\n", ret); - ps_bridge->powered = false; + return ret; } +static const struct dev_pm_ops ps8640_pm_ops = { + SET_RUNTIME_PM_OPS(ps8640_suspend, ps8640_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) +}; + static void ps8640_pre_enable(struct drm_bridge *bridge) { struct ps8640 *ps_bridge = bridge_to_ps8640(bridge); + struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL]; + struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev; int ret; - ps8640_bridge_poweron(ps_bridge); + pm_runtime_get_sync(dev); + ps8640_ensure_hpd(ps_bridge); - ret = ps8640_bridge_vdo_control(ps_bridge, ENABLE); + /* + * The Manufacturer Command Set (MCS) is a device dependent interface + * intended for factory programming of the display module default + * parameters. Once the display module is configured, the MCS shall be + * disabled by the manufacturer. Once disabled, all MCS commands are + * ignored by the display interface. 
+ */ + + ret = regmap_update_bits(map, PAGE2_MCS_EN, MCS_EN, 0); if (ret < 0) - ps8640_bridge_poweroff(ps_bridge); + dev_warn(dev, "failed write PAGE2_MCS_EN: %d\n", ret); + + /* Switch access edp panel's edid through i2c */ + ret = regmap_write(map, PAGE2_I2C_BYPASS, I2C_BYPASS_EN); + if (ret < 0) + dev_warn(dev, "failed write PAGE2_MCS_EN: %d\n", ret); + + ps8640_bridge_vdo_control(ps_bridge, ENABLE); + + ps_bridge->pre_enabled = true; } static void ps8640_post_disable(struct drm_bridge *bridge) { struct ps8640 *ps_bridge = bridge_to_ps8640(bridge); + ps_bridge->pre_enabled = false; + ps8640_bridge_vdo_control(ps_bridge, DISABLE); - ps8640_bridge_poweroff(ps_bridge); + pm_runtime_put_sync_suspend(&ps_bridge->page[PAGE0_DP_CNTL]->dev); } static int ps8640_bridge_attach(struct drm_bridge *bridge, @@ -401,68 +444,21 @@ static int ps8640_bridge_attach(struct drm_bridge *bridge, { struct ps8640 *ps_bridge = bridge_to_ps8640(bridge); struct device *dev = &ps_bridge->page[0]->dev; - struct device_node *in_ep, *dsi_node; - struct mipi_dsi_device *dsi; - struct mipi_dsi_host *host; int ret; - const struct mipi_dsi_device_info info = { .type = "ps8640", - .channel = 0, - .node = NULL, - }; if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) return -EINVAL; - /* port@0 is ps8640 dsi input port */ - in_ep = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1); - if (!in_ep) - return -ENODEV; - - dsi_node = of_graph_get_remote_port_parent(in_ep); - of_node_put(in_ep); - if (!dsi_node) - return -ENODEV; - - host = of_find_mipi_dsi_host_by_node(dsi_node); - of_node_put(dsi_node); - if (!host) - return -ENODEV; - - dsi = mipi_dsi_device_register_full(host, &info); - if (IS_ERR(dsi)) { - dev_err(dev, "failed to create dsi device\n"); - ret = PTR_ERR(dsi); - return ret; - } - - ps_bridge->dsi = dsi; - - dsi->host = host; - dsi->mode_flags = MIPI_DSI_MODE_VIDEO | - MIPI_DSI_MODE_VIDEO_SYNC_PULSE; - dsi->format = MIPI_DSI_FMT_RGB888; - dsi->lanes = NUM_MIPI_LANES; - ret = mipi_dsi_attach(dsi); - if (ret) { - dev_err(dev, "failed to attach dsi device: %d\n", ret); - goto err_dsi_attach; - } - + ps_bridge->aux.drm_dev = bridge->dev; ret = drm_dp_aux_register(&ps_bridge->aux); if (ret) { dev_err(dev, "failed to register DP AUX channel: %d\n", ret); - goto err_aux_register; + return ret; } /* Attach the panel-bridge to the dsi bridge */ return drm_bridge_attach(bridge->encoder, ps_bridge->panel_bridge, &ps_bridge->bridge, flags); - -err_aux_register: - mipi_dsi_detach(dsi); -err_dsi_attach: - mipi_dsi_device_unregister(dsi); - return ret; } static void ps8640_bridge_detach(struct drm_bridge *bridge) @@ -474,7 +470,7 @@ static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge, struct drm_connector *connector) { struct ps8640 *ps_bridge = bridge_to_ps8640(bridge); - bool poweroff = !ps_bridge->powered; + bool poweroff = !ps_bridge->pre_enabled; struct edid *edid; /* @@ -504,6 +500,12 @@ static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge, return edid; } +static void ps8640_runtime_disable(void *data) +{ + pm_runtime_dont_use_autosuspend(data); + pm_runtime_disable(data); +} + static const struct drm_bridge_funcs ps8640_bridge_funcs = { .attach = ps8640_bridge_attach, .detach = ps8640_bridge_detach, @@ -512,6 +514,53 @@ static const struct drm_bridge_funcs ps8640_bridge_funcs = { .pre_enable = ps8640_pre_enable, }; +static int ps8640_bridge_host_attach(struct device *dev, struct ps8640 *ps_bridge) +{ + struct device_node *in_ep, *dsi_node; + struct mipi_dsi_device *dsi; + struct mipi_dsi_host 
*host; + int ret; + const struct mipi_dsi_device_info info = { .type = "ps8640", + .channel = 0, + .node = NULL, + }; + + /* port@0 is ps8640 dsi input port */ + in_ep = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1); + if (!in_ep) + return -ENODEV; + + dsi_node = of_graph_get_remote_port_parent(in_ep); + of_node_put(in_ep); + if (!dsi_node) + return -ENODEV; + + host = of_find_mipi_dsi_host_by_node(dsi_node); + of_node_put(dsi_node); + if (!host) + return -EPROBE_DEFER; + + dsi = devm_mipi_dsi_device_register_full(dev, host, &info); + if (IS_ERR(dsi)) { + dev_err(dev, "failed to create dsi device\n"); + return PTR_ERR(dsi); + } + + ps_bridge->dsi = dsi; + + dsi->host = host; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | + MIPI_DSI_MODE_VIDEO_SYNC_PULSE; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->lanes = NUM_MIPI_LANES; + + ret = devm_mipi_dsi_attach(dev, dsi); + if (ret) + return ret; + + return 0; +} + static int ps8640_probe(struct i2c_client *client) { struct device *dev = &client->dev; @@ -525,17 +574,6 @@ static int ps8640_probe(struct i2c_client *client) if (!ps_bridge) return -ENOMEM; - /* port@1 is ps8640 output port */ - ret = drm_of_find_panel_or_bridge(np, 1, 0, &panel, NULL); - if (ret < 0) - return ret; - if (!panel) - return -ENODEV; - - ps_bridge->panel_bridge = devm_drm_panel_bridge_add(dev, panel); - if (IS_ERR(ps_bridge->panel_bridge)) - return PTR_ERR(ps_bridge->panel_bridge); - ps_bridge->supplies[0].supply = "vdd33"; ps_bridge->supplies[1].supply = "vdd12"; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ps_bridge->supplies), @@ -558,9 +596,16 @@ static int ps8640_probe(struct i2c_client *client) ps_bridge->bridge.funcs = &ps8640_bridge_funcs; ps_bridge->bridge.of_node = dev->of_node; - ps_bridge->bridge.ops = DRM_BRIDGE_OP_EDID; ps_bridge->bridge.type = DRM_MODE_CONNECTOR_eDP; + /* + * In the device tree, if panel is listed under aux-bus of the bridge + * node, panel driver should be able to retrieve EDID by itself using + * aux-bus. So let's not set DRM_BRIDGE_OP_EDID here. + */ + if (!ps8640_of_panel_on_aux_bus(&client->dev)) + ps_bridge->bridge.ops = DRM_BRIDGE_OP_EDID; + ps_bridge->page[PAGE0_DP_CNTL] = client; ps_bridge->regmap[PAGE0_DP_CNTL] = devm_regmap_init_i2c(client, ps8640_regmap_config); @@ -587,9 +632,46 @@ static int ps8640_probe(struct i2c_client *client) ps_bridge->aux.transfer = ps8640_aux_transfer; drm_dp_aux_init(&ps_bridge->aux); + pm_runtime_enable(dev); + /* + * Powering on ps8640 takes ~300ms. To avoid wasting time on power + * cycling ps8640 too often, set autosuspend_delay to 1000ms to ensure + * the bridge wouldn't suspend in between each _aux_transfer_msg() call + * during EDID read (~20ms in my experiment) and in between the last + * _aux_transfer_msg() call during EDID read and the _pre_enable() call + * (~100ms in my experiment). 
 */ + pm_runtime_set_autosuspend_delay(dev, 1000); + pm_runtime_use_autosuspend(dev); + pm_suspend_ignore_children(dev, true); + ret = devm_add_action_or_reset(dev, ps8640_runtime_disable, dev); + if (ret) + return ret; + + devm_of_dp_aux_populate_ep_devices(&ps_bridge->aux); + + /* port@1 is ps8640 output port */ + ret = drm_of_find_panel_or_bridge(np, 1, 0, &panel, NULL); + if (ret < 0) + return ret; + if (!panel) + return -ENODEV; + + ps_bridge->panel_bridge = devm_drm_panel_bridge_add(dev, panel); + if (IS_ERR(ps_bridge->panel_bridge)) + return PTR_ERR(ps_bridge->panel_bridge); + drm_bridge_add(&ps_bridge->bridge); + ret = ps8640_bridge_host_attach(dev, ps_bridge); + if (ret) + goto err_bridge_remove; + return 0; + +err_bridge_remove: + drm_bridge_remove(&ps_bridge->bridge); + return ret; } static int ps8640_remove(struct i2c_client *client) @@ -613,6 +695,7 @@ static struct i2c_driver ps8640_driver = { .driver = { .name = "ps8640", .of_match_table = ps8640_match, + .pm = &ps8640_pm_ops, }, }; module_i2c_driver(ps8640_driver); diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c index d0db1acf11d7..7d2ed0ed2fe2 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c @@ -320,13 +320,17 @@ static int dw_hdmi_open(struct snd_pcm_substream *substream) struct snd_pcm_runtime *runtime = substream->runtime; struct snd_dw_hdmi *dw = substream->private_data; void __iomem *base = dw->data.base; + u8 *eld; int ret; runtime->hw = dw_hdmi_hw; - ret = snd_pcm_hw_constraint_eld(runtime, dw->data.eld); - if (ret < 0) - return ret; + eld = dw->data.get_eld(dw->data.hdmi); + if (eld) { + ret = snd_pcm_hw_constraint_eld(runtime, eld); + if (ret < 0) + return ret; + } ret = snd_pcm_limit_hw_rates(runtime); if (ret < 0) diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h index cb07dc0da5a7..f72d27208ebe 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h @@ -9,15 +9,15 @@ struct dw_hdmi_audio_data { void __iomem *base; int irq; struct dw_hdmi *hdmi; - u8 *eld; + u8 *(*get_eld)(struct dw_hdmi *hdmi); }; struct dw_hdmi_i2s_audio_data { struct dw_hdmi *hdmi; - u8 *eld; void (*write)(struct dw_hdmi *hdmi, u8 val, int offset); u8 (*read)(struct dw_hdmi *hdmi, int offset); + u8 *(*get_eld)(struct dw_hdmi *hdmi); }; #endif diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c index feb04f127b55..f50b47ac11a8 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c @@ -135,8 +135,15 @@ static int dw_hdmi_i2s_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len) { struct dw_hdmi_i2s_audio_data *audio = data; + u8 *eld; + + eld = audio->get_eld(audio->hdmi); + if (eld) + memcpy(buf, eld, min_t(size_t, MAX_ELD_BYTES, len)); + else + /* Pass an empty ELD if the connector is not available */ + memset(buf, 0, len); - memcpy(buf, audio->eld, min_t(size_t, MAX_ELD_BYTES, len)); return 0; } diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index f08d0fded61f..54d8fdad395f 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -757,6 +757,14 @@ static void hdmi_enable_audio_clk(struct dw_hdmi *hdmi, bool enable) hdmi_writeb(hdmi,
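With the cached eld pointer replaced by a get_eld() callback, the audio paths must tolerate a NULL return when no connector is current; the dw-hdmi-i2s hunk above falls back to a zeroed buffer. The defensive copy in isolation (MAX_ELD_BYTES is 128, matching the definition in <drm/drm_edid.h>):

        #include <linux/minmax.h>
        #include <linux/string.h>
        #include <linux/types.h>

        #define MAX_ELD_BYTES   128

        /* Copy an ELD into 'buf', or hand back silence if none is available. */
        static void my_copy_eld(u8 *buf, size_t len, const u8 *eld)
        {
                if (eld)
                        memcpy(buf, eld, min_t(size_t, MAX_ELD_BYTES, len));
                else
                        memset(buf, 0, len);    /* empty ELD: no connector yet */
        }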
hdmi->mc_clkdis, HDMI_MC_CLKDIS); } +static u8 *hdmi_audio_get_eld(struct dw_hdmi *hdmi) +{ + if (!hdmi->curr_conn) + return NULL; + + return hdmi->curr_conn->eld; +} + static void dw_hdmi_ahb_audio_enable(struct dw_hdmi *hdmi) { hdmi_set_cts_n(hdmi, hdmi->audio_cts, hdmi->audio_n); @@ -3413,6 +3421,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, hdmi->bridge.funcs = &dw_hdmi_bridge_funcs; hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD; + hdmi->bridge.interlace_allowed = true; #ifdef CONFIG_OF hdmi->bridge.of_node = pdev->dev.of_node; #endif @@ -3431,7 +3440,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, audio.base = hdmi->regs; audio.irq = irq; audio.hdmi = hdmi; - audio.eld = hdmi->connector.eld; + audio.get_eld = hdmi_audio_get_eld; hdmi->enable_audio = dw_hdmi_ahb_audio_enable; hdmi->disable_audio = dw_hdmi_ahb_audio_disable; @@ -3444,7 +3453,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, struct dw_hdmi_i2s_audio_data audio; audio.hdmi = hdmi; - audio.eld = hdmi->connector.eld; + audio.get_eld = hdmi_audio_get_eld; audio.write = hdmi_writeb; audio.read = hdmi_readb; hdmi->enable_audio = dw_hdmi_i2s_audio_enable; diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c index a3db532bbdd1..fd585bf925fe 100644 --- a/drivers/gpu/drm/bridge/tc358768.c +++ b/drivers/gpu/drm/bridge/tc358768.c @@ -237,6 +237,10 @@ static void tc358768_hw_enable(struct tc358768_priv *priv) if (priv->enabled) return; + ret = clk_prepare_enable(priv->refclk); + if (ret < 0) + dev_err(priv->dev, "error enabling refclk (%d)\n", ret); + ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies), priv->supplies); if (ret < 0) dev_err(priv->dev, "error enabling regulators (%d)\n", ret); @@ -274,6 +278,8 @@ static void tc358768_hw_disable(struct tc358768_priv *priv) if (ret < 0) dev_err(priv->dev, "error disabling regulators (%d)\n", ret); + clk_disable_unprepare(priv->refclk); + priv->enabled = false; } @@ -625,12 +631,19 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) { struct tc358768_priv *priv = bridge_to_tc358768(bridge); struct mipi_dsi_device *dsi_dev = priv->output.dev; + unsigned long mode_flags = dsi_dev->mode_flags; u32 val, val2, lptxcnt, hact, data_type; const struct drm_display_mode *mode; u32 dsibclk_nsk, dsiclk_nsk, ui_nsk, phy_delay_nsk; - u32 dsiclk, dsibclk; + u32 dsiclk, dsibclk, video_start; + const u32 internal_delay = 40; int ret, i; + if (mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) { + dev_warn_once(priv->dev, "Non-continuous mode unimplemented, falling back to continuous\n"); + mode_flags &= ~MIPI_DSI_CLOCK_NON_CONTINUOUS; + } + tc358768_hw_enable(priv); ret = tc358768_sw_reset(priv); @@ -657,23 +670,27 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) case MIPI_DSI_FMT_RGB888: val |= (0x3 << 4); hact = mode->hdisplay * 3; + video_start = (mode->htotal - mode->hsync_start) * 3; data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24; break; case MIPI_DSI_FMT_RGB666: val |= (0x4 << 4); hact = mode->hdisplay * 3; + video_start = (mode->htotal - mode->hsync_start) * 3; data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18; break; case MIPI_DSI_FMT_RGB666_PACKED: val |= (0x4 << 4) | BIT(3); hact = mode->hdisplay * 18 / 8; + video_start = (mode->htotal - mode->hsync_start) * 18 / 8; data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18; break; case MIPI_DSI_FMT_RGB565: val |= (0x5 << 4); hact = mode->hdisplay * 2; + video_start = (mode->htotal - mode->hsync_start) * 2; data_type = 
MIPI_DSI_PACKED_PIXEL_STREAM_16; break; default: @@ -684,7 +701,8 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) } /* VSDly[9:0] */ - tc358768_write(priv, TC358768_VSDLY, 1); + video_start = max(video_start, internal_delay + 1) - internal_delay; + tc358768_write(priv, TC358768_VSDLY, video_start); tc358768_write(priv, TC358768_DATAFMT, val); tc358768_write(priv, TC358768_DSITX_DT, data_type); @@ -764,7 +782,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) val |= BIT(i + 1); tc358768_write(priv, TC358768_HSTXVREGEN, val); - if (!(dsi_dev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) + if (!(mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) tc358768_write(priv, TC358768_TXOPTIONCNTRL, 0x1); /* TXTAGOCNT[26:16] RXTASURECNT[10:0] */ @@ -772,31 +790,61 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1; val2 = tc358768_ns_to_cnt(tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk), dsibclk_nsk) - 2; - val |= val2 << 16; + val = val << 16 | val2; dev_dbg(priv->dev, "BTACNTRL1: 0x%x\n", val); tc358768_write(priv, TC358768_BTACNTRL1, val); /* START[0] */ tc358768_write(priv, TC358768_STARTCNTRL, 1); - /* Set event mode */ - tc358768_write(priv, TC358768_DSI_EVENT, 1); - - /* vsw (+ vbp) */ - tc358768_write(priv, TC358768_DSI_VSW, - mode->vtotal - mode->vsync_start); - /* vbp (not used in event mode) */ - tc358768_write(priv, TC358768_DSI_VBPR, 0); - /* vact */ - tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay); - - /* (hsw + hbp) * byteclk * ndl / pclk */ - val = (u32)div_u64((mode->htotal - mode->hsync_start) * - ((u64)priv->dsiclk / 4) * priv->dsi_lanes, - mode->clock * 1000); - tc358768_write(priv, TC358768_DSI_HSW, val); - /* hbp (not used in event mode) */ - tc358768_write(priv, TC358768_DSI_HBPR, 0); + if (dsi_dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) { + /* Set pulse mode */ + tc358768_write(priv, TC358768_DSI_EVENT, 0); + + /* vact */ + tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay); + + /* vsw */ + tc358768_write(priv, TC358768_DSI_VSW, + mode->vsync_end - mode->vsync_start); + /* vbp */ + tc358768_write(priv, TC358768_DSI_VBPR, + mode->vtotal - mode->vsync_end); + + /* hsw * byteclk * ndl / pclk */ + val = (u32)div_u64((mode->hsync_end - mode->hsync_start) * + ((u64)priv->dsiclk / 4) * priv->dsi_lanes, + mode->clock * 1000); + tc358768_write(priv, TC358768_DSI_HSW, val); + + /* hbp * byteclk * ndl / pclk */ + val = (u32)div_u64((mode->htotal - mode->hsync_end) * + ((u64)priv->dsiclk / 4) * priv->dsi_lanes, + mode->clock * 1000); + tc358768_write(priv, TC358768_DSI_HBPR, val); + } else { + /* Set event mode */ + tc358768_write(priv, TC358768_DSI_EVENT, 1); + + /* vact */ + tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay); + + /* vsw (+ vbp) */ + tc358768_write(priv, TC358768_DSI_VSW, + mode->vtotal - mode->vsync_start); + /* vbp (not used in event mode) */ + tc358768_write(priv, TC358768_DSI_VBPR, 0); + + /* (hsw + hbp) * byteclk * ndl / pclk */ + val = (u32)div_u64((mode->htotal - mode->hsync_start) * + ((u64)priv->dsiclk / 4) * priv->dsi_lanes, + mode->clock * 1000); + tc358768_write(priv, TC358768_DSI_HSW, val); + + /* hbp (not used in event mode) */ + tc358768_write(priv, TC358768_DSI_HBPR, 0); + } + /* hact (bytes) */ tc358768_write(priv, TC358768_DSI_HACT, hact); @@ -822,7 +870,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) if (!(dsi_dev->mode_flags & MIPI_DSI_MODE_LPM)) val |= TC358768_DSI_CONTROL_TXMD; - if (!(dsi_dev->mode_flags & 
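The pulse-mode HSW/HBP writes above convert pixel counts into DSI byte-clock counts: pixels times byte clock times lane count, divided by the pixel clock, where this driver derives the byte clock as dsiclk / 4. A runnable version of that arithmetic with made-up but representative timing numbers:

        #include <stdio.h>
        #include <stdint.h>

        /* 'px' pixels at 'pclk_hz', expressed in byte clocks across 'lanes'. */
        static uint32_t px_to_byteclk(uint32_t px, uint64_t dsiclk_hz,
                                      unsigned int lanes, uint32_t pclk_hz)
        {
                uint64_t byteclk = dsiclk_hz / 4;   /* byte clock per the driver */

                return (uint32_t)(px * byteclk * lanes / pclk_hz);
        }

        int main(void)
        {
                /* Illustrative 1080p-like timing: 44 px hsync, 148.5 MHz pclk,
                 * 891 MHz DSI clock, 4 lanes -> 264 byte clocks.
                 */
                printf("HSW = %u byte clocks\n",
                       px_to_byteclk(44, 891000000ULL, 4, 148500000));
                return 0;
        }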
MIPI_DSI_CLOCK_NON_CONTINUOUS)) + if (!(mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) val |= TC358768_DSI_CONTROL_HSCKMD; if (dsi_dev->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET) diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c index 2272adcc5b4a..2c76331b251d 100644 --- a/drivers/gpu/drm/bridge/tc358775.c +++ b/drivers/gpu/drm/bridge/tc358775.c @@ -594,11 +594,26 @@ static int tc_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct tc_data *tc = bridge_to_tc(bridge); + + /* Attach the panel-bridge to the dsi bridge */ + return drm_bridge_attach(bridge->encoder, tc->panel_bridge, + &tc->bridge, flags); +} + +static const struct drm_bridge_funcs tc_bridge_funcs = { + .attach = tc_bridge_attach, + .pre_enable = tc_bridge_pre_enable, + .enable = tc_bridge_enable, + .mode_valid = tc_mode_valid, + .post_disable = tc_bridge_post_disable, +}; + +static int tc_attach_host(struct tc_data *tc) +{ struct device *dev = &tc->i2c->dev; struct mipi_dsi_host *host; struct mipi_dsi_device *dsi; int ret; - const struct mipi_dsi_device_info info = { .type = "tc358775", .channel = 0, .node = NULL, @@ -610,11 +625,10 @@ static int tc_bridge_attach(struct drm_bridge *bridge, return -EPROBE_DEFER; } - dsi = mipi_dsi_device_register_full(host, &info); + dsi = devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) { dev_err(dev, "failed to create dsi device\n"); - ret = PTR_ERR(dsi); - goto err_dsi_device; + return PTR_ERR(dsi); } tc->dsi = dsi; @@ -623,29 +637,15 @@ static int tc_bridge_attach(struct drm_bridge *bridge, dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO; - ret = mipi_dsi_attach(dsi); + ret = devm_mipi_dsi_attach(dev, dsi); if (ret < 0) { dev_err(dev, "failed to attach dsi to host\n"); - goto err_dsi_attach; + return ret; } - /* Attach the panel-bridge to the dsi bridge */ - return drm_bridge_attach(bridge->encoder, tc->panel_bridge, - &tc->bridge, flags); -err_dsi_attach: - mipi_dsi_device_unregister(dsi); -err_dsi_device: - return ret; + return 0; } -static const struct drm_bridge_funcs tc_bridge_funcs = { - .attach = tc_bridge_attach, - .pre_enable = tc_bridge_pre_enable, - .enable = tc_bridge_enable, - .mode_valid = tc_mode_valid, - .post_disable = tc_bridge_post_disable, -}; - static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct device *dev = &client->dev; @@ -709,7 +709,15 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id) i2c_set_clientdata(client, tc); + ret = tc_attach_host(tc); + if (ret) + goto err_bridge_remove; + return 0; + +err_bridge_remove: + drm_bridge_remove(&tc->bridge); + return ret; } static int tc_remove(struct i2c_client *client) diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c index ba1160ec6d6e..945f08de45f1 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c @@ -245,47 +245,9 @@ static int sn65dsi83_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); - struct device *dev = ctx->dev; - struct mipi_dsi_device *dsi; - struct mipi_dsi_host *host; - int ret = 0; - - const struct mipi_dsi_device_info info = { - .type = "sn65dsi83", - .channel = 0, - .node = NULL, - }; - - host = of_find_mipi_dsi_host_by_node(ctx->host_node); - if (!host) { - dev_err(dev, "failed to find dsi host\n"); - return -EPROBE_DEFER; - } - - dsi = mipi_dsi_device_register_full(host, 
&info); - if (IS_ERR(dsi)) { - return dev_err_probe(dev, PTR_ERR(dsi), - "failed to create dsi device\n"); - } - - ctx->dsi = dsi; - - dsi->lanes = ctx->dsi_lanes; - dsi->format = MIPI_DSI_FMT_RGB888; - dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST; - - ret = mipi_dsi_attach(dsi); - if (ret < 0) { - dev_err(dev, "failed to attach dsi to host\n"); - goto err_dsi_attach; - } return drm_bridge_attach(bridge->encoder, ctx->panel_bridge, &ctx->bridge, flags); - -err_dsi_attach: - mipi_dsi_device_unregister(dsi); - return ret; } static void sn65dsi83_detach(struct drm_bridge *bridge) @@ -295,28 +257,9 @@ static void sn65dsi83_detach(struct drm_bridge *bridge) if (!ctx->dsi) return; - mipi_dsi_detach(ctx->dsi); - mipi_dsi_device_unregister(ctx->dsi); - drm_bridge_remove(&ctx->bridge); ctx->dsi = NULL; } -static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge, - struct drm_bridge_state *old_bridge_state) -{ - struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); - - /* - * Reset the chip, pull EN line low for t_reset=10ms, - * then high for t_en=1ms. - */ - regcache_mark_dirty(ctx->regmap); - gpiod_set_value(ctx->enable_gpio, 0); - usleep_range(10000, 11000); - gpiod_set_value(ctx->enable_gpio, 1); - usleep_range(1000, 1100); -} - static u8 sn65dsi83_get_lvds_range(struct sn65dsi83 *ctx, const struct drm_display_mode *mode) { @@ -394,6 +337,10 @@ static void sn65dsi83_atomic_enable(struct drm_bridge *bridge, u16 val; int ret; + /* Deassert reset */ + gpiod_set_value(ctx->enable_gpio, 1); + usleep_range(1000, 1100); + /* Get the LVDS format from the bridge state. */ bridge_state = drm_atomic_get_new_bridge_state(state, bridge); @@ -540,18 +487,11 @@ static void sn65dsi83_atomic_disable(struct drm_bridge *bridge, { struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); - /* Clear reset, disable PLL */ - regmap_write(ctx->regmap, REG_RC_RESET, 0x00); - regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00); -} - -static void sn65dsi83_atomic_post_disable(struct drm_bridge *bridge, - struct drm_bridge_state *old_bridge_state) -{ - struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); - - /* Put the chip in reset, pull EN line low. */ + /* Put the chip in reset, pull EN line low, and assure 10ms reset low timing. 
*/ gpiod_set_value(ctx->enable_gpio, 0); + usleep_range(10000, 11000); + + regcache_mark_dirty(ctx->regmap); } static enum drm_mode_status @@ -597,10 +537,8 @@ sn65dsi83_atomic_get_input_bus_fmts(struct drm_bridge *bridge, static const struct drm_bridge_funcs sn65dsi83_funcs = { .attach = sn65dsi83_attach, .detach = sn65dsi83_detach, - .atomic_pre_enable = sn65dsi83_atomic_pre_enable, .atomic_enable = sn65dsi83_atomic_enable, .atomic_disable = sn65dsi83_atomic_disable, - .atomic_post_disable = sn65dsi83_atomic_post_disable, .mode_valid = sn65dsi83_mode_valid, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, @@ -664,6 +602,44 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model) return 0; } +static int sn65dsi83_host_attach(struct sn65dsi83 *ctx) +{ + struct device *dev = ctx->dev; + struct mipi_dsi_device *dsi; + struct mipi_dsi_host *host; + const struct mipi_dsi_device_info info = { + .type = "sn65dsi83", + .channel = 0, + .node = NULL, + }; + int ret; + + host = of_find_mipi_dsi_host_by_node(ctx->host_node); + if (!host) { + dev_err(dev, "failed to find dsi host\n"); + return -EPROBE_DEFER; + } + + dsi = devm_mipi_dsi_device_register_full(dev, host, &info); + if (IS_ERR(dsi)) + return dev_err_probe(dev, PTR_ERR(dsi), + "failed to create dsi device\n"); + + ctx->dsi = dsi; + + dsi->lanes = ctx->dsi_lanes; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST; + + ret = devm_mipi_dsi_attach(dev, dsi); + if (ret < 0) { + dev_err(dev, "failed to attach dsi to host: %d\n", ret); + return ret; + } + + return 0; +} + static int sn65dsi83_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -685,10 +661,13 @@ static int sn65dsi83_probe(struct i2c_client *client, model = id->driver_data; } + /* Put the chip in reset, pull EN line low, and assure 10ms reset low timing. 
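The sn65dsi83 rework above moves the reset handshake out of atomic_pre_enable()/atomic_post_disable() and into probe, enable and disable: EN is held low for at least 10 ms (t_reset) and raised at least 1 ms (t_en) before the first register access. The sequencing in isolation (function names hypothetical; timings taken from the comments above):

        #include <linux/delay.h>
        #include <linux/gpio/consumer.h>

        /* Assert reset: EN low for >= 10 ms so the chip is guaranteed reset. */
        static void my_chip_reset(struct gpio_desc *en)
        {
                gpiod_set_value(en, 0);
                usleep_range(10000, 11000);
        }

        /* Release reset: EN high, then >= 1 ms before the first I2C access. */
        static void my_chip_enable(struct gpio_desc *en)
        {
                gpiod_set_value(en, 1);
                usleep_range(1000, 1100);
        }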
*/ ctx->enable_gpio = devm_gpiod_get(ctx->dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(ctx->enable_gpio)) return PTR_ERR(ctx->enable_gpio); + usleep_range(10000, 11000); + ret = sn65dsi83_parse_dt(ctx, model); if (ret) return ret; @@ -704,13 +683,22 @@ static int sn65dsi83_probe(struct i2c_client *client, ctx->bridge.of_node = dev->of_node; drm_bridge_add(&ctx->bridge); + ret = sn65dsi83_host_attach(ctx); + if (ret) + goto err_remove_bridge; + return 0; + +err_remove_bridge: + drm_bridge_remove(&ctx->bridge); + return ret; } static int sn65dsi83_remove(struct i2c_client *client) { struct sn65dsi83 *ctx = i2c_get_clientdata(client); + drm_bridge_remove(&ctx->bridge); of_node_put(ctx->host_node); return 0; diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 6154bed0af5b..dab8f76618f3 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -4,7 +4,9 @@ * datasheet: https://www.ti.com/lit/ds/symlink/sn65dsi86.pdf */ +#include <linux/atomic.h> #include <linux/auxiliary_bus.h> +#include <linux/bitfield.h> #include <linux/bits.h> #include <linux/clk.h> #include <linux/debugfs.h> @@ -15,6 +17,7 @@ #include <linux/module.h> #include <linux/of_graph.h> #include <linux/pm_runtime.h> +#include <linux/pwm.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> @@ -91,6 +94,13 @@ #define SN_ML_TX_MODE_REG 0x96 #define ML_TX_MAIN_LINK_OFF 0 #define ML_TX_NORMAL_MODE BIT(0) +#define SN_PWM_PRE_DIV_REG 0xA0 +#define SN_BACKLIGHT_SCALE_REG 0xA1 +#define BACKLIGHT_SCALE_MAX 0xFFFF +#define SN_BACKLIGHT_REG 0xA3 +#define SN_PWM_EN_INV_REG 0xA5 +#define SN_PWM_INV_MASK BIT(0) +#define SN_PWM_EN_MASK BIT(1) #define SN_AUX_CMD_STATUS_REG 0xF4 #define AUX_IRQ_STATUS_AUX_RPLY_TOUT BIT(3) #define AUX_IRQ_STATUS_AUX_SHORT BIT(5) @@ -113,11 +123,14 @@ #define SN_LINK_TRAINING_TRIES 10 +#define SN_PWM_GPIO_IDX 3 /* 4th GPIO */ + /** * struct ti_sn65dsi86 - Platform data for ti-sn65dsi86 driver. * @bridge_aux: AUX-bus sub device for MIPI-to-eDP bridge functionality. * @gpio_aux: AUX-bus sub device for GPIO controller functionality. * @aux_aux: AUX-bus sub device for eDP AUX channel functionality. + * @pwm_aux: AUX-bus sub device for PWM controller functionality. * * @dev: Pointer to the top level (i2c) device. * @regmap: Regmap for accessing i2c. @@ -145,11 +158,17 @@ * bitmap so we can do atomic ops on it without an extra * lock so concurrent users of our 4 GPIOs don't stomp on * each other's read-modify-write. + * + * @pchip: pwm_chip if the PWM is exposed. + * @pwm_enabled: Used to track if the PWM signal is currently enabled. + * @pwm_pin_busy: Track if GPIO4 is currently requested for GPIO or PWM. + * @pwm_refclk_freq: Cache for the reference clock input to the PWM. 
*/ struct ti_sn65dsi86 { struct auxiliary_device bridge_aux; struct auxiliary_device gpio_aux; struct auxiliary_device aux_aux; + struct auxiliary_device pwm_aux; struct device *dev; struct regmap *regmap; @@ -172,6 +191,12 @@ struct ti_sn65dsi86 { struct gpio_chip gchip; DECLARE_BITMAP(gchip_output, SN_NUM_GPIOS); #endif +#if defined(CONFIG_PWM) + struct pwm_chip pchip; + bool pwm_enabled; + atomic_t pwm_pin_busy; +#endif + unsigned int pwm_refclk_freq; }; static const struct regmap_range ti_sn65dsi86_volatile_ranges[] = { @@ -188,13 +213,30 @@ static const struct regmap_config ti_sn65dsi86_regmap_config = { .val_bits = 8, .volatile_table = &ti_sn_bridge_volatile_table, .cache_type = REGCACHE_NONE, + .max_register = 0xFF, }; +static int __maybe_unused ti_sn65dsi86_read_u16(struct ti_sn65dsi86 *pdata, + unsigned int reg, u16 *val) +{ + u8 buf[2]; + int ret; + + ret = regmap_bulk_read(pdata->regmap, reg, buf, ARRAY_SIZE(buf)); + if (ret) + return ret; + + *val = buf[0] | (buf[1] << 8); + + return 0; +} + static void ti_sn65dsi86_write_u16(struct ti_sn65dsi86 *pdata, unsigned int reg, u16 val) { - regmap_write(pdata->regmap, reg, val & 0xFF); - regmap_write(pdata->regmap, reg + 1, val >> 8); + u8 buf[2] = { val & 0xff, val >> 8 }; + + regmap_bulk_write(pdata->regmap, reg, buf, ARRAY_SIZE(buf)); } static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn65dsi86 *pdata) @@ -253,6 +295,12 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata) regmap_update_bits(pdata->regmap, SN_DPPLL_SRC_REG, REFCLK_FREQ_MASK, REFCLK_FREQ(i)); + + /* + * The PWM refclk is based on the value written to SN_DPPLL_SRC_REG, + * regardless of its actual sourcing. + */ + pdata->pwm_refclk_freq = ti_sn_bridge_refclk_lut[i]; } static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata) @@ -655,58 +703,24 @@ static struct ti_sn65dsi86 *bridge_to_ti_sn65dsi86(struct drm_bridge *bridge) return container_of(bridge, struct ti_sn65dsi86, bridge); } -static int ti_sn_bridge_attach(struct drm_bridge *bridge, - enum drm_bridge_attach_flags flags) +static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata) { - int ret, val; - struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); + int val; struct mipi_dsi_host *host; struct mipi_dsi_device *dsi; + struct device *dev = pdata->dev; const struct mipi_dsi_device_info info = { .type = "ti_sn_bridge", .channel = 0, .node = NULL, - }; - - if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) { - DRM_ERROR("Fix bridge driver to make connector optional!"); - return -EINVAL; - } - - pdata->aux.drm_dev = bridge->dev; - ret = drm_dp_aux_register(&pdata->aux); - if (ret < 0) { - drm_err(bridge->dev, "Failed to register DP AUX channel: %d\n", ret); - return ret; - } - - ret = ti_sn_bridge_connector_init(pdata); - if (ret < 0) - goto err_conn_init; + }; - /* - * TODO: ideally finding host resource and dsi dev registration needs - * to be done in bridge probe. But some existing DSI host drivers will - * wait for any of the drm_bridge/drm_panel to get added to the global - * bridge/panel list, before completing their probe. So if we do the - * dsi dev registration part in bridge probe, before populating in - * the global bridge list, then it will cause deadlock as dsi host probe - * will never complete, neither our bridge probe. So keeping it here - * will satisfy most of the existing host drivers. Once the host driver - * is fixed we can move the below code to bridge probe safely. 
- */ host = of_find_mipi_dsi_host_by_node(pdata->host_node); - if (!host) { - DRM_ERROR("failed to find dsi host\n"); - ret = -ENODEV; - goto err_dsi_host; - } + if (!host) + return -EPROBE_DEFER; - dsi = mipi_dsi_device_register_full(host, &info); - if (IS_ERR(dsi)) { - DRM_ERROR("failed to create dsi device\n"); - ret = PTR_ERR(dsi); - goto err_dsi_host; - } + dsi = devm_mipi_dsi_device_register_full(dev, host, &info); + if (IS_ERR(dsi)) + return PTR_ERR(dsi); /* TODO: setting to 4 MIPI lanes always for now */ dsi->lanes = 4; @@ -714,18 +728,38 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge, dsi->mode_flags = MIPI_DSI_MODE_VIDEO; /* check if continuous dsi clock is required or not */ - pm_runtime_get_sync(pdata->dev); + pm_runtime_get_sync(dev); regmap_read(pdata->regmap, SN_DPPLL_SRC_REG, &val); - pm_runtime_put_autosuspend(pdata->dev); + pm_runtime_put_autosuspend(dev); if (!(val & DPPLL_CLK_SRC_DSICLK)) dsi->mode_flags |= MIPI_DSI_CLOCK_NON_CONTINUOUS; - ret = mipi_dsi_attach(dsi); + pdata->dsi = dsi; + + return devm_mipi_dsi_attach(dev, dsi); +} + +static int ti_sn_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); + int ret; + + if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) { + DRM_ERROR("Fix bridge driver to make connector optional!"); + return -EINVAL; + } + + pdata->aux.drm_dev = bridge->dev; + ret = drm_dp_aux_register(&pdata->aux); if (ret < 0) { - DRM_ERROR("failed to attach dsi to host\n"); - goto err_dsi_attach; + drm_err(bridge->dev, "Failed to register DP AUX channel: %d\n", ret); + return ret; } - pdata->dsi = dsi; + + ret = ti_sn_bridge_connector_init(pdata); + if (ret < 0) + goto err_conn_init; /* We never want the next bridge to *also* create a connector: */ flags |= DRM_BRIDGE_ATTACH_NO_CONNECTOR; @@ -734,14 +768,10 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge, ret = drm_bridge_attach(bridge->encoder, pdata->next_bridge, &pdata->bridge, flags); if (ret < 0) - goto err_dsi_detach; + goto err_dsi_host; return 0; -err_dsi_detach: - mipi_dsi_detach(dsi); -err_dsi_attach: - mipi_dsi_device_unregister(dsi); err_dsi_host: drm_connector_cleanup(&pdata->connector); err_conn_init: @@ -1227,7 +1257,17 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev, drm_bridge_add(&pdata->bridge); + ret = ti_sn_attach_host(pdata); + if (ret) { + dev_err_probe(pdata->dev, ret, "failed to attach dsi host\n"); + goto err_remove_bridge; + } + return 0; + +err_remove_bridge: + drm_bridge_remove(&pdata->bridge); + return ret; } static void ti_sn_bridge_remove(struct auxiliary_device *adev) @@ -1237,11 +1277,6 @@ static void ti_sn_bridge_remove(struct auxiliary_device *adev) if (!pdata) return; - if (pdata->dsi) { - mipi_dsi_detach(pdata->dsi); - mipi_dsi_device_unregister(pdata->dsi); - } - drm_bridge_remove(&pdata->bridge); of_node_put(pdata->host_node); @@ -1260,9 +1295,287 @@ static struct auxiliary_driver ti_sn_bridge_driver = { }; /* ----------------------------------------------------------------------------- - * GPIO Controller + * PWM Controller */ +#if defined(CONFIG_PWM) +static int ti_sn_pwm_pin_request(struct ti_sn65dsi86 *pdata) +{ + return atomic_xchg(&pdata->pwm_pin_busy, 1) ? 
-EBUSY : 0; +} + +static void ti_sn_pwm_pin_release(struct ti_sn65dsi86 *pdata) +{ + atomic_set(&pdata->pwm_pin_busy, 0); +} + +static struct ti_sn65dsi86 *pwm_chip_to_ti_sn_bridge(struct pwm_chip *chip) +{ + return container_of(chip, struct ti_sn65dsi86, pchip); +} + +static int ti_sn_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct ti_sn65dsi86 *pdata = pwm_chip_to_ti_sn_bridge(chip); + + return ti_sn_pwm_pin_request(pdata); +} + +static void ti_sn_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct ti_sn65dsi86 *pdata = pwm_chip_to_ti_sn_bridge(chip); + + ti_sn_pwm_pin_release(pdata); +} + +/* + * Limitations: + * - The PWM signal is not driven when the chip is powered down, or in its + * reset state and the driver does not implement the "suspend state" + * described in the documentation. In order to save power, state->enabled is + * interpreted as denoting if the signal is expected to be valid, and is used + * to determine if the chip needs to be kept powered. + * - Changing both period and duty_cycle is not done atomically, neither are the + * multi-byte register updates, so the output might briefly be undefined + * during update. + */ +static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + struct ti_sn65dsi86 *pdata = pwm_chip_to_ti_sn_bridge(chip); + unsigned int pwm_en_inv; + unsigned int backlight; + unsigned int pre_div; + unsigned int scale; + u64 period_max; + u64 period; + int ret; + + if (!pdata->pwm_enabled) { + ret = pm_runtime_get_sync(pdata->dev); + if (ret < 0) { + pm_runtime_put_sync(pdata->dev); + return ret; + } + } + + if (state->enabled) { + if (!pdata->pwm_enabled) { + /* + * The chip might have been powered down while we + * didn't hold a PM runtime reference, so mux in the + * PWM function on the GPIO pin again. + */ + ret = regmap_update_bits(pdata->regmap, SN_GPIO_CTRL_REG, + SN_GPIO_MUX_MASK << (2 * SN_PWM_GPIO_IDX), + SN_GPIO_MUX_SPECIAL << (2 * SN_PWM_GPIO_IDX)); + if (ret) { + dev_err(pdata->dev, "failed to mux in PWM function\n"); + goto out; + } + } + + /* + * Per the datasheet the PWM frequency is given by: + * + * REFCLK_FREQ + * PWM_FREQ = ----------------------------------- + * PWM_PRE_DIV * BACKLIGHT_SCALE + 1 + * + * However, after careful review the author is convinced that + * the documentation has lost some parentheses around + * "BACKLIGHT_SCALE + 1". + * + * With the period T_pwm = 1/PWM_FREQ this can be written: + * + * T_pwm * REFCLK_FREQ = PWM_PRE_DIV * (BACKLIGHT_SCALE + 1) + * + * In order to keep BACKLIGHT_SCALE within its 16 bits, + * PWM_PRE_DIV must be: + * + * T_pwm * REFCLK_FREQ + * PWM_PRE_DIV >= ------------------------- + * BACKLIGHT_SCALE_MAX + 1 + * + * To simplify the search and to favour higher resolution of + * the duty cycle over accuracy of the period, the lowest + * possible PWM_PRE_DIV is used. Finally the scale is + * calculated as: + * + * T_pwm * REFCLK_FREQ + * BACKLIGHT_SCALE = ---------------------- - 1 + * PWM_PRE_DIV + * + * Here T_pwm is represented in seconds, so appropriate scaling + * to nanoseconds is necessary.
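+ * + * A worked example (illustrative numbers, not taken from the datasheet): + * with REFCLK_FREQ = 19.2 MHz and a requested period of 1 ms, + * T_pwm * REFCLK_FREQ = 19200. That is below BACKLIGHT_SCALE_MAX + 1, + * so PWM_PRE_DIV = 1 and BACKLIGHT_SCALE = 19200 / 1 - 1 = 19199, + * i.e. 19200 distinct duty-cycle steps.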
+ */ + + /* Minimum T_pwm is 1 / REFCLK_FREQ */ + if (state->period <= NSEC_PER_SEC / pdata->pwm_refclk_freq) { + ret = -EINVAL; + goto out; + } + + /* + * Maximum T_pwm is 255 * (65535 + 1) / REFCLK_FREQ + * Limit period to this to avoid overflows + */ + period_max = div_u64((u64)NSEC_PER_SEC * 255 * (65535 + 1), + pdata->pwm_refclk_freq); + period = min(state->period, period_max); + + pre_div = DIV64_U64_ROUND_UP(period * pdata->pwm_refclk_freq, + (u64)NSEC_PER_SEC * (BACKLIGHT_SCALE_MAX + 1)); + scale = div64_u64(period * pdata->pwm_refclk_freq, (u64)NSEC_PER_SEC * pre_div) - 1; + + /* + * The documentation has the duty ratio given as: + * + * duty BACKLIGHT + * ------- = --------------------- + * period BACKLIGHT_SCALE + 1 + * + * Solve for BACKLIGHT, substituting BACKLIGHT_SCALE according + * to definition above and adjusting for nanosecond + * representation of duty cycle gives us: + */ + backlight = div64_u64(state->duty_cycle * pdata->pwm_refclk_freq, + (u64)NSEC_PER_SEC * pre_div); + if (backlight > scale) + backlight = scale; + + ret = regmap_write(pdata->regmap, SN_PWM_PRE_DIV_REG, pre_div); + if (ret) { + dev_err(pdata->dev, "failed to update PWM_PRE_DIV\n"); + goto out; + } + + ti_sn65dsi86_write_u16(pdata, SN_BACKLIGHT_SCALE_REG, scale); + ti_sn65dsi86_write_u16(pdata, SN_BACKLIGHT_REG, backlight); + } + + pwm_en_inv = FIELD_PREP(SN_PWM_EN_MASK, state->enabled) | + FIELD_PREP(SN_PWM_INV_MASK, state->polarity == PWM_POLARITY_INVERSED); + ret = regmap_write(pdata->regmap, SN_PWM_EN_INV_REG, pwm_en_inv); + if (ret) { + dev_err(pdata->dev, "failed to update PWM_EN/PWM_INV\n"); + goto out; + } + + pdata->pwm_enabled = state->enabled; +out: + + if (!pdata->pwm_enabled) + pm_runtime_put_sync(pdata->dev); + + return ret; +} + +static void ti_sn_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) +{ + struct ti_sn65dsi86 *pdata = pwm_chip_to_ti_sn_bridge(chip); + unsigned int pwm_en_inv; + unsigned int pre_div; + u16 backlight; + u16 scale; + int ret; + + ret = regmap_read(pdata->regmap, SN_PWM_EN_INV_REG, &pwm_en_inv); + if (ret) + return; + + ret = ti_sn65dsi86_read_u16(pdata, SN_BACKLIGHT_SCALE_REG, &scale); + if (ret) + return; + + ret = ti_sn65dsi86_read_u16(pdata, SN_BACKLIGHT_REG, &backlight); + if (ret) + return; + + ret = regmap_read(pdata->regmap, SN_PWM_PRE_DIV_REG, &pre_div); + if (ret) + return; + + state->enabled = FIELD_GET(SN_PWM_EN_MASK, pwm_en_inv); + if (FIELD_GET(SN_PWM_INV_MASK, pwm_en_inv)) + state->polarity = PWM_POLARITY_INVERSED; + else + state->polarity = PWM_POLARITY_NORMAL; + + state->period = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * pre_div * (scale + 1), + pdata->pwm_refclk_freq); + state->duty_cycle = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * pre_div * backlight, + pdata->pwm_refclk_freq); + + if (state->duty_cycle > state->period) + state->duty_cycle = state->period; +} +static const struct pwm_ops ti_sn_pwm_ops = { + .request = ti_sn_pwm_request, + .free = ti_sn_pwm_free, + .apply = ti_sn_pwm_apply, + .get_state = ti_sn_pwm_get_state, + .owner = THIS_MODULE, +}; + +static int ti_sn_pwm_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent); + + pdata->pchip.dev = pdata->dev; + pdata->pchip.ops = &ti_sn_pwm_ops; + pdata->pchip.npwm = 1; + pdata->pchip.of_xlate = of_pwm_single_xlate; + pdata->pchip.of_pwm_n_cells = 1; + + return pwmchip_add(&pdata->pchip); +} + +static void ti_sn_pwm_remove(struct auxiliary_device *adev) +{ + struct 
ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent); + + pwmchip_remove(&pdata->pchip); + + if (pdata->pwm_enabled) + pm_runtime_put_sync(pdata->dev); +} + +static const struct auxiliary_device_id ti_sn_pwm_id_table[] = { + { .name = "ti_sn65dsi86.pwm", }, + {}, +}; + +static struct auxiliary_driver ti_sn_pwm_driver = { + .name = "pwm", + .probe = ti_sn_pwm_probe, + .remove = ti_sn_pwm_remove, + .id_table = ti_sn_pwm_id_table, +}; + +static int __init ti_sn_pwm_register(void) +{ + return auxiliary_driver_register(&ti_sn_pwm_driver); +} + +static void ti_sn_pwm_unregister(void) +{ + auxiliary_driver_unregister(&ti_sn_pwm_driver); +} + +#else +static inline int ti_sn_pwm_pin_request(struct ti_sn65dsi86 *pdata) { return 0; } +static inline void ti_sn_pwm_pin_release(struct ti_sn65dsi86 *pdata) {} + +static inline int ti_sn_pwm_register(void) { return 0; } +static inline void ti_sn_pwm_unregister(void) {} +#endif + +/* ----------------------------------------------------------------------------- + * GPIO Controller + */ #if defined(CONFIG_OF_GPIO) static int tn_sn_bridge_of_xlate(struct gpio_chip *chip, @@ -1395,10 +1708,25 @@ static int ti_sn_bridge_gpio_direction_output(struct gpio_chip *chip, return ret; } +static int ti_sn_bridge_gpio_request(struct gpio_chip *chip, unsigned int offset) +{ + struct ti_sn65dsi86 *pdata = gpiochip_get_data(chip); + + if (offset == SN_PWM_GPIO_IDX) + return ti_sn_pwm_pin_request(pdata); + + return 0; +} + static void ti_sn_bridge_gpio_free(struct gpio_chip *chip, unsigned int offset) { + struct ti_sn65dsi86 *pdata = gpiochip_get_data(chip); + /* We won't keep pm_runtime if we're input, so switch there on free */ ti_sn_bridge_gpio_direction_input(chip, offset); + + if (offset == SN_PWM_GPIO_IDX) + ti_sn_pwm_pin_release(pdata); } static const char * const ti_sn_bridge_gpio_names[SN_NUM_GPIOS] = { @@ -1420,6 +1748,7 @@ static int ti_sn_gpio_probe(struct auxiliary_device *adev, pdata->gchip.owner = THIS_MODULE; pdata->gchip.of_xlate = tn_sn_bridge_of_xlate; pdata->gchip.of_gpio_n_cells = 2; + pdata->gchip.request = ti_sn_bridge_gpio_request; pdata->gchip.free = ti_sn_bridge_gpio_free; pdata->gchip.get_direction = ti_sn_bridge_gpio_get_direction; pdata->gchip.direction_input = ti_sn_bridge_gpio_direction_input; @@ -1546,10 +1875,9 @@ static int ti_sn65dsi86_probe(struct i2c_client *client, * ordering. The bridge wants the panel to be there when it probes. * The panel wants its HPD GPIO (provided by sn65dsi86 on some boards) * when it probes. The panel and maybe backlight might want the DDC - * bus. Soon the PWM provided by the bridge chip will have the same - * problem. Having sub-devices allows the some sub devices to finish - * probing even if others return -EPROBE_DEFER and gets us around the - * problems. + * bus or the pwm_chip. Having sub-devices allows some sub-devices + * to finish probing even if others return -EPROBE_DEFER and gets us + * around the problems. */ if (IS_ENABLED(CONFIG_OF_GPIO)) { @@ -1558,6 +1886,12 @@ static int ti_sn65dsi86_probe(struct i2c_client *client, return ret; } + if (IS_ENABLED(CONFIG_PWM)) { + ret = ti_sn65dsi86_add_aux_device(pdata, &pdata->pwm_aux, "pwm"); + if (ret) + return ret; + } + /* * NOTE: At the end of the AUX channel probe we'll add the aux device * for the bridge.
This is because the bridge can't be used until the @@ -1601,10 +1935,14 @@ static int __init ti_sn65dsi86_init(void) if (ret) goto err_main_was_registered; - ret = auxiliary_driver_register(&ti_sn_aux_driver); + ret = ti_sn_pwm_register(); if (ret) goto err_gpio_was_registered; + ret = auxiliary_driver_register(&ti_sn_aux_driver); + if (ret) + goto err_pwm_was_registered; + ret = auxiliary_driver_register(&ti_sn_bridge_driver); if (ret) goto err_aux_was_registered; @@ -1613,6 +1951,8 @@ static int __init ti_sn65dsi86_init(void) err_aux_was_registered: auxiliary_driver_unregister(&ti_sn_aux_driver); +err_pwm_was_registered: + ti_sn_pwm_unregister(); err_gpio_was_registered: ti_sn_gpio_unregister(); err_main_was_registered: @@ -1626,6 +1966,7 @@ static void __exit ti_sn65dsi86_exit(void) { auxiliary_driver_unregister(&ti_sn_bridge_driver); auxiliary_driver_unregister(&ti_sn_aux_driver); + ti_sn_pwm_unregister(); ti_sn_gpio_unregister(); i2c_del_driver(&ti_sn65dsi86_driver); } diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index ff1416cd609a..21174efd91be 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -74,7 +74,7 @@ int drm_crtc_commit_wait(struct drm_crtc_commit *commit) ret = wait_for_completion_timeout(&commit->hw_done, timeout); if (!ret) { - DRM_ERROR("hw_done timed out\n"); + drm_err(commit->crtc->dev, "hw_done timed out\n"); return -ETIMEDOUT; } @@ -84,7 +84,7 @@ int drm_crtc_commit_wait(struct drm_crtc_commit *commit) */ ret = wait_for_completion_timeout(&commit->flip_done, timeout); if (!ret) { - DRM_ERROR("flip_done timed out\n"); + drm_err(commit->crtc->dev, "flip_done timed out\n"); return -ETIMEDOUT; } @@ -140,7 +140,7 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state) state->dev = dev; - DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state); + drm_dbg_atomic(dev, "Allocated atomic state %p\n", state); return 0; fail: @@ -191,7 +191,7 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) struct drm_mode_config *config = &dev->mode_config; int i; - DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state); + drm_dbg_atomic(dev, "Clearing atomic state %p\n", state); for (i = 0; i < state->num_connector; i++) { struct drm_connector *connector = state->connectors[i].ptr; @@ -301,7 +301,7 @@ void __drm_atomic_state_free(struct kref *ref) drm_atomic_state_clear(state); - DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state); + drm_dbg_atomic(state->dev, "Freeing atomic state %p\n", state); if (config->funcs->atomic_state_free) { config->funcs->atomic_state_free(state); @@ -358,8 +358,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state, state->crtcs[index].ptr = crtc; crtc_state->state = state; - DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n", - crtc->base.id, crtc->name, crtc_state, state); + drm_dbg_atomic(state->dev, "Added [CRTC:%d:%s] %p state to %p\n", + crtc->base.id, crtc->name, crtc_state, state); return crtc_state; } @@ -379,8 +379,9 @@ static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state, */ if (new_crtc_state->active && !new_crtc_state->enable) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] active without enabled\n", + crtc->base.id, crtc->name); return -EINVAL; } @@ -390,15 +391,17 @@ static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state, */ if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && 
WARN_ON(new_crtc_state->enable && !new_crtc_state->mode_blob)) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] enabled without mode blob\n", + crtc->base.id, crtc->name); return -EINVAL; } if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && WARN_ON(!new_crtc_state->enable && new_crtc_state->mode_blob)) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] disabled with mode blob\n", + crtc->base.id, crtc->name); return -EINVAL; } @@ -414,8 +417,9 @@ static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state, */ if (new_crtc_state->event && !new_crtc_state->active && !old_crtc_state->active) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] requesting event but off\n", + crtc->base.id, crtc->name); return -EINVAL; } @@ -460,8 +464,9 @@ static int drm_atomic_connector_check(struct drm_connector *connector, return 0; if (writeback_job->fb && !state->crtc) { - DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] framebuffer without CRTC\n", - connector->base.id, connector->name); + drm_dbg_atomic(connector->dev, + "[CONNECTOR:%d:%s] framebuffer without CRTC\n", + connector->base.id, connector->name); return -EINVAL; } @@ -470,16 +475,18 @@ static int drm_atomic_connector_check(struct drm_connector *connector, state->crtc); if (writeback_job->fb && !crtc_state->active) { - DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n", - connector->base.id, connector->name, - state->crtc->base.id); + drm_dbg_atomic(connector->dev, + "[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n", + connector->base.id, connector->name, + state->crtc->base.id); return -EINVAL; } if (!writeback_job->fb) { if (writeback_job->out_fence) { - DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n", - connector->base.id, connector->name); + drm_dbg_atomic(connector->dev, + "[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n", + connector->base.id, connector->name); return -EINVAL; } @@ -537,8 +544,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state, state->planes[index].new_state = plane_state; plane_state->state = state; - DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n", - plane->base.id, plane->name, plane_state, state); + drm_dbg_atomic(plane->dev, "Added [PLANE:%d:%s] %p state to %p\n", + plane->base.id, plane->name, plane_state, state); if (plane_state->crtc) { struct drm_crtc_state *crtc_state; @@ -594,12 +601,12 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, /* either *both* CRTC and FB must be set, or neither */ if (crtc && !fb) { - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n", - plane->base.id, plane->name); + drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] CRTC set but no FB\n", + plane->base.id, plane->name); return -EINVAL; } else if (fb && !crtc) { - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n", - plane->base.id, plane->name); + drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] FB set but no CRTC\n", + plane->base.id, plane->name); return -EINVAL; } @@ -609,9 +616,10 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, /* Check whether this plane is usable on this CRTC */ if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) { - DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n", - 
crtc->base.id, crtc->name, - plane->base.id, plane->name); + drm_dbg_atomic(plane->dev, + "Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n", + crtc->base.id, crtc->name, + plane->base.id, plane->name); return -EINVAL; } @@ -619,9 +627,10 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, ret = drm_plane_check_pixel_format(plane, fb->format->format, fb->modifier); if (ret) { - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %p4cc, modifier 0x%llx\n", - plane->base.id, plane->name, - &fb->format->format, fb->modifier); + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] invalid pixel format %p4cc, modifier 0x%llx\n", + plane->base.id, plane->name, + &fb->format->format, fb->modifier); return ret; } @@ -630,10 +639,11 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, new_plane_state->crtc_x > INT_MAX - (int32_t) new_plane_state->crtc_w || new_plane_state->crtc_h > INT_MAX || new_plane_state->crtc_y > INT_MAX - (int32_t) new_plane_state->crtc_h) { - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n", - plane->base.id, plane->name, - new_plane_state->crtc_w, new_plane_state->crtc_h, - new_plane_state->crtc_x, new_plane_state->crtc_y); + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n", + plane->base.id, plane->name, + new_plane_state->crtc_w, new_plane_state->crtc_h, + new_plane_state->crtc_x, new_plane_state->crtc_y); return -ERANGE; } @@ -645,18 +655,19 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, new_plane_state->src_x > fb_width - new_plane_state->src_w || new_plane_state->src_h > fb_height || new_plane_state->src_y > fb_height - new_plane_state->src_h) { - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates " - "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n", - plane->base.id, plane->name, - new_plane_state->src_w >> 16, - ((new_plane_state->src_w & 0xffff) * 15625) >> 10, - new_plane_state->src_h >> 16, - ((new_plane_state->src_h & 0xffff) * 15625) >> 10, - new_plane_state->src_x >> 16, - ((new_plane_state->src_x & 0xffff) * 15625) >> 10, - new_plane_state->src_y >> 16, - ((new_plane_state->src_y & 0xffff) * 15625) >> 10, - fb->width, fb->height); + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] invalid source coordinates " + "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n", + plane->base.id, plane->name, + new_plane_state->src_w >> 16, + ((new_plane_state->src_w & 0xffff) * 15625) >> 10, + new_plane_state->src_h >> 16, + ((new_plane_state->src_h & 0xffff) * 15625) >> 10, + new_plane_state->src_x >> 16, + ((new_plane_state->src_x & 0xffff) * 15625) >> 10, + new_plane_state->src_y >> 16, + ((new_plane_state->src_y & 0xffff) * 15625) >> 10, + fb->width, fb->height); return -ENOSPC; } @@ -671,9 +682,10 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, clips->y1 < 0 || clips->x2 > fb_width || clips->y2 > fb_height) { - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid damage clip %d %d %d %d\n", - plane->base.id, plane->name, clips->x1, - clips->y1, clips->x2, clips->y2); + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] invalid damage clip %d %d %d %d\n", + plane->base.id, plane->name, clips->x1, + clips->y1, clips->x2, clips->y2); return -EINVAL; } clips++; @@ -681,8 +693,9 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, } if (plane_switching_crtc(old_plane_state, new_plane_state)) { - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n", - plane->base.id, plane->name); + 
drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] switching CRTC directly\n", + plane->base.id, plane->name); return -EINVAL; } @@ -846,8 +859,9 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state, state->num_private_objs = num_objs; - DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n", - obj, obj_state, state); + drm_dbg_atomic(state->dev, + "Added new private object %p state %p to %p\n", + obj, obj_state, state); return obj_state; } @@ -1027,7 +1041,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state, state->connectors[index].ptr = connector; connector_state->state = state; - DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d:%s] %p state to %p\n", + drm_dbg_atomic(connector->dev, "Added [CONNECTOR:%d:%s] %p state to %p\n", connector->base.id, connector->name, connector_state, state); @@ -1160,8 +1174,9 @@ drm_atomic_add_encoder_bridges(struct drm_atomic_state *state, if (!encoder) return 0; - DRM_DEBUG_ATOMIC("Adding all bridges for [encoder:%d:%s] to %p\n", - encoder->base.id, encoder->name, state); + drm_dbg_atomic(encoder->dev, + "Adding all bridges for [encoder:%d:%s] to %p\n", + encoder->base.id, encoder->name, state); drm_for_each_bridge_in_chain(encoder, bridge) { /* Skip bridges that don't implement the atomic state hooks. */ @@ -1213,8 +1228,9 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state, if (ret) return ret; - DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n", - crtc->base.id, crtc->name, state); + drm_dbg_atomic(crtc->dev, + "Adding all current connectors for [CRTC:%d:%s] to %p\n", + crtc->base.id, crtc->name, state); /* * Changed connectors are already in @state, so only need to look @@ -1267,8 +1283,9 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state, WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc)); - DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n", - crtc->base.id, crtc->name, state); + drm_dbg_atomic(crtc->dev, + "Adding all current planes for [CRTC:%d:%s] to %p\n", + crtc->base.id, crtc->name, state); drm_for_each_plane_mask(plane, state->dev, old_crtc_state->plane_mask) { struct drm_plane_state *plane_state = @@ -1308,7 +1325,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state) unsigned int affected_crtc = 0; int i, ret = 0; - DRM_DEBUG_ATOMIC("checking %p\n", state); + drm_dbg_atomic(dev, "checking %p\n", state); for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) requested_crtc |= drm_crtc_mask(crtc); @@ -1316,8 +1333,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state) for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { ret = drm_atomic_plane_check(old_plane_state, new_plane_state); if (ret) { - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n", - plane->base.id, plane->name); + drm_dbg_atomic(dev, "[PLANE:%d:%s] atomic core check failed\n", + plane->base.id, plane->name); return ret; } } @@ -1325,8 +1342,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state) for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { ret = drm_atomic_crtc_check(old_crtc_state, new_crtc_state); if (ret) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(dev, "[CRTC:%d:%s] atomic core check failed\n", + crtc->base.id, crtc->name); return ret; } } @@ -1334,8 +1351,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state) for_each_new_connector_in_state(state, conn, conn_state, i) { ret = 
drm_atomic_connector_check(conn, conn_state); if (ret) { - DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] atomic core check failed\n", - conn->base.id, conn->name); + drm_dbg_atomic(dev, "[CONNECTOR:%d:%s] atomic core check failed\n", + conn->base.id, conn->name); return ret; } } @@ -1344,8 +1361,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state) ret = config->funcs->atomic_check(state->dev, state); if (ret) { - DRM_DEBUG_ATOMIC("atomic driver check for %p failed: %d\n", - state, ret); + drm_dbg_atomic(dev, "atomic driver check for %p failed: %d\n", + state, ret); return ret; } } @@ -1353,8 +1370,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state) if (!state->allow_modeset) { for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(dev, "[CRTC:%d:%s] requires full modeset\n", + crtc->base.id, crtc->name); return -EINVAL; } } @@ -1374,8 +1391,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state) * so compositors know what's going on. */ if (affected_crtc != requested_crtc) { - DRM_DEBUG_ATOMIC("driver added CRTC to commit: requested 0x%x, affected 0x%0x\n", - requested_crtc, affected_crtc); + drm_dbg_atomic(dev, + "driver added CRTC to commit: requested 0x%x, affected 0x%0x\n", + requested_crtc, affected_crtc); WARN(!state->allow_modeset, "adding CRTC not allowed without modesets: requested 0x%x, affected 0x%0x\n", requested_crtc, affected_crtc); } @@ -1407,7 +1425,7 @@ int drm_atomic_commit(struct drm_atomic_state *state) if (ret) return ret; - DRM_DEBUG_ATOMIC("committing %p\n", state); + drm_dbg_atomic(state->dev, "committing %p\n", state); return config->funcs->atomic_commit(state->dev, state, false); } @@ -1436,7 +1454,7 @@ int drm_atomic_nonblocking_commit(struct drm_atomic_state *state) if (ret) return ret; - DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state); + drm_dbg_atomic(state->dev, "committing %p nonblocking\n", state); return config->funcs->atomic_commit(state->dev, state, true); } @@ -1633,11 +1651,11 @@ void drm_atomic_print_new_state(const struct drm_atomic_state *state, int i; if (!p) { - DRM_ERROR("invalid drm printer\n"); + drm_err(state->dev, "invalid drm printer\n"); return; } - DRM_DEBUG_ATOMIC("checking %p\n", state); + drm_dbg_atomic(state->dev, "checking %p\n", state); for_each_new_plane_in_state(state, plane, plane_state, i) drm_atomic_plane_print_state(p, plane_state); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 2c0c6ec92820..aef2fbd676e5 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -132,9 +132,10 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state, if (new_encoder) { if (encoder_mask & drm_encoder_mask(new_encoder)) { - DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n", - new_encoder->base.id, new_encoder->name, - connector->base.id, connector->name); + drm_dbg_atomic(connector->dev, + "[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n", + new_encoder->base.id, new_encoder->name, + connector->base.id, connector->name); return -EINVAL; } @@ -169,11 +170,12 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state, continue; if (!disable_conflicting_encoders) { - DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n", - encoder->base.id, encoder->name, - connector->state->crtc->base.id, - 
connector->state->crtc->name, - connector->base.id, connector->name); + drm_dbg_atomic(connector->dev, + "[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n", + encoder->base.id, encoder->name, + connector->state->crtc->base.id, + connector->state->crtc->name, + connector->base.id, connector->name); ret = -EINVAL; goto out; } @@ -184,10 +186,11 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state, goto out; } - DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n", - encoder->base.id, encoder->name, - new_conn_state->crtc->base.id, new_conn_state->crtc->name, - connector->base.id, connector->name); + drm_dbg_atomic(connector->dev, + "[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n", + encoder->base.id, encoder->name, + new_conn_state->crtc->base.id, new_conn_state->crtc->name, + connector->base.id, connector->name); crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); @@ -268,9 +271,10 @@ steal_encoder(struct drm_atomic_state *state, encoder_crtc = old_connector_state->crtc; - DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n", - encoder->base.id, encoder->name, - encoder_crtc->base.id, encoder_crtc->name); + drm_dbg_atomic(encoder->dev, + "[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n", + encoder->base.id, encoder->name, + encoder_crtc->base.id, encoder_crtc->name); set_best_encoder(state, new_connector_state, NULL); @@ -291,9 +295,8 @@ update_connector_routing(struct drm_atomic_state *state, struct drm_encoder *new_encoder; struct drm_crtc_state *crtc_state; - DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n", - connector->base.id, - connector->name); + drm_dbg_atomic(connector->dev, "Updating routing for [CONNECTOR:%d:%s]\n", + connector->base.id, connector->name); if (old_connector_state->crtc != new_connector_state->crtc) { if (old_connector_state->crtc) { @@ -308,9 +311,8 @@ update_connector_routing(struct drm_atomic_state *state, } if (!new_connector_state->crtc) { - DRM_DEBUG_ATOMIC("Disabling [CONNECTOR:%d:%s]\n", - connector->base.id, - connector->name); + drm_dbg_atomic(connector->dev, "Disabling [CONNECTOR:%d:%s]\n", + connector->base.id, connector->name); set_best_encoder(state, new_connector_state, NULL); @@ -339,8 +341,9 @@ update_connector_routing(struct drm_atomic_state *state, */ if (!state->duplicated && drm_connector_is_unregistered(connector) && crtc_state->active) { - DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] is not registered\n", - connector->base.id, connector->name); + drm_dbg_atomic(connector->dev, + "[CONNECTOR:%d:%s] is not registered\n", + connector->base.id, connector->name); return -EINVAL; } @@ -354,31 +357,33 @@ update_connector_routing(struct drm_atomic_state *state, new_encoder = drm_connector_get_single_encoder(connector); if (!new_encoder) { - DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n", - connector->base.id, - connector->name); + drm_dbg_atomic(connector->dev, + "No suitable encoder found for [CONNECTOR:%d:%s]\n", + connector->base.id, connector->name); return -EINVAL; } if (!drm_encoder_crtc_ok(new_encoder, new_connector_state->crtc)) { - DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n", - new_encoder->base.id, - new_encoder->name, - new_connector_state->crtc->base.id, - new_connector_state->crtc->name); + drm_dbg_atomic(connector->dev, + "[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n", + new_encoder->base.id, + new_encoder->name, + 
new_connector_state->crtc->base.id, + new_connector_state->crtc->name); return -EINVAL; } if (new_encoder == new_connector_state->best_encoder) { set_best_encoder(state, new_connector_state, new_encoder); - DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n", - connector->base.id, - connector->name, - new_encoder->base.id, - new_encoder->name, - new_connector_state->crtc->base.id, - new_connector_state->crtc->name); + drm_dbg_atomic(connector->dev, + "[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n", + connector->base.id, + connector->name, + new_encoder->base.id, + new_encoder->name, + new_connector_state->crtc->base.id, + new_connector_state->crtc->name); return 0; } @@ -389,13 +394,14 @@ update_connector_routing(struct drm_atomic_state *state, crtc_state->connectors_changed = true; - DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n", - connector->base.id, - connector->name, - new_encoder->base.id, - new_encoder->name, - new_connector_state->crtc->base.id, - new_connector_state->crtc->name); + drm_dbg_atomic(connector->dev, + "[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n", + connector->base.id, + connector->name, + new_encoder->base.id, + new_encoder->name, + new_connector_state->crtc->base.id, + new_connector_state->crtc->name); return 0; } @@ -443,7 +449,7 @@ mode_fixup(struct drm_atomic_state *state) new_crtc_state, new_conn_state); if (ret) { - DRM_DEBUG_ATOMIC("Bridge atomic check failed\n"); + drm_dbg_atomic(encoder->dev, "Bridge atomic check failed\n"); return ret; } @@ -451,16 +457,18 @@ mode_fixup(struct drm_atomic_state *state) ret = funcs->atomic_check(encoder, new_crtc_state, new_conn_state); if (ret) { - DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] check failed\n", - encoder->base.id, encoder->name); + drm_dbg_atomic(encoder->dev, + "[ENCODER:%d:%s] check failed\n", + encoder->base.id, encoder->name); return ret; } } else if (funcs && funcs->mode_fixup) { ret = funcs->mode_fixup(encoder, &new_crtc_state->mode, &new_crtc_state->adjusted_mode); if (!ret) { - DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] fixup failed\n", - encoder->base.id, encoder->name); + drm_dbg_atomic(encoder->dev, + "[ENCODER:%d:%s] fixup failed\n", + encoder->base.id, encoder->name); return -EINVAL; } } @@ -483,8 +491,8 @@ mode_fixup(struct drm_atomic_state *state) ret = funcs->mode_fixup(crtc, &new_crtc_state->mode, &new_crtc_state->adjusted_mode); if (!ret) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] fixup failed\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(crtc->dev, "[CRTC:%d:%s] fixup failed\n", + crtc->base.id, crtc->name); return -EINVAL; } } @@ -502,8 +510,9 @@ static enum drm_mode_status mode_valid_path(struct drm_connector *connector, ret = drm_encoder_mode_valid(encoder, mode); if (ret != MODE_OK) { - DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] mode_valid() failed\n", - encoder->base.id, encoder->name); + drm_dbg_atomic(encoder->dev, + "[ENCODER:%d:%s] mode_valid() failed\n", + encoder->base.id, encoder->name); return ret; } @@ -511,14 +520,14 @@ static enum drm_mode_status mode_valid_path(struct drm_connector *connector, ret = drm_bridge_chain_mode_valid(bridge, &connector->display_info, mode); if (ret != MODE_OK) { - DRM_DEBUG_ATOMIC("[BRIDGE] mode_valid() failed\n"); + drm_dbg_atomic(encoder->dev, "[BRIDGE] mode_valid() failed\n"); return ret; } ret = drm_crtc_mode_valid(crtc, mode); if (ret != MODE_OK) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode_valid() failed\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(encoder->dev, "[CRTC:%d:%s] 
mode_valid() failed\n", + crtc->base.id, crtc->name); return ret; } @@ -619,14 +628,14 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(dev, "[CRTC:%d:%s] mode changed\n", + crtc->base.id, crtc->name); new_crtc_state->mode_changed = true; } if (old_crtc_state->enable != new_crtc_state->enable) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enable changed\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(dev, "[CRTC:%d:%s] enable changed\n", + crtc->base.id, crtc->name); /* * For clarity this assignment is done here, but @@ -641,14 +650,14 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, } if (old_crtc_state->active != new_crtc_state->active) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(dev, "[CRTC:%d:%s] active changed\n", + crtc->base.id, crtc->name); new_crtc_state->active_changed = true; } if (new_crtc_state->enable != has_connectors) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(dev, "[CRTC:%d:%s] enabled/connectors mismatch\n", + crtc->base.id, crtc->name); return -EINVAL; } @@ -708,10 +717,11 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) continue; - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n", - crtc->base.id, crtc->name, - new_crtc_state->enable ? 'y' : 'n', - new_crtc_state->active ? 'y' : 'n'); + drm_dbg_atomic(dev, + "[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n", + crtc->base.id, crtc->name, + new_crtc_state->enable ? 'y' : 'n', + new_crtc_state->active ? 
'y' : 'n'); ret = drm_atomic_add_affected_connectors(state, crtc); if (ret != 0) @@ -818,7 +828,8 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, } if (!crtc_state->enable && !can_update_disabled) { - DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n"); + drm_dbg_kms(plane_state->crtc->dev, + "Cannot update plane of a disabled CRTC.\n"); return -EINVAL; } @@ -828,7 +839,8 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale); vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale); if (hscale < 0 || vscale < 0) { - DRM_DEBUG_KMS("Invalid scaling of plane\n"); + drm_dbg_kms(plane_state->crtc->dev, + "Invalid scaling of plane\n"); drm_rect_debug_print("src: ", &plane_state->src, true); drm_rect_debug_print("dst: ", &plane_state->dst, false); return -ERANGE; @@ -852,7 +864,8 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, return 0; if (!can_position && !drm_rect_equals(dst, &clip)) { - DRM_DEBUG_KMS("Plane must cover entire CRTC\n"); + drm_dbg_kms(plane_state->crtc->dev, + "Plane must cover entire CRTC\n"); drm_rect_debug_print("dst: ", dst, false); drm_rect_debug_print("clip: ", &clip, false); return -EINVAL; @@ -904,8 +917,9 @@ drm_atomic_helper_check_planes(struct drm_device *dev, ret = funcs->atomic_check(plane, state); if (ret) { - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n", - plane->base.id, plane->name); + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] atomic driver check failed\n", + plane->base.id, plane->name); return ret; } } @@ -920,8 +934,9 @@ drm_atomic_helper_check_planes(struct drm_device *dev, ret = funcs->atomic_check(crtc, state); if (ret) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] atomic driver check failed\n", + crtc->base.id, crtc->name); return ret; } } @@ -1049,8 +1064,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) funcs = encoder->helper_private; - DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n", - encoder->base.id, encoder->name); + drm_dbg_atomic(dev, "disabling [ENCODER:%d:%s]\n", + encoder->base.id, encoder->name); /* * Each encoder has at most one connector (since we always steal @@ -1087,8 +1102,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) funcs = crtc->helper_private; - DRM_DEBUG_ATOMIC("disabling [CRTC:%d:%s]\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(dev, "disabling [CRTC:%d:%s]\n", + crtc->base.id, crtc->name); /* Right function depends upon target state. 
*/ @@ -1229,8 +1244,8 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state) funcs = crtc->helper_private; if (new_crtc_state->enable && funcs->mode_set_nofb) { - DRM_DEBUG_ATOMIC("modeset on [CRTC:%d:%s]\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(dev, "modeset on [CRTC:%d:%s]\n", + crtc->base.id, crtc->name); funcs->mode_set_nofb(crtc); } @@ -1254,8 +1269,8 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state) if (!new_crtc_state->mode_changed) continue; - DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n", - encoder->base.id, encoder->name); + drm_dbg_atomic(dev, "modeset on [ENCODER:%d:%s]\n", + encoder->base.id, encoder->name); /* * Each encoder has at most one connector (since we always steal @@ -1357,8 +1372,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, funcs = crtc->helper_private; if (new_crtc_state->enable) { - DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(dev, "enabling [CRTC:%d:%s]\n", + crtc->base.id, crtc->name); if (funcs->atomic_enable) funcs->atomic_enable(crtc, old_state); else if (funcs->commit) @@ -1381,8 +1396,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, encoder = new_conn_state->best_encoder; funcs = encoder->helper_private; - DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n", - encoder->base.id, encoder->name); + drm_dbg_atomic(dev, "enabling [ENCODER:%d:%s]\n", + encoder->base.id, encoder->name); /* * Each encoder has at most one connector (since we always steal @@ -1551,8 +1566,8 @@ void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev, ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ); if (ret == 0) - DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n", - crtc->base.id, crtc->name); + drm_err(dev, "[CRTC:%d:%s] flip_done timed out\n", + crtc->base.id, crtc->name); } if (old_state->fake_commit) @@ -1739,8 +1754,9 @@ int drm_atomic_helper_async_check(struct drm_device *dev, */ if (old_plane_state->commit && !try_wait_for_completion(&old_plane_state->commit->hw_done)) { - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] inflight previous commit preventing async commit\n", - plane->base.id, plane->name); + drm_dbg_atomic(dev, + "[PLANE:%d:%s] inflight previous commit preventing async commit\n", + plane->base.id, plane->name); return -EBUSY; } @@ -1962,8 +1978,9 @@ static int stall_checks(struct drm_crtc *crtc, bool nonblock) */ if (!completed && nonblock) { spin_unlock(&crtc->commit_lock); - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] busy with a previous commit\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] busy with a previous commit\n", + crtc->base.id, crtc->name); return -EBUSY; } @@ -1985,8 +2002,8 @@ static int stall_checks(struct drm_crtc *crtc, bool nonblock) ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done, 10*HZ); if (ret == 0) - DRM_ERROR("[CRTC:%d:%s] cleanup_done timed out\n", - crtc->base.id, crtc->name); + drm_err(crtc->dev, "[CRTC:%d:%s] cleanup_done timed out\n", + crtc->base.id, crtc->name); drm_crtc_commit_put(stall_commit); @@ -2150,8 +2167,9 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, */ if (nonblock && old_conn_state->commit && !try_wait_for_completion(&old_conn_state->commit->flip_done)) { - DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] busy with a previous commit\n", - conn->base.id, conn->name); + drm_dbg_atomic(conn->dev, + "[CONNECTOR:%d:%s] busy with a previous commit\n", + conn->base.id, conn->name); return -EBUSY; } @@ -2171,8 
+2189,9 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, */ if (nonblock && old_plane_state->commit && !try_wait_for_completion(&old_plane_state->commit->flip_done)) { - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] busy with a previous commit\n", - plane->base.id, plane->name); + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] busy with a previous commit\n", + plane->base.id, plane->name); return -EBUSY; } @@ -2218,22 +2237,25 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state) for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) { ret = drm_crtc_commit_wait(old_crtc_state->commit); if (ret) - DRM_ERROR("[CRTC:%d:%s] commit wait timed out\n", - crtc->base.id, crtc->name); + drm_err(crtc->dev, + "[CRTC:%d:%s] commit wait timed out\n", + crtc->base.id, crtc->name); } for_each_old_connector_in_state(old_state, conn, old_conn_state, i) { ret = drm_crtc_commit_wait(old_conn_state->commit); if (ret) - DRM_ERROR("[CONNECTOR:%d:%s] commit wait timed out\n", - conn->base.id, conn->name); + drm_err(conn->dev, + "[CONNECTOR:%d:%s] commit wait timed out\n", + conn->base.id, conn->name); } for_each_old_plane_in_state(old_state, plane, old_plane_state, i) { ret = drm_crtc_commit_wait(old_plane_state->commit); if (ret) - DRM_ERROR("[PLANE:%d:%s] commit wait timed out\n", - plane->base.id, plane->name); + drm_err(plane->dev, + "[PLANE:%d:%s] commit wait timed out\n", + plane->base.id, plane->name); } } EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies); @@ -3120,7 +3142,9 @@ void drm_atomic_helper_shutdown(struct drm_device *dev) ret = drm_atomic_helper_disable_all(dev, &ctx); if (ret) - DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret); + drm_err(dev, + "Disabling all crtc's during unload failed with %i\n", + ret); DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); } @@ -3380,8 +3404,9 @@ static int page_flip_common(struct drm_atomic_state *state, /* Make sure we don't accidentally do a full modeset. 
*/ state->allow_modeset = false; if (!crtc_state->active) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled, rejecting legacy flip\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] disabled, rejecting legacy flip\n", + crtc->base.id, crtc->name); return -EINVAL; } diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 909f31833181..9781722519c3 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -773,7 +773,7 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector, state->scaling_mode = val; } else if (property == config->content_protection_property) { if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) { - DRM_DEBUG_KMS("only drivers can set CP Enabled\n"); + drm_dbg_kms(dev, "only drivers can set CP Enabled\n"); return -EINVAL; } state->content_protection = val; @@ -797,6 +797,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector, fence_ptr); } else if (property == connector->max_bpc_property) { state->max_requested_bpc = val; + } else if (property == connector->privacy_screen_sw_state_property) { + state->privacy_screen_sw_state = val; } else if (connector->funcs->atomic_set_property) { return connector->funcs->atomic_set_property(connector, state, property, val); @@ -874,6 +876,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector, *val = 0; } else if (property == connector->max_bpc_property) { *val = state->max_requested_bpc; + } else if (property == connector->privacy_screen_sw_state_property) { + *val = state->privacy_screen_sw_state; } else if (connector->funcs->atomic_get_property) { return connector->funcs->atomic_get_property(connector, state, property, val); diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c index 60a6b21474b1..6e433d465f41 100644 --- a/drivers/gpu/drm/drm_auth.c +++ b/drivers/gpu/drm/drm_auth.c @@ -106,7 +106,7 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) auth->magic = file_priv->magic; mutex_unlock(&dev->master_mutex); - DRM_DEBUG("%u\n", auth->magic); + drm_dbg_core(dev, "%u\n", auth->magic); return ret < 0 ? 
ret : 0; } @@ -117,7 +117,7 @@ int drm_authmagic(struct drm_device *dev, void *data, struct drm_auth *auth = data; struct drm_file *file; - DRM_DEBUG("%u\n", auth->magic); + drm_dbg_core(dev, "%u\n", auth->magic); mutex_lock(&dev->master_mutex); file = idr_find(&file_priv->master->magic_map, auth->magic); @@ -274,7 +274,9 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data, } if (file_priv->master->lessor != NULL) { - DRM_DEBUG_LEASE("Attempt to set lessee %d as master\n", file_priv->master->lessee_id); + drm_dbg_lease(dev, + "Attempt to set lessee %d as master\n", + file_priv->master->lessee_id); ret = -EINVAL; goto out_unlock; } @@ -315,7 +317,9 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data, } if (file_priv->master->lessor != NULL) { - DRM_DEBUG_LEASE("Attempt to drop lessee %d as master\n", file_priv->master->lessee_id); + drm_dbg_lease(dev, + "Attempt to drop lessee %d as master\n", + file_priv->master->lessee_id); ret = -EINVAL; goto out_unlock; } diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 52e20c68813b..a50c82bc2b2f 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -28,6 +28,7 @@ #include <drm/drm_print.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> +#include <drm/drm_privacy_screen_consumer.h> #include <drm/drm_sysfs.h> #include <linux/uaccess.h> @@ -462,6 +463,11 @@ void drm_connector_cleanup(struct drm_connector *connector) DRM_CONNECTOR_REGISTERED)) drm_connector_unregister(connector); + if (connector->privacy_screen) { + drm_privacy_screen_put(connector->privacy_screen); + connector->privacy_screen = NULL; + } + if (connector->tile_group) { drm_mode_put_tile_group(dev, connector->tile_group); connector->tile_group = NULL; @@ -541,7 +547,11 @@ int drm_connector_register(struct drm_connector *connector) connector->registration_state = DRM_CONNECTOR_REGISTERED; /* Let userspace know we have a new connector */ - drm_sysfs_hotplug_event(connector->dev); + drm_sysfs_connector_hotplug_event(connector); + + if (connector->privacy_screen) + drm_privacy_screen_register_notifier(connector->privacy_screen, + &connector->privacy_screen_notifier); mutex_lock(&connector_list_lock); list_add_tail(&connector->global_connector_list_entry, &connector_list); @@ -578,6 +588,11 @@ void drm_connector_unregister(struct drm_connector *connector) list_del_init(&connector->global_connector_list_entry); mutex_unlock(&connector_list_lock); + if (connector->privacy_screen) + drm_privacy_screen_unregister_notifier( + connector->privacy_screen, + &connector->privacy_screen_notifier); + if (connector->funcs->early_unregister) connector->funcs->early_unregister(connector); @@ -1271,6 +1286,46 @@ static const struct drm_prop_enum_list dp_colorspaces[] = { * For DVI-I and TVout there is also a matching property "select subconnector" * allowing to switch between signal types. * DP subconnector corresponds to a downstream port. + * + * privacy-screen sw-state, privacy-screen hw-state: + * These 2 optional properties can be used to query the state of the + * electronic privacy screen that is available on some displays; and in + * some cases also control the state. If a driver implements these + * properties then both properties must be present. + * + * "privacy-screen hw-state" is read-only and reflects the actual state + * of the privacy-screen, possible values: "Enabled", "Disabled", + * "Enabled-locked", "Disabled-locked". The locked states indicate + * that the state cannot be changed through the DRM API.
E.g. there + * might be devices where the firmware-setup options, or a hardware + * slider-switch, offer always on / off modes. + * + * "privacy-screen sw-state" can be set to change the privacy-screen state + * when not locked. In this case the driver must update the hw-state + * property to reflect the new state on completion of the commit of the + * sw-state property. Setting the sw-state property when the hw-state is + * locked must be interpreted by the driver as a request to change the + * state to the set state when the hw-state becomes unlocked. E.g. if + * "privacy-screen hw-state" is "Enabled-locked" and the sw-state + * gets set to "Disabled" followed by the user unlocking the state by + * changing the slider-switch position, then the driver must set the + * state to "Disabled" upon receiving the unlock event. + * + * In some cases the privacy-screen's actual state might change outside of + * control of the DRM code. E.g. there might be a firmware handled hotkey + * which toggles the actual state, or the actual state might be changed + * through another userspace API such as writing /proc/acpi/ibm/lcdshadow. + * In this case the driver must update both the hw-state and the sw-state + * to reflect the new value, overwriting any pending state requests in the + * sw-state. Any pending sw-state requests are thus discarded. + * + * Note that the ability for the state to change outside of control of + * the DRM master process means that userspace must not cache the value + * of the sw-state. Caching the sw-state value and including it in later + * atomic commits may lead to overriding a state change done through e.g. + * a firmware handled hotkey. Therefore userspace must not include the + * privacy-screen sw-state in an atomic commit unless it wants to change + * its value. */ int drm_connector_create_standard_properties(struct drm_device *dev) @@ -2365,6 +2420,154 @@ int drm_connector_set_panel_orientation_with_quirk( } EXPORT_SYMBOL(drm_connector_set_panel_orientation_with_quirk); +static const struct drm_prop_enum_list privacy_screen_enum[] = { + { PRIVACY_SCREEN_DISABLED, "Disabled" }, + { PRIVACY_SCREEN_ENABLED, "Enabled" }, + { PRIVACY_SCREEN_DISABLED_LOCKED, "Disabled-locked" }, + { PRIVACY_SCREEN_ENABLED_LOCKED, "Enabled-locked" }, +}; + +/** + * drm_connector_create_privacy_screen_properties - create the drm connector's + * privacy-screen properties. + * @connector: connector for which to create the privacy-screen properties + * + * This function creates the "privacy-screen sw-state" and "privacy-screen + * hw-state" properties for the connector. They are not attached. + */ +void +drm_connector_create_privacy_screen_properties(struct drm_connector *connector) +{ + if (connector->privacy_screen_sw_state_property) + return; + + /* Note sw-state only supports the first 2 values of the enum */ + connector->privacy_screen_sw_state_property = + drm_property_create_enum(connector->dev, DRM_MODE_PROP_ENUM, + "privacy-screen sw-state", + privacy_screen_enum, 2); + + connector->privacy_screen_hw_state_property = + drm_property_create_enum(connector->dev, + DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_ENUM, + "privacy-screen hw-state", + privacy_screen_enum, + ARRAY_SIZE(privacy_screen_enum)); +} +EXPORT_SYMBOL(drm_connector_create_privacy_screen_properties); + +/** + * drm_connector_attach_privacy_screen_properties - attach the drm connector's + * privacy-screen properties.
+ * @connector: connector on which to attach the privacy-screen properties + * + * This function attaches the "privacy-screen sw-state" and "privacy-screen + * hw-state" properties to the connector. The initial state of both is set + * to "Disabled". + */ +void +drm_connector_attach_privacy_screen_properties(struct drm_connector *connector) +{ + if (!connector->privacy_screen_sw_state_property) + return; + + drm_object_attach_property(&connector->base, + connector->privacy_screen_sw_state_property, + PRIVACY_SCREEN_DISABLED); + + drm_object_attach_property(&connector->base, + connector->privacy_screen_hw_state_property, + PRIVACY_SCREEN_DISABLED); +} +EXPORT_SYMBOL(drm_connector_attach_privacy_screen_properties); + +static void drm_connector_update_privacy_screen_properties( + struct drm_connector *connector, bool set_sw_state) +{ + enum drm_privacy_screen_status sw_state, hw_state; + + drm_privacy_screen_get_state(connector->privacy_screen, + &sw_state, &hw_state); + + if (set_sw_state) + connector->state->privacy_screen_sw_state = sw_state; + drm_object_property_set_value(&connector->base, + connector->privacy_screen_hw_state_property, hw_state); +} + +static int drm_connector_privacy_screen_notifier( + struct notifier_block *nb, unsigned long action, void *data) +{ + struct drm_connector *connector = + container_of(nb, struct drm_connector, privacy_screen_notifier); + struct drm_device *dev = connector->dev; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + drm_connector_update_privacy_screen_properties(connector, true); + drm_modeset_unlock(&dev->mode_config.connection_mutex); + + drm_sysfs_connector_status_event(connector, + connector->privacy_screen_sw_state_property); + drm_sysfs_connector_status_event(connector, + connector->privacy_screen_hw_state_property); + + return NOTIFY_DONE; +} + +/** + * drm_connector_attach_privacy_screen_provider - attach a privacy-screen to + * the connector + * @connector: connector to attach the privacy-screen to + * @priv: drm_privacy_screen to attach + * + * Create and attach the standard privacy-screen properties and register + * a generic notifier for generating sysfs-connector-status-events + * on external changes to the privacy-screen status. + * This function takes ownership of the passed in drm_privacy_screen and will + * call drm_privacy_screen_put() on it when the connector is destroyed. + */ +void drm_connector_attach_privacy_screen_provider( + struct drm_connector *connector, struct drm_privacy_screen *priv) +{ + connector->privacy_screen = priv; + connector->privacy_screen_notifier.notifier_call = + drm_connector_privacy_screen_notifier; + + drm_connector_create_privacy_screen_properties(connector); + drm_connector_update_privacy_screen_properties(connector, true); + drm_connector_attach_privacy_screen_properties(connector); +} +EXPORT_SYMBOL(drm_connector_attach_privacy_screen_provider); + +/** + * drm_connector_update_privacy_screen - update connector's privacy-screen sw-state + * @connector_state: connector-state to update the privacy-screen for + * + * This function calls drm_privacy_screen_set_sw_state() on the connector's + * privacy-screen. + * + * If the connector has no privacy-screen, then this is a no-op. 
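 + *
 + * A minimal usage sketch (hypothetical driver code; the &drm_atomic_state
 + * variable "state" and the iteration names are illustrative, not part of
 + * this patch)::
 + *
 + *	struct drm_connector_state *new_conn_state;
 + *	struct drm_connector *connector;
 + *	int i;
 + *
 + *	for_each_new_connector_in_state(state, connector, new_conn_state, i)
 + *		drm_connector_update_privacy_screen(new_conn_state);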
+ */ +void drm_connector_update_privacy_screen(const struct drm_connector_state *connector_state) +{ + struct drm_connector *connector = connector_state->connector; + int ret; + + if (!connector->privacy_screen) + return; + + ret = drm_privacy_screen_set_sw_state(connector->privacy_screen, + connector_state->privacy_screen_sw_state); + if (ret) { + drm_err(connector->dev, "Error updating privacy-screen sw_state\n"); + return; + } + + /* The hw_state property value may have changed, update it. */ + drm_connector_update_privacy_screen_properties(connector, false); +} +EXPORT_SYMBOL(drm_connector_update_privacy_screen); + int drm_connector_set_obj_prop(struct drm_mode_object *obj, struct drm_property *property, uint64_t value) diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 4d0d1e8e51fa..23f9073bc473 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -154,38 +154,155 @@ u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZ } EXPORT_SYMBOL(drm_dp_get_adjust_request_post_cursor); -void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux, - const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +static int __8b10b_clock_recovery_delay_us(const struct drm_dp_aux *aux, u8 rd_interval) { - unsigned long rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & - DP_TRAINING_AUX_RD_MASK; - if (rd_interval > 4) - drm_dbg_kms(aux->drm_dev, "%s: AUX interval %lu, out of range (max 4)\n", + drm_dbg_kms(aux->drm_dev, "%s: invalid AUX interval 0x%02x (max 4)\n", aux->name, rd_interval); - if (rd_interval == 0 || dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14) - rd_interval = 100; - else - rd_interval *= 4 * USEC_PER_MSEC; + if (rd_interval == 0) + return 100; - usleep_range(rd_interval, rd_interval * 2); + return rd_interval * 4 * USEC_PER_MSEC; } -EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay); -static void __drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux, - unsigned long rd_interval) +static int __8b10b_channel_eq_delay_us(const struct drm_dp_aux *aux, u8 rd_interval) { if (rd_interval > 4) - drm_dbg_kms(aux->drm_dev, "%s: AUX interval %lu, out of range (max 4)\n", + drm_dbg_kms(aux->drm_dev, "%s: invalid AUX interval 0x%02x (max 4)\n", aux->name, rd_interval); if (rd_interval == 0) - rd_interval = 400; + return 400; + + return rd_interval * 4 * USEC_PER_MSEC; +} + +static int __128b132b_channel_eq_delay_us(const struct drm_dp_aux *aux, u8 rd_interval) +{ + switch (rd_interval) { + default: + drm_dbg_kms(aux->drm_dev, "%s: invalid AUX interval 0x%02x\n", + aux->name, rd_interval); + fallthrough; + case DP_128B132B_TRAINING_AUX_RD_INTERVAL_400_US: + return 400; + case DP_128B132B_TRAINING_AUX_RD_INTERVAL_4_MS: + return 4000; + case DP_128B132B_TRAINING_AUX_RD_INTERVAL_8_MS: + return 8000; + case DP_128B132B_TRAINING_AUX_RD_INTERVAL_12_MS: + return 12000; + case DP_128B132B_TRAINING_AUX_RD_INTERVAL_16_MS: + return 16000; + case DP_128B132B_TRAINING_AUX_RD_INTERVAL_32_MS: + return 32000; + case DP_128B132B_TRAINING_AUX_RD_INTERVAL_64_MS: + return 64000; + } +} + +/* + * The link training delays are different for: + * + * - Clock recovery vs. channel equalization + * - DPRX vs. LTTPR + * - 128b/132b vs. 8b/10b + * - DPCD rev 1.3 vs. later + * + * Get the correct delay in us, reading DPCD if necessary. 
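 + *
 + * Worked example of the cases above: an 8b/10b DPRX with DPCD rev 1.2 and
 + * DP_TRAINING_AUX_RD_INTERVAL = 0x02 yields a clock recovery delay of
 + * 2 * 4 * USEC_PER_MSEC = 8 ms per iteration, whereas a DPCD rev 1.4 sink
 + * uses the fixed 100 us value instead.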
+ */ +static int __read_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE], + enum drm_dp_phy dp_phy, bool uhbr, bool cr) +{ + int (*parse)(const struct drm_dp_aux *aux, u8 rd_interval); + unsigned int offset; + u8 rd_interval, mask; + + if (dp_phy == DP_PHY_DPRX) { + if (uhbr) { + if (cr) + return 100; + + offset = DP_128B132B_TRAINING_AUX_RD_INTERVAL; + mask = DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK; + parse = __128b132b_channel_eq_delay_us; + } else { + if (cr && dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14) + return 100; + + offset = DP_TRAINING_AUX_RD_INTERVAL; + mask = DP_TRAINING_AUX_RD_MASK; + if (cr) + parse = __8b10b_clock_recovery_delay_us; + else + parse = __8b10b_channel_eq_delay_us; + } + } else { + if (uhbr) { + offset = DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy); + mask = DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK; + parse = __128b132b_channel_eq_delay_us; + } else { + if (cr) + return 100; + + offset = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy); + mask = DP_TRAINING_AUX_RD_MASK; + parse = __8b10b_channel_eq_delay_us; + } + } + + if (offset < DP_RECEIVER_CAP_SIZE) { + rd_interval = dpcd[offset]; + } else { + if (drm_dp_dpcd_readb(aux, offset, &rd_interval) != 1) { + drm_dbg_kms(aux->drm_dev, "%s: failed rd interval read\n", + aux->name); + /* arbitrary default delay */ + return 400; + } + } + + return parse(aux, rd_interval & mask); +} + +int drm_dp_read_clock_recovery_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE], + enum drm_dp_phy dp_phy, bool uhbr) +{ + return __read_delay(aux, dpcd, dp_phy, uhbr, true); +} +EXPORT_SYMBOL(drm_dp_read_clock_recovery_delay); + +int drm_dp_read_channel_eq_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE], + enum drm_dp_phy dp_phy, bool uhbr) +{ + return __read_delay(aux, dpcd, dp_phy, uhbr, false); +} +EXPORT_SYMBOL(drm_dp_read_channel_eq_delay); + +void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + u8 rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & + DP_TRAINING_AUX_RD_MASK; + int delay_us; + + if (dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14) + delay_us = 100; else - rd_interval *= 4 * USEC_PER_MSEC; + delay_us = __8b10b_clock_recovery_delay_us(aux, rd_interval); - usleep_range(rd_interval, rd_interval * 2); + usleep_range(delay_us, delay_us * 2); +} +EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay); + +static void __drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux, + u8 rd_interval) +{ + int delay_us = __8b10b_channel_eq_delay_us(aux, rd_interval); + + usleep_range(delay_us, delay_us * 2); } void drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux, @@ -3173,6 +3290,10 @@ int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_bac int ret; u8 buf[2] = { 0 }; + /* The panel uses the PWM for controlling brightness levels */ + if (!bl->aux_set) + return 0; + if (bl->lsb_reg_used) { buf[0] = (level & 0xff00) >> 8; buf[1] = (level & 0x00ff); @@ -3199,7 +3320,7 @@ drm_edp_backlight_set_enable(struct drm_dp_aux *aux, const struct drm_edp_backli int ret; u8 buf; - /* The panel uses something other then DPCD for enabling its backlight */ + /* This panel uses the EDP_BL_PWR GPIO for enablement */ if (!bl->aux_enable) return 0; @@ -3234,11 +3355,11 @@ drm_edp_backlight_set_enable(struct drm_dp_aux *aux, const struct drm_edp_backli * restoring any important backlight state such as the given backlight level, the brightness byte * count, backlight frequency, etc. 
* - * Note that certain panels, while supporting brightness level controls over DPCD, may not support - * having their backlights enabled via the standard %DP_EDP_DISPLAY_CONTROL_REGISTER. On such panels - * &drm_edp_backlight_info.aux_enable will be set to %false, this function will skip the step of - * programming the %DP_EDP_DISPLAY_CONTROL_REGISTER, and the driver must perform the required - * implementation specific step for enabling the backlight after calling this function. + * Note that certain panels do not support being enabled or disabled via DPCD, but instead require + * that the driver handle enabling/disabling the panel through implementation-specific means using + * the EDP_BL_PWR GPIO. For such panels, &drm_edp_backlight_info.aux_enable will be set to %false, + * this function becomes a no-op, and the driver is expected to handle powering the panel on using + * the EDP_BL_PWR GPIO. * * Returns: %0 on success, negative error code on failure. */ @@ -3246,27 +3367,18 @@ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backli const u16 level) { int ret; - u8 dpcd_buf, new_dpcd_buf; + u8 dpcd_buf; - ret = drm_dp_dpcd_readb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf); - if (ret != 1) { - drm_dbg_kms(aux->drm_dev, - "%s: Failed to read backlight mode: %d\n", aux->name, ret); - return ret < 0 ? ret : -EIO; - } - - new_dpcd_buf = dpcd_buf; - - if ((dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) != DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) { - new_dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; - new_dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD; + if (bl->aux_set) + dpcd_buf = DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD; + else + dpcd_buf = DP_EDP_BACKLIGHT_CONTROL_MODE_PWM; - if (bl->pwmgen_bit_count) { - ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count); - if (ret != 1) - drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n", - aux->name, ret); - } + if (bl->pwmgen_bit_count) { + ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count); + if (ret != 1) + drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n", + aux->name, ret); } if (bl->pwm_freq_pre_divider) { @@ -3276,16 +3388,14 @@ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backli "%s: Failed to write aux backlight frequency: %d\n", aux->name, ret); else - new_dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE; + dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE; } - if (new_dpcd_buf != dpcd_buf) { - ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf); - if (ret != 1) { - drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux backlight mode: %d\n", - aux->name, ret); - return ret < 0 ? ret : -EIO; - } + ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf); + if (ret != 1) { + drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux backlight mode: %d\n", + aux->name, ret); + return ret < 0 ? ret : -EIO; } ret = drm_edp_backlight_set_level(aux, bl, level); @@ -3304,12 +3414,13 @@ EXPORT_SYMBOL(drm_edp_backlight_enable); * @aux: The DP AUX channel to use * @bl: Backlight capability info from drm_edp_backlight_init() * - * This function handles disabling DPCD backlight controls on a panel over AUX. Note that some - * panels have backlights that are enabled/disabled by other means, despite having their brightness - * values controlled through DPCD. 
On such panels &drm_edp_backlight_info.aux_enable will be set to - %false, this function will become a no-op (and we will skip updating - %DP_EDP_DISPLAY_CONTROL_REGISTER), and the driver must take care to perform it's own - implementation specific step for disabling the backlight. + * This function handles disabling DPCD backlight controls on a panel over AUX. + * + * Note that certain panels do not support being enabled or disabled via DPCD, but instead require + * that the driver handle enabling/disabling the panel through implementation-specific means using + * the EDP_BL_PWR GPIO. For such panels, &drm_edp_backlight_info.aux_enable will be set to %false, + * this function becomes a no-op, and the driver is expected to handle powering the panel off using + * the EDP_BL_PWR GPIO. * * Returns: %0 on success or no-op, negative error code on failure. */ @@ -3333,6 +3444,9 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf int ret; u8 pn, pn_min, pn_max; + if (!bl->aux_set) + return 0; + ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn); if (ret != 1) { drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap: %d\n", @@ -3418,7 +3532,7 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf } static inline int -drm_edp_backlight_probe_level(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl, +drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl, u8 *current_mode) { int ret; @@ -3433,6 +3547,9 @@ drm_edp_backlight_probe_level(struct drm_dp_aux *aux, struct drm_edp_backlight_i } *current_mode = (mode_reg & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK); + if (!bl->aux_set) + return 0; + if (*current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) { int size = 1 + bl->lsb_reg_used; @@ -3463,7 +3580,7 @@ drm_edp_backlight_probe_level(struct drm_dp_aux *aux, struct drm_edp_backlight_i * @bl: The &drm_edp_backlight_info struct to fill out with information on the backlight * @driver_pwm_freq_hz: Optional PWM frequency from the driver in hz * @edp_dpcd: A cached copy of the eDP DPCD - * @current_level: Where to store the probed brightness level + * @current_level: Where to store the probed brightness level, if any * @current_mode: Where to store the currently set backlight control mode * * Initializes a &drm_edp_backlight_info struct by probing @aux for its backlight capabilities, @@ -3483,24 +3600,38 @@ drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl if (edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) bl->aux_enable = true; + if (edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP) + bl->aux_set = true; if (edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) bl->lsb_reg_used = true; + /* Sanity check caps */ + if (!bl->aux_set && !(edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) { + drm_dbg_kms(aux->drm_dev, + "%s: Panel supports neither AUX nor PWM brightness control? 
Aborting\n", + aux->name); + return -EINVAL; + } + ret = drm_edp_backlight_probe_max(aux, bl, driver_pwm_freq_hz, edp_dpcd); if (ret < 0) return ret; - ret = drm_edp_backlight_probe_level(aux, bl, current_mode); + ret = drm_edp_backlight_probe_state(aux, bl, current_mode); if (ret < 0) return ret; *current_level = ret; drm_dbg_kms(aux->drm_dev, - "%s: Found backlight level=%d/%d pwm_freq_pre_divider=%d mode=%x\n", - aux->name, *current_level, bl->max, bl->pwm_freq_pre_divider, *current_mode); - drm_dbg_kms(aux->drm_dev, - "%s: Backlight caps: pwmgen_bit_count=%d lsb_reg_used=%d aux_enable=%d\n", - aux->name, bl->pwmgen_bit_count, bl->lsb_reg_used, bl->aux_enable); + "%s: Found backlight: aux_set=%d aux_enable=%d mode=%d\n", + aux->name, bl->aux_set, bl->aux_enable, *current_mode); + if (bl->aux_set) { + drm_dbg_kms(aux->drm_dev, + "%s: Backlight caps: level=%d/%d pwm_freq_pre_divider=%d lsb_reg_used=%d\n", + aux->name, *current_level, bl->max, bl->pwm_freq_pre_divider, + bl->lsb_reg_used); + } + return 0; } EXPORT_SYMBOL(drm_edp_backlight_init); diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 7a5097467ba5..8214a0b1ab7f 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -43,6 +43,7 @@ #include <drm/drm_managed.h> #include <drm/drm_mode_object.h> #include <drm/drm_print.h> +#include <drm/drm_privacy_screen_machine.h> #include "drm_crtc_internal.h" #include "drm_internal.h" @@ -581,6 +582,7 @@ static int drm_dev_init(struct drm_device *dev, const struct drm_driver *driver, struct device *parent) { + struct inode *inode; int ret; if (!drm_core_init_complete) { @@ -617,13 +619,15 @@ static int drm_dev_init(struct drm_device *dev, if (ret) return ret; - dev->anon_inode = drm_fs_inode_new(); - if (IS_ERR(dev->anon_inode)) { - ret = PTR_ERR(dev->anon_inode); + inode = drm_fs_inode_new(); + if (IS_ERR(inode)) { + ret = PTR_ERR(inode); DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret); goto err; } + dev->anon_inode = inode; + if (drm_core_check_feature(dev, DRIVER_RENDER)) { ret = drm_minor_alloc(dev, DRM_MINOR_RENDER); if (ret) @@ -1029,6 +1033,7 @@ static const struct file_operations drm_stub_fops = { static void drm_core_exit(void) { + drm_privacy_screen_lookup_exit(); unregister_chrdev(DRM_MAJOR, "drm"); debugfs_remove(drm_debugfs_root); drm_sysfs_destroy(); @@ -1056,6 +1061,8 @@ static int __init drm_core_init(void) if (ret < 0) goto error; + drm_privacy_screen_lookup_init(); + drm_core_init_complete = true; DRM_DEBUG("Initialized\n"); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 8e7a124d6c5a..9727a59d35fd 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -2338,7 +2338,7 @@ static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, return PTR_ERR(fbi); fbi->fbops = &drm_fbdev_fb_ops; - fbi->screen_size = fb->height * fb->pitches[0]; + fbi->screen_size = sizes->surface_height * fb->pitches[0]; fbi->fix.smem_len = fbi->screen_size; drm_fb_helper_fill_info(fbi, fb_helper, sizes); diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c index 69fde60e36b3..0f28dd2bdd72 100644 --- a/drivers/gpu/drm/drm_format_helper.c +++ b/drivers/gpu/drm/drm_format_helper.c @@ -17,71 +17,91 @@ #include <drm/drm_fourcc.h> #include <drm/drm_rect.h> -static unsigned int clip_offset(struct drm_rect *clip, - unsigned int pitch, unsigned int cpp) +static unsigned int clip_offset(const struct drm_rect *clip, unsigned int pitch, unsigned int cpp) { 
return clip->y1 * pitch + clip->x1 * cpp; } /** + * drm_fb_clip_offset - Returns the clipping rectangle's byte-offset in a framebuffer + * @pitch: Framebuffer line pitch in bytes + * @format: Framebuffer format + * @clip: Clip rectangle + * + * Returns: + * The byte offset of the clip rectangle's top-left corner within the framebuffer. + */ +unsigned int drm_fb_clip_offset(unsigned int pitch, const struct drm_format_info *format, + const struct drm_rect *clip) +{ + return clip_offset(clip, pitch, format->cpp[0]); +} +EXPORT_SYMBOL(drm_fb_clip_offset); + +/** * drm_fb_memcpy - Copy clip buffer * @dst: Destination buffer + * @dst_pitch: Number of bytes between two consecutive scanlines within dst * @vaddr: Source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy * * This function does not apply clipping on dst, i.e. the destination - * is a small buffer containing the clip rect only. + * is at the top-left corner. */ -void drm_fb_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb, - struct drm_rect *clip) +void drm_fb_memcpy(void *dst, unsigned int dst_pitch, const void *vaddr, + const struct drm_framebuffer *fb, const struct drm_rect *clip) { unsigned int cpp = fb->format->cpp[0]; size_t len = (clip->x2 - clip->x1) * cpp; unsigned int y, lines = clip->y2 - clip->y1; + if (!dst_pitch) + dst_pitch = len; + vaddr += clip_offset(clip, fb->pitches[0], cpp); for (y = 0; y < lines; y++) { memcpy(dst, vaddr, len); vaddr += fb->pitches[0]; - dst += len; + dst += dst_pitch; } } EXPORT_SYMBOL(drm_fb_memcpy); /** - * drm_fb_memcpy_dstclip - Copy clip buffer + * drm_fb_memcpy_toio - Copy clip buffer * @dst: Destination buffer (iomem) * @dst_pitch: Number of bytes between two consecutive scanlines within dst * @vaddr: Source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy * - * This function applies clipping on dst, i.e. the destination is a - * full (iomem) framebuffer but only the clip rect content is copied over. + * This function does not apply clipping on dst, i.e. the destination + * is at the top-left corner. */ -void drm_fb_memcpy_dstclip(void __iomem *dst, unsigned int dst_pitch, - void *vaddr, struct drm_framebuffer *fb, - struct drm_rect *clip) +void drm_fb_memcpy_toio(void __iomem *dst, unsigned int dst_pitch, const void *vaddr, + const struct drm_framebuffer *fb, const struct drm_rect *clip) { unsigned int cpp = fb->format->cpp[0]; - unsigned int offset = clip_offset(clip, dst_pitch, cpp); size_t len = (clip->x2 - clip->x1) * cpp; unsigned int y, lines = clip->y2 - clip->y1; - vaddr += offset; - dst += offset; + if (!dst_pitch) + dst_pitch = len; + + vaddr += clip_offset(clip, fb->pitches[0], cpp); for (y = 0; y < lines; y++) { memcpy_toio(dst, vaddr, len); vaddr += fb->pitches[0]; dst += dst_pitch; } } -EXPORT_SYMBOL(drm_fb_memcpy_dstclip); +EXPORT_SYMBOL(drm_fb_memcpy_toio); /** * drm_fb_swab - Swap bytes into clip buffer * @dst: Destination buffer + * @dst_pitch: Number of bytes between two consecutive scanlines within dst * @src: Source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy @@ -91,21 +111,27 @@ EXPORT_SYMBOL(drm_fb_memcpy_dstclip); * time to speed up slow uncached reads. * * This function does not apply clipping on dst, i.e. the destination - * is a small buffer containing the clip rect only. + * is at the top-left corner. 
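 + *
 + * As with the other helpers here, a @dst_pitch of zero is treated as tightly
 + * packed output lines; e.g. for a 100 pixel wide clip of a 2-byte format,
 + * passing 0 behaves like an explicit @dst_pitch of 200 bytes.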
*/ -void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb, - struct drm_rect *clip, bool cached) +void drm_fb_swab(void *dst, unsigned int dst_pitch, const void *src, + const struct drm_framebuffer *fb, const struct drm_rect *clip, + bool cached) { u8 cpp = fb->format->cpp[0]; size_t len = drm_rect_width(clip) * cpp; - u16 *src16, *dst16 = dst; - u32 *src32, *dst32 = dst; + const u16 *src16; + const u32 *src32; + u16 *dst16; + u32 *dst32; unsigned int x, y; void *buf = NULL; if (WARN_ON_ONCE(cpp != 2 && cpp != 4)) return; + if (!dst_pitch) + dst_pitch = len; + if (!cached) buf = kmalloc(len, GFP_KERNEL); @@ -121,6 +147,9 @@ void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb, src32 = src; } + dst16 = dst; + dst32 = dst; + for (x = clip->x1; x < clip->x2; x++) { if (cpp == 4) *dst32++ = swab32(*src32++); @@ -129,13 +158,14 @@ void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb, } src += fb->pitches[0]; + dst += dst_pitch; } kfree(buf); } EXPORT_SYMBOL(drm_fb_swab); -static void drm_fb_xrgb8888_to_rgb332_line(u8 *dbuf, __le32 *sbuf, unsigned int pixels) +static void drm_fb_xrgb8888_to_rgb332_line(u8 *dbuf, const __le32 *sbuf, unsigned int pixels) { unsigned int x; u32 pix; @@ -151,23 +181,24 @@ static void drm_fb_xrgb8888_to_rgb332_line(u8 *dbuf, __le32 *sbuf, unsigned int /** * drm_fb_xrgb8888_to_rgb332 - Convert XRGB8888 to RGB332 clip buffer * @dst: RGB332 destination buffer + * @dst_pitch: Number of bytes between two consecutive scanlines within dst * @src: XRGB8888 source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy * * Drivers can use this function for RGB332 devices that don't natively support XRGB8888. - * - * This function does not apply clipping on dst, i.e. the destination is a small buffer - * containing the clip rect only. */ -void drm_fb_xrgb8888_to_rgb332(void *dst, void *src, struct drm_framebuffer *fb, - struct drm_rect *clip) +void drm_fb_xrgb8888_to_rgb332(void *dst, unsigned int dst_pitch, const void *src, + const struct drm_framebuffer *fb, const struct drm_rect *clip) { size_t width = drm_rect_width(clip); size_t src_len = width * sizeof(u32); unsigned int y; void *sbuf; + if (!dst_pitch) + dst_pitch = width; + /* Use a buffer to speed up access on buffers with uncached read mapping (i.e. WC) */ sbuf = kmalloc(src_len, GFP_KERNEL); if (!sbuf) @@ -178,14 +209,14 @@ void drm_fb_xrgb8888_to_rgb332(void *dst, void *src, struct drm_framebuffer *fb, memcpy(sbuf, src, src_len); drm_fb_xrgb8888_to_rgb332_line(dst, sbuf, width); src += fb->pitches[0]; - dst += width; + dst += dst_pitch; } kfree(sbuf); } EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb332); -static void drm_fb_xrgb8888_to_rgb565_line(u16 *dbuf, u32 *sbuf, +static void drm_fb_xrgb8888_to_rgb565_line(u16 *dbuf, const u32 *sbuf, unsigned int pixels, bool swab) { @@ -206,6 +237,7 @@ static void drm_fb_xrgb8888_to_rgb565_line(u16 *dbuf, u32 *sbuf, /** * drm_fb_xrgb8888_to_rgb565 - Convert XRGB8888 to RGB565 clip buffer * @dst: RGB565 destination buffer + * @dst_pitch: Number of bytes between two consecutive scanlines within dst * @vaddr: XRGB8888 source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy @@ -213,13 +245,10 @@ static void drm_fb_xrgb8888_to_rgb565_line(u16 *dbuf, u32 *sbuf, * * Drivers can use this function for RGB565 devices that don't natively * support XRGB8888. - * - * This function does not apply clipping on dst, i.e. the destination - * is a small buffer containing the clip rect only. 
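 + *
 + * For illustration: RGB565 keeps the 5/6/5 most significant bits of each
 + * XRGB8888 channel, so the pixel 0x00ff8040 converts to
 + * (0x1f << 11) | (0x20 << 5) | 0x08 = 0xfc08 (before any optional byte swap).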
*/ -void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr, - struct drm_framebuffer *fb, - struct drm_rect *clip, bool swab) +void drm_fb_xrgb8888_to_rgb565(void *dst, unsigned int dst_pitch, const void *vaddr, + const struct drm_framebuffer *fb, const struct drm_rect *clip, + bool swab) { size_t linepixels = clip->x2 - clip->x1; size_t src_len = linepixels * sizeof(u32); @@ -227,6 +256,9 @@ void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr, unsigned y, lines = clip->y2 - clip->y1; void *sbuf; + if (!dst_pitch) + dst_pitch = dst_len; + /* * The cma memory is write-combined so reads are uncached. * Speed up by fetching one line at a time. @@ -240,7 +272,7 @@ void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr, memcpy(sbuf, vaddr, src_len); drm_fb_xrgb8888_to_rgb565_line(dst, sbuf, linepixels, swab); vaddr += fb->pitches[0]; - dst += dst_len; + dst += dst_pitch; } kfree(sbuf); @@ -248,9 +280,9 @@ void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr, EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565); /** - * drm_fb_xrgb8888_to_rgb565_dstclip - Convert XRGB8888 to RGB565 clip buffer + * drm_fb_xrgb8888_to_rgb565_toio - Convert XRGB8888 to RGB565 clip buffer * @dst: RGB565 destination buffer (iomem) - * @dst_pitch: destination buffer pitch + * @dst_pitch: Number of bytes between two consecutive scanlines within dst * @vaddr: XRGB8888 source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy @@ -258,37 +290,36 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565); * * Drivers can use this function for RGB565 devices that don't natively * support XRGB8888. - * - * This function applies clipping on dst, i.e. the destination is a - * full (iomem) framebuffer but only the clip rect content is copied over. */ -void drm_fb_xrgb8888_to_rgb565_dstclip(void __iomem *dst, unsigned int dst_pitch, - void *vaddr, struct drm_framebuffer *fb, - struct drm_rect *clip, bool swab) +void drm_fb_xrgb8888_to_rgb565_toio(void __iomem *dst, unsigned int dst_pitch, + const void *vaddr, const struct drm_framebuffer *fb, + const struct drm_rect *clip, bool swab) { size_t linepixels = clip->x2 - clip->x1; size_t dst_len = linepixels * sizeof(u16); unsigned y, lines = clip->y2 - clip->y1; void *dbuf; + if (!dst_pitch) + dst_pitch = dst_len; + dbuf = kmalloc(dst_len, GFP_KERNEL); if (!dbuf) return; vaddr += clip_offset(clip, fb->pitches[0], sizeof(u32)); - dst += clip_offset(clip, dst_pitch, sizeof(u16)); for (y = 0; y < lines; y++) { drm_fb_xrgb8888_to_rgb565_line(dbuf, vaddr, linepixels, swab); memcpy_toio(dst, dbuf, dst_len); vaddr += fb->pitches[0]; - dst += dst_len; + dst += dst_pitch; } kfree(dbuf); } -EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565_dstclip); +EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565_toio); -static void drm_fb_xrgb8888_to_rgb888_line(u8 *dbuf, u32 *sbuf, +static void drm_fb_xrgb8888_to_rgb888_line(u8 *dbuf, const u32 *sbuf, unsigned int pixels) { unsigned int x; @@ -303,24 +334,25 @@ static void drm_fb_xrgb8888_to_rgb888_line(u8 *dbuf, u32 *sbuf, /** * drm_fb_xrgb8888_to_rgb888 - Convert XRGB8888 to RGB888 clip buffer * @dst: RGB888 destination buffer + * @dst_pitch: Number of bytes between two consecutive scanlines within dst * @src: XRGB8888 source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy * * Drivers can use this function for RGB888 devices that don't natively * support XRGB8888. - * - * This function does not apply clipping on dst, i.e. the destination - * is a small buffer containing the clip rect only. 
*/ -void drm_fb_xrgb8888_to_rgb888(void *dst, void *src, struct drm_framebuffer *fb, - struct drm_rect *clip) +void drm_fb_xrgb8888_to_rgb888(void *dst, unsigned int dst_pitch, const void *src, + const struct drm_framebuffer *fb, const struct drm_rect *clip) { size_t width = drm_rect_width(clip); size_t src_len = width * sizeof(u32); unsigned int y; void *sbuf; + if (!dst_pitch) + dst_pitch = width * 3; + /* Use a buffer to speed up access on buffers with uncached read mapping (i.e. WC) */ sbuf = kmalloc(src_len, GFP_KERNEL); if (!sbuf) @@ -331,7 +363,7 @@ void drm_fb_xrgb8888_to_rgb888(void *dst, void *src, struct drm_framebuffer *fb, memcpy(sbuf, src, src_len); drm_fb_xrgb8888_to_rgb888_line(dst, sbuf, width); src += fb->pitches[0]; - dst += width * 3; + dst += dst_pitch; } kfree(sbuf); @@ -339,48 +371,103 @@ void drm_fb_xrgb8888_to_rgb888(void *dst, void *src, struct drm_framebuffer *fb, EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888); /** - * drm_fb_xrgb8888_to_rgb888_dstclip - Convert XRGB8888 to RGB888 clip buffer + * drm_fb_xrgb8888_to_rgb888_toio - Convert XRGB8888 to RGB888 clip buffer * @dst: RGB888 destination buffer (iomem) - * @dst_pitch: destination buffer pitch + * @dst_pitch: Number of bytes between two consecutive scanlines within dst * @vaddr: XRGB8888 source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy * * Drivers can use this function for RGB888 devices that don't natively * support XRGB8888. - * - * This function applies clipping on dst, i.e. the destination is a - * full (iomem) framebuffer but only the clip rect content is copied over. */ -void drm_fb_xrgb8888_to_rgb888_dstclip(void __iomem *dst, unsigned int dst_pitch, - void *vaddr, struct drm_framebuffer *fb, - struct drm_rect *clip) +void drm_fb_xrgb8888_to_rgb888_toio(void __iomem *dst, unsigned int dst_pitch, + const void *vaddr, const struct drm_framebuffer *fb, + const struct drm_rect *clip) { size_t linepixels = clip->x2 - clip->x1; size_t dst_len = linepixels * 3; unsigned y, lines = clip->y2 - clip->y1; void *dbuf; + if (!dst_pitch) + dst_pitch = dst_len; + dbuf = kmalloc(dst_len, GFP_KERNEL); if (!dbuf) return; vaddr += clip_offset(clip, fb->pitches[0], sizeof(u32)); - dst += clip_offset(clip, dst_pitch, sizeof(u16)); for (y = 0; y < lines; y++) { drm_fb_xrgb8888_to_rgb888_line(dbuf, vaddr, linepixels); memcpy_toio(dst, dbuf, dst_len); vaddr += fb->pitches[0]; - dst += dst_len; + dst += dst_pitch; } kfree(dbuf); } -EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888_dstclip); +EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888_toio); + +static void drm_fb_xrgb8888_to_xrgb2101010_line(u32 *dbuf, const u32 *sbuf, + unsigned int pixels) +{ + unsigned int x; + u32 val32; + + for (x = 0; x < pixels; x++) { + val32 = ((sbuf[x] & 0x000000FF) << 2) | + ((sbuf[x] & 0x0000FF00) << 4) | + ((sbuf[x] & 0x00FF0000) << 6); + *dbuf++ = val32 | ((val32 >> 8) & 0x00300C03); + } +} + +/** + * drm_fb_xrgb8888_to_xrgb2101010_toio - Convert XRGB8888 to XRGB2101010 clip + * buffer + * @dst: XRGB2101010 destination buffer (iomem) + * @dst_pitch: Number of bytes between two consecutive scanlines within dst + * @vaddr: XRGB8888 source buffer + * @fb: DRM framebuffer + * @clip: Clip rectangle area to copy + * + * Drivers can use this function for XRGB2101010 devices that don't natively + * support XRGB8888. 
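 + *
 + * The conversion widens each 8-bit channel to 10 bits by shifting it up by
 + * two and replicating its two most significant bits into the two least
 + * significant bits, so e.g. a 0xff channel value expands to 0x3ff.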
+ */ +void drm_fb_xrgb8888_to_xrgb2101010_toio(void __iomem *dst, + unsigned int dst_pitch, const void *vaddr, + const struct drm_framebuffer *fb, + const struct drm_rect *clip) +{ + size_t linepixels = clip->x2 - clip->x1; + size_t dst_len = linepixels * sizeof(u32); + unsigned int y, lines = clip->y2 - clip->y1; + void *dbuf; + + if (!dst_pitch) + dst_pitch = dst_len; + + dbuf = kmalloc(dst_len, GFP_KERNEL); + if (!dbuf) + return; + + vaddr += clip_offset(clip, fb->pitches[0], sizeof(u32)); + for (y = 0; y < lines; y++) { + drm_fb_xrgb8888_to_xrgb2101010_line(dbuf, vaddr, linepixels); + memcpy_toio(dst, dbuf, dst_len); + vaddr += fb->pitches[0]; + dst += dst_pitch; + } + + kfree(dbuf); +} +EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010_toio); /** * drm_fb_xrgb8888_to_gray8 - Convert XRGB8888 to grayscale * @dst: 8-bit grayscale destination buffer + * @dst_pitch: Number of bytes between two consecutive scanlines within dst * @vaddr: XRGB8888 source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy @@ -394,16 +481,21 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888_dstclip); * * ITU BT.601 is used for the RGB -> luma (brightness) conversion. */ -void drm_fb_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb, - struct drm_rect *clip) +void drm_fb_xrgb8888_to_gray8(void *dst, unsigned int dst_pitch, const void *vaddr, + const struct drm_framebuffer *fb, const struct drm_rect *clip) { unsigned int len = (clip->x2 - clip->x1) * sizeof(u32); unsigned int x, y; void *buf; - u32 *src; + u8 *dst8; + u32 *src32; if (WARN_ON(fb->format->format != DRM_FORMAT_XRGB8888)) return; + + if (!dst_pitch) + dst_pitch = drm_rect_width(clip); + /* * The cma memory is write-combined so reads are uncached. * Speed up by fetching one line at a time. @@ -412,20 +504,22 @@ void drm_fb_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb, if (!buf) return; + vaddr += clip_offset(clip, fb->pitches[0], sizeof(u32)); for (y = clip->y1; y < clip->y2; y++) { - src = vaddr + (y * fb->pitches[0]); - src += clip->x1; - memcpy(buf, src, len); - src = buf; + dst8 = dst; + src32 = memcpy(buf, vaddr, len); for (x = clip->x1; x < clip->x2; x++) { - u8 r = (*src & 0x00ff0000) >> 16; - u8 g = (*src & 0x0000ff00) >> 8; - u8 b = *src & 0x000000ff; + u8 r = (*src32 & 0x00ff0000) >> 16; + u8 g = (*src32 & 0x0000ff00) >> 8; + u8 b = *src32 & 0x000000ff; /* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */ - *dst++ = (3 * r + 6 * g + b) / 10; - src++; + *dst8++ = (3 * r + 6 * g + b) / 10; + src32++; } + + vaddr += fb->pitches[0]; + dst += dst_pitch; } kfree(buf); @@ -433,7 +527,7 @@ void drm_fb_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb, EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8); /** - * drm_fb_blit_rect_dstclip - Copy parts of a framebuffer to display memory + * drm_fb_blit_toio - Copy parts of a framebuffer to display memory * @dst: The display memory to copy to * @dst_pitch: Number of bytes between two consecutive scanlines within dst * @dst_format: FOURCC code of the display's color format @@ -445,17 +539,14 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8); * formats of the display and the framebuffer mismatch, the blit function * will attempt to convert between them. * - * Use drm_fb_blit_dstclip() to copy the full framebuffer. - * * Returns: * 0 on success, or * -EINVAL if the color-format conversion failed, or * a negative error code otherwise. 
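 + *
 + * A minimal call sketch (hypothetical buffer, pitch and damage-rect
 + * variables, for illustration only)::
 + *
 + *	ret = drm_fb_blit_toio(screen_base, fb_pitch, DRM_FORMAT_RGB565,
 + *			       vmap, fb, &damage);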
*/ -int drm_fb_blit_rect_dstclip(void __iomem *dst, unsigned int dst_pitch, - uint32_t dst_format, void *vmap, - struct drm_framebuffer *fb, - struct drm_rect *clip) +int drm_fb_blit_toio(void __iomem *dst, unsigned int dst_pitch, uint32_t dst_format, + const void *vmap, const struct drm_framebuffer *fb, + const struct drm_rect *clip) { uint32_t fb_format = fb->format->format; @@ -464,58 +555,32 @@ int drm_fb_blit_rect_dstclip(void __iomem *dst, unsigned int dst_pitch, fb_format = DRM_FORMAT_XRGB8888; if (dst_format == DRM_FORMAT_ARGB8888) dst_format = DRM_FORMAT_XRGB8888; + if (fb_format == DRM_FORMAT_ARGB2101010) + fb_format = DRM_FORMAT_XRGB2101010; + if (dst_format == DRM_FORMAT_ARGB2101010) + dst_format = DRM_FORMAT_XRGB2101010; if (dst_format == fb_format) { - drm_fb_memcpy_dstclip(dst, dst_pitch, vmap, fb, clip); + drm_fb_memcpy_toio(dst, dst_pitch, vmap, fb, clip); return 0; } else if (dst_format == DRM_FORMAT_RGB565) { if (fb_format == DRM_FORMAT_XRGB8888) { - drm_fb_xrgb8888_to_rgb565_dstclip(dst, dst_pitch, - vmap, fb, clip, - false); + drm_fb_xrgb8888_to_rgb565_toio(dst, dst_pitch, vmap, fb, clip, false); return 0; } } else if (dst_format == DRM_FORMAT_RGB888) { if (fb_format == DRM_FORMAT_XRGB8888) { - drm_fb_xrgb8888_to_rgb888_dstclip(dst, dst_pitch, - vmap, fb, clip); + drm_fb_xrgb8888_to_rgb888_toio(dst, dst_pitch, vmap, fb, clip); + return 0; + } + } else if (dst_format == DRM_FORMAT_XRGB2101010) { + if (fb_format == DRM_FORMAT_XRGB8888) { + drm_fb_xrgb8888_to_xrgb2101010_toio(dst, dst_pitch, vmap, fb, clip); return 0; } } return -EINVAL; } -EXPORT_SYMBOL(drm_fb_blit_rect_dstclip); - -/** - * drm_fb_blit_dstclip - Copy framebuffer to display memory - * @dst: The display memory to copy to - * @dst_pitch: Number of bytes between two consecutive scanlines within dst - * @dst_format: FOURCC code of the display's color format - * @vmap: The framebuffer memory to copy from - * @fb: The framebuffer to copy from - * - * This function copies a full framebuffer to display memory. If the formats - * of the display and the framebuffer mismatch, the copy function will - * attempt to convert between them. - * - * See drm_fb_blit_rect_dstclip() for more information. - * - * Returns: - * 0 on success, or a negative error code otherwise. 
- */ -int drm_fb_blit_dstclip(void __iomem *dst, unsigned int dst_pitch, - uint32_t dst_format, void *vmap, - struct drm_framebuffer *fb) -{ - struct drm_rect fullscreen = { - .x1 = 0, - .x2 = fb->width, - .y1 = 0, - .y2 = fb->height, - }; - return drm_fb_blit_rect_dstclip(dst, dst_pitch, dst_format, vmap, fb, - &fullscreen); -} -EXPORT_SYMBOL(drm_fb_blit_dstclip); +EXPORT_SYMBOL(drm_fb_blit_toio); diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c index 25837b1d6639..07741b678798 100644 --- a/drivers/gpu/drm/drm_fourcc.c +++ b/drivers/gpu/drm/drm_fourcc.c @@ -269,6 +269,9 @@ const struct drm_format_info *__drm_format_info(u32 format) .num_planes = 3, .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 0, .vsub = 0, .is_yuv = true }, + { .format = DRM_FORMAT_P030, .depth = 0, .num_planes = 2, + .char_per_block = { 4, 8, 0 }, .block_w = { 3, 3, 0 }, .block_h = { 1, 1, 0 }, + .hsub = 2, .vsub = 2, .is_yuv = true}, }; unsigned int i; diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c index e570398abd78..c3189afe10cb 100644 --- a/drivers/gpu/drm/drm_gem_atomic_helper.c +++ b/drivers/gpu/drm/drm_gem_atomic_helper.c @@ -143,6 +143,7 @@ */ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) { + struct dma_resv_iter cursor; struct drm_gem_object *obj; struct dma_fence *fence; @@ -150,9 +151,18 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_st return 0; obj = drm_gem_fb_get_obj(state->fb, 0); - fence = dma_resv_get_excl_unlocked(obj->resv); - drm_atomic_set_fence_for_plane(state, fence); + dma_resv_iter_begin(&cursor, obj->resv, false); + dma_resv_for_each_fence_unlocked(&cursor, fence) { + /* TODO: Currently there should be only one write fence, so this + * here works fine. But drm_atomic_set_fence_for_plane() should + * be changed to be able to handle more fences in general for + * multiple BOs per fb anyway. */ + dma_fence_get(fence); + break; + } + dma_resv_iter_end(&cursor); + drm_atomic_set_fence_for_plane(state, fence); return 0; } EXPORT_SYMBOL_GPL(drm_gem_plane_helper_prepare_fb); diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index d53388199f34..cefd0cbf9deb 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -13,6 +13,7 @@ #include <linux/dma-mapping.h> #include <linux/export.h> #include <linux/mm.h> +#include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> @@ -31,14 +32,18 @@ * The DRM GEM/CMA helpers use this allocator as a means to provide buffer * objects that are physically contiguous in memory. This is useful for * display drivers that are unable to map scattered buffers via an IOMMU. + * + * For GEM callback helpers in struct &drm_gem_object functions, see likewise + * named functions with an _object_ infix (e.g., drm_gem_cma_object_vmap() wraps + * drm_gem_cma_vmap()). These helpers perform the necessary type conversion. 
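 + *
 + * A sketch of that wrapper pattern (hypothetical driver code following the
 + * naming convention described above)::
 + *
 + *	static void foo_gem_free(struct drm_gem_object *obj)
 + *	{
 + *		struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
 + *
 + *		drm_gem_cma_free(cma_obj);
 + *	}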
*/ static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = { - .free = drm_gem_cma_free_object, - .print_info = drm_gem_cma_print_info, - .get_sg_table = drm_gem_cma_get_sg_table, - .vmap = drm_gem_cma_vmap, - .mmap = drm_gem_cma_mmap, + .free = drm_gem_cma_object_free, + .print_info = drm_gem_cma_object_print_info, + .get_sg_table = drm_gem_cma_object_get_sg_table, + .vmap = drm_gem_cma_object_vmap, + .mmap = drm_gem_cma_object_mmap, .vm_ops = &drm_gem_cma_vm_ops, }; @@ -62,18 +67,21 @@ __drm_gem_cma_create(struct drm_device *drm, size_t size, bool private) struct drm_gem_object *gem_obj; int ret = 0; - if (drm->driver->gem_create_object) + if (drm->driver->gem_create_object) { gem_obj = drm->driver->gem_create_object(drm, size); - else - gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL); - if (!gem_obj) - return ERR_PTR(-ENOMEM); + if (IS_ERR(gem_obj)) + return ERR_CAST(gem_obj); + cma_obj = to_drm_gem_cma_obj(gem_obj); + } else { + cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL); + if (!cma_obj) + return ERR_PTR(-ENOMEM); + gem_obj = &cma_obj->base; + } if (!gem_obj->funcs) gem_obj->funcs = &drm_gem_cma_default_funcs; - cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base); - if (private) { drm_gem_private_object_init(drm, gem_obj, size); @@ -191,18 +199,16 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv, } /** - * drm_gem_cma_free_object - free resources associated with a CMA GEM object - * @gem_obj: GEM object to free + * drm_gem_cma_free - free resources associated with a CMA GEM object + * @cma_obj: CMA GEM object to free * * This function frees the backing memory of the CMA GEM object, cleans up the * GEM object state and frees the memory used to store the object itself. * If the buffer is imported and the virtual address is set, it is released. - * Drivers using the CMA helpers should set this as their - * &drm_gem_object_funcs.free callback. */ -void drm_gem_cma_free_object(struct drm_gem_object *gem_obj) +void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj) { - struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem_obj); + struct drm_gem_object *gem_obj = &cma_obj->base; struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(cma_obj->vaddr); if (gem_obj->import_attach) { @@ -210,15 +216,20 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj) dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map); drm_prime_gem_destroy(gem_obj, cma_obj->sgt); } else if (cma_obj->vaddr) { - dma_free_wc(gem_obj->dev->dev, cma_obj->base.size, - cma_obj->vaddr, cma_obj->paddr); + if (cma_obj->map_noncoherent) + dma_free_noncoherent(gem_obj->dev->dev, cma_obj->base.size, + cma_obj->vaddr, cma_obj->paddr, + DMA_TO_DEVICE); + else + dma_free_wc(gem_obj->dev->dev, cma_obj->base.size, + cma_obj->vaddr, cma_obj->paddr); } drm_gem_object_release(gem_obj); kfree(cma_obj); } -EXPORT_SYMBOL_GPL(drm_gem_cma_free_object); +EXPORT_SYMBOL_GPL(drm_gem_cma_free); /** * drm_gem_cma_dumb_create_internal - create a dumb buffer object @@ -365,18 +376,15 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area); /** * drm_gem_cma_print_info() - Print &drm_gem_cma_object info for debugfs + * @cma_obj: CMA GEM object * @p: DRM printer * @indent: Tab indentation level - * @obj: GEM object * - * This function can be used as the &drm_driver->gem_print_info callback. - * It prints paddr and vaddr for use in e.g. debugfs output. + * This function prints paddr and vaddr for use in e.g. debugfs output. 
*/ -void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent, - const struct drm_gem_object *obj) +void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj, + struct drm_printer *p, unsigned int indent) { - const struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); - drm_printf_indent(p, indent, "paddr=%pad\n", &cma_obj->paddr); drm_printf_indent(p, indent, "vaddr=%p\n", cma_obj->vaddr); } @@ -385,18 +393,17 @@ EXPORT_SYMBOL(drm_gem_cma_print_info); /** * drm_gem_cma_get_sg_table - provide a scatter/gather table of pinned * pages for a CMA GEM object - * @obj: GEM object + * @cma_obj: CMA GEM object * - * This function exports a scatter/gather table by - * calling the standard DMA mapping API. Drivers using the CMA helpers should - * set this as their &drm_gem_object_funcs.get_sg_table callback. + * This function exports a scatter/gather table by calling the standard + * DMA mapping API. * * Returns: * A pointer to the scatter/gather table of pinned pages or NULL on failure. */ -struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_object *obj) +struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj) { - struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); + struct drm_gem_object *obj = &cma_obj->base; struct sg_table *sgt; int ret; @@ -462,23 +469,19 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table); /** * drm_gem_cma_vmap - map a CMA GEM object into the kernel's virtual * address space - * @obj: GEM object + * @cma_obj: CMA GEM object * @map: Returns the kernel virtual address of the CMA GEM object's backing * store. * - * This function maps a buffer into the kernel's - * virtual address space. Since the CMA buffers are already mapped into the - * kernel virtual address space this simply returns the cached virtual - * address. Drivers using the CMA helpers should set this as their DRM - * driver's &drm_gem_object_funcs.vmap callback. + * This function maps a buffer into the kernel's virtual address space. + * Since the CMA buffers are already mapped into the kernel virtual address + * space this simply returns the cached virtual address. * * Returns: * 0 on success, or a negative error code otherwise. */ -int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj, struct dma_buf_map *map) { - struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); - dma_buf_map_set_vaddr(map, cma_obj->vaddr); return 0; @@ -487,20 +490,19 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_vmap); /** * drm_gem_cma_mmap - memory-map an exported CMA GEM object - * @obj: GEM object + * @cma_obj: CMA GEM object * @vma: VMA for the area to be mapped * * This function maps a buffer into a userspace process's address space. * In addition to the usual GEM VMA setup it immediately faults in the entire - * object instead of using on-demand faulting. Drivers that use the CMA - * helpers should set this as their &drm_gem_object_funcs.mmap callback. + * object instead of using on-demand faulting. * * Returns: * 0 on success or a negative error code on failure. 
*/ -int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma) { - struct drm_gem_cma_object *cma_obj; + struct drm_gem_object *obj = &cma_obj->base; int ret; /* @@ -511,8 +513,6 @@ int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node); vma->vm_flags &= ~VM_PFNMAP; - cma_obj = to_drm_gem_cma_obj(obj); - if (cma_obj->map_noncoherent) { vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); @@ -578,3 +578,7 @@ drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev, return obj; } EXPORT_SYMBOL(drm_gem_cma_prime_import_sg_table_vmap); + +MODULE_DESCRIPTION("DRM CMA memory-management helpers"); +MODULE_IMPORT_NS(DMA_BUF); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 7b9f69f21f1e..621924116eb4 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -5,10 +5,12 @@ #include <linux/dma-buf.h> #include <linux/export.h> +#include <linux/module.h> #include <linux/mutex.h> #include <linux/shmem_fs.h> #include <linux/slab.h> #include <linux/vmalloc.h> +#include <linux/module.h> #ifdef CONFIG_X86 #include <asm/set_memory.h> @@ -28,17 +30,22 @@ MODULE_IMPORT_NS(DMA_BUF); * * This library provides helpers for GEM objects backed by shmem buffers * allocated using anonymous pageable memory. + * + * Functions that operate on the GEM object receive struct &drm_gem_shmem_object. + * For GEM callback helpers in struct &drm_gem_object functions, see likewise + * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps + * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion. 
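 + *
 + * Driver code calls these helpers with the wrapped type directly; a minimal
 + * vmap sketch (hypothetical caller)::
 + *
 + *	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 + *	struct dma_buf_map map;
 + *	int ret;
 + *
 + *	ret = drm_gem_shmem_vmap(shmem, &map);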
*/ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = { - .free = drm_gem_shmem_free_object, - .print_info = drm_gem_shmem_print_info, - .pin = drm_gem_shmem_pin, - .unpin = drm_gem_shmem_unpin, - .get_sg_table = drm_gem_shmem_get_sg_table, - .vmap = drm_gem_shmem_vmap, - .vunmap = drm_gem_shmem_vunmap, - .mmap = drm_gem_shmem_mmap, + .free = drm_gem_shmem_object_free, + .print_info = drm_gem_shmem_object_print_info, + .pin = drm_gem_shmem_object_pin, + .unpin = drm_gem_shmem_object_unpin, + .get_sg_table = drm_gem_shmem_object_get_sg_table, + .vmap = drm_gem_shmem_object_vmap, + .vunmap = drm_gem_shmem_object_vunmap, + .mmap = drm_gem_shmem_object_mmap, }; static struct drm_gem_shmem_object * @@ -50,14 +57,17 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private) size = PAGE_ALIGN(size); - if (dev->driver->gem_create_object) + if (dev->driver->gem_create_object) { obj = dev->driver->gem_create_object(dev, size); - else - obj = kzalloc(sizeof(*shmem), GFP_KERNEL); - if (!obj) - return ERR_PTR(-ENOMEM); - - shmem = to_drm_gem_shmem_obj(obj); + if (IS_ERR(obj)) + return ERR_CAST(obj); + shmem = to_drm_gem_shmem_obj(obj); + } else { + shmem = kzalloc(sizeof(*shmem), GFP_KERNEL); + if (!shmem) + return ERR_PTR(-ENOMEM); + obj = &shmem->base; + } if (!obj->funcs) obj->funcs = &drm_gem_shmem_funcs; @@ -118,16 +128,15 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t EXPORT_SYMBOL_GPL(drm_gem_shmem_create); /** - * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object - * @obj: GEM object to free + * drm_gem_shmem_free - Free resources associated with a shmem GEM object + * @shmem: shmem GEM object to free * * This function cleans up the GEM object state and frees the memory used to - * store the object itself. It should be used to implement - * &drm_gem_object_funcs.free. + * store the object itself. */ -void drm_gem_shmem_free_object(struct drm_gem_object *obj) +void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) { - struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + struct drm_gem_object *obj = &shmem->base; WARN_ON(shmem->vmap_use_count); @@ -151,7 +160,7 @@ void drm_gem_shmem_free_object(struct drm_gem_object *obj) mutex_destroy(&shmem->vmap_lock); kfree(shmem); } -EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object); +EXPORT_SYMBOL_GPL(drm_gem_shmem_free); static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem) { @@ -246,19 +255,16 @@ EXPORT_SYMBOL(drm_gem_shmem_put_pages); /** * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object - * @obj: GEM object + * @shmem: shmem GEM object * * This function makes sure the backing pages are pinned in memory while the - * buffer is exported. It should only be used to implement - * &drm_gem_object_funcs.pin. + * buffer is exported. * * Returns: * 0 on success or a negative error code on failure. */ -int drm_gem_shmem_pin(struct drm_gem_object *obj) +int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem) { - struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - WARN_ON(shmem->base.import_attach); return drm_gem_shmem_get_pages(shmem); @@ -267,15 +273,13 @@ EXPORT_SYMBOL(drm_gem_shmem_pin); /** * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object - * @obj: GEM object + * @shmem: shmem GEM object * * This function removes the requirement that the backing pages are pinned in - * memory. It should only be used to implement &drm_gem_object_funcs.unpin. + * memory. 
*/ -void drm_gem_shmem_unpin(struct drm_gem_object *obj) +void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem) { - struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - WARN_ON(shmem->base.import_attach); drm_gem_shmem_put_pages(shmem); @@ -341,20 +345,16 @@ err_zero_use: * store. * * This function makes sure that a contiguous kernel virtual address mapping - * exists for the buffer backing the shmem GEM object. - * - * This function can be used to implement &drm_gem_object_funcs.vmap. But it can - * also be called by drivers directly, in which case it will hide the - * differences between dma-buf imported and natively allocated objects. + * exists for the buffer backing the shmem GEM object. It hides the differences + * between dma-buf imported and natively allocated objects. * * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap(). * * Returns: * 0 on success or a negative error code on failure. */ -int drm_gem_shmem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map) { - struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); int ret; ret = mutex_lock_interruptible(&shmem->vmap_lock); @@ -397,21 +397,18 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to * zero. * - * This function can be used to implement &drm_gem_object_funcs.vmap. But it can - * also be called by drivers directly, in which case it will hide the - * differences between dma-buf imported and natively allocated objects. + * This function hides the differences between dma-buf imported and natively + * allocated objects. */ -void drm_gem_shmem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) +void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map) { - struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - mutex_lock(&shmem->vmap_lock); drm_gem_shmem_vunmap_locked(shmem, map); mutex_unlock(&shmem->vmap_lock); } EXPORT_SYMBOL(drm_gem_shmem_vunmap); -struct drm_gem_shmem_object * +static struct drm_gem_shmem_object * drm_gem_shmem_create_with_handle(struct drm_file *file_priv, struct drm_device *dev, size_t size, uint32_t *handle) @@ -435,15 +432,12 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv, return shmem; } -EXPORT_SYMBOL(drm_gem_shmem_create_with_handle); /* Update madvise status, returns true if not purged, else * false or -errno. 
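drm_gem_shmem_vmap() and drm_gem_shmem_vunmap() above are refcounted through vmap_use_count, so driver code simply brackets CPU access with them. A hedged usage sketch, with error handling trimmed and the memset standing in for any CPU access:

	struct dma_buf_map map;
	int ret;

	ret = drm_gem_shmem_vmap(shmem, &map);
	if (ret)
		return ret;

	memset(map.vaddr, 0, shmem->base.size);	/* any CPU access */

	drm_gem_shmem_vunmap(shmem, &map);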
*/ -int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv) +int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv) { - struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - mutex_lock(&shmem->pages_lock); if (shmem->madv >= 0) @@ -457,14 +451,14 @@ int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv) } EXPORT_SYMBOL(drm_gem_shmem_madvise); -void drm_gem_shmem_purge_locked(struct drm_gem_object *obj) +void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem) { + struct drm_gem_object *obj = &shmem->base; struct drm_device *dev = obj->dev; - struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); WARN_ON(!drm_gem_shmem_is_purgeable(shmem)); - dma_unmap_sgtable(obj->dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0); + dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0); sg_free_table(shmem->sgt); kfree(shmem->sgt); shmem->sgt = NULL; @@ -483,18 +477,15 @@ void drm_gem_shmem_purge_locked(struct drm_gem_object *obj) */ shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1); - invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, - 0, (loff_t)-1); + invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1); } EXPORT_SYMBOL(drm_gem_shmem_purge_locked); -bool drm_gem_shmem_purge(struct drm_gem_object *obj) +bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem) { - struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - if (!mutex_trylock(&shmem->pages_lock)) return false; - drm_gem_shmem_purge_locked(obj); + drm_gem_shmem_purge_locked(shmem); mutex_unlock(&shmem->pages_lock); return true; @@ -602,19 +593,18 @@ static const struct vm_operations_struct drm_gem_shmem_vm_ops = { /** * drm_gem_shmem_mmap - Memory-map a shmem GEM object - * @obj: gem object + * @shmem: shmem GEM object * @vma: VMA for the area to be mapped * * This function implements an augmented version of the GEM DRM file mmap - * operation for shmem objects. Drivers which employ the shmem helpers should - * use this function as their &drm_gem_object_funcs.mmap handler. + * operation for shmem objects. * * Returns: * 0 on success or a negative error code on failure. */ -int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma) { - struct drm_gem_shmem_object *shmem; + struct drm_gem_object *obj = &shmem->base; int ret; if (obj->import_attach) { @@ -625,8 +615,6 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) return dma_buf_mmap(obj->dma_buf, vma, 0); } - shmem = to_drm_gem_shmem_obj(obj); - ret = drm_gem_shmem_get_pages(shmem); if (ret) { drm_gem_vm_close(vma); @@ -645,17 +633,13 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap); /** * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs + * @shmem: shmem GEM object * @p: DRM printer * @indent: Tab indentation level - * @obj: GEM object - * - * This implements the &drm_gem_object_funcs.info callback. 
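The madvise/purge pair above is aimed at driver shrinkers: drm_gem_shmem_purge() only trylocks pages_lock, so it is safe to call from reclaim paths. An illustrative sketch, assuming the convention drm_gem_shmem_is_purgeable() checks (madv > 0 means don't-need):

	/* userspace flagged the buffer as don't-need via a driver ioctl */
	drm_gem_shmem_madvise(shmem, 1);

	/* later, from the driver's shrinker, under memory pressure */
	if (drm_gem_shmem_is_purgeable(shmem) && drm_gem_shmem_purge(shmem))
		freed += shmem->base.size >> PAGE_SHIFT;	/* pages dropped */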
*/ -void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent, - const struct drm_gem_object *obj) +void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem, + struct drm_printer *p, unsigned int indent) { - const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count); drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count); drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr); @@ -665,12 +649,10 @@ EXPORT_SYMBOL(drm_gem_shmem_print_info); /** * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned * pages for a shmem GEM object - * @obj: GEM object + * @shmem: shmem GEM object * * This function exports a scatter/gather table suitable for PRIME usage by - * calling the standard DMA mapping API. Drivers should not call this function - * directly, instead it should only be used as an implementation for - * &drm_gem_object_funcs.get_sg_table. + * calling the standard DMA mapping API. * * Drivers who need to acquire an scatter/gather table for objects need to call * drm_gem_shmem_get_pages_sgt() instead. @@ -678,9 +660,9 @@ EXPORT_SYMBOL(drm_gem_shmem_print_info); * Returns: * A pointer to the scatter/gather table of pinned pages or NULL on failure. */ -struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj) +struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem) { - struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + struct drm_gem_object *obj = &shmem->base; WARN_ON(shmem->base.import_attach); @@ -691,7 +673,7 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table); /** * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a * scatter/gather table for a shmem GEM object. - * @obj: GEM object + * @shmem: shmem GEM object * * This function returns a scatter/gather table suitable for driver usage. If * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg @@ -704,10 +686,10 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table); * Returns: * A pointer to the scatter/gather table of pinned pages or errno on failure. 
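A driver-side sketch of the intended split: drm_gem_shmem_get_pages_sgt() is what drivers call for their own DMA setup, with the resulting table cached in shmem->sgt:

	struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(shmem);

	if (IS_ERR(sgt))
		return PTR_ERR(sgt);
	/* sgt is dma-mapped and cached; repeated calls return the same table */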
*/ -struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj) +struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem) { + struct drm_gem_object *obj = &shmem->base; int ret; - struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); struct sg_table *sgt; if (shmem->sgt) @@ -719,7 +701,7 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj) if (ret) return ERR_PTR(ret); - sgt = drm_gem_shmem_get_sg_table(&shmem->base); + sgt = drm_gem_shmem_get_sg_table(shmem); if (IS_ERR(sgt)) { ret = PTR_ERR(sgt); goto err_put_pages; @@ -776,3 +758,7 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev, return &shmem->base; } EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table); + +MODULE_DESCRIPTION("DRM SHMEM memory-management helpers"); +MODULE_IMPORT_NS(DMA_BUF); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index bfa386b98134..3f00192215d1 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -197,8 +197,8 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev, if (dev->driver->gem_create_object) { gem = dev->driver->gem_create_object(dev, size); - if (!gem) - return ERR_PTR(-ENOMEM); + if (IS_ERR(gem)) + return ERR_CAST(gem); gbo = drm_gem_vram_of_gem(gem); } else { gbo = kzalloc(sizeof(*gbo), GFP_KERNEL); diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c index c50fa6f0709f..60afa1865559 100644 --- a/drivers/gpu/drm/drm_hashtab.c +++ b/drivers/gpu/drm/drm_hashtab.c @@ -32,16 +32,16 @@ * Thomas Hellström <thomas-at-tungstengraphics-dot-com> */ -#include <linux/export.h> #include <linux/hash.h> #include <linux/mm.h> #include <linux/rculist.h> #include <linux/slab.h> #include <linux/vmalloc.h> -#include <drm/drm_hashtab.h> #include <drm/drm_print.h> +#include "drm_legacy.h" + int drm_ht_create(struct drm_open_hash *ht, unsigned int order) { unsigned int size = 1 << order; @@ -58,7 +58,6 @@ int drm_ht_create(struct drm_open_hash *ht, unsigned int order) } return 0; } -EXPORT_SYMBOL(drm_ht_create); void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key) { @@ -135,7 +134,6 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item) } return 0; } -EXPORT_SYMBOL(drm_ht_insert_item); /* * Just insert an item and return any "bits" bit key that hasn't been @@ -164,7 +162,6 @@ int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *it } return 0; } -EXPORT_SYMBOL(drm_ht_just_insert_please); int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item) @@ -178,7 +175,6 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, *item = hlist_entry(list, struct drm_hash_item, head); return 0; } -EXPORT_SYMBOL(drm_ht_find_item); int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key) { @@ -197,7 +193,6 @@ int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item) hlist_del_init_rcu(&item->head); return 0; } -EXPORT_SYMBOL(drm_ht_remove_item); void drm_ht_remove(struct drm_open_hash *ht) { @@ -206,4 +201,3 @@ void drm_ht_remove(struct drm_open_hash *ht) ht->table = NULL; } } -EXPORT_SYMBOL(drm_ht_remove); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 13e1d5c4ec82..d327638e15ee 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -66,7 +66,6 @@ #include "drm_internal.h" -#if IS_ENABLED(CONFIG_DRM_LEGACY) 
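The __drm_gem_shmem_create() and drm_gem_vram_create() hunks above both adapt to the same interface change: &drm_driver.gem_create_object now reports failure as an ERR_PTR() instead of NULL. Under that assumption, a driver hook takes roughly this shape (my_bo and my_gem_create_object are hypothetical):

	static struct drm_gem_object *my_gem_create_object(struct drm_device *dev,
							   size_t size)
	{
		struct my_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);

		if (!bo)
			return ERR_PTR(-ENOMEM);	/* previously: return NULL */

		return &bo->base;	/* embedded struct drm_gem_object */
	}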
 static int drm_legacy_irq_install(struct drm_device *dev, int irq)
 {
 	int ret;
@@ -203,4 +202,3 @@ int drm_legacy_irq_control(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 }
-#endif
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index c9206840c87f..70c9dba114a6 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -35,9 +35,47 @@
 #include <drm/drm_legacy.h>
 
 struct agp_memory;
+struct drm_buf_desc;
 struct drm_device;
 struct drm_file;
-struct drm_buf_desc;
+struct drm_hash_item;
+struct drm_open_hash;
+
+/*
+ * Hash-table Support
+ */
+
+#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
+
+/* drm_hashtab.c */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
+int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
+int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
+			      unsigned long seed, int bits, int shift,
+			      unsigned long add);
+int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
+
+void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
+int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
+int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
+void drm_ht_remove(struct drm_open_hash *ht);
+#endif
+
+/*
+ * RCU-safe interface
+ *
+ * The user of this API needs to make sure that two or more instances of the
+ * hash table manipulation functions are never run simultaneously.
+ * The lookup function drm_ht_find_item_rcu may, however, run simultaneously
+ * with any of the manipulation functions as long as it's called from within
+ * an RCU read-locked section.
+ */
+#define drm_ht_insert_item_rcu drm_ht_insert_item
+#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please
+#define drm_ht_remove_key_rcu drm_ht_remove_key
+#define drm_ht_remove_item_rcu drm_ht_remove_item
+#define drm_ht_find_item_rcu drm_ht_find_item
 
 /*
  * Generic DRM Contexts
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index 71b646c4131f..ded8968b3e8a 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -15,9 +15,10 @@
 #include <drm/drm_connector.h>
 #include <drm/drm_damage_helper.h>
 #include <drm/drm_drv.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_file.h>
 #include <drm/drm_format_helper.h>
 #include <drm/drm_fourcc.h>
+#include <drm/drm_gem.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_mipi_dbi.h>
 #include <drm/drm_modes.h>
@@ -200,30 +201,38 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
 		      struct drm_rect *clip, bool swap)
 {
 	struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
-	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem);
-	void *src = cma_obj->vaddr;
+	struct dma_buf_map map[DRM_FORMAT_MAX_PLANES];
+	struct dma_buf_map data[DRM_FORMAT_MAX_PLANES];
+	void *src;
 	int ret;
 
 	ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
 	if (ret)
 		return ret;
+
+	ret = drm_gem_fb_vmap(fb, map, data);
+	if (ret)
+		goto out_drm_gem_fb_end_cpu_access;
+	src = data[0].vaddr; /* TODO: Use mapping abstraction properly */
 
 	switch (fb->format->format) {
 	case DRM_FORMAT_RGB565:
 		if (swap)
-			drm_fb_swab(dst, src, fb, clip, !gem->import_attach);
+			drm_fb_swab(dst, 0, src, fb, clip, !gem->import_attach);
 		else
-			drm_fb_memcpy(dst, src, fb, clip);
+			drm_fb_memcpy(dst, 0, src, fb, clip);
 		break;
 	case DRM_FORMAT_XRGB8888:
-
drm_fb_xrgb8888_to_rgb565(dst, src, fb, clip, swap); + drm_fb_xrgb8888_to_rgb565(dst, 0, src, fb, clip, swap); break; default: drm_err_once(fb->dev, "Format is not supported: %p4cc\n", &fb->format->format); - return -EINVAL; + ret = -EINVAL; } + drm_gem_fb_vunmap(fb, map); +out_drm_gem_fb_end_cpu_access: drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); return ret; @@ -249,8 +258,8 @@ static void mipi_dbi_set_window_address(struct mipi_dbi_dev *dbidev, static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) { - struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0); - struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem); + struct dma_buf_map map[DRM_FORMAT_MAX_PLANES]; + struct dma_buf_map data[DRM_FORMAT_MAX_PLANES]; struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev); unsigned int height = rect->y2 - rect->y1; unsigned int width = rect->x2 - rect->x1; @@ -266,6 +275,10 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) if (!drm_dev_enter(fb->dev, &idx)) return; + ret = drm_gem_fb_vmap(fb, map, data); + if (ret) + goto err_drm_dev_exit; + full = width == fb->width && height == fb->height; DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect)); @@ -277,7 +290,7 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) if (ret) goto err_msg; } else { - tr = cma_obj->vaddr; + tr = data[0].vaddr; /* TODO: Use mapping abstraction properly */ } mipi_dbi_set_window_address(dbidev, rect->x1, rect->x2 - 1, rect->y1, @@ -289,6 +302,9 @@ err_msg: if (ret) drm_err_once(fb->dev, "Failed to update display %d\n", ret); + drm_gem_fb_vunmap(fb, map); + +err_drm_dev_exit: drm_dev_exit(idx); } @@ -1117,8 +1133,8 @@ int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *dbi, /* * Even though it's not the SPI device that does DMA (the master does), - * the dma mask is necessary for the dma_alloc_wc() in - * drm_gem_cma_create(). The dma_addr returned will be a physical + * the dma mask is necessary for the dma_alloc_wc() in the GEM code + * (e.g., drm_gem_cma_create()). The dma_addr returned will be a physical * address which might be different from the bus address, but this is * not a problem since the address will not be used. * The virtual address is used in the transfer and the SPI core diff --git a/drivers/gpu/drm/drm_nomodeset.c b/drivers/gpu/drm/drm_nomodeset.c new file mode 100644 index 000000000000..f3978d5bd3a1 --- /dev/null +++ b/drivers/gpu/drm/drm_nomodeset.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/module.h> +#include <linux/types.h> + +static bool drm_nomodeset; + +bool drm_firmware_drivers_only(void) +{ + return drm_nomodeset; +} +EXPORT_SYMBOL(drm_firmware_drivers_only); + +static int __init disable_modeset(char *str) +{ + drm_nomodeset = true; + + pr_warn("Booted with the nomodeset parameter. 
Only the system framebuffer will be available\n"); + + return 1; +} + +/* Disable kernel modesetting */ +__setup("nomodeset", disable_modeset); diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c index 37c34146eea8..59d368ea006b 100644 --- a/drivers/gpu/drm/drm_of.c +++ b/drivers/gpu/drm/drm_of.c @@ -402,3 +402,36 @@ int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1, DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS; } EXPORT_SYMBOL_GPL(drm_of_lvds_get_dual_link_pixel_order); + +/** + * drm_of_lvds_get_data_mapping - Get LVDS data mapping + * @port: DT port node of the LVDS source or sink + * + * Convert DT "data-mapping" property string value into media bus format value. + * + * Return: + * * MEDIA_BUS_FMT_RGB666_1X7X3_SPWG - data-mapping is "jeida-18" + * * MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA - data-mapping is "jeida-24" + * * MEDIA_BUS_FMT_RGB888_1X7X4_SPWG - data-mapping is "vesa-24" + * * -EINVAL - the "data-mapping" property is unsupported + * * -ENODEV - the "data-mapping" property is missing + */ +int drm_of_lvds_get_data_mapping(const struct device_node *port) +{ + const char *mapping; + int ret; + + ret = of_property_read_string(port, "data-mapping", &mapping); + if (ret < 0) + return -ENODEV; + + if (!strcmp(mapping, "jeida-18")) + return MEDIA_BUS_FMT_RGB666_1X7X3_SPWG; + if (!strcmp(mapping, "jeida-24")) + return MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA; + if (!strcmp(mapping, "vesa-24")) + return MEDIA_BUS_FMT_RGB888_1X7X4_SPWG; + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(drm_of_lvds_get_data_mapping); diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index a9359878f4ed..042bb80383c9 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -262,6 +262,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"), }, .driver_data = (void *)&lcd1200x1920_rightside_up, + }, { /* Lenovo Yoga Book X90F / X91F / X91L */ + .matches = { + /* Non exact match to match all versions */ + DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"), + }, + .driver_data = (void *)&lcd1200x1920_rightside_up, }, { /* OneGX1 Pro */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"), diff --git a/drivers/gpu/drm/drm_privacy_screen.c b/drivers/gpu/drm/drm_privacy_screen.c new file mode 100644 index 000000000000..beaf99e9120a --- /dev/null +++ b/drivers/gpu/drm/drm_privacy_screen.c @@ -0,0 +1,467 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2020 - 2021 Red Hat, Inc. + * + * Authors: + * Hans de Goede <hdegoede@redhat.com> + */ + +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <drm/drm_privacy_screen_machine.h> +#include <drm/drm_privacy_screen_consumer.h> +#include <drm/drm_privacy_screen_driver.h> +#include "drm_internal.h" + +/** + * DOC: overview + * + * This class allows non KMS drivers, from e.g. drivers/platform/x86 to + * register a privacy-screen device, which the KMS drivers can then use + * to implement the standard privacy-screen properties, see + * :ref:`Standard Connector Properties<standard_connector_properties>`. + * + * KMS drivers using a privacy-screen class device are advised to use the + * drm_connector_attach_privacy_screen_provider() and + * drm_connector_update_privacy_screen() helpers for dealing with this. 
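A consumer-side sketch of the flow this overview describes, using the helpers it names (pdev and connector are hypothetical driver variables):

	struct drm_privacy_screen *priv;

	priv = drm_privacy_screen_get(&pdev->dev, NULL);	/* NULL con_id: wildcard */
	if (IS_ERR(priv))
		return PTR_ERR(priv);	/* may be -EPROBE_DEFER until the provider registers */

	drm_connector_attach_privacy_screen_provider(connector, priv);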
+ */ + +#define to_drm_privacy_screen(dev) \ + container_of(dev, struct drm_privacy_screen, dev) + +static DEFINE_MUTEX(drm_privacy_screen_lookup_lock); +static LIST_HEAD(drm_privacy_screen_lookup_list); + +static DEFINE_MUTEX(drm_privacy_screen_devs_lock); +static LIST_HEAD(drm_privacy_screen_devs); + +/*** drm_privacy_screen_machine.h functions ***/ + +/** + * drm_privacy_screen_lookup_add - add an entry to the static privacy-screen + * lookup list + * @lookup: lookup list entry to add + * + * Add an entry to the static privacy-screen lookup list. Note the + * &struct list_head which is part of the &struct drm_privacy_screen_lookup + * gets added to a list owned by the privacy-screen core. So the passed in + * &struct drm_privacy_screen_lookup must not be free-ed until it is removed + * from the lookup list by calling drm_privacy_screen_lookup_remove(). + */ +void drm_privacy_screen_lookup_add(struct drm_privacy_screen_lookup *lookup) +{ + mutex_lock(&drm_privacy_screen_lookup_lock); + list_add(&lookup->list, &drm_privacy_screen_lookup_list); + mutex_unlock(&drm_privacy_screen_lookup_lock); +} +EXPORT_SYMBOL(drm_privacy_screen_lookup_add); + +/** + * drm_privacy_screen_lookup_remove - remove an entry to the static + * privacy-screen lookup list + * @lookup: lookup list entry to remove + * + * Remove an entry previously added with drm_privacy_screen_lookup_add() + * from the static privacy-screen lookup list. + */ +void drm_privacy_screen_lookup_remove(struct drm_privacy_screen_lookup *lookup) +{ + mutex_lock(&drm_privacy_screen_lookup_lock); + list_del(&lookup->list); + mutex_unlock(&drm_privacy_screen_lookup_lock); +} +EXPORT_SYMBOL(drm_privacy_screen_lookup_remove); + +/*** drm_privacy_screen_consumer.h functions ***/ + +static struct drm_privacy_screen *drm_privacy_screen_get_by_name( + const char *name) +{ + struct drm_privacy_screen *priv; + struct device *dev = NULL; + + mutex_lock(&drm_privacy_screen_devs_lock); + + list_for_each_entry(priv, &drm_privacy_screen_devs, list) { + if (strcmp(dev_name(&priv->dev), name) == 0) { + dev = get_device(&priv->dev); + break; + } + } + + mutex_unlock(&drm_privacy_screen_devs_lock); + + return dev ? to_drm_privacy_screen(dev) : NULL; +} + +/** + * drm_privacy_screen_get - get a privacy-screen provider + * @dev: consumer-device for which to get a privacy-screen provider + * @con_id: (video)connector name for which to get a privacy-screen provider + * + * Get a privacy-screen provider for a privacy-screen attached to the + * display described by the @dev and @con_id parameters. + * + * Return: + * * A pointer to a &struct drm_privacy_screen on success. + * * ERR_PTR(-ENODEV) if no matching privacy-screen is found + * * ERR_PTR(-EPROBE_DEFER) if there is a matching privacy-screen, + * but it has not been registered yet. + */ +struct drm_privacy_screen *drm_privacy_screen_get(struct device *dev, + const char *con_id) +{ + const char *dev_id = dev ? dev_name(dev) : NULL; + struct drm_privacy_screen_lookup *l; + struct drm_privacy_screen *priv; + const char *provider = NULL; + int match, best = -1; + + /* + * For now we only support using a static lookup table, which is + * populated by the drm_privacy_screen_arch_init() call. This should + * be extended with device-tree / fw_node lookup when support is added + * for device-tree using hardware with a privacy-screen. + * + * The lookup algorithm was shamelessly taken from the clock + * framework: + * + * We do slightly fuzzy matching here: + * An entry with a NULL ID is assumed to be a wildcard. 
+ * If an entry has a device ID, it must match + * If an entry has a connection ID, it must match + * Then we take the most specific entry - with the following order + * of precedence: dev+con > dev only > con only. + */ + mutex_lock(&drm_privacy_screen_lookup_lock); + + list_for_each_entry(l, &drm_privacy_screen_lookup_list, list) { + match = 0; + + if (l->dev_id) { + if (!dev_id || strcmp(l->dev_id, dev_id)) + continue; + + match += 2; + } + + if (l->con_id) { + if (!con_id || strcmp(l->con_id, con_id)) + continue; + + match += 1; + } + + if (match > best) { + provider = l->provider; + best = match; + } + } + + mutex_unlock(&drm_privacy_screen_lookup_lock); + + if (!provider) + return ERR_PTR(-ENODEV); + + priv = drm_privacy_screen_get_by_name(provider); + if (!priv) + return ERR_PTR(-EPROBE_DEFER); + + return priv; +} +EXPORT_SYMBOL(drm_privacy_screen_get); + +/** + * drm_privacy_screen_put - release a privacy-screen reference + * @priv: privacy screen reference to release + * + * Release a privacy-screen provider reference gotten through + * drm_privacy_screen_get(). May be called with a NULL or ERR_PTR, + * in which case it is a no-op. + */ +void drm_privacy_screen_put(struct drm_privacy_screen *priv) +{ + if (IS_ERR_OR_NULL(priv)) + return; + + put_device(&priv->dev); +} +EXPORT_SYMBOL(drm_privacy_screen_put); + +/** + * drm_privacy_screen_set_sw_state - set a privacy-screen's sw-state + * @priv: privacy screen to set the sw-state for + * @sw_state: new sw-state value to set + * + * Set the sw-state of a privacy screen. If the privacy-screen is not + * in a locked hw-state, then the actual and hw-state of the privacy-screen + * will be immediately updated to the new value. If the privacy-screen is + * in a locked hw-state, then the new sw-state will be remembered as the + * requested state to put the privacy-screen in when it becomes unlocked. + * + * Return: 0 on success, negative error code on failure. + */ +int drm_privacy_screen_set_sw_state(struct drm_privacy_screen *priv, + enum drm_privacy_screen_status sw_state) +{ + int ret = 0; + + mutex_lock(&priv->lock); + + if (!priv->ops) { + ret = -ENODEV; + goto out; + } + + /* + * As per the DRM connector properties documentation, setting the + * sw_state while the hw_state is locked is allowed. In this case + * it is a no-op other then storing the new sw_state so that it + * can be honored when the state gets unlocked. + * Also skip the set if the hw already is in the desired state. + */ + if (priv->hw_state >= PRIVACY_SCREEN_DISABLED_LOCKED || + priv->hw_state == sw_state) { + priv->sw_state = sw_state; + goto out; + } + + ret = priv->ops->set_sw_state(priv, sw_state); +out: + mutex_unlock(&priv->lock); + return ret; +} +EXPORT_SYMBOL(drm_privacy_screen_set_sw_state); + +/** + * drm_privacy_screen_get_state - get privacy-screen's current state + * @priv: privacy screen to get the state for + * @sw_state_ret: address where to store the privacy-screens current sw-state + * @hw_state_ret: address where to store the privacy-screens current hw-state + * + * Get the current state of a privacy-screen, both the sw-state and the + * hw-state. 
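Reading the state back is symmetrical; a short sketch that also shows the locked-state check used elsewhere in this file:

	enum drm_privacy_screen_status sw_state, hw_state;

	drm_privacy_screen_get_state(priv, &sw_state, &hw_state);
	if (hw_state >= PRIVACY_SCREEN_DISABLED_LOCKED)
		return;		/* hardware-locked: sw_state requests are only recorded */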
+ */ +void drm_privacy_screen_get_state(struct drm_privacy_screen *priv, + enum drm_privacy_screen_status *sw_state_ret, + enum drm_privacy_screen_status *hw_state_ret) +{ + mutex_lock(&priv->lock); + *sw_state_ret = priv->sw_state; + *hw_state_ret = priv->hw_state; + mutex_unlock(&priv->lock); +} +EXPORT_SYMBOL(drm_privacy_screen_get_state); + +/** + * drm_privacy_screen_register_notifier - register a notifier + * @priv: Privacy screen to register the notifier with + * @nb: Notifier-block for the notifier to register + * + * Register a notifier with the privacy-screen to be notified of changes made + * to the privacy-screen state from outside of the privacy-screen class. + * E.g. the state may be changed by the hardware itself in response to a + * hotkey press. + * + * The notifier is called with no locks held. The new hw_state and sw_state + * can be retrieved using the drm_privacy_screen_get_state() function. + * A pointer to the drm_privacy_screen's struct is passed as the void *data + * argument of the notifier_block's notifier_call. + * + * The notifier will NOT be called when changes are made through + * drm_privacy_screen_set_sw_state(). It is only called for external changes. + * + * Return: 0 on success, negative error code on failure. + */ +int drm_privacy_screen_register_notifier(struct drm_privacy_screen *priv, + struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&priv->notifier_head, nb); +} +EXPORT_SYMBOL(drm_privacy_screen_register_notifier); + +/** + * drm_privacy_screen_unregister_notifier - unregister a notifier + * @priv: Privacy screen to register the notifier with + * @nb: Notifier-block for the notifier to register + * + * Unregister a notifier registered with drm_privacy_screen_register_notifier(). + * + * Return: 0 on success, negative error code on failure. + */ +int drm_privacy_screen_unregister_notifier(struct drm_privacy_screen *priv, + struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&priv->notifier_head, nb); +} +EXPORT_SYMBOL(drm_privacy_screen_unregister_notifier); + +/*** drm_privacy_screen_driver.h functions ***/ + +static ssize_t sw_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_privacy_screen *priv = to_drm_privacy_screen(dev); + const char * const sw_state_names[] = { + "Disabled", + "Enabled", + }; + ssize_t ret; + + mutex_lock(&priv->lock); + + if (!priv->ops) + ret = -ENODEV; + else if (WARN_ON(priv->sw_state >= ARRAY_SIZE(sw_state_names))) + ret = -ENXIO; + else + ret = sprintf(buf, "%s\n", sw_state_names[priv->sw_state]); + + mutex_unlock(&priv->lock); + return ret; +} +/* + * RO: Do not allow setting the sw_state through sysfs, this MUST be done + * through the drm_properties on the drm_connector. 
+ */ +static DEVICE_ATTR_RO(sw_state); + +static ssize_t hw_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_privacy_screen *priv = to_drm_privacy_screen(dev); + const char * const hw_state_names[] = { + "Disabled", + "Enabled", + "Disabled, locked", + "Enabled, locked", + }; + ssize_t ret; + + mutex_lock(&priv->lock); + + if (!priv->ops) + ret = -ENODEV; + else if (WARN_ON(priv->hw_state >= ARRAY_SIZE(hw_state_names))) + ret = -ENXIO; + else + ret = sprintf(buf, "%s\n", hw_state_names[priv->hw_state]); + + mutex_unlock(&priv->lock); + return ret; +} +static DEVICE_ATTR_RO(hw_state); + +static struct attribute *drm_privacy_screen_attrs[] = { + &dev_attr_sw_state.attr, + &dev_attr_hw_state.attr, + NULL +}; +ATTRIBUTE_GROUPS(drm_privacy_screen); + +static struct device_type drm_privacy_screen_type = { + .name = "privacy_screen", + .groups = drm_privacy_screen_groups, +}; + +static void drm_privacy_screen_device_release(struct device *dev) +{ + struct drm_privacy_screen *priv = to_drm_privacy_screen(dev); + + kfree(priv); +} + +/** + * drm_privacy_screen_register - register a privacy-screen + * @parent: parent-device for the privacy-screen + * @ops: &struct drm_privacy_screen_ops pointer with ops for the privacy-screen + * + * Create and register a privacy-screen. + * + * Return: + * * A pointer to the created privacy-screen on success. + * * An ERR_PTR(errno) on failure. + */ +struct drm_privacy_screen *drm_privacy_screen_register( + struct device *parent, const struct drm_privacy_screen_ops *ops) +{ + struct drm_privacy_screen *priv; + int ret; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return ERR_PTR(-ENOMEM); + + mutex_init(&priv->lock); + BLOCKING_INIT_NOTIFIER_HEAD(&priv->notifier_head); + + priv->dev.class = drm_class; + priv->dev.type = &drm_privacy_screen_type; + priv->dev.parent = parent; + priv->dev.release = drm_privacy_screen_device_release; + dev_set_name(&priv->dev, "privacy_screen-%s", dev_name(parent)); + priv->ops = ops; + + priv->ops->get_hw_state(priv); + + ret = device_register(&priv->dev); + if (ret) { + put_device(&priv->dev); + return ERR_PTR(ret); + } + + mutex_lock(&drm_privacy_screen_devs_lock); + list_add(&priv->list, &drm_privacy_screen_devs); + mutex_unlock(&drm_privacy_screen_devs_lock); + + return priv; +} +EXPORT_SYMBOL(drm_privacy_screen_register); + +/** + * drm_privacy_screen_unregister - unregister privacy-screen + * @priv: privacy-screen to unregister + * + * Unregister a privacy-screen registered with drm_privacy_screen_register(). + * May be called with a NULL or ERR_PTR, in which case it is a no-op. + */ +void drm_privacy_screen_unregister(struct drm_privacy_screen *priv) +{ + if (IS_ERR_OR_NULL(priv)) + return; + + mutex_lock(&drm_privacy_screen_devs_lock); + list_del(&priv->list); + mutex_unlock(&drm_privacy_screen_devs_lock); + + mutex_lock(&priv->lock); + priv->ops = NULL; + mutex_unlock(&priv->lock); + + device_unregister(&priv->dev); +} +EXPORT_SYMBOL(drm_privacy_screen_unregister); + +/** + * drm_privacy_screen_call_notifier_chain - notify consumers of state change + * @priv: Privacy screen to register the notifier with + * + * A privacy-screen provider driver can call this functions upon external + * changes to the privacy-screen state. E.g. the state may be changed by the + * hardware itself in response to a hotkey press. + * This function must be called without holding the privacy-screen lock. 
+ * the driver must update sw_state and hw_state to reflect the new state before + * calling this function. + * The expected behavior from the driver upon receiving an external state + * change event is: 1. Take the lock; 2. Update sw_state and hw_state; + * 3. Release the lock. 4. Call drm_privacy_screen_call_notifier_chain(). + */ +void drm_privacy_screen_call_notifier_chain(struct drm_privacy_screen *priv) +{ + blocking_notifier_call_chain(&priv->notifier_head, 0, priv); +} +EXPORT_SYMBOL(drm_privacy_screen_call_notifier_chain); diff --git a/drivers/gpu/drm/drm_privacy_screen_x86.c b/drivers/gpu/drm/drm_privacy_screen_x86.c new file mode 100644 index 000000000000..a2cafb294ca6 --- /dev/null +++ b/drivers/gpu/drm/drm_privacy_screen_x86.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2020 Red Hat, Inc. + * + * Authors: + * Hans de Goede <hdegoede@redhat.com> + */ + +#include <linux/acpi.h> +#include <drm/drm_privacy_screen_machine.h> + +#ifdef CONFIG_X86 +static struct drm_privacy_screen_lookup arch_lookup; + +struct arch_init_data { + struct drm_privacy_screen_lookup lookup; + bool (*detect)(void); +}; + +#if IS_ENABLED(CONFIG_THINKPAD_ACPI) +static acpi_status __init acpi_set_handle(acpi_handle handle, u32 level, + void *context, void **return_value) +{ + *(acpi_handle *)return_value = handle; + return AE_CTRL_TERMINATE; +} + +static bool __init detect_thinkpad_privacy_screen(void) +{ + union acpi_object obj = { .type = ACPI_TYPE_INTEGER }; + struct acpi_object_list args = { .count = 1, .pointer = &obj, }; + acpi_handle ec_handle = NULL; + unsigned long long output; + acpi_status status; + + /* Get embedded-controller handle */ + status = acpi_get_devices("PNP0C09", acpi_set_handle, NULL, &ec_handle); + if (ACPI_FAILURE(status) || !ec_handle) + return false; + + /* And call the privacy-screen get-status method */ + status = acpi_evaluate_integer(ec_handle, "HKEY.GSSS", &args, &output); + if (ACPI_FAILURE(status)) + return false; + + return (output & 0x10000) ? true : false; +} +#endif + +static const struct arch_init_data arch_init_data[] __initconst = { +#if IS_ENABLED(CONFIG_THINKPAD_ACPI) + { + .lookup = { + .dev_id = NULL, + .con_id = NULL, + .provider = "privacy_screen-thinkpad_acpi", + }, + .detect = detect_thinkpad_privacy_screen, + }, +#endif +}; + +void __init drm_privacy_screen_lookup_init(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(arch_init_data); i++) { + if (!arch_init_data[i].detect()) + continue; + + pr_info("Found '%s' privacy-screen provider\n", + arch_init_data[i].lookup.provider); + + /* Make a copy because arch_init_data is __initconst */ + arch_lookup = arch_init_data[i].lookup; + drm_privacy_screen_lookup_add(&arch_lookup); + break; + } +} + +void drm_privacy_screen_lookup_exit(void) +{ + if (arch_lookup.provider) + drm_privacy_screen_lookup_remove(&arch_lookup); +} +#endif /* ifdef CONFIG_X86 */ diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 61d5c57f23e1..682359512996 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -604,6 +604,9 @@ EXPORT_SYMBOL(drm_helper_probe_single_connector_modes); * * This function must be called from process context with no mode * setting locks held. + * + * If only a single connector has changed, consider calling + * drm_kms_helper_connector_hotplug_event() instead. 
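The four-step contract documented for drm_privacy_screen_call_notifier_chain() above translates into provider code roughly as follows (a hedged sketch; the chosen states are illustrative):

	/* external change detected, e.g. a hotkey handled by the EC */
	mutex_lock(&priv->lock);
	priv->sw_state = PRIVACY_SCREEN_ENABLED;
	priv->hw_state = PRIVACY_SCREEN_ENABLED;
	mutex_unlock(&priv->lock);

	drm_privacy_screen_call_notifier_chain(priv);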
*/ void drm_kms_helper_hotplug_event(struct drm_device *dev) { @@ -616,6 +619,26 @@ void drm_kms_helper_hotplug_event(struct drm_device *dev) } EXPORT_SYMBOL(drm_kms_helper_hotplug_event); +/** + * drm_kms_helper_connector_hotplug_event - fire off a KMS connector hotplug event + * @connector: drm_connector which has changed + * + * This is the same as drm_kms_helper_hotplug_event(), except it fires a more + * fine-grained uevent for a single connector. + */ +void drm_kms_helper_connector_hotplug_event(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + + /* send a uevent + call fbdev */ + drm_sysfs_connector_hotplug_event(connector); + if (dev->mode_config.funcs->output_poll_changed) + dev->mode_config.funcs->output_poll_changed(dev); + + drm_client_dev_hotplug(dev); +} +EXPORT_SYMBOL(drm_kms_helper_connector_hotplug_event); + static void output_poll_execute(struct work_struct *work) { struct delayed_work *delayed_work = to_delayed_work(work); @@ -865,7 +888,7 @@ bool drm_connector_helper_hpd_irq_event(struct drm_connector *connector) mutex_unlock(&dev->mode_config.mutex); if (changed) { - drm_kms_helper_hotplug_event(dev); + drm_kms_helper_connector_hotplug_event(connector); drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Sent hotplug event\n", connector->base.id, connector->name); @@ -904,9 +927,9 @@ EXPORT_SYMBOL(drm_connector_helper_hpd_irq_event); */ bool drm_helper_hpd_irq_event(struct drm_device *dev) { - struct drm_connector *connector; + struct drm_connector *connector, *first_changed_connector = NULL; struct drm_connector_list_iter conn_iter; - bool changed = false; + int changed = 0; if (!dev->mode_config.poll_enabled) return false; @@ -918,16 +941,25 @@ bool drm_helper_hpd_irq_event(struct drm_device *dev) if (!(connector->polled & DRM_CONNECTOR_POLL_HPD)) continue; - if (check_connector_changed(connector)) - changed = true; + if (check_connector_changed(connector)) { + if (!first_changed_connector) { + drm_connector_get(connector); + first_changed_connector = connector; + } + + changed++; + } } drm_connector_list_iter_end(&conn_iter); mutex_unlock(&dev->mode_config.mutex); - if (changed) { + if (changed == 1) + drm_kms_helper_connector_hotplug_event(first_changed_connector); + else if (changed > 0) drm_kms_helper_hotplug_event(dev); - DRM_DEBUG_KMS("Sent hotplug event\n"); - } + + if (first_changed_connector) + drm_connector_put(first_changed_connector); return changed; } diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index c9a9d74f338c..c313a5b4549c 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -404,8 +404,17 @@ int drm_syncobj_find_fence(struct drm_file *file_private, if (*fence) { ret = dma_fence_chain_find_seqno(fence, point); - if (!ret) + if (!ret) { + /* If the requested seqno is already signaled + * drm_syncobj_find_fence may return a NULL + * fence. To make sure the recipient gets + * signalled, use a new fence instead. 
+ */ + if (!*fence) + *fence = dma_fence_get_stub(); + goto out; + } dma_fence_put(*fence); } else { ret = -EINVAL; diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 76ff6ec3421b..430e00b16eec 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -410,6 +410,31 @@ void drm_sysfs_hotplug_event(struct drm_device *dev) EXPORT_SYMBOL(drm_sysfs_hotplug_event); /** + * drm_sysfs_connector_hotplug_event - generate a DRM uevent for any connector + * change + * @connector: connector which has changed + * + * Send a uevent for the DRM connector specified by @connector. This will send + * a uevent with the properties HOTPLUG=1 and CONNECTOR. + */ +void drm_sysfs_connector_hotplug_event(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + char hotplug_str[] = "HOTPLUG=1", conn_id[21]; + char *envp[] = { hotplug_str, conn_id, NULL }; + + snprintf(conn_id, sizeof(conn_id), + "CONNECTOR=%u", connector->base.id); + + drm_dbg_kms(connector->dev, + "[CONNECTOR:%d:%s] generating connector hotplug event\n", + connector->base.id, connector->name); + + kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp); +} +EXPORT_SYMBOL(drm_sysfs_connector_hotplug_event); + +/** * drm_sysfs_connector_status_event - generate a DRM uevent for connector * property status change * @connector: connector on which property status changed diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index f0b2540e60e4..d5314aa28ff7 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -424,45 +424,24 @@ int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj, } #ifdef CONFIG_DEBUG_FS -static void etnaviv_gem_describe_fence(struct dma_fence *fence, - const char *type, struct seq_file *m) -{ - if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) - seq_printf(m, "\t%9s: %s %s seq %llu\n", - type, - fence->ops->get_driver_name(fence), - fence->ops->get_timeline_name(fence), - fence->seqno); -} - static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m) { struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); struct dma_resv *robj = obj->resv; - struct dma_resv_list *fobj; - struct dma_fence *fence; unsigned long off = drm_vma_node_start(&obj->vma_node); + int r; seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n", etnaviv_obj->flags, is_active(etnaviv_obj) ? 
'A' : 'I', obj->name, kref_read(&obj->refcount), off, etnaviv_obj->vaddr, obj->size); - rcu_read_lock(); - fobj = dma_resv_shared_list(robj); - if (fobj) { - unsigned int i, shared_count = fobj->shared_count; - - for (i = 0; i < shared_count; i++) { - fence = rcu_dereference(fobj->shared[i]); - etnaviv_gem_describe_fence(fence, "Shared", m); - } - } + r = dma_resv_lock(robj, NULL); + if (r) + return; - fence = dma_resv_excl_fence(robj); - if (fence) - etnaviv_gem_describe_fence(fence, "Exclusive", m); - rcu_read_unlock(); + dma_resv_describe(robj, m); + dma_resv_unlock(robj); } void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 225fa5879ebd..b03c20c14ca1 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -189,13 +189,13 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit) continue; if (bo->flags & ETNA_SUBMIT_BO_WRITE) { - ret = dma_resv_get_fences(robj, &bo->excl, + ret = dma_resv_get_fences(robj, NULL, &bo->nr_shared, &bo->shared); if (ret) return ret; } else { - bo->excl = dma_resv_get_excl_unlocked(robj); + bo->excl = dma_fence_get(dma_resv_excl_fence(robj)); } } diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index d8f1cf4d6b69..9743b6b17447 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -102,16 +102,7 @@ static const struct drm_ioctl_desc exynos_ioctls[] = { DRM_RENDER_ALLOW), }; -static const struct file_operations exynos_drm_driver_fops = { - .owner = THIS_MODULE, - .open = drm_open, - .mmap = exynos_drm_gem_mmap, - .poll = drm_poll, - .read = drm_read, - .unlocked_ioctl = drm_ioctl, - .compat_ioctl = drm_compat_ioctl, - .release = drm_release, -}; +DEFINE_DRM_GEM_FOPS(exynos_drm_driver_fops); static const struct drm_driver exynos_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM @@ -124,7 +115,7 @@ static const struct drm_driver exynos_drm_driver = { .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = exynos_drm_gem_prime_import, .gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table, - .gem_prime_mmap = exynos_drm_gem_prime_mmap, + .gem_prime_mmap = drm_gem_prime_mmap, .ioctls = exynos_ioctls, .num_ioctls = ARRAY_SIZE(exynos_ioctls), .fops = &exynos_drm_driver_fops, diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 8d137857818c..32a36572b894 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -13,7 +13,6 @@ #include <linux/gpio/consumer.h> #include <linux/irq.h> #include <linux/of_device.h> -#include <linux/of_gpio.h> #include <linux/of_graph.h> #include <linux/phy/phy.h> #include <linux/regulator/consumer.h> @@ -265,7 +264,7 @@ struct exynos_dsi { struct clk **clks; struct regulator_bulk_data supplies[2]; int irq; - int te_gpio; + struct gpio_desc *te_gpio; u32 pll_clk_rate; u32 burst_clk_rate; @@ -1298,14 +1297,14 @@ static void exynos_dsi_enable_irq(struct exynos_dsi *dsi) { enable_irq(dsi->irq); - if (gpio_is_valid(dsi->te_gpio)) - enable_irq(gpio_to_irq(dsi->te_gpio)); + if (dsi->te_gpio) + enable_irq(gpiod_to_irq(dsi->te_gpio)); } static void exynos_dsi_disable_irq(struct exynos_dsi *dsi) { - if (gpio_is_valid(dsi->te_gpio)) - disable_irq(gpio_to_irq(dsi->te_gpio)); + if (dsi->te_gpio) + disable_irq(gpiod_to_irq(dsi->te_gpio)); 
disable_irq(dsi->irq); } @@ -1335,42 +1334,31 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi, int ret; int te_gpio_irq; - dsi->te_gpio = of_get_named_gpio(panel->of_node, "te-gpios", 0); - if (dsi->te_gpio == -ENOENT) - return 0; - - if (!gpio_is_valid(dsi->te_gpio)) { - ret = dsi->te_gpio; - dev_err(dsi->dev, "cannot get te-gpios, %d\n", ret); - goto out; - } - - ret = gpio_request(dsi->te_gpio, "te_gpio"); - if (ret) { - dev_err(dsi->dev, "gpio request failed with %d\n", ret); - goto out; + dsi->te_gpio = devm_gpiod_get_optional(dsi->dev, "te", GPIOD_IN); + if (IS_ERR(dsi->te_gpio)) { + dev_err(dsi->dev, "gpio request failed with %ld\n", + PTR_ERR(dsi->te_gpio)); + return PTR_ERR(dsi->te_gpio); } - te_gpio_irq = gpio_to_irq(dsi->te_gpio); + te_gpio_irq = gpiod_to_irq(dsi->te_gpio); ret = request_threaded_irq(te_gpio_irq, exynos_dsi_te_irq_handler, NULL, IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN, "TE", dsi); if (ret) { dev_err(dsi->dev, "request interrupt failed with %d\n", ret); - gpio_free(dsi->te_gpio); - goto out; + gpiod_put(dsi->te_gpio); + return ret; } -out: - return ret; + return 0; } static void exynos_dsi_unregister_te_irq(struct exynos_dsi *dsi) { - if (gpio_is_valid(dsi->te_gpio)) { - free_irq(gpio_to_irq(dsi->te_gpio), dsi); - gpio_free(dsi->te_gpio); - dsi->te_gpio = -ENOENT; + if (dsi->te_gpio) { + free_irq(gpiod_to_irq(dsi->te_gpio), dsi); + gpiod_put(dsi->te_gpio); } } @@ -1745,9 +1733,6 @@ static int exynos_dsi_probe(struct platform_device *pdev) if (!dsi) return -ENOMEM; - /* To be checked as invalid one */ - dsi->te_gpio = -ENOENT; - init_completion(&dsi->completed); spin_lock_init(&dsi->transfer_lock); INIT_LIST_HEAD(&dsi->transfer_list); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index 5147f5929be7..02c97b9ca926 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -15,6 +15,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_prime.h> #include <drm/drm_probe_helper.h> #include <drm/exynos_drm.h> @@ -39,25 +40,8 @@ static int exynos_drm_fb_mmap(struct fb_info *info, struct drm_fb_helper *helper = info->par; struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper); struct exynos_drm_gem *exynos_gem = exynos_fbd->exynos_gem; - unsigned long vm_size; - int ret; - - vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; - - vm_size = vma->vm_end - vma->vm_start; - - if (vm_size > exynos_gem->size) - return -EINVAL; - ret = dma_mmap_attrs(to_dma_dev(helper->dev), vma, exynos_gem->cookie, - exynos_gem->dma_addr, exynos_gem->size, - exynos_gem->dma_attrs); - if (ret < 0) { - DRM_DEV_ERROR(to_dma_dev(helper->dev), "failed to mmap.\n"); - return ret; - } - - return 0; + return drm_gem_prime_mmap(&exynos_gem->base, vma); } static const struct fb_ops exynos_drm_fb_ops = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index ecfd82d0afb7..023f54ee61a8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -782,8 +782,8 @@ static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc, sc->hratio = (src_w << 14) / (dst_w << hfactor); sc->vratio = (src_h << 14) / (dst_h << vfactor); - sc->up_h = (dst_w >= src_w) ? true : false; - sc->up_v = (dst_h >= src_h) ? 
true : false; + sc->up_h = (dst_w >= src_w); + sc->up_v = (dst_h >= src_h); DRM_DEV_DEBUG_KMS(ctx->dev, "hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n", sc->hratio, sc->vratio, sc->up_h, sc->up_v); diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 0a0c042a3155..3e493f48e0d4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -20,6 +20,8 @@ MODULE_IMPORT_NS(DMA_BUF); +static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); + static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap) { struct drm_device *dev = exynos_gem->base.dev; @@ -138,6 +140,7 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = { static const struct drm_gem_object_funcs exynos_drm_gem_object_funcs = { .free = exynos_drm_gem_free_object, .get_sg_table = exynos_drm_gem_prime_get_sg_table, + .mmap = exynos_drm_gem_mmap, .vm_ops = &exynos_drm_gem_vm_ops, }; @@ -357,12 +360,16 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, return 0; } -static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj, - struct vm_area_struct *vma) +static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) { struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj); int ret; + if (obj->import_attach) + return dma_buf_mmap(obj->dma_buf, vma, 0); + + vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; + DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n", exynos_gem->flags); @@ -388,26 +395,6 @@ err_close_vm: return ret; } -int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) -{ - struct drm_gem_object *obj; - int ret; - - /* set vm_area_struct. */ - ret = drm_gem_mmap(filp, vma); - if (ret < 0) { - DRM_ERROR("failed to mmap.\n"); - return ret; - } - - obj = vma->vm_private_data; - - if (obj->import_attach) - return dma_buf_mmap(obj->dma_buf, vma, 0); - - return exynos_drm_gem_mmap_obj(obj, vma); -} - /* low-level interface prime helpers */ struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf) @@ -469,15 +456,3 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, exynos_gem->sgt = sgt; return &exynos_gem->base; } - -int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj, - struct vm_area_struct *vma) -{ - int ret; - - ret = drm_gem_mmap_obj(obj, obj->size, vma); - if (ret < 0) - return ret; - - return exynos_drm_gem_mmap_obj(obj, vma); -} diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index a23272fb96fb..79d7e1a87419 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -96,9 +96,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); -/* set vm_flags and we can change the vm attribute to other one at here. 
*/ -int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); - /* low-level interface prime helpers */ struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf); @@ -107,7 +104,5 @@ struct drm_gem_object * exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); -int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj, - struct vm_area_struct *vma); #endif diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig index d7dd8ba90e3a..e95e96c565ba 100644 --- a/drivers/gpu/drm/fsl-dcu/Kconfig +++ b/drivers/gpu/drm/fsl-dcu/Kconfig @@ -3,8 +3,8 @@ config DRM_FSL_DCU tristate "DRM Support for Freescale DCU" depends on DRM && OF && ARM && COMMON_CLK select BACKLIGHT_CLASS_DEVICE + select DRM_GEM_CMA_HELPER select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_PANEL select REGMAP_MMIO select VIDEOMODE_HELPERS diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index 321e416489a9..45df9de22007 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -25,7 +25,6 @@ #include "framebuffer.h" #include "gem.h" -#include "gtt.h" #include "psb_drv.h" #include "psb_intel_drv.h" #include "psb_intel_reg.h" @@ -82,14 +81,13 @@ static vm_fault_t psbfb_vm_fault(struct vm_fault *vmf) struct drm_framebuffer *fb = vma->vm_private_data; struct drm_device *dev = fb->dev; struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - struct gtt_range *gtt = to_gtt_range(fb->obj[0]); + struct psb_gem_object *pobj = to_psb_gem_object(fb->obj[0]); int page_num; int i; unsigned long address; vm_fault_t ret = VM_FAULT_SIGBUS; unsigned long pfn; - unsigned long phys_addr = (unsigned long)dev_priv->stolen_base + - gtt->offset; + unsigned long phys_addr = (unsigned long)dev_priv->stolen_base + pobj->offset; page_num = vma_pages(vma); address = vmf->address - (vmf->pgoff << PAGE_SHIFT); @@ -226,31 +224,6 @@ static struct drm_framebuffer *psb_framebuffer_create } /** - * psbfb_alloc - allocate frame buffer memory - * @dev: the DRM device - * @aligned_size: space needed - * - * Allocate the frame buffer. In the usual case we get a GTT range that - * is stolen memory backed and life is simple. If there isn't sufficient - * we fail as we don't have the virtual mapping space to really vmap it - * and the kernel console code can't handle non linear framebuffers. - * - * Re-address this as and if the framebuffer layer grows this ability. 
- */ -static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size) -{ - struct gtt_range *backing; - /* Begin by trying to use stolen memory backing */ - backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1, PAGE_SIZE); - if (backing) { - backing->gem.funcs = &psb_gem_object_funcs; - drm_gem_private_object_init(dev, &backing->gem, aligned_size); - return backing; - } - return NULL; -} - -/** * psbfb_create - create a framebuffer * @fb_helper: the framebuffer helper * @sizes: specification of the layout @@ -268,7 +241,8 @@ static int psbfb_create(struct drm_fb_helper *fb_helper, struct drm_mode_fb_cmd2 mode_cmd; int size; int ret; - struct gtt_range *backing; + struct psb_gem_object *backing; + struct drm_gem_object *obj; u32 bpp, depth; mode_cmd.width = sizes->surface_width; @@ -286,24 +260,25 @@ static int psbfb_create(struct drm_fb_helper *fb_helper, size = ALIGN(size, PAGE_SIZE); /* Allocate the framebuffer in the GTT with stolen page backing */ - backing = psbfb_alloc(dev, size); - if (backing == NULL) - return -ENOMEM; + backing = psb_gem_create(dev, size, "fb", true, PAGE_SIZE); + if (IS_ERR(backing)) + return PTR_ERR(backing); + obj = &backing->base; memset(dev_priv->vram_addr + backing->offset, 0, size); info = drm_fb_helper_alloc_fbi(fb_helper); if (IS_ERR(info)) { ret = PTR_ERR(info); - goto out; + goto err_drm_gem_object_put; } mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); - fb = psb_framebuffer_create(dev, &mode_cmd, &backing->gem); + fb = psb_framebuffer_create(dev, &mode_cmd, obj); if (IS_ERR(fb)) { ret = PTR_ERR(fb); - goto out; + goto err_drm_gem_object_put; } fb_helper->fb = fb; @@ -334,8 +309,9 @@ static int psbfb_create(struct drm_fb_helper *fb_helper, dev_dbg(dev->dev, "allocated %dx%d fb\n", fb->width, fb->height); return 0; -out: - psb_gtt_free_range(dev, backing); + +err_drm_gem_object_put: + drm_gem_object_put(obj); return ret; } diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c index 5ae54c9d2819..8d65af80bb08 100644 --- a/drivers/gpu/drm/gma500/gem.c +++ b/drivers/gpu/drm/gma500/gem.c @@ -13,24 +13,105 @@ #include <linux/pagemap.h> +#include <asm/set_memory.h> + #include <drm/drm.h> #include <drm/drm_vma_manager.h> #include "gem.h" #include "psb_drv.h" +int psb_gem_pin(struct psb_gem_object *pobj) +{ + struct drm_gem_object *obj = &pobj->base; + struct drm_device *dev = obj->dev; + struct drm_psb_private *dev_priv = to_drm_psb_private(dev); + u32 gpu_base = dev_priv->gtt.gatt_start; + struct page **pages; + unsigned int npages; + int ret; + + mutex_lock(&dev_priv->gtt_mutex); + + if (pobj->in_gart || pobj->stolen) + goto out; /* already mapped */ + + pages = drm_gem_get_pages(obj); + if (IS_ERR(pages)) { + ret = PTR_ERR(pages); + goto err_mutex_unlock; + } + + npages = obj->size / PAGE_SIZE; + + set_pages_array_wc(pages, npages); + + psb_gtt_insert_pages(dev_priv, &pobj->resource, pages); + psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu), pages, + (gpu_base + pobj->offset), npages, 0, 0, + PSB_MMU_CACHED_MEMORY); + + pobj->npage = npages; + pobj->pages = pages; + +out: + ++pobj->in_gart; + mutex_unlock(&dev_priv->gtt_mutex); + + return 0; + +err_mutex_unlock: + mutex_unlock(&dev_priv->gtt_mutex); + return ret; +} + +void psb_gem_unpin(struct psb_gem_object *pobj) +{ + struct drm_gem_object *obj = &pobj->base; + struct drm_device *dev = obj->dev; + struct drm_psb_private *dev_priv = to_drm_psb_private(dev); + u32 gpu_base = dev_priv->gtt.gatt_start; + + mutex_lock(&dev_priv->gtt_mutex); + + 
WARN_ON(!pobj->in_gart); + + --pobj->in_gart; + + if (pobj->in_gart || pobj->stolen) + goto out; + + psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu), + (gpu_base + pobj->offset), pobj->npage, 0, 0); + psb_gtt_remove_pages(dev_priv, &pobj->resource); + + /* Reset caching flags */ + set_pages_array_wb(pobj->pages, pobj->npage); + + drm_gem_put_pages(obj, pobj->pages, true, false); + pobj->pages = NULL; + pobj->npage = 0; + +out: + mutex_unlock(&dev_priv->gtt_mutex); +} + static vm_fault_t psb_gem_fault(struct vm_fault *vmf); static void psb_gem_free_object(struct drm_gem_object *obj) { - struct gtt_range *gtt = container_of(obj, struct gtt_range, gem); + struct psb_gem_object *pobj = to_psb_gem_object(obj); - /* Remove the list map if one is present */ - drm_gem_free_mmap_offset(obj); drm_gem_object_release(obj); - /* This must occur last as it frees up the memory of the GEM object */ - psb_gtt_free_range(obj->dev, gtt); + /* Undo the mmap pin if we are destroying the object */ + if (pobj->mmapping) + psb_gem_unpin(pobj); + + WARN_ON(pobj->in_gart && !pobj->stolen); + + release_resource(&pobj->resource); + kfree(pobj); } static const struct vm_operations_struct psb_gem_vm_ops = { @@ -39,63 +120,60 @@ static const struct vm_operations_struct psb_gem_vm_ops = { .close = drm_gem_vm_close, }; -const struct drm_gem_object_funcs psb_gem_object_funcs = { +static const struct drm_gem_object_funcs psb_gem_object_funcs = { .free = psb_gem_free_object, .vm_ops = &psb_gem_vm_ops, }; -/** - * psb_gem_create - create a mappable object - * @file: the DRM file of the client - * @dev: our device - * @size: the size requested - * @handlep: returned handle (opaque number) - * @stolen: unused - * @align: unused - * - * Create a GEM object, fill in the boilerplate and attach a handle to - * it so that userspace can speak about it. 
This does the core work - * for the various methods that do/will create GEM objects for things - */ -int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size, - u32 *handlep, int stolen, u32 align) +struct psb_gem_object * +psb_gem_create(struct drm_device *dev, u64 size, const char *name, bool stolen, u32 align) { - struct gtt_range *r; + struct drm_psb_private *dev_priv = to_drm_psb_private(dev); + struct psb_gem_object *pobj; + struct drm_gem_object *obj; int ret; - u32 handle; size = roundup(size, PAGE_SIZE); - /* Allocate our object - for now a direct gtt range which is not - stolen memory backed */ - r = psb_gtt_alloc_range(dev, size, "gem", 0, PAGE_SIZE); - if (r == NULL) { - dev_err(dev->dev, "no memory for %lld byte GEM object\n", size); - return -ENOSPC; - } - r->gem.funcs = &psb_gem_object_funcs; - /* Initialize the extra goodies GEM needs to do all the hard work */ - if (drm_gem_object_init(dev, &r->gem, size) != 0) { - psb_gtt_free_range(dev, r); - /* GEM doesn't give an error code so use -ENOMEM */ - dev_err(dev->dev, "GEM init failed for %lld\n", size); - return -ENOMEM; + pobj = kzalloc(sizeof(*pobj), GFP_KERNEL); + if (!pobj) + return ERR_PTR(-ENOMEM); + obj = &pobj->base; + + /* GTT resource */ + + ret = psb_gtt_allocate_resource(dev_priv, &pobj->resource, name, size, align, stolen, + &pobj->offset); + if (ret) + goto err_kfree; + + if (stolen) { + pobj->stolen = true; + pobj->in_gart = 1; } - /* Limit the object to 32bit mappings */ - mapping_set_gfp_mask(r->gem.filp->f_mapping, GFP_KERNEL | __GFP_DMA32); - /* Give the object a handle so we can carry it more easily */ - ret = drm_gem_handle_create(file, &r->gem, &handle); - if (ret) { - dev_err(dev->dev, "GEM handle failed for %p, %lld\n", - &r->gem, size); - drm_gem_object_release(&r->gem); - psb_gtt_free_range(dev, r); - return ret; + + /* GEM object */ + + obj->funcs = &psb_gem_object_funcs; + + if (stolen) { + drm_gem_private_object_init(dev, obj, size); + } else { + ret = drm_gem_object_init(dev, obj, size); + if (ret) + goto err_release_resource; + + /* Limit the object to 32-bit mappings */ + mapping_set_gfp_mask(obj->filp->f_mapping, GFP_KERNEL | __GFP_DMA32); } - /* We have the initial and handle reference but need only one now */ - drm_gem_object_put(&r->gem); - *handlep = handle; - return 0; + + return pobj; + +err_release_resource: + release_resource(&pobj->resource); +err_kfree: + kfree(pobj); + return ERR_PTR(ret); } /** @@ -111,10 +189,40 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size, int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args) { - args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64); - args->size = args->pitch * args->height; - return psb_gem_create(file, dev, args->size, &args->handle, 0, - PAGE_SIZE); + size_t pitch, size; + struct psb_gem_object *pobj; + struct drm_gem_object *obj; + u32 handle; + int ret; + + pitch = args->width * DIV_ROUND_UP(args->bpp, 8); + pitch = ALIGN(pitch, 64); + + size = pitch * args->height; + size = roundup(size, PAGE_SIZE); + if (!size) + return -EINVAL; + + pobj = psb_gem_create(dev, size, "gem", false, PAGE_SIZE); + if (IS_ERR(pobj)) + return PTR_ERR(pobj); + obj = &pobj->base; + + ret = drm_gem_handle_create(file, obj, &handle); + if (ret) + goto err_drm_gem_object_put; + + drm_gem_object_put(obj); + + args->pitch = pitch; + args->size = size; + args->handle = handle; + + return 0; + +err_drm_gem_object_put: + drm_gem_object_put(obj); + return ret; } 
/** @@ -137,7 +245,7 @@ static vm_fault_t psb_gem_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct drm_gem_object *obj; - struct gtt_range *r; + struct psb_gem_object *pobj; int err; vm_fault_t ret; unsigned long pfn; @@ -149,7 +257,7 @@ static vm_fault_t psb_gem_fault(struct vm_fault *vmf) dev = obj->dev; dev_priv = to_drm_psb_private(dev); - r = container_of(obj, struct gtt_range, gem); /* Get the gtt range */ + pobj = to_psb_gem_object(obj); /* Make sure we don't parallel update on a fault, nor move or remove something from beneath our feet */ @@ -157,14 +265,14 @@ static vm_fault_t psb_gem_fault(struct vm_fault *vmf) /* For now the mmap pins the object and it stays pinned. As things stand that will do us no harm */ - if (r->mmapping == 0) { - err = psb_gtt_pin(r); + if (pobj->mmapping == 0) { + err = psb_gem_pin(pobj); if (err < 0) { dev_err(dev->dev, "gma500: pin failed: %d\n", err); ret = vmf_error(err); goto fail; } - r->mmapping = 1; + pobj->mmapping = 1; } /* Page relative to the VMA start - we must calculate this ourselves @@ -172,10 +280,10 @@ static vm_fault_t psb_gem_fault(struct vm_fault *vmf) page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; /* CPU view of the page, don't go via the GART for CPU writes */ - if (r->stolen) - pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT; + if (pobj->stolen) + pfn = (dev_priv->stolen_base + pobj->offset) >> PAGE_SHIFT; else - pfn = page_to_pfn(r->pages[page_offset]); + pfn = page_to_pfn(pobj->pages[page_offset]); ret = vmf_insert_pfn(vma, vmf->address, pfn); fail: mutex_unlock(&dev_priv->mmap_mutex); diff --git a/drivers/gpu/drm/gma500/gem.h b/drivers/gpu/drm/gma500/gem.h index bae6454ead29..79cced40c87f 100644 --- a/drivers/gpu/drm/gma500/gem.h +++ b/drivers/gpu/drm/gma500/gem.h @@ -8,11 +8,33 @@ #ifndef _GEM_H #define _GEM_H +#include <linux/kernel.h> + +#include <drm/drm_gem.h> + struct drm_device; -extern const struct drm_gem_object_funcs psb_gem_object_funcs; +struct psb_gem_object { + struct drm_gem_object base; + + struct resource resource; /* GTT resource for our allocation */ + u32 offset; /* GTT offset of our object */ + int in_gart; /* Currently in the GART (ref ct) */ + bool stolen; /* Backed from stolen RAM */ + bool mmapping; /* Is mmappable */ + struct page **pages; /* Backing pages if present */ + int npage; /* Number of backing pages */ +}; + +static inline struct psb_gem_object *to_psb_gem_object(struct drm_gem_object *obj) +{ + return container_of(obj, struct psb_gem_object, base); +} + +struct psb_gem_object * +psb_gem_create(struct drm_device *dev, u64 size, const char *name, bool stolen, u32 align); -extern int psb_gem_create(struct drm_file *file, struct drm_device *dev, - u64 size, u32 *handlep, int stolen, u32 align); +int psb_gem_pin(struct psb_gem_object *pobj); +void psb_gem_unpin(struct psb_gem_object *pobj); #endif diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index cbcecbaa041b..99da3118131a 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -15,6 +15,7 @@ #include <drm/drm_vblank.h> #include "framebuffer.h" +#include "gem.h" #include "gma_display.h" #include "psb_drv.h" #include "psb_intel_drv.h" @@ -54,7 +55,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_psb_private *dev_priv = to_drm_psb_private(dev); struct gma_crtc *gma_crtc = to_gma_crtc(crtc); struct drm_framebuffer *fb = crtc->primary->fb; - struct gtt_range *gtt; + struct psb_gem_object *pobj; 
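/*
 * A condensed sketch (hypothetical helper, not part of this patch) of
 * the pin-new/unpin-old scanout pattern that gma_pipe_set_base() below
 * implements with the renamed GEM helpers; error paths and register
 * programming are trimmed to the essentials:
 */
static int example_flip_to(struct drm_framebuffer *new_fb,
			   struct drm_framebuffer *old_fb)
{
	struct psb_gem_object *pobj = to_psb_gem_object(new_fb->obj[0]);
	int ret;

	ret = psb_gem_pin(pobj); /* make the pages resident in the GTT */
	if (ret < 0)
		return ret;

	/* ... program the pipe's base and stride from pobj->offset ... */

	if (old_fb) /* the previous scanout buffer may now be unpinned */
		psb_gem_unpin(to_psb_gem_object(old_fb->obj[0]));

	return 0;
}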
int pipe = gma_crtc->pipe; const struct psb_offset *map = &dev_priv->regmap[pipe]; unsigned long start, offset; @@ -70,14 +71,14 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y, goto gma_pipe_cleaner; } - gtt = to_gtt_range(fb->obj[0]); + pobj = to_psb_gem_object(fb->obj[0]); /* We are displaying this buffer, make sure it is actually loaded into the GTT */ - ret = psb_gtt_pin(gtt); + ret = psb_gem_pin(pobj); if (ret < 0) goto gma_pipe_set_base_exit; - start = gtt->offset; + start = pobj->offset; offset = y * fb->pitches[0] + x * fb->format->cpp[0]; REG_WRITE(map->stride, fb->pitches[0]); @@ -125,7 +126,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y, gma_pipe_cleaner: /* If there was a previous display we can now unpin it */ if (old_fb) - psb_gtt_unpin(to_gtt_range(old_fb->obj[0])); + psb_gem_unpin(to_psb_gem_object(old_fb->obj[0])); gma_pipe_set_base_exit: gma_power_end(dev); @@ -331,8 +332,8 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc, uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; uint32_t temp; size_t addr = 0; - struct gtt_range *gt; - struct gtt_range *cursor_gt = gma_crtc->cursor_gt; + struct psb_gem_object *pobj; + struct psb_gem_object *cursor_pobj = gma_crtc->cursor_pobj; struct drm_gem_object *obj; void *tmp_dst, *tmp_src; int ret = 0, i, cursor_pages; @@ -348,9 +349,8 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc, /* Unpin the old GEM object */ if (gma_crtc->cursor_obj) { - gt = container_of(gma_crtc->cursor_obj, - struct gtt_range, gem); - psb_gtt_unpin(gt); + pobj = to_psb_gem_object(gma_crtc->cursor_obj); + psb_gem_unpin(pobj); drm_gem_object_put(gma_crtc->cursor_obj); gma_crtc->cursor_obj = NULL; } @@ -375,40 +375,40 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc, goto unref_cursor; } - gt = container_of(obj, struct gtt_range, gem); + pobj = to_psb_gem_object(obj); /* Pin the memory into the GTT */ - ret = psb_gtt_pin(gt); + ret = psb_gem_pin(pobj); if (ret) { dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle); goto unref_cursor; } if (dev_priv->ops->cursor_needs_phys) { - if (cursor_gt == NULL) { + if (!cursor_pobj) { dev_err(dev->dev, "No hardware cursor mem available"); ret = -ENOMEM; goto unref_cursor; } /* Prevent overflow */ - if (gt->npage > 4) + if (pobj->npage > 4) cursor_pages = 4; else - cursor_pages = gt->npage; + cursor_pages = pobj->npage; /* Copy the cursor to cursor mem */ - tmp_dst = dev_priv->vram_addr + cursor_gt->offset; + tmp_dst = dev_priv->vram_addr + cursor_pobj->offset; for (i = 0; i < cursor_pages; i++) { - tmp_src = kmap(gt->pages[i]); + tmp_src = kmap(pobj->pages[i]); memcpy(tmp_dst, tmp_src, PAGE_SIZE); - kunmap(gt->pages[i]); + kunmap(pobj->pages[i]); tmp_dst += PAGE_SIZE; } addr = gma_crtc->cursor_addr; } else { - addr = gt->offset; + addr = pobj->offset; gma_crtc->cursor_addr = addr; } @@ -425,8 +425,8 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc, /* unpin the old bo */ if (gma_crtc->cursor_obj) { - gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem); - psb_gtt_unpin(gt); + pobj = to_psb_gem_object(gma_crtc->cursor_obj); + psb_gem_unpin(pobj); drm_gem_object_put(gma_crtc->cursor_obj); } @@ -483,14 +483,14 @@ void gma_crtc_commit(struct drm_crtc *crtc) void gma_crtc_disable(struct drm_crtc *crtc) { - struct gtt_range *gt; + struct psb_gem_object *pobj; const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); if (crtc->primary->fb) { - gt = to_gtt_range(crtc->primary->fb->obj[0]); - psb_gtt_unpin(gt); + pobj = 
to_psb_gem_object(crtc->primary->fb->obj[0]); + psb_gem_unpin(pobj); } } @@ -498,6 +498,9 @@ void gma_crtc_destroy(struct drm_crtc *crtc) { struct gma_crtc *gma_crtc = to_gma_crtc(crtc); + if (gma_crtc->cursor_pobj) + drm_gem_object_put(&gma_crtc->cursor_pobj->base); + kfree(gma_crtc->crtc_state); drm_crtc_cleanup(crtc); kfree(gma_crtc); diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c index 55a2a6919533..309ffe921bfd 100644 --- a/drivers/gpu/drm/gma500/gtt.c +++ b/drivers/gpu/drm/gma500/gtt.c @@ -7,10 +7,7 @@ * Alan Cox <alan@linux.intel.com> */ -#include <linux/shmem_fs.h> - -#include <asm/set_memory.h> - +#include "gem.h" /* TODO: for struct psb_gem_object, see psb_gtt_restore() */ #include "psb_drv.h" @@ -18,6 +15,33 @@ * GTT resource allocator - manage page mappings in GTT space */ +int psb_gtt_allocate_resource(struct drm_psb_private *pdev, struct resource *res, + const char *name, resource_size_t size, resource_size_t align, + bool stolen, u32 *offset) +{ + struct resource *root = pdev->gtt_mem; + resource_size_t start, end; + int ret; + + if (stolen) { + /* The start of the GTT is backed by stolen pages. */ + start = root->start; + end = root->start + pdev->gtt.stolen_size - 1; + } else { + /* The rest is backed by system pages. */ + start = root->start + pdev->gtt.stolen_size; + end = root->end; + } + + res->name = name; + ret = allocate_resource(root, res, size, start, end, align, NULL, NULL); + if (ret) + return ret; + *offset = res->start - root->start; + + return 0; +} + /** * psb_gtt_mask_pte - generate GTT pte entry * @pfn: page number to encode @@ -43,281 +67,62 @@ static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type) return (pfn << PAGE_SHIFT) | mask; } -/** - * psb_gtt_entry - find the GTT entries for a gtt_range - * @dev: our DRM device - * @r: our GTT range - * - * Given a gtt_range object return the GTT offset of the page table - * entries for this gtt_range - */ -static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r) +static u32 __iomem *psb_gtt_entry(struct drm_psb_private *pdev, const struct resource *res) { - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - unsigned long offset; + unsigned long offset = res->start - pdev->gtt_mem->start; - offset = r->resource.start - dev_priv->gtt_mem->start; - - return dev_priv->gtt_map + (offset >> PAGE_SHIFT); + return pdev->gtt_map + (offset >> PAGE_SHIFT); } -/** - * psb_gtt_insert - put an object into the GTT - * @dev: our DRM device - * @r: our GTT range - * @resume: on resume - * - * Take our preallocated GTT range and insert the GEM object into - * the GTT. This is protected via the gtt mutex which the caller - * must hold. +/* + * Take our preallocated GTT range and insert the GEM object into + * the GTT. This is protected via the gtt mutex which the caller + * must hold. */ -static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r, - int resume) +void psb_gtt_insert_pages(struct drm_psb_private *pdev, const struct resource *res, + struct page **pages) { + resource_size_t npages, i; u32 __iomem *gtt_slot; u32 pte; - struct page **pages; - int i; - if (r->pages == NULL) { - WARN_ON(1); - return -EINVAL; - } - - WARN_ON(r->stolen); /* refcount these maybe ? 
*/ - - gtt_slot = psb_gtt_entry(dev, r); - pages = r->pages; + /* Write our page entries into the GTT itself */ - if (!resume) { - /* Make sure changes are visible to the GPU */ - set_pages_array_wc(pages, r->npage); - } + npages = resource_size(res) >> PAGE_SHIFT; + gtt_slot = psb_gtt_entry(pdev, res); - /* Write our page entries into the GTT itself */ - for (i = 0; i < r->npage; i++) { - pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), - PSB_MMU_CACHED_MEMORY); - iowrite32(pte, gtt_slot++); + for (i = 0; i < npages; ++i, ++gtt_slot) { + pte = psb_gtt_mask_pte(page_to_pfn(pages[i]), PSB_MMU_CACHED_MEMORY); + iowrite32(pte, gtt_slot); } /* Make sure all the entries are set before we return */ ioread32(gtt_slot - 1); - - return 0; } -/** - * psb_gtt_remove - remove an object from the GTT - * @dev: our DRM device - * @r: our GTT range - * - * Remove a preallocated GTT range from the GTT. Overwrite all the - * page table entries with the dummy page. This is protected via the gtt - * mutex which the caller must hold. +/* + * Remove a preallocated GTT range from the GTT. Overwrite all the + * page table entries with the dummy page. This is protected via the gtt + * mutex which the caller must hold. */ -static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r) +void psb_gtt_remove_pages(struct drm_psb_private *pdev, const struct resource *res) { - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); + resource_size_t npages, i; u32 __iomem *gtt_slot; u32 pte; - int i; - - WARN_ON(r->stolen); - - gtt_slot = psb_gtt_entry(dev, r); - pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), - PSB_MMU_CACHED_MEMORY); - - for (i = 0; i < r->npage; i++) - iowrite32(pte, gtt_slot++); - ioread32(gtt_slot - 1); - set_pages_array_wb(r->pages, r->npage); -} - -/** - * psb_gtt_attach_pages - attach and pin GEM pages - * @gt: the gtt range - * - * Pin and build an in kernel list of the pages that back our GEM object. - * While we hold this the pages cannot be swapped out. This is protected - * via the gtt mutex which the caller must hold. - */ -static int psb_gtt_attach_pages(struct gtt_range *gt) -{ - struct page **pages; - - WARN_ON(gt->pages); - - pages = drm_gem_get_pages(>->gem); - if (IS_ERR(pages)) - return PTR_ERR(pages); - - gt->npage = gt->gem.size / PAGE_SIZE; - gt->pages = pages; - - return 0; -} - -/** - * psb_gtt_detach_pages - attach and pin GEM pages - * @gt: the gtt range - * - * Undo the effect of psb_gtt_attach_pages. At this point the pages - * must have been removed from the GTT as they could now be paged out - * and move bus address. This is protected via the gtt mutex which the - * caller must hold. - */ -static void psb_gtt_detach_pages(struct gtt_range *gt) -{ - drm_gem_put_pages(>->gem, gt->pages, true, false); - gt->pages = NULL; -} -/** - * psb_gtt_pin - pin pages into the GTT - * @gt: range to pin - * - * Pin a set of pages into the GTT. The pins are refcounted so that - * multiple pins need multiple unpins to undo. - * - * Non GEM backed objects treat this as a no-op as they are always GTT - * backed objects. 
- */ -int psb_gtt_pin(struct gtt_range *gt) -{ - int ret = 0; - struct drm_device *dev = gt->gem.dev; - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - u32 gpu_base = dev_priv->gtt.gatt_start; + /* Install scratch page for the resource */ - mutex_lock(&dev_priv->gtt_mutex); + pte = psb_gtt_mask_pte(page_to_pfn(pdev->scratch_page), PSB_MMU_CACHED_MEMORY); - if (gt->in_gart == 0 && gt->stolen == 0) { - ret = psb_gtt_attach_pages(gt); - if (ret < 0) - goto out; - ret = psb_gtt_insert(dev, gt, 0); - if (ret < 0) { - psb_gtt_detach_pages(gt); - goto out; - } - psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu), - gt->pages, (gpu_base + gt->offset), - gt->npage, 0, 0, PSB_MMU_CACHED_MEMORY); - } - gt->in_gart++; -out: - mutex_unlock(&dev_priv->gtt_mutex); - return ret; -} + npages = resource_size(res) >> PAGE_SHIFT; + gtt_slot = psb_gtt_entry(pdev, res); -/** - * psb_gtt_unpin - Drop a GTT pin requirement - * @gt: range to pin - * - * Undoes the effect of psb_gtt_pin. On the last drop the GEM object - * will be removed from the GTT which will also drop the page references - * and allow the VM to clean up or page stuff. - * - * Non GEM backed objects treat this as a no-op as they are always GTT - * backed objects. - */ -void psb_gtt_unpin(struct gtt_range *gt) -{ - struct drm_device *dev = gt->gem.dev; - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - u32 gpu_base = dev_priv->gtt.gatt_start; + for (i = 0; i < npages; ++i, ++gtt_slot) + iowrite32(pte, gtt_slot); - mutex_lock(&dev_priv->gtt_mutex); - - WARN_ON(!gt->in_gart); - - gt->in_gart--; - if (gt->in_gart == 0 && gt->stolen == 0) { - psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu), - (gpu_base + gt->offset), gt->npage, 0, 0); - psb_gtt_remove(dev, gt); - psb_gtt_detach_pages(gt); - } - - mutex_unlock(&dev_priv->gtt_mutex); -} - -/* - * GTT resource allocator - allocate and manage GTT address space - */ - -/** - * psb_gtt_alloc_range - allocate GTT address space - * @dev: Our DRM device - * @len: length (bytes) of address space required - * @name: resource name - * @backed: resource should be backed by stolen pages - * @align: requested alignment - * - * Ask the kernel core to find us a suitable range of addresses - * to use for a GTT mapping. - * - * Returns a gtt_range structure describing the object, or NULL on - * error. On successful return the resource is both allocated and marked - * as in use. 
- */ -struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len, - const char *name, int backed, u32 align) -{ - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - struct gtt_range *gt; - struct resource *r = dev_priv->gtt_mem; - int ret; - unsigned long start, end; - - if (backed) { - /* The start of the GTT is the stolen pages */ - start = r->start; - end = r->start + dev_priv->gtt.stolen_size - 1; - } else { - /* The rest we will use for GEM backed objects */ - start = r->start + dev_priv->gtt.stolen_size; - end = r->end; - } - - gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL); - if (gt == NULL) - return NULL; - gt->resource.name = name; - gt->stolen = backed; - gt->in_gart = backed; - /* Ensure this is set for non GEM objects */ - gt->gem.dev = dev; - ret = allocate_resource(dev_priv->gtt_mem, >->resource, - len, start, end, align, NULL, NULL); - if (ret == 0) { - gt->offset = gt->resource.start - r->start; - return gt; - } - kfree(gt); - return NULL; -} - -/** - * psb_gtt_free_range - release GTT address space - * @dev: our DRM device - * @gt: a mapping created with psb_gtt_alloc_range - * - * Release a resource that was allocated with psb_gtt_alloc_range. If the - * object has been pinned by mmap users we clean this up here currently. - */ -void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt) -{ - /* Undo the mmap pin if we are destroying the object */ - if (gt->mmapping) { - psb_gtt_unpin(gt); - gt->mmapping = 0; - } - WARN_ON(gt->in_gart && !gt->stolen); - release_resource(>->resource); - kfree(gt); + /* Make sure all the entries are set before we return */ + ioread32(gtt_slot - 1); } static void psb_gtt_alloc(struct drm_device *dev) @@ -498,7 +303,7 @@ int psb_gtt_restore(struct drm_device *dev) { struct drm_psb_private *dev_priv = to_drm_psb_private(dev); struct resource *r = dev_priv->gtt_mem->child; - struct gtt_range *range; + struct psb_gem_object *pobj; unsigned int restored = 0, total = 0, size = 0; /* On resume, the gtt_mutex is already initialized */ @@ -506,10 +311,15 @@ int psb_gtt_restore(struct drm_device *dev) psb_gtt_init(dev, 1); while (r != NULL) { - range = container_of(r, struct gtt_range, resource); - if (range->pages) { - psb_gtt_insert(dev, range, 1); - size += range->resource.end - range->resource.start; + /* + * TODO: GTT restoration needs a refactoring, so that we don't have to touch + * struct psb_gem_object here. The type represents a GEM object and is + * not related to the GTT itself. 
+ */ + pobj = container_of(r, struct psb_gem_object, resource); + if (pobj->pages) { + psb_gtt_insert_pages(dev_priv, &pobj->resource, pobj->pages); + size += pobj->resource.end - pobj->resource.start; restored++; } r = r->sibling; diff --git a/drivers/gpu/drm/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h index 2bf165849ebe..ff1dcdd1ff52 100644 --- a/drivers/gpu/drm/gma500/gtt.h +++ b/drivers/gpu/drm/gma500/gtt.h @@ -10,6 +10,8 @@ #include <drm/drm_gem.h> +struct drm_psb_private; + /* This wants cleaning up with respect to the psb_dev and un-needed stuff */ struct psb_gtt { uint32_t gatt_start; @@ -26,27 +28,14 @@ struct psb_gtt { /* Exported functions */ extern int psb_gtt_init(struct drm_device *dev, int resume); extern void psb_gtt_takedown(struct drm_device *dev); +extern int psb_gtt_restore(struct drm_device *dev); -/* Each gtt_range describes an allocation in the GTT area */ -struct gtt_range { - struct resource resource; /* Resource for our allocation */ - u32 offset; /* GTT offset of our object */ - struct drm_gem_object gem; /* GEM high level stuff */ - int in_gart; /* Currently in the GART (ref ct) */ - bool stolen; /* Backed from stolen RAM */ - bool mmapping; /* Is mmappable */ - struct page **pages; /* Backing pages if present */ - int npage; /* Number of backing pages */ -}; +int psb_gtt_allocate_resource(struct drm_psb_private *pdev, struct resource *res, + const char *name, resource_size_t size, resource_size_t align, + bool stolen, u32 *offset); -#define to_gtt_range(x) container_of(x, struct gtt_range, gem) +void psb_gtt_insert_pages(struct drm_psb_private *pdev, const struct resource *res, + struct page **pages); +void psb_gtt_remove_pages(struct drm_psb_private *pdev, const struct resource *res); -extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len, - const char *name, int backed, - u32 align); -extern void psb_gtt_kref_put(struct gtt_range *gt); -extern void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt); -extern int psb_gtt_pin(struct gtt_range *gt); -extern void psb_gtt_unpin(struct gtt_range *gt); -extern int psb_gtt_restore(struct drm_device *dev); #endif diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c index c6b115954b7d..36c7c2686c90 100644 --- a/drivers/gpu/drm/gma500/oaktrail_crtc.c +++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c @@ -10,6 +10,7 @@ #include <drm/drm_fourcc.h> #include "framebuffer.h" +#include "gem.h" #include "gma_display.h" #include "power.h" #include "psb_drv.h" @@ -608,7 +609,7 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc, if (!gma_power_begin(dev, true)) return 0; - start = to_gtt_range(fb->obj[0])->offset; + start = to_psb_gem_object(fb->obj[0])->offset; offset = y * fb->pitches[0] + x * fb->format->cpp[0]; REG_WRITE(map->stride, fb->pitches[0]); diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 7a10bb39ef0b..65cf1c79dd7c 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -19,6 +19,7 @@ #include <acpi/video.h> #include <drm/drm.h> +#include <drm/drm_aperture.h> #include <drm/drm_drv.h> #include <drm/drm_fb_helper.h> #include <drm/drm_file.h> @@ -448,6 +449,17 @@ static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct drm_device *dev; int ret; + /* + * We cannot yet easily find the framebuffer's location in memory. So + * remove all framebuffers here. + * + * TODO: Refactor psb_driver_load() to map vdc_reg earlier. 
Then we + * might be able to read the framebuffer range from the device. + */ + ret = drm_aperture_remove_framebuffers(true, &driver); + if (ret) + return ret; + ret = pcim_enable_device(pdev); if (ret) return ret; diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index f5f259fde88e..d5f95212934e 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -12,6 +12,7 @@ #include <drm/drm_plane_helper.h> #include "framebuffer.h" +#include "gem.h" #include "gma_display.h" #include "power.h" #include "psb_drv.h" @@ -454,23 +455,21 @@ static void psb_intel_cursor_init(struct drm_device *dev, struct drm_psb_private *dev_priv = to_drm_psb_private(dev); u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR }; u32 base[3] = { CURABASE, CURBBASE, CURCBASE }; - struct gtt_range *cursor_gt; + struct psb_gem_object *cursor_pobj; if (dev_priv->ops->cursor_needs_phys) { /* Allocate 4 pages of stolen mem for a hardware cursor. That * is enough for the 64 x 64 ARGB cursors we support. */ - cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1, - PAGE_SIZE); - if (!cursor_gt) { - gma_crtc->cursor_gt = NULL; + cursor_pobj = psb_gem_create(dev, 4 * PAGE_SIZE, "cursor", true, PAGE_SIZE); + if (IS_ERR(cursor_pobj)) { + gma_crtc->cursor_pobj = NULL; goto out; } - gma_crtc->cursor_gt = cursor_gt; - gma_crtc->cursor_addr = dev_priv->stolen_base + - cursor_gt->offset; + gma_crtc->cursor_pobj = cursor_pobj; + gma_crtc->cursor_addr = dev_priv->stolen_base + cursor_pobj->offset; } else { - gma_crtc->cursor_gt = NULL; + gma_crtc->cursor_pobj = NULL; } out: diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h index 5340225d6997..db3e757328fe 100644 --- a/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h @@ -140,7 +140,7 @@ struct gma_crtc { int pipe; int plane; uint32_t cursor_addr; - struct gtt_range *cursor_gt; + struct psb_gem_object *cursor_pobj; u8 lut_adj[256]; struct psb_intel_framebuffer *fbdev_fb; /* a mode_set for fbdev users on this crtc */ diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c index daf75c178c2b..a150a5a4b5d4 100644 --- a/drivers/gpu/drm/gud/gud_pipe.c +++ b/drivers/gpu/drm/gud/gud_pipe.c @@ -74,7 +74,7 @@ static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format if (!buf) return 0; - drm_fb_xrgb8888_to_gray8(buf, src, fb, rect); + drm_fb_xrgb8888_to_gray8(buf, 0, src, fb, rect); pix8 = buf; for (y = 0; y < height; y++) { @@ -190,23 +190,23 @@ retry: goto end_cpu_access; } } else if (format->format == DRM_FORMAT_R8) { - drm_fb_xrgb8888_to_gray8(buf, vaddr, fb, rect); + drm_fb_xrgb8888_to_gray8(buf, 0, vaddr, fb, rect); } else if (format->format == DRM_FORMAT_RGB332) { - drm_fb_xrgb8888_to_rgb332(buf, vaddr, fb, rect); + drm_fb_xrgb8888_to_rgb332(buf, 0, vaddr, fb, rect); } else if (format->format == DRM_FORMAT_RGB565) { - drm_fb_xrgb8888_to_rgb565(buf, vaddr, fb, rect, gud_is_big_endian()); + drm_fb_xrgb8888_to_rgb565(buf, 0, vaddr, fb, rect, gud_is_big_endian()); } else if (format->format == DRM_FORMAT_RGB888) { - drm_fb_xrgb8888_to_rgb888(buf, vaddr, fb, rect); + drm_fb_xrgb8888_to_rgb888(buf, 0, vaddr, fb, rect); } else { len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect); } } else if (gud_is_big_endian() && format->cpp[0] > 1) { - drm_fb_swab(buf, vaddr, fb, rect, !import_attach); + drm_fb_swab(buf, 0, vaddr, fb, rect, !import_attach); } else if (compression && 
!import_attach && pitch == fb->pitches[0]) { /* can compress directly from the framebuffer */ buf = vaddr + rect->y1 * pitch; } else { - drm_fb_memcpy(buf, vaddr, fb, rect); + drm_fb_memcpy(buf, 0, vaddr, fb, rect); } memset(req, 0, sizeof(*req)); diff --git a/drivers/gpu/drm/hisilicon/kirin/Kconfig b/drivers/gpu/drm/hisilicon/kirin/Kconfig index 290553e2f6b4..b770f7662830 100644 --- a/drivers/gpu/drm/hisilicon/kirin/Kconfig +++ b/drivers/gpu/drm/hisilicon/kirin/Kconfig @@ -4,7 +4,6 @@ config DRM_HISI_KIRIN depends on DRM && OF && ARM64 select DRM_KMS_HELPER select DRM_GEM_CMA_HELPER - select DRM_KMS_CMA_HELPER select DRM_MIPI_DSI help Choose this option if you have a hisilicon Kirin chipsets(hi6220). diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c index 952cfdb1961d..1d556482bb46 100644 --- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c +++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c @@ -81,7 +81,7 @@ struct dsi_hw_ctx { struct dw_dsi { struct drm_encoder encoder; - struct drm_bridge *bridge; + struct device *dev; struct mipi_dsi_host host; struct drm_display_mode cur_mode; struct dsi_hw_ctx *ctx; @@ -720,10 +720,13 @@ static int dw_drm_encoder_init(struct device *dev, return 0; } +static const struct component_ops dsi_ops; static int dsi_host_attach(struct mipi_dsi_host *host, struct mipi_dsi_device *mdsi) { struct dw_dsi *dsi = host_to_dsi(host); + struct device *dev = host->dev; + int ret; if (mdsi->lanes < 1 || mdsi->lanes > 4) { DRM_ERROR("dsi device params invalid\n"); @@ -734,13 +737,20 @@ static int dsi_host_attach(struct mipi_dsi_host *host, dsi->format = mdsi->format; dsi->mode_flags = mdsi->mode_flags; + ret = component_add(dev, &dsi_ops); + if (ret) + return ret; + return 0; } static int dsi_host_detach(struct mipi_dsi_host *host, struct mipi_dsi_device *mdsi) { - /* do nothing */ + struct device *dev = host->dev; + + component_del(dev, &dsi_ops); + return 0; } @@ -768,7 +778,17 @@ static int dsi_host_init(struct device *dev, struct dw_dsi *dsi) static int dsi_bridge_init(struct drm_device *dev, struct dw_dsi *dsi) { struct drm_encoder *encoder = &dsi->encoder; - struct drm_bridge *bridge = dsi->bridge; + struct drm_bridge *bridge; + struct device_node *np = dsi->dev->of_node; + int ret; + + /* + * Get the endpoint node. In our case, dsi has one output port1 + * to which the external HDMI bridge is connected. + */ + ret = drm_of_find_panel_or_bridge(np, 1, 0, NULL, &bridge); + if (ret) + return ret; /* associate the bridge to dsi encoder */ return drm_bridge_attach(encoder, bridge, NULL, 0); @@ -785,10 +805,6 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) if (ret) return ret; - ret = dsi_host_init(dev, dsi); - if (ret) - return ret; - ret = dsi_bridge_init(drm_dev, dsi); if (ret) return ret; @@ -809,17 +825,7 @@ static const struct component_ops dsi_ops = { static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi) { struct dsi_hw_ctx *ctx = dsi->ctx; - struct device_node *np = pdev->dev.of_node; struct resource *res; - int ret; - - /* - * Get the endpoint node. In our case, dsi has one output port1 - * to which the external HDMI bridge is connected. 
- */ - ret = drm_of_find_panel_or_bridge(np, 1, 0, NULL, &dsi->bridge); - if (ret) - return ret; ctx->pclk = devm_clk_get(&pdev->dev, "pclk"); if (IS_ERR(ctx->pclk)) { @@ -852,6 +858,7 @@ static int dsi_probe(struct platform_device *pdev) dsi = &data->dsi; ctx = &data->ctx; dsi->ctx = ctx; + dsi->dev = &pdev->dev; ret = dsi_parse_dt(pdev, dsi); if (ret) @@ -859,12 +866,19 @@ static int dsi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, data); - return component_add(&pdev->dev, &dsi_ops); + ret = dsi_host_init(&pdev->dev, dsi); + if (ret) + return ret; + + return 0; } static int dsi_remove(struct platform_device *pdev) { - component_del(&pdev->dev, &dsi_ops); + struct dsi_data *data = platform_get_drvdata(pdev); + struct dw_dsi *dsi = &data->dsi; + + mipi_dsi_host_unregister(&dsi->host); return 0; } diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c index cd818a629183..00e53de4812b 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c @@ -225,12 +225,29 @@ static int hyperv_vmbus_remove(struct hv_device *hdev) { struct drm_device *dev = hv_get_drvdata(hdev); struct hyperv_drm_device *hv = to_hv(dev); + struct pci_dev *pdev; drm_dev_unplug(dev); drm_atomic_helper_shutdown(dev); vmbus_close(hdev->channel); hv_set_drvdata(hdev, NULL); - vmbus_free_mmio(hv->mem->start, hv->fb_size); + + /* + * Free allocated MMIO memory only on Gen2 VMs. + * On Gen1 VMs, release the PCI device + */ + if (efi_enabled(EFI_BOOT)) { + vmbus_free_mmio(hv->mem->start, hv->fb_size); + } else { + pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT, + PCI_DEVICE_ID_HYPERV_VIDEO, NULL); + if (!pdev) { + drm_err(dev, "Unable to find PCI Hyper-V video\n"); + return -ENODEV; + } + pci_release_region(pdev, 0); + pci_dev_put(pdev); + } return 0; } diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c index 8c97a20dfe23..93f51e70a951 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c @@ -23,13 +23,16 @@ static int hyperv_blit_to_vram_rect(struct drm_framebuffer *fb, struct drm_rect *rect) { struct hyperv_drm_device *hv = to_hv(fb->dev); + void __iomem *dst = hv->vram; void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */ int idx; if (!drm_dev_enter(&hv->dev, &idx)) return -ENODEV; - drm_fb_memcpy_dstclip(hv->vram, fb->pitches[0], vmap, fb, rect); + dst += drm_fb_clip_offset(fb->pitches[0], fb->format, rect); + drm_fb_memcpy_toio(dst, fb->pitches[0], vmap, fb, rect); + drm_dev_exit(idx); return 0; diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 84b6fc70cbf5..a4c94dc2e216 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -21,7 +21,7 @@ config DRM_I915 select ACPI_VIDEO if ACPI select ACPI_BUTTON if ACPI select SYNC_FILE - select IOSF_MBI + select IOSF_MBI if X86 select CRC32 select SND_HDA_I915 if SND_HDA_CORE select CEC_CORE if CEC_NOTIFIER diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 660bb03de6fc..6ddd2d2bbaaf 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -30,7 +30,7 @@ subdir-ccflags-y += -I$(srctree)/$(src) # Please keep these build lists sorted! 
# core driver code -i915-y += i915_drv.o \ +i915-y += i915_driver.o \ i915_config.o \ i915_irq.o \ i915_getparam.o \ @@ -60,7 +60,6 @@ i915-y += i915_drv.o \ # core library code i915-y += \ - dma_resv_utils.o \ i915_memcpy.o \ i915_mm.o \ i915_sw_fence.o \ @@ -154,6 +153,7 @@ gem-y += \ gem/i915_gem_throttle.o \ gem/i915_gem_tiling.o \ gem/i915_gem_ttm.o \ + gem/i915_gem_ttm_move.o \ gem/i915_gem_ttm_pm.o \ gem/i915_gem_userptr.o \ gem/i915_gem_wait.o \ @@ -173,6 +173,7 @@ i915-y += \ i915_trace_points.o \ i915_ttm_buddy_manager.o \ i915_vma.o \ + i915_vma_snapshot.o \ intel_wopcm.o # general-purpose microcontroller (GuC) support @@ -226,6 +227,8 @@ i915-y += \ display/intel_hotplug.o \ display/intel_lpe_audio.o \ display/intel_overlay.o \ + display/intel_pch_display.o \ + display/intel_pch_refclk.o \ display/intel_plane_initial.o \ display/intel_psr.o \ display/intel_quirks.o \ @@ -256,6 +259,7 @@ i915-y += \ display/intel_crt.o \ display/intel_ddi.o \ display/intel_ddi_buf_trans.o \ + display/intel_display_trace.o \ display/intel_dp.o \ display/intel_dp_aux.o \ display/intel_dp_aux_backlight.o \ diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c index dc41868d01ef..f37677df6ebf 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.c +++ b/drivers/gpu/drm/i915/display/g4x_dp.c @@ -9,6 +9,7 @@ #include "intel_audio.h" #include "intel_backlight.h" #include "intel_connector.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dp.h" diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c index f5b4dd5b4275..06e00b1eaa7c 100644 --- a/drivers/gpu/drm/i915/display/g4x_hdmi.c +++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c @@ -8,6 +8,7 @@ #include "g4x_hdmi.h" #include "intel_audio.h" #include "intel_connector.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dpio_phy.h" diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c index b1439ba78f67..85950ff67609 100644 --- a/drivers/gpu/drm/i915/display/i9xx_plane.c +++ b/drivers/gpu/drm/i915/display/i9xx_plane.c @@ -13,6 +13,7 @@ #include "intel_de.h" #include "intel_display_types.h" #include "intel_fb.h" +#include "intel_fbc.h" #include "intel_sprite.h" #include "i9xx_plane.h" @@ -60,22 +61,11 @@ static const u32 vlv_primary_formats[] = { DRM_FORMAT_XBGR16161616F, }; -static const u64 i9xx_format_modifiers[] = { - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { - switch (modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - break; - default: + if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) return false; - } switch (format) { case DRM_FORMAT_C8: @@ -92,13 +82,8 @@ static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane, static bool i965_plane_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { - switch (modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - break; - default: + if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) return false; - } switch (format) { case DRM_FORMAT_C8: @@ -136,6 +121,15 @@ static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv, return i9xx_plane == PLANE_A; } +static struct intel_fbc *i9xx_plane_fbc(struct drm_i915_private *dev_priv, + enum 
i9xx_plane_id i9xx_plane) +{ + if (i9xx_plane_has_fbc(dev_priv, i9xx_plane)) + return dev_priv->fbc; + else + return NULL; +} + static bool i9xx_plane_has_windowing(struct intel_plane *plane) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -272,7 +266,7 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state) u32 alignment = intel_surf_alignment(fb, 0); int cpp = fb->format->cpp[0]; - while ((src_x + src_w) * cpp > plane_state->view.color_plane[0].stride) { + while ((src_x + src_w) * cpp > plane_state->view.color_plane[0].mapping_stride) { if (offset == 0) { drm_dbg_kms(&dev_priv->drm, "Unable to find suitable display surface offset due to X-tiling\n"); @@ -418,38 +412,25 @@ static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state, return DIV_ROUND_UP(pixel_rate * num, den); } -static void i9xx_update_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +static void i9xx_plane_update_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; - u32 linear_offset; - int x = plane_state->view.color_plane[0].x; - int y = plane_state->view.color_plane[0].y; - int crtc_x = plane_state->uapi.dst.x1; - int crtc_y = plane_state->uapi.dst.y1; - int crtc_w = drm_rect_width(&plane_state->uapi.dst); - int crtc_h = drm_rect_height(&plane_state->uapi.dst); unsigned long irqflags; - u32 dspaddr_offset; - u32 dspcntr; - - dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state); - - linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); - - if (DISPLAY_VER(dev_priv) >= 4) - dspaddr_offset = plane_state->view.color_plane[0].offset; - else - dspaddr_offset = linear_offset; spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane), - plane_state->view.color_plane[0].stride); + plane_state->view.color_plane[0].mapping_stride); if (DISPLAY_VER(dev_priv) < 4) { + int crtc_x = plane_state->uapi.dst.x1; + int crtc_y = plane_state->uapi.dst.y1; + int crtc_w = drm_rect_width(&plane_state->uapi.dst); + int crtc_h = drm_rect_height(&plane_state->uapi.dst); + /* * PLANE_A doesn't actually have a full window * generator but let's assume we still need to @@ -459,7 +440,39 @@ static void i9xx_update_plane(struct intel_plane *plane, (crtc_y << 16) | crtc_x); intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane), ((crtc_h - 1) << 16) | (crtc_w - 1)); - } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) { + } + + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +static void i9xx_plane_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; + int x = plane_state->view.color_plane[0].x; + int y = plane_state->view.color_plane[0].y; + u32 dspcntr, dspaddr_offset, linear_offset; + unsigned long irqflags; + + dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state); + + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + + if (DISPLAY_VER(dev_priv) >= 4) + dspaddr_offset = plane_state->view.color_plane[0].offset; + else + dspaddr_offset = linear_offset; + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + + if (IS_CHERRYVIEW(dev_priv) && i9xx_plane 
== PLANE_B) { + int crtc_x = plane_state->uapi.dst.x1; + int crtc_y = plane_state->uapi.dst.y1; + int crtc_w = drm_rect_width(&plane_state->uapi.dst); + int crtc_h = drm_rect_height(&plane_state->uapi.dst); + intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x); intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane), @@ -493,8 +506,22 @@ static void i9xx_update_plane(struct intel_plane *plane, spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } -static void i9xx_disable_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) +static void i830_plane_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + /* + * On i830/i845 all registers are self-arming [ALM040]. + * + * Additional breakage on i830 causes register reads to return + * the last latched value instead of the last written value [ALM026]. + */ + i9xx_plane_update_noarm(plane, crtc_state, plane_state); + i9xx_plane_update_arm(plane, crtc_state, plane_state); +} + +static void i9xx_plane_disable_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; @@ -768,6 +795,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) struct intel_plane *plane; const struct drm_plane_funcs *plane_funcs; unsigned int supported_rotations; + const u64 *modifiers; const u32 *formats; int num_formats; int ret, zpos; @@ -789,12 +817,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) plane->id = PLANE_PRIMARY; plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id); - plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane); - if (plane->has_fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; - - fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; - } + intel_fbc_add_plane(i9xx_plane_fbc(dev_priv, plane->i9xx_plane), plane); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { formats = vlv_primary_formats; @@ -851,8 +874,13 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) plane->max_stride = ilk_primary_max_stride; } - plane->update_plane = i9xx_update_plane; - plane->disable_plane = i9xx_disable_plane; + if (IS_I830(dev_priv) || IS_I845G(dev_priv)) { + plane->update_arm = i830_plane_update_arm; + } else { + plane->update_noarm = i9xx_plane_update_noarm; + plane->update_arm = i9xx_plane_update_arm; + } + plane->disable_arm = i9xx_plane_disable_arm; plane->get_hw_state = i9xx_plane_get_hw_state; plane->check_plane = i9xx_plane_check; @@ -875,21 +903,26 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) plane->disable_flip_done = ilk_primary_disable_flip_done; } + modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_TILING_X); + if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 0, plane_funcs, formats, num_formats, - i9xx_format_modifiers, + modifiers, DRM_PLANE_TYPE_PRIMARY, "primary %c", pipe_name(pipe)); else ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 0, plane_funcs, formats, num_formats, - i9xx_format_modifiers, + modifiers, DRM_PLANE_TYPE_PRIMARY, "plane %c", plane_name(plane->i9xx_plane)); + + kfree(modifiers); + if (ret) goto fail; diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 168c84a74d30..5781e9fac8b4 100644 
--- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -28,6 +28,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_mipi_dsi.h> +#include "icl_dsi.h" #include "intel_atomic.h" #include "intel_backlight.h" #include "intel_combo_phy.h" @@ -36,6 +37,7 @@ #include "intel_ddi.h" #include "intel_de.h" #include "intel_dsi.h" +#include "intel_dsi_vbt.h" #include "intel_panel.h" #include "intel_vdsc.h" #include "skl_scaler.h" @@ -183,6 +185,8 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host, if (enable_lpdt) tmp |= LP_DATA_TRANSFER; + else + tmp &= ~LP_DATA_TRANSFER; tmp &= ~(PARAM_WC_MASK | VC_MASK | DT_MASK); tmp |= ((packet->header[0] & VC_MASK) << VC_SHIFT); @@ -696,10 +700,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder, intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); for_each_dsi_phy(phy, intel_dsi->phys) { - if (DISPLAY_VER(dev_priv) >= 12) - val |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); - else - val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); + val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); } intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); @@ -1135,8 +1136,6 @@ static void gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - /* step 4a: power up all lanes of the DDI used by DSI */ gen11_dsi_power_up_lanes(encoder); @@ -1162,8 +1161,7 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, gen11_dsi_configure_transcoder(encoder, crtc_state); /* Step 4l: Gate DDI clocks */ - if (DISPLAY_VER(dev_priv) == 11) - gen11_dsi_gate_clocks(encoder); + gen11_dsi_gate_clocks(encoder); } static void gen11_dsi_powerup_panel(struct intel_encoder *encoder) @@ -1232,7 +1230,9 @@ static void gen11_dsi_pre_enable(struct intel_atomic_state *state, /* step5: program and powerup panel */ gen11_dsi_powerup_panel(encoder); - intel_dsc_enable(encoder, pipe_config); + intel_dsc_dsi_pps_write(encoder, pipe_config); + + intel_dsc_enable(pipe_config); /* step6c: configure transcoder timings */ gen11_dsi_set_transcoder_timings(encoder, pipe_config); @@ -1271,7 +1271,8 @@ static void adlp_set_lp_hs_wakeup_gb(struct intel_encoder *encoder) if (DISPLAY_VER(i915) == 13) { for_each_dsi_port(port, intel_dsi->ports) intel_de_rmw(i915, TGL_DSI_CHKN_REG(port), - TGL_DSI_CHKN_LSHS_GB, 0x4); + TGL_DSI_CHKN_LSHS_GB_MASK, + TGL_DSI_CHKN_LSHS_GB(4)); } } @@ -1628,7 +1629,7 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder, /* FIXME: initialize from VBT */ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; - ret = intel_dsc_compute_params(encoder, crtc_state); + ret = intel_dsc_compute_params(crtc_state); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/display/icl_dsi.h b/drivers/gpu/drm/i915/display/icl_dsi.h new file mode 100644 index 000000000000..b4861b56b5b2 --- /dev/null +++ b/drivers/gpu/drm/i915/display/icl_dsi.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __ICL_DSI_H__ +#define __ICL_DSI_H__ + +struct drm_i915_private; +struct intel_crtc_state; + +void icl_dsi_init(struct drm_i915_private *i915); +void icl_dsi_frame_update(struct intel_crtc_state *crtc_state); + +#endif /* __ICL_DSI_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index b4e7ac51aa31..a62550711e98 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -139,6 
+139,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn, new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio || new_conn_state->base.content_type != old_conn_state->base.content_type || new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode || + new_conn_state->base.privacy_screen_sw_state != old_conn_state->base.privacy_screen_sw_state || !drm_connector_atomic_hdr_metadata_equal(old_state, new_state)) crtc_state->mode_changed = true; diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index 0be8c00e3db9..89005628cc3a 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -35,14 +35,16 @@ #include <drm/drm_fourcc.h> #include <drm/drm_plane_helper.h> -#include "i915_trace.h" +#include "gt/intel_rps.h" + #include "intel_atomic_plane.h" #include "intel_cdclk.h" +#include "intel_display_trace.h" #include "intel_display_types.h" +#include "intel_fb.h" #include "intel_fb_pin.h" #include "intel_pm.h" #include "intel_sprite.h" -#include "gt/intel_rps.h" static void intel_plane_state_reset(struct intel_plane_state *plane_state, struct intel_plane *plane) @@ -394,7 +396,7 @@ int intel_plane_atomic_check(struct intel_atomic_state *state, const struct intel_plane_state *old_plane_state = intel_atomic_get_old_plane_state(state, plane); const struct intel_plane_state *new_master_plane_state; - struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, plane->pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(i915, plane->pipe); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct intel_crtc_state *new_crtc_state = @@ -469,31 +471,72 @@ skl_next_plane_to_commit(struct intel_atomic_state *state, return NULL; } -void intel_update_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +void intel_plane_update_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + trace_intel_plane_update_noarm(&plane->base, crtc); + + if (plane->update_noarm) + plane->update_noarm(plane, crtc_state, plane_state); +} + +void intel_plane_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - trace_intel_update_plane(&plane->base, crtc); + trace_intel_plane_update_arm(&plane->base, crtc); if (crtc_state->uapi.async_flip && plane->async_flip) plane->async_flip(plane, crtc_state, plane_state, true); else - plane->update_plane(plane, crtc_state, plane_state); + plane->update_arm(plane, crtc_state, plane_state); } -void intel_disable_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) +void intel_plane_disable_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - trace_intel_disable_plane(&plane->base, crtc); - plane->disable_plane(plane, crtc_state); + trace_intel_plane_disable_arm(&plane->base, crtc); + plane->disable_arm(plane, crtc_state); +} + +void intel_update_planes_on_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_crtc_state *new_crtc_state = + 
intel_atomic_get_new_crtc_state(state, crtc); + u32 update_mask = new_crtc_state->update_planes; + struct intel_plane_state *new_plane_state; + struct intel_plane *plane; + int i; + + if (new_crtc_state->uapi.async_flip) + return; + + /* + * Since we only write non-arming registers here, + * the order does not matter even for skl+. + */ + for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) { + if (crtc->pipe != plane->pipe || + !(update_mask & BIT(plane->id))) + continue; + + /* TODO: for mailbox updates this should be skipped */ + if (new_plane_state->uapi.visible || + new_plane_state->planar_slave) + intel_plane_update_noarm(plane, new_crtc_state, new_plane_state); + } } -void skl_update_planes_on_crtc(struct intel_atomic_state *state, - struct intel_crtc *crtc) +void skl_arm_planes_on_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc) { struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); @@ -515,17 +558,20 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state, struct intel_plane_state *new_plane_state = intel_atomic_get_new_plane_state(state, plane); + /* + * TODO: for mailbox updates intel_plane_update_noarm() + * would have to be called here as well. + */ if (new_plane_state->uapi.visible || - new_plane_state->planar_slave) { - intel_update_plane(plane, new_crtc_state, new_plane_state); - } else { - intel_disable_plane(plane, new_crtc_state); - } + new_plane_state->planar_slave) + intel_plane_update_arm(plane, new_crtc_state, new_plane_state); + else + intel_plane_disable_arm(plane, new_crtc_state); } } -void i9xx_update_planes_on_crtc(struct intel_atomic_state *state, - struct intel_crtc *crtc) +void i9xx_arm_planes_on_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc) { struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -539,10 +585,14 @@ void i9xx_update_planes_on_crtc(struct intel_atomic_state *state, !(update_mask & BIT(plane->id))) continue; + /* + * TODO: for mailbox updates intel_plane_update_noarm() + * would have to be called here as well. 
+ */ if (new_plane_state->uapi.visible) - intel_update_plane(plane, new_crtc_state, new_plane_state); + intel_plane_update_arm(plane, new_crtc_state, new_plane_state); else - intel_disable_plane(plane, new_crtc_state); + intel_plane_disable_arm(plane, new_crtc_state); } } @@ -738,6 +788,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane, i915_gem_object_wait_priority(obj, 0, &attr); if (!new_plane_state->uapi.fence) { /* implicit fencing */ + struct dma_resv_iter cursor; struct dma_fence *fence; ret = i915_sw_fence_await_reservation(&state->commit_ready, @@ -748,12 +799,12 @@ intel_prepare_plane_fb(struct drm_plane *_plane, if (ret < 0) goto unpin_fb; - fence = dma_resv_get_excl_unlocked(obj->base.resv); - if (fence) { + dma_resv_iter_begin(&cursor, obj->base.resv, false); + dma_resv_for_each_fence_unlocked(&cursor, fence) { add_rps_boost_after_vblank(new_plane_state->hw.crtc, fence); - dma_fence_put(fence); } + dma_resv_iter_end(&cursor); } else { add_rps_boost_after_vblank(new_plane_state->hw.crtc, new_plane_state->uapi.fence); diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h index 62e5a2a77fd4..7907f601598e 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h @@ -30,20 +30,25 @@ void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state, struct intel_crtc *crtc); void intel_plane_copy_hw_state(struct intel_plane_state *plane_state, const struct intel_plane_state *from_plane_state); -void intel_update_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state); -void intel_disable_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state); +void intel_plane_update_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); +void intel_plane_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); +void intel_plane_disable_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state); struct intel_plane *intel_plane_alloc(void); void intel_plane_free(struct intel_plane *plane); struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane); void intel_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state); -void skl_update_planes_on_crtc(struct intel_atomic_state *state, - struct intel_crtc *crtc); -void i9xx_update_planes_on_crtc(struct intel_atomic_state *state, - struct intel_crtc *crtc); +void intel_update_planes_on_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void skl_arm_planes_on_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void i9xx_arm_planes_on_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc); int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *crtc_state, const struct intel_plane_state *old_plane_state, diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index 03e8c05a74f6..3bdca0fe2cee 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -31,6 +31,7 @@ #include "intel_atomic.h" #include "intel_audio.h" #include "intel_cdclk.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_lpe_audio.h" @@ 
-62,6 +63,15 @@ * struct &i915_audio_component_audio_ops @audio_ops is called from i915 driver. */ +struct intel_audio_funcs { + void (*audio_codec_enable)(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); + void (*audio_codec_disable)(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state); +}; + /* DP N/M table */ #define LC_810M 810000 #define LC_540M 540000 @@ -388,7 +398,7 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_audio_component *acomp = dev_priv->audio_component; + struct i915_audio_component *acomp = dev_priv->audio.component; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; enum port port = encoder->port; const struct dp_aud_n_m *nm; @@ -436,7 +446,7 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_audio_component *acomp = dev_priv->audio_component; + struct i915_audio_component *acomp = dev_priv->audio.component; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; enum port port = encoder->port; int n, rate; @@ -494,7 +504,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder, drm_dbg_kms(&dev_priv->drm, "Disable audio codec on transcoder %s\n", transcoder_name(cpu_transcoder)); - mutex_lock(&dev_priv->av_mutex); + mutex_lock(&dev_priv->audio.mutex); /* Disable timestamps */ tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder)); @@ -512,7 +522,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder, tmp &= ~AUDIO_OUTPUT_ENABLE(cpu_transcoder); intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp); - mutex_unlock(&dev_priv->av_mutex); + mutex_unlock(&dev_priv->audio.mutex); } static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder, @@ -641,7 +651,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder, "Enable audio codec on transcoder %s, %u bytes ELD\n", transcoder_name(cpu_transcoder), drm_eld_size(eld)); - mutex_lock(&dev_priv->av_mutex); + mutex_lock(&dev_priv->audio.mutex); /* Enable Audio WA for 4k DSC usecases */ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP)) @@ -679,7 +689,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder, /* Enable timestamps */ hsw_audio_config_update(encoder, crtc_state); - mutex_unlock(&dev_priv->av_mutex); + mutex_unlock(&dev_priv->audio.mutex); } static void ilk_audio_codec_disable(struct intel_encoder *encoder, @@ -826,7 +836,7 @@ void intel_audio_codec_enable(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_audio_component *acomp = dev_priv->audio_component; + struct i915_audio_component *acomp = dev_priv->audio.component; struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_connector *connector = conn_state->connector; const struct drm_display_mode *adjusted_mode = @@ -848,17 +858,17 @@ void intel_audio_codec_enable(struct intel_encoder *encoder, connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; - if (dev_priv->audio_funcs) - dev_priv->audio_funcs->audio_codec_enable(encoder, + if (dev_priv->audio.funcs) + dev_priv->audio.funcs->audio_codec_enable(encoder, 
crtc_state, conn_state); - mutex_lock(&dev_priv->av_mutex); + mutex_lock(&dev_priv->audio.mutex); encoder->audio_connector = connector; /* referred in audio callbacks */ - dev_priv->av_enc_map[pipe] = encoder; - mutex_unlock(&dev_priv->av_mutex); + dev_priv->audio.encoder_map[pipe] = encoder; + mutex_unlock(&dev_priv->audio.mutex); if (acomp && acomp->base.audio_ops && acomp->base.audio_ops->pin_eld_notify) { @@ -888,20 +898,20 @@ void intel_audio_codec_disable(struct intel_encoder *encoder, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_audio_component *acomp = dev_priv->audio_component; + struct i915_audio_component *acomp = dev_priv->audio.component; struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); enum port port = encoder->port; enum pipe pipe = crtc->pipe; - if (dev_priv->audio_funcs) - dev_priv->audio_funcs->audio_codec_disable(encoder, + if (dev_priv->audio.funcs) + dev_priv->audio.funcs->audio_codec_disable(encoder, old_crtc_state, old_conn_state); - mutex_lock(&dev_priv->av_mutex); + mutex_lock(&dev_priv->audio.mutex); encoder->audio_connector = NULL; - dev_priv->av_enc_map[pipe] = NULL; - mutex_unlock(&dev_priv->av_mutex); + dev_priv->audio.encoder_map[pipe] = NULL; + mutex_unlock(&dev_priv->audio.mutex); if (acomp && acomp->base.audio_ops && acomp->base.audio_ops->pin_eld_notify) { @@ -931,19 +941,53 @@ static const struct intel_audio_funcs hsw_audio_funcs = { }; /** - * intel_init_audio_hooks - Set up chip specific audio hooks + * intel_audio_hooks_init - Set up chip specific audio hooks * @dev_priv: device private */ -void intel_init_audio_hooks(struct drm_i915_private *dev_priv) +void intel_audio_hooks_init(struct drm_i915_private *dev_priv) { if (IS_G4X(dev_priv)) { - dev_priv->audio_funcs = &g4x_audio_funcs; + dev_priv->audio.funcs = &g4x_audio_funcs; } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - dev_priv->audio_funcs = &ilk_audio_funcs; + dev_priv->audio.funcs = &ilk_audio_funcs; } else if (IS_HASWELL(dev_priv) || DISPLAY_VER(dev_priv) >= 8) { - dev_priv->audio_funcs = &hsw_audio_funcs; + dev_priv->audio.funcs = &hsw_audio_funcs; } else if (HAS_PCH_SPLIT(dev_priv)) { - dev_priv->audio_funcs = &ilk_audio_funcs; + dev_priv->audio.funcs = &ilk_audio_funcs; + } +} + +struct aud_ts_cdclk_m_n { + u8 m; + u16 n; +}; + +void intel_audio_cdclk_change_pre(struct drm_i915_private *i915) +{ + if (DISPLAY_VER(i915) >= 13) + intel_de_rmw(i915, AUD_TS_CDCLK_M, AUD_TS_CDCLK_M_EN, 0); +} + +static void get_aud_ts_cdclk_m_n(int refclk, int cdclk, struct aud_ts_cdclk_m_n *aud_ts) +{ + if (refclk == 24000) + aud_ts->m = 12; + else + aud_ts->m = 15; + + aud_ts->n = cdclk * aud_ts->m / 24000; +} + +void intel_audio_cdclk_change_post(struct drm_i915_private *i915) +{ + struct aud_ts_cdclk_m_n aud_ts; + + if (DISPLAY_VER(i915) >= 13) { + get_aud_ts_cdclk_m_n(i915->cdclk.hw.ref, i915->cdclk.hw.cdclk, &aud_ts); + + intel_de_write(i915, AUD_TS_CDCLK_N, aud_ts.n); + intel_de_write(i915, AUD_TS_CDCLK_M, aud_ts.m | AUD_TS_CDCLK_M_EN); + drm_dbg_kms(&i915->drm, "aud_ts_cdclk set to M=%u, N=%u\n", aud_ts.m, aud_ts.n); } } @@ -976,7 +1020,7 @@ static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv, struct intel_crtc *crtc; int ret; - crtc = intel_get_first_crtc(dev_priv); + crtc = intel_first_crtc(dev_priv); if (!crtc) return; @@ -1014,13 +1058,13 @@ static unsigned long i915_audio_component_get_power(struct device *kdev) ret = intel_display_power_get(dev_priv, 
POWER_DOMAIN_AUDIO_PLAYBACK); - if (dev_priv->audio_power_refcount++ == 0) { + if (dev_priv->audio.power_refcount++ == 0) { if (DISPLAY_VER(dev_priv) >= 9) { intel_de_write(dev_priv, AUD_FREQ_CNTRL, - dev_priv->audio_freq_cntrl); + dev_priv->audio.freq_cntrl); drm_dbg_kms(&dev_priv->drm, "restored AUD_FREQ_CNTRL to 0x%x\n", - dev_priv->audio_freq_cntrl); + dev_priv->audio.freq_cntrl); } /* Force CDCLK to 2*BCLK as long as we need audio powered. */ @@ -1041,7 +1085,7 @@ static void i915_audio_component_put_power(struct device *kdev, struct drm_i915_private *dev_priv = kdev_to_i915(kdev); /* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */ - if (--dev_priv->audio_power_refcount == 0) + if (--dev_priv->audio.power_refcount == 0) if (IS_GEMINILAKE(dev_priv)) glk_force_audio_cdclk(dev_priv, false); @@ -1093,7 +1137,7 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev) /* * get the intel_encoder according to the parameter port and pipe * intel_encoder is saved by the index of pipe - * MST & (pipe >= 0): return the av_enc_map[pipe], + * MST & (pipe >= 0): return the audio.encoder_map[pipe], * when port is matched * MST & (pipe < 0): this is invalid * Non-MST & (pipe >= 0): only pipe = 0 (the first device entry) @@ -1108,10 +1152,10 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv, /* MST */ if (pipe >= 0) { if (drm_WARN_ON(&dev_priv->drm, - pipe >= ARRAY_SIZE(dev_priv->av_enc_map))) + pipe >= ARRAY_SIZE(dev_priv->audio.encoder_map))) return NULL; - encoder = dev_priv->av_enc_map[pipe]; + encoder = dev_priv->audio.encoder_map[pipe]; /* * when bootup, audio driver may not know it is * MST or not. So it will poll all the port & pipe @@ -1127,7 +1171,7 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv, return NULL; for_each_pipe(dev_priv, pipe) { - encoder = dev_priv->av_enc_map[pipe]; + encoder = dev_priv->audio.encoder_map[pipe]; if (encoder == NULL) continue; @@ -1145,7 +1189,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, int pipe, int rate) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); - struct i915_audio_component *acomp = dev_priv->audio_component; + struct i915_audio_component *acomp = dev_priv->audio.component; struct intel_encoder *encoder; struct intel_crtc *crtc; unsigned long cookie; @@ -1155,7 +1199,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, return 0; cookie = i915_audio_component_get_power(kdev); - mutex_lock(&dev_priv->av_mutex); + mutex_lock(&dev_priv->audio.mutex); /* 1. 
get the pipe */ encoder = get_saved_enc(dev_priv, port, pipe); @@ -1174,7 +1218,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, hsw_audio_config_update(encoder, crtc->config); unlock: - mutex_unlock(&dev_priv->av_mutex); + mutex_unlock(&dev_priv->audio.mutex); i915_audio_component_put_power(kdev, cookie); return err; } @@ -1188,13 +1232,13 @@ static int i915_audio_component_get_eld(struct device *kdev, int port, const u8 *eld; int ret = -EINVAL; - mutex_lock(&dev_priv->av_mutex); + mutex_lock(&dev_priv->audio.mutex); intel_encoder = get_saved_enc(dev_priv, port, pipe); if (!intel_encoder) { drm_dbg_kms(&dev_priv->drm, "Not valid for port %c\n", port_name(port)); - mutex_unlock(&dev_priv->av_mutex); + mutex_unlock(&dev_priv->audio.mutex); return ret; } @@ -1206,7 +1250,7 @@ static int i915_audio_component_get_eld(struct device *kdev, int port, memcpy(buf, eld, min(max_bytes, ret)); } - mutex_unlock(&dev_priv->av_mutex); + mutex_unlock(&dev_priv->audio.mutex); return ret; } @@ -1241,7 +1285,7 @@ static int i915_audio_component_bind(struct device *i915_kdev, BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS); for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++) acomp->aud_sample_rate[i] = 0; - dev_priv->audio_component = acomp; + dev_priv->audio.component = acomp; drm_modeset_unlock_all(&dev_priv->drm); return 0; @@ -1256,14 +1300,14 @@ static void i915_audio_component_unbind(struct device *i915_kdev, drm_modeset_lock_all(&dev_priv->drm); acomp->base.ops = NULL; acomp->base.dev = NULL; - dev_priv->audio_component = NULL; + dev_priv->audio.component = NULL; drm_modeset_unlock_all(&dev_priv->drm); device_link_remove(hda_kdev, i915_kdev); - if (dev_priv->audio_power_refcount) + if (dev_priv->audio.power_refcount) drm_err(&dev_priv->drm, "audio power refcount %d after unbind\n", - dev_priv->audio_power_refcount); + dev_priv->audio.power_refcount); } static const struct component_ops i915_audio_component_bind_ops = { @@ -1327,10 +1371,13 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv) drm_dbg_kms(&dev_priv->drm, "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n", aud_freq, aud_freq_init); - dev_priv->audio_freq_cntrl = aud_freq; + dev_priv->audio.freq_cntrl = aud_freq; } - dev_priv->audio_component_registered = true; + /* init with current cdclk */ + intel_audio_cdclk_change_post(dev_priv); + + dev_priv->audio.component_registered = true; } /** @@ -1342,11 +1389,11 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv) */ static void i915_audio_component_cleanup(struct drm_i915_private *dev_priv) { - if (!dev_priv->audio_component_registered) + if (!dev_priv->audio.component_registered) return; component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops); - dev_priv->audio_component_registered = false; + dev_priv->audio.component_registered = false; } /** @@ -1368,7 +1415,7 @@ void intel_audio_init(struct drm_i915_private *dev_priv) */ void intel_audio_deinit(struct drm_i915_private *dev_priv) { - if ((dev_priv)->lpe_audio.platdev != NULL) + if ((dev_priv)->audio.lpe.platdev != NULL) intel_lpe_audio_teardown(dev_priv); else i915_audio_component_cleanup(dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h index a3657c7a7ba2..63b22131dc45 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.h +++ b/drivers/gpu/drm/i915/display/intel_audio.h @@ -11,13 +11,15 @@ struct drm_i915_private; struct intel_crtc_state; struct intel_encoder; -void 
intel_init_audio_hooks(struct drm_i915_private *dev_priv); +void intel_audio_hooks_init(struct drm_i915_private *dev_priv); void intel_audio_codec_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); void intel_audio_codec_disable(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state); +void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv); +void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv); void intel_audio_init(struct drm_i915_private *dev_priv); void intel_audio_deinit(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 2b1423a43437..9d989c9f5da4 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -1555,12 +1555,24 @@ static const u8 gen9bc_tgp_ddc_pin_map[] = { [DDC_BUS_DDI_D] = GMBUS_PIN_10_TC2_ICP, }; +static const u8 adlp_ddc_pin_map[] = { + [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT, + [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT, + [ADLP_DDC_BUS_PORT_TC1] = GMBUS_PIN_9_TC1_ICP, + [ADLP_DDC_BUS_PORT_TC2] = GMBUS_PIN_10_TC2_ICP, + [ADLP_DDC_BUS_PORT_TC3] = GMBUS_PIN_11_TC3_ICP, + [ADLP_DDC_BUS_PORT_TC4] = GMBUS_PIN_12_TC4_ICP, +}; + static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin) { const u8 *ddc_pin_map; int n_entries; - if (IS_ALDERLAKE_S(i915)) { + if (IS_ALDERLAKE_P(i915)) { + ddc_pin_map = adlp_ddc_pin_map; + n_entries = ARRAY_SIZE(adlp_ddc_pin_map); + } else if (IS_ALDERLAKE_S(i915)) { ddc_pin_map = adls_ddc_pin_map; n_entries = ARRAY_SIZE(adls_ddc_pin_map); } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) { diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 8d9d888e9316..2da4aacc956b 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -27,6 +27,9 @@ struct intel_qgv_info { u8 num_points; u8 num_psf_points; u8 t_bl; + u8 max_numchannels; + u8 channel_width; + u8 deinterleave; }; static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv, @@ -42,7 +45,7 @@ static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv, dclk_reference = 6; /* 6 * 16.666 MHz = 100 MHz */ else dclk_reference = 8; /* 8 * 16.666 MHz = 133 MHz */ - sp->dclk = dclk_ratio * dclk_reference; + sp->dclk = DIV_ROUND_UP((16667 * dclk_ratio * dclk_reference) + 500, 1000); val = intel_uncore_read(&dev_priv->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU); if (val & DG1_GEAR_TYPE) @@ -69,6 +72,7 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv, int point) { u32 val = 0, val2 = 0; + u16 dclk; int ret; ret = sandybridge_pcode_read(dev_priv, @@ -78,7 +82,8 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv, if (ret) return ret; - sp->dclk = val & 0xffff; + dclk = val & 0xffff; + sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) > 11 ? 
500 : 0), 1000); sp->t_rp = (val & 0xff0000) >> 16; sp->t_rcd = (val & 0xff000000) >> 24; @@ -133,7 +138,8 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv, } static int icl_get_qgv_points(struct drm_i915_private *dev_priv, - struct intel_qgv_info *qi) + struct intel_qgv_info *qi, + bool is_y_tile) { const struct dram_info *dram_info = &dev_priv->dram_info; int i, ret; @@ -141,20 +147,44 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv, qi->num_points = dram_info->num_qgv_points; qi->num_psf_points = dram_info->num_psf_gv_points; - if (DISPLAY_VER(dev_priv) == 12) + if (DISPLAY_VER(dev_priv) >= 12) switch (dram_info->type) { case INTEL_DRAM_DDR4: - qi->t_bl = 4; + qi->t_bl = is_y_tile ? 8 : 4; + qi->max_numchannels = 2; + qi->channel_width = 64; + qi->deinterleave = is_y_tile ? 1 : 2; break; case INTEL_DRAM_DDR5: - qi->t_bl = 8; + qi->t_bl = is_y_tile ? 16 : 8; + qi->max_numchannels = 4; + qi->channel_width = 32; + qi->deinterleave = is_y_tile ? 1 : 2; + break; + case INTEL_DRAM_LPDDR4: + if (IS_ROCKETLAKE(dev_priv)) { + qi->t_bl = 8; + qi->max_numchannels = 4; + qi->channel_width = 32; + qi->deinterleave = 2; + break; + } + fallthrough; + case INTEL_DRAM_LPDDR5: + qi->t_bl = 16; + qi->max_numchannels = 8; + qi->channel_width = 16; + qi->deinterleave = is_y_tile ? 2 : 4; break; default: qi->t_bl = 16; + qi->max_numchannels = 1; break; } - else if (DISPLAY_VER(dev_priv) == 11) + else if (DISPLAY_VER(dev_priv) == 11) { qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8; + qi->max_numchannels = 1; + } if (drm_WARN_ON(&dev_priv->drm, qi->num_points > ARRAY_SIZE(qi->points))) @@ -193,12 +223,6 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv, return 0; } -static int icl_calc_bw(int dclk, int num, int den) -{ - /* multiples of 16.666MHz (100/6) */ - return DIV_ROUND_CLOSEST(num * dclk * 100, den * 6); -} - static int adl_calc_psf_bw(int clk) { /* @@ -240,7 +264,7 @@ static const struct intel_sa_info tgl_sa_info = { }; static const struct intel_sa_info rkl_sa_info = { - .deburst = 16, + .deburst = 8, .deprogbwlimit = 20, /* GB/s */ .displayrtids = 128, .derating = 10, @@ -265,35 +289,130 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel struct intel_qgv_info qi = {}; bool is_y_tile = true; /* assume y tile may be used */ int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels); - int deinterleave; - int ipqdepth, ipqdepthpch; + int ipqdepth, ipqdepthpch = 16; int dclk_max; int maxdebw; + int num_groups = ARRAY_SIZE(dev_priv->max_bw); int i, ret; - ret = icl_get_qgv_points(dev_priv, &qi); + ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile); if (ret) { drm_dbg_kms(&dev_priv->drm, "Failed to get memory subsystem information, ignoring bandwidth limits"); return ret; } - deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2); dclk_max = icl_sagv_max_dclk(&qi); + maxdebw = min(sa->deprogbwlimit * 1000, dclk_max * 16 * 6 / 10); + ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels); + qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 
4 : 2); + + for (i = 0; i < num_groups; i++) { + struct intel_bw_info *bi = &dev_priv->max_bw[i]; + int clpchgroup; + int j; + + clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i; + bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1; + + bi->num_qgv_points = qi.num_points; + bi->num_psf_gv_points = qi.num_psf_points; + + for (j = 0; j < qi.num_points; j++) { + const struct intel_qgv_point *sp = &qi.points[j]; + int ct, bw; + + /* + * Max row cycle time + * + * FIXME what is the logic behind the + * assumed burst length? + */ + ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd + + (clpchgroup - 1) * qi.t_bl + sp->t_rdpre); + bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct); - ipqdepthpch = 16; + bi->deratedbw[j] = min(maxdebw, + bw * (100 - sa->derating) / 100); + + drm_dbg_kms(&dev_priv->drm, + "BW%d / QGV %d: num_planes=%d deratedbw=%u\n", + i, j, bi->num_planes, bi->deratedbw[j]); + } + } + /* + * If SAGV is disabled in BIOS, we always get 1 + * SAGV point, but we can't send PCode commands to restrict it + * as that would fail and be pointless anyway. + */ + if (qi.num_points == 1) + dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; + else + dev_priv->sagv_status = I915_SAGV_ENABLED; + + return 0; +} + +static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa) +{ + struct intel_qgv_info qi = {}; + const struct dram_info *dram_info = &dev_priv->dram_info; + bool is_y_tile = true; /* assume y tile may be used */ + int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels); + int ipqdepth, ipqdepthpch = 16; + int dclk_max; + int maxdebw, peakbw; + int clperchgroup; + int num_groups = ARRAY_SIZE(dev_priv->max_bw); + int i, ret; + + ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile); + if (ret) { + drm_dbg_kms(&dev_priv->drm, + "Failed to get memory subsystem information, ignoring bandwidth limits"); + return ret; + } + + if (dram_info->type == INTEL_DRAM_LPDDR4 || dram_info->type == INTEL_DRAM_LPDDR5) + num_channels *= 2; + + qi.deinterleave = qi.deinterleave ? : DIV_ROUND_UP(num_channels, is_y_tile ?
4 : 2); + + if (num_channels < qi.max_numchannels && DISPLAY_VER(dev_priv) >= 12) + qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1); + + if (DISPLAY_VER(dev_priv) > 11 && num_channels > qi.max_numchannels) + drm_warn(&dev_priv->drm, "Number of channels exceeds max number of channels."); + if (qi.max_numchannels != 0) + num_channels = min_t(u8, num_channels, qi.max_numchannels); + + dclk_max = icl_sagv_max_dclk(&qi); + + peakbw = num_channels * DIV_ROUND_UP(qi.channel_width, 8) * dclk_max; + maxdebw = min(sa->deprogbwlimit * 1000, peakbw * 6 / 10); /* 60% */ - maxdebw = min(sa->deprogbwlimit * 1000, - icl_calc_bw(dclk_max, 16, 1) * 6 / 10); /* 60% */ ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels); + /* + * clperchgroup = 4kpagespermempage * clperchperblock, + * clperchperblock = 8 / num_channels * interleave + */ + clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave; - for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) { + for (i = 0; i < num_groups; i++) { struct intel_bw_info *bi = &dev_priv->max_bw[i]; + struct intel_bw_info *bi_next; int clpchgroup; int j; - clpchgroup = (sa->deburst * deinterleave / num_channels) << i; - bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1; + if (i < num_groups - 1) + bi_next = &dev_priv->max_bw[i + 1]; + + clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i; + + if (i < num_groups - 1 && clpchgroup < clperchgroup) + bi_next->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1; + else + bi_next->num_planes = 0; bi->num_qgv_points = qi.num_points; bi->num_psf_gv_points = qi.num_psf_points; @@ -310,7 +429,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel */ ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd + (clpchgroup - 1) * qi.t_bl + sp->t_rdpre); - bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct); + bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct); bi->deratedbw[j] = min(maxdebw, bw * (100 - sa->derating) / 100); @@ -329,9 +448,6 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel "BW%d / PSF GV %d: num_planes=%d bw=%u\n", i, j, bi->num_planes, bi->psf_bw[j]); } - - if (bi->num_planes == 1) - break; } /* @@ -395,6 +511,34 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv, return 0; } +static unsigned int tgl_max_bw(struct drm_i915_private *dev_priv, + int num_planes, int qgv_point) +{ + int i; + + /* + * Let's return max bw for 0 planes + */ + num_planes = max(1, num_planes); + + for (i = ARRAY_SIZE(dev_priv->max_bw) - 1; i >= 0; i--) { + const struct intel_bw_info *bi = + &dev_priv->max_bw[i]; + + /* + * Pcode will not expose all QGV points when + * SAGV is forced to off/min/med/max. 
+ */ + if (qgv_point >= bi->num_qgv_points) + return UINT_MAX; + + if (num_planes <= bi->num_planes) + return bi->deratedbw[qgv_point]; + } + + return dev_priv->max_bw[0].deratedbw[qgv_point]; +} + static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv, int psf_gv_point) { @@ -412,13 +556,13 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv) if (IS_DG2(dev_priv)) dg2_get_bw_info(dev_priv); else if (IS_ALDERLAKE_P(dev_priv)) - icl_get_bw_info(dev_priv, &adlp_sa_info); + tgl_get_bw_info(dev_priv, &adlp_sa_info); else if (IS_ALDERLAKE_S(dev_priv)) - icl_get_bw_info(dev_priv, &adls_sa_info); + tgl_get_bw_info(dev_priv, &adls_sa_info); else if (IS_ROCKETLAKE(dev_priv)) - icl_get_bw_info(dev_priv, &rkl_sa_info); + tgl_get_bw_info(dev_priv, &rkl_sa_info); else if (DISPLAY_VER(dev_priv) == 12) - icl_get_bw_info(dev_priv, &tgl_sa_info); + tgl_get_bw_info(dev_priv, &tgl_sa_info); else if (DISPLAY_VER(dev_priv) == 11) icl_get_bw_info(dev_priv, &icl_sa_info); } @@ -490,7 +634,7 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv, for_each_pipe(dev_priv, pipe) data_rate += bw_state->data_rate[pipe]; - if (DISPLAY_VER(dev_priv) >= 13 && intel_vtd_active()) + if (DISPLAY_VER(dev_priv) >= 13 && intel_vtd_active(dev_priv)) data_rate = data_rate * 105 / 100; return data_rate; @@ -746,7 +890,10 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) for (i = 0; i < num_qgv_points; i++) { unsigned int max_data_rate; - max_data_rate = icl_max_bw(dev_priv, num_active_planes, i); + if (DISPLAY_VER(dev_priv) > 11) + max_data_rate = tgl_max_bw(dev_priv, num_active_planes, i); + else + max_data_rate = icl_max_bw(dev_priv, num_active_planes, i); /* * We need to know which qgv point gives us * maximum bandwidth in order to disable SAGV diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 868dd43a7542..c30cf8d2b835 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -24,8 +24,11 @@ #include <linux/time.h> #include "intel_atomic.h" +#include "intel_atomic_plane.h" +#include "intel_audio.h" #include "intel_bw.h" #include "intel_cdclk.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_pcode.h" @@ -66,7 +69,7 @@ void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv, dev_priv->cdclk_funcs->get_cdclk(dev_priv, cdclk_config); } -int intel_cdclk_bw_calc_min_cdclk(struct intel_atomic_state *state) +static int intel_cdclk_bw_calc_min_cdclk(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); return dev_priv->cdclk_funcs->bw_calc_min_cdclk(state); @@ -1211,6 +1214,19 @@ static void skl_cdclk_uninit_hw(struct drm_i915_private *dev_priv) skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE); } +static bool has_cdclk_squasher(struct drm_i915_private *i915) +{ + return IS_DG2(i915); +} + +struct intel_cdclk_vals { + u32 cdclk; + u16 refclk; + u16 waveform; + u8 divider; /* CD2X divider * 2 */ + u8 ratio; +}; + static const struct intel_cdclk_vals bxt_cdclk_table[] = { { .refclk = 19200, .cdclk = 144000, .divider = 8, .ratio = 60 }, { .refclk = 19200, .cdclk = 288000, .divider = 4, .ratio = 60 }, @@ -1312,12 +1328,19 @@ static const struct intel_cdclk_vals adlp_cdclk_table[] = { }; static const struct intel_cdclk_vals dg2_cdclk_table[] = { - { .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 9 }, - { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 }, - { 
.refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 }, - { .refclk = 38400, .cdclk = 326400, .divider = 4, .ratio = 34 }, - { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 }, - { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 }, + { .refclk = 38400, .cdclk = 163200, .divider = 2, .ratio = 34, .waveform = 0x8888 }, + { .refclk = 38400, .cdclk = 204000, .divider = 2, .ratio = 34, .waveform = 0x9248 }, + { .refclk = 38400, .cdclk = 244800, .divider = 2, .ratio = 34, .waveform = 0xa4a4 }, + { .refclk = 38400, .cdclk = 285600, .divider = 2, .ratio = 34, .waveform = 0xa54a }, + { .refclk = 38400, .cdclk = 326400, .divider = 2, .ratio = 34, .waveform = 0xaaaa }, + { .refclk = 38400, .cdclk = 367200, .divider = 2, .ratio = 34, .waveform = 0xad5a }, + { .refclk = 38400, .cdclk = 408000, .divider = 2, .ratio = 34, .waveform = 0xb6b6 }, + { .refclk = 38400, .cdclk = 448800, .divider = 2, .ratio = 34, .waveform = 0xdbb6 }, + { .refclk = 38400, .cdclk = 489600, .divider = 2, .ratio = 34, .waveform = 0xeeee }, + { .refclk = 38400, .cdclk = 530400, .divider = 2, .ratio = 34, .waveform = 0xf7de }, + { .refclk = 38400, .cdclk = 571200, .divider = 2, .ratio = 34, .waveform = 0xfefe }, + { .refclk = 38400, .cdclk = 612000, .divider = 2, .ratio = 34, .waveform = 0xfffe }, + { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34, .waveform = 0xffff }, {} }; @@ -1453,6 +1476,7 @@ static void bxt_de_pll_readout(struct drm_i915_private *dev_priv, static void bxt_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { + u32 squash_ctl = 0; u32 divider; int div; @@ -1490,7 +1514,21 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv, return; } - cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco, div); + if (has_cdclk_squasher(dev_priv)) + squash_ctl = intel_de_read(dev_priv, CDCLK_SQUASH_CTL); + + if (squash_ctl & CDCLK_SQUASH_ENABLE) { + u16 waveform; + int size; + + size = REG_FIELD_GET(CDCLK_SQUASH_WINDOW_SIZE_MASK, squash_ctl) + 1; + waveform = REG_FIELD_GET(CDCLK_SQUASH_WAVEFORM_MASK, squash_ctl) >> (16 - size); + + cdclk_config->cdclk = DIV_ROUND_CLOSEST(hweight16(waveform) * + cdclk_config->vco, size * div); + } else { + cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco, div); + } out: /* @@ -1625,6 +1663,26 @@ static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv, } } +static u32 cdclk_squash_waveform(struct drm_i915_private *dev_priv, + int cdclk) +{ + const struct intel_cdclk_vals *table = dev_priv->cdclk.table; + int i; + + if (cdclk == dev_priv->cdclk.hw.bypass) + return 0; + + for (i = 0; table[i].refclk; i++) + if (table[i].refclk == dev_priv->cdclk.hw.ref && + table[i].cdclk == cdclk) + return table[i].waveform; + + drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n", + cdclk, dev_priv->cdclk.hw.ref); + + return 0xffff; +} + static void bxt_set_cdclk(struct drm_i915_private *dev_priv, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) @@ -1632,6 +1690,8 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk = cdclk_config->cdclk; int vco = cdclk_config->vco; u32 val; + u16 waveform; + int clock; int ret; /* Inform power controller of upcoming frequency change. 
*/ @@ -1675,7 +1735,24 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, bxt_de_pll_enable(dev_priv, vco); } - val = bxt_cdclk_cd2x_div_sel(dev_priv, cdclk, vco) | + waveform = cdclk_squash_waveform(dev_priv, cdclk); + + if (waveform) + clock = vco / 2; + else + clock = cdclk; + + if (has_cdclk_squasher(dev_priv)) { + u32 squash_ctl = 0; + + if (waveform) + squash_ctl = CDCLK_SQUASH_ENABLE | + CDCLK_SQUASH_WINDOW_SIZE(0xf) | waveform; + + intel_de_write(dev_priv, CDCLK_SQUASH_CTL, squash_ctl); + } + + val = bxt_cdclk_cd2x_div_sel(dev_priv, clock, vco) | bxt_cdclk_cd2x_pipe(dev_priv, pipe) | skl_cdclk_decimal(cdclk); @@ -1689,7 +1766,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, intel_de_write(dev_priv, CDCLK_CTL, val); if (pipe != INVALID_PIPE) - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe)); if (DISPLAY_VER(dev_priv) >= 11) { ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, @@ -1727,7 +1804,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) { u32 cdctl, expected; - int cdclk, vco; + int cdclk, clock, vco; intel_update_cdclk(dev_priv); intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK"); @@ -1763,8 +1840,12 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) expected = skl_cdclk_decimal(cdclk); /* Figure out what CD2X divider we should be using for this cdclk */ - expected |= bxt_cdclk_cd2x_div_sel(dev_priv, - dev_priv->cdclk.hw.cdclk, + if (has_cdclk_squasher(dev_priv)) + clock = dev_priv->cdclk.hw.vco / 2; + else + clock = dev_priv->cdclk.hw.cdclk; + + expected |= bxt_cdclk_cd2x_div_sel(dev_priv, clock, dev_priv->cdclk.hw.vco); /* @@ -1880,6 +1961,25 @@ static bool intel_cdclk_can_crawl(struct drm_i915_private *dev_priv, a->ref == b->ref; } +static bool intel_cdclk_can_squash(struct drm_i915_private *dev_priv, + const struct intel_cdclk_config *a, + const struct intel_cdclk_config *b) +{ + /* + * FIXME should store a bit more state in intel_cdclk_config + * to differentiate squasher vs. cd2x divider properly. For + * the moment all platforms with squasher use a fixed cd2x + * divider. + */ + if (!has_cdclk_squasher(dev_priv)) + return false; + + return a->cdclk != b->cdclk && + a->vco != 0 && + a->vco == b->vco && + a->ref == b->ref; +} + /** * intel_cdclk_needs_modeset - Determine if changing between the CDCLK * configurations requires a modeset on all pipes @@ -1917,7 +2017,17 @@ static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv, if (DISPLAY_VER(dev_priv) < 10 && !IS_BROXTON(dev_priv)) return false; + /* + * FIXME should store a bit more state in intel_cdclk_config + * to differentiate squasher vs. cd2x divider properly. For + * the moment all platforms with squasher use a fixed cd2x + * divider. + */ + if (has_cdclk_squasher(dev_priv)) + return false; + return a->cdclk != b->cdclk && + a->vco != 0 && a->vco == b->vco && a->ref == b->ref; } @@ -1975,6 +2085,8 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, intel_psr_pause(intel_dp); } + intel_audio_cdclk_change_pre(dev_priv); + /* * Lock aux/gmbus while we change cdclk in case those * functions use cdclk.
Not all platforms/ports do, @@ -2003,6 +2115,8 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, intel_psr_resume(intel_dp); } + intel_audio_cdclk_change_post(dev_priv); + if (drm_WARN(&dev_priv->drm, intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config), "cdclk state doesn't match!\n")) { @@ -2524,6 +2638,58 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state) return to_intel_cdclk_state(cdclk_state); } +int intel_cdclk_atomic_check(struct intel_atomic_state *state, + bool *need_cdclk_calc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_cdclk_state *old_cdclk_state; + const struct intel_cdclk_state *new_cdclk_state; + struct intel_plane_state *plane_state; + struct intel_bw_state *new_bw_state; + struct intel_plane *plane; + int min_cdclk = 0; + enum pipe pipe; + int ret; + int i; + + /* + * active_planes bitmask has been updated, and potentially affected + * planes are part of the state. We can now compute the minimum cdclk + * for each plane. + */ + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc); + if (ret) + return ret; + } + + old_cdclk_state = intel_atomic_get_old_cdclk_state(state); + new_cdclk_state = intel_atomic_get_new_cdclk_state(state); + + if (new_cdclk_state && + old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk) + *need_cdclk_calc = true; + + ret = intel_cdclk_bw_calc_min_cdclk(state); + if (ret) + return ret; + + new_bw_state = intel_atomic_get_new_bw_state(state); + + if (!new_cdclk_state || !new_bw_state) + return 0; + + for_each_pipe(i915, pipe) { + min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk); + + /* Currently do this change only if we need to increase */ + if (new_bw_state->min_cdclk > min_cdclk) + *need_cdclk_calc = true; + } + + return 0; +} + int intel_cdclk_init(struct drm_i915_private *dev_priv) { struct intel_cdclk_state *cdclk_state; @@ -2587,7 +2753,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) struct intel_crtc_state *crtc_state; pipe = ilog2(new_cdclk_state->active_pipes); - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(dev_priv, pipe); crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); if (IS_ERR(crtc_state)) @@ -2597,9 +2763,14 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) pipe = INVALID_PIPE; } - if (intel_cdclk_can_crawl(dev_priv, - &old_cdclk_state->actual, - &new_cdclk_state->actual)) { + if (intel_cdclk_can_squash(dev_priv, + &old_cdclk_state->actual, + &new_cdclk_state->actual)) { + drm_dbg_kms(&dev_priv->drm, + "Can change cdclk via squasher\n"); + } else if (intel_cdclk_can_crawl(dev_priv, + &old_cdclk_state->actual, + &new_cdclk_state->actual)) { drm_dbg_kms(&dev_priv->drm, "Can change cdclk via crawl\n"); } else if (pipe != INVALID_PIPE) { diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h index 309b3f394e24..fc638522e445 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.h +++ b/drivers/gpu/drm/i915/display/intel_cdclk.h @@ -16,13 +16,6 @@ struct drm_i915_private; struct intel_atomic_state; struct intel_crtc_state; -struct intel_cdclk_vals { - u32 cdclk; - u16 refclk; - u8 divider; /* CD2X divider * 2 */ - u8 ratio; -}; - struct intel_cdclk_state { struct intel_global_state base; @@ -70,7 +63,8 @@ void intel_dump_cdclk_config(const struct intel_cdclk_config *cdclk_config, int intel_modeset_calc_cdclk(struct intel_atomic_state 
*state); void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config); -int intel_cdclk_bw_calc_min_cdclk(struct intel_atomic_state *state); +int intel_cdclk_atomic_check(struct intel_atomic_state *state, + bool *need_cdclk_calc); struct intel_cdclk_state * intel_atomic_get_cdclk_state(struct intel_atomic_state *state); diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 5359b7305a78..de3ded1e327a 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -26,7 +26,7 @@ #include "intel_de.h" #include "intel_display_types.h" #include "intel_dpll.h" -#include "intel_dsi.h" +#include "vlv_dsi_pll.h" #define CTM_COEFF_SIGN (1ULL << 63) @@ -552,8 +552,8 @@ static void i9xx_load_lut_8(struct intel_crtc *crtc, lut = blob->data; for (i = 0; i < 256; i++) - intel_de_write(dev_priv, PALETTE(pipe, i), - i9xx_lut_8(&lut[i])); + intel_de_write_fw(dev_priv, PALETTE(pipe, i), + i9xx_lut_8(&lut[i])); } static void i9xx_load_luts(const struct intel_crtc_state *crtc_state) @@ -576,15 +576,15 @@ static void i965_load_lut_10p6(struct intel_crtc *crtc, enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size - 1; i++) { - intel_de_write(dev_priv, PALETTE(pipe, 2 * i + 0), - i965_lut_10p6_ldw(&lut[i])); - intel_de_write(dev_priv, PALETTE(pipe, 2 * i + 1), - i965_lut_10p6_udw(&lut[i])); + intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 0), + i965_lut_10p6_ldw(&lut[i])); + intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 1), + i965_lut_10p6_udw(&lut[i])); } - intel_de_write(dev_priv, PIPEGCMAX(pipe, 0), lut[i].red); - intel_de_write(dev_priv, PIPEGCMAX(pipe, 1), lut[i].green); - intel_de_write(dev_priv, PIPEGCMAX(pipe, 2), lut[i].blue); + intel_de_write_fw(dev_priv, PIPEGCMAX(pipe, 0), lut[i].red); + intel_de_write_fw(dev_priv, PIPEGCMAX(pipe, 1), lut[i].green); + intel_de_write_fw(dev_priv, PIPEGCMAX(pipe, 2), lut[i].blue); } static void i965_load_luts(const struct intel_crtc_state *crtc_state) @@ -618,8 +618,8 @@ static void ilk_load_lut_8(struct intel_crtc *crtc, lut = blob->data; for (i = 0; i < 256; i++) - intel_de_write(dev_priv, LGC_PALETTE(pipe, i), - i9xx_lut_8(&lut[i])); + intel_de_write_fw(dev_priv, LGC_PALETTE(pipe, i), + i9xx_lut_8(&lut[i])); } static void ilk_load_lut_10(struct intel_crtc *crtc, @@ -631,8 +631,8 @@ static void ilk_load_lut_10(struct intel_crtc *crtc, enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size; i++) - intel_de_write(dev_priv, PREC_PALETTE(pipe, i), - ilk_lut_10(&lut[i])); + intel_de_write_fw(dev_priv, PREC_PALETTE(pipe, i), + ilk_lut_10(&lut[i])); } static void ilk_load_luts(const struct intel_crtc_state *crtc_state) @@ -681,16 +681,16 @@ static void ivb_load_lut_10(struct intel_crtc *crtc, const struct drm_color_lut *entry = &lut[i * (lut_size - 1) / (hw_lut_size - 1)]; - intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), prec_index++); - intel_de_write(dev_priv, PREC_PAL_DATA(pipe), - ilk_lut_10(entry)); + intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), prec_index++); + intel_de_write_fw(dev_priv, PREC_PAL_DATA(pipe), + ilk_lut_10(entry)); } /* * Reset the index, otherwise it prevents the legacy palette to be * written properly. 
*/ - intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0); + intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), 0); } /* On BDW+ the index auto increment mode actually works */ @@ -704,23 +704,23 @@ static void bdw_load_lut_10(struct intel_crtc *crtc, int i, lut_size = drm_color_lut_size(blob); enum pipe pipe = crtc->pipe; - intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), - prec_index | PAL_PREC_AUTO_INCREMENT); + intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), + prec_index | PAL_PREC_AUTO_INCREMENT); for (i = 0; i < hw_lut_size; i++) { /* We discard half the user entries in split gamma mode */ const struct drm_color_lut *entry = &lut[i * (lut_size - 1) / (hw_lut_size - 1)]; - intel_de_write(dev_priv, PREC_PAL_DATA(pipe), - ilk_lut_10(entry)); + intel_de_write_fw(dev_priv, PREC_PAL_DATA(pipe), + ilk_lut_10(entry)); } /* * Reset the index, otherwise it prevents the legacy palette to be * written properly. */ - intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0); + intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), 0); } static void ivb_load_lut_ext_max(const struct intel_crtc_state *crtc_state) @@ -808,6 +808,14 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state) } } +static int glk_degamma_lut_size(struct drm_i915_private *i915) +{ + if (DISPLAY_VER(i915) >= 13) + return 131; + else + return 35; +} + static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -821,14 +829,14 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state) * ignore the index bits, so we need to reset it to index 0 * separately. */ - intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); - intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), - PRE_CSC_GAMC_AUTO_INCREMENT); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), + PRE_CSC_GAMC_AUTO_INCREMENT); for (i = 0; i < lut_size; i++) { /* - * First 33 entries represent range from 0 to 1.0 - * 34th and 35th entry will represent extended range + * First lut_size entries represent range from 0 to 1.0 + * 3 additional lut entries will represent extended range * inputs 3.0 and 7.0 respectively, currently clamped * at 1.0. Since the precision is 16bit, the user * value can be directly filled to register. @@ -839,15 +847,15 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state) * ToDo: Extend to max 7.0. Enable 32 bit input value * as compared to just 16 to achieve this. */ - intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), - lut[i].green); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), + lut[i].green); } /* Clamp values > 1.0. */ - while (i++ < 35) - intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16); + while (i++ < glk_degamma_lut_size(dev_priv)) + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16); - intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); } static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state) @@ -862,21 +870,21 @@ static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_stat * ignore the index bits, so we need to reset it to index 0 * separately. 
*/ - intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); - intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), - PRE_CSC_GAMC_AUTO_INCREMENT); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), + PRE_CSC_GAMC_AUTO_INCREMENT); for (i = 0; i < lut_size; i++) { u32 v = (i << 16) / (lut_size - 1); - intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), v); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), v); } /* Clamp values > 1.0. */ while (i++ < 35) - intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16); - intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); } static void glk_load_luts(const struct intel_crtc_state *crtc_state) @@ -1071,10 +1079,10 @@ static void chv_load_cgm_degamma(struct intel_crtc *crtc, enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size; i++) { - intel_de_write(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 0), - chv_cgm_degamma_ldw(&lut[i])); - intel_de_write(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 1), - chv_cgm_degamma_udw(&lut[i])); + intel_de_write_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 0), + chv_cgm_degamma_ldw(&lut[i])); + intel_de_write_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 1), + chv_cgm_degamma_udw(&lut[i])); } } @@ -1105,10 +1113,10 @@ static void chv_load_cgm_gamma(struct intel_crtc *crtc, enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size; i++) { - intel_de_write(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0), - chv_cgm_gamma_ldw(&lut[i])); - intel_de_write(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1), - chv_cgm_gamma_udw(&lut[i])); + intel_de_write_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0), + chv_cgm_gamma_ldw(&lut[i])); + intel_de_write_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1), + chv_cgm_gamma_udw(&lut[i])); } } @@ -1131,8 +1139,8 @@ static void chv_load_luts(const struct intel_crtc_state *crtc_state) else i965_load_luts(crtc_state); - intel_de_write(dev_priv, CGM_PIPE_MODE(crtc->pipe), - crtc_state->cgm_mode); + intel_de_write_fw(dev_priv, CGM_PIPE_MODE(crtc->pipe), - crtc_state->cgm_mode); + crtc_state->cgm_mode); } void intel_color_load_luts(const struct intel_crtc_state *crtc_state) @@ -1574,6 +1582,8 @@ static int glk_color_check(struct intel_crtc_state *crtc_state) static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); u32 gamma_mode = 0; if (crtc_state->hw.degamma_lut) @@ -1586,6 +1596,13 @@ static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state) if (!crtc_state->hw.gamma_lut || crtc_state_is_legacy_gamma(crtc_state)) gamma_mode |= GAMMA_MODE_MODE_8BIT; + /* + * Enable 10bit gamma for D13 + * ToDo: Extend to Logarithmic Gamma once the new UAPI + * is accepted and implemented by a userspace consumer + */ + else if (DISPLAY_VER(i915) >= 13) + gamma_mode |= GAMMA_MODE_MODE_10BIT; else gamma_mode |= GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED; @@ -1808,7 +1825,7 @@ static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < LEGACY_LUT_LENGTH; i++) { - u32 val = intel_de_read(dev_priv, PALETTE(pipe, i)); + u32 val = intel_de_read_fw(dev_priv, PALETTE(pipe, i)); i9xx_lut_8_pack(&lut[i], val); } @@ -1843,15 +1860,15 @@ static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < lut_size - 1; i++) { - u32 ldw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 0)); - u32
udw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 1)); + u32 ldw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 0)); + u32 udw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 1)); i965_lut_10p6_pack(&lut[i], ldw, udw); } - lut[i].red = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 0))); - lut[i].green = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 1))); - lut[i].blue = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 2))); + lut[i].red = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(pipe, 0))); + lut[i].green = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(pipe, 1))); + lut[i].blue = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(pipe, 2))); return blob; } @@ -1886,8 +1903,8 @@ static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < lut_size; i++) { - u32 ldw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0)); - u32 udw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1)); + u32 ldw = intel_de_read_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0)); + u32 udw = intel_de_read_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1)); chv_cgm_gamma_pack(&lut[i], ldw, udw); } @@ -1922,7 +1939,7 @@ static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < LEGACY_LUT_LENGTH; i++) { - u32 val = intel_de_read(dev_priv, LGC_PALETTE(pipe, i)); + u32 val = intel_de_read_fw(dev_priv, LGC_PALETTE(pipe, i)); i9xx_lut_8_pack(&lut[i], val); } @@ -1947,7 +1964,7 @@ static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < lut_size; i++) { - u32 val = intel_de_read(dev_priv, PREC_PALETTE(pipe, i)); + u32 val = intel_de_read_fw(dev_priv, PREC_PALETTE(pipe, i)); ilk_lut_10_pack(&lut[i], val); } @@ -1999,16 +2016,16 @@ static struct drm_property_blob *bdw_read_lut_10(struct intel_crtc *crtc, lut = blob->data; - intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), - prec_index | PAL_PREC_AUTO_INCREMENT); + intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), + prec_index | PAL_PREC_AUTO_INCREMENT); for (i = 0; i < lut_size; i++) { - u32 val = intel_de_read(dev_priv, PREC_PAL_DATA(pipe)); + u32 val = intel_de_read_fw(dev_priv, PREC_PAL_DATA(pipe)); ilk_lut_10_pack(&lut[i], val); } - intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0); + intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), 0); return blob; } @@ -2050,17 +2067,17 @@ icl_read_lut_multi_segment(struct intel_crtc *crtc) lut = blob->data; - intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), - PAL_PREC_AUTO_INCREMENT); + intel_de_write_fw(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), + PAL_PREC_AUTO_INCREMENT); for (i = 0; i < 9; i++) { - u32 ldw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe)); - u32 udw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe)); + u32 ldw = intel_de_read_fw(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe)); + u32 udw = intel_de_read_fw(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe)); icl_lut_multi_seg_pack(&lut[i], ldw, udw); } - intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), 0); + intel_de_write_fw(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), 0); /* * FIXME readouts from PAL_PREC_DATA register aren't giving diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c index 634e8d449457..f628e0542933 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy.c +++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c @@ -301,7 +301,7 @@ void 
intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv, val = intel_de_read(dev_priv, ICL_PORT_CL_DW10(phy)); val &= ~PWR_DOWN_LN_MASK; - val |= lane_mask << PWR_DOWN_LN_SHIFT; + val |= lane_mask; intel_de_write(dev_priv, ICL_PORT_CL_DW10(phy), val); } diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index 1c161eeed82f..6a3893c8ff22 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -45,6 +45,7 @@ #include "intel_fifo_underrun.h" #include "intel_gmbus.h" #include "intel_hotplug.h" +#include "intel_pch_display.h" /* Here's the desired hotplug mode */ #define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \ @@ -143,7 +144,7 @@ static void intel_crt_get_config(struct intel_encoder *encoder, static void hsw_crt_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + lpt_pch_get_config(pipe_config); hsw_ddi_get_config(encoder, pipe_config); @@ -152,8 +153,6 @@ static void hsw_crt_get_config(struct intel_encoder *encoder, DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC); pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder); - - pipe_config->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv); } /* Note: The caller is required to filter out dpms modes not supported by the @@ -247,6 +246,7 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); intel_crtc_vblank_off(old_crtc_state); @@ -261,10 +261,9 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state, pch_post_disable_crt(state, encoder, old_crtc_state, old_conn_state); - lpt_disable_pch_transcoder(dev_priv); - lpt_disable_iclkip(dev_priv); + lpt_pch_disable(state, crtc); - intel_ddi_fdi_post_disable(state, encoder, old_crtc_state, old_conn_state); + hsw_fdi_disable(encoder); drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder); @@ -316,14 +315,14 @@ static void hsw_enable_crt(struct intel_atomic_state *state, intel_enable_transcoder(crtc_state); - lpt_pch_enable(crtc_state); + lpt_pch_enable(state, crtc); intel_crtc_vblank_on(crtc_state); intel_crt_set_dpms(encoder, crtc_state, DRM_MODE_DPMS_ON); - intel_wait_for_vblank(dev_priv, pipe); - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); + intel_crtc_wait_for_next_vblank(crtc); intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); } @@ -722,7 +721,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe) intel_uncore_posting_read(uncore, pipeconf_reg); /* Wait for next Vblank to substitute * border color for Color info */ - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe)); st00 = intel_uncore_read8(uncore, _VGA_MSR_WRITE); status = ((st00 & (1 << 4)) != 0) ?
connector_status_connected :
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 254e67141a77..16c3ca66d9f0 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -3,29 +3,31 @@
  * Copyright © 2020 Intel Corporation
  */
 #include <linux/kernel.h>
+#include <linux/pm_qos.h>
 #include <linux/slab.h>

 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_plane.h>
 #include <drm/drm_plane_helper.h>
+#include <drm/drm_vblank_work.h>

-#include "i915_trace.h"
 #include "i915_vgpu.h"
-
+#include "i9xx_plane.h"
+#include "icl_dsi.h"
 #include "intel_atomic.h"
 #include "intel_atomic_plane.h"
 #include "intel_color.h"
 #include "intel_crtc.h"
 #include "intel_cursor.h"
 #include "intel_display_debugfs.h"
+#include "intel_display_trace.h"
 #include "intel_display_types.h"
 #include "intel_dsi.h"
 #include "intel_pipe_crc.h"
 #include "intel_psr.h"
 #include "intel_sprite.h"
 #include "intel_vrr.h"
-#include "i9xx_plane.h"
 #include "skl_universal_plane.h"

 static void assert_vblank_disabled(struct drm_crtc *crtc)
@@ -34,6 +36,38 @@ static void assert_vblank_disabled(struct drm_crtc *crtc)
 		drm_crtc_vblank_put(crtc);
 }

+struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915)
+{
+	return to_intel_crtc(drm_crtc_from_index(&i915->drm, 0));
+}
+
+struct intel_crtc *intel_crtc_for_pipe(struct drm_i915_private *i915,
+				       enum pipe pipe)
+{
+	struct intel_crtc *crtc;
+
+	for_each_intel_crtc(&i915->drm, crtc) {
+		if (crtc->pipe == pipe)
+			return crtc;
+	}
+
+	return NULL;
+}
+
+void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc)
+{
+	drm_crtc_wait_one_vblank(&crtc->base);
+}
+
+void intel_wait_for_vblank_if_active(struct drm_i915_private *i915,
+				     enum pipe pipe)
+{
+	struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
+
+	if (crtc->active)
+		intel_crtc_wait_for_next_vblank(crtc);
+}
+
 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
@@ -167,6 +201,8 @@ static void intel_crtc_destroy(struct drm_crtc *_crtc)
 {
 	struct intel_crtc *crtc = to_intel_crtc(_crtc);

+	cpu_latency_qos_remove_request(&crtc->vblank_pm_qos);
+
 	drm_crtc_cleanup(&crtc->base);
 	kfree(crtc);
 }
@@ -323,18 +359,6 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
 	if (ret)
 		goto fail;

-	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
-	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
-	dev_priv->pipe_to_crtc_mapping[pipe] = crtc;
-
-	if (DISPLAY_VER(dev_priv) < 9) {
-		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
-
-		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
-		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
-		dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
-	}
-
 	if (DISPLAY_VER(dev_priv) >= 11)
 		drm_crtc_create_scaling_filter_property(&crtc->base,
 							BIT(DRM_SCALING_FILTER_DEFAULT) |
@@ -344,6 +368,8 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)

 	intel_crtc_crc_init(crtc);

+	cpu_latency_qos_add_request(&crtc->vblank_pm_qos, PM_QOS_DEFAULT_VALUE);
+
 	drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);

 	return 0;
@@ -354,6 +380,65 @@ fail:
 	return ret;
 }

+static bool intel_crtc_needs_vblank_work(const struct intel_crtc_state *crtc_state)
+{
+	return crtc_state->hw.active &&
+		!intel_crtc_needs_modeset(crtc_state) &&
+		!crtc_state->preload_luts &&
+		(crtc_state->uapi.color_mgmt_changed ||
+		 crtc_state->update_pipe);
+}
+
+static void intel_crtc_vblank_work(struct kthread_work *base)
+{
+	struct drm_vblank_work *work = to_drm_vblank_work(base);
+	struct intel_crtc_state *crtc_state =
+		container_of(work, typeof(*crtc_state), vblank_work);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+	trace_intel_crtc_vblank_work_start(crtc);
+
+	intel_color_load_luts(crtc_state);
+
+	if (crtc_state->uapi.event) {
+		spin_lock_irq(&crtc->base.dev->event_lock);
+		drm_crtc_send_vblank_event(&crtc->base, crtc_state->uapi.event);
+		crtc_state->uapi.event = NULL;
+		spin_unlock_irq(&crtc->base.dev->event_lock);
+	}
+
+	trace_intel_crtc_vblank_work_end(crtc);
+}
+
+static void intel_crtc_vblank_work_init(struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+	drm_vblank_work_init(&crtc_state->vblank_work, &crtc->base,
+			     intel_crtc_vblank_work);
+	/*
+	 * Interrupt latency is critical for getting the vblank
+	 * work executed as early as possible during the vblank.
+	 */
+	cpu_latency_qos_update_request(&crtc->vblank_pm_qos, 0);
+}
+
+void intel_wait_for_vblank_workers(struct intel_atomic_state *state)
+{
+	struct intel_crtc_state *crtc_state;
+	struct intel_crtc *crtc;
+	int i;
+
+	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+		if (!intel_crtc_needs_vblank_work(crtc_state))
+			continue;
+
+		drm_vblank_work_flush(&crtc_state->vblank_work);
+		cpu_latency_qos_update_request(&crtc->vblank_pm_qos,
+					       PM_QOS_DEFAULT_VALUE);
+	}
+}
+
 int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
 			     int usecs)
 {
@@ -387,7 +472,7 @@ static int intel_mode_vblank_start(const struct drm_display_mode *mode)
  * until a subsequent call to intel_pipe_update_end(). That is done to
  * avoid random delays.
  */
-void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
+void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -402,10 +487,17 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
 	if (new_crtc_state->uapi.async_flip)
 		return;

-	if (new_crtc_state->vrr.enable)
-		vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state);
-	else
+	if (intel_crtc_needs_vblank_work(new_crtc_state))
+		intel_crtc_vblank_work_init(new_crtc_state);
+
+	if (new_crtc_state->vrr.enable) {
+		if (intel_vrr_is_push_sent(new_crtc_state))
+			vblank_start = intel_vrr_vmin_vblank_start(new_crtc_state);
+		else
+			vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state);
+	} else {
 		vblank_start = intel_mode_vblank_start(adjusted_mode);
+	}

 	/* FIXME needs to be calibrated sensibly */
 	min = vblank_start - intel_usecs_to_scanlines(adjusted_mode,
@@ -554,7 +646,11 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
 	 * Would be slightly nice to just grab the vblank count and arm the
 	 * event outside of the critical section - the spinlock might spin for a
 	 * while ...
 	 */
-	if (new_crtc_state->uapi.event) {
+	if (intel_crtc_needs_vblank_work(new_crtc_state)) {
+		drm_vblank_work_schedule(&new_crtc_state->vblank_work,
+					 drm_crtc_accurate_vblank_count(&crtc->base) + 1,
+					 false);
+	} else if (new_crtc_state->uapi.event) {
 		drm_WARN_ON(&dev_priv->drm,
 			    drm_crtc_vblank_get(&crtc->base) != 0);
@@ -566,11 +662,24 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
 		new_crtc_state->uapi.event = NULL;
 	}

-	local_irq_enable();
-
-	/* Send VRR Push to terminate Vblank */
+	/*
+	 * Send VRR Push to terminate Vblank. If we are already in vblank
+	 * this has to be done _after_ sampling the frame counter, as
+	 * otherwise the push would immediately terminate the vblank and
+	 * the sampled frame counter would correspond to the next frame
+	 * instead of the current frame.
+	 *
+	 * There is a tiny race here (iff vblank evasion failed us) where
+	 * we might sample the frame counter just before vmax vblank start
+	 * but the push would be sent just after it. That would cause the
+	 * push to affect the next frame instead of the current frame,
+	 * which would cause the next frame to terminate already at vmin
+	 * vblank start instead of vmax vblank start.
+	 */
 	intel_vrr_send_push(new_crtc_state);

+	local_irq_enable();
+
 	if (intel_vgpu_active(dev_priv))
 		return;
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.h b/drivers/gpu/drm/i915/display/intel_crtc.h
index a5ae997581aa..73077137fb99 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.h
+++ b/drivers/gpu/drm/i915/display/intel_crtc.h
@@ -8,11 +8,16 @@

 #include <linux/types.h>

+enum i9xx_plane_id;
 enum pipe;
+struct drm_display_mode;
 struct drm_i915_private;
+struct intel_atomic_state;
 struct intel_crtc;
 struct intel_crtc_state;

+int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
+			     int usecs);
 u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state);
 int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe);
 struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
@@ -21,5 +26,14 @@ void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
 void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state);
 void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state);
+void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state);
+void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
+void intel_wait_for_vblank_workers(struct intel_atomic_state *state);
+struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915);
+struct intel_crtc *intel_crtc_for_pipe(struct drm_i915_private *i915,
+				       enum pipe pipe);
+void intel_wait_for_vblank_if_active(struct drm_i915_private *i915,
+				     enum pipe pipe);
+void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc);

 #endif
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 11842f212613..16d34685d83f 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -28,11 +28,6 @@ static const u32 intel_cursor_formats[] = {
 	DRM_FORMAT_ARGB8888,
 };

-static const u64 cursor_format_modifiers[] = {
-	DRM_FORMAT_MOD_LINEAR,
-	DRM_FORMAT_MOD_INVALID
-};
-
 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv =
@@ -195,7 +190,7 @@ static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
 {
 	return CURSOR_ENABLE |
 		CURSOR_FORMAT_ARGB |
-		CURSOR_STRIDE(plane_state->view.color_plane[0].stride);
+		CURSOR_STRIDE(plane_state->view.color_plane[0].mapping_stride);
 }

 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
@@ -234,7 +229,7 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
 	}

 	drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
-		    plane_state->view.color_plane[0].stride != fb->pitches[0]);
+		    plane_state->view.color_plane[0].mapping_stride != fb->pitches[0]);

 	switch (fb->pitches[0]) {
 	case 256:
@@ -253,9 +248,10 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
 	return 0;
 }

-static void i845_update_cursor(struct intel_plane *plane,
-			       const struct intel_crtc_state *crtc_state,
-			       const struct intel_plane_state *plane_state)
+/* TODO: split into noarm+arm pair */
+static void i845_cursor_update_arm(struct intel_plane *plane,
+				   const struct intel_crtc_state *crtc_state,
+				   const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	u32 cntl = 0, base = 0, pos = 0, size = 0;
@@ -298,10 +294,10 @@ static void i845_update_cursor(struct intel_plane *plane,
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }

-static void i845_disable_cursor(struct intel_plane *plane,
-				const struct intel_crtc_state *crtc_state)
+static void i845_cursor_disable_arm(struct intel_plane *plane,
+				    const struct intel_crtc_state *crtc_state)
 {
-	i845_update_cursor(plane, crtc_state, NULL);
+	i845_cursor_update_arm(plane, crtc_state, NULL);
 }

 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
@@ -455,7 +451,7 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
 	}

 	drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
-		    plane_state->view.color_plane[0].stride != fb->pitches[0]);
+		    plane_state->view.color_plane[0].mapping_stride != fb->pitches[0]);

 	if (fb->pitches[0] !=
 	    drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
@@ -488,9 +484,10 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
 	return 0;
 }

-static void i9xx_update_cursor(struct intel_plane *plane,
-			       const struct intel_crtc_state *crtc_state,
-			       const struct intel_plane_state *plane_state)
+/* TODO: split into noarm+arm pair */
+static void i9xx_cursor_update_arm(struct intel_plane *plane,
+				   const struct intel_crtc_state *crtc_state,
+				   const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	enum pipe pipe = plane->pipe;
@@ -562,10 +559,10 @@ static void i9xx_update_cursor(struct intel_plane *plane,
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }

-static void i9xx_disable_cursor(struct intel_plane *plane,
-				const struct intel_crtc_state *crtc_state)
+static void i9xx_cursor_disable_arm(struct intel_plane *plane,
+				    const struct intel_crtc_state *crtc_state)
 {
-	i9xx_update_cursor(plane, crtc_state, NULL);
+	i9xx_cursor_update_arm(plane, crtc_state, NULL);
 }

 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
@@ -605,8 +602,10 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
 					      u32 format, u64 modifier)
 {
-	return modifier == DRM_FORMAT_MOD_LINEAR &&
-		format == DRM_FORMAT_ARGB8888;
+	if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier))
+		return false;
+
+	return format == DRM_FORMAT_ARGB8888;
 }

 static int
@@ -717,10 +716,12 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
 	 */
 	crtc_state->active_planes = new_crtc_state->active_planes;

-	if (new_plane_state->uapi.visible)
-		intel_update_plane(plane, crtc_state, new_plane_state);
-	else
-		intel_disable_plane(plane, crtc_state);
+	if (new_plane_state->uapi.visible) {
+		intel_plane_update_noarm(plane, crtc_state, new_plane_state);
+		intel_plane_update_arm(plane, crtc_state, new_plane_state);
+	} else {
+		intel_plane_disable_arm(plane, crtc_state);
+	}

 	intel_plane_unpin_fb(old_plane_state);

@@ -754,6 +755,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
 {
 	struct intel_plane *cursor;
 	int ret, zpos;
+	u64 *modifiers;

 	cursor = intel_plane_alloc();
 	if (IS_ERR(cursor))
@@ -766,14 +768,14 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,

 	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
 		cursor->max_stride = i845_cursor_max_stride;
-		cursor->update_plane = i845_update_cursor;
-		cursor->disable_plane = i845_disable_cursor;
+		cursor->update_arm = i845_cursor_update_arm;
+		cursor->disable_arm = i845_cursor_disable_arm;
 		cursor->get_hw_state = i845_cursor_get_hw_state;
 		cursor->check_plane = i845_check_cursor;
 	} else {
 		cursor->max_stride = i9xx_cursor_max_stride;
-		cursor->update_plane = i9xx_update_cursor;
-		cursor->disable_plane = i9xx_disable_cursor;
+		cursor->update_arm = i9xx_cursor_update_arm;
+		cursor->disable_arm = i9xx_cursor_disable_arm;
 		cursor->get_hw_state = i9xx_cursor_get_hw_state;
 		cursor->check_plane = i9xx_check_cursor;
 	}
@@ -784,13 +786,18 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
 	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
 		cursor->cursor.size = ~0;

+	modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_NONE);
+
 	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
 				       0, &intel_cursor_plane_funcs,
 				       intel_cursor_formats,
 				       ARRAY_SIZE(intel_cursor_formats),
-				       cursor_format_modifiers,
+				       modifiers,
 				       DRM_PLANE_TYPE_CURSOR,
 				       "cursor %c", pipe_name(pipe));
+
+	kfree(modifiers);
+
 	if (ret)
 		goto fail;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index cfb567df71b3..9c9d574f0b8c 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -25,6 +25,7 @@
  *
  */

+#include <drm/drm_privacy_screen_consumer.h>
 #include <drm/drm_scdc_helper.h>

 #include "i915_drv.h"
@@ -321,10 +322,11 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
 {
 	int dotclock;

+	/* CRT dotclock is determined via other means */
 	if (pipe_config->has_pch_encoder)
-		dotclock = intel_dotclock_calculate(pipe_config->port_clock,
-						    &pipe_config->fdi_m_n);
-	else if (intel_crtc_has_dp_encoder(pipe_config))
+		return;
+
+	if (intel_crtc_has_dp_encoder(pipe_config))
 		dotclock = intel_dotclock_calculate(pipe_config->port_clock,
 						    &pipe_config->dp_m_n);
 	else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
@@ -1039,7 +1041,6 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
 					 const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	int level = intel_ddi_level(encoder, crtc_state, 0);
 	const struct intel_ddi_buf_trans *trans;
 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
 	int n_entries, ln;
@@ -1068,32 +1069,36 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
 	intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);

 	/* Program PORT_TX_DW2 */
-	val = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
-	val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
-		 RCOMP_SCALAR_MASK);
-	val |=
SWING_SEL_UPPER(trans->entries[level].icl.dw2_swing_sel); - val |= SWING_SEL_LOWER(trans->entries[level].icl.dw2_swing_sel); - /* Program Rcomp scalar for every table entry */ - val |= RCOMP_SCALAR(0x98); - intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), val); + for (ln = 0; ln < 4; ln++) { + int level = intel_ddi_level(encoder, crtc_state, ln); + + intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_LN(ln, phy), + SWING_SEL_UPPER_MASK | SWING_SEL_LOWER_MASK | RCOMP_SCALAR_MASK, + SWING_SEL_UPPER(trans->entries[level].icl.dw2_swing_sel) | + SWING_SEL_LOWER(trans->entries[level].icl.dw2_swing_sel) | + RCOMP_SCALAR(0x98)); + } /* Program PORT_TX_DW4 */ /* We cannot write to GRP. It would overwrite individual loadgen. */ for (ln = 0; ln < 4; ln++) { - val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy)); - val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | - CURSOR_COEFF_MASK); - val |= POST_CURSOR_1(trans->entries[level].icl.dw4_post_cursor_1); - val |= POST_CURSOR_2(trans->entries[level].icl.dw4_post_cursor_2); - val |= CURSOR_COEFF(trans->entries[level].icl.dw4_cursor_coeff); - intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val); + int level = intel_ddi_level(encoder, crtc_state, ln); + + intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), + POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK, + POST_CURSOR_1(trans->entries[level].icl.dw4_post_cursor_1) | + POST_CURSOR_2(trans->entries[level].icl.dw4_post_cursor_2) | + CURSOR_COEFF(trans->entries[level].icl.dw4_cursor_coeff)); } /* Program PORT_TX_DW7 */ - val = intel_de_read(dev_priv, ICL_PORT_TX_DW7_LN(0, phy)); - val &= ~N_SCALAR_MASK; - val |= N_SCALAR(trans->entries[level].icl.dw7_n_scalar); - intel_de_write(dev_priv, ICL_PORT_TX_DW7_GRP(phy), val); + for (ln = 0; ln < 4; ln++) { + int level = intel_ddi_level(encoder, crtc_state, ln); + + intel_de_rmw(dev_priv, ICL_PORT_TX_DW7_LN(ln, phy), + N_SCALAR_MASK, + N_SCALAR(trans->entries[level].icl.dw7_n_scalar)); + } } static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder, @@ -1124,16 +1129,14 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder, * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0) */ for (ln = 0; ln < 4; ln++) { - val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy)); - val &= ~LOADGEN_SELECT; - val |= icl_combo_phy_loadgen_select(crtc_state, ln); - intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val); + intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), + LOADGEN_SELECT, + icl_combo_phy_loadgen_select(crtc_state, ln)); } /* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */ - val = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy)); - val |= SUS_CLOCK_CONFIG; - intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val); + intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy), + 0, SUS_CLOCK_CONFIG); /* 4. Clear training enable to change swing values */ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); @@ -1154,10 +1157,8 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port); - int level = intel_ddi_level(encoder, crtc_state, 0); const struct intel_ddi_buf_trans *trans; int n_entries, ln; - u32 val; if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder))) return; @@ -1166,53 +1167,51 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder, if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) return; - /* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. 
*/ for (ln = 0; ln < 2; ln++) { - val = intel_de_read(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port)); - val &= ~CRI_USE_FS32; - intel_de_write(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port), val); - - val = intel_de_read(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port)); - val &= ~CRI_USE_FS32; - intel_de_write(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port), val); + intel_de_rmw(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port), + CRI_USE_FS32, 0); + intel_de_rmw(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port), + CRI_USE_FS32, 0); } /* Program MG_TX_SWINGCTRL with values from vswing table */ for (ln = 0; ln < 2; ln++) { - val = intel_de_read(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port)); - val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK; - val |= CRI_TXDEEMPH_OVERRIDE_17_12( - trans->entries[level].mg.cri_txdeemph_override_17_12); - intel_de_write(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port), val); - - val = intel_de_read(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port)); - val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK; - val |= CRI_TXDEEMPH_OVERRIDE_17_12( - trans->entries[level].mg.cri_txdeemph_override_17_12); - intel_de_write(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port), val); + int level; + + level = intel_ddi_level(encoder, crtc_state, 2*ln+0); + + intel_de_rmw(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port), + CRI_TXDEEMPH_OVERRIDE_17_12_MASK, + CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12)); + + level = intel_ddi_level(encoder, crtc_state, 2*ln+1); + + intel_de_rmw(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port), + CRI_TXDEEMPH_OVERRIDE_17_12_MASK, + CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12)); } /* Program MG_TX_DRVCTRL with values from vswing table */ for (ln = 0; ln < 2; ln++) { - val = intel_de_read(dev_priv, MG_TX1_DRVCTRL(ln, tc_port)); - val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK | - CRI_TXDEEMPH_OVERRIDE_5_0_MASK); - val |= CRI_TXDEEMPH_OVERRIDE_5_0( - trans->entries[level].mg.cri_txdeemph_override_5_0) | - CRI_TXDEEMPH_OVERRIDE_11_6( - trans->entries[level].mg.cri_txdeemph_override_11_6) | - CRI_TXDEEMPH_OVERRIDE_EN; - intel_de_write(dev_priv, MG_TX1_DRVCTRL(ln, tc_port), val); - - val = intel_de_read(dev_priv, MG_TX2_DRVCTRL(ln, tc_port)); - val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK | - CRI_TXDEEMPH_OVERRIDE_5_0_MASK); - val |= CRI_TXDEEMPH_OVERRIDE_5_0( - trans->entries[level].mg.cri_txdeemph_override_5_0) | - CRI_TXDEEMPH_OVERRIDE_11_6( - trans->entries[level].mg.cri_txdeemph_override_11_6) | - CRI_TXDEEMPH_OVERRIDE_EN; - intel_de_write(dev_priv, MG_TX2_DRVCTRL(ln, tc_port), val); + int level; + + level = intel_ddi_level(encoder, crtc_state, 2*ln+0); + + intel_de_rmw(dev_priv, MG_TX1_DRVCTRL(ln, tc_port), + CRI_TXDEEMPH_OVERRIDE_11_6_MASK | + CRI_TXDEEMPH_OVERRIDE_5_0_MASK, + CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) | + CRI_TXDEEMPH_OVERRIDE_5_0(trans->entries[level].mg.cri_txdeemph_override_5_0) | + CRI_TXDEEMPH_OVERRIDE_EN); + + level = intel_ddi_level(encoder, crtc_state, 2*ln+1); + + intel_de_rmw(dev_priv, MG_TX2_DRVCTRL(ln, tc_port), + CRI_TXDEEMPH_OVERRIDE_11_6_MASK | + CRI_TXDEEMPH_OVERRIDE_5_0_MASK, + CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) | + CRI_TXDEEMPH_OVERRIDE_5_0(trans->entries[level].mg.cri_txdeemph_override_5_0) | + CRI_TXDEEMPH_OVERRIDE_EN); /* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */ } @@ -1223,50 +1222,34 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder, * values from table for which TX1 and TX2 enabled. 
*/ for (ln = 0; ln < 2; ln++) { - val = intel_de_read(dev_priv, MG_CLKHUB(ln, tc_port)); - if (crtc_state->port_clock < 300000) - val |= CFG_LOW_RATE_LKREN_EN; - else - val &= ~CFG_LOW_RATE_LKREN_EN; - intel_de_write(dev_priv, MG_CLKHUB(ln, tc_port), val); + intel_de_rmw(dev_priv, MG_CLKHUB(ln, tc_port), + CFG_LOW_RATE_LKREN_EN, + crtc_state->port_clock < 300000 ? CFG_LOW_RATE_LKREN_EN : 0); } /* Program the MG_TX_DCC<LN, port being used> based on the link frequency */ for (ln = 0; ln < 2; ln++) { - val = intel_de_read(dev_priv, MG_TX1_DCC(ln, tc_port)); - val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK; - if (crtc_state->port_clock <= 500000) { - val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN; - } else { - val |= CFG_AMI_CK_DIV_OVERRIDE_EN | - CFG_AMI_CK_DIV_OVERRIDE_VAL(1); - } - intel_de_write(dev_priv, MG_TX1_DCC(ln, tc_port), val); - - val = intel_de_read(dev_priv, MG_TX2_DCC(ln, tc_port)); - val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK; - if (crtc_state->port_clock <= 500000) { - val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN; - } else { - val |= CFG_AMI_CK_DIV_OVERRIDE_EN | - CFG_AMI_CK_DIV_OVERRIDE_VAL(1); - } - intel_de_write(dev_priv, MG_TX2_DCC(ln, tc_port), val); + intel_de_rmw(dev_priv, MG_TX1_DCC(ln, tc_port), + CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK | + CFG_AMI_CK_DIV_OVERRIDE_EN, + crtc_state->port_clock > 500000 ? + CFG_AMI_CK_DIV_OVERRIDE_VAL(1) | + CFG_AMI_CK_DIV_OVERRIDE_EN : 0); + + intel_de_rmw(dev_priv, MG_TX2_DCC(ln, tc_port), + CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK | + CFG_AMI_CK_DIV_OVERRIDE_EN, + crtc_state->port_clock > 500000 ? + CFG_AMI_CK_DIV_OVERRIDE_VAL(1) | + CFG_AMI_CK_DIV_OVERRIDE_EN : 0); } /* Program MG_TX_PISO_READLOAD with values from vswing table */ for (ln = 0; ln < 2; ln++) { - val = intel_de_read(dev_priv, - MG_TX1_PISO_READLOAD(ln, tc_port)); - val |= CRI_CALCINIT; - intel_de_write(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port), - val); - - val = intel_de_read(dev_priv, - MG_TX2_PISO_READLOAD(ln, tc_port)); - val |= CRI_CALCINIT; - intel_de_write(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port), - val); + intel_de_rmw(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port), + 0, CRI_CALCINIT); + intel_de_rmw(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port), + 0, CRI_CALCINIT); } } @@ -1275,9 +1258,7 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port); - int level = intel_ddi_level(encoder, crtc_state, 0); const struct intel_ddi_buf_trans *trans; - u32 val, dpcnt_mask, dpcnt_val; int n_entries, ln; if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder))) @@ -1287,33 +1268,36 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder, if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) return; - dpcnt_mask = (DKL_TX_PRESHOOT_COEFF_MASK | - DKL_TX_DE_EMPAHSIS_COEFF_MASK | - DKL_TX_VSWING_CONTROL_MASK); - dpcnt_val = DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing); - dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis); - dpcnt_val |= DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot); - for (ln = 0; ln < 2; ln++) { + int level; + intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln)); intel_de_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port), 0); - /* All the registers are RMW */ - val = intel_de_read(dev_priv, DKL_TX_DPCNTL0(tc_port)); - val &= ~dpcnt_mask; - val |= dpcnt_val; - intel_de_write(dev_priv, DKL_TX_DPCNTL0(tc_port), val); + level = intel_ddi_level(encoder, crtc_state, 2*ln+0); + + 
intel_de_rmw(dev_priv, DKL_TX_DPCNTL0(tc_port), + DKL_TX_PRESHOOT_COEFF_MASK | + DKL_TX_DE_EMPAHSIS_COEFF_MASK | + DKL_TX_VSWING_CONTROL_MASK, + DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) | + DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) | + DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing)); - val = intel_de_read(dev_priv, DKL_TX_DPCNTL1(tc_port)); - val &= ~dpcnt_mask; - val |= dpcnt_val; - intel_de_write(dev_priv, DKL_TX_DPCNTL1(tc_port), val); + level = intel_ddi_level(encoder, crtc_state, 2*ln+1); - val = intel_de_read(dev_priv, DKL_TX_DPCNTL2(tc_port)); - val &= ~DKL_TX_DP20BITMODE; - intel_de_write(dev_priv, DKL_TX_DPCNTL2(tc_port), val); + intel_de_rmw(dev_priv, DKL_TX_DPCNTL1(tc_port), + DKL_TX_PRESHOOT_COEFF_MASK | + DKL_TX_DE_EMPAHSIS_COEFF_MASK | + DKL_TX_VSWING_CONTROL_MASK, + DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) | + DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) | + DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing)); + + intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), + DKL_TX_DP20BITMODE, 0); } } @@ -1938,7 +1922,7 @@ void intel_ddi_enable_clock(struct intel_encoder *encoder, encoder->enable_clock(encoder, crtc_state); } -static void intel_ddi_disable_clock(struct intel_encoder *encoder) +void intel_ddi_disable_clock(struct intel_encoder *encoder) { if (encoder->disable_clock) encoder->disable_clock(encoder); @@ -2385,7 +2369,10 @@ static void dg2_ddi_pre_enable_dp(struct intel_atomic_state *state, /* 5.k Configure and enable FEC if needed */ intel_ddi_enable_fec(encoder, crtc_state); - intel_dsc_enable(encoder, crtc_state); + + intel_dsc_dp_pps_write(encoder, crtc_state); + + intel_dsc_enable(crtc_state); } static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, @@ -2519,8 +2506,11 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, /* 7.l Configure and enable FEC if needed */ intel_ddi_enable_fec(encoder, crtc_state); + + intel_dsc_dp_pps_write(encoder, crtc_state); + if (!crtc_state->bigjoiner) - intel_dsc_enable(encoder, crtc_state); + intel_dsc_enable(crtc_state); } static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, @@ -2585,8 +2575,10 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, if (!is_mst) intel_ddi_enable_pipe_clock(encoder, crtc_state); + intel_dsc_dp_pps_write(encoder, crtc_state); + if (!crtc_state->bigjoiner) - intel_dsc_enable(encoder, crtc_state); + intel_dsc_enable(crtc_state); } static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state, @@ -2824,12 +2816,10 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state, } if (old_crtc_state->bigjoiner_linked_crtc) { - struct intel_atomic_state *state = - to_intel_atomic_state(old_crtc_state->uapi.state); - struct intel_crtc *slave = + struct intel_crtc *slave_crtc = old_crtc_state->bigjoiner_linked_crtc; const struct intel_crtc_state *old_slave_crtc_state = - intel_atomic_get_old_crtc_state(state, slave); + intel_atomic_get_old_crtc_state(state, slave_crtc); intel_crtc_vblank_off(old_slave_crtc_state); @@ -2866,41 +2856,6 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state, intel_tc_port_put_link(dig_port); } -void intel_ddi_fdi_post_disable(struct intel_atomic_state *state, - struct intel_encoder *encoder, - const struct intel_crtc_state *old_crtc_state, - const struct drm_connector_state *old_conn_state) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 val; - - /* - * Bspec lists this as 
both step 13 (before DDI_BUF_CTL disable) - * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN, - * step 13 is the correct place for it. Step 18 is where it was - * originally before the BUN. - */ - val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); - val &= ~FDI_RX_ENABLE; - intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); - - intel_disable_ddi_buf(encoder, old_crtc_state); - intel_ddi_disable_clock(encoder); - - val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A)); - val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); - val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); - intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val); - - val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); - val &= ~FDI_PCDCLK; - intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); - - val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); - val &= ~FDI_RX_PLL_ENABLE; - intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); -} - static void trans_port_sync_stop_link_train(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) @@ -2951,6 +2906,7 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state, if (port == PORT_A && DISPLAY_VER(dev_priv) < 9) intel_dp_stop_link_train(intel_dp, crtc_state); + drm_connector_update_privacy_screen(conn_state); intel_edp_backlight_on(crtc_state, conn_state); if (!dig_port->lspcon.active || dig_port->dp.has_hdmi_sink) @@ -3095,6 +3051,12 @@ static void intel_disable_ddi_dp(struct intel_atomic_state *state, intel_dp->link_trained = false; + if (old_crtc_state->has_audio) + intel_audio_codec_disable(encoder, + old_crtc_state, old_conn_state); + + intel_drrs_disable(intel_dp, old_crtc_state); + intel_psr_disable(intel_dp, old_crtc_state); intel_edp_backlight_off(old_conn_state); /* Disable the decompression in DP Sink */ intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state, @@ -3112,6 +3074,10 @@ static void intel_disable_ddi_hdmi(struct intel_atomic_state *state, struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct drm_connector *connector = old_conn_state->connector; + if (old_crtc_state->has_audio) + intel_audio_codec_disable(encoder, + old_crtc_state, old_conn_state); + if (!intel_hdmi_handle_sink_scrambling(encoder, connector, false, false)) drm_dbg_kms(&i915->drm, @@ -3119,25 +3085,6 @@ static void intel_disable_ddi_hdmi(struct intel_atomic_state *state, connector->base.id, connector->name); } -static void intel_pre_disable_ddi(struct intel_atomic_state *state, - struct intel_encoder *encoder, - const struct intel_crtc_state *old_crtc_state, - const struct drm_connector_state *old_conn_state) -{ - struct intel_dp *intel_dp; - - if (old_crtc_state->has_audio) - intel_audio_codec_disable(encoder, old_crtc_state, - old_conn_state); - - if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI)) - return; - - intel_dp = enc_to_intel_dp(encoder); - intel_drrs_disable(intel_dp, old_crtc_state); - intel_psr_disable(intel_dp, old_crtc_state); -} - static void intel_disable_ddi(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, @@ -3166,6 +3113,7 @@ static void intel_ddi_update_pipe_dp(struct intel_atomic_state *state, intel_drrs_update(intel_dp, crtc_state); intel_backlight_update(state, encoder, crtc_state, conn_state); + drm_connector_update_privacy_screen(conn_state); } void intel_ddi_update_pipe(struct intel_atomic_state *state, @@ -3195,8 +3143,14 @@ intel_ddi_update_prepare(struct intel_atomic_state *state, 
intel_tc_port_get_link(enc_to_dig_port(encoder), required_lanes); - if (crtc_state && crtc_state->hw.active) + if (crtc_state && crtc_state->hw.active) { + struct intel_crtc *slave_crtc = crtc_state->bigjoiner_linked_crtc; + intel_update_active_dpll(state, crtc, encoder); + + if (slave_crtc) + intel_update_active_dpll(state, slave_crtc, encoder); + } } static void @@ -3552,18 +3506,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder, if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder))) return; - if (pipe_config->bigjoiner_slave) { - /* read out pipe settings from master */ - enum transcoder save = pipe_config->cpu_transcoder; - - /* Our own transcoder needs to be disabled when reading it in intel_ddi_read_func_ctl() */ - WARN_ON(pipe_config->output_types); - pipe_config->cpu_transcoder = (enum transcoder)pipe_config->bigjoiner_linked_crtc->pipe; - intel_ddi_read_func_ctl(encoder, pipe_config); - pipe_config->cpu_transcoder = save; - } else { - intel_ddi_read_func_ctl(encoder, pipe_config); - } + intel_ddi_read_func_ctl(encoder, pipe_config); intel_ddi_mso_get_config(encoder, pipe_config); @@ -3591,8 +3534,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder, dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp; } - if (!pipe_config->bigjoiner_slave) - ddi_dotclock_get(pipe_config); + ddi_dotclock_get(pipe_config); if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) pipe_config->lane_lat_optim_mask = @@ -3983,6 +3925,19 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port) return NULL; } + if (dig_port->base.type == INTEL_OUTPUT_EDP) { + struct drm_device *dev = dig_port->base.base.dev; + struct drm_privacy_screen *privacy_screen; + + privacy_screen = drm_privacy_screen_get(dev->dev, NULL); + if (!IS_ERR(privacy_screen)) { + drm_connector_attach_privacy_screen_provider(&connector->base, + privacy_screen); + } else if (PTR_ERR(privacy_screen) != -ENODEV) { + drm_warn(dev, "Error getting privacy-screen\n"); + } + } + return connector; } @@ -4472,7 +4427,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) encoder->enable = intel_enable_ddi; encoder->pre_pll_enable = intel_ddi_pre_pll_enable; encoder->pre_enable = intel_ddi_pre_enable; - encoder->pre_disable = intel_pre_disable_ddi; encoder->disable = intel_disable_ddi; encoder->post_disable = intel_ddi_post_disable; encoder->update_pipe = intel_ddi_update_pipe; diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h index d6971717ef9c..c2fea6562917 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.h +++ b/drivers/gpu/drm/i915/display/intel_ddi.h @@ -6,11 +6,11 @@ #ifndef __INTEL_DDI_H__ #define __INTEL_DDI_H__ -#include "intel_display.h" #include "i915_reg.h" struct drm_connector_state; struct drm_i915_private; +struct intel_atomic_state; struct intel_connector; struct intel_crtc; struct intel_crtc_state; @@ -18,6 +18,8 @@ struct intel_dp; struct intel_dpll_hw_state; struct intel_encoder; struct intel_shared_dpll; +enum pipe; +enum port; enum transcoder; i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder, @@ -30,6 +32,7 @@ void intel_ddi_fdi_post_disable(struct intel_atomic_state *state, const struct drm_connector_state *old_conn_state); void intel_ddi_enable_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); +void intel_ddi_disable_clock(struct intel_encoder *encoder); void intel_ddi_get_clock(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct intel_shared_dpll 
*pll); diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c index 78cd8f77b49d..1e689d573512 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c +++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c @@ -1032,6 +1032,21 @@ bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table) return table == &tgl_combo_phy_trans_edp_hbr2_hobl; } +static bool use_edp_hobl(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + return i915->vbt.edp.hobl && !intel_dp->hobl_failed; +} + +static bool use_edp_low_vswing(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + return i915->vbt.edp.low_vswing; +} + static const struct intel_ddi_buf_trans * intel_get_buf_trans(const struct intel_ddi_buf_trans *trans, int *num_entries) { @@ -1057,14 +1072,12 @@ bdw_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) return intel_get_buf_trans(&bdw_trans_fdi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&bdw_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - i915->vbt.edp.low_vswing) + use_edp_low_vswing(encoder)) return intel_get_buf_trans(&bdw_trans_edp, n_entries); else return intel_get_buf_trans(&bdw_trans_dp, n_entries); @@ -1094,12 +1107,10 @@ skl_y_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&skl_y_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - i915->vbt.edp.low_vswing) + use_edp_low_vswing(encoder)) return _skl_get_buf_trans_dp(encoder, &skl_y_trans_edp, n_entries); else return _skl_get_buf_trans_dp(encoder, &skl_y_trans_dp, n_entries); @@ -1110,12 +1121,10 @@ skl_u_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&skl_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - i915->vbt.edp.low_vswing) + use_edp_low_vswing(encoder)) return _skl_get_buf_trans_dp(encoder, &skl_u_trans_edp, n_entries); else return _skl_get_buf_trans_dp(encoder, &skl_u_trans_dp, n_entries); @@ -1126,12 +1135,10 @@ skl_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&skl_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - i915->vbt.edp.low_vswing) + use_edp_low_vswing(encoder)) return _skl_get_buf_trans_dp(encoder, &skl_trans_edp, n_entries); else return _skl_get_buf_trans_dp(encoder, &skl_trans_dp, n_entries); @@ -1142,12 +1149,10 @@ kbl_y_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - if 
(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&skl_y_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - i915->vbt.edp.low_vswing) + use_edp_low_vswing(encoder)) return _skl_get_buf_trans_dp(encoder, &skl_y_trans_edp, n_entries); else return _skl_get_buf_trans_dp(encoder, &kbl_y_trans_dp, n_entries); @@ -1158,12 +1163,10 @@ kbl_u_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&skl_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - i915->vbt.edp.low_vswing) + use_edp_low_vswing(encoder)) return _skl_get_buf_trans_dp(encoder, &skl_u_trans_edp, n_entries); else return _skl_get_buf_trans_dp(encoder, &kbl_u_trans_dp, n_entries); @@ -1174,12 +1177,10 @@ kbl_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&skl_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - i915->vbt.edp.low_vswing) + use_edp_low_vswing(encoder)) return _skl_get_buf_trans_dp(encoder, &skl_trans_edp, n_entries); else return _skl_get_buf_trans_dp(encoder, &kbl_trans_dp, n_entries); @@ -1190,12 +1191,10 @@ bxt_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&bxt_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - i915->vbt.edp.low_vswing) + use_edp_low_vswing(encoder)) return intel_get_buf_trans(&bxt_trans_edp, n_entries); else return intel_get_buf_trans(&bxt_trans_dp, n_entries); @@ -1215,12 +1214,10 @@ icl_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - if (crtc_state->port_clock > 540000) { return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries); - } else if (dev_priv->vbt.edp.low_vswing) { + } else if (use_edp_low_vswing(encoder)) { return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2, n_entries); } @@ -1282,12 +1279,10 @@ ehl_get_combo_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - dev_priv->vbt.edp.low_vswing) + use_edp_low_vswing(encoder)) return ehl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); else return intel_get_buf_trans(&ehl_combo_phy_trans_dp, n_entries); @@ -1309,12 +1304,10 @@ jsl_get_combo_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - 
dev_priv->vbt.edp.low_vswing) + use_edp_low_vswing(encoder)) return jsl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); else return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries); @@ -1346,16 +1339,13 @@ tgl_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - if (crtc_state->port_clock > 540000) { return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries); - } else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) { + } else if (use_edp_hobl(encoder)) { return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl, n_entries); - } else if (dev_priv->vbt.edp.low_vswing) { + } else if (use_edp_low_vswing(encoder)) { return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2, n_entries); } @@ -1394,16 +1384,13 @@ dg1_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - if (crtc_state->port_clock > 540000) return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries); - else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) + else if (use_edp_hobl(encoder)) return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl, n_entries); - else if (dev_priv->vbt.edp.low_vswing) + else if (use_edp_low_vswing(encoder)) return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2, n_entries); else @@ -1439,16 +1426,13 @@ rkl_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - if (crtc_state->port_clock > 540000) { return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries); - } else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) { + } else if (use_edp_hobl(encoder)) { return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl, n_entries); - } else if (dev_priv->vbt.edp.low_vswing) { + } else if (use_edp_low_vswing(encoder)) { return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2, n_entries); } @@ -1485,14 +1469,11 @@ adls_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - if (crtc_state->port_clock > 540000) return intel_get_buf_trans(&adls_combo_phy_trans_edp_hbr3, n_entries); - else if (i915->vbt.edp.hobl && !intel_dp->hobl_failed) + else if (use_edp_hobl(encoder)) return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl, n_entries); - else if (i915->vbt.edp.low_vswing) + else if (use_edp_low_vswing(encoder)) return intel_get_buf_trans(&adls_combo_phy_trans_edp_hbr2, n_entries); else return adls_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); @@ -1527,16 +1508,13 @@ adlp_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - if (crtc_state->port_clock > 540000) { return intel_get_buf_trans(&adlp_combo_phy_trans_edp_hbr3, n_entries); - } else if (dev_priv->vbt.edp.hobl && 
!intel_dp->hobl_failed) { + } else if (use_edp_hobl(encoder)) { return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl, n_entries); - } else if (dev_priv->vbt.edp.low_vswing) { + } else if (use_edp_low_vswing(encoder)) { return intel_get_buf_trans(&adlp_combo_phy_trans_edp_up_to_hbr2, n_entries); } diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index ec403e46a328..6fbad5c6cc71 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -32,6 +32,7 @@ #include <linux/module.h> #include <linux/dma-resv.h> #include <linux/slab.h> +#include <linux/vga_switcheroo.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> @@ -41,6 +42,7 @@ #include <drm/drm_edid.h> #include <drm/drm_fourcc.h> #include <drm/drm_plane_helper.h> +#include <drm/drm_privacy_screen_consumer.h> #include <drm/drm_probe_helper.h> #include <drm/drm_rect.h> @@ -70,11 +72,10 @@ #include "gt/gen8_ppgtt.h" -#include "pxp/intel_pxp.h" - #include "g4x_dp.h" #include "g4x_hdmi.h" #include "i915_drv.h" +#include "icl_dsi.h" #include "intel_acpi.h" #include "intel_atomic.h" #include "intel_atomic_plane.h" @@ -96,6 +97,8 @@ #include "intel_hotplug.h" #include "intel_overlay.h" #include "intel_panel.h" +#include "intel_pch_display.h" +#include "intel_pch_refclk.h" #include "intel_pcode.h" #include "intel_pipe_crc.h" #include "intel_plane_initial.h" @@ -103,19 +106,15 @@ #include "intel_pps.h" #include "intel_psr.h" #include "intel_quirks.h" -#include "intel_sbi.h" #include "intel_sprite.h" #include "intel_tc.h" #include "intel_vga.h" #include "i9xx_plane.h" #include "skl_scaler.h" #include "skl_universal_plane.h" +#include "vlv_dsi_pll.h" #include "vlv_sideband.h" - -static void i9xx_crtc_clock_get(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config); -static void ilk_pch_clock_get(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config); +#include "vlv_dsi.h" static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state); static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); @@ -341,6 +340,14 @@ is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state) is_trans_port_sync_slave(crtc_state); } +static struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state) +{ + if (crtc_state->bigjoiner_slave) + return crtc_state->bigjoiner_linked_crtc; + else + return to_intel_crtc(crtc_state->uapi.crtc); +} + static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv, enum pipe pipe) { @@ -454,80 +461,6 @@ static void assert_planes_disabled(struct intel_crtc *crtc) assert_plane_disabled(plane); } -void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - u32 val; - bool enabled; - - val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe)); - enabled = !!(val & TRANS_ENABLE); - I915_STATE_WARN(enabled, - "transcoder assertion failed, should be off on pipe %c but is still active\n", - pipe_name(pipe)); -} - -static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe, enum port port, - i915_reg_t dp_reg) -{ - enum pipe port_pipe; - bool state; - - state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe); - - I915_STATE_WARN(state && port_pipe == pipe, - "PCH DP %c enabled on transcoder %c, should be disabled\n", - port_name(port), pipe_name(pipe)); - - I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, - "IBX PCH DP %c still using 
transcoder B\n", - port_name(port)); -} - -static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe, enum port port, - i915_reg_t hdmi_reg) -{ - enum pipe port_pipe; - bool state; - - state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe); - - I915_STATE_WARN(state && port_pipe == pipe, - "PCH HDMI %c enabled on transcoder %c, should be disabled\n", - port_name(port), pipe_name(pipe)); - - I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, - "IBX PCH HDMI %c still using transcoder B\n", - port_name(port)); -} - -static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - enum pipe port_pipe; - - assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B); - assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C); - assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D); - - I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) && - port_pipe == pipe, - "PCH VGA enabled on transcoder %c, should be disabled\n", - pipe_name(pipe)); - - I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) && - port_pipe == pipe, - "PCH LVDS enabled on transcoder %c, should be disabled\n", - pipe_name(pipe)); - - /* PCH SDVOB multiplex with HDMIB */ - assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB); - assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC); - assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID); -} - void vlv_wait_port_ready(struct drm_i915_private *dev_priv, struct intel_digital_port *dig_port, unsigned int expected_mask) @@ -562,154 +495,6 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, expected_mask); } -static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; - i915_reg_t reg; - u32 val, pipeconf_val; - - /* Make sure PCH DPLL is enabled */ - assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll); - - /* FDI must be feeding us bits for PCH ports */ - assert_fdi_tx_enabled(dev_priv, pipe); - assert_fdi_rx_enabled(dev_priv, pipe); - - if (HAS_PCH_CPT(dev_priv)) { - reg = TRANS_CHICKEN2(pipe); - val = intel_de_read(dev_priv, reg); - /* - * Workaround: Set the timing override bit - * before enabling the pch transcoder. - */ - val |= TRANS_CHICKEN2_TIMING_OVERRIDE; - /* Configure frame start delay to match the CPU */ - val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; - val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1); - intel_de_write(dev_priv, reg, val); - } - - reg = PCH_TRANSCONF(pipe); - val = intel_de_read(dev_priv, reg); - pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe)); - - if (HAS_PCH_IBX(dev_priv)) { - /* Configure frame start delay to match the CPU */ - val &= ~TRANS_FRAME_START_DELAY_MASK; - val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1); - - /* - * Make the BPC in transcoder be consistent with - * that in pipeconf reg. For HDMI we must use 8bpc - * here for both 8bpc and 12bpc. 
- */ - val &= ~PIPECONF_BPC_MASK; - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - val |= PIPECONF_8BPC; - else - val |= pipeconf_val & PIPECONF_BPC_MASK; - } - - val &= ~TRANS_INTERLACE_MASK; - if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) { - if (HAS_PCH_IBX(dev_priv) && - intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) - val |= TRANS_LEGACY_INTERLACED_ILK; - else - val |= TRANS_INTERLACED; - } else { - val |= TRANS_PROGRESSIVE; - } - - intel_de_write(dev_priv, reg, val | TRANS_ENABLE); - if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100)) - drm_err(&dev_priv->drm, "failed to enable transcoder %c\n", - pipe_name(pipe)); -} - -static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, - enum transcoder cpu_transcoder) -{ - u32 val, pipeconf_val; - - /* FDI must be feeding us bits for PCH ports */ - assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder); - assert_fdi_rx_enabled(dev_priv, PIPE_A); - - val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A)); - /* Workaround: set timing override bit. */ - val |= TRANS_CHICKEN2_TIMING_OVERRIDE; - /* Configure frame start delay to match the CPU */ - val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; - val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1); - intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); - - val = TRANS_ENABLE; - pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); - - if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == - PIPECONF_INTERLACED_ILK) - val |= TRANS_INTERLACED; - else - val |= TRANS_PROGRESSIVE; - - intel_de_write(dev_priv, LPT_TRANSCONF, val); - if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF, - TRANS_STATE_ENABLE, 100)) - drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n"); -} - -static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - i915_reg_t reg; - u32 val; - - /* FDI relies on the transcoder */ - assert_fdi_tx_disabled(dev_priv, pipe); - assert_fdi_rx_disabled(dev_priv, pipe); - - /* Ports must be off as well */ - assert_pch_ports_disabled(dev_priv, pipe); - - reg = PCH_TRANSCONF(pipe); - val = intel_de_read(dev_priv, reg); - val &= ~TRANS_ENABLE; - intel_de_write(dev_priv, reg, val); - /* wait for PCH transcoder off, transcoder state */ - if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50)) - drm_err(&dev_priv->drm, "failed to disable transcoder %c\n", - pipe_name(pipe)); - - if (HAS_PCH_CPT(dev_priv)) { - /* Workaround: Clear the timing override chicken bit again. */ - reg = TRANS_CHICKEN2(pipe); - val = intel_de_read(dev_priv, reg); - val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; - intel_de_write(dev_priv, reg, val); - } -} - -void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) -{ - u32 val; - - val = intel_de_read(dev_priv, LPT_TRANSCONF); - val &= ~TRANS_ENABLE; - intel_de_write(dev_priv, LPT_TRANSCONF, val); - /* wait for PCH transcoder off, transcoder state */ - if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF, - TRANS_STATE_ENABLE, 50)) - drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n"); - - /* Workaround: clear timing override bit. 
 */
-	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
-	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
-	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
-}
-
 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -823,14 +608,6 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
 	intel_wait_for_pipe_off(old_crtc_state);
 }
 
-bool
-intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
-				    u64 modifier)
-{
-	return info->is_yuv &&
-	       info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
-}
-
 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
 {
 	unsigned int size = 0;
@@ -850,7 +627,11 @@ unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info
 	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
 		unsigned int plane_size;
 
-		plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;
+		if (rem_info->plane[i].linear)
+			plane_size = rem_info->plane[i].size;
+		else
+			plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;
+
 		if (plane_size == 0)
 			continue;
@@ -869,7 +650,7 @@ bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 
 	return DISPLAY_VER(dev_priv) < 4 ||
-		(plane->has_fbc &&
+		(plane->fbc &&
 		 plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
 }
 
@@ -885,7 +666,7 @@ u32 intel_fb_xy_to_linear(int x, int y,
 {
 	const struct drm_framebuffer *fb = state->hw.fb;
 	unsigned int cpp = fb->format->cpp[color_plane];
-	unsigned int pitch = state->view.color_plane[color_plane].stride;
+	unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;
 
 	return y * pitch + x * cpp;
 }
@@ -904,136 +685,6 @@ void intel_add_fb_offsets(int *x, int *y,
 	*y += state->view.color_plane[color_plane].y;
 }
 
-/*
- * From the Sky Lake PRM:
- * "The Color Control Surface (CCS) contains the compression status of
- * the cache-line pairs. The compression state of the cache-line pair
- * is specified by 2 bits in the CCS. Each CCS cache-line represents
- * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
- * cache-line-pairs. CCS is always Y tiled."
- *
- * Since cache line pairs refer to horizontally adjacent cache lines,
- * each cache line in the CCS corresponds to an area of 32x16 cache
- * lines on the main surface. Since each pixel is 4 bytes, this gives
- * us a ratio of one byte in the CCS for each 8x16 pixels in the
- * main surface.
- */
-static const struct drm_format_info skl_ccs_formats[] = {
-	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
-	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
-	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
-	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
-	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
-	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
-	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
-	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
-};
-
-/*
- * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
- * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
- * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
- * 32x32 pixels, the ratio turns out to be 1B in the CCS for every 2x32 pixels in
- * the main surface.
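
As a quick cross-check of the two CCS ratios quoted in the comments above (one CCS byte per 8x16 main-surface pixels on SKL, one per 2x32 on Gen12), here is an editor's standalone sketch of the arithmetic — not driver code:

#include <assert.h>

int main(void)
{
	const int cacheline = 64, cpp = 4;	/* bytes, as the comments assume */

	/* SKL: 2 CCS bits per 128B cache-line pair -> 4 pairs per CCS byte */
	int skl_px_per_ccs_byte = 4 * 2 * cacheline / cpp;	/* 128 = 8x16 */
	/* Gen12: 4 CCS bits per pair -> 2 pairs per CCS byte */
	int g12_px_per_ccs_byte = 2 * 2 * cacheline / cpp;	/* 64 = 2x32 */

	assert(skl_px_per_ccs_byte == 8 * 16);
	assert(g12_px_per_ccs_byte == 2 * 32);
	return 0;
}
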
- */ -static const struct drm_format_info gen12_ccs_formats[] = { - { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, - .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 1, .vsub = 1, }, - { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, - .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 1, .vsub = 1, }, - { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, - .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 1, .vsub = 1, .has_alpha = true }, - { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, - .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 1, .vsub = 1, .has_alpha = true }, - { .format = DRM_FORMAT_YUYV, .num_planes = 2, - .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 2, .vsub = 1, .is_yuv = true }, - { .format = DRM_FORMAT_YVYU, .num_planes = 2, - .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 2, .vsub = 1, .is_yuv = true }, - { .format = DRM_FORMAT_UYVY, .num_planes = 2, - .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 2, .vsub = 1, .is_yuv = true }, - { .format = DRM_FORMAT_VYUY, .num_planes = 2, - .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 2, .vsub = 1, .is_yuv = true }, - { .format = DRM_FORMAT_XYUV8888, .num_planes = 2, - .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 1, .vsub = 1, .is_yuv = true }, - { .format = DRM_FORMAT_NV12, .num_planes = 4, - .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 }, - .hsub = 2, .vsub = 2, .is_yuv = true }, - { .format = DRM_FORMAT_P010, .num_planes = 4, - .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, - .hsub = 2, .vsub = 2, .is_yuv = true }, - { .format = DRM_FORMAT_P012, .num_planes = 4, - .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, - .hsub = 2, .vsub = 2, .is_yuv = true }, - { .format = DRM_FORMAT_P016, .num_planes = 4, - .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, - .hsub = 2, .vsub = 2, .is_yuv = true }, -}; - -/* - * Same as gen12_ccs_formats[] above, but with additional surface used - * to pass Clear Color information in plane 2 with 64 bits of data. 
- */ -static const struct drm_format_info gen12_ccs_cc_formats[] = { - { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3, - .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, - .hsub = 1, .vsub = 1, }, - { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3, - .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, - .hsub = 1, .vsub = 1, }, - { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3, - .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, - .hsub = 1, .vsub = 1, .has_alpha = true }, - { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3, - .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, - .hsub = 1, .vsub = 1, .has_alpha = true }, -}; - -static const struct drm_format_info * -lookup_format_info(const struct drm_format_info formats[], - int num_formats, u32 format) -{ - int i; - - for (i = 0; i < num_formats; i++) { - if (formats[i].format == format) - return &formats[i]; - } - - return NULL; -} - -static const struct drm_format_info * -intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd) -{ - switch (cmd->modifier[0]) { - case I915_FORMAT_MOD_Y_TILED_CCS: - case I915_FORMAT_MOD_Yf_TILED_CCS: - return lookup_format_info(skl_ccs_formats, - ARRAY_SIZE(skl_ccs_formats), - cmd->pixel_format); - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: - case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: - return lookup_format_info(gen12_ccs_formats, - ARRAY_SIZE(gen12_ccs_formats), - cmd->pixel_format); - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: - return lookup_format_info(gen12_ccs_cc_formats, - ARRAY_SIZE(gen12_ccs_cc_formats), - cmd->pixel_format); - default: - return NULL; - } -} - u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, u32 pixel_format, u64 modifier) { @@ -1048,7 +699,7 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, * the highest stride limits of them all, * if in case pipe A is disabled, use the first pipe from pipe_mask. */ - crtc = intel_get_first_crtc(dev_priv); + crtc = intel_first_crtc(dev_priv); if (!crtc) return 0; @@ -1126,7 +777,7 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc, */ if (HAS_GMCH(dev_priv) && intel_set_memory_cxsr(dev_priv, false)) - intel_wait_for_vblank(dev_priv, crtc->pipe); + intel_crtc_wait_for_next_vblank(crtc); /* * Gen2 reports pipe underruns whenever all planes are disabled. 
@@ -1135,8 +786,8 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc,
 	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
 
-	intel_disable_plane(plane, crtc_state);
-	intel_wait_for_vblank(dev_priv, crtc->pipe);
+	intel_plane_disable_arm(plane, crtc_state);
+	intel_crtc_wait_for_next_vblank(crtc);
 }
 
 unsigned int
@@ -1310,26 +961,6 @@ unlock:
 	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
 }
 
-static bool underrun_recovery_supported(const struct intel_crtc_state *crtc_state)
-{
-	if (crtc_state->pch_pfit.enabled &&
-	    (crtc_state->pipe_src_w > drm_rect_width(&crtc_state->pch_pfit.dst) ||
-	     crtc_state->pipe_src_h > drm_rect_height(&crtc_state->pch_pfit.dst) ||
-	     crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420))
-		return false;
-
-	if (crtc_state->dsc.compression_enable)
-		return false;
-
-	if (crtc_state->has_psr2)
-		return false;
-
-	if (crtc_state->splitter.enable)
-		return false;
-
-	return true;
-}
-
 static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1353,19 +984,18 @@ static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
 	 */
 	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
 
-	if (IS_DG2(dev_priv)) {
-		/*
-		 * Underrun recovery must always be disabled on DG2. However
-		 * the chicken bit meaning is inverted compared to other
-		 * platforms.
-		 */
+	/*
+	 * Underrun recovery must always be disabled on display 13+.
+	 * DG2 chicken bit meaning is inverted compared to other platforms.
+	 */
+	if (IS_DG2(dev_priv))
 		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
-	} else if (DISPLAY_VER(dev_priv) >= 13) {
-		if (underrun_recovery_supported(crtc_state))
-			tmp &= ~UNDERRUN_RECOVERY_DISABLE_ADLP;
-		else
-			tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
-	}
+	else if (DISPLAY_VER(dev_priv) >= 13)
+		tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
+
+	/* Wa_14010547955:dg2 */
+	if (IS_DG2_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER))
+		tmp |= DG2_RENDER_CCSTAG_4_3_EN;
 
 	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
 }
@@ -1387,7 +1017,7 @@ bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
 		if (cleanup_done)
 			continue;
 
-		drm_crtc_wait_one_vblank(crtc);
+		intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));
 
 		return true;
 	}
@@ -1395,158 +1025,6 @@ bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
 	return false;
 }
 
-void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
-{
-	u32 temp;
-
-	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
-
-	mutex_lock(&dev_priv->sb_lock);
-
-	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
-	temp |= SBI_SSCCTL_DISABLE;
-	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
-
-	mutex_unlock(&dev_priv->sb_lock);
-}
-
-/* Program iCLKIP clock to the desired frequency */
-static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
-{
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
-	u32 divsel, phaseinc, auxdiv, phasedir = 0;
-	u32 temp;
-
-	lpt_disable_iclkip(dev_priv);
-
-	/* The iCLK virtual clock root frequency is in MHz,
-	 * but the adjusted_mode->crtc_clock is in KHz. To get the
-	 * divisors, it is necessary to divide one by another, so we
-	 * convert the virtual clock precision to KHz here for higher
-	 * precision.
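
To make the divisor search that follows concrete, a worked sketch of the arithmetic (editor's example, not driver code): for a 270 MHz clock, crtc_clock = 270000 kHz, the first auxdiv iteration already lands in range.

	u32 clock = 270000;				/* kHz */
	u32 iclk_virtual_root_freq = 172800 * 1000;	/* kHz */
	u32 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						clock << 0);	/* 640 */
	u32 divsel = desired_divisor / 64 - 2;		/* 8, well under 0x7f */
	u32 phaseinc = desired_divisor % 64;		/* 0 */

lpt_get_iclkip() below inverts this exactly: (8 + 2) * 64 + 0 = 640, and 172800000 / 640 = 270000 kHz again.
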
- */ - for (auxdiv = 0; auxdiv < 2; auxdiv++) { - u32 iclk_virtual_root_freq = 172800 * 1000; - u32 iclk_pi_range = 64; - u32 desired_divisor; - - desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, - clock << auxdiv); - divsel = (desired_divisor / iclk_pi_range) - 2; - phaseinc = desired_divisor % iclk_pi_range; - - /* - * Near 20MHz is a corner case which is - * out of range for the 7-bit divisor - */ - if (divsel <= 0x7f) - break; - } - - /* This should not happen with any sane values */ - drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) & - ~SBI_SSCDIVINTPHASE_DIVSEL_MASK); - drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) & - ~SBI_SSCDIVINTPHASE_INCVAL_MASK); - - drm_dbg_kms(&dev_priv->drm, - "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", - clock, auxdiv, divsel, phasedir, phaseinc); - - mutex_lock(&dev_priv->sb_lock); - - /* Program SSCDIVINTPHASE6 */ - temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); - temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; - temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel); - temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK; - temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc); - temp |= SBI_SSCDIVINTPHASE_DIR(phasedir); - temp |= SBI_SSCDIVINTPHASE_PROPAGATE; - intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK); - - /* Program SSCAUXDIV */ - temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); - temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1); - temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv); - intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK); - - /* Enable modulator and associated divider */ - temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); - temp &= ~SBI_SSCCTL_DISABLE; - intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); - - mutex_unlock(&dev_priv->sb_lock); - - /* Wait for initialization time */ - udelay(24); - - intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE); -} - -int lpt_get_iclkip(struct drm_i915_private *dev_priv) -{ - u32 divsel, phaseinc, auxdiv; - u32 iclk_virtual_root_freq = 172800 * 1000; - u32 iclk_pi_range = 64; - u32 desired_divisor; - u32 temp; - - if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0) - return 0; - - mutex_lock(&dev_priv->sb_lock); - - temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); - if (temp & SBI_SSCCTL_DISABLE) { - mutex_unlock(&dev_priv->sb_lock); - return 0; - } - - temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); - divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >> - SBI_SSCDIVINTPHASE_DIVSEL_SHIFT; - phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >> - SBI_SSCDIVINTPHASE_INCVAL_SHIFT; - - temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); - auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >> - SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT; - - mutex_unlock(&dev_priv->sb_lock); - - desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc; - - return DIV_ROUND_CLOSEST(iclk_virtual_root_freq, - desired_divisor << auxdiv); -} - -static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, - enum pipe pch_transcoder) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - - intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder), - intel_de_read(dev_priv, HTOTAL(cpu_transcoder))); - intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder), - intel_de_read(dev_priv, HBLANK(cpu_transcoder))); - 
intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
-		       intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
-
-	intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
-		       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
-	intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
-		       intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
-	intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
-		       intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
-	intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
-		       intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
-}
-
 /*
  * Finds the encoder associated with the given CRTC. This can only be
  * used when we know that the CRTC isn't feeding multiple encoders!
@@ -1555,15 +1033,17 @@ struct intel_encoder *
 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
 			   const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	const struct drm_connector_state *connector_state;
 	const struct drm_connector *connector;
 	struct intel_encoder *encoder = NULL;
+	struct intel_crtc *master_crtc;
 	int num_encoders = 0;
 	int i;
 
+	master_crtc = intel_master_crtc(crtc_state);
+
 	for_each_new_connector_in_state(&state->base, connector,
 					connector_state, i) {
-		if (connector_state->crtc != &crtc->base)
+		if (connector_state->crtc != &master_crtc->base)
 			continue;
 
 		encoder = to_intel_encoder(connector_state->best_encoder);
@@ -1572,111 +1052,11 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
 
 	drm_WARN(encoder->base.dev, num_encoders != 1,
 		 "%d encoders for pipe %c\n",
-		 num_encoders, pipe_name(crtc->pipe));
+		 num_encoders, pipe_name(master_crtc->pipe));
 
 	return encoder;
 }
 
-/*
- * Enable PCH resources required for PCH ports:
- *   - PCH PLLs
- *   - FDI training & RX/TX
- *   - update transcoder timings
- *   - DP transcoding bits
- *   - transcoder
- */
-static void ilk_pch_enable(const struct intel_atomic_state *state,
-			   const struct intel_crtc_state *crtc_state)
-{
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	enum pipe pipe = crtc->pipe;
-	u32 temp;
-
-	assert_pch_transcoder_disabled(dev_priv, pipe);
-
-	/* For PCH output, training FDI link */
-	intel_fdi_link_train(crtc, crtc_state);
-
-	/* We need to program the right clock selection before writing the pixel
-	 * multiplier into the DPLL. */
-	if (HAS_PCH_CPT(dev_priv)) {
-		u32 sel;
-
-		temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
-		temp |= TRANS_DPLL_ENABLE(pipe);
-		sel = TRANS_DPLLB_SEL(pipe);
-		if (crtc_state->shared_dpll ==
-		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
-			temp |= sel;
-		else
-			temp &= ~sel;
-		intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
-	}
-
-	/* XXX: PCH PLLs can be enabled any time before we enable the PCH
-	 * transcoder, and we actually should do this to not upset any PCH
-	 * transcoder that already uses the clock when we share it.
-	 *
-	 * Note that enable_shared_dpll tries to do the right thing, but
-	 * get_shared_dpll unconditionally resets the pll - we need that to have
-	 * the right LVDS enable sequence.
*/ - intel_enable_shared_dpll(crtc_state); - - /* set transcoder timing, panel must allow it */ - assert_pps_unlocked(dev_priv, pipe); - ilk_pch_transcoder_set_timings(crtc_state, pipe); - - intel_fdi_normal_train(crtc); - - /* For PCH DP, enable TRANS_DP_CTL */ - if (HAS_PCH_CPT(dev_priv) && - intel_crtc_has_dp_encoder(crtc_state)) { - const struct drm_display_mode *adjusted_mode = - &crtc_state->hw.adjusted_mode; - u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; - i915_reg_t reg = TRANS_DP_CTL(pipe); - enum port port; - - temp = intel_de_read(dev_priv, reg); - temp &= ~(TRANS_DP_PORT_SEL_MASK | - TRANS_DP_SYNC_MASK | - TRANS_DP_BPC_MASK); - temp |= TRANS_DP_OUTPUT_ENABLE; - temp |= bpc << 9; /* same format but at 11:9 */ - - if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) - temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; - if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) - temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; - - port = intel_get_crtc_new_encoder(state, crtc_state)->port; - drm_WARN_ON(dev, port < PORT_B || port > PORT_D); - temp |= TRANS_DP_PORT_SEL(port); - - intel_de_write(dev_priv, reg, temp); - } - - ilk_enable_pch_transcoder(crtc_state); -} - -void lpt_pch_enable(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - - assert_pch_transcoder_disabled(dev_priv, PIPE_A); - - lpt_program_iclkip(crtc_state); - - /* Set transcoder timing. */ - ilk_pch_transcoder_set_timings(crtc_state, PIPE_A); - - lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); -} - static void cpt_verify_modeset(struct drm_i915_private *dev_priv, enum pipe pipe) { @@ -1784,7 +1164,7 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state) } /* We need to wait for a vblank before we can disable the plane. 
*/ - intel_wait_for_vblank(dev_priv, crtc->pipe); + intel_crtc_wait_for_next_vblank(crtc); } static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc) @@ -1919,7 +1299,7 @@ static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); - return crtc_state->uapi.async_flip && intel_vtd_active() && + return crtc_state->uapi.async_flip && intel_vtd_active(i915) && (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915)); } @@ -2015,7 +1395,6 @@ static void intel_crtc_disable_flip_done(struct intel_atomic_state *state, static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); const struct intel_crtc_state *new_crtc_state = @@ -2041,7 +1420,7 @@ static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state, } if (need_vbl_wait) - intel_wait_for_vblank(i915, crtc->pipe); + intel_crtc_wait_for_next_vblank(crtc); } static void intel_pre_plane_update(struct intel_atomic_state *state, @@ -2054,11 +1433,13 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); enum pipe pipe = crtc->pipe; + intel_psr_pre_plane_update(state, crtc); + if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state)) hsw_disable_ips(old_crtc_state); if (intel_fbc_pre_update(state, crtc)) - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); if (!needs_async_flip_vtd_wa(old_crtc_state) && needs_async_flip_vtd_wa(new_crtc_state)) @@ -2090,7 +1471,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, */ if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active && new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false)) - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); /* * IVB workaround: must disable low power watermarks for at least @@ -2101,7 +1482,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, */ if (old_crtc_state->hw.active && new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv)) - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); /* * If we're doing a modeset we don't need to do any @@ -2165,7 +1546,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state, !(update_mask & BIT(plane->id))) continue; - intel_disable_plane(plane, new_crtc_state); + intel_plane_disable_arm(plane, new_crtc_state); if (old_plane_state->uapi.visible) fb_bits |= plane->frontbuffer_bit; @@ -2199,10 +1580,30 @@ intel_connector_primary_encoder(struct intel_connector *connector) static void intel_encoders_update_prepare(struct intel_atomic_state *state) { + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_crtc_state *new_crtc_state, *old_crtc_state; + struct intel_crtc *crtc; struct drm_connector_state *new_conn_state; struct drm_connector *connector; int i; + /* + * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits. + * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook. 
+ */ + if (i915->dpll.mgr) { + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + if (intel_crtc_needs_modeset(new_crtc_state)) + continue; + + new_crtc_state->shared_dpll = old_crtc_state->shared_dpll; + new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state; + } + } + + if (!state->modeset) + return; + for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) { struct intel_connector *intel_connector; @@ -2229,6 +1630,9 @@ static void intel_encoders_update_complete(struct intel_atomic_state *state) struct drm_connector *connector; int i; + if (!state->modeset) + return; + for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) { struct intel_connector *intel_connector; @@ -2316,28 +1720,6 @@ static void intel_encoders_enable(struct intel_atomic_state *state, } } -static void intel_encoders_pre_disable(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - const struct intel_crtc_state *old_crtc_state = - intel_atomic_get_old_crtc_state(state, crtc); - const struct drm_connector_state *old_conn_state; - struct drm_connector *conn; - int i; - - for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { - struct intel_encoder *encoder = - to_intel_encoder(old_conn_state->best_encoder); - - if (old_conn_state->crtc != &crtc->base) - continue; - - if (encoder->pre_disable) - encoder->pre_disable(state, encoder, old_crtc_state, - old_conn_state); - } -} - static void intel_encoders_disable(struct intel_atomic_state *state, struct intel_crtc *crtc) { @@ -2432,7 +1814,7 @@ static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_stat struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_plane *plane = to_intel_plane(crtc->base.primary); - plane->disable_plane(plane, crtc_state); + plane->disable_arm(plane, crtc_state); } static void ilk_crtc_enable(struct intel_atomic_state *state, @@ -2500,7 +1882,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state, intel_enable_transcoder(new_crtc_state); if (new_crtc_state->has_pch_encoder) - ilk_pch_enable(state, new_crtc_state); + ilk_pch_enable(state, crtc); intel_crtc_vblank_on(new_crtc_state); @@ -2516,8 +1898,8 @@ static void ilk_crtc_enable(struct intel_atomic_state *state, * in case there are more corner cases we don't know about. 
*/ if (new_crtc_state->has_pch_encoder) { - intel_wait_for_vblank(dev_priv, pipe); - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); + intel_crtc_wait_for_next_vblank(crtc); } intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); @@ -2592,42 +1974,39 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state) { - struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(master->base.dev); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *master_crtc_state; + struct intel_crtc *master_crtc; struct drm_connector_state *conn_state; struct drm_connector *conn; struct intel_encoder *encoder = NULL; int i; - if (crtc_state->bigjoiner_slave) - master = crtc_state->bigjoiner_linked_crtc; - - master_crtc_state = intel_atomic_get_new_crtc_state(state, master); + master_crtc = intel_master_crtc(crtc_state); + master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc); for_each_new_connector_in_state(&state->base, conn, conn_state, i) { - if (conn_state->crtc != &master->base) + if (conn_state->crtc != &master_crtc->base) continue; encoder = to_intel_encoder(conn_state->best_encoder); break; } - if (!crtc_state->bigjoiner_slave) { - /* need to enable VDSC, which we skipped in pre-enable */ - intel_dsc_enable(encoder, crtc_state); - } else { - /* - * Enable sequence steps 1-7 on bigjoiner master - */ - intel_encoders_pre_pll_enable(state, master); - if (master_crtc_state->shared_dpll) - intel_enable_shared_dpll(master_crtc_state); - intel_encoders_pre_enable(state, master); + /* + * Enable sequence steps 1-7 on bigjoiner master + */ + if (crtc_state->bigjoiner_slave) + intel_encoders_pre_pll_enable(state, master_crtc); - /* and DSC on slave */ - intel_dsc_enable(NULL, crtc_state); - } + if (crtc_state->shared_dpll) + intel_enable_shared_dpll(crtc_state); + + if (crtc_state->bigjoiner_slave) + intel_encoders_pre_enable(state, master_crtc); + + /* need to enable VDSC, which we skipped in pre-enable */ + intel_dsc_enable(crtc_state); if (DISPLAY_VER(dev_priv) >= 13) intel_uncompressed_joiner_enable(crtc_state); @@ -2720,7 +2099,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, intel_encoders_enable(state, crtc); if (psl_clkgate_wa) { - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false); } @@ -2728,8 +2107,12 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, * to change the workaround. 
*/ hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe; if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) { - intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); - intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); + struct intel_crtc *wa_crtc; + + wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe); + + intel_crtc_wait_for_next_vblank(wa_crtc); + intel_crtc_wait_for_next_vblank(wa_crtc); } } @@ -2774,33 +2157,12 @@ static void ilk_crtc_disable(struct intel_atomic_state *state, ilk_pfit_disable(old_crtc_state); if (old_crtc_state->has_pch_encoder) - ilk_fdi_disable(crtc); + ilk_pch_disable(state, crtc); intel_encoders_post_disable(state, crtc); - if (old_crtc_state->has_pch_encoder) { - ilk_disable_pch_transcoder(dev_priv, pipe); - - if (HAS_PCH_CPT(dev_priv)) { - i915_reg_t reg; - u32 temp; - - /* disable TRANS_DP_CTL */ - reg = TRANS_DP_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - temp &= ~(TRANS_DP_OUTPUT_ENABLE | - TRANS_DP_PORT_SEL_MASK); - temp |= TRANS_DP_PORT_SEL_NONE; - intel_de_write(dev_priv, reg, temp); - - /* disable DPLL_SEL */ - temp = intel_de_read(dev_priv, PCH_DPLL_SEL); - temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe)); - intel_de_write(dev_priv, PCH_DPLL_SEL, temp); - } - - ilk_fdi_pll_disable(crtc); - } + if (old_crtc_state->has_pch_encoder) + ilk_pch_post_disable(state, crtc); intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); @@ -2809,12 +2171,17 @@ static void ilk_crtc_disable(struct intel_atomic_state *state, static void hsw_crtc_disable(struct intel_atomic_state *state, struct intel_crtc *crtc) { + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + /* * FIXME collapse everything to one hook. * Need care with mst->ddi interactions. */ - intel_encoders_disable(state, crtc); - intel_encoders_post_disable(state, crtc); + if (!old_crtc_state->bigjoiner_slave) { + intel_encoders_disable(state, crtc); + intel_encoders_post_disable(state, crtc); + } } static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) @@ -3171,7 +2538,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state, /* prevents spurious underruns */ if (DISPLAY_VER(dev_priv) == 2) - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); } static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) @@ -3202,7 +2569,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state, * wait for planes to fully turn off before disabling the pipe. 
	 */
 	if (DISPLAY_VER(dev_priv) == 2)
-		intel_wait_for_vblank(dev_priv, pipe);
+		intel_crtc_wait_for_next_vblank(crtc);
 
 	intel_encoders_disable(state, crtc);
@@ -4306,414 +3673,6 @@ out:
 	return ret;
 }
 
-static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
-{
-	struct intel_encoder *encoder;
-	int i;
-	u32 val, final;
-	bool has_lvds = false;
-	bool has_cpu_edp = false;
-	bool has_panel = false;
-	bool has_ck505 = false;
-	bool can_ssc = false;
-	bool using_ssc_source = false;
-
-	/* We need to take the global config into account */
-	for_each_intel_encoder(&dev_priv->drm, encoder) {
-		switch (encoder->type) {
-		case INTEL_OUTPUT_LVDS:
-			has_panel = true;
-			has_lvds = true;
-			break;
-		case INTEL_OUTPUT_EDP:
-			has_panel = true;
-			if (encoder->port == PORT_A)
-				has_cpu_edp = true;
-			break;
-		default:
-			break;
-		}
-	}
-
-	if (HAS_PCH_IBX(dev_priv)) {
-		has_ck505 = dev_priv->vbt.display_clock_mode;
-		can_ssc = has_ck505;
-	} else {
-		has_ck505 = false;
-		can_ssc = true;
-	}
-
-	/* Check if any DPLLs are using the SSC source */
-	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
-		u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
-
-		if (!(temp & DPLL_VCO_ENABLE))
-			continue;
-
-		if ((temp & PLL_REF_INPUT_MASK) ==
-		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
-			using_ssc_source = true;
-			break;
-		}
-	}
-
-	drm_dbg_kms(&dev_priv->drm,
-		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
-		    has_panel, has_lvds, has_ck505, using_ssc_source);
-
-	/* Ironlake: try to set up the display ref clock before DPLL
-	 * enabling. This is only under driver's control after
-	 * PCH B stepping; previous chipset steppings ignore this setting.
-	 */
-	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
-
-	/* As we must carefully and slowly disable/enable each source in turn,
-	 * compute the final state we want first and check if we need to
-	 * make any changes at all.
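
The register programming below boils down to a small policy on four booleans. An editor's distillation with illustrative flag names (not the driver's DREF_* register constants; use_ssc folds intel_panel_use_ssc() && can_ssc):

enum {
	SSC_SOURCE     = 1 << 0,	/* stands in for DREF_SSC_SOURCE_ENABLE */
	SSC1           = 1 << 1,	/* DREF_SSC1_ENABLE */
	CPU_DOWNSPREAD = 1 << 2,	/* DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD */
	CPU_NONSPREAD  = 1 << 3,	/* DREF_CPU_SOURCE_OUTPUT_NONSPREAD */
	CPU_DISABLE    = 1 << 4,	/* DREF_CPU_SOURCE_OUTPUT_DISABLE */
};

static unsigned int dref_final(bool has_panel, bool has_cpu_edp,
			       bool use_ssc, bool using_ssc_source)
{
	unsigned int final = 0;

	if (has_panel) {
		final |= SSC_SOURCE;
		if (use_ssc)
			final |= SSC1;
		if (has_cpu_edp)
			final |= use_ssc ? CPU_DOWNSPREAD : CPU_NONSPREAD;
		else
			final |= CPU_DISABLE;
	} else if (using_ssc_source) {
		final |= SSC_SOURCE | SSC1;
	}

	return final;
}
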
- */ - final = val; - final &= ~DREF_NONSPREAD_SOURCE_MASK; - if (has_ck505) - final |= DREF_NONSPREAD_CK505_ENABLE; - else - final |= DREF_NONSPREAD_SOURCE_ENABLE; - - final &= ~DREF_SSC_SOURCE_MASK; - final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; - final &= ~DREF_SSC1_ENABLE; - - if (has_panel) { - final |= DREF_SSC_SOURCE_ENABLE; - - if (intel_panel_use_ssc(dev_priv) && can_ssc) - final |= DREF_SSC1_ENABLE; - - if (has_cpu_edp) { - if (intel_panel_use_ssc(dev_priv) && can_ssc) - final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; - else - final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; - } else - final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; - } else if (using_ssc_source) { - final |= DREF_SSC_SOURCE_ENABLE; - final |= DREF_SSC1_ENABLE; - } - - if (final == val) - return; - - /* Always enable nonspread source */ - val &= ~DREF_NONSPREAD_SOURCE_MASK; - - if (has_ck505) - val |= DREF_NONSPREAD_CK505_ENABLE; - else - val |= DREF_NONSPREAD_SOURCE_ENABLE; - - if (has_panel) { - val &= ~DREF_SSC_SOURCE_MASK; - val |= DREF_SSC_SOURCE_ENABLE; - - /* SSC must be turned on before enabling the CPU output */ - if (intel_panel_use_ssc(dev_priv) && can_ssc) { - drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n"); - val |= DREF_SSC1_ENABLE; - } else - val &= ~DREF_SSC1_ENABLE; - - /* Get SSC going before enabling the outputs */ - intel_de_write(dev_priv, PCH_DREF_CONTROL, val); - intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); - udelay(200); - - val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; - - /* Enable CPU source on CPU attached eDP */ - if (has_cpu_edp) { - if (intel_panel_use_ssc(dev_priv) && can_ssc) { - drm_dbg_kms(&dev_priv->drm, - "Using SSC on eDP\n"); - val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; - } else - val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; - } else - val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; - - intel_de_write(dev_priv, PCH_DREF_CONTROL, val); - intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); - udelay(200); - } else { - drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n"); - - val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; - - /* Turn off CPU output */ - val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; - - intel_de_write(dev_priv, PCH_DREF_CONTROL, val); - intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); - udelay(200); - - if (!using_ssc_source) { - drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n"); - - /* Turn off the SSC source */ - val &= ~DREF_SSC_SOURCE_MASK; - val |= DREF_SSC_SOURCE_DISABLE; - - /* Turn off SSC1 */ - val &= ~DREF_SSC1_ENABLE; - - intel_de_write(dev_priv, PCH_DREF_CONTROL, val); - intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); - udelay(200); - } - } - - BUG_ON(val != final); -} - -/* Implements 3 different sequences from BSpec chapter "Display iCLK - * Programming" based on the parameters passed: - * - Sequence to enable CLKOUT_DP - * - Sequence to enable CLKOUT_DP without spread - * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O - */ -static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, - bool with_spread, bool with_fdi) -{ - u32 reg, tmp; - - if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread, - "FDI requires downspread\n")) - with_spread = true; - if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) && - with_fdi, "LP PCH doesn't have FDI\n")) - with_fdi = false; - - mutex_lock(&dev_priv->sb_lock); - - tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); - tmp &= ~SBI_SSCCTL_DISABLE; - tmp |= SBI_SSCCTL_PATHALT; - intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); - - udelay(24); - - if (with_spread) { - tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, 
SBI_ICLK); - tmp &= ~SBI_SSCCTL_PATHALT; - intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); - - if (with_fdi) - lpt_fdi_program_mphy(dev_priv); - } - - reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; - tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); - tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; - intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); - - mutex_unlock(&dev_priv->sb_lock); -} - -/* Sequence to disable CLKOUT_DP */ -void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) -{ - u32 reg, tmp; - - mutex_lock(&dev_priv->sb_lock); - - reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; - tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); - tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; - intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); - - tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); - if (!(tmp & SBI_SSCCTL_DISABLE)) { - if (!(tmp & SBI_SSCCTL_PATHALT)) { - tmp |= SBI_SSCCTL_PATHALT; - intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); - udelay(32); - } - tmp |= SBI_SSCCTL_DISABLE; - intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); - } - - mutex_unlock(&dev_priv->sb_lock); -} - -#define BEND_IDX(steps) ((50 + (steps)) / 5) - -static const u16 sscdivintphase[] = { - [BEND_IDX( 50)] = 0x3B23, - [BEND_IDX( 45)] = 0x3B23, - [BEND_IDX( 40)] = 0x3C23, - [BEND_IDX( 35)] = 0x3C23, - [BEND_IDX( 30)] = 0x3D23, - [BEND_IDX( 25)] = 0x3D23, - [BEND_IDX( 20)] = 0x3E23, - [BEND_IDX( 15)] = 0x3E23, - [BEND_IDX( 10)] = 0x3F23, - [BEND_IDX( 5)] = 0x3F23, - [BEND_IDX( 0)] = 0x0025, - [BEND_IDX( -5)] = 0x0025, - [BEND_IDX(-10)] = 0x0125, - [BEND_IDX(-15)] = 0x0125, - [BEND_IDX(-20)] = 0x0225, - [BEND_IDX(-25)] = 0x0225, - [BEND_IDX(-30)] = 0x0325, - [BEND_IDX(-35)] = 0x0325, - [BEND_IDX(-40)] = 0x0425, - [BEND_IDX(-45)] = 0x0425, - [BEND_IDX(-50)] = 0x0525, -}; - -/* - * Bend CLKOUT_DP - * steps -50 to 50 inclusive, in steps of 5 - * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz) - * change in clock period = -(steps / 10) * 5.787 ps - */ -static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) -{ - u32 tmp; - int idx = BEND_IDX(steps); - - if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0)) - return; - - if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase))) - return; - - mutex_lock(&dev_priv->sb_lock); - - if (steps % 10 != 0) - tmp = 0xAAAAAAAB; - else - tmp = 0x00000000; - intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK); - - tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK); - tmp &= 0xffff0000; - tmp |= sscdivintphase[idx]; - intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK); - - mutex_unlock(&dev_priv->sb_lock); -} - -#undef BEND_IDX - -static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv) -{ - u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); - u32 ctl = intel_de_read(dev_priv, SPLL_CTL); - - if ((ctl & SPLL_PLL_ENABLE) == 0) - return false; - - if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC && - (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) - return true; - - if (IS_BROADWELL(dev_priv) && - (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW) - return true; - - return false; -} - -static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv, - enum intel_dpll_id id) -{ - u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); - u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id)); - - if ((ctl & WRPLL_PLL_ENABLE) == 0) - return false; - - if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC) - return true; - - if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) && - (ctl & WRPLL_REF_MASK) == 
WRPLL_REF_MUXED_SSC_BDW && - (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) - return true; - - return false; -} - -static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) -{ - struct intel_encoder *encoder; - bool has_fdi = false; - - for_each_intel_encoder(&dev_priv->drm, encoder) { - switch (encoder->type) { - case INTEL_OUTPUT_ANALOG: - has_fdi = true; - break; - default: - break; - } - } - - /* - * The BIOS may have decided to use the PCH SSC - * reference so we must not disable it until the - * relevant PLLs have stopped relying on it. We'll - * just leave the PCH SSC reference enabled in case - * any active PLL is using it. It will get disabled - * after runtime suspend if we don't have FDI. - * - * TODO: Move the whole reference clock handling - * to the modeset sequence proper so that we can - * actually enable/disable/reconfigure these things - * safely. To do that we need to introduce a real - * clock hierarchy. That would also allow us to do - * clock bending finally. - */ - dev_priv->pch_ssc_use = 0; - - if (spll_uses_pch_ssc(dev_priv)) { - drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n"); - dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL); - } - - if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) { - drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n"); - dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1); - } - - if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) { - drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n"); - dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2); - } - - if (dev_priv->pch_ssc_use) - return; - - if (has_fdi) { - lpt_bend_clkout_dp(dev_priv, 0); - lpt_enable_clkout_dp(dev_priv, true, true); - } else { - lpt_disable_clkout_dp(dev_priv); - } -} - -/* - * Initialize reference clocks when the driver loads - */ -void intel_init_pch_refclk(struct drm_i915_private *dev_priv) -{ - if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) - ilk_init_pch_refclk(dev_priv); - else if (HAS_PCH_LPT(dev_priv)) - lpt_init_pch_refclk(dev_priv); -} - static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -4978,8 +3937,8 @@ void intel_dp_get_m_n(struct intel_crtc *crtc, &pipe_config->dp_m2_n2); } -static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +void ilk_get_fdi_m_n_config(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) { intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, &pipe_config->fdi_m_n, NULL); @@ -5116,50 +4075,9 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc, i9xx_get_pipe_color_config(pipe_config); intel_color_get_config(pipe_config); - if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { - struct intel_shared_dpll *pll; - enum intel_dpll_id pll_id; - bool pll_active; - - pipe_config->has_pch_encoder = true; - - tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe)); - pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> - FDI_DP_PORT_WIDTH_SHIFT) + 1; - - ilk_get_fdi_m_n_config(crtc, pipe_config); - - if (HAS_PCH_IBX(dev_priv)) { - /* - * The pipe->pch transcoder and pch transcoder->pll - * mapping is fixed. 
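
Looping back to lpt_bend_clkout_dp() and the sscdivintphase[] table earlier in this hunk, one worked instance of the formula given there: steps = +10 gives BEND_IDX(10) = (50 + 10) / 5 = 12, i.e. table value 0x3F23; since 10 % 10 == 0 the 0x00000000 dither word is used, and the clock period changes by -(10 / 10) * 5.787 = -5.787 ps (the clock speeds up). steps = -10 would lengthen the period by the same amount.
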
-			 */
-			pll_id = (enum intel_dpll_id) crtc->pipe;
-		} else {
-			tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
-			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
-				pll_id = DPLL_ID_PCH_PLL_B;
-			else
-				pll_id = DPLL_ID_PCH_PLL_A;
-		}
-
-		pipe_config->shared_dpll =
-			intel_get_shared_dpll_by_id(dev_priv, pll_id);
-		pll = pipe_config->shared_dpll;
-
-		pll_active = intel_dpll_get_hw_state(dev_priv, pll,
-						     &pipe_config->dpll_hw_state);
-		drm_WARN_ON(dev, !pll_active);
-
-		tmp = pipe_config->dpll_hw_state.dpll;
-		pipe_config->pixel_multiplier =
-			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
-			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
+	pipe_config->pixel_multiplier = 1;
 
-		ilk_pch_clock_get(crtc, pipe_config);
-	} else {
-		pipe_config->pixel_multiplier = 1;
-	}
+	ilk_pch_get_config(pipe_config);
 
 	intel_get_transcoder_timings(crtc, pipe_config);
 	intel_get_pipe_src_size(crtc, pipe_config);
@@ -5174,6 +4092,16 @@ out:
 	return ret;
 }
 
+static u8 bigjoiner_pipes(struct drm_i915_private *i915)
+{
+	if (DISPLAY_VER(i915) >= 12)
+		return BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
+	else if (DISPLAY_VER(i915) >= 11)
+		return BIT(PIPE_B) | BIT(PIPE_C);
+	else
+		return 0;
+}
+
 static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
 					   enum transcoder cpu_transcoder)
 {
@@ -5189,6 +4117,54 @@ static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
 	return tmp & TRANS_DDI_FUNC_ENABLE;
 }
 
+static u8 enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv)
+{
+	u8 master_pipes = 0, slave_pipes = 0;
+	struct intel_crtc *crtc;
+
+	for_each_intel_crtc(&dev_priv->drm, crtc) {
+		enum intel_display_power_domain power_domain;
+		enum pipe pipe = crtc->pipe;
+		intel_wakeref_t wakeref;
+
+		if ((bigjoiner_pipes(dev_priv) & BIT(pipe)) == 0)
+			continue;
+
+		power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
+		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
+			u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
+
+			if (!(tmp & BIG_JOINER_ENABLE))
+				continue;
+
+			if (tmp & MASTER_BIG_JOINER_ENABLE)
+				master_pipes |= BIT(pipe);
+			else
+				slave_pipes |= BIT(pipe);
+		}
+
+		if (DISPLAY_VER(dev_priv) < 13)
+			continue;
+
+		power_domain = POWER_DOMAIN_PIPE(pipe);
+		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
+			u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
+
+			if (tmp & UNCOMPRESSED_JOINER_MASTER)
+				master_pipes |= BIT(pipe);
+			if (tmp & UNCOMPRESSED_JOINER_SLAVE)
+				slave_pipes |= BIT(pipe);
+		}
+	}
+
+	/* Bigjoiner pipes should always be consecutive master and slave */
+	drm_WARN(&dev_priv->drm, slave_pipes != master_pipes << 1,
+		 "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n",
+		 master_pipes, slave_pipes);
+
+	return slave_pipes;
+}
+
 static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
 {
 	u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
@@ -5250,10 +4226,18 @@ static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
 			enabled_transcoders |= BIT(cpu_transcoder);
 	}
 
+	/* single pipe or bigjoiner master */
 	cpu_transcoder = (enum transcoder) crtc->pipe;
 	if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
 		enabled_transcoders |= BIT(cpu_transcoder);
 
+	/* bigjoiner slave -> consider the master pipe's transcoder as well */
+	if (enabled_bigjoiner_pipes(dev_priv) & BIT(crtc->pipe)) {
+		cpu_transcoder = (enum transcoder) crtc->pipe - 1;
+		if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
+			enabled_transcoders |= BIT(cpu_transcoder);
+	}
+
 	return enabled_transcoders;
 }
 
@@ -5374,45 +4358,6 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
 	return transcoder_is_dsi(pipe_config->cpu_transcoder);
 }
 
-static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
-				   struct intel_crtc_state *pipe_config)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
-	enum port port;
-	u32 tmp;
-
-	if (transcoder_is_dsi(cpu_transcoder)) {
-		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
-						PORT_A : PORT_B;
-	} else {
-		tmp = intel_de_read(dev_priv,
-				    TRANS_DDI_FUNC_CTL(cpu_transcoder));
-		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
-			return;
-		if (DISPLAY_VER(dev_priv) >= 12)
-			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
-		else
-			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
-	}
-
-	/*
-	 * Haswell has only FDI/PCH transcoder A, which is connected to
-	 * DDI E. So just check whether this pipe is wired to DDI E and whether
-	 * the PCH transcoder is on.
-	 */
-	if (DISPLAY_VER(dev_priv) < 9 &&
-	    (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
-		pipe_config->has_pch_encoder = true;
-
-		tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
-		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
-					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
-
-		ilk_get_fdi_m_n_config(crtc, pipe_config);
-	}
-}
-
 static bool hsw_get_pipe_config(struct intel_crtc *crtc,
 				struct intel_crtc_state *pipe_config)
 {
@@ -5439,21 +4384,12 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
 	if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable)
 		intel_uncompressed_joiner_get_config(pipe_config);
 
-	if (!active) {
-		/* bigjoiner slave doesn't enable transcoder */
-		if (!pipe_config->bigjoiner_slave)
-			goto out;
-
-		active = true;
-		pipe_config->pixel_multiplier = 1;
+	if (!active)
+		goto out;
 
-		/* we cannot read out most state, so don't bother.. */
-		pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE;
-	} else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
-		   DISPLAY_VER(dev_priv) >= 11) {
-		hsw_get_ddi_port_state(crtc, pipe_config);
+	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
+	    DISPLAY_VER(dev_priv) >= 11)
 		intel_get_transcoder_timings(crtc, pipe_config);
-	}
 
 	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
 		intel_vrr_get_config(crtc, pipe_config);
@@ -5521,10 +4457,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
 		}
 	}
 
-	if (pipe_config->bigjoiner_slave) {
-		/* Cannot be read out as a slave, set to 0. */
-		pipe_config->pixel_multiplier = 0;
-	} else if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
+	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
 	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
 		pipe_config->pixel_multiplier =
 			intel_de_read(dev_priv,
@@ -5721,7 +4654,8 @@ found:
 	drm_atomic_state_put(state);
 
 	/* let the connector get through one full cycle before testing */
-	intel_wait_for_vblank(dev_priv, crtc->pipe);
+	intel_crtc_wait_for_next_vblank(crtc);
+
 	return true;
 
 fail:
@@ -5782,8 +4716,8 @@ static int i9xx_pll_refclk(struct drm_device *dev,
 }
 
 /* Returns the clock of the currently programmed mode of the given pipe.
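
One concrete reading of the bigjoiner readout added above: enabled_bigjoiner_pipes() warns unless every slave pipe immediately follows its master, which is also why the slave branch in hsw_enabled_transcoders() probes crtc->pipe - 1. An editor's illustration:

	u8 master_pipes = BIT(PIPE_A) | BIT(PIPE_C);	/* masters on A and C */
	u8 slave_pipes = master_pipes << 1;		/* slaves on B and D */
	/* anything else trips the "Bigjoiner misconfigured" warning */
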
*/ -static void i9xx_crtc_clock_get(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +void i9xx_crtc_clock_get(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -5893,24 +4827,6 @@ int intel_dotclock_calculate(int link_freq, return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n); } -static void ilk_pch_clock_get(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - - /* read out port_clock from the DPLL */ - i9xx_crtc_clock_get(crtc, pipe_config); - - /* - * In case there is an active pipe without active ports, - * we may need some idea for the dotclock anyway. - * Calculate one based on the FDI configuration. - */ - pipe_config->hw.adjusted_mode.crtc_clock = - intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), - &pipe_config->fdi_m_n); -} - /* Returns the currently programmed mode of the given encoder. */ struct drm_display_mode * intel_encoder_current_mode(struct intel_encoder *encoder) @@ -5924,7 +4840,7 @@ intel_encoder_current_mode(struct intel_encoder *encoder) if (!encoder->get_hw_state(encoder, &pipe)) return NULL; - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(dev_priv, pipe); mode = kzalloc(sizeof(*mode), GFP_KERNEL); if (!mode) @@ -6245,6 +5161,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE; linked_state->color_ctl = plane_state->color_ctl; linked_state->view = plane_state->view; + linked_state->decrypt = plane_state->decrypt; intel_plane_copy_hw_state(linked_state, plane_state); linked_state->uapi.src = plane_state->uapi.src; @@ -6252,13 +5169,13 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) if (icl_is_hdr_plane(dev_priv, plane->id)) { if (linked->id == PLANE_SPRITE5) - plane_state->cus_ctl |= PLANE_CUS_PLANE_7; + plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL; else if (linked->id == PLANE_SPRITE4) - plane_state->cus_ctl |= PLANE_CUS_PLANE_6; + plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL; else if (linked->id == PLANE_SPRITE3) - plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL; + plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL; else if (linked->id == PLANE_SPRITE2) - plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL; + plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL; else MISSING_CASE(linked->id); } @@ -6371,8 +5288,6 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, crtc_state->update_wm_post = true; if (mode_changed && crtc_state->hw.enable && - dev_priv->dpll_funcs && - !crtc_state->bigjoiner_slave && !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) { ret = dev_priv->dpll_funcs->crtc_compute_clock(crtc_state); if (ret) @@ -6928,18 +5843,15 @@ static void intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state, struct intel_crtc_state *crtc_state) { - const struct intel_crtc_state *from_crtc_state = crtc_state; - - if (crtc_state->bigjoiner_slave) { - from_crtc_state = intel_atomic_get_new_crtc_state(state, - crtc_state->bigjoiner_linked_crtc); + const struct intel_crtc_state *master_crtc_state; + struct intel_crtc *master_crtc; - /* No need to copy state if the master state is unchanged */ - if (!from_crtc_state) - return; - } + master_crtc = intel_master_crtc(crtc_state); + master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc); - 
intel_crtc_copy_color_blobs(crtc_state, from_crtc_state); + /* No need to copy state if the master state is unchanged */ + if (master_crtc_state) + intel_crtc_copy_color_blobs(crtc_state, master_crtc_state); } static void @@ -6982,7 +5894,6 @@ copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state, const struct intel_crtc_state *from_crtc_state) { struct intel_crtc_state *saved_state; - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL); if (!saved_state) @@ -7012,8 +5923,8 @@ copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state, crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0; crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc); crtc_state->bigjoiner_slave = true; - crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe; - crtc_state->has_audio = false; + crtc_state->cpu_transcoder = from_crtc_state->cpu_transcoder; + crtc_state->has_audio = from_crtc_state->has_audio; return 0; } @@ -7609,51 +6520,48 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_X(output_types); - /* FIXME do the readout properly and get rid of this quirk */ - if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) { - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end); - - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end); - - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end); - - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end); - - PIPE_CONF_CHECK_I(pixel_multiplier); - + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end); + + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end); + + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end); + + 
PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end); + + PIPE_CONF_CHECK_I(pixel_multiplier); + + PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, + DRM_MODE_FLAG_INTERLACE); + + if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, - DRM_MODE_FLAG_INTERLACE); - - if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { - PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, - DRM_MODE_FLAG_PHSYNC); - PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, - DRM_MODE_FLAG_NHSYNC); - PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, - DRM_MODE_FLAG_PVSYNC); - PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, - DRM_MODE_FLAG_NVSYNC); - } + DRM_MODE_FLAG_PHSYNC); + PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, + DRM_MODE_FLAG_NHSYNC); + PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, + DRM_MODE_FLAG_PVSYNC); + PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, + DRM_MODE_FLAG_NVSYNC); } PIPE_CONF_CHECK_I(output_format); @@ -7665,9 +6573,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_BOOL(hdmi_scrambling); PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); PIPE_CONF_CHECK_BOOL(has_infoframe); - /* FIXME do the readout properly and get rid of this quirk */ - if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) - PIPE_CONF_CHECK_BOOL(fec_enable); + PIPE_CONF_CHECK_BOOL(fec_enable); PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); @@ -7696,9 +6602,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, } PIPE_CONF_CHECK_I(scaler_state.scaler_id); - /* FIXME do the readout properly and get rid of this quirk */ - if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) - PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); + PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); PIPE_CONF_CHECK_X(gamma_mode); if (IS_CHERRYVIEW(dev_priv)) @@ -7725,11 +6629,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_BOOL(double_wide); - if (dev_priv->dpll.mgr) + if (dev_priv->dpll.mgr) { PIPE_CONF_CHECK_P(shared_dpll); - /* FIXME do the readout properly and get rid of this quirk */ - if (dev_priv->dpll.mgr && !PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) { PIPE_CONF_CHECK_X(dpll_hw_state.dpll); PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); PIPE_CONF_CHECK_X(dpll_hw_state.fp0); @@ -7763,19 +6665,17 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias); } - if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) { - PIPE_CONF_CHECK_X(dsi_pll.ctrl); - PIPE_CONF_CHECK_X(dsi_pll.div); + PIPE_CONF_CHECK_X(dsi_pll.ctrl); + PIPE_CONF_CHECK_X(dsi_pll.div); - if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5) - PIPE_CONF_CHECK_I(pipe_bpp); + if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5) + PIPE_CONF_CHECK_I(pipe_bpp); - PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock); - PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock); - PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); + PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock); + PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock); + PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); - PIPE_CONF_CHECK_I(min_voltage_level); - } + PIPE_CONF_CHECK_I(min_voltage_level); if (current_config->has_psr || pipe_config->has_psr) 
PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable, @@ -8049,7 +6949,7 @@ verify_crtc_state(struct intel_crtc *crtc, struct intel_encoder *encoder; struct intel_crtc_state *pipe_config = old_crtc_state; struct drm_atomic_state *state = old_crtc_state->uapi.state; - struct intel_crtc *master = crtc; + struct intel_crtc *master_crtc; __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi); intel_crtc_free_hw_state(old_crtc_state); @@ -8077,10 +6977,9 @@ verify_crtc_state(struct intel_crtc *crtc, "(expected %i, found %i)\n", new_crtc_state->hw.active, crtc->active); - if (new_crtc_state->bigjoiner_slave) - master = new_crtc_state->bigjoiner_linked_crtc; + master_crtc = intel_master_crtc(new_crtc_state); - for_each_encoder_on_crtc(dev, &master->base, encoder) { + for_each_encoder_on_crtc(dev, &master_crtc->base, encoder) { enum pipe pipe; bool active; @@ -8090,7 +6989,7 @@ verify_crtc_state(struct intel_crtc *crtc, encoder->base.base.id, active, new_crtc_state->hw.active); - I915_STATE_WARN(active && master->pipe != pipe, + I915_STATE_WARN(active && master_crtc->pipe != pipe, "Encoder connected to wrong pipe %c\n", pipe_name(pipe)); @@ -8101,10 +7000,6 @@ verify_crtc_state(struct intel_crtc *crtc, if (!new_crtc_state->hw.active) return; - if (new_crtc_state->bigjoiner_slave) - /* No PLLs set for slave */ - pipe_config->shared_dpll = NULL; - intel_pipe_config_sanity_check(dev_priv, pipe_config); if (!intel_pipe_config_compare(new_crtc_state, @@ -8223,9 +7118,6 @@ verify_mpllb_state(struct intel_atomic_state *state, if (!new_crtc_state->hw.active) return; - if (new_crtc_state->bigjoiner_slave) - return; - encoder = intel_get_crtc_new_encoder(state, new_crtc_state); intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state); @@ -8607,28 +7499,13 @@ static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state) return 0; } -static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *i915 = to_i915(obj->base.dev); - - return intel_pxp_key_check(&i915->gt.pxp, obj, false) == 0; -} - -static bool pxp_is_borked(struct drm_i915_gem_object *obj) -{ - return i915_gem_object_is_protected(obj) && !bo_has_valid_encryption(obj); -} - static int intel_atomic_check_planes(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *old_crtc_state, *new_crtc_state; struct intel_plane_state *plane_state; struct intel_plane *plane; - struct intel_plane_state *new_plane_state; - struct intel_plane_state *old_plane_state; struct intel_crtc *crtc; - const struct drm_framebuffer *fb; int i, ret; ret = icl_add_linked_planes(state); @@ -8676,72 +7553,6 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state) return ret; } - for_each_new_intel_plane_in_state(state, plane, plane_state, i) { - new_plane_state = intel_atomic_get_new_plane_state(state, plane); - old_plane_state = intel_atomic_get_old_plane_state(state, plane); - fb = new_plane_state->hw.fb; - if (fb) { - new_plane_state->decrypt = bo_has_valid_encryption(intel_fb_obj(fb)); - new_plane_state->force_black = pxp_is_borked(intel_fb_obj(fb)); - } else { - new_plane_state->decrypt = old_plane_state->decrypt; - new_plane_state->force_black = old_plane_state->force_black; - } - } - - return 0; -} - -static int intel_atomic_check_cdclk(struct intel_atomic_state *state, - bool *need_cdclk_calc) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_cdclk_state *old_cdclk_state; - const struct 
intel_cdclk_state *new_cdclk_state; - struct intel_plane_state *plane_state; - struct intel_bw_state *new_bw_state; - struct intel_plane *plane; - int min_cdclk = 0; - enum pipe pipe; - int ret; - int i; - /* - * active_planes bitmask has been updated, and potentially - * affected planes are part of the state. We can now - * compute the minimum cdclk for each plane. - */ - for_each_new_intel_plane_in_state(state, plane, plane_state, i) { - ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc); - if (ret) - return ret; - } - - old_cdclk_state = intel_atomic_get_old_cdclk_state(state); - new_cdclk_state = intel_atomic_get_new_cdclk_state(state); - - if (new_cdclk_state && - old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk) - *need_cdclk_calc = true; - - ret = intel_cdclk_bw_calc_min_cdclk(state); - if (ret) - return ret; - - new_bw_state = intel_atomic_get_new_bw_state(state); - - if (!new_cdclk_state || !new_bw_state) - return 0; - - for_each_pipe(dev_priv, pipe) { - min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk); - - /* - * Currently do this change only if we need to increase - */ - if (new_bw_state->min_cdclk > min_cdclk) - *need_cdclk_calc = true; - } - return 0; } @@ -8790,13 +7601,13 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state, struct intel_crtc_state *new_crtc_state) { struct intel_crtc_state *slave_crtc_state, *master_crtc_state; - struct intel_crtc *slave, *master; + struct intel_crtc *slave_crtc, *master_crtc; /* slave being enabled, is master is still claiming this crtc? */ if (old_crtc_state->bigjoiner_slave) { - slave = crtc; - master = old_crtc_state->bigjoiner_linked_crtc; - master_crtc_state = intel_atomic_get_new_crtc_state(state, master); + slave_crtc = crtc; + master_crtc = old_crtc_state->bigjoiner_linked_crtc; + master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc); if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state)) goto claimed; } @@ -8804,17 +7615,17 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state, if (!new_crtc_state->bigjoiner) return 0; - slave = intel_dsc_get_bigjoiner_secondary(crtc); - if (!slave) { + slave_crtc = intel_dsc_get_bigjoiner_secondary(crtc); + if (!slave_crtc) { DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires " "CRTC + 1 to be used, doesn't exist\n", crtc->base.base.id, crtc->base.name); return -EINVAL; } - new_crtc_state->bigjoiner_linked_crtc = slave; - slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave); - master = crtc; + new_crtc_state->bigjoiner_linked_crtc = slave_crtc; + slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc); + master_crtc = crtc; if (IS_ERR(slave_crtc_state)) return PTR_ERR(slave_crtc_state); @@ -8823,15 +7634,15 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state, goto claimed; DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n", - slave->base.base.id, slave->base.name); + slave_crtc->base.base.id, slave_crtc->base.name); return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state); claimed: DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but " "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n", - slave->base.base.id, slave->base.name, - master->base.base.id, master->base.name); + slave_crtc->base.base.id, slave_crtc->base.name, + master_crtc->base.base.id, master_crtc->base.name); return -EINVAL; } @@ -8865,35 +7676,37 @@ static void kill_bigjoiner_slave(struct intel_atomic_state 
*state, * correspond to the last vblank and have no relation to the actual time when * the flip done event was sent. */ -static int intel_atomic_check_async(struct intel_atomic_state *state) +static int intel_atomic_check_async(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *old_crtc_state, *new_crtc_state; const struct intel_plane_state *new_plane_state, *old_plane_state; - struct intel_crtc *crtc; struct intel_plane *plane; int i; - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { - if (intel_crtc_needs_modeset(new_crtc_state)) { - drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n"); - return -EINVAL; - } + old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); + new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - if (!new_crtc_state->hw.active) { - drm_dbg_kms(&i915->drm, "CRTC inactive\n"); - return -EINVAL; - } - if (old_crtc_state->active_planes != new_crtc_state->active_planes) { - drm_dbg_kms(&i915->drm, - "Active planes cannot be changed during async flip\n"); - return -EINVAL; - } + if (intel_crtc_needs_modeset(new_crtc_state)) { + drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n"); + return -EINVAL; + } + + if (!new_crtc_state->hw.active) { + drm_dbg_kms(&i915->drm, "CRTC inactive\n"); + return -EINVAL; + } + if (old_crtc_state->active_planes != new_crtc_state->active_planes) { + drm_dbg_kms(&i915->drm, + "Active planes cannot be changed during async flip\n"); + return -EINVAL; } for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { + if (plane->pipe != crtc->pipe) + continue; + /* * TODO: Async flip is only supported through the page flip IOCTL * as of now. So support currently added for primary plane only. 
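intel_atomic_check_async() is now invoked once per CRTC instead of iterating the whole atomic state, so the plane loop above has to skip planes that sit on other pipes. A condensed sketch of that control flow, using simplified state types (all names here are illustrative, not the driver's):

/* Condensed per-CRTC async-flip validation; types are illustrative. */
#include <errno.h>
#include <stdbool.h>

struct sketch_crtc_state {
	bool needs_modeset;
	bool active;
	unsigned int active_planes;	/* bitmask of enabled planes */
};

struct sketch_plane {
	int pipe;
};

static int sketch_check_async(const struct sketch_crtc_state *old_state,
			      const struct sketch_crtc_state *new_state,
			      int crtc_pipe,
			      const struct sketch_plane *planes, int nplanes)
{
	int i;

	/* async flips may not change the mode or turn the CRTC off */
	if (new_state->needs_modeset || !new_state->active)
		return -EINVAL;

	/* the set of enabled planes must stay identical across the flip */
	if (old_state->active_planes != new_state->active_planes)
		return -EINVAL;

	for (i = 0; i < nplanes; i++) {
		/* only planes on the CRTC being flipped are validated */
		if (planes[i].pipe != crtc_pipe)
			continue;

		/* per-plane checks (format, modifier, stride, ...) go here */
	}

	return 0;
}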
@@ -8920,8 +7733,14 @@ static int intel_atomic_check_async(struct intel_atomic_state *state) return -EINVAL; } - if (old_plane_state->view.color_plane[0].stride != - new_plane_state->view.color_plane[0].stride) { + if (new_plane_state->hw.fb->format->num_planes > 1) { + drm_dbg_kms(&i915->drm, + "Planar formats not supported with async flips\n"); + return -EINVAL; + } + + if (old_plane_state->view.color_plane[0].mapping_stride != + new_plane_state->view.color_plane[0].mapping_stride) { drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n"); return -EINVAL; } @@ -9177,7 +7996,6 @@ static int intel_atomic_check(struct drm_device *dev, if (ret) goto fail; - intel_fbc_choose_crtc(dev_priv, state); ret = intel_compute_global_watermarks(state); if (ret) goto fail; @@ -9186,7 +8004,7 @@ static int intel_atomic_check(struct drm_device *dev, if (ret) goto fail; - ret = intel_atomic_check_cdclk(state, &any_ms); + ret = intel_cdclk_atomic_check(state, &any_ms); if (ret) goto fail; @@ -9209,10 +8027,14 @@ static int intel_atomic_check(struct drm_device *dev, if (ret) goto fail; + ret = intel_fbc_atomic_check(state); + if (ret) + goto fail; + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (new_crtc_state->uapi.async_flip) { - ret = intel_atomic_check_async(state); + ret = intel_atomic_check_async(state, crtc); if (ret) goto fail; } @@ -9417,15 +8239,17 @@ static void intel_update_crtc(struct intel_atomic_state *state, intel_fbc_update(state, crtc); + intel_update_planes_on_crtc(state, crtc); + /* Perform vblank evasion around commit operation */ intel_pipe_update_start(new_crtc_state); commit_pipe_pre_planes(state, crtc); if (DISPLAY_VER(dev_priv) >= 9) - skl_update_planes_on_crtc(state, crtc); + skl_arm_planes_on_crtc(state, crtc); else - i9xx_update_planes_on_crtc(state, crtc); + i9xx_arm_planes_on_crtc(state, crtc); commit_pipe_post_planes(state, crtc); @@ -9449,23 +8273,6 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state, { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave); - - intel_encoders_pre_disable(state, crtc); - - intel_crtc_disable_planes(state, crtc); - - /* - * We still need special handling for disabling bigjoiner master - * and slaves since for slave we do not have encoder or plls - * so we dont need to disable those. - */ - if (old_crtc_state->bigjoiner) { - intel_crtc_disable_planes(state, - old_crtc_state->bigjoiner_linked_crtc); - old_crtc_state->bigjoiner_linked_crtc->active = false; - } - /* * We need to disable pipe CRC before disabling the pipe, * or we race against vblank off. @@ -9490,10 +8297,22 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state) u32 handled = 0; int i; + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + if (!intel_crtc_needs_modeset(new_crtc_state)) + continue; + + if (!old_crtc_state->hw.active) + continue; + + intel_pre_plane_update(state, crtc); + intel_crtc_disable_planes(state, crtc); + } + /* Only disable port sync and MST slaves */ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner) + if (!intel_crtc_needs_modeset(new_crtc_state)) continue; if (!old_crtc_state->hw.active) @@ -9505,10 +8324,10 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state) * Slave vblanks are masked till Master Vblanks. 
*/ if (!is_trans_port_sync_slave(old_crtc_state) && - !intel_dp_mst_is_slave_trans(old_crtc_state)) + !intel_dp_mst_is_slave_trans(old_crtc_state) && + !old_crtc_state->bigjoiner_slave) continue; - intel_pre_plane_update(state, crtc); intel_old_crtc_state_disables(state, old_crtc_state, new_crtc_state, crtc); handled |= BIT(crtc->pipe); @@ -9518,21 +8337,14 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state) for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (!intel_crtc_needs_modeset(new_crtc_state) || - (handled & BIT(crtc->pipe)) || - old_crtc_state->bigjoiner_slave) + (handled & BIT(crtc->pipe))) continue; - intel_pre_plane_update(state, crtc); - if (old_crtc_state->bigjoiner) { - struct intel_crtc *slave = - old_crtc_state->bigjoiner_linked_crtc; - - intel_pre_plane_update(state, slave); - } + if (!old_crtc_state->hw.active) + continue; - if (old_crtc_state->hw.active) - intel_old_crtc_state_disables(state, old_crtc_state, - new_crtc_state, crtc); + intel_old_crtc_state_disables(state, old_crtc_state, + new_crtc_state, crtc); } } @@ -9610,7 +8422,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb, &old_crtc_state->wm.skl.ddb) && (update_pipes | modeset_pipes)) - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); } } @@ -9752,10 +8564,14 @@ static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *s for_each_new_intel_plane_in_state(state, plane, plane_state, i) { struct drm_framebuffer *fb = plane_state->hw.fb; + int cc_plane; int ret; - if (!fb || - fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC) + if (!fb) + continue; + + cc_plane = intel_fb_rc_ccs_cc_plane(fb); + if (cc_plane < 0) continue; /* @@ -9772,7 +8588,7 @@ static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *s * GPU write on it. */ ret = i915_gem_object_read_from_page(intel_fb_obj(fb), - fb->offsets[2] + 16, + fb->offsets[cc_plane] + 16, &plane_state->ccval, sizeof(plane_state->ccval)); /* The above could only fail if the FB obj has an unexpected backing store type. */ @@ -9840,11 +8656,9 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) } } - if (state->modeset) - intel_encoders_update_prepare(state); + intel_encoders_update_prepare(state); intel_dbuf_pre_plane_update(state); - intel_psr_pre_plane_update(state); for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { if (new_crtc_state->uapi.async_flip) @@ -9854,11 +8668,12 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) /* Now enable the clocks, plane, pipe, and connectors that we set up. */ dev_priv->display->commit_modeset_enables(state); - if (state->modeset) { - intel_encoders_update_complete(state); + intel_encoders_update_complete(state); + if (state->modeset) intel_set_cdclk_post_plane_update(state); - } + + intel_wait_for_vblank_workers(state); /* FIXME: We should call drm_atomic_helper_commit_hw_done() here * already, but still need the state for the delayed optimization. 
To @@ -9874,13 +8689,6 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { if (new_crtc_state->uapi.async_flip) intel_crtc_disable_flip_done(state, crtc); - - if (new_crtc_state->hw.active && - !intel_crtc_needs_modeset(new_crtc_state) && - !new_crtc_state->preload_luts && - (new_crtc_state->uapi.color_mgmt_changed || - new_crtc_state->update_pipe)) - intel_color_load_luts(new_crtc_state); } /* @@ -9967,7 +8775,7 @@ static void intel_atomic_commit_work(struct work_struct *work) intel_atomic_commit_tail(state); } -static int __i915_sw_fence_call +static int intel_atomic_commit_ready(struct i915_sw_fence *fence, enum i915_sw_fence_notify notify) { @@ -10114,8 +8922,8 @@ static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv) struct intel_plane *plane; for_each_intel_plane(&dev_priv->drm, plane) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, - plane->pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, + plane->pipe); plane->base.possible_crtcs = drm_crtc_mask(&crtc->base); } @@ -10580,7 +9388,7 @@ intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, static const struct drm_mode_config_funcs intel_mode_funcs = { .fb_create = intel_user_framebuffer_create, - .get_format_info = intel_get_format_info, + .get_format_info = intel_fb_get_format_info, .output_poll_changed = intel_fbdev_output_poll_changed, .mode_valid = intel_mode_valid, .atomic_check = intel_atomic_check, @@ -10640,7 +9448,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) return; intel_init_cdclk_hooks(dev_priv); - intel_init_audio_hooks(dev_priv); + intel_audio_hooks_init(dev_priv); intel_dpll_init_clock_hook(dev_priv); @@ -11108,7 +9916,7 @@ int intel_modeset_init(struct drm_i915_private *i915) void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); /* 640x480@60Hz, ~25175 kHz */ struct dpll clock = { .m1 = 18, @@ -11181,7 +9989,7 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n", pipe_name(pipe)); @@ -11233,7 +10041,7 @@ intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv) "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n", plane->base.base.id, plane->base.name); - plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + plane_crtc = intel_crtc_for_pipe(dev_priv, pipe); intel_plane_disable_noatomic(plane_crtc, plane); } } @@ -11486,7 +10294,7 @@ static void readout_plane_state(struct drm_i915_private *dev_priv) visible = plane->get_hw_state(plane, &pipe); - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(dev_priv, pipe); crtc_state = to_intel_crtc_state(crtc->base.state); intel_set_plane_visible(crtc_state, plane_state, visible); @@ -11553,7 +10361,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) pipe = 0; if (encoder->get_hw_state(encoder, &pipe)) { - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(dev_priv, pipe); crtc_state = to_intel_crtc_state(crtc->base.state); encoder->base.crtc = &crtc->base; @@ 
-11628,9 +10436,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) struct intel_plane *plane; int min_cdclk = 0; - if (crtc_state->bigjoiner_slave) - continue; - if (crtc_state->hw.active) { /* * The initial mode needs to be set in order to keep @@ -11690,39 +10495,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) intel_bw_crtc_update(bw_state, crtc_state); intel_pipe_config_sanity_check(dev_priv, crtc_state); - - /* discard our incomplete slave state, copy it from master */ - if (crtc_state->bigjoiner && crtc_state->hw.active) { - struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc; - struct intel_crtc_state *slave_crtc_state = - to_intel_crtc_state(slave->base.state); - - copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state); - slave->base.mode = crtc->base.mode; - - cdclk_state->min_cdclk[slave->pipe] = min_cdclk; - cdclk_state->min_voltage_level[slave->pipe] = - crtc_state->min_voltage_level; - - for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) { - const struct intel_plane_state *plane_state = - to_intel_plane_state(plane->base.state); - - /* - * FIXME don't have the fb yet, so can't - * use intel_plane_data_rate() :( - */ - if (plane_state->uapi.visible) - crtc_state->data_rate[plane->id] = - 4 * crtc_state->pixel_rate; - else - crtc_state->data_rate[plane->id] = 0; - } - - intel_bw_crtc_update(bw_state, slave_crtc_state); - drm_calc_timestamping_constants(&slave->base, - &slave_crtc_state->hw.adjusted_mode); - } } } @@ -12027,7 +10799,7 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915) destroy_workqueue(i915->flip_wq); destroy_workqueue(i915->modeset_wq); - intel_fbc_cleanup_cfb(i915); + intel_fbc_cleanup(i915); } /* part #3: call after gem init */ @@ -12042,6 +10814,27 @@ void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915) intel_bios_driver_remove(i915); } +bool intel_modeset_probe_defer(struct pci_dev *pdev) +{ + struct drm_privacy_screen *privacy_screen; + + /* + * apple-gmux is needed on dual GPU MacBook Pro + * to probe the panel if we're the inactive GPU. 
+ */ + if (vga_switcheroo_client_probe_defer(pdev)) + return true; + + /* If the LCD panel has a privacy-screen, wait for it */ + privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL); + if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER) + return true; + + drm_privacy_screen_put(privacy_screen); + + return false; +} + void intel_display_driver_register(struct drm_i915_private *i915) { if (!HAS_DISPLAY(i915)) diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 0c76bf57f86b..b61b75248ded 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -57,6 +57,7 @@ struct intel_plane; struct intel_plane_state; struct intel_remapped_info; struct intel_rotation_info; +struct pci_dev; enum i915_gpio { GPIOA, @@ -346,9 +347,33 @@ enum phy_fia { FIA3, }; +enum hpd_pin { + HPD_NONE = 0, + HPD_TV = HPD_NONE, /* TV is known to be unreliable */ + HPD_CRT, + HPD_SDVO_B, + HPD_SDVO_C, + HPD_PORT_A, + HPD_PORT_B, + HPD_PORT_C, + HPD_PORT_D, + HPD_PORT_E, + HPD_PORT_TC1, + HPD_PORT_TC2, + HPD_PORT_TC3, + HPD_PORT_TC4, + HPD_PORT_TC5, + HPD_PORT_TC6, + + HPD_NUM_PINS +}; + +#define for_each_hpd_pin(__pin) \ + for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++) + #define for_each_pipe(__dev_priv, __p) \ for ((__p) = 0; (__p) < I915_MAX_PIPES; (__p)++) \ - for_each_if(INTEL_INFO(__dev_priv)->pipe_mask & BIT(__p)) + for_each_if(INTEL_INFO(__dev_priv)->display.pipe_mask & BIT(__p)) #define for_each_pipe_masked(__dev_priv, __p, __mask) \ for_each_pipe(__dev_priv, __p) \ @@ -356,7 +381,7 @@ enum phy_fia { #define for_each_cpu_transcoder(__dev_priv, __t) \ for ((__t) = 0; (__t) < I915_MAX_TRANSCODERS; (__t)++) \ - for_each_if (INTEL_INFO(__dev_priv)->cpu_transcoder_mask & BIT(__t)) + for_each_if (INTEL_INFO(__dev_priv)->display.cpu_transcoder_mask & BIT(__t)) #define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \ for_each_cpu_transcoder(__dev_priv, __t) \ @@ -521,7 +546,6 @@ void intel_link_compute_m_n(u16 bpp, int nlanes, int pixel_clock, int link_clock, struct intel_link_m_n *m_n, bool constant_n, bool fec_enable); -void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv); u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, u32 pixel_format, u64 modifier); enum drm_mode_status @@ -542,9 +566,6 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv, const char *name, u32 reg, int ref_freq); int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, const char *name, u32 reg); -void lpt_pch_enable(const struct intel_crtc_state *crtc_state); -void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv); -void lpt_disable_iclkip(struct drm_i915_private *dev_priv); void intel_init_display_hooks(struct drm_i915_private *dev_priv); unsigned int intel_fb_xy_to_linear(int x, int y, const struct intel_plane_state *state, @@ -580,10 +601,6 @@ struct drm_framebuffer * intel_framebuffer_create(struct drm_i915_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd); -void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe); - -int lpt_get_iclkip(struct drm_i915_private *dev_priv); bool intel_fuzzy_clock_check(int clock1, int clock2); void intel_display_prepare_reset(struct drm_i915_private *dev_priv); @@ -592,8 +609,11 @@ void intel_dp_get_m_n(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config); void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n); +void 
ilk_get_fdi_m_n_config(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config); +void i9xx_crtc_clock_get(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config); int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); - bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state); void hsw_enable_ips(const struct intel_crtc_state *crtc_state); void hsw_disable_ips(const struct intel_crtc_state *crtc_state); @@ -610,9 +630,6 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc); unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state); bool intel_plane_uses_fence(const struct intel_plane_state *plane_state); -bool -intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, - u64 modifier); struct intel_encoder * intel_get_crtc_new_encoder(const struct intel_atomic_state *state, @@ -624,6 +641,7 @@ void intel_display_driver_register(struct drm_i915_private *i915); void intel_display_driver_unregister(struct drm_i915_private *i915); /* modesetting */ +bool intel_modeset_probe_defer(struct pci_dev *pdev); void intel_modeset_init_hw(struct drm_i915_private *i915); int intel_modeset_init_noirq(struct drm_i915_private *i915); int intel_modeset_init_nogem(struct drm_i915_private *i915); @@ -632,7 +650,6 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915); void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915); void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915); void intel_display_resume(struct drm_device *dev); -void intel_init_pch_refclk(struct drm_i915_private *dev_priv); int intel_modeset_all_pipes(struct intel_atomic_state *state); /* modesetting asserts */ diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index e04767695530..572445299b04 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -40,83 +40,6 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) return 0; } -static int i915_fbc_status(struct seq_file *m, void *unused) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct intel_fbc *fbc = &dev_priv->fbc; - intel_wakeref_t wakeref; - - if (!HAS_FBC(dev_priv)) - return -ENODEV; - - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - mutex_lock(&fbc->lock); - - if (intel_fbc_is_active(dev_priv)) - seq_puts(m, "FBC enabled\n"); - else - seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason); - - if (intel_fbc_is_active(dev_priv)) { - u32 mask; - - if (DISPLAY_VER(dev_priv) >= 8) - mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK; - else if (DISPLAY_VER(dev_priv) >= 7) - mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK; - else if (DISPLAY_VER(dev_priv) >= 5) - mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK; - else if (IS_G4X(dev_priv)) - mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK; - else - mask = intel_de_read(dev_priv, FBC_STATUS) & - (FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED); - - seq_printf(m, "Compressing: %s\n", yesno(mask)); - } - - mutex_unlock(&fbc->lock); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - - return 0; -} - -static int i915_fbc_false_color_get(void *data, u64 *val) -{ - struct drm_i915_private *dev_priv = data; - - if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv)) - return -ENODEV; - - *val = dev_priv->fbc.false_color; - - 
return 0; -} - -static int i915_fbc_false_color_set(void *data, u64 val) -{ - struct drm_i915_private *dev_priv = data; - u32 reg; - - if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv)) - return -ENODEV; - - mutex_lock(&dev_priv->fbc.lock); - - reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL); - dev_priv->fbc.false_color = val; - - intel_de_write(dev_priv, ILK_DPFC_CONTROL, - val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR)); - - mutex_unlock(&dev_priv->fbc.lock); - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops, - i915_fbc_false_color_get, i915_fbc_false_color_set, - "%llu\n"); - static int i915_ips_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -303,8 +226,7 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m) }; val = intel_de_read(dev_priv, EDP_PSR2_STATUS(intel_dp->psr.transcoder)); - status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >> - EDP_PSR2_STATUS_STATE_SHIFT; + status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val); if (status_val < ARRAY_SIZE(live_status)) status = live_status[status_val]; } else { @@ -503,28 +425,9 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops, static int i915_power_domain_info(struct seq_file *m, void *unused) { - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct i915_power_domains *power_domains = &dev_priv->power_domains; - int i; - - mutex_lock(&power_domains->lock); - - seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); - for (i = 0; i < power_domains->power_well_count; i++) { - struct i915_power_well *power_well; - enum intel_display_power_domain power_domain; - - power_well = &power_domains->power_wells[i]; - seq_printf(m, "%-25s %d\n", power_well->desc->name, - power_well->count); - - for_each_power_domain(power_domain, power_well->desc->domains) - seq_printf(m, " %-23s %d\n", - intel_display_power_domain_str(power_domain), - power_domains->domain_use_count[power_domain]); - } + struct drm_i915_private *i915 = node_to_i915(m->private); - mutex_unlock(&power_domains->lock); + intel_display_power_debug(i915, m); return 0; } @@ -2095,9 +1998,7 @@ i915_fifo_underrun_reset_write(struct file *filp, return ret; } - ret = intel_fbc_reset_underrun(dev_priv); - if (ret) - return ret; + intel_fbc_reset_underrun(dev_priv); return cnt; } @@ -2111,7 +2012,6 @@ static const struct file_operations i915_fifo_underrun_reset_ops = { static const struct drm_info_list intel_display_debugfs_list[] = { {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, - {"i915_fbc_status", i915_fbc_status, 0}, {"i915_ips_status", i915_ips_status, 0}, {"i915_sr_status", i915_sr_status, 0}, {"i915_opregion", i915_opregion, 0}, @@ -2136,7 +2036,6 @@ static const struct { {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, - {"i915_fbc_false_color", &i915_fbc_false_color_fops}, {"i915_dp_test_data", &i915_displayport_test_data_fops}, {"i915_dp_test_type", &i915_displayport_test_type_fops}, {"i915_dp_test_active", &i915_displayport_test_active_fops}, @@ -2163,6 +2062,8 @@ void intel_display_debugfs_register(struct drm_i915_private *i915) drm_debugfs_create_files(intel_display_debugfs_list, ARRAY_SIZE(intel_display_debugfs_list), minor->debugfs_root, minor); + + intel_fbc_debugfs_register(i915); } static int i915_panel_show(struct seq_file *m, void *data) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c 
b/drivers/gpu/drm/i915/display/intel_display_power.c index 1672604f9ef7..05babdcf5f2e 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -15,6 +15,7 @@ #include "intel_dpio_phy.h" #include "intel_dpll.h" #include "intel_hotplug.h" +#include "intel_pch_refclk.h" #include "intel_pcode.h" #include "intel_pm.h" #include "intel_pps.h" @@ -23,6 +24,98 @@ #include "intel_vga.h" #include "vlv_sideband.h" + +struct i915_power_well_ops { + /* + * Synchronize the well's hw state to match the current sw state, for + * example enable/disable it based on the current refcount. Called + * during driver init and resume time, possibly after first calling + * the enable/disable handlers. + */ + void (*sync_hw)(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well); + /* + * Enable the well and resources that depend on it (for example + * interrupts located on the well). Called after the 0->1 refcount + * transition. + */ + void (*enable)(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well); + /* + * Disable the well and resources that depend on it. Called after + * the 1->0 refcount transition. + */ + void (*disable)(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well); + /* Returns the hw enabled state. */ + bool (*is_enabled)(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well); +}; + +struct i915_power_well_regs { + i915_reg_t bios; + i915_reg_t driver; + i915_reg_t kvmr; + i915_reg_t debug; +}; + +/* Power well structure for haswell */ +struct i915_power_well_desc { + const char *name; + bool always_on; + u64 domains; + /* unique identifier for this power well */ + enum i915_power_well_id id; + /* + * Arbitrary data associated with this power well. Platform and power + * well specific. + */ + union { + struct { + /* + * request/status flag index in the PUNIT power well + * control/status registers. + */ + u8 idx; + } vlv; + struct { + enum dpio_phy phy; + } bxt; + struct { + const struct i915_power_well_regs *regs; + /* + * request/status flag index in the power well + * control/status registers. + */ + u8 idx; + /* Mask of pipes whose IRQ logic is backed by the pw */ + u8 irq_pipe_mask; + /* + * Instead of waiting for the status bit to ack enables, + * just wait a specific amount of time and then consider + * the well enabled. + */ + u16 fixed_enable_delay; + /* The pw is backing the VGA functionality */ + bool has_vga:1; + bool has_fuses:1; + /* + * The pw is for an ICL+ TypeC PHY port in + * Thunderbolt mode. + */ + bool is_tc_tbt:1; + } hsw; + }; + const struct i915_power_well_ops *ops; +}; + +struct i915_power_well { + const struct i915_power_well_desc *desc; + /* power well enable/disable usage count */ + int count; + /* cached hw enabled state */ + bool hw_enabled; +}; + bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, enum i915_power_well_id power_well_id); @@ -154,8 +247,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "MODESET"; case POWER_DOMAIN_GT_IRQ: return "GT_IRQ"; - case POWER_DOMAIN_DPLL_DC_OFF: - return "DPLL_DC_OFF"; + case POWER_DOMAIN_DC_OFF: + return "DC_OFF"; case POWER_DOMAIN_TC_COLD_OFF: return "TC_COLD_OFF"; default: @@ -434,6 +527,11 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, pg = DISPLAY_VER(dev_priv) >= 11 ?
ICL_PW_CTL_IDX_TO_PG(pw_idx) : SKL_PW_CTL_IDX_TO_PG(pw_idx); + + /* Wa_16013190616:adlp */ + if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1) + intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC); + /* * For PW1 we have to wait both for the PW0/PG0 fuse state * before enabling the power well and PW1/PG1's own fuse @@ -894,7 +992,7 @@ static u32 sanitize_target_dc_state(struct drm_i915_private *dev_priv, u32 target_dc_state) { - u32 states[] = { + static const u32 states[] = { DC_STATE_EN_UPTO_DC6, DC_STATE_EN_UPTO_DC5, DC_STATE_EN_DC3CO, @@ -2802,7 +2900,7 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915, ICL_PW_2_POWER_DOMAINS | \ BIT_ULL(POWER_DOMAIN_MODESET) | \ BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) | \ + BIT_ULL(POWER_DOMAIN_DC_OFF) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define ICL_DDI_IO_A_POWER_DOMAINS ( \ @@ -3105,6 +3203,7 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915, BIT_ULL(POWER_DOMAIN_MODESET) | \ BIT_ULL(POWER_DOMAIN_AUX_A) | \ BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define XELPD_AUX_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) @@ -5271,7 +5370,7 @@ static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv) static void icl_mbus_init(struct drm_i915_private *dev_priv) { - unsigned long abox_regs = INTEL_INFO(dev_priv)->abox_mask; + unsigned long abox_regs = INTEL_INFO(dev_priv)->display.abox_mask; u32 mask, val, i; if (IS_ALDERLAKE_P(dev_priv)) @@ -5731,7 +5830,7 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) enum intel_dram_type type = dev_priv->dram_info.type; u8 num_channels = dev_priv->dram_info.num_channels; const struct buddy_page_mask *table; - unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask; + unsigned long abox_mask = INTEL_INFO(dev_priv)->display.abox_mask; int config, i; /* BW_BUDDY registers are not used on dgpu's beyond DG1 */ @@ -6390,3 +6489,28 @@ void intel_display_power_resume(struct drm_i915_private *i915) hsw_disable_pc8(i915); } } + +void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m) +{ + struct i915_power_domains *power_domains = &i915->power_domains; + int i; + + mutex_lock(&power_domains->lock); + + seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); + for (i = 0; i < power_domains->power_well_count; i++) { + struct i915_power_well *power_well; + enum intel_display_power_domain power_domain; + + power_well = &power_domains->power_wells[i]; + seq_printf(m, "%-25s %d\n", power_well->desc->name, + power_well->count); + + for_each_power_domain(power_domain, power_well->desc->domains) + seq_printf(m, " %-23s %d\n", + intel_display_power_domain_str(power_domain), + power_domains->domain_use_count[power_domain]); + } + + mutex_unlock(&power_domains->lock); +} diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index 0612e4b6e3c8..686d18eaa24c 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -6,11 +6,13 @@ #ifndef __INTEL_DISPLAY_POWER_H__ #define __INTEL_DISPLAY_POWER_H__ -#include "intel_display.h" #include "intel_runtime_pm.h" #include "i915_reg.h" +enum dpio_channel; +enum dpio_phy; struct drm_i915_private; +struct i915_power_well; struct intel_encoder; enum intel_display_power_domain { @@ -117,7 +119,7 @@ enum intel_display_power_domain { POWER_DOMAIN_GMBUS, POWER_DOMAIN_MODESET, 
POWER_DOMAIN_GT_IRQ, - POWER_DOMAIN_DPLL_DC_OFF, + POWER_DOMAIN_DC_OFF, POWER_DOMAIN_TC_COLD_OFF, POWER_DOMAIN_INIT, @@ -155,100 +157,6 @@ enum i915_power_well_id { ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \ (tran) + POWER_DOMAIN_TRANSCODER_A) -struct i915_power_well; - -struct i915_power_well_ops { - /* - * Synchronize the well's hw state to match the current sw state, for - * example enable/disable it based on the current refcount. Called - * during driver init and resume time, possibly after first calling - * the enable/disable handlers. - */ - void (*sync_hw)(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well); - /* - * Enable the well and resources that depend on it (for example - * interrupts located on the well). Called after the 0->1 refcount - * transition. - */ - void (*enable)(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well); - /* - * Disable the well and resources that depend on it. Called after - * the 1->0 refcount transition. - */ - void (*disable)(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well); - /* Returns the hw enabled state. */ - bool (*is_enabled)(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well); -}; - -struct i915_power_well_regs { - i915_reg_t bios; - i915_reg_t driver; - i915_reg_t kvmr; - i915_reg_t debug; -}; - -/* Power well structure for haswell */ -struct i915_power_well_desc { - const char *name; - bool always_on; - u64 domains; - /* unique identifier for this power well */ - enum i915_power_well_id id; - /* - * Arbitraty data associated with this power well. Platform and power - * well specific. - */ - union { - struct { - /* - * request/status flag index in the PUNIT power well - * control/status registers. - */ - u8 idx; - } vlv; - struct { - enum dpio_phy phy; - } bxt; - struct { - const struct i915_power_well_regs *regs; - /* - * request/status flag index in the power well - * constrol/status registers. - */ - u8 idx; - /* Mask of pipes whose IRQ logic is backed by the pw */ - u8 irq_pipe_mask; - /* - * Instead of waiting for the status bit to ack enables, - * just wait a specific amount of time and then consider - * the well enabled. - */ - u16 fixed_enable_delay; - /* The pw is backing the VGA functionality */ - bool has_vga:1; - bool has_fuses:1; - /* - * The pw is for an ICL+ TypeC PHY port in - * Thunderbolt mode. - */ - bool is_tc_tbt:1; - } hsw; - }; - const struct i915_power_well_ops *ops; -}; - -struct i915_power_well { - const struct i915_power_well_desc *desc; - /* power well enable/disable usage count */ - int count; - /* cached hw enabled state */ - bool hw_enabled; -}; - struct i915_power_domains { /* * Power wells needed for initialization at driver init and suspend @@ -391,6 +299,8 @@ intel_display_power_put_all_in_set(struct drm_i915_private *i915, intel_display_power_put_mask_in_set(i915, power_domain_set, power_domain_set->mask); } +void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m); + /* * FIXME: We should probably switch this to a 0-based scheme to be consistent * with how we now name/number DBUF_CTL instances. 
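The i915_power_well_ops comments moved into intel_display_power.c above spell out a refcount discipline: enable() fires on the 0->1 usage transition and disable() on 1->0, with hw_enabled caching the result. A minimal sketch of that get/put pattern, with locking elided and illustrative names (not the driver's implementation):

/* Refcounted power-well get/put; a sketch, not the driver's implementation. */
#include <stdbool.h>

struct sketch_power_well {
	int count;		/* usage refcount */
	bool hw_enabled;	/* cached hardware state */
	void (*enable)(struct sketch_power_well *well);
	void (*disable)(struct sketch_power_well *well);
};

/* Caller is assumed to hold the power-domains lock. */
static void sketch_power_well_get(struct sketch_power_well *well)
{
	if (well->count++ == 0) {	/* 0 -> 1: power the well up */
		well->enable(well);
		well->hw_enabled = true;
	}
}

static void sketch_power_well_put(struct sketch_power_well *well)
{
	if (--well->count == 0) {	/* 1 -> 0: power the well down */
		well->disable(well);
		well->hw_enabled = false;
	}
}

Because every caller goes through this refcounting API, the patch can hide the well, desc, and ops structs inside intel_display_power.c; the header keeps only a forward declaration of struct i915_power_well.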
diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.c b/drivers/gpu/drm/i915/display/intel_display_trace.c new file mode 100644 index 000000000000..737979ada869 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_trace.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __CHECKER__ +#define CREATE_TRACE_POINTS +#include "intel_display_trace.h" +#endif diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h new file mode 100644 index 000000000000..4043e1276383 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_trace.h @@ -0,0 +1,587 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright © 2021 Intel Corporation + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM i915 + +#if !defined(__INTEL_DISPLAY_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ) +#define __INTEL_DISPLAY_TRACE_H__ + +#include <linux/types.h> +#include <linux/tracepoint.h> + +#include "i915_drv.h" +#include "intel_crtc.h" +#include "intel_display_types.h" + +TRACE_EVENT(intel_pipe_enable, + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), + + TP_STRUCT__entry( + __array(u32, frame, 3) + __array(u32, scanline, 3) + __field(enum pipe, pipe) + ), + TP_fast_assign( + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc *it__; + for_each_intel_crtc(&dev_priv->drm, it__) { + __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); + __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__); + } + __entry->pipe = crtc->pipe; + ), + + TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", + pipe_name(__entry->pipe), + __entry->frame[PIPE_A], __entry->scanline[PIPE_A], + __entry->frame[PIPE_B], __entry->scanline[PIPE_B], + __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) +); + +TRACE_EVENT(intel_pipe_disable, + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), + + TP_STRUCT__entry( + __array(u32, frame, 3) + __array(u32, scanline, 3) + __field(enum pipe, pipe) + ), + + TP_fast_assign( + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc *it__; + for_each_intel_crtc(&dev_priv->drm, it__) { + __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); + __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__); + } + __entry->pipe = crtc->pipe; + ), + + TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", + pipe_name(__entry->pipe), + __entry->frame[PIPE_A], __entry->scanline[PIPE_A], + __entry->frame[PIPE_B], __entry->scanline[PIPE_B], + __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) +); + +TRACE_EVENT(intel_pipe_crc, + TP_PROTO(struct intel_crtc *crtc, const u32 *crcs), + TP_ARGS(crtc, crcs), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __array(u32, crcs, 5) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + memcpy(__entry->crcs, crcs, sizeof(__entry->crcs)); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u crc=%08x %08x %08x %08x %08x", + pipe_name(__entry->pipe), __entry->frame, __entry->scanline, + __entry->crcs[0], __entry->crcs[1], __entry->crcs[2], + __entry->crcs[3], __entry->crcs[4]) +); + +TRACE_EVENT(intel_cpu_fifo_underrun, + TP_PROTO(struct drm_i915_private *dev_priv, enum pipe 
pipe), + TP_ARGS(dev_priv, pipe), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); + __entry->pipe = pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), + __entry->frame, __entry->scanline) +); + +TRACE_EVENT(intel_pch_fifo_underrun, + TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pch_transcoder), + TP_ARGS(dev_priv, pch_transcoder), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + enum pipe pipe = pch_transcoder; + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); + __entry->pipe = pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pch transcoder %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), + __entry->frame, __entry->scanline) +); + +TRACE_EVENT(intel_memory_cxsr, + TP_PROTO(struct drm_i915_private *dev_priv, bool old, bool new), + TP_ARGS(dev_priv, old, new), + + TP_STRUCT__entry( + __array(u32, frame, 3) + __array(u32, scanline, 3) + __field(bool, old) + __field(bool, new) + ), + + TP_fast_assign( + struct intel_crtc *crtc; + for_each_intel_crtc(&dev_priv->drm, crtc) { + __entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc); + __entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc); + } + __entry->old = old; + __entry->new = new; + ), + + TP_printk("%s->%s, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", + onoff(__entry->old), onoff(__entry->new), + __entry->frame[PIPE_A], __entry->scanline[PIPE_A], + __entry->frame[PIPE_B], __entry->scanline[PIPE_B], + __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) +); + +TRACE_EVENT(g4x_wm, + TP_PROTO(struct intel_crtc *crtc, const struct g4x_wm_values *wm), + TP_ARGS(crtc, wm), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __field(u16, primary) + __field(u16, sprite) + __field(u16, cursor) + __field(u16, sr_plane) + __field(u16, sr_cursor) + __field(u16, sr_fbc) + __field(u16, hpll_plane) + __field(u16, hpll_cursor) + __field(u16, hpll_fbc) + __field(bool, cxsr) + __field(bool, hpll) + __field(bool, fbc) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY]; + __entry->sprite = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0]; + __entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR]; + __entry->sr_plane = wm->sr.plane; + __entry->sr_cursor = wm->sr.cursor; + __entry->sr_fbc = wm->sr.fbc; + __entry->hpll_plane = wm->hpll.plane; + __entry->hpll_cursor = wm->hpll.cursor; + __entry->hpll_fbc = wm->hpll.fbc; + __entry->cxsr = wm->cxsr; + __entry->hpll = wm->hpll_en; + __entry->fbc = wm->fbc_en; + ), + + TP_printk("pipe %c, frame=%u, scanline=%u, wm %d/%d/%d, sr %s/%d/%d/%d, hpll %s/%d/%d/%d, fbc %s", + pipe_name(__entry->pipe), __entry->frame, __entry->scanline, + __entry->primary, __entry->sprite, __entry->cursor, + yesno(__entry->cxsr), __entry->sr_plane, __entry->sr_cursor, __entry->sr_fbc, + yesno(__entry->hpll), __entry->hpll_plane, __entry->hpll_cursor, __entry->hpll_fbc, + yesno(__entry->fbc)) +); + 
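Every event in this new header repeats the same five-part TRACE_EVENT anatomy: the trace-call prototype, its argument list, the ring-buffer record layout, the assignment that runs at the trace site, and the format string used when the record is printed. A stripped-down skeleton making those parts explicit (intel_example_event is illustrative only, not an event added by this patch):

/* Illustrative TRACE_EVENT skeleton; not part of the patch. */
TRACE_EVENT(intel_example_event,
	TP_PROTO(struct intel_crtc *crtc),	/* signature of trace_intel_example_event() */
	TP_ARGS(crtc),

	TP_STRUCT__entry(			/* per-record storage in the ring buffer */
		__field(enum pipe, pipe)
		__field(u32, frame)
	),

	TP_fast_assign(				/* executed at the trace site */
		__entry->pipe = crtc->pipe;
		__entry->frame = intel_crtc_get_vblank_counter(crtc);
	),

	TP_printk("pipe %c, frame=%u",		/* human-readable rendering */
		  pipe_name(__entry->pipe), __entry->frame)
);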
+TRACE_EVENT(vlv_wm, + TP_PROTO(struct intel_crtc *crtc, const struct vlv_wm_values *wm), + TP_ARGS(crtc, wm), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __field(u32, level) + __field(u32, cxsr) + __field(u32, primary) + __field(u32, sprite0) + __field(u32, sprite1) + __field(u32, cursor) + __field(u32, sr_plane) + __field(u32, sr_cursor) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + __entry->level = wm->level; + __entry->cxsr = wm->cxsr; + __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY]; + __entry->sprite0 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0]; + __entry->sprite1 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE1]; + __entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR]; + __entry->sr_plane = wm->sr.plane; + __entry->sr_cursor = wm->sr.cursor; + ), + + TP_printk("pipe %c, frame=%u, scanline=%u, level=%d, cxsr=%d, wm %d/%d/%d/%d, sr %d/%d", + pipe_name(__entry->pipe), __entry->frame, + __entry->scanline, __entry->level, __entry->cxsr, + __entry->primary, __entry->sprite0, __entry->sprite1, __entry->cursor, + __entry->sr_plane, __entry->sr_cursor) +); + +TRACE_EVENT(vlv_fifo_size, + TP_PROTO(struct intel_crtc *crtc, u32 sprite0_start, u32 sprite1_start, u32 fifo_size), + TP_ARGS(crtc, sprite0_start, sprite1_start, fifo_size), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __field(u32, sprite0_start) + __field(u32, sprite1_start) + __field(u32, fifo_size) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + __entry->sprite0_start = sprite0_start; + __entry->sprite1_start = sprite1_start; + __entry->fifo_size = fifo_size; + ), + + TP_printk("pipe %c, frame=%u, scanline=%u, %d/%d/%d", + pipe_name(__entry->pipe), __entry->frame, + __entry->scanline, __entry->sprite0_start, + __entry->sprite1_start, __entry->fifo_size) +); + +TRACE_EVENT(intel_plane_update_noarm, + TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc), + TP_ARGS(plane, crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __array(int, src, 4) + __array(int, dst, 4) + __string(name, plane->name) + ), + + TP_fast_assign( + __assign_str(name, plane->name); + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + memcpy(__entry->src, &plane->state->src, sizeof(__entry->src)); + memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst)); + ), + + TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT, + pipe_name(__entry->pipe), __get_str(name), + __entry->frame, __entry->scanline, + DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src), + DRM_RECT_ARG((const struct drm_rect *)__entry->dst)) +); + +TRACE_EVENT(intel_plane_update_arm, + TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc), + TP_ARGS(plane, crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __array(int, src, 4) + __array(int, dst, 4) + __string(name, plane->name) + ), + + TP_fast_assign( + __assign_str(name, plane->name); + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + memcpy(__entry->src, 
&plane->state->src, sizeof(__entry->src)); + memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst)); + ), + + TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT, + pipe_name(__entry->pipe), __get_str(name), + __entry->frame, __entry->scanline, + DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src), + DRM_RECT_ARG((const struct drm_rect *)__entry->dst)) +); + +TRACE_EVENT(intel_plane_disable_arm, + TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc), + TP_ARGS(plane, crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __string(name, plane->name) + ), + + TP_fast_assign( + __assign_str(name, plane->name); + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pipe %c, plane %s, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __get_str(name), + __entry->frame, __entry->scanline) +); + +TRACE_EVENT(intel_fbc_activate, + TP_PROTO(struct intel_plane *plane), + TP_ARGS(plane), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev), + plane->pipe); + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __entry->frame, __entry->scanline) +); + +TRACE_EVENT(intel_fbc_deactivate, + TP_PROTO(struct intel_plane *plane), + TP_ARGS(plane), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev), + plane->pipe); + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __entry->frame, __entry->scanline) +); + +TRACE_EVENT(intel_fbc_nuke, + TP_PROTO(struct intel_plane *plane), + TP_ARGS(plane), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev), + plane->pipe); + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __entry->frame, __entry->scanline) +); + +TRACE_EVENT(intel_crtc_vblank_work_start, + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __entry->frame, + __entry->scanline) +); + +TRACE_EVENT(intel_crtc_vblank_work_end, + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + 
TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __entry->frame, + __entry->scanline) +); + +TRACE_EVENT(intel_pipe_update_start, + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __field(u32, min) + __field(u32, max) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + __entry->min = crtc->debug.min_vbl; + __entry->max = crtc->debug.max_vbl; + ), + + TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u", + pipe_name(__entry->pipe), __entry->frame, + __entry->scanline, __entry->min, __entry->max) +); + +TRACE_EVENT(intel_pipe_update_vblank_evaded, + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __field(u32, min) + __field(u32, max) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = crtc->debug.start_vbl_count; + __entry->scanline = crtc->debug.scanline_start; + __entry->min = crtc->debug.min_vbl; + __entry->max = crtc->debug.max_vbl; + ), + + TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u", + pipe_name(__entry->pipe), __entry->frame, + __entry->scanline, __entry->min, __entry->max) +); + +TRACE_EVENT(intel_pipe_update_end, + TP_PROTO(struct intel_crtc *crtc, u32 frame, int scanline_end), + TP_ARGS(crtc, frame, scanline_end), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = frame; + __entry->scanline = scanline_end; + ), + + TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __entry->frame, + __entry->scanline) +); + +TRACE_EVENT(intel_frontbuffer_invalidate, + TP_PROTO(unsigned int frontbuffer_bits, unsigned int origin), + TP_ARGS(frontbuffer_bits, origin), + + TP_STRUCT__entry( + __field(unsigned int, frontbuffer_bits) + __field(unsigned int, origin) + ), + + TP_fast_assign( + __entry->frontbuffer_bits = frontbuffer_bits; + __entry->origin = origin; + ), + + TP_printk("frontbuffer_bits=0x%08x, origin=%u", + __entry->frontbuffer_bits, __entry->origin) +); + +TRACE_EVENT(intel_frontbuffer_flush, + TP_PROTO(unsigned int frontbuffer_bits, unsigned int origin), + TP_ARGS(frontbuffer_bits, origin), + + TP_STRUCT__entry( + __field(unsigned int, frontbuffer_bits) + __field(unsigned int, origin) + ), + + TP_fast_assign( + __entry->frontbuffer_bits = frontbuffer_bits; + __entry->origin = origin; + ), + + TP_printk("frontbuffer_bits=0x%08x, origin=%u", + __entry->frontbuffer_bits, __entry->origin) +); + +#endif /* __INTEL_DISPLAY_TRACE_H__ */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915/display +#define TRACE_INCLUDE_FILE intel_display_trace +#include <trace/define_trace.h> diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 39e11eaec1a3..c9c6efadf8b4 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -28,6 +28,7 @@ #include <linux/async.h> #include <linux/i2c.h> +#include <linux/pm_qos.h> #include <linux/pwm.h> #include <linux/sched/clock.h> @@ -35,20 +36,30 @@ #include <drm/drm_crtc.h> #include <drm/drm_dp_dual_mode_helper.h> #include 
<drm/drm_dp_mst_helper.h> +#include <drm/drm_dsc.h> #include <drm/drm_encoder.h> #include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_probe_helper.h> #include <drm/drm_rect.h> #include <drm/drm_vblank.h> +#include <drm/drm_vblank_work.h> #include <drm/i915_mei_hdcp_interface.h> #include <media/cec-notifier.h> -#include "i915_drv.h" +#include "i915_vma.h" +#include "i915_vma_types.h" +#include "intel_bios.h" +#include "intel_display.h" +#include "intel_display_power.h" +#include "intel_dpll_mgr.h" +#include "intel_pm_types.h" struct drm_printer; struct __intel_global_objs_state; struct intel_ddi_buf_trans; +struct intel_fbc; +struct intel_connector; /* * Display related stuff @@ -115,7 +126,8 @@ struct intel_fb_view { * bytes for 0/180 degree rotation * pixels for 90/270 degree rotation */ - unsigned int stride; + unsigned int mapping_stride; + unsigned int scanout_stride; } color_plane[4]; }; @@ -194,10 +206,6 @@ struct intel_encoder { void (*update_complete)(struct intel_atomic_state *, struct intel_encoder *, struct intel_crtc *); - void (*pre_disable)(struct intel_atomic_state *, - struct intel_encoder *, - const struct intel_crtc_state *, - const struct drm_connector_state *); void (*disable)(struct intel_atomic_state *, struct intel_encoder *, const struct intel_crtc_state *, @@ -687,6 +695,8 @@ struct intel_plane_state { /* Clear Color Value */ u64 ccval; + + const char *no_fbc_reason; }; struct intel_initial_plane_config { @@ -949,7 +959,6 @@ struct intel_crtc_state { * accordingly. */ #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ -#define PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE (1<<1) /* bigjoiner slave, partial readout */ unsigned long quirks; unsigned fb_bits; /* framebuffers to flip */ @@ -1118,8 +1127,6 @@ struct intel_crtc_state { bool crc_enabled; - bool enable_fbc; - bool double_wide; int pbn; @@ -1241,6 +1248,9 @@ struct intel_crtc_state { u8 link_count; u8 pixel_overlap; } splitter; + + /* for loading single buffered registers during vblank */ + struct drm_vblank_work vblank_work; }; enum intel_pipe_crc_source { @@ -1325,6 +1335,9 @@ struct intel_crtc { /* scalers available on this crtc */ int num_scalers; + /* for loading single buffered registers during vblank */ + struct pm_qos_request vblank_pm_qos; + #ifdef CONFIG_DEBUG_FS struct intel_pipe_crc pipe_crc; #endif @@ -1335,8 +1348,6 @@ struct intel_plane { enum i9xx_plane_id i9xx_plane; enum plane_id id; enum pipe pipe; - bool has_fbc; - bool has_ccs; bool need_async_flip_disable_wa; u32 frontbuffer_bit; @@ -1344,6 +1355,8 @@ struct intel_plane { u32 base, cntl, size; } cursor; + struct intel_fbc *fbc; + /* * NOTE: Do not place new plane state fields here (e.g., when adding * new plane properties). 
New runtime state should now be placed in @@ -1362,11 +1375,17 @@ struct intel_plane { unsigned int (*max_stride)(struct intel_plane *plane, u32 pixel_format, u64 modifier, unsigned int rotation); - void (*update_plane)(struct intel_plane *plane, + /* Write all non-self arming plane registers */ + void (*update_noarm)(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); - void (*disable_plane)(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state); + /* Write all self-arming plane registers */ + void (*update_arm)(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); + /* Disable the plane, must arm */ + void (*disable_arm)(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state); bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe); int (*check_plane)(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state); @@ -1563,6 +1582,8 @@ struct intel_dp { int num_sink_rates; int sink_rates[DP_MAX_SUPPORTED_RATES]; bool use_rate_select; + /* Max sink lane count as reported by DP_MAX_LANE_COUNT */ + int max_sink_lane_count; /* intersection of source and sink rates */ int num_common_rates; int common_rates[DP_MAX_SUPPORTED_RATES]; @@ -1640,6 +1661,9 @@ struct intel_dp { struct intel_dp_pcon_frl frl; struct intel_psr psr; + + /* When we last wrote the OUI for eDP */ + unsigned long last_oui_write; }; enum lspcon_vendor { @@ -1757,35 +1781,6 @@ vlv_pipe_to_channel(enum pipe pipe) } } -static inline bool intel_pipe_valid(struct drm_i915_private *i915, enum pipe pipe) -{ - return (pipe >= 0 && - pipe < ARRAY_SIZE(i915->pipe_to_crtc_mapping) && - INTEL_INFO(i915)->pipe_mask & BIT(pipe) && - i915->pipe_to_crtc_mapping[pipe]); -} - -static inline struct intel_crtc * -intel_get_first_crtc(struct drm_i915_private *dev_priv) -{ - return to_intel_crtc(drm_crtc_from_index(&dev_priv->drm, 0)); -} - -static inline struct intel_crtc * -intel_get_crtc_for_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) -{ - /* pipe_to_crtc_mapping may have hole on any of 3 display pipe system */ - drm_WARN_ON(&dev_priv->drm, - !(INTEL_INFO(dev_priv)->pipe_mask & BIT(pipe))); - return dev_priv->pipe_to_crtc_mapping[pipe]; -} - -static inline struct intel_crtc * -intel_get_crtc_for_plane(struct drm_i915_private *dev_priv, enum i9xx_plane_id plane) -{ - return dev_priv->plane_to_crtc_mapping[plane]; -} - struct intel_load_detect_pipe { struct drm_atomic_state *restore_state; }; @@ -1895,11 +1890,7 @@ dp_to_lspcon(struct intel_dp *intel_dp) return &dp_to_dig_port(intel_dp)->lspcon; } -static inline struct drm_i915_private * -dp_to_i915(struct intel_dp *intel_dp) -{ - return to_i915(dp_to_dig_port(intel_dp)->base.base.dev); -} +#define dp_to_i915(__intel_dp) to_i915(dp_to_dig_port(__intel_dp)->base.base.dev) #define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \ (intel_dp)->psr.source_support) @@ -2003,33 +1994,6 @@ intel_crtc_needs_modeset(const struct intel_crtc_state *crtc_state) return drm_atomic_crtc_needs_modeset(&crtc_state->uapi); } -static inline void -intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) -{ - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - - drm_crtc_wait_one_vblank(&crtc->base); -} - -static inline void -intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, enum pipe pipe) -{ - const struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - - 
if (crtc->active) - intel_wait_for_vblank(dev_priv, pipe); -} - -static inline bool intel_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier) -{ - return DISPLAY_VER(i915) >= 13 && modifier != DRM_FORMAT_MOD_LINEAR; -} - -static inline bool intel_fb_uses_dpt(const struct drm_framebuffer *fb) -{ - return fb && intel_modifier_uses_dpt(to_i915(fb->dev), fb->modifier); -} - static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state) { return i915_ggtt_offset(plane_state->ggtt_vma); @@ -2041,20 +2005,4 @@ to_intel_frontbuffer(struct drm_framebuffer *fb) return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL; } -static inline bool is_ccs_modifier(u64 modifier) -{ - return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || - modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC || - modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS || - modifier == I915_FORMAT_MOD_Y_TILED_CCS || - modifier == I915_FORMAT_MOD_Yf_TILED_CCS; -} - -static inline bool is_gen12_ccs_modifier(u64 modifier) -{ - return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || - modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC || - modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS; -} - #endif /* __INTEL_DISPLAY_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index 2dc9d632969d..a69b28d65a9b 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -45,8 +45,10 @@ #define GEN12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE -#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 12) -#define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 12) +#define GEN13_DMC_MAX_FW_SIZE 0x20000 + +#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 14) +#define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 14) MODULE_FIRMWARE(ADLP_DMC_PATH); #define ADLS_DMC_PATH DMC_PATH(adls, 2, 01) @@ -596,7 +598,7 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv, continue; offset = readcount + dmc->dmc_info[id].dmc_offset * 4; - if (fw->size - offset < 0) { + if (offset > fw->size) { drm_err(&dev_priv->drm, "Reading beyond the fw_size\n"); continue; } @@ -682,7 +684,7 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv) if (IS_ALDERLAKE_P(dev_priv)) { dmc->fw_path = ADLP_DMC_PATH; dmc->required_version = ADLP_DMC_VERSION_REQUIRED; - dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE; + dmc->max_fw_size = GEN13_DMC_MAX_FW_SIZE; } else if (IS_ALDERLAKE_S(dev_priv)) { dmc->fw_path = ADLS_DMC_PATH; dmc->required_version = ADLS_DMC_VERSION_REQUIRED; diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h index c3c00ff03869..b20f3441ca60 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.h +++ b/drivers/gpu/drm/i915/display/intel_dmc.h @@ -20,6 +20,8 @@ enum { DMC_FW_MAIN = 0, DMC_FW_PIPEA, DMC_FW_PIPEB, + DMC_FW_PIPEC, + DMC_FW_PIPED, DMC_FW_MAX }; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index be883469d2fc..b5e2508db1cf 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -29,6 +29,7 @@ #include <linux/i2c.h> #include <linux/notifier.h> #include <linux/slab.h> +#include <linux/timekeeping.h> #include <linux/types.h> #include <asm/byteorder.h> @@ -46,6 +47,7 @@ #include "intel_audio.h" #include "intel_backlight.h" #include "intel_connector.h" +#include "intel_crtc.h" #include "intel_ddi.h" #include "intel_de.h" #include "intel_display_types.h" @@ -127,7 +129,7 @@ static void 
intel_dp_set_default_sink_rates(struct intel_dp *intel_dp) } /* update sink rates from dpcd */ -static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) +static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp) { static const int dp_rates[] = { 162000, 270000, 540000, 810000 @@ -197,6 +199,54 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) intel_dp->num_sink_rates = i; } +static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) +{ + struct intel_connector *connector = intel_dp->attached_connector; + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *encoder = &intel_dig_port->base; + + intel_dp_set_dpcd_sink_rates(intel_dp); + + if (intel_dp->num_sink_rates) + return; + + drm_err(&dp_to_i915(intel_dp)->drm, + "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD with no link rates, using defaults\n", + connector->base.base.id, connector->base.name, + encoder->base.base.id, encoder->base.name); + + intel_dp_set_default_sink_rates(intel_dp); +} + +static void intel_dp_set_default_max_sink_lane_count(struct intel_dp *intel_dp) +{ + intel_dp->max_sink_lane_count = 1; +} + +static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp) +{ + struct intel_connector *connector = intel_dp->attached_connector; + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *encoder = &intel_dig_port->base; + + intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); + + switch (intel_dp->max_sink_lane_count) { + case 1: + case 2: + case 4: + return; + } + + drm_err(&dp_to_i915(intel_dp)->drm, + "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD max lane count (%d), using default\n", + connector->base.base.id, connector->base.name, + encoder->base.base.id, encoder->base.name, + intel_dp->max_sink_lane_count); + + intel_dp_set_default_max_sink_lane_count(intel_dp); +} + /* Get length of rates array potentially limited by max_rate. 
*/ static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate) { @@ -219,10 +269,19 @@ static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp, intel_dp->num_common_rates, max_rate); } +static int intel_dp_common_rate(struct intel_dp *intel_dp, int index) +{ + if (drm_WARN_ON(&dp_to_i915(intel_dp)->drm, + index < 0 || index >= intel_dp->num_common_rates)) + return 162000; + + return intel_dp->common_rates[index]; +} + /* Theoretical max between source and sink */ static int intel_dp_max_common_rate(struct intel_dp *intel_dp) { - return intel_dp->common_rates[intel_dp->num_common_rates - 1]; + return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1); } /* Theoretical max between source and sink */ @@ -230,7 +289,7 @@ static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); int source_max = dig_port->max_lanes; - int sink_max = drm_dp_max_lane_count(intel_dp->dpcd); + int sink_max = intel_dp->max_sink_lane_count; int fia_max = intel_tc_port_fia_max_lane_count(dig_port); int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps); @@ -242,7 +301,15 @@ static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) int intel_dp_max_lane_count(struct intel_dp *intel_dp) { - return intel_dp->max_link_lane_count; + switch (intel_dp->max_link_lane_count) { + case 1: + case 2: + case 4: + return intel_dp->max_link_lane_count; + default: + MISSING_CASE(intel_dp->max_link_lane_count); + return 1; + } } /* @@ -554,13 +621,13 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, if (index > 0) { if (intel_dp_is_edp(intel_dp) && !intel_dp_can_link_train_fallback_for_edp(intel_dp, - intel_dp->common_rates[index - 1], + intel_dp_common_rate(intel_dp, index - 1), lane_count)) { drm_dbg_kms(&i915->drm, "Retrying Link training for eDP with same parameters\n"); return 0; } - intel_dp->max_link_rate = intel_dp->common_rates[index - 1]; + intel_dp->max_link_rate = intel_dp_common_rate(intel_dp, index - 1); intel_dp->max_link_lane_count = lane_count; } else if (lane_count > 1) { if (intel_dp_is_edp(intel_dp) && @@ -1000,14 +1067,11 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp) int intel_dp_max_link_rate(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); int len; len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate); - if (drm_WARN_ON(&i915->drm, len <= 0)) - return 162000; - return intel_dp->common_rates[len - 1]; + return intel_dp_common_rate(intel_dp, len - 1); } int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) @@ -1204,7 +1268,7 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, output_bpp); for (i = 0; i < intel_dp->num_common_rates; i++) { - link_rate = intel_dp->common_rates[i]; + link_rate = intel_dp_common_rate(intel_dp, i); if (link_rate < limits->min_rate || link_rate > limits->max_rate) continue; @@ -1283,7 +1347,7 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, else vdsc_cfg->slice_height = 2; - ret = intel_dsc_compute_params(encoder, crtc_state); + ret = intel_dsc_compute_params(crtc_state); if (ret) return ret; @@ -1452,17 +1516,10 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, &pipe_config->hw.adjusted_mode; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct link_config_limits limits; - int common_len; int ret; - common_len = intel_dp_common_len_rate_limit(intel_dp, - intel_dp->max_link_rate); 
- - /* No common link rates between source and sink */ - drm_WARN_ON(encoder->base.dev, common_len <= 0); - - limits.min_rate = intel_dp->common_rates[0]; - limits.max_rate = intel_dp->common_rates[common_len - 1]; + limits.min_rate = intel_dp_common_rate(intel_dp, 0); + limits.max_rate = intel_dp_max_link_rate(intel_dp); limits.min_lane_count = 1; limits.max_lane_count = intel_dp_max_lane_count(intel_dp); @@ -1955,6 +2012,16 @@ intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful) if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0) drm_err(&i915->drm, "Failed to write source OUI\n"); + + intel_dp->last_oui_write = jiffies; +} + +void intel_dp_wait_source_oui(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + drm_dbg_kms(&i915->drm, "Performing OUI wait\n"); + wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, 30); } /* If the device supports it, try to set the power state appropriately */ @@ -2143,6 +2210,18 @@ static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp) return max_frl_rate; } +static bool +intel_dp_pcon_is_frl_trained(struct intel_dp *intel_dp, + u8 max_frl_bw_mask, u8 *frl_trained_mask) +{ + if (drm_dp_pcon_hdmi_link_active(&intel_dp->aux) && + drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, frl_trained_mask) == DP_PCON_HDMI_MODE_FRL && + *frl_trained_mask >= max_frl_bw_mask) + return true; + + return false; +} + static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) { #define TIMEOUT_FRL_READY_MS 500 @@ -2153,10 +2232,6 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) u8 max_frl_bw_mask = 0, frl_trained_mask; bool is_active; - ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux); - if (ret < 0) - return ret; - max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw; drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw); @@ -2168,6 +2243,12 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) if (max_frl_bw <= 0) return -EINVAL; + max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw); + drm_dbg(&i915->drm, "MAX_FRL_BW_MASK = %u\n", max_frl_bw_mask); + + if (intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask)) + goto frl_trained; + ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false); if (ret < 0) return ret; @@ -2177,7 +2258,6 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) if (!is_active) return -ETIMEDOUT; - max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw); ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw, DP_PCON_ENABLE_SEQUENTIAL_LINK); if (ret < 0) @@ -2193,19 +2273,15 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) * Wait for FRL to be completed * Check if the HDMI Link is up and active. 
*/ - wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux) == true, TIMEOUT_HDMI_LINK_ACTIVE_MS); + wait_for(is_active = + intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask), + TIMEOUT_HDMI_LINK_ACTIVE_MS); if (!is_active) return -ETIMEDOUT; - /* Verify HDMI Link configuration shows FRL Mode */ - if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) != - DP_PCON_HDMI_MODE_FRL) { - drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n"); - return -EINVAL; - } - drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask); - +frl_trained: + drm_dbg(&i915->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask); intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask); intel_dp->frl.is_trained = true; drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps); @@ -2223,6 +2299,28 @@ static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp) return false; } +static +int intel_dp_pcon_set_tmds_mode(struct intel_dp *intel_dp) +{ + int ret; + u8 buf = 0; + + /* Set PCON source control mode */ + buf |= DP_PCON_ENABLE_SOURCE_CTL_MODE; + + ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); + if (ret < 0) + return ret; + + /* Set HDMI LINK ENABLE */ + buf |= DP_PCON_ENABLE_HDMI_LINK; + ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); + if (ret < 0) + return ret; + + return 0; +} + void intel_dp_check_frl_training(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); @@ -2241,7 +2339,7 @@ void intel_dp_check_frl_training(struct intel_dp *intel_dp) int ret, mode; drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n"); - ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux); + ret = intel_dp_pcon_set_tmds_mode(intel_dp); mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL); if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS) @@ -2603,6 +2701,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) intel_dp->use_rate_select = true; else intel_dp_set_sink_rates(intel_dp); + intel_dp_set_max_sink_lane_count(intel_dp); intel_dp_set_common_rates(intel_dp); intel_dp_reset_max_link_params(intel_dp); @@ -2648,6 +2747,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) drm_dp_is_branch(intel_dp->dpcd)); intel_dp_set_sink_rates(intel_dp); + intel_dp_set_max_sink_lane_count(intel_dp); intel_dp_set_common_rates(intel_dp); } @@ -3806,7 +3906,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, to_intel_crtc_state(crtc->base.state); /* Keep underrun reporting disabled until things are stable */ - intel_wait_for_vblank(dev_priv, crtc->pipe); + intel_crtc_wait_for_next_vblank(crtc); intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); if (crtc_state->has_pch_encoder) @@ -4960,7 +5060,7 @@ static void intel_dp_modeset_retry_work_fn(struct work_struct *work) DRM_MODE_LINK_STATUS_BAD); mutex_unlock(&connector->dev->mode_config.mutex); /* Send Hotplug uevent so userspace can reprobe */ - drm_kms_helper_hotplug_event(connector->dev); + drm_kms_helper_connector_hotplug_event(connector); } bool @@ -5014,6 +5114,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, intel_dp_set_source_rates(intel_dp); intel_dp_set_default_sink_rates(intel_dp); + intel_dp_set_default_max_sink_lane_count(intel_dp); intel_dp_set_common_rates(intel_dp); intel_dp_reset_max_link_params(intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_dp.h 
b/drivers/gpu/drm/i915/display/intel_dp.h index ce229026dc91..b64145a3869a 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -119,4 +119,6 @@ void intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); void intel_dp_phy_test(struct intel_encoder *encoder); +void intel_dp_wait_source_oui(struct intel_dp *intel_dp); + #endif /* __INTEL_DP_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 569d17b4d00f..97cf3cac0105 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -34,8 +34,10 @@ * for some reason. */ +#include "i915_drv.h" #include "intel_backlight.h" #include "intel_display_types.h" +#include "intel_dp.h" #include "intel_dp_aux_backlight.h" /* TODO: @@ -106,6 +108,8 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector) int ret; u8 tcon_cap[4]; + intel_dp_wait_source_oui(intel_dp); + ret = drm_dp_dpcd_read(aux, INTEL_EDP_HDR_TCON_CAP0, tcon_cap, sizeof(tcon_cap)); if (ret != sizeof(tcon_cap)) return false; @@ -204,6 +208,8 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state, int ret; u8 old_ctrl, ctrl; + intel_dp_wait_source_oui(intel_dp); + ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl); if (ret != 1) { drm_err(&i915->drm, "Failed to read current backlight control mode: %d\n", ret); @@ -282,6 +288,12 @@ intel_dp_aux_vesa_set_backlight(const struct drm_connector_state *conn_state, u3 struct intel_panel *panel = &connector->panel; struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); + if (!panel->backlight.edp.vesa.info.aux_set) { + const u32 pwm_level = intel_backlight_level_to_pwm(connector, level); + + intel_backlight_set_pwm_level(conn_state, pwm_level); + } + drm_edp_backlight_set_level(&intel_dp->aux, &panel->backlight.edp.vesa.info, level); } @@ -293,6 +305,18 @@ intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state, struct intel_panel *panel = &connector->panel; struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); + if (!panel->backlight.edp.vesa.info.aux_enable) { + u32 pwm_level; + + if (!panel->backlight.edp.vesa.info.aux_set) + pwm_level = intel_backlight_level_to_pwm(connector, level); + else + pwm_level = intel_backlight_invert_pwm_level(connector, + panel->backlight.pwm_level_max); + + panel->backlight.pwm_funcs->enable(crtc_state, conn_state, pwm_level); + } + drm_edp_backlight_enable(&intel_dp->aux, &panel->backlight.edp.vesa.info, level); } @@ -304,6 +328,10 @@ static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); drm_edp_backlight_disable(&intel_dp->aux, &panel->backlight.edp.vesa.info); + + if (!panel->backlight.edp.vesa.info.aux_enable) + panel->backlight.pwm_funcs->disable(old_conn_state, + intel_backlight_invert_pwm_level(connector, 0)); } static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, enum pipe pipe) @@ -321,14 +349,36 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, if (ret < 0) return ret; - panel->backlight.max = panel->backlight.edp.vesa.info.max; - panel->backlight.min = 0; - if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) { - panel->backlight.level = current_level; - panel->backlight.enabled = 
panel->backlight.level != 0; + if (!panel->backlight.edp.vesa.info.aux_set || !panel->backlight.edp.vesa.info.aux_enable) { + ret = panel->backlight.pwm_funcs->setup(connector, pipe); + if (ret < 0) { + drm_err(&i915->drm, + "Failed to setup PWM backlight controls for eDP backlight: %d\n", + ret); + return ret; + } + } + + if (panel->backlight.edp.vesa.info.aux_set) { + panel->backlight.max = panel->backlight.edp.vesa.info.max; + panel->backlight.min = 0; + if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) { + panel->backlight.level = current_level; + panel->backlight.enabled = panel->backlight.level != 0; + } else { + panel->backlight.level = panel->backlight.max; + panel->backlight.enabled = false; + } } else { - panel->backlight.level = panel->backlight.max; - panel->backlight.enabled = false; + panel->backlight.max = panel->backlight.pwm_level_max; + panel->backlight.min = panel->backlight.pwm_level_min; + if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_PWM) { + panel->backlight.level = panel->backlight.pwm_funcs->get(connector, pipe); + panel->backlight.enabled = panel->backlight.pwm_enabled; + } else { + panel->backlight.level = panel->backlight.max; + panel->backlight.enabled = false; + } } return 0; @@ -340,12 +390,7 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector) struct intel_dp *intel_dp = intel_attached_dp(connector); struct drm_i915_private *i915 = dp_to_i915(intel_dp); - /* TODO: We currently only support AUX only backlight configurations, not backlights which - * require a mix of PWM and AUX controls to work. In the mean time, these machines typically - * work just fine using normal PWM controls anyway. - */ - if ((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) && - drm_edp_backlight_supported(intel_dp->edp_dpcd)) { + if (drm_edp_backlight_supported(intel_dp->edp_dpcd)) { drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n"); return true; } @@ -417,11 +462,17 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector) } /* - * A lot of eDP panels in the wild will report supporting both the - * Intel proprietary backlight control interface, and the VESA - * backlight control interface. Many of these panels are liars though, - * and will only work with the Intel interface. So, always probe for - * that first. + * Since Intel has their own backlight control interface, the majority of machines out there + * using DPCD backlight controls with Intel GPUs will be using this interface as opposed to + * the VESA interface. However, other GPUs (such as Nvidia's) will always use the VESA + * interface. This means that there's quite a number of panels out there that will advertise + * support for both interfaces, primarily systems with Intel/Nvidia hybrid GPU setups. + * + * There's a catch to this though: on many panels that advertise support for both + * interfaces, the VESA backlight interface will stop working once we've programmed the + * panel with Intel's OUI - which is also required for us to be able to detect Intel's + * backlight interface at all. This means that the only sensible way for us to detect both + * interfaces is to probe for Intel's first, and VESA's second. 
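+ *
+ * (Concretely, as a sketch of the failure the ordering avoids: probing the
+ * VESA interface first on such a panel could succeed, and the OUI write
+ * needed afterwards to check for Intel's interface would then break the
+ * VESA controls we had just chosen.)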
*/ if (try_intel_interface && intel_dp_aux_supports_hdr_backlight(connector)) { drm_dbg_kms(dev, "Using Intel proprietary eDP backlight controls\n"); diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 85676c953e0a..9451f336f28f 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -21,11 +21,11 @@ * IN THE SOFTWARE. */ +#include "i915_drv.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dp_link_training.h" - static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp) { memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps)); @@ -301,7 +301,10 @@ static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp, static bool has_per_lane_signal_levels(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy) { - return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy) || + DISPLAY_VER(i915) >= 11; } /* 128b/132b */ @@ -683,15 +686,6 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp, return true; } -static void intel_dp_link_training_clock_recovery_delay(struct intel_dp *intel_dp, - enum drm_dp_phy dp_phy) -{ - if (dp_phy == DP_PHY_DPRX) - drm_dp_link_train_clock_recovery_delay(&intel_dp->aux, intel_dp->dpcd); - else - drm_dp_lttpr_link_train_clock_recovery_delay(); -} - static bool intel_dp_adjust_request_changed(const struct intel_crtc_state *crtc_state, const u8 old_link_status[DP_LINK_STATUS_SIZE], const u8 new_link_status[DP_LINK_STATUS_SIZE]) @@ -750,6 +744,11 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]; bool max_vswing_reached = false; char phy_name[10]; + int delay_us; + + delay_us = drm_dp_read_clock_recovery_delay(&intel_dp->aux, + intel_dp->dpcd, dp_phy, + intel_dp_is_uhbr(crtc_state)); intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)); @@ -777,7 +776,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, voltage_tries = 1; for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) { - intel_dp_link_training_clock_recovery_delay(intel_dp, dp_phy); + usleep_range(delay_us, 2 * delay_us); if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy, link_status) < 0) { @@ -895,19 +894,6 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp, return DP_TRAINING_PATTERN_2; } -static void -intel_dp_link_training_channel_equalization_delay(struct intel_dp *intel_dp, - enum drm_dp_phy dp_phy) -{ - if (dp_phy == DP_PHY_DPRX) { - drm_dp_link_train_channel_eq_delay(&intel_dp->aux, intel_dp->dpcd); - } else { - const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy); - - drm_dp_lttpr_link_train_channel_eq_delay(&intel_dp->aux, phy_caps); - } -} - /* * Perform the link training channel equalization phase on the given DP PHY * using one of training pattern 2, 3 or 4 depending on the source and @@ -925,6 +911,11 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]; bool channel_eq = false; char phy_name[10]; + int delay_us; + + delay_us = drm_dp_read_channel_eq_delay(&intel_dp->aux, + intel_dp->dpcd, dp_phy, + intel_dp_is_uhbr(crtc_state)); intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)); @@ -944,8 +935,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, } for (tries = 0; tries < 5; 
tries++) { - intel_dp_link_training_channel_equalization_delay(intel_dp, - dp_phy); + usleep_range(delay_us, 2 * delay_us); + if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy, link_status) < 0) { drm_err(&i915->drm, diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 89d701e8ae9d..b8bc7d397c81 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -231,6 +231,7 @@ intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector, struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct drm_connector_list_iter connector_list_iter; struct intel_connector *connector_iter; + int ret = 0; if (DISPLAY_VER(dev_priv) < 12) return 0; @@ -243,7 +244,6 @@ intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector, struct intel_digital_connector_state *conn_iter_state; struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; - int ret; if (connector_iter->mst_port != connector->mst_port || connector_iter == connector) @@ -252,8 +252,8 @@ intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector, conn_iter_state = intel_atomic_get_digital_connector_state(state, connector_iter); if (IS_ERR(conn_iter_state)) { - drm_connector_list_iter_end(&connector_list_iter); - return PTR_ERR(conn_iter_state); + ret = PTR_ERR(conn_iter_state); + break; } if (!conn_iter_state->base.crtc) @@ -262,20 +262,18 @@ intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector, crtc = to_intel_crtc(conn_iter_state->base.crtc); crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); if (IS_ERR(crtc_state)) { - drm_connector_list_iter_end(&connector_list_iter); - return PTR_ERR(crtc_state); + ret = PTR_ERR(crtc_state); + break; } ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); - if (ret) { - drm_connector_list_iter_end(&connector_list_iter); - return ret; - } + if (ret) + break; crtc_state->uapi.mode_changed = true; } drm_connector_list_iter_end(&connector_list_iter); - return 0; + return ret; } static int @@ -348,16 +346,6 @@ static void wait_for_act_sent(struct intel_encoder *encoder, drm_dp_check_act_status(&intel_dp->mst_mgr); } -static void intel_mst_pre_disable_dp(struct intel_atomic_state *state, - struct intel_encoder *encoder, - const struct intel_crtc_state *old_crtc_state, - const struct drm_connector_state *old_conn_state) -{ - if (old_crtc_state->has_audio) - intel_audio_codec_disable(encoder, old_crtc_state, - old_conn_state); -} - static void intel_mst_disable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, @@ -382,6 +370,9 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state, if (ret) { drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret); } + if (old_crtc_state->has_audio) + intel_audio_codec_disable(encoder, + old_crtc_state, old_conn_state); } static void intel_mst_post_disable_dp(struct intel_atomic_state *state, @@ -916,7 +907,6 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe intel_encoder->compute_config = intel_dp_mst_compute_config; intel_encoder->compute_config_late = intel_dp_mst_compute_config_late; - intel_encoder->pre_disable = intel_mst_pre_disable_dp; intel_encoder->disable = intel_mst_disable_dp; intel_encoder->post_disable = intel_mst_post_disable_dp; intel_encoder->update_pipe = intel_ddi_update_pipe; diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c 
b/drivers/gpu/drm/i915/display/intel_dpll.c
index 04a7af8340ca..1ce0c171f4fb 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -1823,7 +1823,7 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
		     const struct dpll *dpll)
 {
-	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
	struct intel_crtc_state *crtc_state;

	crtc_state = intel_crtc_state_alloc(crtc);
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 0a7e04db04be..fc8fda77483a 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -26,6 +26,7 @@
 #include "intel_dpio_phy.h"
 #include "intel_dpll.h"
 #include "intel_dpll_mgr.h"
+#include "intel_pch_refclk.h"
 #include "intel_tc.h"

 /**
@@ -3740,7 +3741,7 @@ static void combo_pll_enable(struct drm_i915_private *dev_priv,
		 * domain.
		 */
		pll->wakeref = intel_display_power_get(dev_priv,
-						       POWER_DOMAIN_DPLL_DC_OFF);
+						       POWER_DOMAIN_DC_OFF);

	icl_pll_power_enable(dev_priv, pll, enable_reg);

@@ -3847,7 +3848,7 @@ static void combo_pll_disable(struct drm_i915_private *dev_priv,

	if (IS_JSL_EHL(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4)
-		intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
+		intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
					pll->wakeref);
 }

@@ -4231,7 +4232,7 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915,
	if (IS_JSL_EHL(i915) && pll->on &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {
		pll->wakeref = intel_display_power_get(i915,
-						       POWER_DOMAIN_DPLL_DC_OFF);
+						       POWER_DOMAIN_DC_OFF);
	}

	pll->state.pipe_mask = 0;
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 2f59d863be4c..ef2889753807 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -27,7 +27,6 @@

 #include <linux/types.h>

-#include "intel_display.h"
 #include "intel_wakeref.h"

 /*FIXME: Move this to a more appropriate place. */
@@ -37,6 +36,7 @@
	(void) (&__a == &__b);		\
	__a > __b ? (__a - __b) : (__b - __a); })

+enum tc_port;
 struct drm_device;
 struct drm_i915_private;
 struct intel_atomic_state;
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index 8f7b1f7534a4..963ca7155b06 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -167,6 +167,64 @@ void intel_dpt_unpin(struct i915_address_space *vm)
	i915_vma_put(dpt->vma);
 }

+/**
+ * intel_dpt_resume - restore the memory mapping for all DPT FBs during system resume
+ * @i915: device instance
+ *
+ * Restore the memory mapping during system resume for all framebuffers which
+ * are mapped to HW via a GGTT->DPT page table. The content of these page
+ * tables is not stored in the hibernation image during S4 and S3RST->S4
+ * transitions, so here we reprogram the PTE entries in those tables.
+ *
+ * This function must be called after the mappings in GGTT have been restored
+ * by calling i915_ggtt_resume().
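+ *
+ * Ordering sketch (illustrative only; the caller shown here is an
+ * assumption, not something mandated by this interface):
+ *
+ *	i915_ggtt_resume(ggtt);		/* restore GGTT mappings first */
+ *	intel_dpt_resume(i915);		/* then rewrite the DPT PTEs */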
+ */
+void intel_dpt_resume(struct drm_i915_private *i915)
+{
+	struct drm_framebuffer *drm_fb;
+
+	if (!HAS_DISPLAY(i915))
+		return;
+
+	mutex_lock(&i915->drm.mode_config.fb_lock);
+	drm_for_each_fb(drm_fb, &i915->drm) {
+		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
+
+		if (fb->dpt_vm)
+			i915_ggtt_resume_vm(fb->dpt_vm);
+	}
+	mutex_unlock(&i915->drm.mode_config.fb_lock);
+}
+
+/**
+ * intel_dpt_suspend - suspend the memory mapping for all DPT FBs during system suspend
+ * @i915: device instance
+ *
+ * Suspend the memory mapping during system suspend for all framebuffers which
+ * are mapped to HW via a GGTT->DPT page table.
+ *
+ * This function must be called before the mappings in GGTT are suspended
+ * by calling i915_ggtt_suspend().
+ */
+void intel_dpt_suspend(struct drm_i915_private *i915)
+{
+	struct drm_framebuffer *drm_fb;
+
+	if (!HAS_DISPLAY(i915))
+		return;
+
+	mutex_lock(&i915->drm.mode_config.fb_lock);
+
+	drm_for_each_fb(drm_fb, &i915->drm) {
+		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
+
+		if (fb->dpt_vm)
+			i915_ggtt_suspend_vm(fb->dpt_vm);
+	}
+
+	mutex_unlock(&i915->drm.mode_config.fb_lock);
+}
+
 struct i915_address_space *
 intel_dpt_create(struct intel_framebuffer *fb)
 {
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.h b/drivers/gpu/drm/i915/display/intel_dpt.h
index 45142b8f849f..e18a9f767b11 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.h
+++ b/drivers/gpu/drm/i915/display/intel_dpt.h
@@ -6,6 +6,8 @@
 #ifndef __INTEL_DPT_H__
 #define __INTEL_DPT_H__

+struct drm_i915_private;
+
 struct i915_address_space;
 struct i915_vma;
 struct intel_framebuffer;
@@ -13,6 +15,8 @@ struct intel_framebuffer;
 void intel_dpt_destroy(struct i915_address_space *vm);
 struct i915_vma *intel_dpt_pin(struct i915_address_space *vm);
 void intel_dpt_unpin(struct i915_address_space *vm);
+void intel_dpt_suspend(struct drm_i915_private *i915);
+void intel_dpt_resume(struct drm_i915_private *i915);

 struct i915_address_space *
 intel_dpt_create(struct intel_framebuffer *fb);
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 62a8a69f9f5d..83a69a4a4fea 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -100,7 +100,7 @@ void intel_dsb_indexed_reg_write(const struct intel_crtc_state *crtc_state,
	u32 reg_val;

	if (!dsb) {
-		intel_de_write(dev_priv, reg, val);
+		intel_de_write_fw(dev_priv, reg, val);
		return;
	}
	buf = dsb->cmd_buf;
@@ -177,7 +177,7 @@ void intel_dsb_reg_write(const struct intel_crtc_state *crtc_state,

	dsb = crtc_state->dsb;
	if (!dsb) {
-		intel_de_write(dev_priv, reg, val);
+		intel_de_write_fw(dev_priv, reg, val);
		return;
	}
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index 6b0301ba046e..a50422e03a7e 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -4,6 +4,8 @@
  */

 #include <drm/drm_mipi_dsi.h>
+
+#include "i915_drv.h"
 #include "intel_dsi.h"
 #include "intel_panel.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index fbc40ffdc02e..a3a906cb097e 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -166,57 +166,15 @@ static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder)
	return enc_to_intel_dsi(encoder)->ports;
 }

-/* icl_dsi.c */
-void icl_dsi_init(struct drm_i915_private *dev_priv);
-void icl_dsi_frame_update(struct
intel_crtc_state *crtc_state); - -/* intel_dsi.c */ int intel_dsi_bitrate(const struct intel_dsi *intel_dsi); int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi); enum drm_panel_orientation intel_dsi_get_panel_orientation(struct intel_connector *connector); - -/* vlv_dsi.c */ -void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port); -enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt); int intel_dsi_get_modes(struct drm_connector *connector); enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode); struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi, const struct mipi_dsi_host_ops *funcs, enum port port); -void vlv_dsi_init(struct drm_i915_private *dev_priv); - -/* vlv_dsi_pll.c */ -int vlv_dsi_pll_compute(struct intel_encoder *encoder, - struct intel_crtc_state *config); -void vlv_dsi_pll_enable(struct intel_encoder *encoder, - const struct intel_crtc_state *config); -void vlv_dsi_pll_disable(struct intel_encoder *encoder); -u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, - struct intel_crtc_state *config); -void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); - -bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv); -int bxt_dsi_pll_compute(struct intel_encoder *encoder, - struct intel_crtc_state *config); -void bxt_dsi_pll_enable(struct intel_encoder *encoder, - const struct intel_crtc_state *config); -void bxt_dsi_pll_disable(struct intel_encoder *encoder); -u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, - struct intel_crtc_state *config); -void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); - -void assert_dsi_pll_enabled(struct drm_i915_private *i915); -void assert_dsi_pll_disabled(struct drm_i915_private *i915); - -/* intel_dsi_vbt.c */ -bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id); -void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on); -void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi); -void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, - enum mipi_seq seq_id); -void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec); -void intel_dsi_log_params(struct intel_dsi *intel_dsi); #endif /* _INTEL_DSI_H */ diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c index f61ed82e8867..7d234429e71e 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c @@ -71,6 +71,7 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32 u8 data[2] = {}; enum port port; size_t len = panel->backlight.max > U8_MAX ? 
2 : 1; + unsigned long mode_flags; if (len == 1) { data[0] = level; @@ -81,8 +82,11 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32 for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) { dsi_device = intel_dsi->dsi_hosts[port]->device; + mode_flags = dsi_device->mode_flags; + dsi_device->mode_flags &= ~MIPI_DSI_MODE_LPM; mipi_dsi_dcs_write(dsi_device, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, &data, len); + dsi_device->mode_flags = mode_flags; } } diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index f241bedb8597..0da91849efde 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -41,6 +41,8 @@ #include "i915_drv.h" #include "intel_display_types.h" #include "intel_dsi.h" +#include "intel_dsi_vbt.h" +#include "vlv_dsi.h" #include "vlv_sideband.h" #define MIPI_TRANSFER_MODE_SHIFT 0 diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.h b/drivers/gpu/drm/i915/display/intel_dsi_vbt.h new file mode 100644 index 000000000000..dc642c1fe7ef --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __INTEL_DSI_VBT_H__ +#define __INTEL_DSI_VBT_H__ + +#include <linux/types.h> + +enum mipi_seq; +struct intel_dsi; + +bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id); +void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on); +void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi); +void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, + enum mipi_seq seq_id); +void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec); +void intel_dsi_log_params(struct intel_dsi *intel_dsi); + +#endif /* __INTEL_DSI_VBT_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index cb511b2b7069..23cfe2e5ce2a 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -6,6 +6,7 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_modeset_helper.h> +#include "i915_drv.h" #include "intel_display.h" #include "intel_display_types.h" #include "intel_dpt.h" @@ -13,26 +14,465 @@ #define check_array_bounds(i915, a, i) drm_WARN_ON(&(i915)->drm, (i) >= ARRAY_SIZE(a)) -bool is_ccs_plane(const struct drm_framebuffer *fb, int plane) +/* + * From the Sky Lake PRM: + * "The Color Control Surface (CCS) contains the compression status of + * the cache-line pairs. The compression state of the cache-line pair + * is specified by 2 bits in the CCS. Each CCS cache-line represents + * an area on the main surface of 16 x16 sets of 128 byte Y-tiled + * cache-line-pairs. CCS is always Y tiled." + * + * Since cache line pairs refers to horizontally adjacent cache lines, + * each cache line in the CCS corresponds to an area of 32x16 cache + * lines on the main surface. Since each pixel is 4 bytes, this gives + * us a ratio of one byte in the CCS for each 8x16 pixels in the + * main surface. 
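+ *
+ * For example (worked numbers, assuming a 4 byte/pixel format as above):
+ * a 3840x2160 XRGB8888 main surface needs roughly
+ * (3840 / 8) * (2160 / 16) = 64800 bytes of CCS, which is what the
+ * .hsub = 8, .vsub = 16 entries below encode.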
+ */ +static const struct drm_format_info skl_ccs_formats[] = { + { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, + { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, + { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, + { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, +}; + +/* + * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the + * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles + * in the main surface. With 4 byte pixels and each Y-tile having dimensions of + * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in + * the main surface. + */ +static const struct drm_format_info gen12_ccs_formats[] = { + { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_YUYV, .num_planes = 2, + .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_YVYU, .num_planes = 2, + .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_UYVY, .num_planes = 2, + .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_VYUY, .num_planes = 2, + .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_XYUV8888, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_NV12, .num_planes = 4, + .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 }, + .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_P010, .num_planes = 4, + .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, + .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_P012, .num_planes = 4, + .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, + .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_P016, .num_planes = 4, + .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, + .hsub = 2, .vsub = 2, .is_yuv = true }, +}; + +/* + * Same as gen12_ccs_formats[] above, but with additional surface used + * to pass Clear Color information in plane 2 with 64 bits of data. 
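+ *
+ * Note that plane 2 has .char_per_block = 0 below: it carries only the
+ * fixed 64-bit clear color value described above, not per-pixel data.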
+ */ +static const struct drm_format_info gen12_ccs_cc_formats[] = { + { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, +}; + +struct intel_modifier_desc { + u64 modifier; + struct { + u8 from; + u8 until; + } display_ver; +#define DISPLAY_VER_ALL { 0, -1 } + + const struct drm_format_info *formats; + int format_count; +#define FORMAT_OVERRIDE(format_list) \ + .formats = format_list, \ + .format_count = ARRAY_SIZE(format_list) + + u8 plane_caps; + + struct { + u8 cc_planes:3; + u8 packed_aux_planes:4; + u8 planar_aux_planes:4; + } ccs; +}; + +#define INTEL_PLANE_CAP_CCS_MASK (INTEL_PLANE_CAP_CCS_RC | \ + INTEL_PLANE_CAP_CCS_RC_CC | \ + INTEL_PLANE_CAP_CCS_MC) +#define INTEL_PLANE_CAP_TILING_MASK (INTEL_PLANE_CAP_TILING_X | \ + INTEL_PLANE_CAP_TILING_Y | \ + INTEL_PLANE_CAP_TILING_Yf) +#define INTEL_PLANE_CAP_TILING_NONE 0 + +static const struct intel_modifier_desc intel_modifiers[] = { + { + .modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS, + .display_ver = { 12, 13 }, + .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_MC, + + .ccs.packed_aux_planes = BIT(1), + .ccs.planar_aux_planes = BIT(2) | BIT(3), + + FORMAT_OVERRIDE(gen12_ccs_formats), + }, { + .modifier = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, + .display_ver = { 12, 13 }, + .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_RC, + + .ccs.packed_aux_planes = BIT(1), + + FORMAT_OVERRIDE(gen12_ccs_formats), + }, { + .modifier = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC, + .display_ver = { 12, 13 }, + .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_RC_CC, + + .ccs.cc_planes = BIT(2), + .ccs.packed_aux_planes = BIT(1), + + FORMAT_OVERRIDE(gen12_ccs_cc_formats), + }, { + .modifier = I915_FORMAT_MOD_Yf_TILED_CCS, + .display_ver = { 9, 11 }, + .plane_caps = INTEL_PLANE_CAP_TILING_Yf | INTEL_PLANE_CAP_CCS_RC, + + .ccs.packed_aux_planes = BIT(1), + + FORMAT_OVERRIDE(skl_ccs_formats), + }, { + .modifier = I915_FORMAT_MOD_Y_TILED_CCS, + .display_ver = { 9, 11 }, + .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_RC, + + .ccs.packed_aux_planes = BIT(1), + + FORMAT_OVERRIDE(skl_ccs_formats), + }, { + .modifier = I915_FORMAT_MOD_Yf_TILED, + .display_ver = { 9, 11 }, + .plane_caps = INTEL_PLANE_CAP_TILING_Yf, + }, { + .modifier = I915_FORMAT_MOD_Y_TILED, + .display_ver = { 9, 13 }, + .plane_caps = INTEL_PLANE_CAP_TILING_Y, + }, { + .modifier = I915_FORMAT_MOD_X_TILED, + .display_ver = DISPLAY_VER_ALL, + .plane_caps = INTEL_PLANE_CAP_TILING_X, + }, { + .modifier = DRM_FORMAT_MOD_LINEAR, + .display_ver = DISPLAY_VER_ALL, + }, +}; + +static const struct intel_modifier_desc *lookup_modifier_or_null(u64 modifier) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++) + if (intel_modifiers[i].modifier == modifier) + return &intel_modifiers[i]; + + return NULL; +} + +static const struct intel_modifier_desc *lookup_modifier(u64 modifier) 
+{
+	const struct intel_modifier_desc *md = lookup_modifier_or_null(modifier);
+
+	if (WARN_ON(!md))
+		return &intel_modifiers[0];
+
+	return md;
+}
+
+static const struct drm_format_info *
+lookup_format_info(const struct drm_format_info formats[],
+		   int num_formats, u32 format)
+{
+	int i;
+
+	for (i = 0; i < num_formats; i++) {
+		if (formats[i].format == format)
+			return &formats[i];
+	}
+
+	return NULL;
+}
+
+/**
+ * intel_fb_get_format_info: Get modifier-specific format information
+ * @cmd: FB add command structure
+ *
+ * Returns:
+ * Returns the format information for @cmd->pixel_format specific to @cmd->modifier[0],
+ * or %NULL if the modifier doesn't override the format.
+ */
+const struct drm_format_info *
+intel_fb_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
+{
+	const struct intel_modifier_desc *md = lookup_modifier_or_null(cmd->modifier[0]);
+
+	if (!md || !md->formats)
+		return NULL;
+
+	return lookup_format_info(md->formats, md->format_count, cmd->pixel_format);
+}
+
+static bool plane_caps_contain_any(u8 caps, u8 mask)
+{
+	return caps & mask;
+}
+
+static bool plane_caps_contain_all(u8 caps, u8 mask)
+{
+	return (caps & mask) == mask;
+}
+
+/**
+ * intel_fb_is_ccs_modifier: Check if a modifier is a CCS modifier type
+ * @modifier: Modifier to check
+ *
+ * Returns:
+ * Returns %true if @modifier is a render, render with color clear, or
+ * media compression modifier.
+ */
+bool intel_fb_is_ccs_modifier(u64 modifier)
+{
+	return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps,
+				      INTEL_PLANE_CAP_CCS_MASK);
+}
+
+/**
+ * intel_fb_is_rc_ccs_cc_modifier: Check if a modifier is an RC CCS CC modifier type
+ * @modifier: Modifier to check
+ *
+ * Returns:
+ * Returns %true if @modifier is a render with color clear modifier.
+ */
+bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier)
+{
+	return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps,
+				      INTEL_PLANE_CAP_CCS_RC_CC);
+}
+
+/**
+ * intel_fb_is_mc_ccs_modifier: Check if a modifier is an MC CCS modifier type
+ * @modifier: Modifier to check
+ *
+ * Returns:
+ * Returns %true if @modifier is a media compression modifier.
+ */
+bool intel_fb_is_mc_ccs_modifier(u64 modifier)
+{
+	return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps,
+				      INTEL_PLANE_CAP_CCS_MC);
+}
+
+static bool check_modifier_display_ver_range(const struct intel_modifier_desc *md,
+					     u8 display_ver_from, u8 display_ver_until)
+{
+	return md->display_ver.from <= display_ver_until &&
+	       display_ver_from <= md->display_ver.until;
+}
+
+static bool plane_has_modifier(struct drm_i915_private *i915,
+			       u8 plane_caps,
+			       const struct intel_modifier_desc *md)
+{
+	if (!IS_DISPLAY_VER(i915, md->display_ver.from, md->display_ver.until))
+		return false;
+
+	if (!plane_caps_contain_all(plane_caps, md->plane_caps))
+		return false;
+
+	return true;
+}
+
+/**
+ * intel_fb_plane_get_modifiers: Get the modifiers for the given platform and plane capabilities
+ * @i915: i915 device instance
+ * @plane_caps: capabilities for the plane the modifiers are queried for
+ *
+ * Returns:
+ * Returns the list of modifiers allowed by the @i915 platform and @plane_caps.
+ * The caller must free the returned buffer.
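+ *
+ * A minimal usage sketch (hypothetical caller, error handling elided),
+ * e.g. for a plane that supports only X tiling and linear:
+ *
+ *	u64 *modifiers = intel_fb_plane_get_modifiers(i915, INTEL_PLANE_CAP_TILING_X);
+ *
+ *	drm_universal_plane_init(..., modifiers, ...);
+ *	kfree(modifiers);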
+ */ +u64 *intel_fb_plane_get_modifiers(struct drm_i915_private *i915, + u8 plane_caps) +{ + u64 *list, *p; + int count = 1; /* +1 for invalid modifier terminator */ + int i; + + for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++) { + if (plane_has_modifier(i915, plane_caps, &intel_modifiers[i])) + count++; + } + + list = kmalloc_array(count, sizeof(*list), GFP_KERNEL); + if (drm_WARN_ON(&i915->drm, !list)) + return NULL; + + p = list; + for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++) { + if (plane_has_modifier(i915, plane_caps, &intel_modifiers[i])) + *p++ = intel_modifiers[i].modifier; + } + *p++ = DRM_FORMAT_MOD_INVALID; + + return list; +} + +/** + * intel_fb_plane_supports_modifier: Determine if a modifier is supported by the given plane + * @plane: Plane to check the modifier support for + * @modifier: The modifier to check the support for + * + * Returns: + * %true if the @modifier is supported on @plane. + */ +bool intel_fb_plane_supports_modifier(struct intel_plane *plane, u64 modifier) +{ + int i; + + for (i = 0; i < plane->base.modifier_count; i++) + if (plane->base.modifiers[i] == modifier) + return true; + + return false; +} + +static bool format_is_yuv_semiplanar(const struct intel_modifier_desc *md, + const struct drm_format_info *info) { - if (!is_ccs_modifier(fb->modifier)) + int yuv_planes; + + if (!info->is_yuv) return false; - return plane >= fb->format->num_planes / 2; + if (plane_caps_contain_any(md->plane_caps, INTEL_PLANE_CAP_CCS_MASK)) + yuv_planes = 4; + else + yuv_planes = 2; + + return info->num_planes == yuv_planes; +} + +/** + * intel_format_info_is_yuv_semiplanar: Check if the given format is YUV semiplanar + * @info: format to check + * @modifier: modifier used with the format + * + * Returns: + * %true if @info / @modifier is YUV semiplanar. + */ +bool intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, + u64 modifier) +{ + return format_is_yuv_semiplanar(lookup_modifier(modifier), info); +} + +static u8 ccs_aux_plane_mask(const struct intel_modifier_desc *md, + const struct drm_format_info *format) +{ + if (format_is_yuv_semiplanar(md, format)) + return md->ccs.planar_aux_planes; + else + return md->ccs.packed_aux_planes; +} + +/** + * intel_fb_is_ccs_aux_plane: Check if a framebuffer color plane is a CCS AUX plane + * @fb: Framebuffer + * @color_plane: color plane index to check + * + * Returns: + * Returns %true if @fb's color plane at index @color_plane is a CCS AUX plane. + */ +bool intel_fb_is_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane) +{ + const struct intel_modifier_desc *md = lookup_modifier(fb->modifier); + + return ccs_aux_plane_mask(md, fb->format) & BIT(color_plane); +} + +/** + * intel_fb_is_gen12_ccs_aux_plane: Check if a framebuffer color plane is a GEN12 CCS AUX plane + * @fb: Framebuffer + * @color_plane: color plane index to check + * + * Returns: + * Returns %true if @fb's color plane at index @color_plane is a GEN12 CCS AUX plane. 
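+ *
+ * (For instance, with the packed RGB entries of gen12_ccs_formats[]
+ * plane 0 is the main surface and plane 1 the packed AUX plane, so on
+ * display version 12/13 this returns %true only for @color_plane == 1;
+ * the planar NV12/P01x entries instead use planes 2 and 3 as their
+ * AUX planes.)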
+ */ +static bool intel_fb_is_gen12_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane) +{ + const struct intel_modifier_desc *md = lookup_modifier(fb->modifier); + + return check_modifier_display_ver_range(md, 12, 13) && + ccs_aux_plane_mask(md, fb->format) & BIT(color_plane); } -bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane) +/** + * intel_fb_rc_ccs_cc_plane: Get the CCS CC color plane index for a framebuffer + * @fb: Framebuffer + * + * Returns: + * Returns the index of the color clear plane for @fb, or -1 if @fb is not a + * framebuffer using a render compression/color clear modifier. + */ +int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb) { - return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane); + const struct intel_modifier_desc *md = lookup_modifier(fb->modifier); + + if (!md->ccs.cc_planes) + return -1; + + drm_WARN_ON_ONCE(fb->dev, hweight8(md->ccs.cc_planes) > 1); + + return ilog2((int)md->ccs.cc_planes); } -bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane) +static bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int color_plane) { - return fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC && - plane == 2; + return intel_fb_rc_ccs_cc_plane(fb) == color_plane; } -bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane) +static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane) { return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) && color_plane == 1; @@ -41,12 +481,13 @@ bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane) bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane) { return fb->modifier == DRM_FORMAT_MOD_LINEAR || - is_gen12_ccs_plane(fb, color_plane); + intel_fb_is_gen12_ccs_aux_plane(fb, color_plane) || + is_gen12_ccs_cc_plane(fb, color_plane); } int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane) { - drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) || + drm_WARN_ON(fb->dev, !intel_fb_is_ccs_modifier(fb->modifier) || (main_plane && main_plane >= fb->format->num_planes / 2)); return fb->format->num_planes / 2 + main_plane; @@ -54,7 +495,7 @@ int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane) int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane) { - drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) || + drm_WARN_ON(fb->dev, !intel_fb_is_ccs_modifier(fb->modifier) || ccs_plane < fb->format->num_planes / 2); if (is_gen12_ccs_cc_plane(fb, ccs_plane)) @@ -63,35 +504,12 @@ int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane) return ccs_plane - fb->format->num_planes / 2; } -static unsigned int gen12_aligned_scanout_stride(const struct intel_framebuffer *fb, - int color_plane) -{ - struct drm_i915_private *i915 = to_i915(fb->base.dev); - unsigned int stride = fb->base.pitches[color_plane]; - - if (IS_ALDERLAKE_P(i915)) - return roundup_pow_of_two(max(stride, - 8u * intel_tile_width_bytes(&fb->base, color_plane))); - - return stride; -} - static unsigned int gen12_ccs_aux_stride(struct intel_framebuffer *fb, int ccs_plane) { - struct drm_i915_private *i915 = to_i915(fb->base.dev); int main_plane = skl_ccs_to_main_plane(&fb->base, ccs_plane); unsigned int main_stride = fb->base.pitches[main_plane]; unsigned int main_tile_width = intel_tile_width_bytes(&fb->base, main_plane); - /* - * On ADL-P the AUX stride must align with a power-of-two aligned main - * surface stride. 
The stride of the allocated main surface object can - * be less than this POT stride, which is then autopadded to the POT - * size. - */ - if (IS_ALDERLAKE_P(i915)) - main_stride = gen12_aligned_scanout_stride(fb, main_plane); - return DIV_ROUND_UP(main_stride, 4 * main_tile_width) * 64; } @@ -99,7 +517,7 @@ int skl_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane) { struct drm_i915_private *i915 = to_i915(fb->dev); - if (is_ccs_modifier(fb->modifier)) + if (intel_fb_is_ccs_modifier(fb->modifier)) return main_to_ccs_plane(fb, main_plane); else if (DISPLAY_VER(i915) < 11 && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) @@ -128,13 +546,14 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) else return 512; case I915_FORMAT_MOD_Y_TILED_CCS: - if (is_ccs_plane(fb, color_plane)) + if (intel_fb_is_ccs_aux_plane(fb, color_plane)) return 128; fallthrough; case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: - if (is_ccs_plane(fb, color_plane)) + if (intel_fb_is_ccs_aux_plane(fb, color_plane) || + is_gen12_ccs_cc_plane(fb, color_plane)) return 64; fallthrough; case I915_FORMAT_MOD_Y_TILED: @@ -143,7 +562,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) else return 512; case I915_FORMAT_MOD_Yf_TILED_CCS: - if (is_ccs_plane(fb, color_plane)) + if (intel_fb_is_ccs_aux_plane(fb, color_plane)) return 128; fallthrough; case I915_FORMAT_MOD_Yf_TILED: @@ -199,7 +618,7 @@ static void intel_tile_block_dims(const struct drm_framebuffer *fb, int color_pl { intel_tile_dims(fb, color_plane, tile_width, tile_height); - if (is_gen12_ccs_plane(fb, color_plane)) + if (intel_fb_is_gen12_ccs_aux_plane(fb, color_plane)) *tile_height = 1; } @@ -223,20 +642,33 @@ intel_fb_align_height(const struct drm_framebuffer *fb, static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier) { - switch (fb_modifier) { - case I915_FORMAT_MOD_X_TILED: - return I915_TILING_X; - case I915_FORMAT_MOD_Y_TILED: - case I915_FORMAT_MOD_Y_TILED_CCS: - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: - case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: + u8 tiling_caps = lookup_modifier(fb_modifier)->plane_caps & + INTEL_PLANE_CAP_TILING_MASK; + + switch (tiling_caps) { + case INTEL_PLANE_CAP_TILING_Y: return I915_TILING_Y; + case INTEL_PLANE_CAP_TILING_X: + return I915_TILING_X; + case INTEL_PLANE_CAP_TILING_Yf: + case INTEL_PLANE_CAP_TILING_NONE: + return I915_TILING_NONE; default: + MISSING_CASE(tiling_caps); return I915_TILING_NONE; } } +static bool intel_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier) +{ + return DISPLAY_VER(i915) >= 13 && modifier != DRM_FORMAT_MOD_LINEAR; +} + +bool intel_fb_uses_dpt(const struct drm_framebuffer *fb) +{ + return fb && intel_modifier_uses_dpt(to_i915(fb->dev), fb->modifier); +} + unsigned int intel_cursor_alignment(const struct drm_i915_private *i915) { if (IS_I830(i915)) @@ -271,7 +703,7 @@ unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, return 512 * 4096; /* AUX_DIST needs only 4K alignment */ - if (is_ccs_plane(fb, color_plane)) + if (intel_fb_is_ccs_aux_plane(fb, color_plane)) return 4096; if (is_semiplanar_uv_plane(fb, color_plane)) { @@ -330,7 +762,7 @@ void intel_fb_plane_get_subsampling(int *hsub, int *vsub, * TODO: Deduct the subsampling from the char block for all CCS * formats and planes. 
*/ - if (!is_gen12_ccs_plane(fb, color_plane)) { + if (!intel_fb_is_gen12_ccs_aux_plane(fb, color_plane)) { *hsub = fb->format->hsub; *vsub = fb->format->vsub; @@ -357,24 +789,13 @@ void intel_fb_plane_get_subsampling(int *hsub, int *vsub, static void intel_fb_plane_dims(const struct intel_framebuffer *fb, int color_plane, int *w, int *h) { - struct drm_i915_private *i915 = to_i915(fb->base.dev); - int main_plane = is_ccs_plane(&fb->base, color_plane) ? + int main_plane = intel_fb_is_ccs_aux_plane(&fb->base, color_plane) ? skl_ccs_to_main_plane(&fb->base, color_plane) : 0; unsigned int main_width = fb->base.width; unsigned int main_height = fb->base.height; int main_hsub, main_vsub; int hsub, vsub; - /* - * On ADL-P the CCS AUX surface layout always aligns with the - * power-of-two aligned main surface stride. The main surface - * stride in the allocated FB object may not be power-of-two - * sized, in which case it is auto-padded to the POT size. - */ - if (IS_ALDERLAKE_P(i915) && is_ccs_plane(&fb->base, color_plane)) - main_width = gen12_aligned_scanout_stride(fb, 0) / - fb->base.format->cpp[0]; - intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, &fb->base, main_plane); intel_fb_plane_get_subsampling(&hsub, &vsub, &fb->base, color_plane); @@ -409,6 +830,20 @@ static u32 intel_adjust_tile_offset(int *x, int *y, return new_offset; } +static u32 intel_adjust_linear_offset(int *x, int *y, + unsigned int cpp, + unsigned int pitch, + u32 old_offset, + u32 new_offset) +{ + old_offset += *y * pitch + *x * cpp; + + *y = (old_offset - new_offset) / pitch; + *x = ((old_offset - new_offset) - *y * pitch) / cpp; + + return new_offset; +} + static u32 intel_adjust_aligned_offset(int *x, int *y, const struct drm_framebuffer *fb, int color_plane, @@ -439,10 +874,8 @@ static u32 intel_adjust_aligned_offset(int *x, int *y, tile_size, pitch_tiles, old_offset, new_offset); } else { - old_offset += *y * pitch + *x * cpp; - - *y = (old_offset - new_offset) / pitch; - *x = ((old_offset - new_offset) - *y * pitch) / cpp; + intel_adjust_linear_offset(x, y, cpp, pitch, + old_offset, new_offset); } return new_offset; @@ -459,7 +892,7 @@ u32 intel_plane_adjust_aligned_offset(int *x, int *y, { return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane, state->hw.rotation, - state->view.color_plane[color_plane].stride, + state->view.color_plane[color_plane].mapping_stride, old_offset, new_offset); } @@ -540,7 +973,7 @@ u32 intel_plane_compute_aligned_offset(int *x, int *y, struct drm_i915_private *i915 = to_i915(intel_plane->base.dev); const struct drm_framebuffer *fb = state->hw.fb; unsigned int rotation = state->hw.rotation; - int pitch = state->view.color_plane[color_plane].stride; + int pitch = state->view.color_plane[color_plane].mapping_stride; u32 alignment; if (intel_plane->id == PLANE_CURSOR) @@ -562,6 +995,7 @@ static int intel_fb_offset_to_xy(int *x, int *y, u32 alignment; if (DISPLAY_VER(i915) >= 12 && + !intel_fb_needs_pot_stride_remap(to_intel_framebuffer(fb)) && is_semiplanar_uv_plane(fb, color_plane)) alignment = intel_tile_row_size(fb, color_plane); else if (fb->modifier != DRM_FORMAT_MOD_LINEAR) @@ -610,7 +1044,7 @@ static int intel_fb_check_ccs_xy(const struct drm_framebuffer *fb, int ccs_plane int ccs_x, ccs_y; int main_x, main_y; - if (!is_ccs_plane(fb, ccs_plane) || is_gen12_ccs_cc_plane(fb, ccs_plane)) + if (!intel_fb_is_ccs_aux_plane(fb, ccs_plane)) return 0; /* @@ -673,7 +1107,7 @@ static bool intel_plane_can_remap(const struct intel_plane_state *plane_state) * The new CCS hash 
mode isn't compatible with remapping as * the virtual address of the pages affects the compressed data. */ - if (is_ccs_modifier(fb->modifier)) + if (intel_fb_is_ccs_modifier(fb->modifier)) return false; /* Linear needs a page aligned stride for remapping */ @@ -699,11 +1133,11 @@ bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb) static int intel_fb_pitch(const struct intel_framebuffer *fb, int color_plane, unsigned int rotation) { if (drm_rotation_90_or_270(rotation)) - return fb->rotated_view.color_plane[color_plane].stride; + return fb->rotated_view.color_plane[color_plane].mapping_stride; else if (intel_fb_needs_pot_stride_remap(fb)) - return fb->remapped_view.color_plane[color_plane].stride; + return fb->remapped_view.color_plane[color_plane].mapping_stride; else - return fb->normal_view.color_plane[color_plane].stride; + return fb->normal_view.color_plane[color_plane].mapping_stride; } static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state) @@ -814,18 +1248,32 @@ plane_view_dst_stride_tiles(const struct intel_framebuffer *fb, int color_plane, unsigned int pitch_tiles) { if (intel_fb_needs_pot_stride_remap(fb)) { - unsigned int min_stride = is_ccs_plane(&fb->base, color_plane) ? 2 : 8; /* * ADL_P, the only platform needing a POT stride has a minimum - * of 8 main surface and 2 CCS AUX stride tiles. + * of 8 main surface tiles. */ - return roundup_pow_of_two(max(pitch_tiles, min_stride)); + return roundup_pow_of_two(max(pitch_tiles, 8u)); } else { return pitch_tiles; } } static unsigned int +plane_view_scanout_stride(const struct intel_framebuffer *fb, int color_plane, + unsigned int tile_width, + unsigned int src_stride_tiles, unsigned int dst_stride_tiles) +{ + unsigned int stride_tiles; + + if (IS_ALDERLAKE_P(to_i915(fb->base.dev))) + stride_tiles = src_stride_tiles; + else + stride_tiles = dst_stride_tiles; + + return stride_tiles * tile_width * fb->base.format->cpp[color_plane]; +} + +static unsigned int plane_view_width_tiles(const struct intel_framebuffer *fb, int color_plane, const struct fb_plane_view_dims *dims, int x) @@ -841,11 +1289,31 @@ plane_view_height_tiles(const struct intel_framebuffer *fb, int color_plane, return DIV_ROUND_UP(y + dims->height, dims->tile_height); } +static unsigned int +plane_view_linear_tiles(const struct intel_framebuffer *fb, int color_plane, + const struct fb_plane_view_dims *dims, + int x, int y) +{ + struct drm_i915_private *i915 = to_i915(fb->base.dev); + unsigned int size; + + size = (y + dims->height) * fb->base.pitches[color_plane] + + x * fb->base.format->cpp[color_plane]; + + return DIV_ROUND_UP(size, intel_tile_size(i915)); +} + #define assign_chk_ovf(i915, var, val) ({ \ drm_WARN_ON(&(i915)->drm, overflows_type(val, var)); \ (var) = (val); \ }) +#define assign_bfld_chk_ovf(i915, var, val) ({ \ + (var) = (val); \ + drm_WARN_ON(&(i915)->drm, (var) != (val)); \ + (var); \ +}) + static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_plane, const struct fb_plane_view_dims *dims, u32 obj_offset, u32 gtt_offset, int x, int y, @@ -860,12 +1328,26 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p struct drm_rect r; u32 size = 0; - assign_chk_ovf(i915, remap_info->offset, obj_offset); - assign_chk_ovf(i915, remap_info->src_stride, plane_view_src_stride_tiles(fb, color_plane, dims)); - assign_chk_ovf(i915, remap_info->width, plane_view_width_tiles(fb, color_plane, dims, x)); - assign_chk_ovf(i915, remap_info->height, 
plane_view_height_tiles(fb, color_plane, dims, y)); + assign_bfld_chk_ovf(i915, remap_info->offset, obj_offset); + + if (intel_fb_is_gen12_ccs_aux_plane(&fb->base, color_plane)) { + remap_info->linear = 1; + + assign_chk_ovf(i915, remap_info->size, + plane_view_linear_tiles(fb, color_plane, dims, x, y)); + } else { + remap_info->linear = 0; + + assign_chk_ovf(i915, remap_info->src_stride, + plane_view_src_stride_tiles(fb, color_plane, dims)); + assign_chk_ovf(i915, remap_info->width, + plane_view_width_tiles(fb, color_plane, dims, x)); + assign_chk_ovf(i915, remap_info->height, + plane_view_height_tiles(fb, color_plane, dims, y)); + } if (view->gtt.type == I915_GGTT_VIEW_ROTATED) { + drm_WARN_ON(&i915->drm, remap_info->linear); check_array_bounds(i915, view->gtt.rotated.plane, color_plane); assign_chk_ovf(i915, remap_info->dst_stride, @@ -881,7 +1363,8 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p color_plane_info->x = r.x1; color_plane_info->y = r.y1; - color_plane_info->stride = remap_info->dst_stride * tile_height; + color_plane_info->mapping_stride = remap_info->dst_stride * tile_height; + color_plane_info->scanout_stride = color_plane_info->mapping_stride; size += remap_info->dst_stride * remap_info->width; @@ -900,16 +1383,29 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p gtt_offset = aligned_offset; } - assign_chk_ovf(i915, remap_info->dst_stride, - plane_view_dst_stride_tiles(fb, color_plane, remap_info->width)); - color_plane_info->x = x; color_plane_info->y = y; - color_plane_info->stride = remap_info->dst_stride * tile_width * - fb->base.format->cpp[color_plane]; + if (remap_info->linear) { + color_plane_info->mapping_stride = fb->base.pitches[color_plane]; + color_plane_info->scanout_stride = color_plane_info->mapping_stride; - size += remap_info->dst_stride * remap_info->height; + size += remap_info->size; + } else { + unsigned int dst_stride = plane_view_dst_stride_tiles(fb, color_plane, + remap_info->width); + + assign_chk_ovf(i915, remap_info->dst_stride, dst_stride); + color_plane_info->mapping_stride = dst_stride * + tile_width * + fb->base.format->cpp[color_plane]; + color_plane_info->scanout_stride = + plane_view_scanout_stride(fb, color_plane, tile_width, + remap_info->src_stride, + dst_stride); + + size += dst_stride * remap_info->height; + } } /* @@ -917,10 +1413,16 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p * the x/y offsets. x,y will hold the first pixel of the framebuffer * plane from the start of the remapped/rotated gtt mapping. 
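 	 * (E.g. for a linear plane with a 4 byte per pixel format and a
 	 * 16384 byte mapping stride, a remaining byte offset of 16448
 	 * resolves to y = 1, x = 16.)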
*/ - intel_adjust_tile_offset(&color_plane_info->x, &color_plane_info->y, - tile_width, tile_height, - tile_size, remap_info->dst_stride, - gtt_offset * tile_size, 0); + if (remap_info->linear) + intel_adjust_linear_offset(&color_plane_info->x, &color_plane_info->y, + fb->base.format->cpp[color_plane], + color_plane_info->mapping_stride, + gtt_offset * tile_size, 0); + else + intel_adjust_tile_offset(&color_plane_info->x, &color_plane_info->y, + tile_width, tile_height, + tile_size, remap_info->dst_stride, + gtt_offset * tile_size, 0); return size; } @@ -933,15 +1435,10 @@ calc_plane_normal_size(const struct intel_framebuffer *fb, int color_plane, const struct fb_plane_view_dims *dims, int x, int y) { - struct drm_i915_private *i915 = to_i915(fb->base.dev); unsigned int tiles; if (is_surface_linear(&fb->base, color_plane)) { - unsigned int size; - - size = (y + dims->height) * fb->base.pitches[color_plane] + - x * fb->base.format->cpp[color_plane]; - tiles = DIV_ROUND_UP(size, intel_tile_size(i915)); + tiles = plane_view_linear_tiles(fb, color_plane, dims, x, y); } else { tiles = plane_view_src_stride_tiles(fb, color_plane, dims) * plane_view_height_tiles(fb, color_plane, dims, y); @@ -1030,7 +1527,9 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer * */ fb->normal_view.color_plane[i].x = x; fb->normal_view.color_plane[i].y = y; - fb->normal_view.color_plane[i].stride = fb->base.pitches[i]; + fb->normal_view.color_plane[i].mapping_stride = fb->base.pitches[i]; + fb->normal_view.color_plane[i].scanout_stride = + fb->normal_view.color_plane[i].mapping_stride; offset = calc_plane_aligned_offset(fb, i, &x, &y); @@ -1080,7 +1579,7 @@ static void intel_plane_remap_gtt(struct intel_plane_state *plane_state) src_w = drm_rect_width(&plane_state->uapi.src) >> 16; src_h = drm_rect_height(&plane_state->uapi.src) >> 16; - drm_WARN_ON(&i915->drm, is_ccs_modifier(fb->modifier)); + drm_WARN_ON(&i915->drm, intel_fb_is_ccs_modifier(fb->modifier)); /* Make src coordinates relative to the viewport */ drm_rect_translate(&plane_state->uapi.src, @@ -1143,7 +1642,7 @@ u32 intel_fb_max_stride(struct drm_i915_private *dev_priv, * * The new CCS hash mode makes remapping impossible */ - if (DISPLAY_VER(dev_priv) < 4 || is_ccs_modifier(modifier) || + if (DISPLAY_VER(dev_priv) < 4 || intel_fb_is_ccs_modifier(modifier) || intel_modifier_uses_dpt(dev_priv, modifier)) return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier); else if (DISPLAY_VER(dev_priv) >= 7) @@ -1168,27 +1667,19 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane) * we need the stride to be page aligned. */ if (fb->pitches[color_plane] > max_stride && - !is_ccs_modifier(fb->modifier)) + !intel_fb_is_ccs_modifier(fb->modifier)) return intel_tile_size(dev_priv); else return 64; } tile_width = intel_tile_width_bytes(fb, color_plane); - if (is_ccs_modifier(fb->modifier)) { - /* - * On ADL-P the stride must be either 8 tiles or a stride - * that is aligned to 16 tiles, required by the 16 tiles = - * 64 kbyte CCS AUX PTE granularity, allowing CCS FBs to be - * remapped. - */ - if (IS_ALDERLAKE_P(dev_priv)) - tile_width *= fb->pitches[0] <= tile_width * 8 ? 8 : 16; + if (intel_fb_is_ccs_modifier(fb->modifier)) { /* * On TGL the surface stride must be 4 tile aligned, mapped by * one 64 byte cacheline on the CCS AUX surface. 
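		 * (Four Y-tiles of 128 bytes each, i.e. 512 bytes of main
		 * surface stride, map to one 64 byte AUX cacheline; this is
		 * the same 8:1 byte ratio that gen12_ccs_aux_stride()
		 * computes.)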
*/ - else if (DISPLAY_VER(dev_priv) >= 12) + if (DISPLAY_VER(dev_priv) >= 12) tile_width *= 4; /* * Display WA #0531: skl,bxt,kbl,glk @@ -1224,7 +1715,7 @@ static int intel_plane_check_stride(const struct intel_plane_state *plane_state) return 0; /* FIXME other color planes? */ - stride = plane_state->view.color_plane[0].stride; + stride = plane_state->view.color_plane[0].mapping_stride; max_stride = plane->max_stride(plane, fb->format->format, fb->modifier, rotation); @@ -1430,7 +1921,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb, goto err; } - if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) { + if (intel_fb_is_gen12_ccs_aux_plane(fb, i)) { int ccs_aux_stride = gen12_ccs_aux_stride(intel_fb, i); if (fb->pitches[i] != ccs_aux_stride) { diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h index 1cbdd84502bd..ba9df8986c1e 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.h +++ b/drivers/gpu/drm/i915/display/intel_fb.h @@ -6,6 +6,7 @@ #ifndef __INTEL_FB_H__ #define __INTEL_FB_H__ +#include <linux/bits.h> #include <linux/types.h> struct drm_device; @@ -16,12 +17,34 @@ struct drm_i915_private; struct drm_mode_fb_cmd2; struct intel_fb_view; struct intel_framebuffer; +struct intel_plane; struct intel_plane_state; -bool is_ccs_plane(const struct drm_framebuffer *fb, int plane); -bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane); -bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane); -bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane); +#define INTEL_PLANE_CAP_NONE 0 +#define INTEL_PLANE_CAP_CCS_RC BIT(0) +#define INTEL_PLANE_CAP_CCS_RC_CC BIT(1) +#define INTEL_PLANE_CAP_CCS_MC BIT(2) +#define INTEL_PLANE_CAP_TILING_X BIT(3) +#define INTEL_PLANE_CAP_TILING_Y BIT(4) +#define INTEL_PLANE_CAP_TILING_Yf BIT(5) + +bool intel_fb_is_ccs_modifier(u64 modifier); +bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier); +bool intel_fb_is_mc_ccs_modifier(u64 modifier); + +bool intel_fb_is_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane); +int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb); + +u64 *intel_fb_plane_get_modifiers(struct drm_i915_private *i915, + u8 plane_caps); +bool intel_fb_plane_supports_modifier(struct intel_plane *plane, u64 modifier); + +const struct drm_format_info * +intel_fb_get_format_info(const struct drm_mode_fb_cmd2 *cmd); + +bool +intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, + u64 modifier); bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane); @@ -67,4 +90,6 @@ intel_user_framebuffer_create(struct drm_device *dev, struct drm_file *filp, const struct drm_mode_fb_cmd2 *user_mode_cmd); +bool intel_fb_uses_dpt(const struct drm_framebuffer *fb); + #endif /* __INTEL_FB_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c index 3f77f3013584..31c15e5fca95 100644 --- a/drivers/gpu/drm/i915/display/intel_fb_pin.c +++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c @@ -7,13 +7,13 @@ * DOC: display pinning helpers */ -#include "intel_display_types.h" -#include "intel_fb_pin.h" -#include "intel_fb.h" +#include "gem/i915_gem_object.h" +#include "i915_drv.h" +#include "intel_display_types.h" #include "intel_dpt.h" - -#include "gem/i915_gem_object.h" +#include "intel_fb.h" +#include "intel_fb_pin.h" static struct i915_vma * intel_pin_fb_obj_dpt(struct drm_framebuffer *fb, @@ -142,13 +142,11 @@ retry: if (ret) goto err; - if (!ret) 
{ - vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment, - view, pinctl); - if (IS_ERR(vma)) { - ret = PTR_ERR(vma); - goto err_unpin; - } + vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment, + view, pinctl); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err_unpin; } if (uses_fence && i915_vma_is_map_and_fenceable(vma)) { diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 1f66de77a6b1..8be01b93015f 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -41,26 +41,71 @@ #include <drm/drm_fourcc.h> #include "i915_drv.h" -#include "i915_trace.h" #include "i915_vgpu.h" +#include "intel_cdclk.h" #include "intel_de.h" +#include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_fbc.h" #include "intel_frontbuffer.h" -/* - * For SKL+, the plane source size used by the hardware is based on the value we - * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value - * we wrote to PIPESRC. - */ -static void intel_fbc_get_plane_source_size(const struct intel_fbc_state_cache *cache, - int *width, int *height) -{ - if (width) - *width = cache->plane.src_w; - if (height) - *height = cache->plane.src_h; -} +struct intel_fbc_funcs { + void (*activate)(struct intel_fbc *fbc); + void (*deactivate)(struct intel_fbc *fbc); + bool (*is_active)(struct intel_fbc *fbc); + bool (*is_compressing)(struct intel_fbc *fbc); + void (*nuke)(struct intel_fbc *fbc); + void (*program_cfb)(struct intel_fbc *fbc); + void (*set_false_color)(struct intel_fbc *fbc, bool enable); +}; + +struct intel_fbc_state { + struct intel_plane *plane; + unsigned int cfb_stride; + unsigned int cfb_size; + unsigned int fence_y_offset; + u16 override_cfb_stride; + u16 interval; + s8 fence_id; +}; + +struct intel_fbc { + struct drm_i915_private *i915; + const struct intel_fbc_funcs *funcs; + + /* + * This is always the inner lock when overlapping with + * struct_mutex and it's the outer lock when overlapping + * with stolen_lock. + */ + struct mutex lock; + unsigned int possible_framebuffer_bits; + unsigned int busy_bits; + + struct drm_mm_node compressed_fb; + struct drm_mm_node compressed_llb; + + u8 limit; + + bool false_color; + + bool active; + bool activated; + bool flip_pending; + + bool underrun_detected; + struct work_struct underrun_work; + + /* + * This structure contains everything that's relevant to program the + * hardware registers. When we want to figure out if we need to disable + * and re-enable FBC for a new configuration we just check if there's + * something different in the struct. The genx_fbc_activate functions + * are supposed to read from it in order to program the registers. 
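+	 * (E.g. a cfb_stride or fence_id that differs between the old and
+	 * new state means the hardware must be deactivated and reprogrammed,
+	 * while an identical struct lets FBC stay active across the commit.)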
+ */
+	struct intel_fbc_state state;
+	const char *no_fbc_reason;
+};
 
 /* plane stride in pixels */
 static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane_state)
@@ -68,7 +113,7 @@
 	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	unsigned int stride;
 
-	stride = plane_state->view.color_plane[0].stride;
+	stride = plane_state->view.color_plane[0].mapping_stride;
 	if (!drm_rotation_90_or_270(plane_state->hw.rotation))
 		stride /= fb->format->cpp[0];
 
@@ -76,24 +121,25 @@
 }
 
 /* plane stride based cfb stride in bytes, assuming 1:1 compression limit */
-static unsigned int _intel_fbc_cfb_stride(const struct intel_fbc_state_cache *cache)
+static unsigned int _intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
 {
 	unsigned int cpp = 4; /* FBC always 4 bytes per pixel */
 
-	return cache->fb.stride * cpp;
+	return intel_fbc_plane_stride(plane_state) * cpp;
 }
 
 /* minimum acceptable cfb stride in bytes, assuming 1:1 compression limit */
-static unsigned int skl_fbc_min_cfb_stride(struct drm_i915_private *i915,
-					   const struct intel_fbc_state_cache *cache)
+static unsigned int skl_fbc_min_cfb_stride(const struct intel_plane_state *plane_state)
 {
+	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
 	unsigned int limit = 4; /* 1:4 compression limit is the worst case */
 	unsigned int cpp = 4; /* FBC always 4 bytes per pixel */
+	unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16;
 	unsigned int height = 4; /* FBC segment is 4 lines */
 	unsigned int stride; /* minimum segment stride we can use */
 
-	stride = cache->plane.src_w * cpp * height / limit;
+	stride = width * cpp * height / limit;
 
 	/*
 	 * Wa_16011863758: icl+
@@ -113,10 +159,10 @@
 }
 
 /* properly aligned cfb stride in bytes, assuming 1:1 compression limit */
-static unsigned int intel_fbc_cfb_stride(struct drm_i915_private *i915,
-					 const struct intel_fbc_state_cache *cache)
+static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
{
-	unsigned int stride = _intel_fbc_cfb_stride(cache);
+	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+	unsigned int stride = _intel_fbc_cfb_stride(plane_state);
 
 	/*
 	 * At least some of the platforms require each 4 line segment to
 	 * be 512 byte aligned. Aligning each line to 512 bytes guarantees
 	 * that regardless of the compression limit we choose later.
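 	 * (E.g. a 1366 pixel wide plane: 1366 * 4 = 5464 bytes of cfb
 	 * stride, padded here to at least ALIGN(5464, 512) = 5632 bytes.)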
*/ if (DISPLAY_VER(i915) >= 9) - return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(i915, cache)); + return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(plane_state)); else return stride; } -static unsigned int intel_fbc_cfb_size(struct drm_i915_private *dev_priv, - const struct intel_fbc_state_cache *cache) +static unsigned int intel_fbc_cfb_size(const struct intel_plane_state *plane_state) { - int lines = cache->plane.src_h; + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + int lines = drm_rect_height(&plane_state->uapi.src) >> 16; - if (DISPLAY_VER(dev_priv) == 7) + if (DISPLAY_VER(i915) == 7) lines = min(lines, 2048); - else if (DISPLAY_VER(dev_priv) >= 8) + else if (DISPLAY_VER(i915) >= 8) lines = min(lines, 2560); - return lines * intel_fbc_cfb_stride(dev_priv, cache); + return lines * intel_fbc_cfb_stride(plane_state); +} + +static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + unsigned int stride_aligned = intel_fbc_cfb_stride(plane_state); + unsigned int stride = _intel_fbc_cfb_stride(plane_state); + const struct drm_framebuffer *fb = plane_state->hw.fb; + + /* + * Override stride in 64 byte units per 4 line segment. + * + * Gen9 hw miscalculates cfb stride for linear as + * PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so + * we always need to use the override there. + */ + if (stride != stride_aligned || + (DISPLAY_VER(i915) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR)) + return stride_aligned * 4 / 64; + + return 0; } -static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv) +static u32 i8xx_fbc_ctl(struct intel_fbc *fbc) { + const struct intel_fbc_state *fbc_state = &fbc->state; + struct drm_i915_private *i915 = fbc->i915; + unsigned int cfb_stride; + u32 fbc_ctl; + + cfb_stride = fbc_state->cfb_stride / fbc->limit; + + /* FBC_CTL wants 32B or 64B units */ + if (DISPLAY_VER(i915) == 2) + cfb_stride = (cfb_stride / 32) - 1; + else + cfb_stride = (cfb_stride / 64) - 1; + + fbc_ctl = FBC_CTL_PERIODIC | + FBC_CTL_INTERVAL(fbc_state->interval) | + FBC_CTL_STRIDE(cfb_stride); + + if (IS_I945GM(i915)) + fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ + + if (fbc_state->fence_id >= 0) + fbc_ctl |= FBC_CTL_FENCENO(fbc_state->fence_id); + + return fbc_ctl; +} + +static u32 i965_fbc_ctl2(struct intel_fbc *fbc) +{ + const struct intel_fbc_state *fbc_state = &fbc->state; + u32 fbc_ctl2; + + fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | + FBC_CTL_PLANE(fbc_state->plane->i9xx_plane); + + if (fbc_state->fence_id >= 0) + fbc_ctl2 |= FBC_CTL_CPU_FENCE_EN; + + return fbc_ctl2; +} + +static void i8xx_fbc_deactivate(struct intel_fbc *fbc) +{ + struct drm_i915_private *i915 = fbc->i915; u32 fbc_ctl; /* Disable compression */ - fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL); + fbc_ctl = intel_de_read(i915, FBC_CONTROL); if ((fbc_ctl & FBC_CTL_EN) == 0) return; fbc_ctl &= ~FBC_CTL_EN; - intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl); + intel_de_write(i915, FBC_CONTROL, fbc_ctl); /* Wait for compressing bit to clear */ - if (intel_de_wait_for_clear(dev_priv, FBC_STATUS, + if (intel_de_wait_for_clear(i915, FBC_STATUS, FBC_STAT_COMPRESSING, 10)) { - drm_dbg_kms(&dev_priv->drm, "FBC idle timed out\n"); + drm_dbg_kms(&i915->drm, "FBC idle timed out\n"); return; } } -static void i8xx_fbc_activate(struct drm_i915_private *dev_priv) +static void i8xx_fbc_activate(struct intel_fbc *fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; - const 
struct intel_fbc_reg_params *params = &fbc->params; - int cfb_pitch; + const struct intel_fbc_state *fbc_state = &fbc->state; + struct drm_i915_private *i915 = fbc->i915; int i; - u32 fbc_ctl; - - cfb_pitch = params->cfb_stride / fbc->limit; - - /* FBC_CTL wants 32B or 64B units */ - if (DISPLAY_VER(dev_priv) == 2) - cfb_pitch = (cfb_pitch / 32) - 1; - else - cfb_pitch = (cfb_pitch / 64) - 1; /* Clear old tags */ for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) - intel_de_write(dev_priv, FBC_TAG(i), 0); - - if (DISPLAY_VER(dev_priv) == 4) { - u32 fbc_ctl2; - - /* Set it up... */ - fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM; - fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane); - if (params->fence_id >= 0) - fbc_ctl2 |= FBC_CTL_CPU_FENCE; - intel_de_write(dev_priv, FBC_CONTROL2, fbc_ctl2); - intel_de_write(dev_priv, FBC_FENCE_OFF, - params->fence_y_offset); + intel_de_write(i915, FBC_TAG(i), 0); + + if (DISPLAY_VER(i915) == 4) { + intel_de_write(i915, FBC_CONTROL2, + i965_fbc_ctl2(fbc)); + intel_de_write(i915, FBC_FENCE_OFF, + fbc_state->fence_y_offset); } - /* enable it... */ - fbc_ctl = FBC_CTL_INTERVAL(params->interval); - fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC; - if (IS_I945GM(dev_priv)) - fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ - fbc_ctl |= FBC_CTL_STRIDE(cfb_pitch & 0xff); - if (params->fence_id >= 0) - fbc_ctl |= FBC_CTL_FENCENO(params->fence_id); - intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl); + intel_de_write(i915, FBC_CONTROL, + FBC_CTL_EN | i8xx_fbc_ctl(fbc)); +} + +static bool i8xx_fbc_is_active(struct intel_fbc *fbc) +{ + return intel_de_read(fbc->i915, FBC_CONTROL) & FBC_CTL_EN; +} + +static bool i8xx_fbc_is_compressing(struct intel_fbc *fbc) +{ + return intel_de_read(fbc->i915, FBC_STATUS) & + (FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED); +} + +static void i8xx_fbc_nuke(struct intel_fbc *fbc) +{ + struct intel_fbc_state *fbc_state = &fbc->state; + enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane; + struct drm_i915_private *dev_priv = fbc->i915; + + spin_lock_irq(&dev_priv->uncore.lock); + intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), + intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane))); + spin_unlock_irq(&dev_priv->uncore.lock); } -static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv) +static void i8xx_fbc_program_cfb(struct intel_fbc *fbc) { - return intel_de_read(dev_priv, FBC_CONTROL) & FBC_CTL_EN; + struct drm_i915_private *i915 = fbc->i915; + + GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start, + fbc->compressed_fb.start, U32_MAX)); + GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start, + fbc->compressed_llb.start, U32_MAX)); + + intel_de_write(i915, FBC_CFB_BASE, + i915->dsm.start + fbc->compressed_fb.start); + intel_de_write(i915, FBC_LL_BASE, + i915->dsm.start + fbc->compressed_llb.start); } -static u32 g4x_dpfc_ctl_limit(struct drm_i915_private *i915) +static const struct intel_fbc_funcs i8xx_fbc_funcs = { + .activate = i8xx_fbc_activate, + .deactivate = i8xx_fbc_deactivate, + .is_active = i8xx_fbc_is_active, + .is_compressing = i8xx_fbc_is_compressing, + .nuke = i8xx_fbc_nuke, + .program_cfb = i8xx_fbc_program_cfb, +}; + +static void i965_fbc_nuke(struct intel_fbc *fbc) { - switch (i915->fbc.limit) { + struct intel_fbc_state *fbc_state = &fbc->state; + enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane; + struct drm_i915_private *dev_priv = fbc->i915; + + spin_lock_irq(&dev_priv->uncore.lock); + intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), + intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane))); + 
spin_unlock_irq(&dev_priv->uncore.lock); +} + +static const struct intel_fbc_funcs i965_fbc_funcs = { + .activate = i8xx_fbc_activate, + .deactivate = i8xx_fbc_deactivate, + .is_active = i8xx_fbc_is_active, + .is_compressing = i8xx_fbc_is_compressing, + .nuke = i965_fbc_nuke, + .program_cfb = i8xx_fbc_program_cfb, +}; + +static u32 g4x_dpfc_ctl_limit(struct intel_fbc *fbc) +{ + switch (fbc->limit) { default: - MISSING_CASE(i915->fbc.limit); + MISSING_CASE(fbc->limit); fallthrough; case 1: return DPFC_CTL_LIMIT_1X; @@ -226,260 +376,306 @@ static u32 g4x_dpfc_ctl_limit(struct drm_i915_private *i915) } } -static void g4x_fbc_activate(struct drm_i915_private *dev_priv) +static u32 g4x_dpfc_ctl(struct intel_fbc *fbc) { - struct intel_fbc_reg_params *params = &dev_priv->fbc.params; + const struct intel_fbc_state *fbc_state = &fbc->state; + struct drm_i915_private *i915 = fbc->i915; u32 dpfc_ctl; - dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN; + dpfc_ctl = g4x_dpfc_ctl_limit(fbc) | + DPFC_CTL_PLANE_G4X(fbc_state->plane->i9xx_plane); - dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv); + if (IS_G4X(i915)) + dpfc_ctl |= DPFC_CTL_SR_EN; - if (params->fence_id >= 0) { - dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id; - intel_de_write(dev_priv, DPFC_FENCE_YOFF, - params->fence_y_offset); - } else { - intel_de_write(dev_priv, DPFC_FENCE_YOFF, 0); + if (fbc_state->fence_id >= 0) { + dpfc_ctl |= DPFC_CTL_FENCE_EN_G4X; + + if (DISPLAY_VER(i915) < 6) + dpfc_ctl |= DPFC_CTL_FENCENO(fbc_state->fence_id); } - /* enable it... */ - intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + return dpfc_ctl; } -static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv) +static void g4x_fbc_activate(struct intel_fbc *fbc) { + const struct intel_fbc_state *fbc_state = &fbc->state; + struct drm_i915_private *i915 = fbc->i915; + + intel_de_write(i915, DPFC_FENCE_YOFF, + fbc_state->fence_y_offset); + + intel_de_write(i915, DPFC_CONTROL, + DPFC_CTL_EN | g4x_dpfc_ctl(fbc)); +} + +static void g4x_fbc_deactivate(struct intel_fbc *fbc) +{ + struct drm_i915_private *i915 = fbc->i915; u32 dpfc_ctl; /* Disable compression */ - dpfc_ctl = intel_de_read(dev_priv, DPFC_CONTROL); + dpfc_ctl = intel_de_read(i915, DPFC_CONTROL); if (dpfc_ctl & DPFC_CTL_EN) { dpfc_ctl &= ~DPFC_CTL_EN; - intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl); + intel_de_write(i915, DPFC_CONTROL, dpfc_ctl); } } -static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv) +static bool g4x_fbc_is_active(struct intel_fbc *fbc) { - return intel_de_read(dev_priv, DPFC_CONTROL) & DPFC_CTL_EN; + return intel_de_read(fbc->i915, DPFC_CONTROL) & DPFC_CTL_EN; } -static void i8xx_fbc_recompress(struct drm_i915_private *dev_priv) +static bool g4x_fbc_is_compressing(struct intel_fbc *fbc) { - struct intel_fbc_reg_params *params = &dev_priv->fbc.params; - enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane; + return intel_de_read(fbc->i915, DPFC_STATUS) & DPFC_COMP_SEG_MASK; +} - spin_lock_irq(&dev_priv->uncore.lock); - intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), - intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane))); - spin_unlock_irq(&dev_priv->uncore.lock); +static void g4x_fbc_program_cfb(struct intel_fbc *fbc) +{ + struct drm_i915_private *i915 = fbc->i915; + + intel_de_write(i915, DPFC_CB_BASE, fbc->compressed_fb.start); } -static void i965_fbc_recompress(struct drm_i915_private *dev_priv) +static const struct intel_fbc_funcs g4x_fbc_funcs = { + .activate = g4x_fbc_activate, + .deactivate = g4x_fbc_deactivate, + .is_active = 
g4x_fbc_is_active, + .is_compressing = g4x_fbc_is_compressing, + .nuke = i965_fbc_nuke, + .program_cfb = g4x_fbc_program_cfb, +}; + +static void ilk_fbc_activate(struct intel_fbc *fbc) { - struct intel_fbc_reg_params *params = &dev_priv->fbc.params; - enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane; + struct intel_fbc_state *fbc_state = &fbc->state; + struct drm_i915_private *i915 = fbc->i915; - spin_lock_irq(&dev_priv->uncore.lock); - intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), - intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane))); - spin_unlock_irq(&dev_priv->uncore.lock); + intel_de_write(i915, ILK_DPFC_FENCE_YOFF, + fbc_state->fence_y_offset); + + intel_de_write(i915, ILK_DPFC_CONTROL, + DPFC_CTL_EN | g4x_dpfc_ctl(fbc)); } -/* This function forces a CFB recompression through the nuke operation. */ -static void snb_fbc_recompress(struct drm_i915_private *dev_priv) +static void ilk_fbc_deactivate(struct intel_fbc *fbc) { - intel_de_write(dev_priv, MSG_FBC_REND_STATE, FBC_REND_NUKE); - intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE); + struct drm_i915_private *i915 = fbc->i915; + u32 dpfc_ctl; + + /* Disable compression */ + dpfc_ctl = intel_de_read(i915, ILK_DPFC_CONTROL); + if (dpfc_ctl & DPFC_CTL_EN) { + dpfc_ctl &= ~DPFC_CTL_EN; + intel_de_write(i915, ILK_DPFC_CONTROL, dpfc_ctl); + } +} + +static bool ilk_fbc_is_active(struct intel_fbc *fbc) +{ + return intel_de_read(fbc->i915, ILK_DPFC_CONTROL) & DPFC_CTL_EN; } -static void intel_fbc_recompress(struct drm_i915_private *dev_priv) +static bool ilk_fbc_is_compressing(struct intel_fbc *fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; + return intel_de_read(fbc->i915, ILK_DPFC_STATUS) & DPFC_COMP_SEG_MASK; +} - trace_intel_fbc_nuke(fbc->crtc); +static void ilk_fbc_program_cfb(struct intel_fbc *fbc) +{ + struct drm_i915_private *i915 = fbc->i915; - if (DISPLAY_VER(dev_priv) >= 6) - snb_fbc_recompress(dev_priv); - else if (DISPLAY_VER(dev_priv) >= 4) - i965_fbc_recompress(dev_priv); - else - i8xx_fbc_recompress(dev_priv); + intel_de_write(i915, ILK_DPFC_CB_BASE, fbc->compressed_fb.start); } -static void ilk_fbc_activate(struct drm_i915_private *dev_priv) +static const struct intel_fbc_funcs ilk_fbc_funcs = { + .activate = ilk_fbc_activate, + .deactivate = ilk_fbc_deactivate, + .is_active = ilk_fbc_is_active, + .is_compressing = ilk_fbc_is_compressing, + .nuke = i965_fbc_nuke, + .program_cfb = ilk_fbc_program_cfb, +}; + +static void snb_fbc_program_fence(struct intel_fbc *fbc) { - struct intel_fbc_reg_params *params = &dev_priv->fbc.params; - u32 dpfc_ctl; + const struct intel_fbc_state *fbc_state = &fbc->state; + struct drm_i915_private *i915 = fbc->i915; + u32 ctl = 0; - dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane); + if (fbc_state->fence_id >= 0) + ctl = SNB_DPFC_FENCE_EN | SNB_DPFC_FENCENO(fbc_state->fence_id); - dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv); + intel_de_write(i915, SNB_DPFC_CTL_SA, ctl); + intel_de_write(i915, SNB_DPFC_CPU_FENCE_OFFSET, fbc_state->fence_y_offset); +} - if (params->fence_id >= 0) { - dpfc_ctl |= DPFC_CTL_FENCE_EN; - if (IS_IRONLAKE(dev_priv)) - dpfc_ctl |= params->fence_id; - if (IS_SANDYBRIDGE(dev_priv)) { - intel_de_write(dev_priv, SNB_DPFC_CTL_SA, - SNB_CPU_FENCE_ENABLE | params->fence_id); - intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, - params->fence_y_offset); - } - } else { - if (IS_SANDYBRIDGE(dev_priv)) { - intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0); - intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0); - } - } +static void snb_fbc_activate(struct intel_fbc *fbc) +{ + 
snb_fbc_program_fence(fbc); - intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF, - params->fence_y_offset); - /* enable it... */ - intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + ilk_fbc_activate(fbc); } -static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv) +static void snb_fbc_nuke(struct intel_fbc *fbc) { - u32 dpfc_ctl; + struct drm_i915_private *i915 = fbc->i915; - /* Disable compression */ - dpfc_ctl = intel_de_read(dev_priv, ILK_DPFC_CONTROL); - if (dpfc_ctl & DPFC_CTL_EN) { - dpfc_ctl &= ~DPFC_CTL_EN; - intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl); - } + intel_de_write(i915, MSG_FBC_REND_STATE, FBC_REND_NUKE); + intel_de_posting_read(i915, MSG_FBC_REND_STATE); +} + +static const struct intel_fbc_funcs snb_fbc_funcs = { + .activate = snb_fbc_activate, + .deactivate = ilk_fbc_deactivate, + .is_active = ilk_fbc_is_active, + .is_compressing = ilk_fbc_is_compressing, + .nuke = snb_fbc_nuke, + .program_cfb = ilk_fbc_program_cfb, +}; + +static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc) +{ + const struct intel_fbc_state *fbc_state = &fbc->state; + struct drm_i915_private *i915 = fbc->i915; + u32 val = 0; + + if (fbc_state->override_cfb_stride) + val |= FBC_STRIDE_OVERRIDE | + FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit); + + intel_de_write(i915, GLK_FBC_STRIDE, val); } -static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv) +static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc) { - return intel_de_read(dev_priv, ILK_DPFC_CONTROL) & DPFC_CTL_EN; + const struct intel_fbc_state *fbc_state = &fbc->state; + struct drm_i915_private *i915 = fbc->i915; + u32 val = 0; + + /* Display WA #0529: skl, kbl, bxt. */ + if (fbc_state->override_cfb_stride) + val |= CHICKEN_FBC_STRIDE_OVERRIDE | + CHICKEN_FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit); + + intel_de_rmw(i915, CHICKEN_MISC_4, + CHICKEN_FBC_STRIDE_OVERRIDE | + CHICKEN_FBC_STRIDE_MASK, val); } -static void gen7_fbc_activate(struct drm_i915_private *dev_priv) +static u32 ivb_dpfc_ctl(struct intel_fbc *fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; - const struct intel_fbc_reg_params *params = &fbc->params; + const struct intel_fbc_state *fbc_state = &fbc->state; + struct drm_i915_private *i915 = fbc->i915; u32 dpfc_ctl; - if (DISPLAY_VER(dev_priv) >= 10) { - u32 val = 0; + dpfc_ctl = g4x_dpfc_ctl_limit(fbc); - if (params->override_cfb_stride) - val |= FBC_STRIDE_OVERRIDE | - FBC_STRIDE(params->override_cfb_stride / fbc->limit); + if (IS_IVYBRIDGE(i915)) + dpfc_ctl |= DPFC_CTL_PLANE_IVB(fbc_state->plane->i9xx_plane); - intel_de_write(dev_priv, GLK_FBC_STRIDE, val); - } else if (DISPLAY_VER(dev_priv) == 9) { - u32 val = 0; + if (fbc_state->fence_id >= 0) + dpfc_ctl |= DPFC_CTL_FENCE_EN_IVB; - /* Display WA #0529: skl, kbl, bxt. 
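+	 * (The stride override value is in 64 byte units per 4 line
+	 * segment: intel_fbc_override_cfb_stride() returns
+	 * stride_aligned * 4 / 64, and it is divided by fbc->limit below.)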
*/ - if (params->override_cfb_stride) - val |= CHICKEN_FBC_STRIDE_OVERRIDE | - CHICKEN_FBC_STRIDE(params->override_cfb_stride / fbc->limit); + if (fbc->false_color) + dpfc_ctl |= DPFC_CTL_FALSE_COLOR; - intel_de_rmw(dev_priv, CHICKEN_MISC_4, - CHICKEN_FBC_STRIDE_OVERRIDE | - CHICKEN_FBC_STRIDE_MASK, val); - } + return dpfc_ctl; +} - dpfc_ctl = 0; - if (IS_IVYBRIDGE(dev_priv)) - dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane); - - dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv); - - if (params->fence_id >= 0) { - dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; - intel_de_write(dev_priv, SNB_DPFC_CTL_SA, - SNB_CPU_FENCE_ENABLE | params->fence_id); - intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, - params->fence_y_offset); - } else if (dev_priv->ggtt.num_fences) { - intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0); - intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0); - } +static void ivb_fbc_activate(struct intel_fbc *fbc) +{ + struct drm_i915_private *i915 = fbc->i915; + + if (DISPLAY_VER(i915) >= 10) + glk_fbc_program_cfb_stride(fbc); + else if (DISPLAY_VER(i915) == 9) + skl_fbc_program_cfb_stride(fbc); - if (dev_priv->fbc.false_color) - dpfc_ctl |= FBC_CTL_FALSE_COLOR; + if (i915->ggtt.num_fences) + snb_fbc_program_fence(fbc); - intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + intel_de_write(i915, ILK_DPFC_CONTROL, + DPFC_CTL_EN | ivb_dpfc_ctl(fbc)); } -static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv) +static bool ivb_fbc_is_compressing(struct intel_fbc *fbc) { - if (DISPLAY_VER(dev_priv) >= 5) - return ilk_fbc_is_active(dev_priv); - else if (IS_GM45(dev_priv)) - return g4x_fbc_is_active(dev_priv); - else - return i8xx_fbc_is_active(dev_priv); + return intel_de_read(fbc->i915, ILK_DPFC_STATUS2) & DPFC_COMP_SEG_MASK_IVB; } -static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv) +static void ivb_fbc_set_false_color(struct intel_fbc *fbc, + bool enable) { - struct intel_fbc *fbc = &dev_priv->fbc; + intel_de_rmw(fbc->i915, ILK_DPFC_CONTROL, + DPFC_CTL_FALSE_COLOR, enable ? DPFC_CTL_FALSE_COLOR : 0); +} - trace_intel_fbc_activate(fbc->crtc); +static const struct intel_fbc_funcs ivb_fbc_funcs = { + .activate = ivb_fbc_activate, + .deactivate = ilk_fbc_deactivate, + .is_active = ilk_fbc_is_active, + .is_compressing = ivb_fbc_is_compressing, + .nuke = snb_fbc_nuke, + .program_cfb = ilk_fbc_program_cfb, + .set_false_color = ivb_fbc_set_false_color, +}; + +static bool intel_fbc_hw_is_active(struct intel_fbc *fbc) +{ + return fbc->funcs->is_active(fbc); +} + +static void intel_fbc_hw_activate(struct intel_fbc *fbc) +{ + trace_intel_fbc_activate(fbc->state.plane); fbc->active = true; fbc->activated = true; - if (DISPLAY_VER(dev_priv) >= 7) - gen7_fbc_activate(dev_priv); - else if (DISPLAY_VER(dev_priv) >= 5) - ilk_fbc_activate(dev_priv); - else if (IS_GM45(dev_priv)) - g4x_fbc_activate(dev_priv); - else - i8xx_fbc_activate(dev_priv); + fbc->funcs->activate(fbc); } -static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv) +static void intel_fbc_hw_deactivate(struct intel_fbc *fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; - - trace_intel_fbc_deactivate(fbc->crtc); + trace_intel_fbc_deactivate(fbc->state.plane); fbc->active = false; - if (DISPLAY_VER(dev_priv) >= 5) - ilk_fbc_deactivate(dev_priv); - else if (IS_GM45(dev_priv)) - g4x_fbc_deactivate(dev_priv); - else - i8xx_fbc_deactivate(dev_priv); + fbc->funcs->deactivate(fbc); } -/** - * intel_fbc_is_active - Is FBC active? 
- * @dev_priv: i915 device instance - * - * This function is used to verify the current state of FBC. - * - * FIXME: This should be tracked in the plane config eventually - * instead of queried at runtime for most callers. - */ -bool intel_fbc_is_active(struct drm_i915_private *dev_priv) +static bool intel_fbc_is_compressing(struct intel_fbc *fbc) +{ + return fbc->funcs->is_compressing(fbc); +} + +static void intel_fbc_nuke(struct intel_fbc *fbc) { - return dev_priv->fbc.active; + trace_intel_fbc_nuke(fbc->state.plane); + + fbc->funcs->nuke(fbc); } -static void intel_fbc_activate(struct drm_i915_private *dev_priv) +static void intel_fbc_activate(struct intel_fbc *fbc) { - intel_fbc_hw_activate(dev_priv); - intel_fbc_recompress(dev_priv); + intel_fbc_hw_activate(fbc); + intel_fbc_nuke(fbc); + + fbc->no_fbc_reason = NULL; } -static void intel_fbc_deactivate(struct drm_i915_private *dev_priv, - const char *reason) +static void intel_fbc_deactivate(struct intel_fbc *fbc, const char *reason) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_i915_private *i915 = fbc->i915; - drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); + drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock)); if (fbc->active) - intel_fbc_hw_deactivate(dev_priv); + intel_fbc_hw_deactivate(fbc); fbc->no_fbc_reason = reason; } @@ -492,7 +688,7 @@ static u64 intel_fbc_cfb_base_max(struct drm_i915_private *i915) return BIT_ULL(32); } -static u64 intel_fbc_stolen_end(struct drm_i915_private *dev_priv) +static u64 intel_fbc_stolen_end(struct drm_i915_private *i915) { u64 end; @@ -500,24 +696,24 @@ static u64 intel_fbc_stolen_end(struct drm_i915_private *dev_priv) * reserved range size, so it always assumes the maximum (8mb) is used. * If we enable FBC using a CFB on that memory range we'll get FIFO * underruns, even if that range is not reserved by the BIOS. */ - if (IS_BROADWELL(dev_priv) || (DISPLAY_VER(dev_priv) == 9 && - !IS_BROXTON(dev_priv))) - end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024; + if (IS_BROADWELL(i915) || + (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915))) + end = resource_size(&i915->dsm) - 8 * 1024 * 1024; else end = U64_MAX; - return min(end, intel_fbc_cfb_base_max(dev_priv)); + return min(end, intel_fbc_cfb_base_max(i915)); } -static int intel_fbc_min_limit(int fb_cpp) +static int intel_fbc_min_limit(const struct intel_plane_state *plane_state) { - return fb_cpp == 2 ? 2 : 1; + return plane_state->hw.fb->format->cpp[0] == 2 ? 2 : 1; } -static int intel_fbc_max_limit(struct drm_i915_private *dev_priv) +static int intel_fbc_max_limit(struct drm_i915_private *i915) { /* WaFbcOnly1to1Ratio:ctg */ - if (IS_G4X(dev_priv)) + if (IS_G4X(i915)) return 1; /* @@ -527,23 +723,23 @@ static int intel_fbc_max_limit(struct drm_i915_private *dev_priv) return 4; } -static int find_compression_limit(struct drm_i915_private *dev_priv, +static int find_compression_limit(struct intel_fbc *fbc, unsigned int size, int min_limit) { - struct intel_fbc *fbc = &dev_priv->fbc; - u64 end = intel_fbc_stolen_end(dev_priv); + struct drm_i915_private *i915 = fbc->i915; + u64 end = intel_fbc_stolen_end(i915); int ret, limit = min_limit; size /= limit; /* Try to over-allocate to reduce reallocations and fragmentation. 
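 	 * (E.g. an 8 MiB cfb request is first tried as a 16 MiB allocation;
 	 * if that fails, the loop below retries with the size halved at
 	 * each step while the compression limit grows towards 1:4.)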
*/ - ret = i915_gem_stolen_insert_node_in_range(dev_priv, &fbc->compressed_fb, + ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb, size <<= 1, 4096, 0, end); if (ret == 0) return limit; - for (; limit <= intel_fbc_max_limit(dev_priv); limit <<= 1) { - ret = i915_gem_stolen_insert_node_in_range(dev_priv, &fbc->compressed_fb, + for (; limit <= intel_fbc_max_limit(i915); limit <<= 1) { + ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb, size >>= 1, 4096, 0, end); if (ret == 0) return limit; @@ -552,34 +748,34 @@ static int find_compression_limit(struct drm_i915_private *dev_priv, return 0; } -static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, +static int intel_fbc_alloc_cfb(struct intel_fbc *fbc, unsigned int size, int min_limit) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_i915_private *i915 = fbc->i915; int ret; - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(&i915->drm, drm_mm_node_allocated(&fbc->compressed_fb)); - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(&i915->drm, drm_mm_node_allocated(&fbc->compressed_llb)); - if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) { - ret = i915_gem_stolen_insert_node(dev_priv, &fbc->compressed_llb, + if (DISPLAY_VER(i915) < 5 && !IS_G4X(i915)) { + ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb, 4096, 4096); if (ret) goto err; } - ret = find_compression_limit(dev_priv, size, min_limit); + ret = find_compression_limit(fbc, size, min_limit); if (!ret) goto err_llb; else if (ret > min_limit) - drm_info_once(&dev_priv->drm, + drm_info_once(&i915->drm, "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n"); fbc->limit = ret; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n", fbc->compressed_fb.size, fbc->limit); @@ -587,83 +783,69 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, err_llb: if (drm_mm_node_allocated(&fbc->compressed_llb)) - i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_llb); + i915_gem_stolen_remove_node(i915, &fbc->compressed_llb); err: - if (drm_mm_initialized(&dev_priv->mm.stolen)) - drm_info_once(&dev_priv->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); + if (drm_mm_initialized(&i915->mm.stolen)) + drm_info_once(&i915->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. 
Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); return -ENOSPC; } -static void intel_fbc_program_cfb(struct drm_i915_private *dev_priv) +static void intel_fbc_program_cfb(struct intel_fbc *fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; - - if (DISPLAY_VER(dev_priv) >= 5) { - intel_de_write(dev_priv, ILK_DPFC_CB_BASE, - fbc->compressed_fb.start); - } else if (IS_GM45(dev_priv)) { - intel_de_write(dev_priv, DPFC_CB_BASE, - fbc->compressed_fb.start); - } else { - GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start, - fbc->compressed_fb.start, - U32_MAX)); - GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start, - fbc->compressed_llb.start, - U32_MAX)); - - intel_de_write(dev_priv, FBC_CFB_BASE, - dev_priv->dsm.start + fbc->compressed_fb.start); - intel_de_write(dev_priv, FBC_LL_BASE, - dev_priv->dsm.start + fbc->compressed_llb.start); - } + fbc->funcs->program_cfb(fbc); } -static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) +static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_i915_private *i915 = fbc->i915; - if (WARN_ON(intel_fbc_hw_is_active(dev_priv))) + if (WARN_ON(intel_fbc_hw_is_active(fbc))) return; if (drm_mm_node_allocated(&fbc->compressed_llb)) - i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_llb); + i915_gem_stolen_remove_node(i915, &fbc->compressed_llb); if (drm_mm_node_allocated(&fbc->compressed_fb)) - i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); + i915_gem_stolen_remove_node(i915, &fbc->compressed_fb); } -void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) +void intel_fbc_cleanup(struct drm_i915_private *i915) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc *fbc = i915->fbc; - if (!HAS_FBC(dev_priv)) + if (!fbc) return; mutex_lock(&fbc->lock); - __intel_fbc_cleanup_cfb(dev_priv); + __intel_fbc_cleanup_cfb(fbc); mutex_unlock(&fbc->lock); + + kfree(fbc); } -static bool stride_is_valid(struct drm_i915_private *dev_priv, - u64 modifier, unsigned int stride) +static bool stride_is_valid(const struct intel_plane_state *plane_state) { + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int stride = intel_fbc_plane_stride(plane_state) * + fb->format->cpp[0]; + /* This should have been caught earlier. */ - if (drm_WARN_ON_ONCE(&dev_priv->drm, (stride & (64 - 1)) != 0)) + if (drm_WARN_ON_ONCE(&i915->drm, (stride & (64 - 1)) != 0)) return false; /* Below are the additional FBC restrictions. 
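 * In short: at least 512 bytes everywhere; exactly 4 KiB or 8 KiB on
 * display version 2/3; at least 2 KiB on version 4 (other than g4x);
 * a multiple of 512 for linear buffers on version 9/glk (Display WA
 * #1105); and at most 16 KiB overall.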
*/ if (stride < 512) return false; - if (DISPLAY_VER(dev_priv) == 2 || DISPLAY_VER(dev_priv) == 3) + if (DISPLAY_VER(i915) == 2 || DISPLAY_VER(i915) == 3) return stride == 4096 || stride == 8192; - if (DISPLAY_VER(dev_priv) == 4 && !IS_G4X(dev_priv) && stride < 2048) + if (DISPLAY_VER(i915) == 4 && !IS_G4X(i915) && stride < 2048) return false; /* Display WA #1105: skl,bxt,kbl,cfl,glk */ - if ((DISPLAY_VER(dev_priv) == 9 || IS_GEMINILAKE(dev_priv)) && - modifier == DRM_FORMAT_MOD_LINEAR && stride & 511) + if ((DISPLAY_VER(i915) == 9 || IS_GEMINILAKE(i915)) && + fb->modifier == DRM_FORMAT_MOD_LINEAR && stride & 511) return false; if (stride > 16384) @@ -672,20 +854,22 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv, return true; } -static bool pixel_format_is_valid(struct drm_i915_private *dev_priv, - u32 pixel_format) +static bool pixel_format_is_valid(const struct intel_plane_state *plane_state) { - switch (pixel_format) { + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + + switch (fb->format->format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: return true; case DRM_FORMAT_XRGB1555: case DRM_FORMAT_RGB565: /* 16bpp not supported on gen2 */ - if (DISPLAY_VER(dev_priv) == 2) + if (DISPLAY_VER(i915) == 2) return false; /* WaFbcOnly1to1Ratio:ctg */ - if (IS_G4X(dev_priv)) + if (IS_G4X(i915)) return false; return true; default: @@ -693,13 +877,16 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv, } } -static bool rotation_is_valid(struct drm_i915_private *dev_priv, - u32 pixel_format, unsigned int rotation) +static bool rotation_is_valid(const struct intel_plane_state *plane_state) { - if (DISPLAY_VER(dev_priv) >= 9 && pixel_format == DRM_FORMAT_RGB565 && + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int rotation = plane_state->hw.rotation; + + if (DISPLAY_VER(i915) >= 9 && fb->format->format == DRM_FORMAT_RGB565 && drm_rotation_90_or_270(rotation)) return false; - else if (DISPLAY_VER(dev_priv) <= 4 && !IS_G4X(dev_priv) && + else if (DISPLAY_VER(i915) <= 4 && !IS_G4X(i915) && rotation != DRM_MODE_ROTATE_0) return false; @@ -712,19 +899,18 @@ static bool rotation_is_valid(struct drm_i915_private *dev_priv, * the X and Y offset registers. That's why we include the src x/y offsets * instead of just looking at the plane size. 
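 *
 * E.g. a 4096x2160 source whose color plane starts at (32, 16) is
 * treated as 4128x2176 when checked against the per-platform limits
 * below.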
*/ -static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) +static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *plane_state) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); unsigned int effective_w, effective_h, max_w, max_h; - if (DISPLAY_VER(dev_priv) >= 10) { + if (DISPLAY_VER(i915) >= 10) { max_w = 5120; max_h = 4096; - } else if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL(dev_priv)) { + } else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) { max_w = 4096; max_h = 4096; - } else if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5) { + } else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) { max_w = 4096; max_h = 2048; } else { @@ -732,22 +918,24 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) max_h = 1536; } - intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w, - &effective_h); - effective_w += fbc->state_cache.plane.adjusted_x; - effective_h += fbc->state_cache.plane.adjusted_y; + effective_w = plane_state->view.color_plane[0].x + + (drm_rect_width(&plane_state->uapi.src) >> 16); + effective_h = plane_state->view.color_plane[0].y + + (drm_rect_height(&plane_state->uapi.src) >> 16); return effective_w <= max_w && effective_h <= max_h; } -static bool tiling_is_valid(struct drm_i915_private *dev_priv, - u64 modifier) +static bool tiling_is_valid(const struct intel_plane_state *plane_state) { - switch (modifier) { + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + + switch (fb->modifier) { case DRM_FORMAT_MOD_LINEAR: case I915_FORMAT_MOD_Y_TILED: case I915_FORMAT_MOD_Yf_TILED: - return DISPLAY_VER(dev_priv) >= 9; + return DISPLAY_VER(i915) >= 9; case I915_FORMAT_MOD_X_TILED: return true; default: @@ -755,210 +943,163 @@ static bool tiling_is_valid(struct drm_i915_private *dev_priv, } } -static void intel_fbc_update_state_cache(struct intel_crtc *crtc, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +static void intel_fbc_update_state(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_plane *plane) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_fbc *fbc = &dev_priv->fbc; - struct intel_fbc_state_cache *cache = &fbc->state_cache; - struct drm_framebuffer *fb = plane_state->hw.fb; - - cache->plane.visible = plane_state->uapi.visible; - if (!cache->plane.visible) - return; - - cache->crtc.mode_flags = crtc_state->hw.adjusted_mode.flags; - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate; - - cache->plane.rotation = plane_state->hw.rotation; - /* - * Src coordinates are already rotated by 270 degrees for - * the 90/270 degree plane rotation cases (to match the - * GTT mapping), hence no need to account for rotation here. 
- */ - cache->plane.src_w = drm_rect_width(&plane_state->uapi.src) >> 16; - cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16; - cache->plane.adjusted_x = plane_state->view.color_plane[0].x; - cache->plane.adjusted_y = plane_state->view.color_plane[0].y; + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *plane_state = + intel_atomic_get_new_plane_state(state, plane); + struct intel_fbc *fbc = plane->fbc; + struct intel_fbc_state *fbc_state = &fbc->state; - cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode; + WARN_ON(plane_state->no_fbc_reason); - cache->fb.format = fb->format; - cache->fb.modifier = fb->modifier; - cache->fb.stride = intel_fbc_plane_stride(plane_state); + fbc_state->plane = plane; /* FBC1 compression interval: arbitrary choice of 1 second */ - cache->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode); + fbc_state->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode); - cache->fence_y_offset = intel_plane_fence_y_offset(plane_state); + fbc_state->fence_y_offset = intel_plane_fence_y_offset(plane_state); - drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE && + drm_WARN_ON(&i915->drm, plane_state->flags & PLANE_HAS_FENCE && !plane_state->ggtt_vma->fence); if (plane_state->flags & PLANE_HAS_FENCE && plane_state->ggtt_vma->fence) - cache->fence_id = plane_state->ggtt_vma->fence->id; + fbc_state->fence_id = plane_state->ggtt_vma->fence->id; else - cache->fence_id = -1; + fbc_state->fence_id = -1; - cache->psr2_active = crtc_state->has_psr2; + fbc_state->cfb_stride = intel_fbc_cfb_stride(plane_state); + fbc_state->cfb_size = intel_fbc_cfb_size(plane_state); + fbc_state->override_cfb_stride = intel_fbc_override_cfb_stride(plane_state); } -static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv) +static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); - return intel_fbc_cfb_size(dev_priv, &fbc->state_cache) > - fbc->compressed_fb.size * fbc->limit; -} - -static u16 intel_fbc_override_cfb_stride(struct drm_i915_private *dev_priv, - const struct intel_fbc_state_cache *cache) -{ - unsigned int stride = _intel_fbc_cfb_stride(cache); - unsigned int stride_aligned = intel_fbc_cfb_stride(dev_priv, cache); - - /* - * Override stride in 64 byte units per 4 line segment. + /* The use of a CPU fence is one of two ways to detect writes by the + * CPU to the scanout and trigger updates to the FBC. * - * Gen9 hw miscalculates cfb stride for linear as - * PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so - * we always need to use the override there. + * The other method is by software tracking (see + * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke + * the current compressed buffer and recompress it. + * + * Note that is possible for a tiled surface to be unmappable (and + * so have no fence associated with it) due to aperture constraints + * at the time of pinning. + * + * FIXME with 90/270 degree rotation we should use the fence on + * the normal GTT view (the rotated view doesn't even have a + * fence). Would need changes to the FBC fence Y offset as well. + * For now this will effectively disable FBC with 90/270 degree + * rotation. 
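 *
 * Net effect of the check below: display version 9+ may rely on
 * software tracking alone, while older platforms additionally require
 * the plane to actually have a fence.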
*/ - if (stride != stride_aligned || - (DISPLAY_VER(dev_priv) == 9 && - cache->fb.modifier == DRM_FORMAT_MOD_LINEAR)) - return stride_aligned * 4 / 64; - - return 0; + return DISPLAY_VER(i915) >= 9 || + (plane_state->flags & PLANE_HAS_FENCE && + plane_state->ggtt_vma->fence); } -static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv) +static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state) { - struct intel_fbc *fbc = &dev_priv->fbc; - - if (intel_vgpu_active(dev_priv)) { - fbc->no_fbc_reason = "VGPU is active"; - return false; - } - - if (!dev_priv->params.enable_fbc) { - fbc->no_fbc_reason = "disabled per module param or by default"; - return false; - } + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct intel_fbc *fbc = plane->fbc; - if (fbc->underrun_detected) { - fbc->no_fbc_reason = "underrun detected"; - return false; - } + return intel_fbc_min_limit(plane_state) <= fbc->limit && + intel_fbc_cfb_size(plane_state) <= fbc->compressed_fb.size * fbc->limit; +} - return true; +static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state) +{ + return !plane_state->no_fbc_reason && + intel_fbc_is_fence_ok(plane_state) && + intel_fbc_is_cfb_ok(plane_state); } -static bool intel_fbc_can_activate(struct intel_crtc *crtc) +static int intel_fbc_check_plane(struct intel_atomic_state *state, + struct intel_plane *plane) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_fbc *fbc = &dev_priv->fbc; - struct intel_fbc_state_cache *cache = &fbc->state_cache; + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_plane_state *plane_state = + intel_atomic_get_new_plane_state(state, plane); + const struct drm_framebuffer *fb = plane_state->hw.fb; + struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc); + const struct intel_crtc_state *crtc_state; + struct intel_fbc *fbc = plane->fbc; - if (!intel_fbc_can_enable(dev_priv)) - return false; + if (!fbc) + return 0; - if (!cache->plane.visible) { - fbc->no_fbc_reason = "primary plane not visible"; - return false; + if (intel_vgpu_active(i915)) { + plane_state->no_fbc_reason = "VGPU active"; + return 0; } - /* We don't need to use a state cache here since this information is - * global for all CRTC. - */ - if (fbc->underrun_detected) { - fbc->no_fbc_reason = "underrun detected"; - return false; + if (!i915->params.enable_fbc) { + plane_state->no_fbc_reason = "disabled per module param or by default"; + return 0; } - if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) { - fbc->no_fbc_reason = "incompatible mode"; - return false; + if (!plane_state->uapi.visible) { + plane_state->no_fbc_reason = "plane not visible"; + return 0; } - if (!intel_fbc_hw_tracking_covers_screen(crtc)) { - fbc->no_fbc_reason = "mode too large for compression"; - return false; + crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + + if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { + plane_state->no_fbc_reason = "interlaced mode not supported"; + return 0; } - /* The use of a CPU fence is one of two ways to detect writes by the - * CPU to the scanout and trigger updates to the FBC. - * - * The other method is by software tracking (see - * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke - * the current compressed buffer and recompress it. - * - * Note that is possible for a tiled surface to be unmappable (and - * so have no fence associated with it) due to aperture constraints - * at the time of pinning. 
- * - * FIXME with 90/270 degree rotation we should use the fence on - * the normal GTT view (the rotated view doesn't even have a - * fence). Would need changes to the FBC fence Y offset as well. - * For now this will effectively disable FBC with 90/270 degree - * rotation. - */ - if (DISPLAY_VER(dev_priv) < 9 && cache->fence_id < 0) { - fbc->no_fbc_reason = "framebuffer not tiled or fenced"; - return false; + if (crtc_state->double_wide) { + plane_state->no_fbc_reason = "double wide pipe not supported"; + return 0; } - if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) { - fbc->no_fbc_reason = "pixel format is invalid"; + /* + * Display 12+ is not supporting FBC with PSR2. + * Recommendation is to keep this combination disabled + * Bspec: 50422 HSD: 14010260002 + */ + if (DISPLAY_VER(i915) >= 12 && crtc_state->has_psr2) { + plane_state->no_fbc_reason = "PSR2 enabled"; return false; } - if (!rotation_is_valid(dev_priv, cache->fb.format->format, - cache->plane.rotation)) { - fbc->no_fbc_reason = "rotation unsupported"; - return false; + if (!pixel_format_is_valid(plane_state)) { + plane_state->no_fbc_reason = "pixel format not supported"; + return 0; } - if (!tiling_is_valid(dev_priv, cache->fb.modifier)) { - fbc->no_fbc_reason = "tiling unsupported"; - return false; + if (!tiling_is_valid(plane_state)) { + plane_state->no_fbc_reason = "tiling not supported"; + return 0; } - if (!stride_is_valid(dev_priv, cache->fb.modifier, - cache->fb.stride * cache->fb.format->cpp[0])) { - fbc->no_fbc_reason = "framebuffer stride not supported"; - return false; + if (!rotation_is_valid(plane_state)) { + plane_state->no_fbc_reason = "rotation not supported"; + return 0; } - if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE && - cache->fb.format->has_alpha) { - fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC"; - return false; + if (!stride_is_valid(plane_state)) { + plane_state->no_fbc_reason = "stride not supported"; + return 0; } - /* WaFbcExceedCdClockThreshold:hsw,bdw */ - if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) && - cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) { - fbc->no_fbc_reason = "pixel rate is too big"; + if (plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE && + fb->format->has_alpha) { + plane_state->no_fbc_reason = "per-pixel alpha not supported"; return false; } - /* It is possible for the required CFB size change without a - * crtc->disable + crtc->enable since it is possible to change the - * stride without triggering a full modeset. Since we try to - * over-allocate the CFB, there's a chance we may keep FBC enabled even - * if this happens, but if we exceed the current CFB size we'll have to - * disable FBC. Notice that it would be possible to disable FBC, wait - * for a frame, free the stolen node, then try to reenable FBC in case - * we didn't get any invalidate/deactivate calls, but this would require - * a lot of tracking just for a specific case. If we conclude it's an - * important case, we can implement it later. */ - if (intel_fbc_cfb_size_changed(dev_priv)) { - fbc->no_fbc_reason = "CFB requirements changed"; - return false; + if (!intel_fbc_hw_tracking_covers_screen(plane_state)) { + plane_state->no_fbc_reason = "plane size too big"; + return 0; } /* @@ -966,238 +1107,211 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) * having a Y offset that isn't divisible by 4 causes FIFO underrun * and screen flicker. 
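 *
 * Hence the check below on the plane's start Y offset, and, a little
 * further down, Wa_22010751166 applying the same multiple-of-4 rule
 * to start + height on display version 11+.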
*/ - if (DISPLAY_VER(dev_priv) >= 9 && - (fbc->state_cache.plane.adjusted_y & 3)) { - fbc->no_fbc_reason = "plane Y offset is misaligned"; + if (DISPLAY_VER(i915) >= 9 && + plane_state->view.color_plane[0].y & 3) { + plane_state->no_fbc_reason = "plane start Y offset misaligned"; return false; } /* Wa_22010751166: icl, ehl, tgl, dg1, rkl */ - if (DISPLAY_VER(dev_priv) >= 11 && - (cache->plane.src_h + cache->plane.adjusted_y) % 4) { - fbc->no_fbc_reason = "plane height + offset is non-modulo of 4"; - return false; - } - - /* - * Display 12+ is not supporting FBC with PSR2. - * Recommendation is to keep this combination disabled - * Bspec: 50422 HSD: 14010260002 - */ - if (fbc->state_cache.psr2_active && DISPLAY_VER(dev_priv) >= 12) { - fbc->no_fbc_reason = "not supported with PSR2"; + if (DISPLAY_VER(i915) >= 11 && + (plane_state->view.color_plane[0].y + drm_rect_height(&plane_state->uapi.src)) & 3) { + plane_state->no_fbc_reason = "plane end Y offset misaligned"; return false; } - return true; -} - -static void intel_fbc_get_reg_params(struct intel_crtc *crtc, - struct intel_fbc_reg_params *params) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_fbc *fbc = &dev_priv->fbc; - struct intel_fbc_state_cache *cache = &fbc->state_cache; - - /* Since all our fields are integer types, use memset here so the - * comparison function can rely on memcmp because the padding will be - * zero. */ - memset(params, 0, sizeof(*params)); - - params->fence_id = cache->fence_id; - params->fence_y_offset = cache->fence_y_offset; - - params->interval = cache->interval; + /* WaFbcExceedCdClockThreshold:hsw,bdw */ + if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { + const struct intel_cdclk_state *cdclk_state; - params->crtc.pipe = crtc->pipe; - params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane; + cdclk_state = intel_atomic_get_cdclk_state(state); + if (IS_ERR(cdclk_state)) + return PTR_ERR(cdclk_state); - params->fb.format = cache->fb.format; - params->fb.modifier = cache->fb.modifier; - params->fb.stride = cache->fb.stride; + if (crtc_state->pixel_rate >= cdclk_state->logical.cdclk * 95 / 100) { + plane_state->no_fbc_reason = "pixel rate too high"; + return 0; + } + } - params->cfb_stride = intel_fbc_cfb_stride(dev_priv, cache); - params->cfb_size = intel_fbc_cfb_size(dev_priv, cache); - params->override_cfb_stride = intel_fbc_override_cfb_stride(dev_priv, cache); + plane_state->no_fbc_reason = NULL; - params->plane_visible = cache->plane.visible; + return 0; } -static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - const struct intel_fbc *fbc = &dev_priv->fbc; - const struct intel_fbc_state_cache *cache = &fbc->state_cache; - const struct intel_fbc_reg_params *params = &fbc->params; - if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi)) - return false; +static bool intel_fbc_can_flip_nuke(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_plane *plane) +{ + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *old_plane_state = + intel_atomic_get_old_plane_state(state, plane); + const struct intel_plane_state *new_plane_state = + intel_atomic_get_new_plane_state(state, plane); + const struct drm_framebuffer *old_fb = old_plane_state->hw.fb; + const struct drm_framebuffer *new_fb = new_plane_state->hw.fb; 
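
	/*
	 * A flip may be handled by just nuking (recompressing) the current
	 * CFB, rather than deactivating FBC, only when nothing the hardware
	 * was programmed with changes: no modeset, FBC still viable on both
	 * the old and new state, and identical format, modifier, plane
	 * stride, CFB stride/size and override stride (all compared below).
	 */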
- if (!params->plane_visible) + if (drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) return false; - if (!intel_fbc_can_activate(crtc)) + if (!intel_fbc_is_ok(old_plane_state) || + !intel_fbc_is_ok(new_plane_state)) return false; - if (params->fb.format != cache->fb.format) + if (old_fb->format->format != new_fb->format->format) return false; - if (params->fb.modifier != cache->fb.modifier) + if (old_fb->modifier != new_fb->modifier) return false; - if (params->fb.stride != cache->fb.stride) + if (intel_fbc_plane_stride(old_plane_state) != + intel_fbc_plane_stride(new_plane_state)) return false; - if (params->cfb_stride != intel_fbc_cfb_stride(dev_priv, cache)) + if (intel_fbc_cfb_stride(old_plane_state) != + intel_fbc_cfb_stride(new_plane_state)) return false; - if (params->cfb_size != intel_fbc_cfb_size(dev_priv, cache)) + if (intel_fbc_cfb_size(old_plane_state) != + intel_fbc_cfb_size(new_plane_state)) return false; - if (params->override_cfb_stride != intel_fbc_override_cfb_stride(dev_priv, cache)) + if (intel_fbc_override_cfb_stride(old_plane_state) != + intel_fbc_override_cfb_stride(new_plane_state)) return false; return true; } -bool intel_fbc_pre_update(struct intel_atomic_state *state, - struct intel_crtc *crtc) +static bool __intel_fbc_pre_update(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_plane *plane) { - struct intel_plane *plane = to_intel_plane(crtc->base.primary); - const struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - const struct intel_plane_state *plane_state = - intel_atomic_get_new_plane_state(state, plane); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_fbc *fbc = &dev_priv->fbc; - const char *reason = "update pending"; + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_fbc *fbc = plane->fbc; bool need_vblank_wait = false; - if (!plane->has_fbc || !plane_state) - return need_vblank_wait; - - mutex_lock(&fbc->lock); + fbc->flip_pending = true; - if (fbc->crtc != crtc) - goto unlock; + if (intel_fbc_can_flip_nuke(state, crtc, plane)) + return need_vblank_wait; - intel_fbc_update_state_cache(crtc, crtc_state, plane_state); - fbc->flip_pending = true; + intel_fbc_deactivate(fbc, "update pending"); - if (!intel_fbc_can_flip_nuke(crtc_state)) { - intel_fbc_deactivate(dev_priv, reason); - - /* - * Display WA #1198: glk+ - * Need an extra vblank wait between FBC disable and most plane - * updates. Bspec says this is only needed for plane disable, but - * that is not true. Touching most plane registers will cause the - * corruption to appear. Also SKL/derivatives do not seem to be - * affected. - * - * TODO: could optimize this a bit by sampling the frame - * counter when we disable FBC (if it was already done earlier) - * and skipping the extra vblank wait before the plane update - * if at least one frame has already passed. - */ - if (fbc->activated && - DISPLAY_VER(dev_priv) >= 10) - need_vblank_wait = true; - fbc->activated = false; - } -unlock: - mutex_unlock(&fbc->lock); + /* + * Display WA #1198: glk+ + * Need an extra vblank wait between FBC disable and most plane + * updates. Bspec says this is only needed for plane disable, but + * that is not true. Touching most plane registers will cause the + * corruption to appear. Also SKL/derivatives do not seem to be + * affected. 
+ * + * TODO: could optimize this a bit by sampling the frame + * counter when we disable FBC (if it was already done earlier) + * and skipping the extra vblank wait before the plane update + * if at least one frame has already passed. + */ + if (fbc->activated && DISPLAY_VER(i915) >= 10) + need_vblank_wait = true; + fbc->activated = false; return need_vblank_wait; } -/** - * __intel_fbc_disable - disable FBC - * @dev_priv: i915 device instance - * - * This is the low level function that actually disables FBC. Callers should - * grab the FBC lock. - */ -static void __intel_fbc_disable(struct drm_i915_private *dev_priv) +bool intel_fbc_pre_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_fbc *fbc = &dev_priv->fbc; - struct intel_crtc *crtc = fbc->crtc; + const struct intel_plane_state *plane_state; + bool need_vblank_wait = false; + struct intel_plane *plane; + int i; + + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + struct intel_fbc *fbc = plane->fbc; - drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); - drm_WARN_ON(&dev_priv->drm, !fbc->crtc); - drm_WARN_ON(&dev_priv->drm, fbc->active); + if (!fbc || plane->pipe != crtc->pipe) + continue; - drm_dbg_kms(&dev_priv->drm, "Disabling FBC on pipe %c\n", - pipe_name(crtc->pipe)); + mutex_lock(&fbc->lock); - __intel_fbc_cleanup_cfb(dev_priv); + if (fbc->state.plane == plane) + need_vblank_wait |= __intel_fbc_pre_update(state, crtc, plane); - fbc->crtc = NULL; + mutex_unlock(&fbc->lock); + } + + return need_vblank_wait; } -static void __intel_fbc_post_update(struct intel_crtc *crtc) +static void __intel_fbc_disable(struct intel_fbc *fbc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_fbc *fbc = &dev_priv->fbc; - - drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); + struct drm_i915_private *i915 = fbc->i915; + struct intel_plane *plane = fbc->state.plane; - if (fbc->crtc != crtc) - return; + drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock)); + drm_WARN_ON(&i915->drm, fbc->active); - fbc->flip_pending = false; + drm_dbg_kms(&i915->drm, "Disabling FBC on [PLANE:%d:%s]\n", + plane->base.base.id, plane->base.name); - if (!dev_priv->params.enable_fbc) { - intel_fbc_deactivate(dev_priv, "disabled at runtime per module param"); - __intel_fbc_disable(dev_priv); + __intel_fbc_cleanup_cfb(fbc); - return; - } + fbc->state.plane = NULL; +} - intel_fbc_get_reg_params(crtc, &fbc->params); +static void __intel_fbc_post_update(struct intel_fbc *fbc) +{ + struct drm_i915_private *i915 = fbc->i915; - if (!intel_fbc_can_activate(crtc)) - return; + drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock)); if (!fbc->busy_bits) - intel_fbc_activate(dev_priv); + intel_fbc_activate(fbc); else - intel_fbc_deactivate(dev_priv, "frontbuffer write"); + intel_fbc_deactivate(fbc, "frontbuffer write"); } void intel_fbc_post_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_plane *plane = to_intel_plane(crtc->base.primary); - const struct intel_plane_state *plane_state = - intel_atomic_get_new_plane_state(state, plane); - struct intel_fbc *fbc = &dev_priv->fbc; + const struct intel_plane_state *plane_state; + struct intel_plane *plane; + int i; - if (!plane->has_fbc || !plane_state) - return; + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + struct intel_fbc *fbc = plane->fbc; - mutex_lock(&fbc->lock); - __intel_fbc_post_update(crtc); - 
mutex_unlock(&fbc->lock); + if (!fbc || plane->pipe != crtc->pipe) + continue; + + mutex_lock(&fbc->lock); + + if (fbc->state.plane == plane) { + fbc->flip_pending = false; + __intel_fbc_post_update(fbc); + } + + mutex_unlock(&fbc->lock); + } } static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc) { - if (fbc->crtc) - return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit; + if (fbc->state.plane) + return fbc->state.plane->frontbuffer_bit; else return fbc->possible_framebuffer_bits; } -void intel_fbc_invalidate(struct drm_i915_private *dev_priv, +void intel_fbc_invalidate(struct drm_i915_private *i915, unsigned int frontbuffer_bits, enum fb_op_origin origin) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc *fbc = i915->fbc; - if (!HAS_FBC(dev_priv)) + if (!fbc) return; if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE) @@ -1207,18 +1321,18 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv, fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits; - if (fbc->crtc && fbc->busy_bits) - intel_fbc_deactivate(dev_priv, "frontbuffer write"); + if (fbc->state.plane && fbc->busy_bits) + intel_fbc_deactivate(fbc, "frontbuffer write"); mutex_unlock(&fbc->lock); } -void intel_fbc_flush(struct drm_i915_private *dev_priv, +void intel_fbc_flush(struct drm_i915_private *i915, unsigned int frontbuffer_bits, enum fb_op_origin origin) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc *fbc = i915->fbc; - if (!HAS_FBC(dev_priv)) + if (!fbc) return; mutex_lock(&fbc->lock); @@ -1228,143 +1342,83 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv, if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE) goto out; - if (!fbc->busy_bits && fbc->crtc && + if (!fbc->busy_bits && fbc->state.plane && (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) { if (fbc->active) - intel_fbc_recompress(dev_priv); + intel_fbc_nuke(fbc); else if (!fbc->flip_pending) - __intel_fbc_post_update(fbc->crtc); + __intel_fbc_post_update(fbc); } out: mutex_unlock(&fbc->lock); } -/** - * intel_fbc_choose_crtc - select a CRTC to enable FBC on - * @dev_priv: i915 device instance - * @state: the atomic state structure - * - * This function looks at the proposed state for CRTCs and planes, then chooses - * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to - * true. - * - * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe - * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc. - */ -void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, - struct intel_atomic_state *state) +int intel_fbc_atomic_check(struct intel_atomic_state *state) { - struct intel_fbc *fbc = &dev_priv->fbc; - struct intel_plane *plane; struct intel_plane_state *plane_state; - bool crtc_chosen = false; + struct intel_plane *plane; int i; - mutex_lock(&fbc->lock); - - /* Does this atomic commit involve the CRTC currently tied to FBC? */ - if (fbc->crtc && - !intel_atomic_get_new_crtc_state(state, fbc->crtc)) - goto out; - - if (!intel_fbc_can_enable(dev_priv)) - goto out; - - /* Simply choose the first CRTC that is compatible and has a visible - * plane. We could go for fancier schemes such as checking the plane - * size, but this would just affect the few platforms that don't tie FBC - * to pipe or plane A. 
*/ for_each_new_intel_plane_in_state(state, plane, plane_state, i) { - struct intel_crtc_state *crtc_state; - struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc); - - if (!plane->has_fbc) - continue; + int ret; - if (!plane_state->uapi.visible) - continue; - - crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - - crtc_state->enable_fbc = true; - crtc_chosen = true; - break; + ret = intel_fbc_check_plane(state, plane); + if (ret) + return ret; } - if (!crtc_chosen) - fbc->no_fbc_reason = "no suitable CRTC for FBC"; - -out: - mutex_unlock(&fbc->lock); + return 0; } -/** - * intel_fbc_enable: tries to enable FBC on the CRTC - * @crtc: the CRTC - * @state: corresponding &drm_crtc_state for @crtc - * - * This function checks if the given CRTC was chosen for FBC, then enables it if - * possible. Notice that it doesn't activate FBC. It is valid to call - * intel_fbc_enable multiple times for the same pipe without an - * intel_fbc_disable in the middle, as long as it is deactivated. - */ -static void intel_fbc_enable(struct intel_atomic_state *state, - struct intel_crtc *crtc) +static void __intel_fbc_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_plane *plane) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_plane *plane = to_intel_plane(crtc->base.primary); - const struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); + struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_plane_state *plane_state = intel_atomic_get_new_plane_state(state, plane); - struct intel_fbc *fbc = &dev_priv->fbc; - struct intel_fbc_state_cache *cache = &fbc->state_cache; - int min_limit; + struct intel_fbc *fbc = plane->fbc; - if (!plane->has_fbc || !plane_state) - return; + if (fbc->state.plane) { + if (fbc->state.plane != plane) + return; - min_limit = intel_fbc_min_limit(plane_state->hw.fb ? 
- plane_state->hw.fb->format->cpp[0] : 0); + if (intel_fbc_is_ok(plane_state)) + return; - mutex_lock(&fbc->lock); + __intel_fbc_disable(fbc); + } - if (fbc->crtc) { - if (fbc->crtc != crtc) - goto out; + drm_WARN_ON(&i915->drm, fbc->active); - if (fbc->limit >= min_limit && - !intel_fbc_cfb_size_changed(dev_priv)) - goto out; + fbc->no_fbc_reason = plane_state->no_fbc_reason; + if (fbc->no_fbc_reason) + return; - __intel_fbc_disable(dev_priv); + if (!intel_fbc_is_fence_ok(plane_state)) { + fbc->no_fbc_reason = "framebuffer not fenced"; + return; } - drm_WARN_ON(&dev_priv->drm, fbc->active); - - intel_fbc_update_state_cache(crtc, crtc_state, plane_state); - - /* FIXME crtc_state->enable_fbc lies :( */ - if (!cache->plane.visible) - goto out; + if (fbc->underrun_detected) { + fbc->no_fbc_reason = "FIFO underrun"; + return; + } - if (intel_fbc_alloc_cfb(dev_priv, - intel_fbc_cfb_size(dev_priv, cache), min_limit)) { - cache->plane.visible = false; + if (intel_fbc_alloc_cfb(fbc, intel_fbc_cfb_size(plane_state), + intel_fbc_min_limit(plane_state))) { fbc->no_fbc_reason = "not enough stolen memory"; - goto out; + return; } - drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n", - pipe_name(crtc->pipe)); + drm_dbg_kms(&i915->drm, "Enabling FBC on [PLANE:%d:%s]\n", + plane->base.base.id, plane->base.name); fbc->no_fbc_reason = "FBC enabled but not active yet\n"; - fbc->crtc = crtc; + intel_fbc_update_state(state, crtc, plane); - intel_fbc_program_cfb(dev_priv); -out: - mutex_unlock(&fbc->lock); + intel_fbc_program_cfb(fbc); } /** @@ -1375,114 +1429,122 @@ out: */ void intel_fbc_disable(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_plane *plane = to_intel_plane(crtc->base.primary); - struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_plane *plane; - if (!plane->has_fbc) - return; + for_each_intel_plane(&i915->drm, plane) { + struct intel_fbc *fbc = plane->fbc; - mutex_lock(&fbc->lock); - if (fbc->crtc == crtc) - __intel_fbc_disable(dev_priv); - mutex_unlock(&fbc->lock); + if (!fbc || plane->pipe != crtc->pipe) + continue; + + mutex_lock(&fbc->lock); + if (fbc->state.plane == plane) + __intel_fbc_disable(fbc); + mutex_unlock(&fbc->lock); + } } -/** - * intel_fbc_update: enable/disable FBC on the CRTC - * @state: atomic state - * @crtc: the CRTC - * - * This function checks if the given CRTC was chosen for FBC, then enables it if - * possible. Notice that it doesn't activate FBC. It is valid to call - * intel_fbc_update multiple times for the same pipe without an - * intel_fbc_disable in the middle. 
- */ void intel_fbc_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *plane_state; + struct intel_plane *plane; + int i; - if (crtc_state->update_pipe && !crtc_state->enable_fbc) - intel_fbc_disable(crtc); - else - intel_fbc_enable(state, crtc); + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + struct intel_fbc *fbc = plane->fbc; + + if (!fbc || plane->pipe != crtc->pipe) + continue; + + mutex_lock(&fbc->lock); + + if (crtc_state->update_pipe && plane_state->no_fbc_reason) { + if (fbc->state.plane == plane) + __intel_fbc_disable(fbc); + } else { + __intel_fbc_enable(state, crtc, plane); + } + + mutex_unlock(&fbc->lock); + } } /** * intel_fbc_global_disable - globally disable FBC - * @dev_priv: i915 device instance + * @i915: i915 device instance * * This function disables FBC regardless of which CRTC is associated with it. */ -void intel_fbc_global_disable(struct drm_i915_private *dev_priv) +void intel_fbc_global_disable(struct drm_i915_private *i915) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc *fbc = i915->fbc; - if (!HAS_FBC(dev_priv)) + if (!fbc) return; mutex_lock(&fbc->lock); - if (fbc->crtc) { - drm_WARN_ON(&dev_priv->drm, fbc->crtc->active); - __intel_fbc_disable(dev_priv); - } + if (fbc->state.plane) + __intel_fbc_disable(fbc); mutex_unlock(&fbc->lock); } static void intel_fbc_underrun_work_fn(struct work_struct *work) { - struct drm_i915_private *dev_priv = - container_of(work, struct drm_i915_private, fbc.underrun_work); - struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc *fbc = container_of(work, typeof(*fbc), underrun_work); + struct drm_i915_private *i915 = fbc->i915; mutex_lock(&fbc->lock); /* Maybe we were scheduled twice. */ - if (fbc->underrun_detected || !fbc->crtc) + if (fbc->underrun_detected || !fbc->state.plane) goto out; - drm_dbg_kms(&dev_priv->drm, "Disabling FBC due to FIFO underrun.\n"); + drm_dbg_kms(&i915->drm, "Disabling FBC due to FIFO underrun.\n"); fbc->underrun_detected = true; - intel_fbc_deactivate(dev_priv, "FIFO underrun"); + intel_fbc_deactivate(fbc, "FIFO underrun"); + if (!fbc->flip_pending) + intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(i915, fbc->state.plane->pipe)); + __intel_fbc_disable(fbc); out: mutex_unlock(&fbc->lock); } /* * intel_fbc_reset_underrun - reset FBC fifo underrun status. - * @dev_priv: i915 device instance + * @i915: the i915 device * * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we * want to re-enable FBC after an underrun to increase test coverage. 
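 *
 * Clearing underrun_detected here lets the next atomic commit make it
 * past the "FIFO underrun" check in __intel_fbc_enable() again.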
*/ -int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv) +void intel_fbc_reset_underrun(struct drm_i915_private *i915) { - int ret; + struct intel_fbc *fbc = i915->fbc; - cancel_work_sync(&dev_priv->fbc.underrun_work); + if (!fbc) + return; - ret = mutex_lock_interruptible(&dev_priv->fbc.lock); - if (ret) - return ret; + cancel_work_sync(&fbc->underrun_work); - if (dev_priv->fbc.underrun_detected) { - drm_dbg_kms(&dev_priv->drm, + mutex_lock(&fbc->lock); + + if (fbc->underrun_detected) { + drm_dbg_kms(&i915->drm, "Re-allowing FBC after fifo underrun\n"); - dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared"; + fbc->no_fbc_reason = "FIFO underrun cleared"; } - dev_priv->fbc.underrun_detected = false; - mutex_unlock(&dev_priv->fbc.lock); - - return 0; + fbc->underrun_detected = false; + mutex_unlock(&fbc->lock); } /** * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun - * @dev_priv: i915 device instance + * @i915: i915 device * * Without FBC, most underruns are harmless and don't really cause too many * problems, except for an annoying message on dmesg. With FBC, underruns can @@ -1494,11 +1556,11 @@ int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv) * * This function is called from the IRQ handler. */ -void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv) +void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc *fbc = i915->fbc; - if (!HAS_FBC(dev_priv)) + if (!fbc) return; /* There's no guarantee that underrun_detected won't be set to true @@ -1522,26 +1584,26 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv) * space to change the value during runtime without sanitizing it again. IGT * relies on being able to change i915.enable_fbc at runtime. 
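 *
 * Sanitizing maps -1 (auto) to the platform default computed below:
 * 1 on Broadwell and display version 9+, 0 everywhere else or when
 * FBC is not supported at all.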
*/ -static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) +static int intel_sanitize_fbc_option(struct drm_i915_private *i915) { - if (dev_priv->params.enable_fbc >= 0) - return !!dev_priv->params.enable_fbc; + if (i915->params.enable_fbc >= 0) + return !!i915->params.enable_fbc; - if (!HAS_FBC(dev_priv)) + if (!HAS_FBC(i915)) return 0; - if (IS_BROADWELL(dev_priv) || DISPLAY_VER(dev_priv) >= 9) + if (IS_BROADWELL(i915) || DISPLAY_VER(i915) >= 9) return 1; return 0; } -static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv) +static bool need_fbc_vtd_wa(struct drm_i915_private *i915) { /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */ - if (intel_vtd_active() && - (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) { - drm_info(&dev_priv->drm, + if (intel_vtd_active(i915) && + (IS_SKYLAKE(i915) || IS_BROXTON(i915))) { + drm_info(&i915->drm, "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n"); return true; } @@ -1549,38 +1611,171 @@ static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv) return false; } +void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane) +{ + if (!fbc) + return; + + plane->fbc = fbc; + fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; +} + +static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915) +{ + struct intel_fbc *fbc; + + fbc = kzalloc(sizeof(*fbc), GFP_KERNEL); + if (!fbc) + return NULL; + + fbc->i915 = i915; + INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn); + mutex_init(&fbc->lock); + + if (DISPLAY_VER(i915) >= 7) + fbc->funcs = &ivb_fbc_funcs; + else if (DISPLAY_VER(i915) == 6) + fbc->funcs = &snb_fbc_funcs; + else if (DISPLAY_VER(i915) == 5) + fbc->funcs = &ilk_fbc_funcs; + else if (IS_G4X(i915)) + fbc->funcs = &g4x_fbc_funcs; + else if (DISPLAY_VER(i915) == 4) + fbc->funcs = &i965_fbc_funcs; + else + fbc->funcs = &i8xx_fbc_funcs; + + return fbc; +} + /** * intel_fbc_init - Initialize FBC - * @dev_priv: the i915 device + * @i915: the i915 device * * This function might be called during PM init process. */ -void intel_fbc_init(struct drm_i915_private *dev_priv) +void intel_fbc_init(struct drm_i915_private *i915) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc *fbc; - INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn); - mutex_init(&fbc->lock); - fbc->active = false; + if (!drm_mm_initialized(&i915->mm.stolen)) + mkwrite_device_info(i915)->display.has_fbc = false; - if (!drm_mm_initialized(&dev_priv->mm.stolen)) - mkwrite_device_info(dev_priv)->display.has_fbc = false; + if (need_fbc_vtd_wa(i915)) + mkwrite_device_info(i915)->display.has_fbc = false; - if (need_fbc_vtd_wa(dev_priv)) - mkwrite_device_info(dev_priv)->display.has_fbc = false; + i915->params.enable_fbc = intel_sanitize_fbc_option(i915); + drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n", + i915->params.enable_fbc); - dev_priv->params.enable_fbc = intel_sanitize_fbc_option(dev_priv); - drm_dbg_kms(&dev_priv->drm, "Sanitized enable_fbc value: %d\n", - dev_priv->params.enable_fbc); + if (!HAS_FBC(i915)) + return; - if (!HAS_FBC(dev_priv)) { - fbc->no_fbc_reason = "unsupported by this chipset"; + fbc = intel_fbc_create(i915); + if (!fbc) return; - } /* We still don't have any sort of hardware state readout for FBC, so * deactivate it in case the BIOS activated it to make sure software * matches the hardware state. 
*/ - if (intel_fbc_hw_is_active(dev_priv)) - intel_fbc_hw_deactivate(dev_priv); + if (intel_fbc_hw_is_active(fbc)) + intel_fbc_hw_deactivate(fbc); + + i915->fbc = fbc; +} + +static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused) +{ + struct intel_fbc *fbc = m->private; + struct drm_i915_private *i915 = fbc->i915; + struct intel_plane *plane; + intel_wakeref_t wakeref; + + drm_modeset_lock_all(&i915->drm); + + wakeref = intel_runtime_pm_get(&i915->runtime_pm); + mutex_lock(&fbc->lock); + + if (fbc->active) { + seq_puts(m, "FBC enabled\n"); + seq_printf(m, "Compressing: %s\n", + yesno(intel_fbc_is_compressing(fbc))); + } else { + seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason); + } + + for_each_intel_plane(&i915->drm, plane) { + const struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + + if (plane->fbc != fbc) + continue; + + seq_printf(m, "%c [PLANE:%d:%s]: %s\n", + fbc->state.plane == plane ? '*' : ' ', + plane->base.base.id, plane->base.name, + plane_state->no_fbc_reason ?: "FBC possible"); + } + + mutex_unlock(&fbc->lock); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); + + drm_modeset_unlock_all(&i915->drm); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(intel_fbc_debugfs_status); + +static int intel_fbc_debugfs_false_color_get(void *data, u64 *val) +{ + struct intel_fbc *fbc = data; + + *val = fbc->false_color; + + return 0; +} + +static int intel_fbc_debugfs_false_color_set(void *data, u64 val) +{ + struct intel_fbc *fbc = data; + + mutex_lock(&fbc->lock); + + fbc->false_color = val; + + if (fbc->active) + fbc->funcs->set_false_color(fbc, fbc->false_color); + + mutex_unlock(&fbc->lock); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(intel_fbc_debugfs_false_color_fops, + intel_fbc_debugfs_false_color_get, + intel_fbc_debugfs_false_color_set, + "%llu\n"); + +static void intel_fbc_debugfs_add(struct intel_fbc *fbc) +{ + struct drm_i915_private *i915 = fbc->i915; + struct drm_minor *minor = i915->drm.primary; + + debugfs_create_file("i915_fbc_status", 0444, + minor->debugfs_root, fbc, + &intel_fbc_debugfs_status_fops); + + if (fbc->funcs->set_false_color) + debugfs_create_file("i915_fbc_false_color", 0644, + minor->debugfs_root, fbc, + &intel_fbc_debugfs_false_color_fops); +} + +void intel_fbc_debugfs_register(struct drm_i915_private *i915) +{ + struct intel_fbc *fbc = i915->fbc; + + if (fbc) + intel_fbc_debugfs_add(fbc); } diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h index b97d908738e6..07ad0411fcc3 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.h +++ b/drivers/gpu/drm/i915/display/intel_fbc.h @@ -8,22 +8,22 @@ #include <linux/types.h> -#include "intel_frontbuffer.h" - +enum fb_op_origin; struct drm_i915_private; struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; +struct intel_fbc; +struct intel_plane; struct intel_plane_state; -void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, - struct intel_atomic_state *state); -bool intel_fbc_is_active(struct drm_i915_private *dev_priv); +int intel_fbc_atomic_check(struct intel_atomic_state *state); bool intel_fbc_pre_update(struct intel_atomic_state *state, struct intel_crtc *crtc); void intel_fbc_post_update(struct intel_atomic_state *state, struct intel_crtc *crtc); void intel_fbc_init(struct drm_i915_private *dev_priv); +void intel_fbc_cleanup(struct drm_i915_private *dev_priv); void intel_fbc_update(struct intel_atomic_state *state, struct intel_crtc *crtc); void intel_fbc_disable(struct intel_crtc 
*crtc); @@ -33,8 +33,9 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv, enum fb_op_origin origin); void intel_fbc_flush(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits, enum fb_op_origin origin); -void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv); -void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv); -int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv); +void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane); +void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915); +void intel_fbc_reset_underrun(struct drm_i915_private *i915); +void intel_fbc_debugfs_register(struct drm_i915_private *i915); #endif /* __INTEL_FBC_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c index dd2cf0c59921..3d6e22923601 100644 --- a/drivers/gpu/drm/i915/display/intel_fdi.c +++ b/drivers/gpu/drm/i915/display/intel_fdi.c @@ -4,11 +4,11 @@ */ #include "intel_atomic.h" +#include "intel_crtc.h" #include "intel_ddi.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_fdi.h" -#include "intel_sbi.h" static void assert_fdi_tx(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) @@ -158,7 +158,7 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, if (pipe_config->fdi_lanes <= 2) return 0; - other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C); + other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_C); other_crtc_state = intel_atomic_get_crtc_state(state, other_crtc); if (IS_ERR(other_crtc_state)) @@ -179,7 +179,7 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, return -EINVAL; } - other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B); + other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_B); other_crtc_state = intel_atomic_get_crtc_state(state, other_crtc); if (IS_ERR(other_crtc_state)) @@ -887,6 +887,43 @@ void hsw_fdi_link_train(struct intel_encoder *encoder, DP_TP_CTL_ENABLE); } +void hsw_fdi_disable(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + u32 val; + + /* + * Bspec lists this as both step 13 (before DDI_BUF_CTL disable) + * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN, + * step 13 is the correct place for it. Step 18 is where it was + * originally before the BUN. 
+ */ + val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); + val &= ~FDI_RX_ENABLE; + intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); + + val = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E)); + val &= ~DDI_BUF_CTL_ENABLE; + intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), val); + + intel_wait_ddi_buf_idle(dev_priv, PORT_E); + + intel_ddi_disable_clock(encoder); + + val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A)); + val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); + val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); + intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val); + + val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); + val &= ~FDI_PCDCLK; + intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); + + val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); + val &= ~FDI_RX_PLL_ENABLE; + intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); +} + void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -1006,104 +1043,6 @@ void ilk_fdi_disable(struct intel_crtc *crtc) udelay(100); } -static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv) -{ - u32 tmp; - - tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); - tmp |= FDI_MPHY_IOSFSB_RESET_CTL; - intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp); - - if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) & - FDI_MPHY_IOSFSB_RESET_STATUS, 100)) - drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n"); - - tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); - tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; - intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp); - - if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) & - FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) - drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n"); -} - -/* WaMPhyProgramming:hsw */ -void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv) -{ - u32 tmp; - - lpt_fdi_reset_mphy(dev_priv); - - tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); - tmp &= ~(0xFF << 24); - tmp |= (0x12 << 24); - intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); - tmp |= (1 << 11); - intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY); - tmp |= (1 << 11); - intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); - tmp |= (1 << 24) | (1 << 21) | (1 << 18); - intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); - tmp |= (1 << 24) | (1 << 21) | (1 << 18); - intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); - tmp &= ~(7 << 13); - tmp |= (5 << 13); - intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); - tmp &= ~(7 << 13); - tmp |= (5 << 13); - intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); - tmp &= ~0xFF; - tmp |= 0x1C; - intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); - tmp &= ~0xFF; - tmp |= 0x1C; - intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); - tmp &= ~(0xFF << 16); - tmp |= (0x1C << 16); - intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); - tmp &= ~(0xFF << 16); - tmp |= (0x1C << 16); - intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); - 
tmp |= (1 << 27); - intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); - tmp |= (1 << 27); - intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); - tmp &= ~(0xF << 28); - tmp |= (4 << 28); - intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); - tmp &= ~(0xF << 28); - tmp |= (4 << 28); - intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); -} - static const struct intel_fdi_funcs ilk_funcs = { .fdi_link_train = ilk_fdi_link_train, }; diff --git a/drivers/gpu/drm/i915/display/intel_fdi.h b/drivers/gpu/drm/i915/display/intel_fdi.h index 640d6585c137..1cdb86172702 100644 --- a/drivers/gpu/drm/i915/display/intel_fdi.h +++ b/drivers/gpu/drm/i915/display/intel_fdi.h @@ -23,8 +23,8 @@ void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state); void intel_fdi_init_hook(struct drm_i915_private *dev_priv); void hsw_fdi_link_train(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); +void hsw_fdi_disable(struct intel_encoder *encoder); void intel_fdi_pll_freq_update(struct drm_i915_private *i915); -void lpt_fdi_program_mphy(struct drm_i915_private *i915); void intel_fdi_link_train(struct intel_crtc *crtc, const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c index eb841960840d..d636d21fa9ce 100644 --- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c @@ -26,8 +26,8 @@ */ #include "i915_drv.h" -#include "i915_trace.h" #include "intel_de.h" +#include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_fbc.h" #include "intel_fifo_underrun.h" @@ -61,7 +61,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev) lockdep_assert_held(&dev_priv->irq_lock); for_each_pipe(dev_priv, pipe) { - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(dev_priv, pipe); if (crtc->cpu_fifo_underrun_disabled) return false; @@ -79,7 +79,7 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev) lockdep_assert_held(&dev_priv->irq_lock); for_each_pipe(dev_priv, pipe) { - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(dev_priv, pipe); if (crtc->pch_fifo_underrun_disabled) return false; @@ -279,7 +279,7 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable) { struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); bool old; lockdep_assert_held(&dev_priv->irq_lock); @@ -348,7 +348,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv, bool enable) { struct intel_crtc *crtc = - intel_get_crtc_for_pipe(dev_priv, pch_transcoder); + intel_crtc_for_pipe(dev_priv, pch_transcoder); unsigned long flags; bool old; @@ -391,7 +391,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv, void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); u32 underruns = 0; /* We may be called too early in init, thanks BIOS! 
*/ diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c index 0492446cd04a..791248f812aa 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c @@ -55,14 +55,13 @@ * cancelled as soon as busyness is detected. */ -#include "display/intel_dp.h" - #include "i915_drv.h" -#include "i915_trace.h" +#include "intel_display_trace.h" #include "intel_display_types.h" +#include "intel_dp.h" +#include "intel_drrs.h" #include "intel_fbc.h" #include "intel_frontbuffer.h" -#include "intel_drrs.h" #include "intel_psr.h" /** diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h index a88441edc8f9..ff0c37b079aa 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h @@ -28,7 +28,7 @@ #include <linux/kref.h> #include "gem/i915_gem_object_types.h" -#include "i915_active.h" +#include "i915_active_types.h" struct drm_i915_private; diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index ceb1bf8a8c3c..3b8b84177085 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -334,6 +334,15 @@ intel_gpio_setup(struct intel_gmbus *bus, unsigned int pin) algo->data = bus; } +static bool has_gmbus_irq(struct drm_i915_private *i915) +{ + /* + * encoder->shutdown() may want to use GMBUS + * after irqs have already been disabled. + */ + return HAS_GMBUS_IRQ(i915) && intel_irqs_enabled(i915); +} + static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en) { DEFINE_WAIT(wait); @@ -344,7 +353,7 @@ static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en) * we also need to check for NAKs besides the hw ready/idle signal, we * need to wake up periodically and check that ourselves. */ - if (!HAS_GMBUS_IRQ(dev_priv)) + if (!has_gmbus_irq(dev_priv)) irq_en = 0; add_wait_queue(&dev_priv->gmbus_wait_queue, &wait); @@ -375,7 +384,7 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv) /* Important: The hw handles only the first bit, so set only one! 
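 */

/*
 * Aside, not part of this patch: both GMBUS wait paths now make the same
 * decision, use the GMBUS interrupt only while driver IRQs are installed,
 * so a transfer issued from encoder->shutdown() quietly degrades to
 * polling. A one-line model of that choice (wrapper name hypothetical):
 */
static inline u32 gmbus_pick_irq_en(struct drm_i915_private *i915, u32 irq_en)
{
	return has_gmbus_irq(i915) ? irq_en : 0;	/* 0 means pure polling */
}
/* end of aside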
*/ irq_enable = 0; - if (HAS_GMBUS_IRQ(dev_priv)) + if (has_gmbus_irq(dev_priv)) irq_enable = GMBUS_IDLE_EN; add_wait_queue(&dev_priv->gmbus_wait_queue, &wait); diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 371736bdc01f..3b5b9e7b05b7 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -1800,6 +1800,11 @@ static bool intel_has_hdmi_sink(struct intel_hdmi *hdmi, READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI; } +static bool intel_hdmi_is_ycbcr420(const struct intel_crtc_state *crtc_state) +{ + return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420; +} + static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, bool respect_downstream_limits, bool has_hdmi_sink) @@ -1864,8 +1869,12 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi, return MODE_OK; } -static int intel_hdmi_port_clock(int clock, int bpc) +static int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output) { + /* YCBCR420 TMDS rate requirement is half the pixel clock */ + if (ycbcr420_output) + clock /= 2; + /* * Need to adjust the port link by: * 1.5x for 12bpc @@ -1874,18 +1883,29 @@ static int intel_hdmi_port_clock(int clock, int bpc) return clock * bpc / 8; } -static bool intel_hdmi_bpc_possible(struct drm_connector *connector, - int bpc, bool has_hdmi_sink, bool ycbcr420_output) +static bool intel_hdmi_source_bpc_possible(struct drm_i915_private *i915, int bpc) +{ + switch (bpc) { + case 12: + return !HAS_GMCH(i915); + case 10: + return DISPLAY_VER(i915) >= 11; + case 8: + return true; + default: + MISSING_CASE(bpc); + return false; + } +} + +static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector, + int bpc, bool has_hdmi_sink, bool ycbcr420_output) { - struct drm_i915_private *i915 = to_i915(connector->dev); const struct drm_display_info *info = &connector->display_info; const struct drm_hdmi_info *hdmi = &info->hdmi; switch (bpc) { case 12: - if (HAS_GMCH(i915)) - return false; - if (!has_hdmi_sink) return false; @@ -1894,9 +1914,6 @@ static bool intel_hdmi_bpc_possible(struct drm_connector *connector, else return info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36; case 10: - if (DISPLAY_VER(i915) < 11) - return false; - if (!has_hdmi_sink) return false; @@ -1916,26 +1933,26 @@ static enum drm_mode_status intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock, bool has_hdmi_sink, bool ycbcr420_output) { + struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector)); enum drm_mode_status status; - if (ycbcr420_output) - clock /= 2; - /* check if we can do 8bpc */ - status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 8), + status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 8, ycbcr420_output), true, has_hdmi_sink); /* if we can't do 8bpc we may still be able to do 12bpc */ if (status != MODE_OK && - intel_hdmi_bpc_possible(connector, 12, has_hdmi_sink, ycbcr420_output)) - status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 12), + intel_hdmi_source_bpc_possible(i915, 12) && + intel_hdmi_sink_bpc_possible(connector, 12, has_hdmi_sink, ycbcr420_output)) + status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 12, ycbcr420_output), true, has_hdmi_sink); /* if we can't do 8,12bpc we may still be able to do 10bpc */ if (status != MODE_OK && - intel_hdmi_bpc_possible(connector, 10, has_hdmi_sink, 
ycbcr420_output)) - status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 10), + intel_hdmi_source_bpc_possible(i915, 10) && + intel_hdmi_sink_bpc_possible(connector, 10, has_hdmi_sink, ycbcr420_output)) + status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 10, ycbcr420_output), true, has_hdmi_sink); return status; @@ -2000,7 +2017,7 @@ bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, if (connector_state->crtc != crtc_state->uapi.crtc) continue; - if (!intel_hdmi_bpc_possible(connector, bpc, has_hdmi_sink, ycbcr420_output)) + if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, ycbcr420_output)) return false; } @@ -2015,6 +2032,9 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + if (!intel_hdmi_source_bpc_possible(dev_priv, bpc)) + return false; + /* * HDMI deep color affects the clocks, so it's only possible * when not cloning with other encoder types. @@ -2023,7 +2043,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, return false; /* Display Wa_1405510057:icl,ehl */ - if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 && + if (intel_hdmi_is_ycbcr420(crtc_state) && bpc == 10 && DISPLAY_VER(dev_priv) == 11 && (adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start) % 8 == 2) @@ -2031,8 +2051,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, return intel_hdmi_deep_color_possible(crtc_state, bpc, crtc_state->has_hdmi_sink, - crtc_state->output_format == - INTEL_OUTPUT_FORMAT_YCBCR420); + intel_hdmi_is_ycbcr420(crtc_state)); } static int intel_hdmi_compute_bpc(struct intel_encoder *encoder, @@ -2040,12 +2059,13 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder, int clock) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + bool ycbcr420_output = intel_hdmi_is_ycbcr420(crtc_state); int bpc; for (bpc = 12; bpc >= 10; bpc -= 2) { if (hdmi_deep_color_possible(crtc_state, bpc) && hdmi_port_clock_valid(intel_hdmi, - intel_hdmi_port_clock(clock, bpc), + intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output), true, crtc_state->has_hdmi_sink) == MODE_OK) return bpc; } @@ -2065,13 +2085,10 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) clock *= 2; - /* YCBCR420 TMDS rate requirement is half the pixel clock */ - if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) - clock /= 2; - bpc = intel_hdmi_compute_bpc(encoder, crtc_state, clock); - crtc_state->port_clock = intel_hdmi_port_clock(clock, bpc); + crtc_state->port_clock = intel_hdmi_tmds_clock(clock, bpc, + intel_hdmi_is_ycbcr420(crtc_state)); /* * pipe_bpp could already be below 8bpc due to @@ -2141,34 +2158,44 @@ static bool intel_hdmi_has_audio(struct intel_encoder *encoder, return intel_conn_state->force_audio == HDMI_AUDIO_ON; } +static enum intel_output_format +intel_hdmi_output_format(struct intel_connector *connector, + bool ycbcr_420_output) +{ + if (connector->base.ycbcr_420_allowed && ycbcr_420_output) + return INTEL_OUTPUT_FORMAT_YCBCR420; + else + return INTEL_OUTPUT_FORMAT_RGB; +} + static int intel_hdmi_compute_output_format(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct drm_connector *connector = conn_state->connector; - struct drm_i915_private *i915 = to_i915(connector->dev); + struct 
intel_connector *connector = to_intel_connector(conn_state->connector); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + const struct drm_display_info *info = &connector->base.display_info; + struct drm_i915_private *i915 = to_i915(connector->base.dev); + bool ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode); int ret; - bool ycbcr_420_only; - ycbcr_420_only = drm_mode_is_420_only(&connector->display_info, adjusted_mode); - if (connector->ycbcr_420_allowed && ycbcr_420_only) { - crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; - } else { - if (!connector->ycbcr_420_allowed && ycbcr_420_only) - drm_dbg_kms(&i915->drm, - "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n"); + crtc_state->output_format = intel_hdmi_output_format(connector, ycbcr_420_only); + + if (ycbcr_420_only && !intel_hdmi_is_ycbcr420(crtc_state)) { + drm_dbg_kms(&i915->drm, + "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n"); crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB; } ret = intel_hdmi_compute_clock(encoder, crtc_state); if (ret) { - if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420 && - connector->ycbcr_420_allowed && - drm_mode_is_420_also(&connector->display_info, adjusted_mode)) { - crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; - ret = intel_hdmi_compute_clock(encoder, crtc_state); - } + if (intel_hdmi_is_ycbcr420(crtc_state) || + !connector->base.ycbcr_420_allowed || + !drm_mode_is_420_also(info, adjusted_mode)) + return ret; + + crtc_state->output_format = intel_hdmi_output_format(connector, true); + ret = intel_hdmi_compute_clock(encoder, crtc_state); } return ret; @@ -2208,7 +2235,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, if (ret) return ret; - if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) { + if (intel_hdmi_is_ycbcr420(pipe_config)) { ret = intel_panel_fitting(pipe_config, conn_state); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c index 7f3c638c8950..4970bf146c4a 100644 --- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c @@ -74,7 +74,7 @@ #include "intel_de.h" #include "intel_lpe_audio.h" -#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->lpe_audio.platdev != NULL) +#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->audio.lpe.platdev != NULL) static struct platform_device * lpe_audio_platdev_create(struct drm_i915_private *dev_priv) @@ -96,7 +96,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv) return ERR_PTR(-ENOMEM); } - rsc[0].start = rsc[0].end = dev_priv->lpe_audio.irq; + rsc[0].start = rsc[0].end = dev_priv->audio.lpe.irq; rsc[0].flags = IORESOURCE_IRQ; rsc[0].name = "hdmi-lpe-audio-irq"; @@ -148,7 +148,7 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv) * than us fiddle with its internals. 
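 */

/*
 * Aside, not part of this patch: the mechanical rename below, from
 * dev_priv->lpe_audio.* to dev_priv->audio.lpe.*, implies the LPE state now
 * nests inside a common audio container, roughly of this shape (fields
 * inferred from the accesses in this file; the container name is
 * hypothetical):
 */
struct i915_audio_sketch {
	struct {
		struct platform_device *platdev;	/* LPE audio platform device */
		int irq;	/* from irq_alloc_desc(), -1 when torn down */
	} lpe;
};
/* end of aside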
*/ - platform_device_unregister(dev_priv->lpe_audio.platdev); + platform_device_unregister(dev_priv->audio.lpe.platdev); } static void lpe_audio_irq_unmask(struct irq_data *d) @@ -167,7 +167,7 @@ static struct irq_chip lpe_audio_irqchip = { static int lpe_audio_irq_init(struct drm_i915_private *dev_priv) { - int irq = dev_priv->lpe_audio.irq; + int irq = dev_priv->audio.lpe.irq; drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); irq_set_chip_and_handler_name(irq, @@ -204,15 +204,15 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv) { int ret; - dev_priv->lpe_audio.irq = irq_alloc_desc(0); - if (dev_priv->lpe_audio.irq < 0) { + dev_priv->audio.lpe.irq = irq_alloc_desc(0); + if (dev_priv->audio.lpe.irq < 0) { drm_err(&dev_priv->drm, "Failed to allocate IRQ desc: %d\n", - dev_priv->lpe_audio.irq); - ret = dev_priv->lpe_audio.irq; + dev_priv->audio.lpe.irq); + ret = dev_priv->audio.lpe.irq; goto err; } - drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->lpe_audio.irq); + drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->audio.lpe.irq); ret = lpe_audio_irq_init(dev_priv); @@ -223,10 +223,10 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv) goto err_free_irq; } - dev_priv->lpe_audio.platdev = lpe_audio_platdev_create(dev_priv); + dev_priv->audio.lpe.platdev = lpe_audio_platdev_create(dev_priv); - if (IS_ERR(dev_priv->lpe_audio.platdev)) { - ret = PTR_ERR(dev_priv->lpe_audio.platdev); + if (IS_ERR(dev_priv->audio.lpe.platdev)) { + ret = PTR_ERR(dev_priv->audio.lpe.platdev); drm_err(&dev_priv->drm, "Failed to create lpe audio platform device: %d\n", ret); @@ -241,10 +241,10 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv) return 0; err_free_irq: - irq_free_desc(dev_priv->lpe_audio.irq); + irq_free_desc(dev_priv->audio.lpe.irq); err: - dev_priv->lpe_audio.irq = -1; - dev_priv->lpe_audio.platdev = NULL; + dev_priv->audio.lpe.irq = -1; + dev_priv->audio.lpe.platdev = NULL; return ret; } @@ -262,7 +262,7 @@ void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv) if (!HAS_LPE_AUDIO(dev_priv)) return; - ret = generic_handle_irq(dev_priv->lpe_audio.irq); + ret = generic_handle_irq(dev_priv->audio.lpe.irq); if (ret) drm_err_ratelimited(&dev_priv->drm, "error handling LPE audio irq: %d\n", ret); @@ -303,10 +303,10 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv) lpe_audio_platdev_destroy(dev_priv); - irq_free_desc(dev_priv->lpe_audio.irq); + irq_free_desc(dev_priv->audio.lpe.irq); - dev_priv->lpe_audio.irq = -1; - dev_priv->lpe_audio.platdev = NULL; + dev_priv->audio.lpe.irq = -1; + dev_priv->audio.lpe.platdev = NULL; } /** @@ -333,7 +333,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, if (!HAS_LPE_AUDIO(dev_priv)) return; - pdata = dev_get_platdata(&dev_priv->lpe_audio.platdev->dev); + pdata = dev_get_platdata(&dev_priv->audio.lpe.platdev->dev); ppdata = &pdata->port[port - PORT_B]; spin_lock_irqsave(&pdata->lpe_audio_slock, irqflags); @@ -361,7 +361,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, } if (pdata->notify_audio_lpe) - pdata->notify_audio_lpe(dev_priv->lpe_audio.platdev, port - PORT_B); + pdata->notify_audio_lpe(dev_priv->audio.lpe.platdev, port - PORT_B); spin_unlock_irqrestore(&pdata->lpe_audio_slock, irqflags); } diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c new file mode 100644 index 000000000000..a55c4bfacd0d --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_pch_display.c @@ -0,0 +1,501 @@ +// 
SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include "g4x_dp.h" +#include "intel_crt.h" +#include "intel_de.h" +#include "intel_display_types.h" +#include "intel_fdi.h" +#include "intel_lvds.h" +#include "intel_pch_display.h" +#include "intel_pch_refclk.h" +#include "intel_pps.h" +#include "intel_sdvo.h" + +static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe, enum port port, + i915_reg_t dp_reg) +{ + enum pipe port_pipe; + bool state; + + state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe); + + I915_STATE_WARN(state && port_pipe == pipe, + "PCH DP %c enabled on transcoder %c, should be disabled\n", + port_name(port), pipe_name(pipe)); + + I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, + "IBX PCH DP %c still using transcoder B\n", + port_name(port)); +} + +static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe, enum port port, + i915_reg_t hdmi_reg) +{ + enum pipe port_pipe; + bool state; + + state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe); + + I915_STATE_WARN(state && port_pipe == pipe, + "PCH HDMI %c enabled on transcoder %c, should be disabled\n", + port_name(port), pipe_name(pipe)); + + I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, + "IBX PCH HDMI %c still using transcoder B\n", + port_name(port)); +} + +static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + enum pipe port_pipe; + + assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B); + assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C); + assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D); + + I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) && + port_pipe == pipe, + "PCH VGA enabled on transcoder %c, should be disabled\n", + pipe_name(pipe)); + + I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) && + port_pipe == pipe, + "PCH LVDS enabled on transcoder %c, should be disabled\n", + pipe_name(pipe)); + + /* PCH SDVOB multiplex with HDMIB */ + assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB); + assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC); + assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID); +} + +static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + u32 val; + bool enabled; + + val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe)); + enabled = !!(val & TRANS_ENABLE); + I915_STATE_WARN(enabled, + "transcoder assertion failed, should be off on pipe %c but is still active\n", + pipe_name(pipe)); +} + +static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, + enum pipe pch_transcoder) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + + intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder), + intel_de_read(dev_priv, HTOTAL(cpu_transcoder))); + intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder), + intel_de_read(dev_priv, HBLANK(cpu_transcoder))); + intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder), + intel_de_read(dev_priv, HSYNC(cpu_transcoder))); + + intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder), + intel_de_read(dev_priv, VTOTAL(cpu_transcoder))); + intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder), + intel_de_read(dev_priv, VBLANK(cpu_transcoder))); + 
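/*
 * Aside, not part of this patch: every write in this function is the same
 * copy, CPU transcoder timing register to the matching PCH transcoder
 * register. A hypothetical helper for the pattern, reusing the intel_de
 * accessors seen here:
 */
static inline void pch_copy_timing(struct drm_i915_private *dev_priv,
				   i915_reg_t pch_reg, i915_reg_t cpu_reg)
{
	intel_de_write(dev_priv, pch_reg, intel_de_read(dev_priv, cpu_reg));
}
/* e.g. pch_copy_timing(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder), VTOTAL(cpu_transcoder)); */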
intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder), + intel_de_read(dev_priv, VSYNC(cpu_transcoder))); + intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder), + intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder))); +} + +static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + i915_reg_t reg; + u32 val, pipeconf_val; + + /* Make sure PCH DPLL is enabled */ + assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll); + + /* FDI must be feeding us bits for PCH ports */ + assert_fdi_tx_enabled(dev_priv, pipe); + assert_fdi_rx_enabled(dev_priv, pipe); + + if (HAS_PCH_CPT(dev_priv)) { + reg = TRANS_CHICKEN2(pipe); + val = intel_de_read(dev_priv, reg); + /* + * Workaround: Set the timing override bit + * before enabling the pch transcoder. + */ + val |= TRANS_CHICKEN2_TIMING_OVERRIDE; + /* Configure frame start delay to match the CPU */ + val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; + val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1); + intel_de_write(dev_priv, reg, val); + } + + reg = PCH_TRANSCONF(pipe); + val = intel_de_read(dev_priv, reg); + pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe)); + + if (HAS_PCH_IBX(dev_priv)) { + /* Configure frame start delay to match the CPU */ + val &= ~TRANS_FRAME_START_DELAY_MASK; + val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1); + + /* + * Make the BPC in transcoder be consistent with + * that in pipeconf reg. For HDMI we must use 8bpc + * here for both 8bpc and 12bpc. + */ + val &= ~PIPECONF_BPC_MASK; + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + val |= PIPECONF_8BPC; + else + val |= pipeconf_val & PIPECONF_BPC_MASK; + } + + val &= ~TRANS_INTERLACE_MASK; + if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) { + if (HAS_PCH_IBX(dev_priv) && + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) + val |= TRANS_LEGACY_INTERLACED_ILK; + else + val |= TRANS_INTERLACED; + } else { + val |= TRANS_PROGRESSIVE; + } + + intel_de_write(dev_priv, reg, val | TRANS_ENABLE); + if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100)) + drm_err(&dev_priv->drm, "failed to enable transcoder %c\n", + pipe_name(pipe)); +} + +static void ilk_disable_pch_transcoder(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + i915_reg_t reg; + u32 val; + + /* FDI relies on the transcoder */ + assert_fdi_tx_disabled(dev_priv, pipe); + assert_fdi_rx_disabled(dev_priv, pipe); + + /* Ports must be off as well */ + assert_pch_ports_disabled(dev_priv, pipe); + + reg = PCH_TRANSCONF(pipe); + val = intel_de_read(dev_priv, reg); + val &= ~TRANS_ENABLE; + intel_de_write(dev_priv, reg, val); + /* wait for PCH transcoder off, transcoder state */ + if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50)) + drm_err(&dev_priv->drm, "failed to disable transcoder %c\n", + pipe_name(pipe)); + + if (HAS_PCH_CPT(dev_priv)) { + /* Workaround: Clear the timing override chicken bit again. 
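 *
 * (Aside, not part of this patch: if this tree carries the intel_de_rmw()
 * helper, assumed semantics intel_de_rmw(i915, reg, clear, set), the
 * read/modify/write below could be written as a sketch:
 *
 *	intel_de_rmw(dev_priv, TRANS_CHICKEN2(pipe),
 *		     TRANS_CHICKEN2_TIMING_OVERRIDE, 0);
 *
 * The open-coded form is what the patch actually adds.)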
*/ + reg = TRANS_CHICKEN2(pipe); + val = intel_de_read(dev_priv, reg); + val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; + intel_de_write(dev_priv, reg, val); + } +} + +/* + * Enable PCH resources required for PCH ports: + * - PCH PLLs + * - FDI training & RX/TX + * - update transcoder timings + * - DP transcoding bits + * - transcoder + */ +void ilk_pch_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + enum pipe pipe = crtc->pipe; + u32 temp; + + assert_pch_transcoder_disabled(dev_priv, pipe); + + /* For PCH output, training FDI link */ + intel_fdi_link_train(crtc, crtc_state); + + /* + * We need to program the right clock selection + * before writing the pixel multiplier into the DPLL. + */ + if (HAS_PCH_CPT(dev_priv)) { + u32 sel; + + temp = intel_de_read(dev_priv, PCH_DPLL_SEL); + temp |= TRANS_DPLL_ENABLE(pipe); + sel = TRANS_DPLLB_SEL(pipe); + if (crtc_state->shared_dpll == + intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B)) + temp |= sel; + else + temp &= ~sel; + intel_de_write(dev_priv, PCH_DPLL_SEL, temp); + } + + /* + * XXX: pch pll's can be enabled any time before we enable the PCH + * transcoder, and we actually should do this to not upset any PCH + * transcoder that already use the clock when we share it. + * + * Note that enable_shared_dpll tries to do the right thing, but + * get_shared_dpll unconditionally resets the pll - we need that + * to have the right LVDS enable sequence. + */ + intel_enable_shared_dpll(crtc_state); + + /* set transcoder timing, panel must allow it */ + assert_pps_unlocked(dev_priv, pipe); + ilk_pch_transcoder_set_timings(crtc_state, pipe); + + intel_fdi_normal_train(crtc); + + /* For PCH DP, enable TRANS_DP_CTL */ + if (HAS_PCH_CPT(dev_priv) && + intel_crtc_has_dp_encoder(crtc_state)) { + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; + i915_reg_t reg = TRANS_DP_CTL(pipe); + enum port port; + + temp = intel_de_read(dev_priv, reg); + temp &= ~(TRANS_DP_PORT_SEL_MASK | + TRANS_DP_SYNC_MASK | + TRANS_DP_BPC_MASK); + temp |= TRANS_DP_OUTPUT_ENABLE; + temp |= bpc << 9; /* same format but at 11:9 */ + + if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) + temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; + if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) + temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; + + port = intel_get_crtc_new_encoder(state, crtc_state)->port; + drm_WARN_ON(&dev_priv->drm, port < PORT_B || port > PORT_D); + temp |= TRANS_DP_PORT_SEL(port); + + intel_de_write(dev_priv, reg, temp); + } + + ilk_enable_pch_transcoder(crtc_state); +} + +void ilk_pch_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + ilk_fdi_disable(crtc); +} + +void ilk_pch_post_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + ilk_disable_pch_transcoder(crtc); + + if (HAS_PCH_CPT(dev_priv)) { + i915_reg_t reg; + u32 temp; + + /* disable TRANS_DP_CTL */ + reg = TRANS_DP_CTL(pipe); + temp = intel_de_read(dev_priv, reg); + temp &= ~(TRANS_DP_OUTPUT_ENABLE | + TRANS_DP_PORT_SEL_MASK); + temp |= TRANS_DP_PORT_SEL_NONE; + intel_de_write(dev_priv, reg, temp); + + /* disable DPLL_SEL */ + temp = intel_de_read(dev_priv, PCH_DPLL_SEL); + temp &= ~(TRANS_DPLL_ENABLE(pipe) | 
TRANS_DPLLB_SEL(pipe)); + intel_de_write(dev_priv, PCH_DPLL_SEL, temp); + } + + ilk_fdi_pll_disable(crtc); +} + +static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + + /* read out port_clock from the DPLL */ + i9xx_crtc_clock_get(crtc, crtc_state); + + /* + * In case there is an active pipe without active ports, + * we may need some idea for the dotclock anyway. + * Calculate one based on the FDI configuration. + */ + crtc_state->hw.adjusted_mode.crtc_clock = + intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, crtc_state), + &crtc_state->fdi_m_n); +} + +void ilk_pch_get_config(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_shared_dpll *pll; + enum pipe pipe = crtc->pipe; + enum intel_dpll_id pll_id; + bool pll_active; + u32 tmp; + + if ((intel_de_read(dev_priv, PCH_TRANSCONF(pipe)) & TRANS_ENABLE) == 0) + return; + + crtc_state->has_pch_encoder = true; + + tmp = intel_de_read(dev_priv, FDI_RX_CTL(pipe)); + crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> + FDI_DP_PORT_WIDTH_SHIFT) + 1; + + ilk_get_fdi_m_n_config(crtc, crtc_state); + + if (HAS_PCH_IBX(dev_priv)) { + /* + * The pipe->pch transcoder and pch transcoder->pll + * mapping is fixed. + */ + pll_id = (enum intel_dpll_id) pipe; + } else { + tmp = intel_de_read(dev_priv, PCH_DPLL_SEL); + if (tmp & TRANS_DPLLB_SEL(pipe)) + pll_id = DPLL_ID_PCH_PLL_B; + else + pll_id = DPLL_ID_PCH_PLL_A; + } + + crtc_state->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, pll_id); + pll = crtc_state->shared_dpll; + + pll_active = intel_dpll_get_hw_state(dev_priv, pll, + &crtc_state->dpll_hw_state); + drm_WARN_ON(&dev_priv->drm, !pll_active); + + tmp = crtc_state->dpll_hw_state.dpll; + crtc_state->pixel_multiplier = + ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) + >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; + + ilk_pch_clock_get(crtc_state); +} + +static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder) +{ + u32 val, pipeconf_val; + + /* FDI must be feeding us bits for PCH ports */ + assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder); + assert_fdi_rx_enabled(dev_priv, PIPE_A); + + val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A)); + /* Workaround: set timing override bit. 
*/ + val |= TRANS_CHICKEN2_TIMING_OVERRIDE; + /* Configure frame start delay to match the CPU */ + val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; + val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1); + intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); + + val = TRANS_ENABLE; + pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); + + if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == + PIPECONF_INTERLACED_ILK) + val |= TRANS_INTERLACED; + else + val |= TRANS_PROGRESSIVE; + + intel_de_write(dev_priv, LPT_TRANSCONF, val); + if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF, + TRANS_STATE_ENABLE, 100)) + drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n"); +} + +static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) +{ + u32 val; + + val = intel_de_read(dev_priv, LPT_TRANSCONF); + val &= ~TRANS_ENABLE; + intel_de_write(dev_priv, LPT_TRANSCONF, val); + /* wait for PCH transcoder off, transcoder state */ + if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF, + TRANS_STATE_ENABLE, 50)) + drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n"); + + /* Workaround: clear timing override bit. */ + val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A)); + val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; + intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); +} + +void lpt_pch_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + + assert_pch_transcoder_disabled(dev_priv, PIPE_A); + + lpt_program_iclkip(crtc_state); + + /* Set transcoder timing. */ + ilk_pch_transcoder_set_timings(crtc_state, PIPE_A); + + lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); +} + +void lpt_pch_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + + lpt_disable_pch_transcoder(dev_priv); + + lpt_disable_iclkip(dev_priv); +} + +void lpt_pch_get_config(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + u32 tmp; + + if ((intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) == 0) + return; + + crtc_state->has_pch_encoder = true; + + tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); + crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> + FDI_DP_PORT_WIDTH_SHIFT) + 1; + + ilk_get_fdi_m_n_config(crtc, crtc_state); + + crtc_state->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv); +} diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.h b/drivers/gpu/drm/i915/display/intel_pch_display.h new file mode 100644 index 000000000000..2c387fe3a467 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_pch_display.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef _INTEL_PCH_DISPLAY_H_ +#define _INTEL_PCH_DISPLAY_H_ + +struct intel_atomic_state; +struct intel_crtc; +struct intel_crtc_state; + +void ilk_pch_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void ilk_pch_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void ilk_pch_post_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void ilk_pch_get_config(struct intel_crtc_state *crtc_state); + +void lpt_pch_enable(struct intel_atomic_state 
*state, + struct intel_crtc *crtc); +void lpt_pch_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void lpt_pch_get_config(struct intel_crtc_state *crtc_state); + +#endif diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c new file mode 100644 index 000000000000..b688fd87e3da --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c @@ -0,0 +1,648 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include "intel_de.h" +#include "intel_display_types.h" +#include "intel_panel.h" +#include "intel_pch_refclk.h" +#include "intel_sbi.h" + +static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv) +{ + u32 tmp; + + tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); + tmp |= FDI_MPHY_IOSFSB_RESET_CTL; + intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp); + + if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) & + FDI_MPHY_IOSFSB_RESET_STATUS, 100)) + drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n"); + + tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); + tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; + intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp); + + if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) & + FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) + drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n"); +} + +/* WaMPhyProgramming:hsw */ +static void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv) +{ + u32 tmp; + + lpt_fdi_reset_mphy(dev_priv); + + tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); + tmp &= ~(0xFF << 24); + tmp |= (0x12 << 24); + intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); + tmp |= (1 << 11); + intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY); + tmp |= (1 << 11); + intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); + tmp |= (1 << 24) | (1 << 21) | (1 << 18); + intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); + tmp |= (1 << 24) | (1 << 21) | (1 << 18); + intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); + tmp &= ~(7 << 13); + tmp |= (5 << 13); + intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); + tmp &= ~(7 << 13); + tmp |= (5 << 13); + intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); + tmp &= ~0xFF; + tmp |= 0x1C; + intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); + tmp &= ~0xFF; + tmp |= 0x1C; + intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); + tmp &= ~(0xFF << 16); + tmp |= (0x1C << 16); + intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); + tmp &= ~(0xFF << 16); + tmp |= (0x1C << 16); + intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); + tmp |= (1 << 27); + intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); + tmp |= (1 << 27); + intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); + tmp &= ~(0xF << 28); + tmp |= (4 << 28); + intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); + tmp &= ~(0xF 
<< 28); + tmp |= (4 << 28); + intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); +} + +void lpt_disable_iclkip(struct drm_i915_private *dev_priv) +{ + u32 temp; + + intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE); + + mutex_lock(&dev_priv->sb_lock); + + temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); + temp |= SBI_SSCCTL_DISABLE; + intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); + + mutex_unlock(&dev_priv->sb_lock); +} + +/* Program iCLKIP clock to the desired frequency */ +void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + int clock = crtc_state->hw.adjusted_mode.crtc_clock; + u32 divsel, phaseinc, auxdiv, phasedir = 0; + u32 temp; + + lpt_disable_iclkip(dev_priv); + + /* The iCLK virtual clock root frequency is in MHz, + * but the adjusted_mode->crtc_clock in KHz. To get the + * divisors, it is necessary to divide one by another, so we + * convert the virtual clock precision to KHz here for higher + * precision. + */ + for (auxdiv = 0; auxdiv < 2; auxdiv++) { + u32 iclk_virtual_root_freq = 172800 * 1000; + u32 iclk_pi_range = 64; + u32 desired_divisor; + + desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, + clock << auxdiv); + divsel = (desired_divisor / iclk_pi_range) - 2; + phaseinc = desired_divisor % iclk_pi_range; + + /* + * Near 20MHz is a corner case which is + * out of range for the 7-bit divisor + */ + if (divsel <= 0x7f) + break; + } + + /* This should not happen with any sane values */ + drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) & + ~SBI_SSCDIVINTPHASE_DIVSEL_MASK); + drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) & + ~SBI_SSCDIVINTPHASE_INCVAL_MASK); + + drm_dbg_kms(&dev_priv->drm, + "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", + clock, auxdiv, divsel, phasedir, phaseinc); + + mutex_lock(&dev_priv->sb_lock); + + /* Program SSCDIVINTPHASE6 */ + temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); + temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; + temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel); + temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK; + temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc); + temp |= SBI_SSCDIVINTPHASE_DIR(phasedir); + temp |= SBI_SSCDIVINTPHASE_PROPAGATE; + intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK); + + /* Program SSCAUXDIV */ + temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); + temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1); + temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv); + intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK); + + /* Enable modulator and associated divider */ + temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); + temp &= ~SBI_SSCCTL_DISABLE; + intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); + + mutex_unlock(&dev_priv->sb_lock); + + /* Wait for initialization time */ + udelay(24); + + intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE); +} + +int lpt_get_iclkip(struct drm_i915_private *dev_priv) +{ + u32 divsel, phaseinc, auxdiv; + u32 iclk_virtual_root_freq = 172800 * 1000; + u32 iclk_pi_range = 64; + u32 desired_divisor; + u32 temp; + + if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0) + return 0; + + mutex_lock(&dev_priv->sb_lock); + + temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); + if (temp & SBI_SSCCTL_DISABLE) { + mutex_unlock(&dev_priv->sb_lock); + return 0; + } + + temp = intel_sbi_read(dev_priv, 
SBI_SSCDIVINTPHASE6, SBI_ICLK); + divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >> + SBI_SSCDIVINTPHASE_DIVSEL_SHIFT; + phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >> + SBI_SSCDIVINTPHASE_INCVAL_SHIFT; + + temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); + auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >> + SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT; + + mutex_unlock(&dev_priv->sb_lock); + + desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc; + + return DIV_ROUND_CLOSEST(iclk_virtual_root_freq, + desired_divisor << auxdiv); +} + +/* Implements 3 different sequences from BSpec chapter "Display iCLK + * Programming" based on the parameters passed: + * - Sequence to enable CLKOUT_DP + * - Sequence to enable CLKOUT_DP without spread + * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O + */ +static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, + bool with_spread, bool with_fdi) +{ + u32 reg, tmp; + + if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread, + "FDI requires downspread\n")) + with_spread = true; + if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) && + with_fdi, "LP PCH doesn't have FDI\n")) + with_fdi = false; + + mutex_lock(&dev_priv->sb_lock); + + tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); + tmp &= ~SBI_SSCCTL_DISABLE; + tmp |= SBI_SSCCTL_PATHALT; + intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); + + udelay(24); + + if (with_spread) { + tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); + tmp &= ~SBI_SSCCTL_PATHALT; + intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); + + if (with_fdi) + lpt_fdi_program_mphy(dev_priv); + } + + reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; + tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); + tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; + intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); + + mutex_unlock(&dev_priv->sb_lock); +} + +/* Sequence to disable CLKOUT_DP */ +void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) +{ + u32 reg, tmp; + + mutex_lock(&dev_priv->sb_lock); + + reg = HAS_PCH_LPT_LP(dev_priv) ? 
SBI_GEN0 : SBI_DBUFF0; + tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); + tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; + intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); + + tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); + if (!(tmp & SBI_SSCCTL_DISABLE)) { + if (!(tmp & SBI_SSCCTL_PATHALT)) { + tmp |= SBI_SSCCTL_PATHALT; + intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); + udelay(32); + } + tmp |= SBI_SSCCTL_DISABLE; + intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); + } + + mutex_unlock(&dev_priv->sb_lock); +} + +#define BEND_IDX(steps) ((50 + (steps)) / 5) + +static const u16 sscdivintphase[] = { + [BEND_IDX( 50)] = 0x3B23, + [BEND_IDX( 45)] = 0x3B23, + [BEND_IDX( 40)] = 0x3C23, + [BEND_IDX( 35)] = 0x3C23, + [BEND_IDX( 30)] = 0x3D23, + [BEND_IDX( 25)] = 0x3D23, + [BEND_IDX( 20)] = 0x3E23, + [BEND_IDX( 15)] = 0x3E23, + [BEND_IDX( 10)] = 0x3F23, + [BEND_IDX( 5)] = 0x3F23, + [BEND_IDX( 0)] = 0x0025, + [BEND_IDX( -5)] = 0x0025, + [BEND_IDX(-10)] = 0x0125, + [BEND_IDX(-15)] = 0x0125, + [BEND_IDX(-20)] = 0x0225, + [BEND_IDX(-25)] = 0x0225, + [BEND_IDX(-30)] = 0x0325, + [BEND_IDX(-35)] = 0x0325, + [BEND_IDX(-40)] = 0x0425, + [BEND_IDX(-45)] = 0x0425, + [BEND_IDX(-50)] = 0x0525, +}; + +/* + * Bend CLKOUT_DP + * steps -50 to 50 inclusive, in steps of 5 + * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz) + * change in clock period = -(steps / 10) * 5.787 ps + */ +static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) +{ + u32 tmp; + int idx = BEND_IDX(steps); + + if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0)) + return; + + if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase))) + return; + + mutex_lock(&dev_priv->sb_lock); + + if (steps % 10 != 0) + tmp = 0xAAAAAAAB; + else + tmp = 0x00000000; + intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK); + + tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK); + tmp &= 0xffff0000; + tmp |= sscdivintphase[idx]; + intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK); + + mutex_unlock(&dev_priv->sb_lock); +} + +#undef BEND_IDX + +static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv) +{ + u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); + u32 ctl = intel_de_read(dev_priv, SPLL_CTL); + + if ((ctl & SPLL_PLL_ENABLE) == 0) + return false; + + if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC && + (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) + return true; + + if (IS_BROADWELL(dev_priv) && + (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW) + return true; + + return false; +} + +static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv, + enum intel_dpll_id id) +{ + u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); + u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id)); + + if ((ctl & WRPLL_PLL_ENABLE) == 0) + return false; + + if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC) + return true; + + if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) && + (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW && + (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) + return true; + + return false; +} + +static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) +{ + struct intel_encoder *encoder; + bool has_fdi = false; + + for_each_intel_encoder(&dev_priv->drm, encoder) { + switch (encoder->type) { + case INTEL_OUTPUT_ANALOG: + has_fdi = true; + break; + default: + break; + } + } + + /* + * The BIOS may have decided to use the PCH SSC + * reference so we must not disable it until the + * relevant PLLs have stopped relying on it. 
We'll + * just leave the PCH SSC reference enabled in case + * any active PLL is using it. It will get disabled + * after runtime suspend if we don't have FDI. + * + * TODO: Move the whole reference clock handling + * to the modeset sequence proper so that we can + * actually enable/disable/reconfigure these things + * safely. To do that we need to introduce a real + * clock hierarchy. That would also allow us to do + * clock bending finally. + */ + dev_priv->pch_ssc_use = 0; + + if (spll_uses_pch_ssc(dev_priv)) { + drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n"); + dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL); + } + + if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) { + drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n"); + dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1); + } + + if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) { + drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n"); + dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2); + } + + if (dev_priv->pch_ssc_use) + return; + + if (has_fdi) { + lpt_bend_clkout_dp(dev_priv, 0); + lpt_enable_clkout_dp(dev_priv, true, true); + } else { + lpt_disable_clkout_dp(dev_priv); + } +} + +static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) +{ + struct intel_encoder *encoder; + int i; + u32 val, final; + bool has_lvds = false; + bool has_cpu_edp = false; + bool has_panel = false; + bool has_ck505 = false; + bool can_ssc = false; + bool using_ssc_source = false; + + /* We need to take the global config into account */ + for_each_intel_encoder(&dev_priv->drm, encoder) { + switch (encoder->type) { + case INTEL_OUTPUT_LVDS: + has_panel = true; + has_lvds = true; + break; + case INTEL_OUTPUT_EDP: + has_panel = true; + if (encoder->port == PORT_A) + has_cpu_edp = true; + break; + default: + break; + } + } + + if (HAS_PCH_IBX(dev_priv)) { + has_ck505 = dev_priv->vbt.display_clock_mode; + can_ssc = has_ck505; + } else { + has_ck505 = false; + can_ssc = true; + } + + /* Check if any DPLLs are using the SSC source */ + for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { + u32 temp = intel_de_read(dev_priv, PCH_DPLL(i)); + + if (!(temp & DPLL_VCO_ENABLE)) + continue; + + if ((temp & PLL_REF_INPUT_MASK) == + PLLB_REF_INPUT_SPREADSPECTRUMIN) { + using_ssc_source = true; + break; + } + } + + drm_dbg_kms(&dev_priv->drm, + "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", + has_panel, has_lvds, has_ck505, using_ssc_source); + + /* Ironlake: try to setup display ref clock before DPLL + * enabling. This is only under driver's control after + * PCH B stepping, previous chipset stepping should be + * ignoring this setting. + */ + val = intel_de_read(dev_priv, PCH_DREF_CONTROL); + + /* As we must carefully and slowly disable/enable each source in turn, + * compute the final state we want first and check if we need to + * make any changes at all. 
+ */ + final = val; + final &= ~DREF_NONSPREAD_SOURCE_MASK; + if (has_ck505) + final |= DREF_NONSPREAD_CK505_ENABLE; + else + final |= DREF_NONSPREAD_SOURCE_ENABLE; + + final &= ~DREF_SSC_SOURCE_MASK; + final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; + final &= ~DREF_SSC1_ENABLE; + + if (has_panel) { + final |= DREF_SSC_SOURCE_ENABLE; + + if (intel_panel_use_ssc(dev_priv) && can_ssc) + final |= DREF_SSC1_ENABLE; + + if (has_cpu_edp) { + if (intel_panel_use_ssc(dev_priv) && can_ssc) + final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; + else + final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; + } else { + final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; + } + } else if (using_ssc_source) { + final |= DREF_SSC_SOURCE_ENABLE; + final |= DREF_SSC1_ENABLE; + } + + if (final == val) + return; + + /* Always enable nonspread source */ + val &= ~DREF_NONSPREAD_SOURCE_MASK; + + if (has_ck505) + val |= DREF_NONSPREAD_CK505_ENABLE; + else + val |= DREF_NONSPREAD_SOURCE_ENABLE; + + if (has_panel) { + val &= ~DREF_SSC_SOURCE_MASK; + val |= DREF_SSC_SOURCE_ENABLE; + + /* SSC must be turned on before enabling the CPU output */ + if (intel_panel_use_ssc(dev_priv) && can_ssc) { + drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n"); + val |= DREF_SSC1_ENABLE; + } else { + val &= ~DREF_SSC1_ENABLE; + } + + /* Get SSC going before enabling the outputs */ + intel_de_write(dev_priv, PCH_DREF_CONTROL, val); + intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); + udelay(200); + + val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; + + /* Enable CPU source on CPU attached eDP */ + if (has_cpu_edp) { + if (intel_panel_use_ssc(dev_priv) && can_ssc) { + drm_dbg_kms(&dev_priv->drm, + "Using SSC on eDP\n"); + val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; + } else { + val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; + } + } else { + val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; + } + + intel_de_write(dev_priv, PCH_DREF_CONTROL, val); + intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); + udelay(200); + } else { + drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n"); + + val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; + + /* Turn off CPU output */ + val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; + + intel_de_write(dev_priv, PCH_DREF_CONTROL, val); + intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); + udelay(200); + + if (!using_ssc_source) { + drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n"); + + /* Turn off the SSC source */ + val &= ~DREF_SSC_SOURCE_MASK; + val |= DREF_SSC_SOURCE_DISABLE; + + /* Turn off SSC1 */ + val &= ~DREF_SSC1_ENABLE; + + intel_de_write(dev_priv, PCH_DREF_CONTROL, val); + intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); + udelay(200); + } + } + + BUG_ON(val != final); +} + +/* + * Initialize reference clocks when the driver loads + */ +void intel_init_pch_refclk(struct drm_i915_private *dev_priv) +{ + if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) + ilk_init_pch_refclk(dev_priv); + else if (HAS_PCH_LPT(dev_priv)) + lpt_init_pch_refclk(dev_priv); +} diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.h b/drivers/gpu/drm/i915/display/intel_pch_refclk.h new file mode 100644 index 000000000000..12ab2c75a800 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef _INTEL_PCH_REFCLK_H_ +#define _INTEL_PCH_REFCLK_H_ + +#include <linux/types.h> + +struct drm_i915_private; +struct intel_crtc_state; + +void lpt_program_iclkip(const struct intel_crtc_state *crtc_state); +void lpt_disable_iclkip(struct drm_i915_private *dev_priv); +int 
lpt_get_iclkip(struct drm_i915_private *dev_priv); + +void intel_init_pch_refclk(struct drm_i915_private *dev_priv); +void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv); + +#endif diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c index dcd698a02da2..01ce1d72297f 100644 --- a/drivers/gpu/drm/i915/display/intel_plane_initial.c +++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c @@ -3,11 +3,12 @@ * Copyright © 2021 Intel Corporation */ -#include "intel_display_types.h" -#include "intel_plane_initial.h" +#include "i915_drv.h" #include "intel_atomic_plane.h" #include "intel_display.h" +#include "intel_display_types.h" #include "intel_fb.h" +#include "intel_plane_initial.h" static bool intel_reuse_initial_plane_obj(struct drm_i915_private *i915, diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 7a205fd5023b..a1a663f362e7 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -28,13 +28,13 @@ #include "i915_drv.h" #include "intel_atomic.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dp_aux.h" #include "intel_hdmi.h" #include "intel_psr.h" #include "intel_snps_phy.h" -#include "intel_sprite.h" #include "skl_universal_plane.h" /** @@ -588,7 +588,9 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) static bool transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans) { - if (DISPLAY_VER(dev_priv) >= 12) + if (IS_ALDERLAKE_P(dev_priv)) + return trans == TRANSCODER_A || trans == TRANSCODER_B; + else if (DISPLAY_VER(dev_priv) >= 12) return trans == TRANSCODER_A; else return trans == TRANSCODER_EDP; @@ -1346,6 +1348,7 @@ void intel_psr_disable(struct intel_dp *intel_dp, */ void intel_psr_pause(struct intel_dp *intel_dp) { + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_psr *psr = &intel_dp->psr; if (!CAN_PSR(intel_dp)) @@ -1358,6 +1361,9 @@ void intel_psr_pause(struct intel_dp *intel_dp) return; } + /* If we ever hit this, we will need to add a refcount to pause/resume */ + drm_WARN_ON(&dev_priv->drm, psr->paused); + intel_psr_exit(intel_dp); intel_psr_wait_exit_locked(intel_dp); psr->paused = true; @@ -1463,10 +1469,19 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane, val |= plane_state->uapi.dst.x1; intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val); - /* TODO: consider auxiliary surfaces */ - x = plane_state->uapi.src.x1 >> 16; - y = (plane_state->uapi.src.y1 >> 16) + clip->y1; + x = plane_state->view.color_plane[color_plane].x; + + /* + * From Bspec: UV surface Start Y Position = half of Y plane Y + * start position. + */ + if (!color_plane) + y = plane_state->view.color_plane[color_plane].y + clip->y1; + else + y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2; + val = y << 16 | x; + intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id), val); @@ -1558,9 +1573,6 @@ static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *c * also planes are not updated if they have a negative X * position, so for now a full update is done in these cases * - * TODO: We are missing multi-planar formats handling, until it is - * implemented it will send full frame updates. - * * Plane scaling and rotation is not supported by selective fetch and both * properties can change without a modeset, so they need to be checked at every * atomic commit.
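/*
 * Aside, not part of this patch: the PLANE_SEL_FETCH_OFFSET programming in
 * the hunk above packs y into bits 31:16 and x into bits 15:0, with the UV
 * plane of a two-plane (e.g. NV12) surface taking half the clip's Y offset
 * per the quoted Bspec rule. A standalone model (function name
 * hypothetical):
 */
static u32 sel_fetch_offset(u32 x, u32 plane_y, u32 clip_y1, bool uv_plane)
{
	u32 y = plane_y + (uv_plane ? clip_y1 / 2 : clip_y1);

	return y << 16 | x;
}
/* e.g. a clip starting 64 lines in: Y plane y offset 64, UV plane y offset 32 */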
@@ -1570,7 +1582,6 @@ static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state if (plane_state->uapi.dst.y1 < 0 || plane_state->uapi.dst.x1 < 0 || plane_state->scaler_id >= 0 || - plane_state->hw.fb->format->num_planes > 1 || plane_state->uapi.rotation != DRM_MODE_ROTATE_0) return false; @@ -1696,6 +1707,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { struct drm_rect *sel_fetch_area, inter; + struct intel_plane *linked = new_plane_state->planar_linked_plane; if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc || !new_plane_state->uapi.visible) @@ -1714,6 +1726,24 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1; sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1; crtc_state->update_planes |= BIT(plane->id); + + /* + * Sel_fetch_area is calculated for the UV plane. Use + * the same area for the Y plane as well. + */ + if (linked) { + struct intel_plane_state *linked_new_plane_state; + struct drm_rect *linked_sel_fetch_area; + + linked_new_plane_state = intel_atomic_get_plane_state(state, linked); + if (IS_ERR(linked_new_plane_state)) + return PTR_ERR(linked_new_plane_state); + + linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area; + linked_sel_fetch_area->y1 = sel_fetch_area->y1; + linked_sel_fetch_area->y2 = sel_fetch_area->y2; + crtc_state->update_planes |= BIT(linked->id); + } } skip_sel_fetch_set_loop: @@ -1721,11 +1751,17 @@ skip_sel_fetch_set_loop: return 0; } -static void _intel_psr_pre_plane_update(const struct intel_atomic_state *state, - const struct intel_crtc_state *crtc_state) +void intel_psr_pre_plane_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) { + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder; + if (!HAS_PSR(i915)) + return; + for_each_intel_encoder_mask_with_psr(state->base.dev, encoder, crtc_state->uapi.encoder_mask) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -1740,6 +1776,7 @@ static void _intel_psr_pre_plane_update(const struct intel_atomic_state *state, * - All planes will go inactive * - Changing between PSR versions */ + needs_to_disable |= intel_crtc_needs_modeset(crtc_state); needs_to_disable |= !crtc_state->has_psr; needs_to_disable |= !crtc_state->active_planes; needs_to_disable |= crtc_state->has_psr2 != psr->psr2_enabled; @@ -1751,20 +1788,6 @@ } } -void intel_psr_pre_plane_update(const struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_crtc_state *crtc_state; - struct intel_crtc *crtc; - int i; - - if (!HAS_PSR(dev_priv)) - return; - - for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) - _intel_psr_pre_plane_update(state, crtc_state); -} - static void _intel_psr_post_plane_update(const struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state) { @@ -1809,15 +1832,21 @@ void intel_psr_post_plane_update(const struct intel_atomic_state *state) _intel_psr_post_plane_update(state, crtc_state); } -/** - * psr_wait_for_idle - wait for PSR1 to idle - * @intel_dp: Intel DP - * @out_value: PSR status in case of failure - * - * Returns: 0 on success or -ETIMEOUT if PSR status does not idle.
- * - */ -static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value) +static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + /* + * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough. + * As all higher states have bit 4 of the PSR2 state set, we can just wait for + * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared. + */ + return intel_de_wait_for_clear(dev_priv, + EDP_PSR2_STATUS(intel_dp->psr.transcoder), + EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50); +} + +static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); @@ -1827,15 +1856,13 @@ static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value) * exit training time + 1.5 ms of aux channel handshake. 50 ms is * defensive enough to cover everything. */ - return __intel_wait_for_register(&dev_priv->uncore, - EDP_PSR_STATUS(intel_dp->psr.transcoder), - EDP_PSR_STATUS_STATE_MASK, - EDP_PSR_STATUS_STATE_IDLE, 2, 50, - out_value); + return intel_de_wait_for_clear(dev_priv, + EDP_PSR_STATUS(intel_dp->psr.transcoder), + EDP_PSR_STATUS_STATE_MASK, 50); } /** - * intel_psr_wait_for_idle - wait for PSR1 to idle + * intel_psr_wait_for_idle - wait for PSR to be ready for a pipe update * @new_crtc_state: new CRTC state * * This function is expected to be called from pipe_update_start() where it is @@ -1852,19 +1879,23 @@ void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state) for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder, new_crtc_state->uapi.encoder_mask) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - u32 psr_status; + int ret; mutex_lock(&intel_dp->psr.lock); - if (!intel_dp->psr.enabled || intel_dp->psr.psr2_enabled) { + + if (!intel_dp->psr.enabled) { mutex_unlock(&intel_dp->psr.lock); continue; } - /* when the PSR1 is enabled */ - if (psr_wait_for_idle(intel_dp, &psr_status)) - drm_err(&dev_priv->drm, - "PSR idle timed out 0x%x, atomic update may fail\n", - psr_status); + if (intel_dp->psr.psr2_enabled) + ret = _psr2_ready_for_pipe_update_locked(intel_dp); + else + ret = _psr1_ready_for_pipe_update_locked(intel_dp); + + if (ret) + drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n"); + mutex_unlock(&intel_dp->psr.lock); } } diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index facffbacd357..f6526d9ccfdc 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h @@ -6,21 +6,23 @@ #ifndef __INTEL_PSR_H__ #define __INTEL_PSR_H__ -#include "intel_frontbuffer.h" +#include <linux/types.h> +enum fb_op_origin; struct drm_connector; struct drm_connector_state; struct drm_i915_private; +struct intel_atomic_state; +struct intel_crtc; struct intel_crtc_state; struct intel_dp; -struct intel_crtc; -struct intel_atomic_state; -struct intel_plane_state; -struct intel_plane; struct intel_encoder; +struct intel_plane; +struct intel_plane_state; void intel_psr_init_dpcd(struct intel_dp *intel_dp); -void intel_psr_pre_plane_update(const struct intel_atomic_state *state); +void intel_psr_pre_plane_update(struct intel_atomic_state *state, + struct intel_crtc *crtc); void intel_psr_post_plane_update(const struct intel_atomic_state *state); void intel_psr_disable(struct intel_dp *intel_dp, const struct intel_crtc_state *old_crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 8a52b7a16774..c8488f5ebd04 100644 --- a/drivers/gpu/drm/i915/display/intel_quirks.c +++ b/drivers/gpu/drm/i915/display/intel_quirks.c @@ -5,6 +5,7 @@ #include <linux/dmi.h> +#include "i915_drv.h" #include "intel_display_types.h" #include "intel_quirks.h" diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 2dc6c3742ba2..76e1188b01d4 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -1842,7 +1842,7 @@ static void intel_enable_sdvo(struct intel_atomic_state *state, intel_sdvo_write_sdvox(intel_sdvo, temp); for (i = 0; i < 2; i++) - intel_wait_for_vblank(dev_priv, crtc->pipe); + intel_crtc_wait_for_next_vblank(crtc); success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2); /* diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c index 5e20f340730f..09f405e4d363 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.c +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c @@ -58,7 +58,6 @@ void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); const struct intel_ddi_buf_trans *trans; enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - int level = intel_ddi_level(encoder, crtc_state, 0); int n_entries, ln; trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); @@ -66,6 +65,7 @@ void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder, return; for (ln = 0; ln < 4; ln++) { + int level = intel_ddi_level(encoder, crtc_state, ln); u32 val = 0; val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, trans->entries[level].snps.vswing); @@ -186,6 +186,7 @@ static const struct intel_mpllb_state dg2_dp_uhbr10_100 = { REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_SHIM_DIV32_CLK_SEL, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) | @@ -369,6 +370,7 @@ static const struct intel_mpllb_state dg2_dp_uhbr10_38_4 = { REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_SHIM_DIV32_CLK_SEL, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 08116f41da26..2357a1301f48 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -40,14 +40,15 @@ #include <drm/drm_rect.h> #include "i915_drv.h" -#include "i915_trace.h" #include "i915_vgpu.h" +#include "i9xx_plane.h" #include "intel_atomic_plane.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" +#include "intel_fb.h" #include "intel_frontbuffer.h" #include "intel_sprite.h" -#include "i9xx_plane.h" #include "intel_vrr.h" int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) @@ -118,7 +119,7 @@ static void i9xx_plane_linear_gamma(u16 gamma[8]) } static void -chv_update_csc(const struct intel_plane_state *plane_state) +chv_sprite_update_csc(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -190,7 +191,7 @@ 
chv_update_csc(const struct intel_plane_state *plane_state) #define COS_0 1 static void -vlv_update_clrc(const struct intel_plane_state *plane_state) +vlv_sprite_update_clrc(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -393,7 +394,7 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state, return sprctl; } -static void vlv_update_gamma(const struct intel_plane_state *plane_state) +static void vlv_sprite_update_gamma(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -417,45 +418,54 @@ static void vlv_update_gamma(const struct intel_plane_state *plane_state) } static void -vlv_update_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +vlv_sprite_update_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; enum plane_id plane_id = plane->id; - u32 sprsurf_offset = plane_state->view.color_plane[0].offset; - u32 linear_offset; - const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; int crtc_x = plane_state->uapi.dst.x1; int crtc_y = plane_state->uapi.dst.y1; u32 crtc_w = drm_rect_width(&plane_state->uapi.dst); u32 crtc_h = drm_rect_height(&plane_state->uapi.dst); + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + + intel_de_write_fw(dev_priv, SPSTRIDE(pipe, plane_id), + plane_state->view.color_plane[0].mapping_stride); + intel_de_write_fw(dev_priv, SPPOS(pipe, plane_id), + (crtc_y << 16) | crtc_x); + intel_de_write_fw(dev_priv, SPSIZE(pipe, plane_id), + ((crtc_h - 1) << 16) | (crtc_w - 1)); + + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +static void +vlv_sprite_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum pipe pipe = plane->pipe; + enum plane_id plane_id = plane->id; + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + u32 sprsurf_offset = plane_state->view.color_plane[0].offset; u32 x = plane_state->view.color_plane[0].x; u32 y = plane_state->view.color_plane[0].y; + u32 sprctl, linear_offset; unsigned long irqflags; - u32 sprctl; sprctl = plane_state->ctl | vlv_sprite_ctl_crtc(crtc_state); - /* Sizes are 0 based */ - crtc_w--; - crtc_h--; - linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - intel_de_write_fw(dev_priv, SPSTRIDE(pipe, plane_id), - plane_state->view.color_plane[0].stride); - intel_de_write_fw(dev_priv, SPPOS(pipe, plane_id), - (crtc_y << 16) | crtc_x); - intel_de_write_fw(dev_priv, SPSIZE(pipe, plane_id), - (crtc_h << 16) | crtc_w); - intel_de_write_fw(dev_priv, SPCONSTALPHA(pipe, plane_id), 0); - if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) - chv_update_csc(plane_state); + chv_sprite_update_csc(plane_state); if (key->flags) { intel_de_write_fw(dev_priv, SPKEYMINVAL(pipe, plane_id), @@ -466,6 +476,8 @@ vlv_update_plane(struct intel_plane *plane, key->max_value); } + intel_de_write_fw(dev_priv, SPCONSTALPHA(pipe, plane_id), 0); + intel_de_write_fw(dev_priv, 
SPLINOFF(pipe, plane_id), linear_offset); intel_de_write_fw(dev_priv, SPTILEOFF(pipe, plane_id), (y << 16) | x); @@ -478,15 +490,15 @@ vlv_update_plane, intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id), intel_plane_ggtt_offset(plane_state) + sprsurf_offset); - vlv_update_clrc(plane_state); - vlv_update_gamma(plane_state); + vlv_sprite_update_clrc(plane_state); + vlv_sprite_update_gamma(plane_state); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } static void -vlv_disable_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) +vlv_sprite_disable_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; @@ -502,8 +514,8 @@ vlv_disable_plane(struct intel_plane *plane, } static bool -vlv_plane_get_hw_state(struct intel_plane *plane, - enum pipe *pipe) +vlv_sprite_get_hw_state(struct intel_plane *plane, + enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; @@ -805,7 +817,7 @@ static void ivb_sprite_linear_gamma(const struct intel_plane_state *plane_state, i++; } -static void ivb_update_gamma(const struct intel_plane_state *plane_state) +static void ivb_sprite_update_gamma(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -835,48 +847,56 @@ static void ivb_update_gamma(const struct intel_plane_state *plane_state) } static void -ivb_update_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +ivb_sprite_update_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; - u32 sprsurf_offset = plane_state->view.color_plane[0].offset; - u32 linear_offset; - const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; int crtc_x = plane_state->uapi.dst.x1; int crtc_y = plane_state->uapi.dst.y1; u32 crtc_w = drm_rect_width(&plane_state->uapi.dst); u32 crtc_h = drm_rect_height(&plane_state->uapi.dst); - u32 x = plane_state->view.color_plane[0].x; - u32 y = plane_state->view.color_plane[0].y; u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; - u32 sprctl, sprscale = 0; + u32 sprscale = 0; unsigned long irqflags; - sprctl = plane_state->ctl | ivb_sprite_ctl_crtc(crtc_state); - - /* Sizes are 0 based */ - src_w--; - src_h--; - crtc_w--; - crtc_h--; - if (crtc_w != src_w || crtc_h != src_h) - sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; - - linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + if (crtc_w != src_w || crtc_h != src_h) + sprscale = SPRITE_SCALE_ENABLE | ((src_w - 1) << 16) | (src_h - 1); spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); intel_de_write_fw(dev_priv, SPRSTRIDE(pipe), - plane_state->view.color_plane[0].stride); + plane_state->view.color_plane[0].mapping_stride); intel_de_write_fw(dev_priv, SPRPOS(pipe), (crtc_y << 16) | crtc_x); - intel_de_write_fw(dev_priv, SPRSIZE(pipe), (crtc_h << 16) | crtc_w); + intel_de_write_fw(dev_priv, SPRSIZE(pipe), ((crtc_h - 1) << 16) | (crtc_w - 1)); if (IS_IVYBRIDGE(dev_priv)) intel_de_write_fw(dev_priv, SPRSCALE(pipe), sprscale); + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} +
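ivb_sprite_update_noarm() above and ivb_sprite_update_arm() below split the old ivb_update_plane() along the new update_noarm/update_arm contract: "noarm" registers (stride, position, size) may be written early, while the "arm" phase writes the colorkey, offset, control and surface registers, with the surface write latching the whole update. A hedged sketch of how a caller is expected to drive the pair; plane_commit() is hypothetical, and the real call sites live in the atomic commit path outside this hunk:

/*
 * Illustrative sketch only: the calling pattern for the
 * update_noarm/update_arm plane vfuncs introduced by this series.
 */
static void plane_commit(struct intel_plane *plane,
			 const struct intel_crtc_state *crtc_state,
			 const struct intel_plane_state *plane_state)
{
	/* Before the vblank evasion window: non-arming register writes. */
	if (plane->update_noarm)
		plane->update_noarm(plane, crtc_state, plane_state);

	/*
	 * Inside the vblank evasion window: the arming write (control and
	 * surface address) makes the whole update take effect atomically.
	 */
	plane->update_arm(plane, crtc_state, plane_state);
}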
+static void +ivb_sprite_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum pipe pipe = plane->pipe; + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + u32 sprsurf_offset = plane_state->view.color_plane[0].offset; + u32 x = plane_state->view.color_plane[0].x; + u32 y = plane_state->view.color_plane[0].y; + u32 sprctl, linear_offset; + unsigned long irqflags; + + sprctl = plane_state->ctl | ivb_sprite_ctl_crtc(crtc_state); + + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + if (key->flags) { intel_de_write_fw(dev_priv, SPRKEYVAL(pipe), key->min_value); intel_de_write_fw(dev_priv, SPRKEYMSK(pipe), @@ -902,14 +922,14 @@ ivb_update_plane(struct intel_plane *plane, intel_de_write_fw(dev_priv, SPRSURF(pipe), intel_plane_ggtt_offset(plane_state) + sprsurf_offset); - ivb_update_gamma(plane_state); + ivb_sprite_update_gamma(plane_state); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } static void -ivb_disable_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) +ivb_sprite_disable_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; @@ -927,8 +947,8 @@ ivb_disable_plane(struct intel_plane *plane, } static bool -ivb_plane_get_hw_state(struct intel_plane *plane, - enum pipe *pipe) +ivb_sprite_get_hw_state(struct intel_plane *plane, + enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; @@ -1106,7 +1126,7 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state, return dvscntr; } -static void g4x_update_gamma(const struct intel_plane_state *plane_state) +static void g4x_sprite_update_gamma(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -1136,7 +1156,7 @@ static void ilk_sprite_linear_gamma(u16 gamma[17]) gamma[i] = (i << 10) / 16; } -static void ilk_update_gamma(const struct intel_plane_state *plane_state) +static void ilk_sprite_update_gamma(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -1163,47 +1183,55 @@ static void ilk_update_gamma(const struct intel_plane_state *plane_state) } static void -g4x_update_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +g4x_sprite_update_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; - u32 dvssurf_offset = plane_state->view.color_plane[0].offset; - u32 linear_offset; - const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; int crtc_x = plane_state->uapi.dst.x1; int crtc_y = plane_state->uapi.dst.y1; u32 crtc_w = drm_rect_width(&plane_state->uapi.dst); u32 crtc_h = drm_rect_height(&plane_state->uapi.dst); - u32 x = plane_state->view.color_plane[0].x; - u32 y = plane_state->view.color_plane[0].y; u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; u32 src_h = 
drm_rect_height(&plane_state->uapi.src) >> 16; - u32 dvscntr, dvsscale = 0; + u32 dvsscale = 0; unsigned long irqflags; - dvscntr = plane_state->ctl | g4x_sprite_ctl_crtc(crtc_state); - - /* Sizes are 0 based */ - src_w--; - src_h--; - crtc_w--; - crtc_h--; - if (crtc_w != src_w || crtc_h != src_h) - dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; - - linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + if (crtc_w != src_w || crtc_h != src_h) + dvsscale = DVS_SCALE_ENABLE | ((src_w - 1) << 16) | (src_h - 1); spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); intel_de_write_fw(dev_priv, DVSSTRIDE(pipe), - plane_state->view.color_plane[0].stride); + plane_state->view.color_plane[0].mapping_stride); intel_de_write_fw(dev_priv, DVSPOS(pipe), (crtc_y << 16) | crtc_x); - intel_de_write_fw(dev_priv, DVSSIZE(pipe), (crtc_h << 16) | crtc_w); + intel_de_write_fw(dev_priv, DVSSIZE(pipe), ((crtc_h - 1) << 16) | (crtc_w - 1)); intel_de_write_fw(dev_priv, DVSSCALE(pipe), dvsscale); + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +static void +g4x_sprite_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum pipe pipe = plane->pipe; + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + u32 dvssurf_offset = plane_state->view.color_plane[0].offset; + u32 x = plane_state->view.color_plane[0].x; + u32 y = plane_state->view.color_plane[0].y; + u32 dvscntr, linear_offset; + unsigned long irqflags; + + dvscntr = plane_state->ctl | g4x_sprite_ctl_crtc(crtc_state); + + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + if (key->flags) { intel_de_write_fw(dev_priv, DVSKEYVAL(pipe), key->min_value); intel_de_write_fw(dev_priv, DVSKEYMSK(pipe), @@ -1224,16 +1252,16 @@ g4x_update_plane(struct intel_plane *plane, intel_plane_ggtt_offset(plane_state) + dvssurf_offset); if (IS_G4X(dev_priv)) - g4x_update_gamma(plane_state); + g4x_sprite_update_gamma(plane_state); else - ilk_update_gamma(plane_state); + ilk_sprite_update_gamma(plane_state); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } static void -g4x_disable_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) +g4x_sprite_disable_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; @@ -1250,8 +1278,8 @@ g4x_disable_plane(struct intel_plane *plane, } static bool -g4x_plane_get_hw_state(struct intel_plane *plane, - enum pipe *pipe) +g4x_sprite_get_hw_state(struct intel_plane *plane, + enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; @@ -1299,7 +1327,7 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state, int src_x, src_w, src_h, crtc_w, crtc_h; const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - unsigned int stride = plane_state->view.color_plane[0].stride; + unsigned int stride = plane_state->view.color_plane[0].mapping_stride; unsigned int cpp = fb->format->cpp[0]; unsigned int width_bytes; int min_width, min_height; @@ -1540,8 +1568,8 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, */ if (!ret && has_dst_key_in_primary_plane(dev_priv)) { struct intel_crtc *crtc = - intel_get_crtc_for_pipe(dev_priv, - to_intel_plane(plane)->pipe); +
intel_crtc_for_pipe(dev_priv, + to_intel_plane(plane)->pipe); plane_state = drm_atomic_get_plane_state(state, crtc->base.primary); @@ -1567,7 +1595,7 @@ out: return ret; } -static const u32 g4x_plane_formats[] = { +static const u32 g4x_sprite_formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, @@ -1575,13 +1603,7 @@ static const u32 g4x_plane_formats[] = { DRM_FORMAT_VYUY, }; -static const u64 i9xx_plane_format_modifiers[] = { - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - -static const u32 snb_plane_formats[] = { +static const u32 snb_sprite_formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB2101010, @@ -1594,7 +1616,7 @@ static const u32 snb_plane_formats[] = { DRM_FORMAT_VYUY, }; -static const u32 vlv_plane_formats[] = { +static const u32 vlv_sprite_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -1629,13 +1651,8 @@ static const u32 chv_pipe_b_sprite_formats[] = { static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { - switch (modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - break; - default: + if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) return false; - } switch (format) { case DRM_FORMAT_XRGB8888: @@ -1655,13 +1672,8 @@ static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane, static bool snb_sprite_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { - switch (modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - break; - default: + if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) return false; - } switch (format) { case DRM_FORMAT_XRGB8888: @@ -1686,13 +1698,8 @@ static bool snb_sprite_format_mod_supported(struct drm_plane *_plane, static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { - switch (modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - break; - default: + if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) return false; - } switch (format) { case DRM_FORMAT_C8: @@ -1762,9 +1769,10 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, return plane; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - plane->update_plane = vlv_update_plane; - plane->disable_plane = vlv_disable_plane; - plane->get_hw_state = vlv_plane_get_hw_state; + plane->update_noarm = vlv_sprite_update_noarm; + plane->update_arm = vlv_sprite_update_arm; + plane->disable_arm = vlv_sprite_disable_arm; + plane->get_hw_state = vlv_sprite_get_hw_state; plane->check_plane = vlv_sprite_check; plane->max_stride = i965_plane_max_stride; plane->min_cdclk = vlv_plane_min_cdclk; @@ -1773,16 +1781,16 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, formats = chv_pipe_b_sprite_formats; num_formats = ARRAY_SIZE(chv_pipe_b_sprite_formats); } else { - formats = vlv_plane_formats; - num_formats = ARRAY_SIZE(vlv_plane_formats); + formats = vlv_sprite_formats; + num_formats = ARRAY_SIZE(vlv_sprite_formats); } - modifiers = i9xx_plane_format_modifiers; plane_funcs = &vlv_sprite_funcs; } else if (DISPLAY_VER(dev_priv) >= 7) { - plane->update_plane = ivb_update_plane; - plane->disable_plane = ivb_disable_plane; - plane->get_hw_state = ivb_plane_get_hw_state; + plane->update_noarm = ivb_sprite_update_noarm; + plane->update_arm = ivb_sprite_update_arm; + plane->disable_arm = ivb_sprite_disable_arm; + plane->get_hw_state = 
ivb_sprite_get_hw_state; plane->check_plane = g4x_sprite_check; if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { @@ -1793,28 +1801,27 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, plane->min_cdclk = ivb_sprite_min_cdclk; } - formats = snb_plane_formats; - num_formats = ARRAY_SIZE(snb_plane_formats); - modifiers = i9xx_plane_format_modifiers; + formats = snb_sprite_formats; + num_formats = ARRAY_SIZE(snb_sprite_formats); plane_funcs = &snb_sprite_funcs; } else { - plane->update_plane = g4x_update_plane; - plane->disable_plane = g4x_disable_plane; - plane->get_hw_state = g4x_plane_get_hw_state; + plane->update_noarm = g4x_sprite_update_noarm; + plane->update_arm = g4x_sprite_update_arm; + plane->disable_arm = g4x_sprite_disable_arm; + plane->get_hw_state = g4x_sprite_get_hw_state; plane->check_plane = g4x_sprite_check; plane->max_stride = g4x_sprite_max_stride; plane->min_cdclk = g4x_sprite_min_cdclk; - modifiers = i9xx_plane_format_modifiers; if (IS_SANDYBRIDGE(dev_priv)) { - formats = snb_plane_formats; - num_formats = ARRAY_SIZE(snb_plane_formats); + formats = snb_sprite_formats; + num_formats = ARRAY_SIZE(snb_sprite_formats); plane_funcs = &snb_sprite_funcs; } else { - formats = g4x_plane_formats; - num_formats = ARRAY_SIZE(g4x_plane_formats); + formats = g4x_sprite_formats; + num_formats = ARRAY_SIZE(g4x_sprite_formats); plane_funcs = &g4x_sprite_funcs; } @@ -1833,11 +1840,15 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, plane->id = PLANE_SPRITE0 + sprite; plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id); + modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_TILING_X); + ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 0, plane_funcs, formats, num_formats, modifiers, DRM_PLANE_TYPE_OVERLAY, "sprite %c", sprite_name(pipe, sprite)); + kfree(modifiers); + if (ret) goto fail; diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h index c085eb87705c..4f63e4967731 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.h +++ b/drivers/gpu/drm/i915/display/intel_sprite.h @@ -27,14 +27,10 @@ struct intel_plane_state; #define VBLANK_EVASION_TIME_US 100 #endif -int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, - int usecs); struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe, int plane); int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state); -void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state); int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state); int chv_plane_check_rotation(const struct intel_plane_state *plane_state); diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index 88a398df9621..8a39989b87ad 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -36,6 +36,7 @@ #include "i915_drv.h" #include "intel_connector.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_hotplug.h" @@ -924,8 +925,7 @@ intel_enable_tv(struct intel_atomic_state *state, struct drm_i915_private *dev_priv = to_i915(dev); /* Prevents vblank waits from timing out in intel_tv_detect_type() */ - intel_wait_for_vblank(dev_priv, - to_intel_crtc(pipe_config->uapi.crtc)->pipe); + 
intel_crtc_wait_for_next_vblank(to_intel_crtc(pipe_config->uapi.crtc)); intel_de_write(dev_priv, TV_CTL, intel_de_read(dev_priv, TV_CTL) | TV_ENC_ENABLE); @@ -1618,7 +1618,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv, intel_de_write(dev_priv, TV_DAC, tv_dac); intel_de_posting_read(dev_priv, TV_DAC); - intel_wait_for_vblank(dev_priv, crtc->pipe); + intel_crtc_wait_for_next_vblank(crtc); type = -1; tv_dac = intel_de_read(dev_priv, TV_DAC); @@ -1651,7 +1651,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv, intel_de_posting_read(dev_priv, TV_CTL); /* For unknown reasons the hw barfs if we don't do this vblank wait. */ - intel_wait_for_vblank(dev_priv, crtc->pipe); + intel_crtc_wait_for_next_vblank(crtc); /* Restore interrupt config */ if (connector->polled & DRM_CONNECTOR_POLL_HPD) { diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index a2108a8f544d..f043d85ba64d 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -330,7 +330,12 @@ enum vbt_gmbus_ddi { ADLS_DDC_BUS_PORT_TC1 = 0x2, ADLS_DDC_BUS_PORT_TC2, ADLS_DDC_BUS_PORT_TC3, - ADLS_DDC_BUS_PORT_TC4 + ADLS_DDC_BUS_PORT_TC4, + ADLP_DDC_BUS_PORT_TC1 = 0x3, + ADLP_DDC_BUS_PORT_TC2, + ADLP_DDC_BUS_PORT_TC3, + ADLP_DDC_BUS_PORT_TC4 + }; #define DP_AUX_A 0x40 diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index 2275f99ce9d7..9b05f93ed8bc 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -6,12 +6,14 @@ * Manasi Navare <manasi.d.navare@intel.com> */ #include <linux/limits.h> + #include "i915_drv.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dsi.h" -#include "intel_vdsc.h" #include "intel_qp_tables.h" +#include "intel_vdsc.h" enum ROW_INDEX_BPP { ROW_INDEX_6BPP = 0, @@ -442,10 +444,10 @@ calculate_rc_params(struct rc_parameters *rc, } } -int intel_dsc_compute_params(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config) +int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config; u16 compressed_bpp = pipe_config->dsc.compressed_bpp; const struct rc_parameters *rc_params; @@ -598,7 +600,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) pps_val |= DSC_422_ENABLE; if (vdsc_cfg->vbr_enable) pps_val |= DSC_VBR_ENABLE; - drm_info(&dev_priv->drm, "PPS0 = 0x%08x\n", pps_val); + drm_dbg_kms(&dev_priv->drm, "PPS0 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_0, pps_val); @@ -622,7 +624,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) /* Populate PICTURE_PARAMETER_SET_1 registers */ pps_val = 0; pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel); - drm_info(&dev_priv->drm, "PPS1 = 0x%08x\n", pps_val); + drm_dbg_kms(&dev_priv->drm, "PPS1 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_1, pps_val); @@ -647,7 +649,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) pps_val = 0; pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) | DSC_PIC_WIDTH(vdsc_cfg->pic_width / 
num_vdsc_instances); - drm_info(&dev_priv->drm, "PPS2 = 0x%08x\n", pps_val); + drm_dbg_kms(&dev_priv->drm, "PPS2 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_2, pps_val); @@ -672,7 +674,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) pps_val = 0; pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) | DSC_SLICE_WIDTH(vdsc_cfg->slice_width); - drm_info(&dev_priv->drm, "PPS3 = 0x%08x\n", pps_val); + drm_dbg_kms(&dev_priv->drm, "PPS3 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_3, pps_val); @@ -697,7 +699,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) pps_val = 0; pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) | DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay); - drm_info(&dev_priv->drm, "PPS4 = 0x%08x\n", pps_val); + drm_dbg_kms(&dev_priv->drm, "PPS4 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_4, pps_val); @@ -722,7 +724,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) pps_val = 0; pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) | DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval); - drm_info(&dev_priv->drm, "PPS5 = 0x%08x\n", pps_val); + drm_dbg_kms(&dev_priv->drm, "PPS5 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_5, pps_val); @@ -749,7 +751,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) DSC_FIRST_LINE_BPG_OFFSET(vdsc_cfg->first_line_bpg_offset) | DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) | DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp); - drm_info(&dev_priv->drm, "PPS6 = 0x%08x\n", pps_val); + drm_dbg_kms(&dev_priv->drm, "PPS6 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_6, pps_val); @@ -774,7 +776,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) pps_val = 0; pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) | DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset); - drm_info(&dev_priv->drm, "PPS7 = 0x%08x\n", pps_val); + drm_dbg_kms(&dev_priv->drm, "PPS7 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_7, pps_val); @@ -799,7 +801,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) pps_val = 0; pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) | DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset); - drm_info(&dev_priv->drm, "PPS8 = 0x%08x\n", pps_val); + drm_dbg_kms(&dev_priv->drm, "PPS8 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_8, pps_val); @@ -824,7 +826,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) pps_val = 0; pps_val |= DSC_RC_MODEL_SIZE(vdsc_cfg->rc_model_size) | DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST); - drm_info(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val); + drm_dbg_kms(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_9, pps_val); @@ -851,7 +853,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) DSC_RC_QUANT_INC_LIMIT1(vdsc_cfg->rc_quant_incr_limit1) | 
DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) | DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST); - drm_info(&dev_priv->drm, "PPS10 = 0x%08x\n", pps_val); + drm_dbg_kms(&dev_priv->drm, "PPS10 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_10, pps_val); @@ -879,7 +881,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) vdsc_cfg->slice_width) | DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height / vdsc_cfg->slice_height); - drm_info(&dev_priv->drm, "PPS16 = 0x%08x\n", pps_val); + drm_dbg_kms(&dev_priv->drm, "PPS16 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_16, pps_val); @@ -906,8 +908,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) rc_buf_thresh_dword[i / 4] |= (u32)(vdsc_cfg->rc_buf_thresh[i] << BITS_PER_BYTE * (i % 4)); - drm_info(&dev_priv->drm, " RC_BUF_THRESH%d = 0x%08x\n", i, - rc_buf_thresh_dword[i / 4]); + drm_dbg_kms(&dev_priv->drm, "RC_BUF_THRESH_%d = 0x%08x\n", i, + rc_buf_thresh_dword[i / 4]); } if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0, @@ -963,8 +965,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) RC_MAX_QP_SHIFT) | (vdsc_cfg->rc_range_params[i].range_min_qp << RC_MIN_QP_SHIFT)) << 16 * (i % 2)); - drm_info(&dev_priv->drm, " RC_RANGE_PARAM_%d = 0x%08x\n", i, - rc_range_params_dword[i / 2]); + drm_dbg_kms(&dev_priv->drm, "RC_RANGE_PARAM_%d = 0x%08x\n", i, + rc_range_params_dword[i / 2]); } if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0, @@ -1055,8 +1057,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) } } -static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); @@ -1064,6 +1066,9 @@ static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, struct drm_dsc_picture_parameter_set pps; enum port port; + if (!crtc_state->dsc.compression_enable) + return; + drm_dsc_pps_payload_pack(&pps, vdsc_cfg); for_each_dsi_port(port, intel_dsi->ports) { @@ -1074,14 +1079,16 @@ static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, } } -static void intel_dsc_dp_pps_write(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +void intel_dsc_dp_pps_write(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; struct drm_dsc_pps_infoframe dp_dsc_pps_sdp; + if (!crtc_state->dsc.compression_enable) + return; + /* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */ drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp.pps_header); @@ -1105,25 +1112,16 @@ static i915_reg_t dss_ctl2_reg(struct intel_crtc *crtc, enum transcoder cpu_tran ICL_PIPE_DSS_CTL2(crtc->pipe) : DSS_CTL2; } -static struct intel_crtc * -_get_crtc_for_pipe(struct drm_i915_private *i915, enum pipe pipe) -{ - if (!intel_pipe_valid(i915, pipe)) - return NULL; - - return 
intel_get_crtc_for_pipe(i915, pipe); -} - struct intel_crtc * intel_dsc_get_bigjoiner_secondary(const struct intel_crtc *primary_crtc) { - return _get_crtc_for_pipe(to_i915(primary_crtc->base.dev), primary_crtc->pipe + 1); + return intel_crtc_for_pipe(to_i915(primary_crtc->base.dev), primary_crtc->pipe + 1); } static struct intel_crtc * intel_dsc_get_bigjoiner_primary(const struct intel_crtc *secondary_crtc) { - return _get_crtc_for_pipe(to_i915(secondary_crtc->base.dev), secondary_crtc->pipe - 1); + return intel_crtc_for_pipe(to_i915(secondary_crtc->base.dev), secondary_crtc->pipe - 1); } void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state) @@ -1142,8 +1140,7 @@ void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state) } } -void intel_dsc_enable(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +void intel_dsc_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -1155,13 +1152,6 @@ void intel_dsc_enable(struct intel_encoder *encoder, intel_dsc_pps_configure(crtc_state); - if (!crtc_state->bigjoiner_slave) { - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) - intel_dsc_dsi_pps_write(encoder, crtc_state); - else - intel_dsc_dp_pps_write(encoder, crtc_state); - } - dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE; if (crtc_state->dsc.dsc_split) { dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE; diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h index 0c5d80a572da..4ec75f715986 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.h +++ b/drivers/gpu/drm/i915/display/intel_vdsc.h @@ -15,15 +15,17 @@ struct intel_encoder; bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state); void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state); -void intel_dsc_enable(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state); +void intel_dsc_enable(const struct intel_crtc_state *crtc_state); void intel_dsc_disable(const struct intel_crtc_state *crtc_state); -int intel_dsc_compute_params(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config); +int intel_dsc_compute_params(struct intel_crtc_state *pipe_config); void intel_uncompressed_joiner_get_config(struct intel_crtc_state *crtc_state); void intel_dsc_get_config(struct intel_crtc_state *crtc_state); enum intel_display_power_domain intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder); struct intel_crtc *intel_dsc_get_bigjoiner_secondary(const struct intel_crtc *primary_crtc); +void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +void intel_dsc_dp_pps_write(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); #endif /* __INTEL_VDSC_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c index c335b1dbafcf..139e8936edc5 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.c +++ b/drivers/gpu/drm/i915/display/intel_vrr.c @@ -60,7 +60,7 @@ intel_vrr_check_modeset(struct intel_atomic_state *state) * Between those two points the vblank exit starts (and hence registers get * latched) ASAP after a push is sent. * - * framestart_delay is programmable 0-3. + * framestart_delay is programmable 1-4. 
*/ static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_state) { @@ -138,13 +138,13 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, i915->window2_delay; else /* - * FIXME: s/4/framestart_delay+1/ to get consistent + * FIXME: s/4/framestart_delay/ to get consistent * earliest/latest points for register latching regardless * of the framestart_delay used? * * FIXME: this really needs the extra scanline to provide consistent * behaviour for all framestart_delay values. Otherwise with - * framestart_delay==3 we will end up extending the min vblank by + * framestart_delay==4 we will end up extending the min vblank by * one extra line. */ crtc_state->vrr.pipeline_full = @@ -193,6 +193,18 @@ void intel_vrr_send_push(const struct intel_crtc_state *crtc_state) TRANS_PUSH_EN | TRANS_PUSH_SEND); } +bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + + if (!crtc_state->vrr.enable) + return false; + + return intel_de_read(dev_priv, TRANS_PUSH(cpu_transcoder)) & TRANS_PUSH_SEND; +} + void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h index 96f9c9c27ab9..1c2da572693d 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.h +++ b/drivers/gpu/drm/i915/display/intel_vrr.h @@ -23,6 +23,7 @@ void intel_vrr_compute_config(struct intel_crtc_state *crtc_state, void intel_vrr_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); void intel_vrr_send_push(const struct intel_crtc_state *crtc_state); +bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state); void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state); void intel_vrr_get_config(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c index 37eabeff8197..c2e94118566b 100644 --- a/drivers/gpu/drm/i915/display/skl_scaler.c +++ b/drivers/gpu/drm/i915/display/skl_scaler.c @@ -4,6 +4,7 @@ */ #include "intel_de.h" #include "intel_display_types.h" +#include "intel_fb.h" #include "skl_scaler.h" #include "skl_universal_plane.h" diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index a0e53a3b267a..d5359cf3d270 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -13,6 +13,7 @@ #include "intel_de.h" #include "intel_display_types.h" #include "intel_fb.h" +#include "intel_fbc.h" #include "intel_pm.h" #include "intel_psr.h" #include "intel_sprite.h" @@ -163,50 +164,6 @@ static const u32 icl_hdr_plane_formats[] = { DRM_FORMAT_XVYU16161616, }; -static const u64 skl_plane_format_modifiers_noccs[] = { - I915_FORMAT_MOD_Yf_TILED, - I915_FORMAT_MOD_Y_TILED, - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - -static const u64 skl_plane_format_modifiers_ccs[] = { - I915_FORMAT_MOD_Yf_TILED_CCS, - I915_FORMAT_MOD_Y_TILED_CCS, - I915_FORMAT_MOD_Yf_TILED, - I915_FORMAT_MOD_Y_TILED, - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - -static const u64 gen12_plane_format_modifiers_mc_ccs[] = { - 
I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS, - I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, - I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC, - I915_FORMAT_MOD_Y_TILED, - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - -static const u64 gen12_plane_format_modifiers_rc_ccs[] = { - I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, - I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC, - I915_FORMAT_MOD_Y_TILED, - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - -static const u64 adlp_step_a_plane_format_modifiers[] = { - I915_FORMAT_MOD_Y_TILED, - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) { switch (format) { @@ -464,9 +421,19 @@ static int icl_plane_min_width(const struct drm_framebuffer *fb, } } -static int icl_plane_max_width(const struct drm_framebuffer *fb, - int color_plane, - unsigned int rotation) +static int icl_hdr_plane_max_width(const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) +{ + if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) + return 4096; + else + return 5120; +} + +static int icl_sdr_plane_max_width(const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) { return 5120; } @@ -633,7 +600,7 @@ static u32 skl_plane_stride(const struct intel_plane_state *plane_state, { const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int rotation = plane_state->hw.rotation; - u32 stride = plane_state->view.color_plane[color_plane].stride; + u32 stride = plane_state->view.color_plane[color_plane].scanout_stride; if (color_plane >= fb->format->num_planes) return 0; @@ -642,8 +609,8 @@ static u32 skl_plane_stride(const struct intel_plane_state *plane_state, } static void -skl_disable_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) +skl_plane_disable_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; @@ -716,13 +683,13 @@ static u32 skl_plane_ctl_format(u32 pixel_format) case DRM_FORMAT_XYUV8888: return PLANE_CTL_FORMAT_XYUV; case DRM_FORMAT_YUYV: - return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV; + return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_YUYV; case DRM_FORMAT_YVYU: - return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU; + return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_YVYU; case DRM_FORMAT_UYVY: - return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY; + return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_UYVY; case DRM_FORMAT_VYUY: - return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY; + return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_VYUY; case DRM_FORMAT_NV12: return PLANE_CTL_FORMAT_NV12; case DRM_FORMAT_P010: @@ -985,6 +952,9 @@ static u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; } + if (plane_state->force_black) + plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE; + return plane_color_ctl; } @@ -1008,74 +978,60 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state, } } -static void intel_load_plane_csc_black(struct intel_plane *intel_plane) +static u32 skl_plane_surf(const struct intel_plane_state *plane_state, + int color_plane) { - struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); - enum pipe pipe = intel_plane->pipe; - enum plane_id plane = intel_plane->id; - u16 
postoff = 0; + u32 plane_surf; - drm_dbg_kms(&dev_priv->drm, "plane color CTM to black %s:%d\n", - intel_plane->base.name, plane); - intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 0), 0); - intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 1), 0); + plane_surf = intel_plane_ggtt_offset(plane_state) + + skl_surf_address(plane_state, color_plane); - intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 2), 0); - intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 3), 0); + if (plane_state->decrypt) + plane_surf |= PLANE_SURF_DECRYPT; - intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 4), 0); - intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 5), 0); + return plane_surf; +} - intel_de_write_fw(dev_priv, PLANE_CSC_PREOFF(pipe, plane, 0), 0); - intel_de_write_fw(dev_priv, PLANE_CSC_PREOFF(pipe, plane, 1), 0); - intel_de_write_fw(dev_priv, PLANE_CSC_PREOFF(pipe, plane, 2), 0); +static void icl_plane_csc_load_black(struct intel_plane *plane) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + enum plane_id plane_id = plane->id; + enum pipe pipe = plane->pipe; + + intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 0), 0); + intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 1), 0); + + intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 2), 0); + intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 3), 0); + + intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 4), 0); + intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 5), 0); - intel_de_write_fw(dev_priv, PLANE_CSC_POSTOFF(pipe, plane, 0), postoff); - intel_de_write_fw(dev_priv, PLANE_CSC_POSTOFF(pipe, plane, 1), postoff); - intel_de_write_fw(dev_priv, PLANE_CSC_POSTOFF(pipe, plane, 2), postoff); + intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 0), 0); + intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 1), 0); + intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 2), 0); + + intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 0), 0); + intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 1), 0); + intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0); } static void -skl_program_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - int color_plane) +skl_program_plane_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + int color_plane) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; - const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; - u32 surf_addr = skl_surf_address(plane_state, color_plane); u32 stride = skl_plane_stride(plane_state, color_plane); const struct drm_framebuffer *fb = plane_state->hw.fb; - int aux_plane = skl_main_to_aux_plane(fb, color_plane); int crtc_x = plane_state->uapi.dst.x1; int crtc_y = plane_state->uapi.dst.y1; - u32 x = plane_state->view.color_plane[color_plane].x; - u32 y = plane_state->view.color_plane[color_plane].y; u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; - u8 alpha = plane_state->hw.alpha >> 8; - u32 plane_color_ctl = 0, aux_dist = 0; unsigned long irqflags; - u32 keymsk, keymax, plane_surf; - u32 plane_ctl = plane_state->ctl; - - plane_ctl |= skl_plane_ctl_crtc(crtc_state); - - if (DISPLAY_VER(dev_priv) >= 10) - plane_color_ctl = plane_state->color_ctl | - 
glk_plane_color_ctl_crtc(crtc_state); - - /* Sizes are 0 based */ - src_w--; - src_h--; - - keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); - - keymsk = key->channel_mask & 0x7ffffff; - if (alpha < 0xff) - keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; /* The scaler will handle the output position */ if (plane_state->scaler_id >= 0) { @@ -1083,40 +1039,83 @@ skl_program_plane(struct intel_plane *plane, crtc_y = 0; } - if (aux_plane) { - aux_dist = skl_surf_address(plane_state, aux_plane) - surf_addr; - - if (DISPLAY_VER(dev_priv) < 12) - aux_dist |= skl_plane_stride(plane_state, aux_plane); - } - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + /* + * FIXME: pxp session invalidation can hit any time even at time of commit + * or after the commit, display content will be garbage. + */ + if (plane_state->force_black) + icl_plane_csc_load_black(plane); + intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), stride); intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x); intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id), - (src_h << 16) | src_w); + ((src_h - 1) << 16) | (src_w - 1)); - intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist); + if (intel_fb_is_rc_ccs_cc_modifier(fb->modifier)) { + intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 0), + lower_32_bits(plane_state->ccval)); + intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 1), + upper_32_bits(plane_state->ccval)); + } if (icl_is_hdr_plane(dev_priv, plane_id)) intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), plane_state->cus_ctl); - if (DISPLAY_VER(dev_priv) >= 10) - intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), - plane_color_ctl); - if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id)) icl_program_input_csc(plane, crtc_state, plane_state); - if (fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC) - intel_uncore_write64_fw(&dev_priv->uncore, - PLANE_CC_VAL(pipe, plane_id), plane_state->ccval); - skl_write_plane_wm(plane, crtc_state); + intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane); + + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +static void +skl_program_plane_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + int color_plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum plane_id plane_id = plane->id; + enum pipe pipe = plane->pipe; + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + const struct drm_framebuffer *fb = plane_state->hw.fb; + int aux_plane = skl_main_to_aux_plane(fb, color_plane); + u32 x = plane_state->view.color_plane[color_plane].x; + u32 y = plane_state->view.color_plane[color_plane].y; + u32 keymsk, keymax, aux_dist = 0, plane_color_ctl = 0; + u8 alpha = plane_state->hw.alpha >> 8; + u32 plane_ctl = plane_state->ctl; + unsigned long irqflags; + + plane_ctl |= skl_plane_ctl_crtc(crtc_state); + + if (DISPLAY_VER(dev_priv) >= 10) + plane_color_ctl = plane_state->color_ctl | + glk_plane_color_ctl_crtc(crtc_state); + + keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); + + keymsk = key->channel_mask & 0x7ffffff; + if (alpha < 0xff) + keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; + + if (aux_plane) { + aux_dist = skl_surf_address(plane_state, aux_plane) - + skl_surf_address(plane_state, color_plane); + + if (DISPLAY_VER(dev_priv) < 12) + aux_dist |= skl_plane_stride(plane_state, aux_plane); + } + + 
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), key->min_value); intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), keymsk); @@ -1125,17 +1124,22 @@ skl_program_plane(struct intel_plane *plane, intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id), (y << 16) | x); + intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist); + if (DISPLAY_VER(dev_priv) < 11) intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id), (plane_state->view.color_plane[1].y << 16) | plane_state->view.color_plane[1].x); - intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane); + if (DISPLAY_VER(dev_priv) >= 10) + intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl); /* * Enable the scaler before the plane so that we don't * get a catastrophic underrun even if the two operations * end up happening in two different frames. + * + * TODO: split into noarm+arm pair */ if (plane_state->scaler_id >= 0) skl_program_plane_scaler(plane, crtc_state, plane_state); @@ -1146,23 +1150,8 @@ skl_program_plane(struct intel_plane *plane, * the control register just before the surface register. */ intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); - plane_surf = intel_plane_ggtt_offset(plane_state) + surf_addr; - plane_color_ctl = intel_de_read_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id)); - - /* - * FIXME: pxp session invalidation can hit any time even at time of commit - * or after the commit, display content will be garbage. - */ - if (plane_state->decrypt) { - plane_surf |= PLANE_SURF_DECRYPT; - } else if (plane_state->force_black) { - intel_load_plane_csc_black(plane); - plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE; - } - - intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), - plane_color_ctl); - intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), plane_surf); + intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), + skl_plane_surf(plane_state, color_plane)); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } @@ -1177,7 +1166,6 @@ skl_plane_async_flip(struct intel_plane *plane, unsigned long irqflags; enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; - u32 surf_addr = plane_state->view.color_plane[0].offset; u32 plane_ctl = plane_state->ctl; plane_ctl |= skl_plane_ctl_crtc(crtc_state); @@ -1189,15 +1177,29 @@ skl_plane_async_flip(struct intel_plane *plane, intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), - intel_plane_ggtt_offset(plane_state) + surf_addr); + skl_plane_surf(plane_state, 0)); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } static void -skl_update_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +skl_plane_update_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + int color_plane = 0; + + if (plane_state->planar_linked_plane && !plane_state->planar_slave) + /* Program the UV plane on planar master */ + color_plane = 1; + + skl_program_plane_noarm(plane, crtc_state, plane_state, color_plane); +} + +static void +skl_plane_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { int color_plane = 0; @@ -1205,7 +1207,7 @@ skl_update_plane(struct intel_plane *plane, /* Program the UV plane on planar master */ 
color_plane = 1; - skl_program_plane(plane, crtc_state, plane_state, color_plane); + skl_program_plane_arm(plane, crtc_state, plane_state, color_plane); } static bool intel_format_is_p01x(u32 format) @@ -1232,7 +1234,7 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, return 0; if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) && - is_ccs_modifier(fb->modifier)) { + intel_fb_is_ccs_modifier(fb->modifier)) { drm_dbg_kms(&dev_priv->drm, "RC support only with 0/180 degree rotation (%x)\n", rotation); @@ -1284,13 +1286,8 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, /* Y-tiling is not supported in IF-ID Interlace mode */ if (crtc_state->hw.enable && crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE && - (fb->modifier == I915_FORMAT_MOD_Y_TILED || - fb->modifier == I915_FORMAT_MOD_Yf_TILED || - fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || - fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS || - fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || - fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS || - fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)) { + fb->modifier != DRM_FORMAT_MOD_LINEAR && + fb->modifier != I915_FORMAT_MOD_X_TILED) { drm_dbg_kms(&dev_priv->drm, "Y/Yf tiling not supported in IF-ID mode\n"); return -EINVAL; @@ -1487,7 +1484,7 @@ int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state, if (fb->modifier == I915_FORMAT_MOD_X_TILED) { int cpp = fb->format->cpp[0]; - while ((*x + w) * cpp > plane_state->view.color_plane[0].stride) { + while ((*x + w) * cpp > plane_state->view.color_plane[0].mapping_stride) { if (*offset == 0) { drm_dbg_kms(&dev_priv->drm, "Unable to find suitable display surface offset due to X-tiling\n"); @@ -1536,7 +1533,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) * CCS AUX surface doesn't have its own x/y offsets, we must make sure * they match with the main surface x/y offsets. */ - if (is_ccs_modifier(fb->modifier)) { + if (intel_fb_is_ccs_modifier(fb->modifier)) { while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset, aux_plane)) { if (offset == 0) @@ -1600,7 +1597,7 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, uv_plane); - if (is_ccs_modifier(fb->modifier)) { + if (intel_fb_is_ccs_modifier(fb->modifier)) { int ccs_plane = main_to_ccs_plane(fb, uv_plane); u32 aux_offset = plane_state->view.color_plane[ccs_plane].offset; u32 alignment = intel_surf_alignment(fb, uv_plane); @@ -1656,8 +1653,7 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state) int hsub, vsub; int x, y; - if (!is_ccs_plane(fb, ccs_plane) || - is_gen12_ccs_cc_plane(fb, ccs_plane)) + if (!intel_fb_is_ccs_aux_plane(fb, ccs_plane)) continue; intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, @@ -1699,7 +1695,7 @@ static int skl_check_plane_surface(struct intel_plane_state *plane_state) * Handle the AUX surface first since the main surface setup depends on * it. 
*/ - if (is_ccs_modifier(fb->modifier)) { + if (intel_fb_is_ccs_modifier(fb->modifier)) { ret = skl_check_ccs_aux_surface(plane_state); if (ret) return ret; @@ -1737,6 +1733,18 @@ static bool skl_fb_scalable(const struct drm_framebuffer *fb) } } +static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + + return intel_pxp_key_check(&i915->gt.pxp, obj, false) == 0; +} + +static bool pxp_is_borked(struct drm_i915_gem_object *obj) +{ + return i915_gem_object_is_protected(obj) && !bo_has_valid_encryption(obj); +} + static int skl_plane_check(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { @@ -1781,6 +1789,11 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state, if (ret) return ret; + if (DISPLAY_VER(dev_priv) >= 11) { + plane_state->decrypt = bo_has_valid_encryption(intel_fb_obj(fb)); + plane_state->force_black = pxp_is_borked(intel_fb_obj(fb)); + } + /* HW only has 8 bits pixel precision, disable plane if invisible */ if (!(plane_state->hw.alpha >> 8)) plane_state->uapi.visible = false; @@ -1812,6 +1825,15 @@ static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv, return pipe == PIPE_A && plane_id == PLANE_PRIMARY; } +static struct intel_fbc *skl_plane_fbc(struct drm_i915_private *dev_priv, + enum pipe pipe, enum plane_id plane_id) +{ + if (skl_plane_has_fbc(dev_priv, pipe, plane_id)) + return dev_priv->fbc; + else + return NULL; +} + static bool skl_plane_has_planar(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id) { @@ -1870,49 +1892,20 @@ static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv, } } -static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv, - enum pipe pipe, enum plane_id plane_id) -{ - if (plane_id == PLANE_CURSOR) - return false; - - if (DISPLAY_VER(dev_priv) >= 11) - return true; - - if (IS_GEMINILAKE(dev_priv)) - return pipe != PIPE_C; - - return pipe != PIPE_C && - (plane_id == PLANE_PRIMARY || - plane_id == PLANE_SPRITE0); -} - static bool skl_plane_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { struct intel_plane *plane = to_intel_plane(_plane); - switch (modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - case I915_FORMAT_MOD_Y_TILED: - case I915_FORMAT_MOD_Yf_TILED: - break; - case I915_FORMAT_MOD_Y_TILED_CCS: - case I915_FORMAT_MOD_Yf_TILED_CCS: - if (!plane->has_ccs) - return false; - break; - default: + if (!intel_fb_plane_supports_modifier(plane, modifier)) return false; - } switch (format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ARGB8888: case DRM_FORMAT_ABGR8888: - if (is_ccs_modifier(modifier)) + if (intel_fb_is_ccs_modifier(modifier)) return true; fallthrough; case DRM_FORMAT_RGB565: @@ -1953,52 +1946,20 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, } } -static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv, - enum plane_id plane_id) -{ - /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */ - if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv) || - IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_D0)) - return false; - - /* Wa_22011186057 */ - if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) - return false; - - return plane_id < PLANE_SPRITE4; -} - static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { - struct drm_i915_private *dev_priv = to_i915(_plane->dev); struct intel_plane *plane = to_intel_plane(_plane); - switch 
(modifier) { - case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: - if (!gen12_plane_supports_mc_ccs(dev_priv, plane->id)) - return false; - fallthrough; - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - case I915_FORMAT_MOD_Y_TILED: - break; - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: - /* Wa_22011186057 */ - if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) - return false; - break; - default: + if (!intel_fb_plane_supports_modifier(plane, modifier)) return false; - } switch (format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ARGB8888: case DRM_FORMAT_ABGR8888: - if (is_ccs_modifier(modifier)) + if (intel_fb_is_ccs_modifier(modifier)) return true; fallthrough; case DRM_FORMAT_YUYV: @@ -2010,7 +1971,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_P010: case DRM_FORMAT_P012: case DRM_FORMAT_P016: - if (modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS) + if (intel_fb_is_mc_ccs_modifier(modifier)) return true; fallthrough; case DRM_FORMAT_RGB565: @@ -2039,18 +2000,6 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, } } -static const u64 *gen12_get_plane_modifiers(struct drm_i915_private *dev_priv, - enum plane_id plane_id) -{ - /* Wa_22011186057 */ - if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) - return adlp_step_a_plane_format_modifiers; - else if (gen12_plane_supports_mc_ccs(dev_priv, plane_id)) - return gen12_plane_format_modifiers_mc_ccs; - else - return gen12_plane_format_modifiers_rc_ccs; -} - static const struct drm_plane_funcs skl_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, @@ -2091,6 +2040,64 @@ skl_plane_disable_flip_done(struct intel_plane *plane) spin_unlock_irq(&i915->irq_lock); } +static bool skl_plane_has_rc_ccs(struct drm_i915_private *i915, + enum pipe pipe, enum plane_id plane_id) +{ + /* Wa_22011186057 */ + if (IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) + return false; + + if (DISPLAY_VER(i915) >= 11) + return true; + + if (IS_GEMINILAKE(i915)) + return pipe != PIPE_C; + + return pipe != PIPE_C && + (plane_id == PLANE_PRIMARY || + plane_id == PLANE_SPRITE0); +} + +static bool gen12_plane_has_mc_ccs(struct drm_i915_private *i915, + enum plane_id plane_id) +{ + if (DISPLAY_VER(i915) < 12) + return false; + + /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */ + if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || + IS_TGL_DISPLAY_STEP(i915, STEP_A0, STEP_D0)) + return false; + + /* Wa_22011186057 */ + if (IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) + return false; + + return plane_id < PLANE_SPRITE4; +} + +static u8 skl_get_plane_caps(struct drm_i915_private *i915, + enum pipe pipe, enum plane_id plane_id) +{ + u8 caps = INTEL_PLANE_CAP_TILING_X; + + if (DISPLAY_VER(i915) < 13 || IS_ALDERLAKE_P(i915)) + caps |= INTEL_PLANE_CAP_TILING_Y; + if (DISPLAY_VER(i915) < 12) + caps |= INTEL_PLANE_CAP_TILING_Yf; + + if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) { + caps |= INTEL_PLANE_CAP_CCS_RC; + if (DISPLAY_VER(i915) >= 12) + caps |= INTEL_PLANE_CAP_CCS_RC_CC; + } + + if (gen12_plane_has_mc_ccs(i915, plane_id)) + caps |= INTEL_PLANE_CAP_CCS_MC; + + return caps; +} + struct intel_plane * skl_universal_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id) @@ -2113,16 +2120,14 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, plane->id = plane_id; plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id); - 
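
skl_get_plane_caps() above collapses the per-platform CCS workarounds into a capability bitmask that intel_fb_plane_get_modifiers() later expands into a modifier array. A hedged sketch of the kind of test that translation layer can perform; the helper name is illustrative, and the real implementation lives in intel_fb.c, outside this patch:

static bool sketch_caps_allow_mc_ccs(u8 caps)
{
	/*
	 * Media-compressed CCS is only advertised when both Y-tiling and
	 * the MC capability survived the workaround checks above.
	 */
	return (caps & INTEL_PLANE_CAP_TILING_Y) &&
	       (caps & INTEL_PLANE_CAP_CCS_MC);
}
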
plane->has_fbc = skl_plane_has_fbc(dev_priv, pipe, plane_id); - if (plane->has_fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; - - fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; - } + intel_fbc_add_plane(skl_plane_fbc(dev_priv, pipe, plane_id), plane); if (DISPLAY_VER(dev_priv) >= 11) { plane->min_width = icl_plane_min_width; - plane->max_width = icl_plane_max_width; + if (icl_is_hdr_plane(dev_priv, plane_id)) + plane->max_width = icl_hdr_plane_max_width; + else + plane->max_width = icl_sdr_plane_max_width; plane->max_height = icl_plane_max_height; plane->min_cdclk = icl_plane_min_cdclk; } else if (DISPLAY_VER(dev_priv) >= 10) { @@ -2136,8 +2141,9 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, } plane->max_stride = skl_plane_max_stride; - plane->update_plane = skl_update_plane; - plane->disable_plane = skl_disable_plane; + plane->update_noarm = skl_plane_update_noarm; + plane->update_arm = skl_plane_update_arm; + plane->disable_arm = skl_plane_disable_arm; plane->get_hw_state = skl_plane_get_hw_state; plane->check_plane = skl_plane_check; @@ -2159,29 +2165,28 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, formats = skl_get_plane_formats(dev_priv, pipe, plane_id, &num_formats); - plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id); - if (DISPLAY_VER(dev_priv) >= 12) { - modifiers = gen12_get_plane_modifiers(dev_priv, plane_id); + if (DISPLAY_VER(dev_priv) >= 12) plane_funcs = &gen12_plane_funcs; - } else { - if (plane->has_ccs) - modifiers = skl_plane_format_modifiers_ccs; - else - modifiers = skl_plane_format_modifiers_noccs; + else plane_funcs = &skl_plane_funcs; - } if (plane_id == PLANE_PRIMARY) plane_type = DRM_PLANE_TYPE_PRIMARY; else plane_type = DRM_PLANE_TYPE_OVERLAY; + modifiers = intel_fb_plane_get_modifiers(dev_priv, + skl_get_plane_caps(dev_priv, pipe, plane_id)); + ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 0, plane_funcs, formats, num_formats, modifiers, plane_type, "plane %d%c", plane_id + 1, pipe_name(pipe)); + + kfree(modifiers); + if (ret) goto fail; diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index 07584695514b..20141f33ed64 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -38,9 +38,12 @@ #include "intel_de.h" #include "intel_display_types.h" #include "intel_dsi.h" +#include "intel_dsi_vbt.h" #include "intel_fifo_underrun.h" #include "intel_panel.h" #include "skl_scaler.h" +#include "vlv_dsi.h" +#include "vlv_dsi_pll.h" #include "vlv_sideband.h" /* return pixels in terms of txbyteclkhs */ @@ -1258,7 +1261,9 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 pclk; + drm_dbg_kms(&dev_priv->drm, "\n"); pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI); @@ -1270,6 +1275,9 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, pclk = vlv_dsi_get_pclk(encoder, pipe_config); } + if (intel_dsi->dual_link) + pclk *= 2; + if (pclk) { pipe_config->hw.adjusted_mode.crtc_clock = pclk; pipe_config->port_clock = pclk; diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.h b/drivers/gpu/drm/i915/display/vlv_dsi.h new file mode 100644 index 000000000000..0c2b279df9d4 --- /dev/null +++ b/drivers/gpu/drm/i915/display/vlv_dsi.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + 
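
The vlv_dsi.c hunk above doubles the reported pixel clock when the panel uses dual-link DSI: each link carries half of the pixels, so the mode clock exposed to the rest of the driver is twice the per-link rate returned by the PLL code. A one-line sketch of that relationship (illustrative helper, not driver API):

static u32 sketch_dsi_mode_clock(u32 per_link_pclk, bool dual_link)
{
	/* Two links each deliver half the image, so the CRTC clock doubles. */
	return dual_link ? per_link_pclk * 2 : per_link_pclk;
}
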
+#ifndef __VLV_DSI_H__
+#define __VLV_DSI_H__
+
+#include <linux/types.h>
+
+enum port;
+struct drm_i915_private;
+struct intel_dsi;
+
+void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
+enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
+void vlv_dsi_init(struct drm_i915_private *dev_priv);
+
+#endif /* __VLV_DSI_H__ */
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 5413b52ab6ba..1b81797dd02e 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -31,6 +31,7 @@
 #include "intel_de.h"
 #include "intel_display_types.h"
 #include "intel_dsi.h"
+#include "vlv_dsi_pll.h"
 #include "vlv_sideband.h"

 static const u16 lfsr_converts[] = {
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.h b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
new file mode 100644
index 000000000000..ab9291ad1e79
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __VLV_DSI_PLL_H__
+#define __VLV_DSI_PLL_H__
+
+#include <linux/types.h>
+
+enum port;
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_encoder;
+
+int vlv_dsi_pll_compute(struct intel_encoder *encoder,
+			struct intel_crtc_state *config);
+void vlv_dsi_pll_enable(struct intel_encoder *encoder,
+			const struct intel_crtc_state *config);
+void vlv_dsi_pll_disable(struct intel_encoder *encoder);
+u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
+		     struct intel_crtc_state *config);
+void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
+
+bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
+int bxt_dsi_pll_compute(struct intel_encoder *encoder,
+			struct intel_crtc_state *config);
+void bxt_dsi_pll_enable(struct intel_encoder *encoder,
+			const struct intel_crtc_state *config);
+void bxt_dsi_pll_disable(struct intel_encoder *encoder);
+u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
+		     struct intel_crtc_state *config);
+void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
+
+void assert_dsi_pll_enabled(struct drm_i915_private *i915);
+void assert_dsi_pll_disabled(struct drm_i915_private *i915);
+
+#endif /* __VLV_DSI_PLL_H__ */
diff --git a/drivers/gpu/drm/i915/dma_resv_utils.c b/drivers/gpu/drm/i915/dma_resv_utils.c
deleted file mode 100644
index 7df91b7e4ca8..000000000000
--- a/drivers/gpu/drm/i915/dma_resv_utils.c
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2020 Intel Corporation
- */
-
-#include <linux/dma-resv.h>
-
-#include "dma_resv_utils.h"
-
-void dma_resv_prune(struct dma_resv *resv)
-{
-	if (dma_resv_trylock(resv)) {
-		if (dma_resv_test_signaled(resv, true))
-			dma_resv_add_excl_fence(resv, NULL);
-		dma_resv_unlock(resv);
-	}
-}
diff --git a/drivers/gpu/drm/i915/dma_resv_utils.h b/drivers/gpu/drm/i915/dma_resv_utils.h
deleted file mode 100644
index b9d8fb5f8367..000000000000
--- a/drivers/gpu/drm/i915/dma_resv_utils.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2020 Intel Corporation
- */
-
-#ifndef DMA_RESV_UTILS_H
-#define DMA_RESV_UTILS_H
-
-struct dma_resv;
-
-void dma_resv_prune(struct dma_resv *resv);
-
-#endif /* DMA_RESV_UTILS_H */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 7358bebef15c..470fdfd61a0f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ 
b/drivers/gpu/drm/i915/gem/i915_gem_busy.c @@ -115,8 +115,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, { struct drm_i915_gem_busy *args = data; struct drm_i915_gem_object *obj; - struct dma_resv_list *list; - unsigned int seq; + struct dma_resv_iter cursor; + struct dma_fence *fence; int err; err = -ENOENT; @@ -142,27 +142,20 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, * to report the overall busyness. This is what the wait-ioctl does. * */ -retry: - seq = raw_read_seqcount(&obj->base.resv->seq); - - /* Translate the exclusive fence to the READ *and* WRITE engine */ - args->busy = busy_check_writer(dma_resv_excl_fence(obj->base.resv)); - - /* Translate shared fences to READ set of engines */ - list = dma_resv_shared_list(obj->base.resv); - if (list) { - unsigned int shared_count = list->shared_count, i; - - for (i = 0; i < shared_count; ++i) { - struct dma_fence *fence = - rcu_dereference(list->shared[i]); - + args->busy = 0; + dma_resv_iter_begin(&cursor, obj->base.resv, true); + dma_resv_for_each_fence_unlocked(&cursor, fence) { + if (dma_resv_iter_is_restarted(&cursor)) + args->busy = 0; + + if (dma_resv_iter_is_exclusive(&cursor)) + /* Translate the exclusive fence to the READ *and* WRITE engine */ + args->busy |= busy_check_writer(fence); + else + /* Translate shared fences to READ set of engines */ args->busy |= busy_check_reader(fence); - } } - - if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq)) - goto retry; + dma_resv_iter_end(&cursor); err = 0; out: diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c index f0435c6feb68..8a248003dfae 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c @@ -69,10 +69,16 @@ static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj) bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, unsigned int flags) { + struct drm_i915_private *i915 = to_i915(obj->base.dev); struct clflush *clflush; assert_object_held(obj); + if (IS_DGFX(i915)) { + WARN_ON_ONCE(obj->cache_dirty); + return false; + } + /* * Stolen memory is always coherent with the GPU as it is explicitly * marked as wc by the system, or the system is cache-coherent. @@ -105,16 +111,24 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, if (clflush) { i915_sw_fence_await_reservation(&clflush->base.chain, obj->base.resv, NULL, true, - i915_fence_timeout(to_i915(obj->base.dev)), + i915_fence_timeout(i915), I915_FENCE_GFP); dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma); dma_fence_work_commit(&clflush->base); + /* + * We must have successfully populated the pages(since we are + * holding a pin on the pages as per the flush worker) to reach + * this point, which must mean we have already done the required + * flush-on-acquire, hence resetting cache_dirty here should be + * safe. 
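
The i915_gem_busy_ioctl() conversion above replaces the open-coded seqcount retry loop with the generic unlocked dma_resv iterator. A minimal sketch of that pattern, using only the iterator calls that appear in the hunk; the accumulating helper itself is illustrative:

#include <linux/bits.h>
#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

static u32 sketch_poll_resv(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	u32 busy = 0;

	dma_resv_iter_begin(&cursor, resv, true); /* true: also walk shared fences */
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		/*
		 * A restart means the fence list changed while we walked it
		 * unlocked; the iterator starts over, so drop everything
		 * accumulated so far.
		 */
		if (dma_resv_iter_is_restarted(&cursor))
			busy = 0;

		if (dma_resv_iter_is_exclusive(&cursor))
			busy |= BIT(0); /* writer */
		else
			busy |= BIT(1); /* reader */
	}
	dma_resv_iter_end(&cursor);

	return busy;
}
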
+ */ + obj->cache_dirty = false; } else if (obj->mm.pages) { __do_clflush(obj); + obj->cache_dirty = false; } else { GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU); } - obj->cache_dirty = false; return true; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index fb33d0322960..347dab952e90 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -479,7 +479,7 @@ set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data) if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) && !IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) { drm_dbg(&i915->drm, - "Bonding on gen12+ aside from TGL, RKL, and ADL_S not supported\n"); + "Bonding not supported on this platform\n"); return -ENODEV; } @@ -1001,7 +1001,7 @@ static void free_engines_rcu(struct rcu_head *rcu) free_engines(engines); } -static int __i915_sw_fence_call +static int engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { struct i915_gem_engines *engines = diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index e8a58c997170..1b526039a60d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -248,8 +248,19 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) if (IS_ERR(pages)) return PTR_ERR(pages); - /* XXX: consider doing a vmap flush or something */ - if (!HAS_LLC(i915) || i915_gem_object_can_bypass_llc(obj)) + /* + * DG1 is special here since it still snoops transactions even with + * CACHE_NONE. This is not the case with other HAS_SNOOP platforms. We + * might need to revisit this as we add new discrete platforms. + * + * XXX: Consider doing a vmap flush or something, where possible. + * Currently we just do a heavy handed wbinvd_on_all_cpus() here since + * the underlying sg_table might not even point to struct pages, so we + * can't just call drm_clflush_sg or similar, like we do elsewhere in + * the driver. + */ + if (i915_gem_object_can_bypass_llc(obj) || + (!HAS_LLC(i915) && !IS_DG1(i915))) wbinvd_on_all_cpus(); sg_page_sizes = i915_sg_dma_sizes(pages->sgl); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index b684a62bf3b0..26532c07d467 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -18,10 +18,32 @@ static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj) { + struct drm_i915_private *i915 = to_i915(obj->base.dev); + + if (IS_DGFX(i915)) + return false; + return !(obj->cache_level == I915_CACHE_NONE || obj->cache_level == I915_CACHE_WT); } +bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + + if (obj->cache_dirty) + return false; + + if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)) + return true; + + if (IS_DGFX(i915)) + return false; + + /* Currently in use by HW (display engine)? Keep flushed. 
*/ + return i915_gem_object_is_framebuffer(obj); +} + static void flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 4d7da07442f2..60ee60f7bb09 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -29,6 +29,7 @@ #include "i915_gem_ioctls.h" #include "i915_trace.h" #include "i915_user_extensions.h" +#include "i915_vma_snapshot.h" struct eb_vma { struct i915_vma *vma; @@ -307,11 +308,15 @@ struct i915_execbuffer { struct eb_fence *fences; unsigned long num_fences; +#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) + struct i915_capture_list *capture_lists[MAX_ENGINE_INSTANCE + 1]; +#endif }; static int eb_parse(struct i915_execbuffer *eb); static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle); static void eb_unpin_engine(struct i915_execbuffer *eb); +static void eb_capture_release(struct i915_execbuffer *eb); static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb) { @@ -990,7 +995,7 @@ static int eb_validate_vmas(struct i915_execbuffer *eb) } if (!(ev->flags & EXEC_OBJECT_WRITE)) { - err = dma_resv_reserve_shared(vma->resv, 1); + err = dma_resv_reserve_shared(vma->obj->base.resv, 1); if (err) return err; } @@ -1043,6 +1048,7 @@ static void eb_release_vmas(struct i915_execbuffer *eb, bool final) i915_vma_put(vma); } + eb_capture_release(eb); eb_unpin_engine(eb); } @@ -1880,36 +1886,113 @@ eb_find_first_request_added(struct i915_execbuffer *eb) return NULL; } -static int eb_move_to_gpu(struct i915_execbuffer *eb) +#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) + +/* Stage with GFP_KERNEL allocations before we enter the signaling critical path */ +static void eb_capture_stage(struct i915_execbuffer *eb) { const unsigned int count = eb->buffer_count; - unsigned int i = count; - int err = 0, j; + unsigned int i = count, j; + struct i915_vma_snapshot *vsnap; while (i--) { struct eb_vma *ev = &eb->vma[i]; struct i915_vma *vma = ev->vma; unsigned int flags = ev->flags; - struct drm_i915_gem_object *obj = vma->obj; - assert_vma_held(vma); + if (!(flags & EXEC_OBJECT_CAPTURE)) + continue; - if (flags & EXEC_OBJECT_CAPTURE) { + vsnap = i915_vma_snapshot_alloc(GFP_KERNEL); + if (!vsnap) + continue; + + i915_vma_snapshot_init(vsnap, vma, "user"); + for_each_batch_create_order(eb, j) { struct i915_capture_list *capture; - for_each_batch_create_order(eb, j) { - if (!eb->requests[j]) - break; + capture = kmalloc(sizeof(*capture), GFP_KERNEL); + if (!capture) + continue; - capture = kmalloc(sizeof(*capture), GFP_KERNEL); - if (capture) { - capture->next = - eb->requests[j]->capture_list; - capture->vma = vma; - eb->requests[j]->capture_list = capture; - } - } + capture->next = eb->capture_lists[j]; + capture->vma_snapshot = i915_vma_snapshot_get(vsnap); + eb->capture_lists[j] = capture; + } + i915_vma_snapshot_put(vsnap); + } +} + +/* Commit once we're in the critical path */ +static void eb_capture_commit(struct i915_execbuffer *eb) +{ + unsigned int j; + + for_each_batch_create_order(eb, j) { + struct i915_request *rq = eb->requests[j]; + + if (!rq) + break; + + rq->capture_list = eb->capture_lists[j]; + eb->capture_lists[j] = NULL; + } +} + +/* + * Release anything that didn't get committed due to errors. + * The capture_list will otherwise be freed at request retire. 
+ */ +static void eb_capture_release(struct i915_execbuffer *eb) +{ + unsigned int j; + + for_each_batch_create_order(eb, j) { + if (eb->capture_lists[j]) { + i915_request_free_capture_list(eb->capture_lists[j]); + eb->capture_lists[j] = NULL; } + } +} + +static void eb_capture_list_clear(struct i915_execbuffer *eb) +{ + memset(eb->capture_lists, 0, sizeof(eb->capture_lists)); +} + +#else + +static void eb_capture_stage(struct i915_execbuffer *eb) +{ +} + +static void eb_capture_commit(struct i915_execbuffer *eb) +{ +} + +static void eb_capture_release(struct i915_execbuffer *eb) +{ +} + +static void eb_capture_list_clear(struct i915_execbuffer *eb) +{ +} + +#endif + +static int eb_move_to_gpu(struct i915_execbuffer *eb) +{ + const unsigned int count = eb->buffer_count; + unsigned int i = count; + int err = 0, j; + + while (i--) { + struct eb_vma *ev = &eb->vma[i]; + struct i915_vma *vma = ev->vma; + unsigned int flags = ev->flags; + struct drm_i915_gem_object *obj = vma->obj; + + assert_vma_held(vma); /* * If the GPU is not _reading_ through the CPU cache, we need @@ -1990,6 +2073,8 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) /* Unconditionally flush any chipset caches (for streaming writes). */ intel_gt_chipset_flush(eb->gt); + eb_capture_commit(eb); + return 0; err_skip: @@ -2164,7 +2249,7 @@ static int eb_parse(struct i915_execbuffer *eb) goto err_trampoline; } - err = dma_resv_reserve_shared(shadow->resv, 1); + err = dma_resv_reserve_shared(shadow->obj->base.resv, 1); if (err) goto err_trampoline; @@ -3114,7 +3199,7 @@ eb_requests_create(struct i915_execbuffer *eb, struct dma_fence *in_fence, /* Allocate a request for this batch buffer nice and early. */ eb->requests[i] = i915_request_create(eb_find_context(eb, i)); if (IS_ERR(eb->requests[i])) { - out_fence = ERR_PTR(PTR_ERR(eb->requests[i])); + out_fence = ERR_CAST(eb->requests[i]); eb->requests[i] = NULL; return out_fence; } @@ -3132,13 +3217,14 @@ eb_requests_create(struct i915_execbuffer *eb, struct dma_fence *in_fence, } /* - * Whilst this request exists, batch_obj will be on the - * active_list, and so will hold the active reference. Only when - * this request is retired will the batch_obj be moved onto - * the inactive_list and lose its active reference. Hence we do - * not need to explicitly hold another reference here. + * Not really on stack, but we don't want to call + * kfree on the batch_snapshot when we put it, so use the + * _onstack interface. 
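
eb_capture_stage(), eb_capture_commit() and eb_capture_release() above follow a prepare/commit/abort idiom: all GFP_KERNEL allocations happen while blocking is still legal, and only pointer moves happen once the request enters the signaling critical path. A generic sketch of the same idiom, with hypothetical names:

#include <linux/slab.h>

struct sketch_node {
	struct sketch_node *next;
	void *payload;
};

/* Stage: may block and may fail; runs before any fence is published. */
static struct sketch_node *sketch_stage(void *payload)
{
	struct sketch_node *n = kmalloc(sizeof(*n), GFP_KERNEL);

	if (!n)
		return NULL; /* best effort: losing capture data is tolerable */
	n->payload = payload;
	n->next = NULL;
	return n;
}

/* Commit: inside the critical section only pointers move, nothing blocks. */
static void sketch_commit(struct sketch_node **list, struct sketch_node *n)
{
	n->next = *list;
	*list = n;
}

/* Abort: free whatever was staged but never handed over. */
static void sketch_release(struct sketch_node *n)
{
	while (n) {
		struct sketch_node *next = n->next;

		kfree(n);
		n = next;
	}
}
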
*/ - eb->requests[i]->batch = eb->batches[i]->vma; + if (eb->batches[i]->vma) + i915_vma_snapshot_init_onstack(&eb->requests[i]->batch_snapshot, + eb->batches[i]->vma, + "batch"); if (eb->batch_pool) { GEM_BUG_ON(intel_context_is_parallel(eb->context)); intel_gt_buffer_pool_mark_active(eb->batch_pool, @@ -3187,6 +3273,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, eb.fences = NULL; eb.num_fences = 0; + eb_capture_list_clear(&eb); + memset(eb.requests, 0, sizeof(struct i915_request *) * ARRAY_SIZE(eb.requests)); eb.composite_fence = NULL; @@ -3273,10 +3361,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, } ww_acquire_done(&eb.ww.ctx); + eb_capture_stage(&eb); out_fence = eb_requests_create(&eb, in_fence, out_fence_fd); if (IS_ERR(out_fence)) { err = PTR_ERR(out_fence); + out_fence = NULL; if (eb.requests[0]) goto err_request; else diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c index a57a6b7013c2..c5150a1ee3d2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c @@ -145,24 +145,10 @@ static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = { .put_pages = i915_gem_object_put_pages_internal, }; -/** - * i915_gem_object_create_internal: create an object with volatile pages - * @i915: the i915 device - * @size: the size in bytes of backing storage to allocate for the object - * - * Creates a new object that wraps some internal memory for private use. - * This object is not backed by swappable storage, and as such its contents - * are volatile and only valid whilst pinned. If the object is reaped by the - * shrinker, its pages and data will be discarded. Equally, it is not a full - * GEM object and so not valid for access from userspace. This makes it useful - * for hardware interfaces like ringbuffers (which are pinned from the time - * the request is written to the time the hardware stops accessing it), but - * not for contexts (which need to be preserved when not active for later - * reuse). Note that it is not cleared upon allocation. - */ struct drm_i915_gem_object * -i915_gem_object_create_internal(struct drm_i915_private *i915, - phys_addr_t size) +__i915_gem_object_create_internal(struct drm_i915_private *i915, + const struct drm_i915_gem_object_ops *ops, + phys_addr_t size) { static struct lock_class_key lock_class; struct drm_i915_gem_object *obj; @@ -179,7 +165,7 @@ i915_gem_object_create_internal(struct drm_i915_private *i915, return ERR_PTR(-ENOMEM); drm_gem_private_object_init(&i915->drm, &obj->base, size); - i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class, 0); + i915_gem_object_init(obj, ops, &lock_class, 0); obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE; /* @@ -199,3 +185,25 @@ i915_gem_object_create_internal(struct drm_i915_private *i915, return obj; } + +/** + * i915_gem_object_create_internal: create an object with volatile pages + * @i915: the i915 device + * @size: the size in bytes of backing storage to allocate for the object + * + * Creates a new object that wraps some internal memory for private use. + * This object is not backed by swappable storage, and as such its contents + * are volatile and only valid whilst pinned. If the object is reaped by the + * shrinker, its pages and data will be discarded. Equally, it is not a full + * GEM object and so not valid for access from userspace. 
This makes it useful + * for hardware interfaces like ringbuffers (which are pinned from the time + * the request is written to the time the hardware stops accessing it), but + * not for contexts (which need to be preserved when not active for later + * reuse). Note that it is not cleared upon allocation. + */ +struct drm_i915_gem_object * +i915_gem_object_create_internal(struct drm_i915_private *i915, + phys_addr_t size) +{ + return __i915_gem_object_create_internal(i915, &i915_gem_object_internal_ops, size); +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 65fc6ff5f59d..39bb15eafc07 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -17,6 +17,7 @@ #include "i915_gem_ioctls.h" #include "i915_gem_object.h" #include "i915_gem_mman.h" +#include "i915_mm.h" #include "i915_trace.h" #include "i915_user_extensions.h" #include "i915_gem_ttm.h" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 1e426a42a36c..5fac9b560b73 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -31,6 +31,7 @@ #include "i915_gem_context.h" #include "i915_gem_mman.h" #include "i915_gem_object.h" +#include "i915_gem_ttm.h" #include "i915_memcpy.h" #include "i915_trace.h" @@ -91,7 +92,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, } /** - * i915_gem_object_fini - Clean up a GEM object initialization + * __i915_gem_object_fini - Clean up a GEM object initialization * @obj: The gem object to cleanup * * This function cleans up gem object fields that are set up by @@ -107,25 +108,29 @@ void __i915_gem_object_fini(struct drm_i915_gem_object *obj) } /** - * Mark up the object's coherency levels for a given cache_level + * i915_gem_object_set_cache_coherency - Mark up the object's coherency levels + * for a given cache_level * @obj: #drm_i915_gem_object * @cache_level: cache level */ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, unsigned int cache_level) { + struct drm_i915_private *i915 = to_i915(obj->base.dev); + obj->cache_level = cache_level; if (cache_level != I915_CACHE_NONE) obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ | I915_BO_CACHE_COHERENT_FOR_WRITE); - else if (HAS_LLC(to_i915(obj->base.dev))) + else if (HAS_LLC(i915)) obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ; else obj->cache_coherent = 0; obj->cache_dirty = - !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE); + !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) && + !IS_DGFX(i915); } bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj) @@ -364,15 +369,6 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj) atomic_inc(&i915->mm.free_count); /* - * This serializes freeing with the shrinker. Since the free - * is delayed, first by RCU then by the workqueue, we want the - * shrinker to be able to free pages of unreferenced objects, - * or else we may oom whilst there are plenty of deferred - * freed objects. - */ - i915_gem_object_make_unshrinkable(obj); - - /* * Since we require blocking on struct_mutex to unbind the freed * object from the GPU before releasing resources back to the * system, we can not do that directly from the RCU callback (which may @@ -456,7 +452,7 @@ i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset * from can't cross a page boundary. 
The caller must ensure that @obj pages * are pinned and that @obj is synced wrt. any related writes. * - * Returns 0 on success or -ENODEV if the type of @obj's backing store is + * Return: %0 on success or -ENODEV if the type of @obj's backing store is * unsupported. */ int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size) @@ -732,6 +728,57 @@ static const struct drm_gem_object_funcs i915_gem_object_funcs = { .export = i915_gem_prime_export, }; +/** + * i915_gem_object_get_moving_fence - Get the object's moving fence if any + * @obj: The object whose moving fence to get. + * + * A non-signaled moving fence means that there is an async operation + * pending on the object that needs to be waited on before setting up + * any GPU- or CPU PTEs to the object's pages. + * + * Return: A refcounted pointer to the object's moving fence if any, + * NULL otherwise. + */ +struct dma_fence * +i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj) +{ + return dma_fence_get(i915_gem_to_ttm(obj)->moving); +} + +/** + * i915_gem_object_wait_moving_fence - Wait for the object's moving fence if any + * @obj: The object whose moving fence to wait for. + * @intr: Whether to wait interruptible. + * + * If the moving fence signaled without an error, it is detached from the + * object and put. + * + * Return: 0 if successful, -ERESTARTSYS if the wait was interrupted, + * negative error code if the async operation represented by the + * moving fence failed. + */ +int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj, + bool intr) +{ + struct dma_fence *fence = i915_gem_to_ttm(obj)->moving; + int ret; + + assert_object_held(obj); + if (!fence) + return 0; + + ret = dma_fence_wait(fence, intr); + if (ret) + return ret; + + if (fence->error) + return fence->error; + + i915_gem_to_ttm(obj)->moving = NULL; + dma_fence_put(fence); + return 0; +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/huge_gem_object.c" #include "selftests/huge_pages.c" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 59201801cec5..66f20b803b01 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -93,7 +93,6 @@ void i915_gem_flush_free_objects(struct drm_i915_private *i915); struct sg_table * __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj); -void i915_gem_object_truncate(struct drm_i915_gem_object *obj); /** * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle @@ -296,6 +295,12 @@ i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj) } static inline bool +i915_gem_object_has_self_managed_shrink_list(const struct drm_i915_gem_object *obj) +{ + return i915_gem_object_type_has(obj, I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST); +} + +static inline bool i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj) { return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY); @@ -449,7 +454,7 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) } int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj); -void i915_gem_object_truncate(struct drm_i915_gem_object *obj); +int i915_gem_object_truncate(struct drm_i915_gem_object *obj); void i915_gem_object_writeback(struct drm_i915_gem_object *obj); /** @@ -512,11 +517,18 @@ i915_gem_object_finish_access(struct drm_i915_gem_object *obj) i915_gem_object_unpin_pages(obj); } +struct dma_fence * +i915_gem_object_get_moving_fence(struct 
drm_i915_gem_object *obj); + +int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj, + bool intr); + void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, unsigned int cache_level); bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj); void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj); void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj); +bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj); int __must_check i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write); @@ -533,25 +545,15 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj); void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj); +void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj); +void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj); void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj); -static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) -{ - if (obj->cache_dirty) - return false; - - if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)) - return true; - - /* Currently in use by HW (display engine)? Keep flushed. */ - return i915_gem_object_is_framebuffer(obj); -} - static inline void __start_cpu_write(struct drm_i915_gem_object *obj) { obj->read_domains = I915_GEM_DOMAIN_CPU; obj->write_domain = I915_GEM_DOMAIN_CPU; - if (cpu_write_needs_clflush(obj)) + if (i915_gem_cpu_write_needs_clflush(obj)) obj->cache_dirty = true; } @@ -613,6 +615,14 @@ int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj, bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj, enum intel_memory_type type); +int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st, + size_t size, struct intel_memory_region *mr, + struct address_space *mapping, + unsigned int max_segment); +void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping, + bool dirty, bool backup); +void __shmem_writeback(size_t size, struct address_space *mapping); + #ifdef CONFIG_MMU_NOTIFIER static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index da85169006d4..f9f7e44099fe 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -34,9 +34,11 @@ struct i915_lut_handle { struct drm_i915_gem_object_ops { unsigned int flags; -#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1) -#define I915_GEM_OBJECT_IS_PROXY BIT(2) -#define I915_GEM_OBJECT_NO_MMAP BIT(3) +#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1) +/* Skip the shrinker management in set_pages/unset_pages */ +#define I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST BIT(2) +#define I915_GEM_OBJECT_IS_PROXY BIT(3) +#define I915_GEM_OBJECT_NO_MMAP BIT(4) /* Interface between the GEM object and its backing storage. 
* get_pages() is called once prior to the use of the associated set @@ -54,8 +56,11 @@ struct drm_i915_gem_object_ops { int (*get_pages)(struct drm_i915_gem_object *obj); void (*put_pages)(struct drm_i915_gem_object *obj, struct sg_table *pages); - void (*truncate)(struct drm_i915_gem_object *obj); + int (*truncate)(struct drm_i915_gem_object *obj); void (*writeback)(struct drm_i915_gem_object *obj); + int (*shrinker_release_pages)(struct drm_i915_gem_object *obj, + bool no_gpu_wait, + bool should_writeback); int (*pread)(struct drm_i915_gem_object *obj, const struct drm_i915_gem_pread *arg); @@ -486,9 +491,37 @@ struct drm_i915_gem_object { * instead go through the pin/unpin interfaces. */ atomic_t pages_pin_count; + + /** + * @shrink_pin: Prevents the pages from being made visible to + * the shrinker, while the shrink_pin is non-zero. Most users + * should pretty much never have to care about this, outside of + * some special use cases. + * + * By default most objects will start out as visible to the + * shrinker(if I915_GEM_OBJECT_IS_SHRINKABLE) as soon as the + * backing pages are attached to the object, like in + * __i915_gem_object_set_pages(). They will then be removed the + * shrinker list once the pages are released. + * + * The @shrink_pin is incremented by calling + * i915_gem_object_make_unshrinkable(), which will also remove + * the object from the shrinker list, if the pin count was zero. + * + * Callers will then typically call + * i915_gem_object_make_shrinkable() or + * i915_gem_object_make_purgeable() to decrement the pin count, + * and make the pages visible again. + */ atomic_t shrink_pin; /** + * @ttm_shrinkable: True when the object is using shmem pages + * underneath. Protected by the object lock. + */ + bool ttm_shrinkable; + + /** * Priority list of potential placements for this object. */ struct intel_memory_region **placements; @@ -512,6 +545,7 @@ struct drm_i915_gem_object { */ struct list_head region_link; + struct i915_refct_sgt *rsgt; struct sg_table *pages; void *mapping; @@ -547,7 +581,7 @@ struct drm_i915_gem_object { struct i915_gem_object_page_iter get_dma_page; /** - * Element within i915->mm.unbound_list or i915->mm.bound_list, + * Element within i915->mm.shrink_list or i915->mm.purge_list, * locked by i915->mm.obj_lock. */ struct list_head link; @@ -565,7 +599,7 @@ struct drm_i915_gem_object { } mm; struct { - struct sg_table *cached_io_st; + struct i915_refct_sgt *cached_io_rsgt; struct i915_gem_object_page_iter get_io_page; struct drm_i915_gem_object *backup; bool created:1; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 8eb1c3a6fc9c..49c6e55c68ce 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -26,6 +26,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, /* Make the pages coherent with the GPU (flushing any swapin). 
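
The @shrink_pin documentation above describes a visibility refcount: the object sits on the shrinker lists only while the pin count is zero. A simplified sketch of that protocol, assuming a single lock protects the list; the real driver splits this across mm.obj_lock and several helpers:

#include <linux/atomic.h>

static void sketch_make_unshrinkable(atomic_t *shrink_pin)
{
	/* First pin hides the object from the shrinker. */
	if (atomic_inc_return(shrink_pin) == 1) {
		/* list_del(&obj->mm.link) under the list lock */
	}
}

static void sketch_make_shrinkable(atomic_t *shrink_pin)
{
	/* Last unpin makes the pages visible to the shrinker again. */
	if (atomic_dec_and_test(shrink_pin)) {
		/* list_add_tail(&obj->mm.link, shrink_list) under the lock */
	}
}
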
*/ if (obj->cache_dirty) { + WARN_ON_ONCE(IS_DGFX(i915)); obj->write_domain = 0; if (i915_gem_object_has_struct_page(obj)) drm_clflush_sg(pages); @@ -68,7 +69,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, shrinkable = false; } - if (shrinkable) { + if (shrinkable && !i915_gem_object_has_self_managed_shrink_list(obj)) { struct list_head *list; unsigned long flags; @@ -158,11 +159,13 @@ retry: } /* Immediately discard the backing storage */ -void i915_gem_object_truncate(struct drm_i915_gem_object *obj) +int i915_gem_object_truncate(struct drm_i915_gem_object *obj) { drm_gem_free_mmap_offset(&obj->base); if (obj->ops->truncate) - obj->ops->truncate(obj); + return obj->ops->truncate(obj); + + return 0; } /* Try to discard unwanted pages */ @@ -208,7 +211,8 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) if (i915_gem_object_is_volatile(obj)) obj->mm.madv = I915_MADV_WILLNEED; - i915_gem_object_make_unshrinkable(obj); + if (!i915_gem_object_has_self_managed_shrink_list(obj)) + i915_gem_object_make_unshrinkable(obj); if (obj->mm.mapping) { unmap_object(obj, page_mask_bits(obj->mm.mapping)); @@ -414,6 +418,12 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, } if (!ptr) { + err = i915_gem_object_wait_moving_fence(obj, true); + if (err) { + ptr = ERR_PTR(err); + goto err_unpin; + } + if (GEM_WARN_ON(type == I915_MAP_WC && !static_cpu_has(X86_FEATURE_PAT))) ptr = ERR_PTR(-ENODEV); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c index a016ccec36f3..a4350227e9ae 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_region.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c @@ -11,7 +11,7 @@ void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj, struct intel_memory_region *mem) { - obj->mm.region = intel_memory_region_get(mem); + obj->mm.region = mem; mutex_lock(&mem->objects.lock); list_add(&obj->mm.region_link, &mem->objects.list); @@ -25,8 +25,6 @@ void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj) mutex_lock(&mem->objects.lock); list_del(&obj->mm.region_link); mutex_unlock(&mem->objects.lock); - - intel_memory_region_put(mem); } struct drm_i915_gem_object * diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index d77da59fae04..cc9fe258fba7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -25,62 +25,67 @@ static void check_release_pagevec(struct pagevec *pvec) cond_resched(); } -static int shmem_get_pages(struct drm_i915_gem_object *obj) +void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping, + bool dirty, bool backup) { - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct intel_memory_region *mem = obj->mm.region; - const unsigned long page_count = obj->base.size / PAGE_SIZE; + struct sgt_iter sgt_iter; + struct pagevec pvec; + struct page *page; + + mapping_clear_unevictable(mapping); + + pagevec_init(&pvec); + for_each_sgt_page(page, sgt_iter, st) { + if (dirty) + set_page_dirty(page); + + if (backup) + mark_page_accessed(page); + + if (!pagevec_add(&pvec, page)) + check_release_pagevec(&pvec); + } + if (pagevec_count(&pvec)) + check_release_pagevec(&pvec); + + sg_free_table(st); +} + +int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st, + size_t size, struct intel_memory_region *mr, + struct address_space *mapping, + unsigned int max_segment) +{ + const unsigned long page_count = size / 
PAGE_SIZE; unsigned long i; - struct address_space *mapping; - struct sg_table *st; struct scatterlist *sg; - struct sgt_iter sgt_iter; struct page *page; unsigned long last_pfn = 0; /* suppress gcc warning */ - unsigned int max_segment = i915_sg_segment_size(); - unsigned int sg_page_sizes; gfp_t noreclaim; int ret; /* - * Assert that the object is not currently in any GPU domain. As it - * wasn't in the GTT, there shouldn't be any way it could have been in - * a GPU cache - */ - GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); - GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); - - /* * If there's no chance of allocating enough pages for the whole * object, bail early. */ - if (obj->base.size > resource_size(&mem->region)) + if (size > resource_size(&mr->region)) return -ENOMEM; - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (!st) + if (sg_alloc_table(st, page_count, GFP_KERNEL)) return -ENOMEM; -rebuild_st: - if (sg_alloc_table(st, page_count, GFP_KERNEL)) { - kfree(st); - return -ENOMEM; - } - /* * Get the list of pages out of our struct file. They'll be pinned * at this point until we release them. * * Fail silently without starting the shrinker */ - mapping = obj->base.filp->f_mapping; mapping_set_unevictable(mapping); noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM); noreclaim |= __GFP_NORETRY | __GFP_NOWARN; sg = st->sgl; st->nents = 0; - sg_page_sizes = 0; for (i = 0; i < page_count; i++) { const unsigned int shrink[] = { I915_SHRINK_BOUND | I915_SHRINK_UNBOUND, @@ -135,10 +140,9 @@ rebuild_st: if (!i || sg->length >= max_segment || page_to_pfn(page) != last_pfn + 1) { - if (i) { - sg_page_sizes |= sg->length; + if (i) sg = sg_next(sg); - } + st->nents++; sg_set_page(sg, page, PAGE_SIZE, 0); } else { @@ -149,14 +153,67 @@ rebuild_st: /* Check that the i965g/gm workaround works. */ GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL); } - if (sg) { /* loop terminated early; short sg table */ - sg_page_sizes |= sg->length; + if (sg) /* loop terminated early; short sg table */ sg_mark_end(sg); - } /* Trim unused sg entries to avoid wasting memory. */ i915_sg_trim(st); + return 0; +err_sg: + sg_mark_end(sg); + if (sg != st->sgl) { + shmem_sg_free_table(st, mapping, false, false); + } else { + mapping_clear_unevictable(mapping); + sg_free_table(st); + } + + /* + * shmemfs first checks if there is enough memory to allocate the page + * and reports ENOSPC should there be insufficient, along with the usual + * ENOMEM for a genuine allocation failure. + * + * We use ENOSPC in our driver to mean that we have run out of aperture + * space and so want to translate the error from shmemfs back to our + * usual understanding of ENOMEM. + */ + if (ret == -ENOSPC) + ret = -ENOMEM; + + return ret; +} + +static int shmem_get_pages(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + struct intel_memory_region *mem = obj->mm.region; + struct address_space *mapping = obj->base.filp->f_mapping; + const unsigned long page_count = obj->base.size / PAGE_SIZE; + unsigned int max_segment = i915_sg_segment_size(); + struct sg_table *st; + struct sgt_iter sgt_iter; + struct page *page; + int ret; + + /* + * Assert that the object is not currently in any GPU domain. 
As it + * wasn't in the GTT, there shouldn't be any way it could have been in + * a GPU cache + */ + GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); + GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); + +rebuild_st: + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + ret = shmem_sg_alloc_table(i915, st, obj->base.size, mem, mapping, + max_segment); + if (ret) + goto err_st; + ret = i915_gem_gtt_prepare_pages(obj, st); if (ret) { /* @@ -168,6 +225,7 @@ rebuild_st: for_each_sgt_page(page, sgt_iter, st) put_page(page); sg_free_table(st); + kfree(st); max_segment = PAGE_SIZE; goto rebuild_st; @@ -185,28 +243,12 @@ rebuild_st: if (i915_gem_object_can_bypass_llc(obj)) obj->cache_dirty = true; - __i915_gem_object_set_pages(obj, st, sg_page_sizes); + __i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl)); return 0; -err_sg: - sg_mark_end(sg); err_pages: - mapping_clear_unevictable(mapping); - if (sg != st->sgl) { - struct pagevec pvec; - - pagevec_init(&pvec); - for_each_sgt_page(page, sgt_iter, st) { - if (!pagevec_add(&pvec, page)) - check_release_pagevec(&pvec); - } - if (pagevec_count(&pvec)) - check_release_pagevec(&pvec); - } - sg_free_table(st); - kfree(st); - + shmem_sg_free_table(st, mapping, false, false); /* * shmemfs first checks if there is enough memory to allocate the page * and reports ENOSPC should there be insufficient, along with the usual @@ -216,13 +258,16 @@ err_pages: * space and so want to translate the error from shmemfs back to our * usual understanding of ENOMEM. */ +err_st: if (ret == -ENOSPC) ret = -ENOMEM; + kfree(st); + return ret; } -static void +static int shmem_truncate(struct drm_i915_gem_object *obj) { /* @@ -234,12 +279,12 @@ shmem_truncate(struct drm_i915_gem_object *obj) shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); obj->mm.madv = __I915_MADV_PURGED; obj->mm.pages = ERR_PTR(-EFAULT); + + return 0; } -static void -shmem_writeback(struct drm_i915_gem_object *obj) +void __shmem_writeback(size_t size, struct address_space *mapping) { - struct address_space *mapping; struct writeback_control wbc = { .sync_mode = WB_SYNC_NONE, .nr_to_write = SWAP_CLUSTER_MAX, @@ -255,10 +300,9 @@ shmem_writeback(struct drm_i915_gem_object *obj) * instead of invoking writeback so they are aged and paged out * as normal. 
*/ - mapping = obj->base.filp->f_mapping; /* Begin writeback on each dirty page */ - for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) { + for (i = 0; i < size >> PAGE_SHIFT; i++) { struct page *page; page = find_lock_page(mapping, i); @@ -281,6 +325,12 @@ put: } } +static void +shmem_writeback(struct drm_i915_gem_object *obj) +{ + __shmem_writeback(obj->base.size, obj->base.filp->f_mapping); +} + void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages, @@ -313,11 +363,6 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages) { - struct sgt_iter sgt_iter; - struct pagevec pvec; - struct page *page; - - GEM_WARN_ON(IS_DGFX(to_i915(obj->base.dev))); __i915_gem_object_release_shmem(obj, pages, true); i915_gem_gtt_finish_pages(obj, pages); @@ -325,25 +370,10 @@ void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_ if (i915_gem_object_needs_bit17_swizzle(obj)) i915_gem_object_save_bit_17_swizzle(obj, pages); - mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping); - - pagevec_init(&pvec); - for_each_sgt_page(page, sgt_iter, pages) { - if (obj->mm.dirty) - set_page_dirty(page); - - if (obj->mm.madv == I915_MADV_WILLNEED) - mark_page_accessed(page); - - if (!pagevec_add(&pvec, page)) - check_release_pagevec(&pvec); - } - if (pagevec_count(&pvec)) - check_release_pagevec(&pvec); - obj->mm.dirty = false; - - sg_free_table(pages); + shmem_sg_free_table(pages, file_inode(obj->base.filp)->i_mapping, + obj->mm.dirty, obj->mm.madv == I915_MADV_WILLNEED); kfree(pages); + obj->mm.dirty = false; } static void @@ -634,9 +664,10 @@ static int init_shmem(struct intel_memory_region *mem) return 0; /* Don't error, we can simply fallback to the kernel mnt */ } -static void release_shmem(struct intel_memory_region *mem) +static int release_shmem(struct intel_memory_region *mem) { i915_gemfs_fini(mem->i915); + return 0; } static const struct intel_memory_region_ops shmem_region_ops = { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index 5ab136ffdeb2..157a9765f483 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -15,7 +15,6 @@ #include "gt/intel_gt_requests.h" -#include "dma_resv_utils.h" #include "i915_trace.h" static bool swap_available(void) @@ -56,19 +55,25 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj, return false; } -static void try_to_writeback(struct drm_i915_gem_object *obj, - unsigned int flags) +static int try_to_writeback(struct drm_i915_gem_object *obj, unsigned int flags) { + if (obj->ops->shrinker_release_pages) + return obj->ops->shrinker_release_pages(obj, + !(flags & I915_SHRINK_ACTIVE), + flags & I915_SHRINK_WRITEBACK); + switch (obj->mm.madv) { case I915_MADV_DONTNEED: i915_gem_object_truncate(obj); - return; + return 0; case __I915_MADV_PURGED: - return; + return 0; } if (flags & I915_SHRINK_WRITEBACK) i915_gem_object_writeback(obj); + + return 0; } /** @@ -222,15 +227,13 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww, } if (!__i915_gem_object_put_pages(obj)) { - try_to_writeback(obj, shrink); - count += obj->base.size >> PAGE_SHIFT; + if (!try_to_writeback(obj, shrink)) + count += obj->base.size >> PAGE_SHIFT; } if (!ww) i915_gem_object_unlock(obj); } - dma_resv_prune(obj->base.resv); - scanned += obj->base.size >> PAGE_SHIFT; skip: i915_gem_object_put(obj); @@ -458,6 +461,16 
@@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915, #define obj_to_i915(obj__) to_i915((obj__)->base.dev) +/** + * i915_gem_object_make_unshrinkable - Hide the object from the shrinker. By + * default all object types that support shrinking (see IS_SHRINKABLE) will also + * make the object visible to the shrinker after allocating the system memory + * pages. + * @obj: The GEM object. + * + * This is typically used for special kernel internal objects that can't be + * easily processed by the shrinker, like if they are perma-pinned. + */ void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = obj_to_i915(obj); @@ -482,13 +495,12 @@ void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj) spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } -static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj, - struct list_head *head) +static void ___i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj, + struct list_head *head) { struct drm_i915_private *i915 = obj_to_i915(obj); unsigned long flags; - GEM_BUG_ON(!i915_gem_object_has_pages(obj)); if (!i915_gem_object_is_shrinkable(obj)) return; @@ -508,14 +520,67 @@ static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj, spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } +/** + * __i915_gem_object_make_shrinkable - Move the object to the tail of the + * shrinkable list. Objects on this list might be swapped out. Used with + * WILLNEED objects. + * @obj: The GEM object. + * + * DO NOT USE. This is intended to be called on very special objects that don't + * yet have mm.pages, but are guaranteed to have potentially reclaimable pages + * underneath. + */ +void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj) +{ + ___i915_gem_object_make_shrinkable(obj, + &obj_to_i915(obj)->mm.shrink_list); +} + +/** + * __i915_gem_object_make_purgeable - Move the object to the tail of the + * purgeable list. Objects on this list might be swapped out. Used with + * DONTNEED objects. + * @obj: The GEM object. + * + * DO NOT USE. This is intended to be called on very special objects that don't + * yet have mm.pages, but are guaranteed to have potentially reclaimable pages + * underneath. + */ +void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj) +{ + ___i915_gem_object_make_shrinkable(obj, + &obj_to_i915(obj)->mm.purge_list); +} + +/** + * i915_gem_object_make_shrinkable - Move the object to the tail of the + * shrinkable list. Objects on this list might be swapped out. Used with + * WILLNEED objects. + * @obj: The GEM object. + * + * MUST only be called on objects which have backing pages. + * + * MUST be balanced with previous call to i915_gem_object_make_unshrinkable(). + */ void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj) { - __i915_gem_object_make_shrinkable(obj, - &obj_to_i915(obj)->mm.shrink_list); + GEM_BUG_ON(!i915_gem_object_has_pages(obj)); + __i915_gem_object_make_shrinkable(obj); } +/** + * i915_gem_object_make_purgeable - Move the object to the tail of the purgeable + * list. Used with DONTNEED objects. Unlike with shrinkable objects, the + * shrinker will attempt to discard the backing pages, instead of trying to swap + * them out. + * @obj: The GEM object. + * + * MUST only be called on objects which have backing pages. + * + * MUST be balanced with previous call to i915_gem_object_make_unshrinkable().
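 + *
 + * As a hedged usage sketch (editorial illustration, not code from this
 + * patch), the make_* calls are expected to pair up like:
 + *
 + *	i915_gem_object_make_unshrinkable(obj);
 + *	... work on the pages without shrinker interference ...
 + *	if (obj->mm.madv == I915_MADV_WILLNEED)
 + *		i915_gem_object_make_shrinkable(obj);
 + *	else
 + *		i915_gem_object_make_purgeable(obj);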
+ */ void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj) { - __i915_gem_object_make_shrinkable(obj, - &obj_to_i915(obj)->mm.purge_list); + GEM_BUG_ON(!i915_gem_object_has_pages(obj)); + __i915_gem_object_make_purgeable(obj); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index ddd37ccb1362..bce03d74a0b4 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -399,7 +399,7 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem) return 0; } - if (intel_vtd_active() && GRAPHICS_VER(i915) < 8) { + if (intel_vtd_active(i915) && GRAPHICS_VER(i915) < 8) { drm_notice(&i915->drm, "%s, disabling use of stolen memory\n", "DMAR active"); @@ -720,9 +720,10 @@ static int init_stolen_smem(struct intel_memory_region *mem) return i915_gem_init_stolen(mem); } -static void release_stolen_smem(struct intel_memory_region *mem) +static int release_stolen_smem(struct intel_memory_region *mem) { i915_gem_cleanup_stolen(mem->i915); + return 0; } static const struct intel_memory_region_ops i915_region_stolen_smem_ops = { @@ -759,10 +760,11 @@ err_fini: return err; } -static void release_stolen_lmem(struct intel_memory_region *mem) +static int release_stolen_lmem(struct intel_memory_region *mem) { io_mapping_fini(&mem->iomap); i915_gem_cleanup_stolen(mem->i915); + return 0; } static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index 74a1ffd0d7dd..218a9b3037c7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -14,13 +14,9 @@ #include "gem/i915_gem_object.h" #include "gem/i915_gem_region.h" #include "gem/i915_gem_ttm.h" +#include "gem/i915_gem_ttm_move.h" #include "gem/i915_gem_ttm_pm.h" - -#include "gt/intel_engine_pm.h" -#include "gt/intel_gt.h" -#include "gt/intel_migrate.h" - #define I915_TTM_PRIO_PURGE 0 #define I915_TTM_PRIO_NO_PAGES 1 #define I915_TTM_PRIO_HAS_PAGES 2 @@ -34,7 +30,9 @@ * struct i915_ttm_tt - TTM page vector with additional private information * @ttm: The base TTM page vector. * @dev: The struct device used for dma mapping and unmapping. - * @cached_st: The cached scatter-gather table. + * @cached_rsgt: The cached scatter-gather table. + * @is_shmem: Set if using shmem. + * @filp: The shmem file, if using shmem backend. * * Note that DMA may be going on right up to the point where the page- * vector is unpopulated in delayed destroy. Hence keep the @@ -45,7 +43,10 @@ struct i915_ttm_tt { struct ttm_tt ttm; struct device *dev; - struct sg_table *cached_st; + struct i915_refct_sgt cached_rsgt; + + bool is_shmem; + struct file *filp; }; static const struct ttm_place sys_placement_flags = { @@ -103,37 +104,15 @@ static int i915_ttm_err_to_gem(int err) return err; } -static bool gpu_binds_iomem(struct ttm_resource *mem) -{ - return mem->mem_type != TTM_PL_SYSTEM; -} - -static bool cpu_maps_iomem(struct ttm_resource *mem) -{ - /* Once / if we support GGTT, this is also false for cached ttm_tts */ - return mem->mem_type != TTM_PL_SYSTEM; -} - -static enum i915_cache_level -i915_ttm_cache_level(struct drm_i915_private *i915, struct ttm_resource *res, - struct ttm_tt *ttm) -{ - return ((HAS_LLC(i915) || HAS_SNOOP(i915)) && !gpu_binds_iomem(res) && - ttm->caching == ttm_cached) ? 
I915_CACHE_LLC : - I915_CACHE_NONE; -} - -static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj); - static enum ttm_caching i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj) { /* - * Objects only allowed in system get cached cpu-mappings. - * Other objects get WC mapping for now. Even if in system. + * Objects only allowed in system get cached cpu-mappings, or when + * evicting lmem-only buffers to system for swapping. Other objects get + * WC mapping for now. Even if in system. */ - if (obj->mm.region->type == INTEL_MEMORY_SYSTEM && - obj->mm.n_placements <= 1) + if (obj->mm.n_placements <= 1) return ttm_cached; return ttm_write_combined; @@ -179,15 +158,103 @@ i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj, placement->busy_placement = busy; } +static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev, + struct ttm_tt *ttm, + struct ttm_operation_ctx *ctx) +{ + struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev); + struct intel_memory_region *mr = i915->mm.regions[INTEL_MEMORY_SYSTEM]; + struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); + const unsigned int max_segment = i915_sg_segment_size(); + const size_t size = ttm->num_pages << PAGE_SHIFT; + struct file *filp = i915_tt->filp; + struct sgt_iter sgt_iter; + struct sg_table *st; + struct page *page; + unsigned long i; + int err; + + if (!filp) { + struct address_space *mapping; + gfp_t mask; + + filp = shmem_file_setup("i915-shmem-tt", size, VM_NORESERVE); + if (IS_ERR(filp)) + return PTR_ERR(filp); + + mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; + + mapping = filp->f_mapping; + mapping_set_gfp_mask(mapping, mask); + GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM)); + + i915_tt->filp = filp; + } + + st = &i915_tt->cached_rsgt.table; + err = shmem_sg_alloc_table(i915, st, size, mr, filp->f_mapping, + max_segment); + if (err) + return err; + + err = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC); + if (err) + goto err_free_st; + + i = 0; + for_each_sgt_page(page, sgt_iter, st) + ttm->pages[i++] = page; + + if (ttm->page_flags & TTM_TT_FLAG_SWAPPED) + ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED; + + return 0; + +err_free_st: + shmem_sg_free_table(st, filp->f_mapping, false, false); + + return err; +} + +static void i915_ttm_tt_shmem_unpopulate(struct ttm_tt *ttm) +{ + struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); + bool backup = ttm->page_flags & TTM_TT_FLAG_SWAPPED; + struct sg_table *st = &i915_tt->cached_rsgt.table; + + shmem_sg_free_table(st, file_inode(i915_tt->filp)->i_mapping, + backup, backup); +} + +static void i915_ttm_tt_release(struct kref *ref) +{ + struct i915_ttm_tt *i915_tt = + container_of(ref, typeof(*i915_tt), cached_rsgt.kref); + struct sg_table *st = &i915_tt->cached_rsgt.table; + + GEM_WARN_ON(st->sgl); + + kfree(i915_tt); +} + +static const struct i915_refct_sgt_ops tt_rsgt_ops = { + .release = i915_ttm_tt_release +}; + static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) { struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, bo->resource->mem_type); struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + enum ttm_caching caching; struct i915_ttm_tt *i915_tt; int ret; + if (!obj) + return NULL; + i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL); if (!i915_tt) return NULL; @@ -196,38 +263,66 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo, man->use_tt) page_flags |= TTM_TT_FLAG_ZERO_ALLOC; - ret = 
ttm_tt_init(&i915_tt->ttm, bo, page_flags, - i915_ttm_select_tt_caching(obj)); - if (ret) { - kfree(i915_tt); - return NULL; + caching = i915_ttm_select_tt_caching(obj); + if (i915_gem_object_is_shrinkable(obj) && caching == ttm_cached) { + page_flags |= TTM_TT_FLAG_EXTERNAL | + TTM_TT_FLAG_EXTERNAL_MAPPABLE; + i915_tt->is_shmem = true; } + ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, caching); + if (ret) + goto err_free; + + __i915_refct_sgt_init(&i915_tt->cached_rsgt, bo->base.size, + &tt_rsgt_ops); + i915_tt->dev = obj->base.dev->dev; return &i915_tt->ttm; + +err_free: + kfree(i915_tt); + return NULL; +} + +static int i915_ttm_tt_populate(struct ttm_device *bdev, + struct ttm_tt *ttm, + struct ttm_operation_ctx *ctx) +{ + struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); + + if (i915_tt->is_shmem) + return i915_ttm_tt_shmem_populate(bdev, ttm, ctx); + + return ttm_pool_alloc(&bdev->pool, ttm, ctx); } static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm) { struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); + struct sg_table *st = &i915_tt->cached_rsgt.table; - if (i915_tt->cached_st) { - dma_unmap_sgtable(i915_tt->dev, i915_tt->cached_st, - DMA_BIDIRECTIONAL, 0); - sg_free_table(i915_tt->cached_st); - kfree(i915_tt->cached_st); - i915_tt->cached_st = NULL; + if (st->sgl) + dma_unmap_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0); + + if (i915_tt->is_shmem) { + i915_ttm_tt_shmem_unpopulate(ttm); + } else { + sg_free_table(st); + ttm_pool_free(&bdev->pool, ttm); } - ttm_pool_free(&bdev->pool, ttm); } static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) { struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); + if (i915_tt->filp) + fput(i915_tt->filp); + ttm_tt_fini(ttm); - kfree(i915_tt); + i915_refct_sgt_put(&i915_tt->cached_rsgt); } static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo, @@ -235,6 +330,17 @@ static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo, { struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + if (!obj) + return false; + + /* + * EXTERNAL objects should never be swapped out by TTM; instead we need + * to handle that ourselves. TTM will already skip such objects for us, + * but we would like to avoid grabbing locks for no good reason. + */ + if (bo->ttm && bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) + return false; + /* Will do for now. Our pinned objects are still on TTM's LRU lists */ return i915_gem_object_evictable(obj); } @@ -245,28 +351,19 @@ static void i915_ttm_evict_flags(struct ttm_buffer_object *bo, *placement = i915_sys_placement; } -static int i915_ttm_move_notify(struct ttm_buffer_object *bo) -{ - struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); - int ret; - - ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE); - if (ret) - return ret; - - ret = __i915_gem_object_put_pages(obj); - if (ret) - return ret; - - return 0; -} - -static void i915_ttm_free_cached_io_st(struct drm_i915_gem_object *obj) +/** + * i915_ttm_free_cached_io_rsgt - Free object cached LMEM information + * @obj: The GEM object + * + * This function frees any LMEM-related information that is cached on + * the object.
For example the radix tree for fast page lookup and the + * cached refcounted sg-table + */ +void i915_ttm_free_cached_io_rsgt(struct drm_i915_gem_object *obj) { struct radix_tree_iter iter; void __rcu **slot; - if (!obj->ttm.cached_io_st) + if (!obj->ttm.cached_io_rsgt) return; rcu_read_lock(); @@ -274,93 +371,106 @@ static void i915_ttm_free_cached_io_st(struct drm_i915_gem_object *obj) radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index); rcu_read_unlock(); - sg_free_table(obj->ttm.cached_io_st); - kfree(obj->ttm.cached_io_st); - obj->ttm.cached_io_st = NULL; + i915_refct_sgt_put(obj->ttm.cached_io_rsgt); + obj->ttm.cached_io_rsgt = NULL; } -static void -i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj) +/** + * i915_ttm_purge - Clear an object of its memory + * @obj: The object + * + * This function is called to clear an object of its memory when it is + * marked as not needed anymore. + * + * Return: 0 on success, negative error code on failure. + */ +int i915_ttm_purge(struct drm_i915_gem_object *obj) { struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + struct i915_ttm_tt *i915_tt = + container_of(bo->ttm, typeof(*i915_tt), ttm); + struct ttm_operation_ctx ctx = { + .interruptible = true, + .no_wait_gpu = false, + }; + struct ttm_placement place = {}; + int ret; - if (cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) { - obj->write_domain = I915_GEM_DOMAIN_WC; - obj->read_domains = I915_GEM_DOMAIN_WC; - } else { - obj->write_domain = I915_GEM_DOMAIN_CPU; - obj->read_domains = I915_GEM_DOMAIN_CPU; - } -} + if (obj->mm.madv == __I915_MADV_PURGED) + return 0; -static void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj) -{ - struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); - unsigned int cache_level; - unsigned int i; + ret = ttm_bo_validate(bo, &place, &ctx); + if (ret) + return ret; - /* - * If object was moved to an allowable region, update the object - * region to consider it migrated. Note that if it's currently not - * in an allowable region, it's evicted and we don't update the - * object region. - */ - if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) { - for (i = 0; i < obj->mm.n_placements; ++i) { - struct intel_memory_region *mr = obj->mm.placements[i]; - - if (intel_region_to_ttm_type(mr) == bo->resource->mem_type && - mr != obj->mm.region) { - i915_gem_object_release_memory_region(obj); - i915_gem_object_init_memory_region(obj, mr); - break; - } - } + if (bo->ttm && i915_tt->filp) { + /* + * The below fput() (which eventually calls shmem_truncate) + * might be delayed by a worker, so when directly called to + * purge the pages (like by the shrinker) we should try to be + * more aggressive and release the pages immediately. + */ + shmem_truncate_range(file_inode(i915_tt->filp), + 0, (loff_t)-1); + fput(fetch_and_zero(&i915_tt->filp)); } - obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM); - - obj->mem_flags |= cpu_maps_iomem(bo->resource) ?
I915_BO_FLAG_IOMEM : - I915_BO_FLAG_STRUCT_PAGE; + obj->write_domain = 0; + obj->read_domains = 0; + i915_ttm_adjust_gem_after_move(obj); + i915_ttm_free_cached_io_rsgt(obj); + obj->mm.madv = __I915_MADV_PURGED; - cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource, - bo->ttm); - i915_gem_object_set_cache_coherency(obj, cache_level); + return 0; } -static void i915_ttm_purge(struct drm_i915_gem_object *obj) +static int i915_ttm_shrinker_release_pages(struct drm_i915_gem_object *obj, + bool no_wait_gpu, + bool should_writeback) { struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + struct i915_ttm_tt *i915_tt = + container_of(bo->ttm, typeof(*i915_tt), ttm); struct ttm_operation_ctx ctx = { .interruptible = true, - .no_wait_gpu = false, + .no_wait_gpu = no_wait_gpu, }; struct ttm_placement place = {}; int ret; - if (obj->mm.madv == __I915_MADV_PURGED) - return; + if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM) + return 0; + + GEM_BUG_ON(!i915_tt->is_shmem); + + if (!i915_tt->filp) + return 0; + + ret = ttm_bo_wait_ctx(bo, &ctx); + if (ret) + return ret; - /* TTM's purge interface. Note that we might be reentering. */ + switch (obj->mm.madv) { + case I915_MADV_DONTNEED: + return i915_ttm_purge(obj); + case __I915_MADV_PURGED: + return 0; + } + + if (bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED) + return 0; + + bo->ttm->page_flags |= TTM_TT_FLAG_SWAPPED; ret = ttm_bo_validate(bo, &place, &ctx); - if (!ret) { - obj->write_domain = 0; - obj->read_domains = 0; - i915_ttm_adjust_gem_after_move(obj); - i915_ttm_free_cached_io_st(obj); - obj->mm.madv = __I915_MADV_PURGED; + if (ret) { + bo->ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED; + return ret; } -} -static void i915_ttm_swap_notify(struct ttm_buffer_object *bo) -{ - struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); - int ret = i915_ttm_move_notify(bo); + if (should_writeback) + __shmem_writeback(obj->base.size, i915_tt->filp->f_mapping); - GEM_WARN_ON(ret); - GEM_WARN_ON(obj->ttm.cached_io_st); - if (!ret && obj->mm.madv != I915_MADV_WILLNEED) - i915_ttm_purge(obj); + return 0; } static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo) @@ -369,232 +479,101 @@ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo) if (likely(obj)) { __i915_gem_object_pages_fini(obj); - i915_ttm_free_cached_io_st(obj); + i915_ttm_free_cached_io_rsgt(obj); } } -static struct intel_memory_region * -i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type) -{ - struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev); - - /* There's some room for optimization here... 
*/ - GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM && - ttm_mem_type < I915_PL_LMEM0); - if (ttm_mem_type == I915_PL_SYSTEM) - return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM, - 0); - - return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL, - ttm_mem_type - I915_PL_LMEM0); -} - -static struct sg_table *i915_ttm_tt_get_st(struct ttm_tt *ttm) +static struct i915_refct_sgt *i915_ttm_tt_get_st(struct ttm_tt *ttm) { struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); struct sg_table *st; int ret; - if (i915_tt->cached_st) - return i915_tt->cached_st; - - st = kzalloc(sizeof(*st), GFP_KERNEL); - if (!st) - return ERR_PTR(-ENOMEM); + if (i915_tt->cached_rsgt.table.sgl) + return i915_refct_sgt_get(&i915_tt->cached_rsgt); + st = &i915_tt->cached_rsgt.table; ret = sg_alloc_table_from_pages_segment(st, ttm->pages, ttm->num_pages, 0, (unsigned long)ttm->num_pages << PAGE_SHIFT, i915_sg_segment_size(), GFP_KERNEL); if (ret) { - kfree(st); + st->sgl = NULL; return ERR_PTR(ret); } ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0); if (ret) { sg_free_table(st); - kfree(st); return ERR_PTR(ret); } - i915_tt->cached_st = st; - return st; + return i915_refct_sgt_get(&i915_tt->cached_rsgt); } -static struct sg_table * +/** + * i915_ttm_resource_get_st - Get a refcounted sg-table pointing to the + * resource memory + * @obj: The GEM object used for sg-table caching + * @res: The struct ttm_resource for which an sg-table is requested. + * + * This function returns a refcounted sg-table representing the memory + * pointed to by @res. If @res is the object's current resource it may also + * cache the sg_table on the object or attempt to access an already cached + * sg-table. The refcounted sg-table needs to be put when no longer in use. + * + * Return: A valid pointer to a struct i915_refct_sgt or error pointer on + * failure. + */ +struct i915_refct_sgt * i915_ttm_resource_get_st(struct drm_i915_gem_object *obj, struct ttm_resource *res) { struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); - if (!gpu_binds_iomem(res)) + if (!i915_ttm_gtt_binds_lmem(res)) return i915_ttm_tt_get_st(bo->ttm); /* * If CPU mapping differs, we need to add the ttm_tt pages to * the resulting st. Might make sense for GGTT.
*/ - GEM_WARN_ON(!cpu_maps_iomem(res)); - return intel_region_ttm_resource_to_st(obj->mm.region, res); -} - -static int i915_ttm_accel_move(struct ttm_buffer_object *bo, - bool clear, - struct ttm_resource *dst_mem, - struct ttm_tt *dst_ttm, - struct sg_table *dst_st) -{ - struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915), - bdev); - struct ttm_resource_manager *src_man = - ttm_manager_type(bo->bdev, bo->resource->mem_type); - struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); - struct sg_table *src_st; - struct i915_request *rq; - struct ttm_tt *src_ttm = bo->ttm; - enum i915_cache_level src_level, dst_level; - int ret; - - if (!i915->gt.migrate.context || intel_gt_is_wedged(&i915->gt)) - return -EINVAL; + GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(res)); + if (bo->resource == res) { + if (!obj->ttm.cached_io_rsgt) { + struct i915_refct_sgt *rsgt; - dst_level = i915_ttm_cache_level(i915, dst_mem, dst_ttm); - if (clear) { - if (bo->type == ttm_bo_type_kernel) - return -EINVAL; + rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region, + res); + if (IS_ERR(rsgt)) + return rsgt; - intel_engine_pm_get(i915->gt.migrate.context->engine); - ret = intel_context_migrate_clear(i915->gt.migrate.context, NULL, - dst_st->sgl, dst_level, - gpu_binds_iomem(dst_mem), - 0, &rq); - - if (!ret && rq) { - i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); - i915_request_put(rq); - } - intel_engine_pm_put(i915->gt.migrate.context->engine); - } else { - src_st = src_man->use_tt ? i915_ttm_tt_get_st(src_ttm) : - obj->ttm.cached_io_st; - - src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm); - intel_engine_pm_get(i915->gt.migrate.context->engine); - ret = intel_context_migrate_copy(i915->gt.migrate.context, - NULL, src_st->sgl, src_level, - gpu_binds_iomem(bo->resource), - dst_st->sgl, dst_level, - gpu_binds_iomem(dst_mem), - &rq); - if (!ret && rq) { - i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); - i915_request_put(rq); + obj->ttm.cached_io_rsgt = rsgt; } - intel_engine_pm_put(i915->gt.migrate.context->engine); + return i915_refct_sgt_get(obj->ttm.cached_io_rsgt); } - return ret; -} - -static void __i915_ttm_move(struct ttm_buffer_object *bo, bool clear, - struct ttm_resource *dst_mem, - struct ttm_tt *dst_ttm, - struct sg_table *dst_st, - bool allow_accel) -{ - int ret = -EINVAL; - - if (allow_accel) - ret = i915_ttm_accel_move(bo, clear, dst_mem, dst_ttm, dst_st); - if (ret) { - struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); - struct intel_memory_region *dst_reg, *src_reg; - union { - struct ttm_kmap_iter_tt tt; - struct ttm_kmap_iter_iomap io; - } _dst_iter, _src_iter; - struct ttm_kmap_iter *dst_iter, *src_iter; - - dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type); - src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type); - GEM_BUG_ON(!dst_reg || !src_reg); - - dst_iter = !cpu_maps_iomem(dst_mem) ? - ttm_kmap_iter_tt_init(&_dst_iter.tt, dst_ttm) : - ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap, - dst_st, dst_reg->region.start); - - src_iter = !cpu_maps_iomem(bo->resource) ? 
- ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm) : - ttm_kmap_iter_iomap_init(&_src_iter.io, &src_reg->iomap, - obj->ttm.cached_io_st, - src_reg->region.start); - - ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter); - } + return intel_region_ttm_resource_to_rsgt(obj->mm.region, res); } -static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict, - struct ttm_operation_ctx *ctx, - struct ttm_resource *dst_mem, - struct ttm_place *hop) +static void i915_ttm_swap_notify(struct ttm_buffer_object *bo) { struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); - struct ttm_resource_manager *dst_man = - ttm_manager_type(bo->bdev, dst_mem->mem_type); - struct ttm_tt *ttm = bo->ttm; - struct sg_table *dst_st; - bool clear; int ret; - /* Sync for now. We could do the actual copy async. */ - ret = ttm_bo_wait_ctx(bo, ctx); - if (ret) - return ret; + if (!obj) + return; ret = i915_ttm_move_notify(bo); - if (ret) - return ret; - - if (obj->mm.madv != I915_MADV_WILLNEED) { + GEM_WARN_ON(ret); + GEM_WARN_ON(obj->ttm.cached_io_rsgt); + if (!ret && obj->mm.madv != I915_MADV_WILLNEED) i915_ttm_purge(obj); - ttm_resource_free(bo, &dst_mem); - return 0; - } - - /* Populate ttm with pages if needed. Typically system memory. */ - if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) { - ret = ttm_tt_populate(bo->bdev, ttm, ctx); - if (ret) - return ret; - } - - dst_st = i915_ttm_resource_get_st(obj, dst_mem); - if (IS_ERR(dst_st)) - return PTR_ERR(dst_st); - - clear = !cpu_maps_iomem(bo->resource) && (!ttm || !ttm_tt_is_populated(ttm)); - if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC))) - __i915_ttm_move(bo, clear, dst_mem, bo->ttm, dst_st, true); - - ttm_bo_move_sync_cleanup(bo, dst_mem); - i915_ttm_adjust_domains_after_move(obj); - i915_ttm_free_cached_io_st(obj); - - if (gpu_binds_iomem(dst_mem) || cpu_maps_iomem(dst_mem)) { - obj->ttm.cached_io_st = dst_st; - obj->ttm.get_io_page.sg_pos = dst_st->sgl; - obj->ttm.get_io_page.sg_idx = 0; - } - - i915_ttm_adjust_gem_after_move(obj); - return 0; } static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem) { - if (!cpu_maps_iomem(mem)) + if (!i915_ttm_cpu_maps_iomem(mem)) return 0; mem->bus.caching = ttm_write_combined; @@ -607,19 +586,26 @@ static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo, unsigned long page_offset) { struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); - unsigned long base = obj->mm.region->iomap.base - obj->mm.region->region.start; struct scatterlist *sg; + unsigned long base; unsigned int ofs; + GEM_BUG_ON(!obj); GEM_WARN_ON(bo->ttm); + base = obj->mm.region->iomap.base - obj->mm.region->region.start; sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true); return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs; } +/* + * All callbacks need to take care not to downcast a struct ttm_buffer_object + * without checking its subclass, since it might be a TTM ghost object. 
+ */ static struct ttm_device_funcs i915_ttm_bo_driver = { .ttm_tt_create = i915_ttm_tt_create, + .ttm_tt_populate = i915_ttm_tt_populate, .ttm_tt_unpopulate = i915_ttm_tt_unpopulate, .ttm_tt_destroy = i915_ttm_tt_destroy, .eviction_valuable = i915_ttm_eviction_valuable, @@ -649,7 +635,6 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj, .interruptible = true, .no_wait_gpu = false, }; - struct sg_table *st; int real_num_busy; int ret; @@ -676,7 +661,6 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj, return i915_ttm_err_to_gem(ret); } - i915_ttm_adjust_lru(obj); if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) { ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx); if (ret) @@ -687,14 +671,19 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj, } if (!i915_gem_object_has_pages(obj)) { - /* Object either has a page vector or is an iomem object */ - st = bo->ttm ? i915_ttm_tt_get_st(bo->ttm) : obj->ttm.cached_io_st; - if (IS_ERR(st)) - return PTR_ERR(st); + struct i915_refct_sgt *rsgt = + i915_ttm_resource_get_st(obj, bo->resource); + + if (IS_ERR(rsgt)) + return PTR_ERR(rsgt); - __i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl)); + GEM_BUG_ON(obj->mm.rsgt); + obj->mm.rsgt = rsgt; + __i915_gem_object_set_pages(obj, &rsgt->table, + i915_sg_dma_sizes(rsgt->table.sgl)); } + i915_ttm_adjust_lru(obj); return ret; } @@ -766,12 +755,21 @@ static void i915_ttm_put_pages(struct drm_i915_gem_object *obj, * and shrinkers will move it out if needed. */ - i915_ttm_adjust_lru(obj); + if (obj->mm.rsgt) + i915_refct_sgt_put(fetch_and_zero(&obj->mm.rsgt)); } -static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj) +/** + * i915_ttm_adjust_lru - Adjust an object's position on relevant LRU lists. + * @obj: The object + */ +void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj) { struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + struct i915_ttm_tt *i915_tt = + container_of(bo->ttm, typeof(*i915_tt), ttm); + bool shrinkable = + bo->ttm && i915_tt->filp && ttm_tt_is_populated(bo->ttm); /* * Don't manipulate the TTM LRUs while in TTM bo destruction. @@ -781,10 +779,53 @@ static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj) return; /* + * We skip managing the shrinker LRU in set_pages() and just manage + * everything here. This does at least solve the issue with having + * temporary shmem mappings (like with evicted lmem) not being visible to + * the shrinker. Only our shmem objects are shrinkable, everything else + * we keep as unshrinkable. + * + * To make sure everything plays nice we keep an extra shrink pin in TTM + * if the underlying pages are not currently shrinkable. Once we release + * our pin, like when the pages are moved to shmem, the pages will then + * be added to the shrinker LRU, assuming the caller isn't also holding + * a pin. + * + * TODO: consider maybe also bumping the shrinker list here when we have + * already unpinned it, which should give us something more like an LRU. + * + * TODO: There is a small window of opportunity for this function to + * get called from eviction after we've dropped the last GEM refcount, + * but before the TTM deleted flag is set on the object. Avoid + * adjusting the shrinker list in such cases, since the object is + * not available to the shrinker anyway due to its zero refcount. + * To fix this properly we should move to a TTM shrinker LRU list for + * these objects.
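 + *
 + * As a concrete example of the pin dance described above:
 + * __i915_gem_ttm_object_init() further down creates every TTM object
 + * with an extra shrink pin, and that pin is only released here once a
 + * populated shmem-backed ttm_tt makes the pages actually reclaimable.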
+ */ + if (kref_get_unless_zero(&obj->base.refcount)) { + if (shrinkable != obj->mm.ttm_shrinkable) { + if (shrinkable) { + if (obj->mm.madv == I915_MADV_WILLNEED) + __i915_gem_object_make_shrinkable(obj); + else + __i915_gem_object_make_purgeable(obj); + } else { + i915_gem_object_make_unshrinkable(obj); + } + + obj->mm.ttm_shrinkable = shrinkable; + } + i915_gem_object_put(obj); + } + + /* * Put on the correct LRU list depending on the MADV status */ spin_lock(&bo->bdev->lru_lock); - if (obj->mm.madv != I915_MADV_WILLNEED) { + if (shrinkable) { + /* Try to keep shmem_tt from being considered for shrinking. */ + bo->priority = TTM_MAX_BO_PRIORITY - 1; + } else if (obj->mm.madv != I915_MADV_WILLNEED) { bo->priority = I915_TTM_PRIO_PURGE; } else if (!i915_gem_object_has_pages(obj)) { if (bo->priority < I915_TTM_PRIO_HAS_PAGES) @@ -823,15 +864,39 @@ static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj) static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) { struct vm_area_struct *area = vmf->vma; - struct drm_i915_gem_object *obj = - i915_ttm_to_gem(area->vm_private_data); + struct ttm_buffer_object *bo = area->vm_private_data; + struct drm_device *dev = bo->base.dev; + struct drm_i915_gem_object *obj; + vm_fault_t ret; + int idx; + + obj = i915_ttm_to_gem(bo); + if (!obj) + return VM_FAULT_SIGBUS; /* Sanity check that we allow writing into this object */ if (unlikely(i915_gem_object_is_readonly(obj) && area->vm_flags & VM_WRITE)) return VM_FAULT_SIGBUS; - return ttm_bo_vm_fault(vmf); + ret = ttm_bo_vm_reserve(bo, vmf); + if (ret) + return ret; + + if (drm_dev_enter(dev, &idx)) { + ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, + TTM_BO_VM_NUM_PREFAULT); + drm_dev_exit(idx); + } else { + ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); + } + if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) + return ret; + + i915_ttm_adjust_lru(obj); + + dma_resv_unlock(bo->base.resv); + return ret; } static int @@ -882,13 +947,18 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj) static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = { .name = "i915_gem_object_ttm", + .flags = I915_GEM_OBJECT_IS_SHRINKABLE | + I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST, .get_pages = i915_ttm_get_pages, .put_pages = i915_ttm_put_pages, .truncate = i915_ttm_purge, + .shrinker_release_pages = i915_ttm_shrinker_release_pages, + .adjust_lru = i915_ttm_adjust_lru, .delayed_free = i915_ttm_delayed_free, .migrate = i915_ttm_migrate, + .mmap_offset = i915_ttm_mmap_offset, .mmap_ops = &vm_ops_ttm, }; @@ -901,6 +971,18 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo) mutex_destroy(&obj->ttm.get_io_page.lock); if (obj->ttm.created) { + /* + * We freely manage the shrinker LRU outside of the mm.pages life + * cycle. As a result, when destroying the object we should be + * extra paranoid and ensure we remove it from the LRU, before + * we free the object. + * + * Touching the ttm_shrinkable outside of the object lock here + * should be safe now that the last GEM object ref was dropped. + */ + if (obj->mm.ttm_shrinkable) + i915_gem_object_make_unshrinkable(obj); + i915_ttm_backup_free(obj); /* This releases all gem object bindings to the backend. */ @@ -940,10 +1022,9 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem, i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags); /* Don't put on a region list until we're either locked or fully initialized.
*/ - obj->mm.region = intel_memory_region_get(mem); + obj->mm.region = mem; INIT_LIST_HEAD(&obj->mm.region_link); - i915_gem_object_make_unshrinkable(obj); INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN); mutex_init(&obj->ttm.get_io_page.lock); bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device : @@ -955,6 +1036,14 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem, GEM_BUG_ON(page_size && obj->mm.n_placements); /* + * Keep an extra shrink pin to prevent the object from being made + * shrinkable too early. If the ttm_tt is ever allocated in shmem, we + * drop the pin. The TTM backend manages the shrinker LRU itself, + * outside of the normal mm.pages life cycle. + */ + i915_gem_object_make_unshrinkable(obj); + + /* * If this function fails, it will call the destructor, but * our caller still owns the object. So no freeing in the * destructor until obj->ttm.created is true. @@ -980,6 +1069,7 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem, static const struct intel_memory_region_ops ttm_system_region_ops = { .init_object = __i915_gem_ttm_object_init, + .release = intel_region_ttm_fini, }; struct intel_memory_region * @@ -999,50 +1089,3 @@ i915_gem_ttm_system_setup(struct drm_i915_private *i915, intel_memory_region_set_name(mr, "system-ttm"); return mr; } - -/** - * i915_gem_obj_copy_ttm - Copy the contents of one ttm-based gem object to - * another - * @dst: The destination object - * @src: The source object - * @allow_accel: Allow using the blitter. Otherwise TTM memcpy is used. - * @intr: Whether to perform waits interruptible: - * - * Note: The caller is responsible for assuring that the underlying - * TTM objects are populated if needed and locked. - * - * Return: Zero on success. Negative error code on error. If @intr == true, - * then it may return -ERESTARTSYS or -EINTR. - */ -int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst, - struct drm_i915_gem_object *src, - bool allow_accel, bool intr) -{ - struct ttm_buffer_object *dst_bo = i915_gem_to_ttm(dst); - struct ttm_buffer_object *src_bo = i915_gem_to_ttm(src); - struct ttm_operation_ctx ctx = { - .interruptible = intr, - }; - struct sg_table *dst_st; - int ret; - - assert_object_held(dst); - assert_object_held(src); - - /* - * Sync for now. This will change with async moves. - */ - ret = ttm_bo_wait_ctx(dst_bo, &ctx); - if (!ret) - ret = ttm_bo_wait_ctx(src_bo, &ctx); - if (ret) - return ret; - - dst_st = gpu_binds_iomem(dst_bo->resource) ? 
- dst->ttm.cached_io_st : i915_ttm_tt_get_st(dst_bo->ttm); - - __i915_ttm_move(src_bo, false, dst_bo->resource, dst_bo->ttm, - dst_st, allow_accel); - - return 0; -} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h index 0b7291dd897c..9d698ad00853 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h @@ -5,6 +5,8 @@ #ifndef _I915_GEM_TTM_H_ #define _I915_GEM_TTM_H_ +#include <drm/ttm/ttm_placement.h> + #include "gem/i915_gem_object_types.h" /** @@ -35,7 +37,7 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo); static inline struct drm_i915_gem_object * i915_ttm_to_gem(struct ttm_buffer_object *bo) { - if (GEM_WARN_ON(bo->destroy != i915_ttm_bo_destroy)) + if (bo->destroy != i915_ttm_bo_destroy) return NULL; return container_of(bo, struct drm_i915_gem_object, __do_not_access); @@ -47,10 +49,6 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem, resource_size_t page_size, unsigned int flags); -int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst, - struct drm_i915_gem_object *src, - bool allow_accel, bool intr); - /* Internal I915 TTM declarations and definitions below. */ #define I915_PL_LMEM0 TTM_PL_PRIV @@ -60,4 +58,37 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst, struct ttm_placement *i915_ttm_sys_placement(void); +void i915_ttm_free_cached_io_rsgt(struct drm_i915_gem_object *obj); + +struct i915_refct_sgt * +i915_ttm_resource_get_st(struct drm_i915_gem_object *obj, + struct ttm_resource *res); + +void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj); + +int i915_ttm_purge(struct drm_i915_gem_object *obj); + +/** + * i915_ttm_gtt_binds_lmem - Should the memory be viewed as LMEM by the GTT? + * @mem: struct ttm_resource representing the memory. + * + * Return: true if memory should be viewed as LMEM for GTT binding purposes, + * false otherwise. + */ +static inline bool i915_ttm_gtt_binds_lmem(struct ttm_resource *mem) +{ + return mem->mem_type != I915_PL_SYSTEM; +} + +/** + * i915_ttm_cpu_maps_iomem - Should the memory be viewed as IOMEM by the CPU? + * @mem: struct ttm_resource representing the memory. + * + * Return: true if memory should be viewed as IOMEM for CPU mapping purposes. + */ +static inline bool i915_ttm_cpu_maps_iomem(struct ttm_resource *mem) +{ + /* Once / if we support GGTT, this is also false for cached ttm_tts */ + return mem->mem_type != I915_PL_SYSTEM; +} #endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c new file mode 100644 index 000000000000..80df9f592407 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c @@ -0,0 +1,874 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include <linux/dma-fence-array.h> + +#include <drm/ttm/ttm_bo_driver.h> + +#include "i915_drv.h" +#include "intel_memory_region.h" +#include "intel_region_ttm.h" + +#include "gem/i915_gem_object.h" +#include "gem/i915_gem_region.h" +#include "gem/i915_gem_ttm.h" +#include "gem/i915_gem_ttm_move.h" + +#include "gt/intel_engine_pm.h" +#include "gt/intel_gt.h" +#include "gt/intel_migrate.h" + +/** + * DOC: Selftest failure modes for failsafe migration: + * + * For fail_gpu_migration, the gpu blit scheduled is always a clear blit + * rather than a copy blit, and then we force the failure paths as if + * the blit fence returned an error. + * + * For fail_work_allocation we fail the kmalloc of the async worker and + * sync the gpu blit.
If it then fails, or fail_gpu_migration is set to + * true, then a memcpy operation is performed synchronously. + */ +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +static bool fail_gpu_migration; +static bool fail_work_allocation; + +void i915_ttm_migrate_set_failure_modes(bool gpu_migration, + bool work_allocation) +{ + fail_gpu_migration = gpu_migration; + fail_work_allocation = work_allocation; +} +#endif + +/** + * DOC: Set of utilities to dynamically collect dependencies and + * eventually coalesce them into a single fence which is fed into + * the GT migration code, since it only accepts a single dependency + * fence. + * The single fence returned from these utilities is, in the case of + * dependencies from multiple fence contexts, a struct dma_fence_array, + * since the i915 request code can break that up and await the individual + * fences. + * + * Once we can do async unbinding, this is also needed to coalesce + * the migration fence with the unbind fences. + * + * While collecting the individual dependencies, we store the refcounted + * struct dma_fence pointers in a realloc-managed pointer array, since + * that can be easily fed into a dma_fence_array. Other options are + * available, like for example an xarray for similarity with drm/sched. + * Can be changed easily if needed. + * + * A struct i915_deps needs to be initialized using i915_deps_init(). + * If i915_deps_add_dependency() or i915_deps_add_resv() return an + * error code they will internally call i915_deps_fini(), which frees + * all internal references and allocations. After a call to + * i915_deps_to_fence(), or i915_deps_sync(), the struct should similarly + * be viewed as uninitialized. + * + * We might want to break this out into a separate file as a utility. + */ + +#define I915_DEPS_MIN_ALLOC_CHUNK 8U + +/** + * struct i915_deps - Collect dependencies into a single dma-fence + * @single: Storage for pointer if the collection is a single fence. + * @fences: Allocated array of fence pointers if more than a single fence; + * otherwise points to the address of @single. + * @num_deps: Current number of dependency fences. + * @fences_size: Size of the @fences array in number of pointers. + * @gfp: Allocation mode.
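 + *
 + * A minimal usage sketch (editorial illustration mirroring prev_fence()
 + * further down; error handling condensed):
 + *
 + *	struct i915_deps deps;
 + *	struct dma_fence *fence;
 + *	int ret;
 + *
 + *	i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
 + *	ret = i915_deps_add_resv(&deps, bo->base.resv, true, false, ctx);
 + *	if (ret)
 + *		return ERR_PTR(ret);
 + *	fence = i915_deps_to_fence(&deps, ctx);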
+ */ +struct i915_deps { + struct dma_fence *single; + struct dma_fence **fences; + unsigned int num_deps; + unsigned int fences_size; + gfp_t gfp; +}; + +static void i915_deps_reset_fences(struct i915_deps *deps) +{ + if (deps->fences != &deps->single) + kfree(deps->fences); + deps->num_deps = 0; + deps->fences_size = 1; + deps->fences = &deps->single; +} + +static void i915_deps_init(struct i915_deps *deps, gfp_t gfp) +{ + deps->fences = NULL; + deps->gfp = gfp; + i915_deps_reset_fences(deps); +} + +static void i915_deps_fini(struct i915_deps *deps) +{ + unsigned int i; + + for (i = 0; i < deps->num_deps; ++i) + dma_fence_put(deps->fences[i]); + + if (deps->fences != &deps->single) + kfree(deps->fences); +} + +static int i915_deps_grow(struct i915_deps *deps, struct dma_fence *fence, + const struct ttm_operation_ctx *ctx) +{ + int ret; + + if (deps->num_deps >= deps->fences_size) { + unsigned int new_size = 2 * deps->fences_size; + struct dma_fence **new_fences; + + new_size = max(new_size, I915_DEPS_MIN_ALLOC_CHUNK); + new_fences = kmalloc_array(new_size, sizeof(*new_fences), deps->gfp); + if (!new_fences) + goto sync; + + memcpy(new_fences, deps->fences, + deps->fences_size * sizeof(*new_fences)); + swap(new_fences, deps->fences); + if (new_fences != &deps->single) + kfree(new_fences); + deps->fences_size = new_size; + } + deps->fences[deps->num_deps++] = dma_fence_get(fence); + return 0; + +sync: + if (ctx->no_wait_gpu && !dma_fence_is_signaled(fence)) { + ret = -EBUSY; + goto unref; + } + + ret = dma_fence_wait(fence, ctx->interruptible); + if (ret) + goto unref; + + ret = fence->error; + if (ret) + goto unref; + + return 0; + +unref: + i915_deps_fini(deps); + return ret; +} + +static int i915_deps_sync(struct i915_deps *deps, + const struct ttm_operation_ctx *ctx) +{ + struct dma_fence **fences = deps->fences; + unsigned int i; + int ret = 0; + + for (i = 0; i < deps->num_deps; ++i, ++fences) { + if (ctx->no_wait_gpu && !dma_fence_is_signaled(*fences)) { + ret = -EBUSY; + break; + } + + ret = dma_fence_wait(*fences, ctx->interruptible); + if (!ret) + ret = (*fences)->error; + if (ret) + break; + } + + i915_deps_fini(deps); + return ret; +} + +static int i915_deps_add_dependency(struct i915_deps *deps, + struct dma_fence *fence, + const struct ttm_operation_ctx *ctx) +{ + unsigned int i; + int ret; + + if (!fence) + return 0; + + if (dma_fence_is_signaled(fence)) { + ret = fence->error; + if (ret) + i915_deps_fini(deps); + return ret; + } + + for (i = 0; i < deps->num_deps; ++i) { + struct dma_fence *entry = deps->fences[i]; + + if (!entry->context || entry->context != fence->context) + continue; + + if (dma_fence_is_later(fence, entry)) { + dma_fence_put(entry); + deps->fences[i] = dma_fence_get(fence); + } + + return 0; + } + + return i915_deps_grow(deps, fence, ctx); +} + +static struct dma_fence *i915_deps_to_fence(struct i915_deps *deps, + const struct ttm_operation_ctx *ctx) +{ + struct dma_fence_array *array; + + if (deps->num_deps == 0) + return NULL; + + if (deps->num_deps == 1) { + deps->num_deps = 0; + return deps->fences[0]; + } + + /* + * TODO: Alter the allocation mode here to not try too hard to + * make things async. 
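 + *
 + * Editorial note: on success dma_fence_array_create() takes ownership
 + * of the fences array, which is why the array pointer is reset below
 + * rather than freed.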
+ */ + array = dma_fence_array_create(deps->num_deps, deps->fences, 0, 0, + false); + if (!array) + return ERR_PTR(i915_deps_sync(deps, ctx)); + + deps->fences = NULL; + i915_deps_reset_fences(deps); + + return &array->base; +} + +static int i915_deps_add_resv(struct i915_deps *deps, struct dma_resv *resv, + bool all, const bool no_excl, + const struct ttm_operation_ctx *ctx) +{ + struct dma_resv_iter iter; + struct dma_fence *fence; + + dma_resv_assert_held(resv); + dma_resv_for_each_fence(&iter, resv, all, fence) { + int ret; + + if (no_excl && dma_resv_iter_is_exclusive(&iter)) + continue; + + ret = i915_deps_add_dependency(deps, fence, ctx); + if (ret) + return ret; + } + + return 0; +} + +static enum i915_cache_level +i915_ttm_cache_level(struct drm_i915_private *i915, struct ttm_resource *res, + struct ttm_tt *ttm) +{ + return ((HAS_LLC(i915) || HAS_SNOOP(i915)) && + !i915_ttm_gtt_binds_lmem(res) && + ttm->caching == ttm_cached) ? I915_CACHE_LLC : + I915_CACHE_NONE; +} + +static struct intel_memory_region * +i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type) +{ + struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev); + + /* There's some room for optimization here... */ + GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM && + ttm_mem_type < I915_PL_LMEM0); + if (ttm_mem_type == I915_PL_SYSTEM) + return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM, + 0); + + return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL, + ttm_mem_type - I915_PL_LMEM0); +} + +/** + * i915_ttm_adjust_domains_after_move - Adjust the GEM domains after a + * TTM move + * @obj: The gem object + */ +void i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj) +{ + struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + + if (i915_ttm_cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) { + obj->write_domain = I915_GEM_DOMAIN_WC; + obj->read_domains = I915_GEM_DOMAIN_WC; + } else { + obj->write_domain = I915_GEM_DOMAIN_CPU; + obj->read_domains = I915_GEM_DOMAIN_CPU; + } +} + +/** + * i915_ttm_adjust_gem_after_move - Adjust the GEM state after a TTM move + * @obj: The gem object + * + * Adjusts the GEM object's region, mem_flags and cache coherency after a + * TTM move. + */ +void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj) +{ + struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + unsigned int cache_level; + unsigned int i; + + /* + * If object was moved to an allowable region, update the object + * region to consider it migrated. Note that if it's currently not + * in an allowable region, it's evicted and we don't update the + * object region. + */ + if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) { + for (i = 0; i < obj->mm.n_placements; ++i) { + struct intel_memory_region *mr = obj->mm.placements[i]; + + if (intel_region_to_ttm_type(mr) == bo->resource->mem_type && + mr != obj->mm.region) { + i915_gem_object_release_memory_region(obj); + i915_gem_object_init_memory_region(obj, mr); + break; + } + } + } + + obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM); + + obj->mem_flags |= i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM : + I915_BO_FLAG_STRUCT_PAGE; + + cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource, + bo->ttm); + i915_gem_object_set_cache_coherency(obj, cache_level); +} + +/** + * i915_ttm_move_notify - Prepare an object for move + * @bo: The ttm buffer object. 
+ * + * This function prepares an object for move by removing all GPU bindings, + * removing all CPU mappings and finally releasing the pages sg-table. + * + * Return: 0 if successful, negative error code on error. + */ +int i915_ttm_move_notify(struct ttm_buffer_object *bo) +{ + struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + int ret; + + ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE); + if (ret) + return ret; + + ret = __i915_gem_object_put_pages(obj); + if (ret) + return ret; + + return 0; +} + +static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo, + bool clear, + struct ttm_resource *dst_mem, + struct ttm_tt *dst_ttm, + struct sg_table *dst_st, + struct dma_fence *dep) +{ + struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915), + bdev); + struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + struct i915_request *rq; + struct ttm_tt *src_ttm = bo->ttm; + enum i915_cache_level src_level, dst_level; + int ret; + + if (!i915->gt.migrate.context || intel_gt_is_wedged(&i915->gt)) + return ERR_PTR(-EINVAL); + + /* With fail_gpu_migration, we always perform a GPU clear. */ + if (I915_SELFTEST_ONLY(fail_gpu_migration)) + clear = true; + + dst_level = i915_ttm_cache_level(i915, dst_mem, dst_ttm); + if (clear) { + if (bo->type == ttm_bo_type_kernel && + !I915_SELFTEST_ONLY(fail_gpu_migration)) + return ERR_PTR(-EINVAL); + + intel_engine_pm_get(i915->gt.migrate.context->engine); + ret = intel_context_migrate_clear(i915->gt.migrate.context, dep, + dst_st->sgl, dst_level, + i915_ttm_gtt_binds_lmem(dst_mem), + 0, &rq); + } else { + struct i915_refct_sgt *src_rsgt = + i915_ttm_resource_get_st(obj, bo->resource); + + if (IS_ERR(src_rsgt)) + return ERR_CAST(src_rsgt); + + src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm); + intel_engine_pm_get(i915->gt.migrate.context->engine); + ret = intel_context_migrate_copy(i915->gt.migrate.context, + dep, src_rsgt->table.sgl, + src_level, + i915_ttm_gtt_binds_lmem(bo->resource), + dst_st->sgl, dst_level, + i915_ttm_gtt_binds_lmem(dst_mem), + &rq); + + i915_refct_sgt_put(src_rsgt); + } + + intel_engine_pm_put(i915->gt.migrate.context->engine); + + if (ret && rq) { + i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); + i915_request_put(rq); + } + + return ret ? ERR_PTR(ret) : &rq->fence; +} + +/** + * struct i915_ttm_memcpy_arg - argument for the bo memcpy functionality. + * @_dst_iter: Storage space for the destination kmap iterator. + * @_src_iter: Storage space for the source kmap iterator. + * @dst_iter: Pointer to the destination kmap iterator. + * @src_iter: Pointer to the source kmap iterator. + * @num_pages: The number of pages to copy or clear. + * @clear: Whether to clear instead of copy. + * @src_rsgt: Refcounted scatter-gather list of source memory. + * @dst_rsgt: Refcounted scatter-gather list of destination memory. + */ +struct i915_ttm_memcpy_arg { + union { + struct ttm_kmap_iter_tt tt; + struct ttm_kmap_iter_iomap io; + } _dst_iter, + _src_iter; + struct ttm_kmap_iter *dst_iter; + struct ttm_kmap_iter *src_iter; + unsigned long num_pages; + bool clear; + struct i915_refct_sgt *src_rsgt; + struct i915_refct_sgt *dst_rsgt; +}; + +/** + * struct i915_ttm_memcpy_work - Async memcpy worker under a dma-fence. + * @fence: The dma-fence. + * @work: The work struct used for the memcpy work. + * @lock: The fence lock. Not used to protect anything else ATM. + * @irq_work: Low latency worker to signal the fence since it can't be done + * from the callback for lockdep reasons. + * @cb: Callback for the accelerated migration fence.
+ * @arg: The argument for the memcpy functionality. + */ +struct i915_ttm_memcpy_work { + struct dma_fence fence; + struct work_struct work; + /* The fence lock */ + spinlock_t lock; + struct irq_work irq_work; + struct dma_fence_cb cb; + struct i915_ttm_memcpy_arg arg; +}; + +static void i915_ttm_move_memcpy(struct i915_ttm_memcpy_arg *arg) +{ + ttm_move_memcpy(arg->clear, arg->num_pages, + arg->dst_iter, arg->src_iter); +} + +static void i915_ttm_memcpy_init(struct i915_ttm_memcpy_arg *arg, + struct ttm_buffer_object *bo, bool clear, + struct ttm_resource *dst_mem, + struct ttm_tt *dst_ttm, + struct i915_refct_sgt *dst_rsgt) +{ + struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + struct intel_memory_region *dst_reg, *src_reg; + + dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type); + src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type); + GEM_BUG_ON(!dst_reg || !src_reg); + + arg->dst_iter = !i915_ttm_cpu_maps_iomem(dst_mem) ? + ttm_kmap_iter_tt_init(&arg->_dst_iter.tt, dst_ttm) : + ttm_kmap_iter_iomap_init(&arg->_dst_iter.io, &dst_reg->iomap, + &dst_rsgt->table, dst_reg->region.start); + + arg->src_iter = !i915_ttm_cpu_maps_iomem(bo->resource) ? + ttm_kmap_iter_tt_init(&arg->_src_iter.tt, bo->ttm) : + ttm_kmap_iter_iomap_init(&arg->_src_iter.io, &src_reg->iomap, + &obj->ttm.cached_io_rsgt->table, + src_reg->region.start); + arg->clear = clear; + arg->num_pages = bo->base.size >> PAGE_SHIFT; + + arg->dst_rsgt = i915_refct_sgt_get(dst_rsgt); + arg->src_rsgt = clear ? NULL : + i915_ttm_resource_get_st(obj, bo->resource); +} + +static void i915_ttm_memcpy_release(struct i915_ttm_memcpy_arg *arg) +{ + i915_refct_sgt_put(arg->src_rsgt); + i915_refct_sgt_put(arg->dst_rsgt); +} + +static void __memcpy_work(struct work_struct *work) +{ + struct i915_ttm_memcpy_work *copy_work = + container_of(work, typeof(*copy_work), work); + struct i915_ttm_memcpy_arg *arg = &copy_work->arg; + bool cookie = dma_fence_begin_signalling(); + + i915_ttm_move_memcpy(arg); + dma_fence_end_signalling(cookie); + + dma_fence_signal(&copy_work->fence); + + i915_ttm_memcpy_release(arg); + dma_fence_put(&copy_work->fence); +} + +static void __memcpy_irq_work(struct irq_work *irq_work) +{ + struct i915_ttm_memcpy_work *copy_work = + container_of(irq_work, typeof(*copy_work), irq_work); + struct i915_ttm_memcpy_arg *arg = &copy_work->arg; + + dma_fence_signal(&copy_work->fence); + i915_ttm_memcpy_release(arg); + dma_fence_put(&copy_work->fence); +} + +static void __memcpy_cb(struct dma_fence *fence, struct dma_fence_cb *cb) +{ + struct i915_ttm_memcpy_work *copy_work = + container_of(cb, typeof(*copy_work), cb); + + if (unlikely(fence->error || I915_SELFTEST_ONLY(fail_gpu_migration))) { + INIT_WORK(&copy_work->work, __memcpy_work); + queue_work(system_unbound_wq, &copy_work->work); + } else { + init_irq_work(&copy_work->irq_work, __memcpy_irq_work); + irq_work_queue(&copy_work->irq_work); + } +} + +static const char *get_driver_name(struct dma_fence *fence) +{ + return "i915_ttm_memcpy_work"; +} + +static const char *get_timeline_name(struct dma_fence *fence) +{ + return "unbound"; +} + +static const struct dma_fence_ops dma_fence_memcpy_ops = { + .get_driver_name = get_driver_name, + .get_timeline_name = get_timeline_name, +}; + +static struct dma_fence * +i915_ttm_memcpy_work_arm(struct i915_ttm_memcpy_work *work, + struct dma_fence *dep) +{ + int ret; + + spin_lock_init(&work->lock); + dma_fence_init(&work->fence, &dma_fence_memcpy_ops, &work->lock, 0, 0); + dma_fence_get(&work->fence); + ret = dma_fence_add_callback(dep, &work->cb,
__memcpy_cb); + if (ret) { + if (ret != -ENOENT) + dma_fence_wait(dep, false); + + return ERR_PTR(I915_SELFTEST_ONLY(fail_gpu_migration) ? -EINVAL : + dep->error); + } + + return &work->fence; +} + +static struct dma_fence * +__i915_ttm_move(struct ttm_buffer_object *bo, bool clear, + struct ttm_resource *dst_mem, struct ttm_tt *dst_ttm, + struct i915_refct_sgt *dst_rsgt, bool allow_accel, + struct dma_fence *move_dep) +{ + struct i915_ttm_memcpy_work *copy_work = NULL; + struct i915_ttm_memcpy_arg _arg, *arg = &_arg; + struct dma_fence *fence = ERR_PTR(-EINVAL); + + if (allow_accel) { + fence = i915_ttm_accel_move(bo, clear, dst_mem, dst_ttm, + &dst_rsgt->table, move_dep); + + /* + * We only need to intercept the error when moving to lmem. + * When moving to system, TTM or shmem will provide us with + * cleared pages. + */ + if (!IS_ERR(fence) && !i915_ttm_gtt_binds_lmem(dst_mem) && + !I915_SELFTEST_ONLY(fail_gpu_migration || + fail_work_allocation)) + goto out; + } + + /* If we've scheduled a GPU migration, try to arm the error intercept. */ + if (!IS_ERR(fence)) { + struct dma_fence *dep = fence; + + if (!I915_SELFTEST_ONLY(fail_work_allocation)) + copy_work = kzalloc(sizeof(*copy_work), GFP_KERNEL); + + if (copy_work) { + arg = &copy_work->arg; + i915_ttm_memcpy_init(arg, bo, clear, dst_mem, dst_ttm, + dst_rsgt); + fence = i915_ttm_memcpy_work_arm(copy_work, dep); + } else { + dma_fence_wait(dep, false); + fence = ERR_PTR(I915_SELFTEST_ONLY(fail_gpu_migration) ? + -EINVAL : fence->error); + } + dma_fence_put(dep); + + if (!IS_ERR(fence)) + goto out; + } else if (move_dep) { + int err = dma_fence_wait(move_dep, true); + + if (err) + return ERR_PTR(err); + } + + /* Error intercept failed or no accelerated migration to start with */ + if (!copy_work) + i915_ttm_memcpy_init(arg, bo, clear, dst_mem, dst_ttm, + dst_rsgt); + i915_ttm_move_memcpy(arg); + i915_ttm_memcpy_release(arg); + kfree(copy_work); + + return NULL; +out: + if (!fence && copy_work) { + i915_ttm_memcpy_release(arg); + kfree(copy_work); + } + + return fence; +} + +static struct dma_fence *prev_fence(struct ttm_buffer_object *bo, + struct ttm_operation_ctx *ctx) +{ + struct i915_deps deps; + int ret; + + /* + * Instead of trying hard with GFP_KERNEL to allocate memory, + * the dependency collection will just sync if it doesn't + * succeed. + */ + i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); + ret = i915_deps_add_dependency(&deps, bo->moving, ctx); + if (!ret) + /* + * TODO: Only await excl fence here, and shared fences before + * signaling the migration fence. + */ + ret = i915_deps_add_resv(&deps, bo->base.resv, true, false, ctx); + if (ret) + return ERR_PTR(ret); + + return i915_deps_to_fence(&deps, ctx); +} + +/** + * i915_ttm_move - The TTM move callback used by i915. + * @bo: The buffer object. + * @evict: Whether this is an eviction. + * @ctx: Pointer to a struct ttm_operation_ctx indicating how the waits + * should be performed. + * @dst_mem: The destination ttm resource. + * @hop: If we need multihop, what temporary memory type to move to. + * + * Return: 0 if successful, negative error code otherwise.
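+ *
+ * Note: a migration fence returned from __i915_ttm_move() below means the
+ * move was pipelined and completion is handed to ttm_bo_move_accel_cleanup();
+ * a NULL fence means the copy already completed synchronously via memcpy.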
+ */ +int i915_ttm_move(struct ttm_buffer_object *bo, bool evict, + struct ttm_operation_ctx *ctx, + struct ttm_resource *dst_mem, + struct ttm_place *hop) +{ + struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + struct ttm_resource_manager *dst_man = + ttm_manager_type(bo->bdev, dst_mem->mem_type); + struct dma_fence *migration_fence = NULL; + struct ttm_tt *ttm = bo->ttm; + struct i915_refct_sgt *dst_rsgt; + bool clear; + int ret; + + if (GEM_WARN_ON(!obj)) { + ttm_bo_move_null(bo, dst_mem); + return 0; + } + + ret = i915_ttm_move_notify(bo); + if (ret) + return ret; + + if (obj->mm.madv != I915_MADV_WILLNEED) { + i915_ttm_purge(obj); + ttm_resource_free(bo, &dst_mem); + return 0; + } + + /* Populate ttm with pages if needed. Typically system memory. */ + if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) { + ret = ttm_tt_populate(bo->bdev, ttm, ctx); + if (ret) + return ret; + } + + dst_rsgt = i915_ttm_resource_get_st(obj, dst_mem); + if (IS_ERR(dst_rsgt)) + return PTR_ERR(dst_rsgt); + + clear = !i915_ttm_cpu_maps_iomem(bo->resource) && (!ttm || !ttm_tt_is_populated(ttm)); + if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC))) { + struct dma_fence *dep = prev_fence(bo, ctx); + + if (IS_ERR(dep)) { + i915_refct_sgt_put(dst_rsgt); + return PTR_ERR(dep); + } + + migration_fence = __i915_ttm_move(bo, clear, dst_mem, bo->ttm, + dst_rsgt, true, dep); + dma_fence_put(dep); + } + + /* We can possibly get an -ERESTARTSYS here */ + if (IS_ERR(migration_fence)) { + i915_refct_sgt_put(dst_rsgt); + return PTR_ERR(migration_fence); + } + + if (migration_fence) { + ret = ttm_bo_move_accel_cleanup(bo, migration_fence, evict, + true, dst_mem); + if (ret) { + dma_fence_wait(migration_fence, false); + ttm_bo_move_sync_cleanup(bo, dst_mem); + } + dma_fence_put(migration_fence); + } else { + ttm_bo_move_sync_cleanup(bo, dst_mem); + } + + i915_ttm_adjust_domains_after_move(obj); + i915_ttm_free_cached_io_rsgt(obj); + + if (i915_ttm_gtt_binds_lmem(dst_mem) || i915_ttm_cpu_maps_iomem(dst_mem)) { + obj->ttm.cached_io_rsgt = dst_rsgt; + obj->ttm.get_io_page.sg_pos = dst_rsgt->table.sgl; + obj->ttm.get_io_page.sg_idx = 0; + } else { + i915_refct_sgt_put(dst_rsgt); + } + + i915_ttm_adjust_lru(obj); + i915_ttm_adjust_gem_after_move(obj); + return 0; +} + +/** + * i915_gem_obj_copy_ttm - Copy the contents of one ttm-based gem object to + * another + * @dst: The destination object + * @src: The source object + * @allow_accel: Allow using the blitter. Otherwise TTM memcpy is used. + * @intr: Whether to perform waits interruptibly. + * + * Note: The caller is responsible for ensuring that the underlying + * TTM objects are populated if needed and locked. + * + * Return: Zero on success. Negative error code on error. If @intr == true, + * then it may return -ERESTARTSYS or -EINTR. + */ +int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst, + struct drm_i915_gem_object *src, + bool allow_accel, bool intr) +{ + struct ttm_buffer_object *dst_bo = i915_gem_to_ttm(dst); + struct ttm_buffer_object *src_bo = i915_gem_to_ttm(src); + struct ttm_operation_ctx ctx = { + .interruptible = intr, + }; + struct i915_refct_sgt *dst_rsgt; + struct dma_fence *copy_fence, *dep_fence; + struct i915_deps deps; + int ret, shared_err; + + assert_object_held(dst); + assert_object_held(src); + i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); + + /* + * We plan to add a shared fence only for the source.
If that + * fails, we await all source fences before commencing + * the copy instead of only the exclusive. + */ + shared_err = dma_resv_reserve_shared(src_bo->base.resv, 1); + ret = i915_deps_add_resv(&deps, dst_bo->base.resv, true, false, &ctx); + if (!ret) + ret = i915_deps_add_resv(&deps, src_bo->base.resv, + !!shared_err, false, &ctx); + if (ret) + return ret; + + dep_fence = i915_deps_to_fence(&deps, &ctx); + if (IS_ERR(dep_fence)) + return PTR_ERR(dep_fence); + + dst_rsgt = i915_ttm_resource_get_st(dst, dst_bo->resource); + copy_fence = __i915_ttm_move(src_bo, false, dst_bo->resource, + dst_bo->ttm, dst_rsgt, allow_accel, + dep_fence); + + i915_refct_sgt_put(dst_rsgt); + if (IS_ERR_OR_NULL(copy_fence)) + return PTR_ERR_OR_ZERO(copy_fence); + + dma_resv_add_excl_fence(dst_bo->base.resv, copy_fence); + + /* If we failed to reserve a shared slot, add an exclusive fence */ + if (shared_err) + dma_resv_add_excl_fence(src_bo->base.resv, copy_fence); + else + dma_resv_add_shared_fence(src_bo->base.resv, copy_fence); + + dma_fence_put(copy_fence); + + return 0; +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.h new file mode 100644 index 000000000000..d2e7f149e05c --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ +#ifndef _I915_GEM_TTM_MOVE_H_ +#define _I915_GEM_TTM_MOVE_H_ + +#include <linux/types.h> + +#include "i915_selftest.h" + +struct ttm_buffer_object; +struct ttm_operation_ctx; +struct ttm_place; +struct ttm_resource; +struct ttm_tt; + +struct drm_i915_gem_object; +struct i915_refct_sgt; + +int i915_ttm_move_notify(struct ttm_buffer_object *bo); + +I915_SELFTEST_DECLARE(void i915_ttm_migrate_set_failure_modes(bool gpu_migration, + bool work_allocation)); + +int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst, + struct drm_i915_gem_object *src, + bool allow_accel, bool intr); + +/* Internal I915 TTM declarations and definitions below. 
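+ * (As the names suggest: i915_ttm_move() below is the raw TTM move callback,
+ * and the two adjust helpers resync GEM domain/caching state after a move.)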
*/ + +int i915_ttm_move(struct ttm_buffer_object *bo, bool evict, + struct ttm_operation_ctx *ctx, + struct ttm_resource *dst_mem, + struct ttm_place *hop); + +void i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj); + +void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj); + +#endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c index 3b6d14b5c604..9aad84059d56 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c @@ -12,6 +12,7 @@ #include "gem/i915_gem_region.h" #include "gem/i915_gem_ttm.h" +#include "gem/i915_gem_ttm_move.h" #include "gem/i915_gem_ttm_pm.h" /** @@ -79,6 +80,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply, err = i915_gem_obj_copy_ttm(backup, obj, pm_apply->allow_gpu, false); GEM_WARN_ON(err); + ttm_bo_wait_ctx(backup_bo, &ctx); obj->ttm.backup = backup; return 0; @@ -169,6 +171,7 @@ static int i915_ttm_restore(struct i915_gem_apply_to_region *apply, err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu, false); GEM_WARN_ON(err); + ttm_bo_wait_ctx(backup_bo, &ctx); obj->ttm.backup = NULL; err = 0; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c index f909aaa09d9c..dab3d30c09a0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c @@ -10,7 +10,6 @@ #include "gt/intel_engine.h" -#include "dma_resv_utils.h" #include "i915_gem_ioctls.h" #include "i915_gem_object.h" @@ -25,7 +24,7 @@ i915_gem_object_wait_fence(struct dma_fence *fence, return timeout; if (dma_fence_is_i915(fence)) - return i915_request_wait(to_request(fence), flags, timeout); + return i915_request_wait_timeout(to_request(fence), flags, timeout); return dma_fence_wait_timeout(fence, flags & I915_WAIT_INTERRUPTIBLE, @@ -37,58 +36,22 @@ i915_gem_object_wait_reservation(struct dma_resv *resv, unsigned int flags, long timeout) { - struct dma_fence *excl; - bool prune_fences = false; - - if (flags & I915_WAIT_ALL) { - struct dma_fence **shared; - unsigned int count, i; - int ret; - - ret = dma_resv_get_fences(resv, &excl, &count, &shared); - if (ret) - return ret; - - for (i = 0; i < count; i++) { - timeout = i915_gem_object_wait_fence(shared[i], - flags, timeout); - if (timeout < 0) - break; - - dma_fence_put(shared[i]); - } - - for (; i < count; i++) - dma_fence_put(shared[i]); - kfree(shared); + struct dma_resv_iter cursor; + struct dma_fence *fence; + long ret = timeout ?: 1; + + dma_resv_iter_begin(&cursor, resv, flags & I915_WAIT_ALL); + dma_resv_for_each_fence_unlocked(&cursor, fence) { + ret = i915_gem_object_wait_fence(fence, flags, timeout); + if (ret <= 0) + break; - /* - * If both shared fences and an exclusive fence exist, - * then by construction the shared fences must be later - * than the exclusive fence. If we successfully wait for - * all the shared fences, we know that the exclusive fence - * must all be signaled. If all the shared fences are - * signaled, we can prune the array and recover the - * floating references on the fences/requests. - */ - prune_fences = count && timeout >= 0; - } else { - excl = dma_resv_get_excl_unlocked(resv); + if (timeout) + timeout = ret; } + dma_resv_iter_end(&cursor); - if (excl && timeout >= 0) - timeout = i915_gem_object_wait_fence(excl, flags, timeout); - - dma_fence_put(excl); - - /* - * Opportunistically prune the fences iff we know they have *all* been - * signaled. 
- */ - if (prune_fences) - dma_resv_prune(resv); - - return timeout; + return ret; } static void fence_set_priority(struct dma_fence *fence, @@ -151,32 +114,13 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, unsigned int flags, const struct i915_sched_attr *attr) { - struct dma_fence *excl; + struct dma_resv_iter cursor; + struct dma_fence *fence; - if (flags & I915_WAIT_ALL) { - struct dma_fence **shared; - unsigned int count, i; - int ret; - - ret = dma_resv_get_fences(obj->base.resv, &excl, &count, - &shared); - if (ret) - return ret; - - for (i = 0; i < count; i++) { - i915_gem_fence_wait_priority(shared[i], attr); - dma_fence_put(shared[i]); - } - - kfree(shared); - } else { - excl = dma_resv_get_excl_unlocked(obj->base.resv); - } - - if (excl) { - i915_gem_fence_wait_priority(excl, attr); - dma_fence_put(excl); - } + dma_resv_iter_begin(&cursor, obj->base.resv, flags & I915_WAIT_ALL); + dma_resv_for_each_fence_unlocked(&cursor, fence) + i915_gem_fence_wait_priority(fence, attr); + dma_resv_iter_end(&cursor); return 0; } @@ -196,7 +140,11 @@ i915_gem_object_wait(struct drm_i915_gem_object *obj, timeout = i915_gem_object_wait_reservation(obj->base.resv, flags, timeout); - return timeout < 0 ? timeout : 0; + + if (timeout < 0) + return timeout; + + return !timeout ? -ETIME : 0; } static inline unsigned long nsecs_to_jiffies_timeout(const u64 n) @@ -306,6 +254,6 @@ int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj, unsigned int flags) { might_sleep(); - /* NOP for now. */ - return 0; + + return i915_gem_object_wait_moving_fence(obj, !!(flags & I915_WAIT_INTERRUPTIBLE)); } diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c index dbdbdc344d87..7271fbf813fa 100644 --- a/drivers/gpu/drm/i915/gem/i915_gemfs.c +++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c @@ -12,6 +12,7 @@ int i915_gemfs_init(struct drm_i915_private *i915) { + char huge_opt[] = "huge=within_size"; /* r/w */ struct file_system_type *type; struct vfsmount *gemfs; char *opts; @@ -31,10 +32,8 @@ int i915_gemfs_init(struct drm_i915_private *i915) */ opts = NULL; - if (intel_vtd_active()) { + if (intel_vtd_active(i915)) { if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { - static char huge_opt[] = "huge=within_size"; /* r/w */ - opts = huge_opt; drm_info(&i915->drm, "Transparent Hugepage mode '%s'\n", diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index b2003133deaf..c69c7d45aabc 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -22,6 +22,22 @@ #include "selftests/mock_region.h" #include "selftests/i915_random.h" +static struct i915_gem_context *hugepage_ctx(struct drm_i915_private *i915, + struct file *file) +{ + struct i915_gem_context *ctx = live_context(i915, file); + struct i915_address_space *vm; + + if (IS_ERR(ctx)) + return ctx; + + vm = ctx->vm; + if (vm) + WRITE_ONCE(vm->scrub_64K, true); + + return ctx; +} + static const unsigned int page_sizes[] = { I915_GTT_PAGE_SIZE_2M, I915_GTT_PAGE_SIZE_64K, @@ -552,7 +568,7 @@ out_unpin: out_put: i915_gem_object_put(obj); out_region: - intel_memory_region_put(mem); + intel_memory_region_destroy(mem); return err; } @@ -959,6 +975,8 @@ static int igt_mock_ppgtt_64K(void *arg) __i915_gem_object_put_pages(obj); i915_gem_object_unlock(obj); i915_gem_object_put(obj); + + i915_gem_drain_freed_objects(i915); } } @@ -1080,10 +1098,6 @@ static int __igt_write_huge(struct intel_context *ce, if 
(IS_ERR(vma)) return PTR_ERR(vma); - err = i915_vma_unbind(vma); - if (err) - return err; - err = i915_vma_pin(vma, size, 0, flags | offset); if (err) { /* @@ -1117,7 +1131,7 @@ out_vma_unpin: return err; } -static int igt_write_huge(struct i915_gem_context *ctx, +static int igt_write_huge(struct drm_i915_private *i915, struct drm_i915_gem_object *obj) { struct i915_gem_engines *engines; @@ -1127,6 +1141,8 @@ static int igt_write_huge(struct i915_gem_context *ctx, IGT_TIMEOUT(end_time); unsigned int max_page_size; unsigned int count; + struct i915_gem_context *ctx; + struct file *file; u64 max; u64 num; u64 size; @@ -1134,6 +1150,16 @@ static int igt_write_huge(struct i915_gem_context *ctx, int i, n; int err = 0; + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + ctx = hugepage_ctx(i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out; + } + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); size = obj->base.size; @@ -1153,7 +1179,7 @@ static int igt_write_huge(struct i915_gem_context *ctx, } i915_gem_context_unlock_engines(ctx); if (!n) - return 0; + goto out; /* * To keep things interesting when alternating between engines in our @@ -1215,6 +1241,8 @@ static int igt_write_huge(struct i915_gem_context *ctx, kfree(order); +out: + fput(file); return err; } @@ -1277,8 +1305,7 @@ static u32 igt_random_size(struct rnd_state *prng, static int igt_ppgtt_smoke_huge(void *arg) { - struct i915_gem_context *ctx = arg; - struct drm_i915_private *i915 = ctx->i915; + struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; I915_RND_STATE(prng); struct { @@ -1302,6 +1329,7 @@ static int igt_ppgtt_smoke_huge(void *arg) u32 min = backends[i].min; u32 max = backends[i].max; u32 size = max; + try_again: size = igt_random_size(&prng, min, rounddown_pow_of_two(size)); @@ -1336,7 +1364,7 @@ try_again: goto out_unpin; } - err = igt_write_huge(ctx, obj); + err = igt_write_huge(i915, obj); if (err) { pr_err("%s write-huge failed with size=%u, i=%d\n", __func__, size, i); @@ -1363,8 +1391,7 @@ out_put: static int igt_ppgtt_sanity_check(void *arg) { - struct i915_gem_context *ctx = arg; - struct drm_i915_private *i915 = ctx->i915; + struct drm_i915_private *i915 = arg; unsigned int supported = INTEL_INFO(i915)->page_sizes; struct { igt_create_fn fn; @@ -1431,7 +1458,7 @@ static int igt_ppgtt_sanity_check(void *arg) if (pages) obj->mm.page_sizes.sg = pages; - err = igt_write_huge(ctx, obj); + err = igt_write_huge(i915, obj); i915_gem_object_lock(obj, NULL); i915_gem_object_unpin_pages(obj); @@ -1458,15 +1485,27 @@ out: static int igt_tmpfs_fallback(void *arg) { - struct i915_gem_context *ctx = arg; - struct drm_i915_private *i915 = ctx->i915; + struct drm_i915_private *i915 = arg; + struct i915_address_space *vm; + struct i915_gem_context *ctx; struct vfsmount *gemfs = i915->mm.gemfs; - struct i915_address_space *vm = i915_gem_context_get_eb_vm(ctx); struct drm_i915_gem_object *obj; struct i915_vma *vma; + struct file *file; u32 *vaddr; int err = 0; + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + ctx = hugepage_ctx(i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out; + } + vm = i915_gem_context_get_eb_vm(ctx); + /* * Make sure that we don't burst into a ball of flames upon falling back * to tmpfs, which we rely on if on the off-chance we encouter a failure @@ -1510,33 +1549,47 @@ out_restore: i915->mm.gemfs = gemfs; i915_vm_put(vm); +out: + fput(file); return err; } static int igt_shrink_thp(void *arg) { - struct 
i915_gem_context *ctx = arg; - struct drm_i915_private *i915 = ctx->i915; - struct i915_address_space *vm = i915_gem_context_get_eb_vm(ctx); + struct drm_i915_private *i915 = arg; + struct i915_address_space *vm; + struct i915_gem_context *ctx; struct drm_i915_gem_object *obj; struct i915_gem_engines_iter it; struct intel_context *ce; struct i915_vma *vma; + struct file *file; unsigned int flags = PIN_USER; unsigned int n; bool should_swap; - int err = 0; + int err; + + if (!igt_can_allocate_thp(i915)) { + pr_info("missing THP support, skipping\n"); + return 0; + } + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + ctx = hugepage_ctx(i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out; + } + vm = i915_gem_context_get_eb_vm(ctx); /* * Sanity check shrinking huge-paged object -- make sure nothing blows * up. */ - if (!igt_can_allocate_thp(i915)) { - pr_info("missing THP support, skipping\n"); - goto out_vm; - } - obj = i915_gem_object_create_shmem(i915, SZ_2M); if (IS_ERR(obj)) { err = PTR_ERR(obj); @@ -1626,7 +1679,8 @@ out_put: i915_gem_object_put(obj); out_vm: i915_vm_put(vm); - +out: + fput(file); return err; } @@ -1687,10 +1741,6 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_ppgtt_smoke_huge), SUBTEST(igt_ppgtt_sanity_check), }; - struct i915_gem_context *ctx; - struct i915_address_space *vm; - struct file *file; - int err; if (!HAS_PPGTT(i915)) { pr_info("PPGTT not supported, skipping live-selftests\n"); @@ -1700,23 +1750,5 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915) if (intel_gt_is_wedged(&i915->gt)) return 0; - file = mock_file(i915); - if (IS_ERR(file)) - return PTR_ERR(file); - - ctx = live_context(i915, file); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto out_file; - } - - vm = ctx->vm; - if (vm) - WRITE_ONCE(vm->scrub_64K, true); - - err = i915_subtests(tests, ctx); - -out_file: - fput(file); - return err; + return i915_live_subtests(tests, i915); } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index b32f7fed2d9c..21b71568cd5f 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -88,7 +88,7 @@ static int live_nop_switch(void *arg) rq = i915_request_get(this); i915_request_add(this); } - if (i915_request_wait(rq, 0, HZ) < 0) { + if (i915_request_wait(rq, 0, 10 * HZ) < 0) { pr_err("Failed to populated %d contexts\n", nctx); intel_gt_set_wedged(&i915->gt); i915_request_put(rq); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c index 4a6bb64c3a35..3cc74b0fed06 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c @@ -102,7 +102,7 @@ static int igt_dmabuf_import_same_driver_lmem(void *arg) obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &lmem, 1); if (IS_ERR(obj)) { pr_err("__i915_gem_object_create_user failed with err=%ld\n", - PTR_ERR(dmabuf)); + PTR_ERR(obj)); err = PTR_ERR(obj); goto out_ret; } @@ -158,7 +158,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915, regions, num_regions); if (IS_ERR(obj)) { pr_err("__i915_gem_object_create_user failed with err=%ld\n", - PTR_ERR(dmabuf)); + PTR_ERR(obj)); err = PTR_ERR(obj); goto out_ret; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c 
b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c index 28a700f08b49..4b8e6b098659 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c @@ -4,6 +4,7 @@ */ #include "gt/intel_migrate.h" +#include "gem/i915_gem_ttm_move.h" static int igt_fill_check_buffer(struct drm_i915_gem_object *obj, bool fill) @@ -227,13 +228,34 @@ out_put: return err; } +static int igt_lmem_pages_failsafe_migrate(void *arg) +{ + int fail_gpu, fail_alloc, ret; + + for (fail_gpu = 0; fail_gpu < 2; ++fail_gpu) { + for (fail_alloc = 0; fail_alloc < 2; ++fail_alloc) { + pr_info("Simulated failure modes: gpu: %d, alloc: %d\n", + fail_gpu, fail_alloc); + i915_ttm_migrate_set_failure_modes(fail_gpu, + fail_alloc); + ret = igt_lmem_pages_migrate(arg); + if (ret) + goto out_err; + } + } + +out_err: + i915_ttm_migrate_set_failure_modes(false, false); + return ret; +} + int i915_gem_migrate_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_smem_create_migrate), SUBTEST(igt_lmem_create_migrate), SUBTEST(igt_same_create_migrate), - SUBTEST(igt_lmem_pages_migrate), + SUBTEST(igt_lmem_pages_failsafe_migrate), }; if (!HAS_LMEM(i915)) diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c index 890191f286e3..4a166d25fe60 100644 --- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c @@ -185,7 +185,6 @@ static void gen6_alloc_va_range(struct i915_address_space *vm, pt = stash->pt[0]; __i915_gem_object_pin_pages(pt->base); - i915_gem_object_make_unshrinkable(pt->base); fill32_px(pt, vm->scratch[0]->encode); @@ -262,13 +261,10 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) { struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); - __i915_vma_put(ppgtt->vma); - gen6_ppgtt_free_pd(ppgtt); free_scratch(vm); mutex_destroy(&ppgtt->flush); - mutex_destroy(&ppgtt->pin_mutex); free_pd(&ppgtt->base.vm, ppgtt->base.pd); } @@ -331,37 +327,6 @@ static const struct i915_vma_ops pd_vma_ops = { .unbind_vma = pd_vma_unbind, }; -static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size) -{ - struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt; - struct i915_vma *vma; - - GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); - GEM_BUG_ON(size > ggtt->vm.total); - - vma = i915_vma_alloc(); - if (!vma) - return ERR_PTR(-ENOMEM); - - i915_active_init(&vma->active, NULL, NULL, 0); - - kref_init(&vma->ref); - mutex_init(&vma->pages_mutex); - vma->vm = i915_vm_get(&ggtt->vm); - vma->ops = &pd_vma_ops; - vma->private = ppgtt; - - vma->size = size; - vma->fence_size = size; - atomic_set(&vma->flags, I915_VMA_GGTT); - vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */ - - INIT_LIST_HEAD(&vma->obj_link); - INIT_LIST_HEAD(&vma->closed_link); - - return vma; -} - int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww) { struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); @@ -378,42 +343,92 @@ int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww) if (atomic_add_unless(&ppgtt->pin_count, 1, 0)) return 0; - if (mutex_lock_interruptible(&ppgtt->pin_mutex)) - return -EINTR; + /* grab the ppgtt resv to pin the object */ + err = i915_vm_lock_objects(&ppgtt->base.vm, ww); + if (err) + return err; /* * PPGTT PDEs reside in the GGTT and consists of 512 entries. The * allocator works in address space sizes, so it's multiplied by page * size. We allocate at the top of the GTT to avoid fragmentation. 
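 * (The PIN_HIGH flag passed to i915_ggtt_pin() below requests exactly that
 * top-of-GTT placement.)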
*/ - err = 0; - if (!atomic_read(&ppgtt->pin_count)) + if (!atomic_read(&ppgtt->pin_count)) { err = i915_ggtt_pin(ppgtt->vma, ww, GEN6_PD_ALIGN, PIN_HIGH); + + GEM_BUG_ON(ppgtt->vma->fence); + clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(ppgtt->vma)); + } if (!err) atomic_inc(&ppgtt->pin_count); - mutex_unlock(&ppgtt->pin_mutex); return err; } -void gen6_ppgtt_unpin(struct i915_ppgtt *base) +static int pd_dummy_obj_get_pages(struct drm_i915_gem_object *obj) { - struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); + obj->mm.pages = ZERO_SIZE_PTR; + return 0; +} - GEM_BUG_ON(!atomic_read(&ppgtt->pin_count)); - if (atomic_dec_and_test(&ppgtt->pin_count)) - i915_vma_unpin(ppgtt->vma); +static void pd_dummy_obj_put_pages(struct drm_i915_gem_object *obj, + struct sg_table *pages) +{ } -void gen6_ppgtt_unpin_all(struct i915_ppgtt *base) +static const struct drm_i915_gem_object_ops pd_dummy_obj_ops = { + .name = "pd_dummy_obj", + .get_pages = pd_dummy_obj_get_pages, + .put_pages = pd_dummy_obj_put_pages, +}; + +static struct i915_page_directory * +gen6_alloc_top_pd(struct gen6_ppgtt *ppgtt) { - struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); + struct i915_ggtt * const ggtt = ppgtt->base.vm.gt->ggtt; + struct i915_page_directory *pd; + int err; - if (!atomic_read(&ppgtt->pin_count)) - return; + pd = __alloc_pd(I915_PDES); + if (unlikely(!pd)) + return ERR_PTR(-ENOMEM); - i915_vma_unpin(ppgtt->vma); - atomic_set(&ppgtt->pin_count, 0); + pd->pt.base = __i915_gem_object_create_internal(ppgtt->base.vm.gt->i915, + &pd_dummy_obj_ops, + I915_PDES * SZ_4K); + if (IS_ERR(pd->pt.base)) { + err = PTR_ERR(pd->pt.base); + pd->pt.base = NULL; + goto err_pd; + } + + pd->pt.base->base.resv = i915_vm_resv_get(&ppgtt->base.vm); + pd->pt.base->shares_resv_from = &ppgtt->base.vm; + + ppgtt->vma = i915_vma_instance(pd->pt.base, &ggtt->vm, NULL); + if (IS_ERR(ppgtt->vma)) { + err = PTR_ERR(ppgtt->vma); + ppgtt->vma = NULL; + goto err_pd; + } + + /* The dummy object we create is special, override ops.. 
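+ * it is backed only by ZERO_SIZE_PTR pages (see pd_dummy_obj_get_pages()
+ * above) and is bound through pd_vma_ops rather than the regular GGTT ops.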
*/ + ppgtt->vma->ops = &pd_vma_ops; + ppgtt->vma->private = ppgtt; + return pd; + +err_pd: + free_pd(&ppgtt->base.vm, pd); + return ERR_PTR(err); +} + +void gen6_ppgtt_unpin(struct i915_ppgtt *base) +{ + struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); + + GEM_BUG_ON(!atomic_read(&ppgtt->pin_count)); + if (atomic_dec_and_test(&ppgtt->pin_count)) + i915_vma_unpin(ppgtt->vma); } struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt) @@ -427,7 +442,6 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt) return ERR_PTR(-ENOMEM); mutex_init(&ppgtt->flush); - mutex_init(&ppgtt->pin_mutex); ppgtt_init(&ppgtt->base, gt, 0); ppgtt->base.vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen6_pte_t)); @@ -442,19 +456,13 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt) ppgtt->base.vm.alloc_pt_dma = alloc_pt_dma; ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode; - ppgtt->base.pd = __alloc_pd(I915_PDES); - if (!ppgtt->base.pd) { - err = -ENOMEM; - goto err_free; - } - err = gen6_ppgtt_init_scratch(ppgtt); if (err) - goto err_pd; + goto err_free; - ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE); - if (IS_ERR(ppgtt->vma)) { - err = PTR_ERR(ppgtt->vma); + ppgtt->base.pd = gen6_alloc_top_pd(ppgtt); + if (IS_ERR(ppgtt->base.pd)) { + err = PTR_ERR(ppgtt->base.pd); goto err_scratch; } @@ -462,10 +470,7 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt) err_scratch: free_scratch(&ppgtt->base.vm); -err_pd: - free_pd(&ppgtt->base.vm, ppgtt->base.pd); err_free: - mutex_destroy(&ppgtt->pin_mutex); kfree(ppgtt); return ERR_PTR(err); } diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h index 6a61a5c3a85a..5e5cf2ec3309 100644 --- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h +++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h @@ -19,7 +19,6 @@ struct gen6_ppgtt { u32 pp_dir; atomic_t pin_count; - struct mutex pin_mutex; bool scan_for_unused_pt; }; @@ -71,7 +70,6 @@ static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base) int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww); void gen6_ppgtt_unpin(struct i915_ppgtt *base); -void gen6_ppgtt_unpin_all(struct i915_ppgtt *base); void gen6_ppgtt_enable(struct intel_gt *gt); void gen7_ppgtt_enable(struct intel_gt *gt); struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt); diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index 461844dffd7e..e320610dd0b8 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -42,7 +42,7 @@ int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode) vf_flush_wa = true; /* WaForGAMHang:kbl */ - if (IS_KBL_GT_STEP(rq->engine->i915, 0, STEP_C0)) + if (IS_KBL_GRAPHICS_STEP(rq->engine->i915, 0, STEP_C0)) dc_flush_wa = true; } diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c index 037a9a6e4889..95c02096a61b 100644 --- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c @@ -18,7 +18,7 @@ static u64 gen8_pde_encode(const dma_addr_t addr, const enum i915_cache_level level) { - u64 pde = addr | _PAGE_PRESENT | _PAGE_RW; + u64 pde = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW; if (level != I915_CACHE_NONE) pde |= PPAT_CACHED_PDE; @@ -32,10 +32,10 @@ static u64 gen8_pte_encode(dma_addr_t addr, enum i915_cache_level level, u32 flags) { - gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW; + gen8_pte_t pte = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW; if (unlikely(flags & PTE_READ_ONLY)) - pte &= ~_PAGE_RW; + pte &= 
~GEN8_PAGE_RW; if (flags & PTE_LM) pte |= GEN12_PPGTT_PTE_LM; @@ -301,7 +301,6 @@ static void __gen8_ppgtt_alloc(struct i915_address_space * const vm, pt = stash->pt[!!lvl]; __i915_gem_object_pin_pages(pt->base); - i915_gem_object_make_unshrinkable(pt->base); fill_px(pt, vm->scratch[lvl]->encode); @@ -652,7 +651,7 @@ static int gen8_init_scratch(struct i915_address_space *vm) vm->scratch[0]->encode = gen8_pte_encode(px_dma(vm->scratch[0]), - I915_CACHE_LLC, pte_flags); + I915_CACHE_NONE, pte_flags); for (i = 1; i <= vm->top; i++) { struct drm_i915_gem_object *obj; @@ -668,7 +667,7 @@ static int gen8_init_scratch(struct i915_address_space *vm) } fill_px(obj, vm->scratch[i - 1]->encode); - obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_LLC); + obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_NONE); vm->scratch[i] = obj; } diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 5634d14052bc..ba083d800a08 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -219,7 +219,7 @@ int __intel_context_do_pin_ww(struct intel_context *ce, */ err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww); - if (!err && ce->ring->vma->obj) + if (!err) err = i915_gem_object_lock(ce->ring->vma->obj, ww); if (!err && ce->state) err = i915_gem_object_lock(ce->state->obj, ww); @@ -228,17 +228,17 @@ int __intel_context_do_pin_ww(struct intel_context *ce, if (err) return err; - err = i915_active_acquire(&ce->active); + err = ce->ops->pre_pin(ce, ww, &vaddr); if (err) goto err_ctx_unpin; - err = ce->ops->pre_pin(ce, ww, &vaddr); + err = i915_active_acquire(&ce->active); if (err) - goto err_release; + goto err_post_unpin; err = mutex_lock_interruptible(&ce->pin_mutex); if (err) - goto err_post_unpin; + goto err_release; intel_engine_pm_might_get(ce->engine); @@ -273,11 +273,11 @@ int __intel_context_do_pin_ww(struct intel_context *ce, err_unlock: mutex_unlock(&ce->pin_mutex); +err_release: + i915_active_release(&ce->active); err_post_unpin: if (!handoff) ce->ops->post_unpin(ce); -err_release: - i915_active_release(&ce->active); err_ctx_unpin: intel_context_post_unpin(ce); @@ -364,7 +364,7 @@ static int __intel_context_active(struct i915_active *active) return 0; } -static int __i915_sw_fence_call +static int sw_fence_dummy_notify(struct i915_sw_fence *sf, enum i915_sw_fence_notify state) { diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index ff6753ccb129..352254e001b4 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -325,6 +325,38 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id, engine->id = id; engine->legacy_idx = INVALID_ENGINE; engine->mask = BIT(id); + if (GRAPHICS_VER(gt->i915) >= 11) { + static const u32 engine_reset_domains[] = { + [RCS0] = GEN11_GRDOM_RENDER, + [BCS0] = GEN11_GRDOM_BLT, + [VCS0] = GEN11_GRDOM_MEDIA, + [VCS1] = GEN11_GRDOM_MEDIA2, + [VCS2] = GEN11_GRDOM_MEDIA3, + [VCS3] = GEN11_GRDOM_MEDIA4, + [VCS4] = GEN11_GRDOM_MEDIA5, + [VCS5] = GEN11_GRDOM_MEDIA6, + [VCS6] = GEN11_GRDOM_MEDIA7, + [VCS7] = GEN11_GRDOM_MEDIA8, + [VECS0] = GEN11_GRDOM_VECS, + [VECS1] = GEN11_GRDOM_VECS2, + [VECS2] = GEN11_GRDOM_VECS3, + [VECS3] = GEN11_GRDOM_VECS4, + }; + GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) || + !engine_reset_domains[id]); + engine->reset_domain = engine_reset_domains[id]; + } else { + static const u32 engine_reset_domains[] = { + [RCS0] = 
GEN6_GRDOM_RENDER, + [BCS0] = GEN6_GRDOM_BLT, + [VCS0] = GEN6_GRDOM_MEDIA, + [VCS1] = GEN8_GRDOM_MEDIA2, + [VECS0] = GEN6_GRDOM_VECS, + }; + GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) || + !engine_reset_domains[id]); + engine->reset_domain = engine_reset_domains[id]; + } engine->i915 = i915; engine->gt = gt; engine->uncore = gt->uncore; @@ -363,7 +395,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id, DRIVER_CAPS(i915)->has_logical_contexts = true; ewma__engine_latency_init(&engine->latency); - seqcount_init(&engine->stats.lock); + seqcount_init(&engine->stats.execlists.lock); ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier); @@ -1676,14 +1708,18 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, static void print_request_ring(struct drm_printer *m, struct i915_request *rq) { + struct i915_vma_snapshot *vsnap = &rq->batch_snapshot; void *ring; int size; + if (!i915_vma_snapshot_present(vsnap)) + vsnap = NULL; + drm_printf(m, "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n", rq->head, rq->postfix, rq->tail, - rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u, - rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u); + vsnap ? upper_32_bits(vsnap->gtt_offset) : ~0u, + vsnap ? lower_32_bits(vsnap->gtt_offset) : ~0u); size = rq->tail - rq->head; if (rq->tail < rq->head) @@ -1915,22 +1951,6 @@ void intel_engine_dump(struct intel_engine_cs *engine, intel_engine_print_breadcrumbs(engine, m); } -static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine, - ktime_t *now) -{ - ktime_t total = engine->stats.total; - - /* - * If the engine is executing something at the moment - * add it to the total. - */ - *now = ktime_get(); - if (READ_ONCE(engine->stats.active)) - total = ktime_add(total, ktime_sub(*now, engine->stats.start)); - - return total; -} - /** * intel_engine_get_busy_time() - Return current accumulated engine busyness * @engine: engine to report on @@ -1940,15 +1960,7 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine, */ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now) { - unsigned int seq; - ktime_t total; - - do { - seq = read_seqcount_begin(&engine->stats.lock); - total = __intel_engine_get_busy_time(engine, now); - } while (read_seqcount_retry(&engine->stats.lock, seq)); - - return total; + return engine->busyness(engine, now); } struct intel_context * diff --git a/drivers/gpu/drm/i915/gt/intel_engine_stats.h b/drivers/gpu/drm/i915/gt/intel_engine_stats.h index 24fbdd94351a..8e762d683e50 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_stats.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_stats.h @@ -15,45 +15,46 @@ static inline void intel_engine_context_in(struct intel_engine_cs *engine) { + struct intel_engine_execlists_stats *stats = &engine->stats.execlists; unsigned long flags; - if (engine->stats.active) { - engine->stats.active++; + if (stats->active) { + stats->active++; return; } /* The writer is serialised; but the pmu reader may be from hardirq */ local_irq_save(flags); - write_seqcount_begin(&engine->stats.lock); + write_seqcount_begin(&stats->lock); - engine->stats.start = ktime_get(); - engine->stats.active++; + stats->start = ktime_get(); + stats->active++; - write_seqcount_end(&engine->stats.lock); + write_seqcount_end(&stats->lock); local_irq_restore(flags); - GEM_BUG_ON(!engine->stats.active); + GEM_BUG_ON(!stats->active); } static inline void intel_engine_context_out(struct intel_engine_cs *engine) { + 
struct intel_engine_execlists_stats *stats = &engine->stats.execlists; unsigned long flags; - GEM_BUG_ON(!engine->stats.active); - if (engine->stats.active > 1) { - engine->stats.active--; + GEM_BUG_ON(!stats->active); + if (stats->active > 1) { + stats->active--; return; } local_irq_save(flags); - write_seqcount_begin(&engine->stats.lock); + write_seqcount_begin(&stats->lock); - engine->stats.active--; - engine->stats.total = - ktime_add(engine->stats.total, - ktime_sub(ktime_get(), engine->stats.start)); + stats->active--; + stats->total = ktime_add(stats->total, + ktime_sub(ktime_get(), stats->start)); - write_seqcount_end(&engine->stats.lock); + write_seqcount_end(&stats->lock); local_irq_restore(flags); } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index e0f773585c29..36365bdbe1ee 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -257,6 +257,55 @@ struct intel_engine_execlists { #define INTEL_ENGINE_CS_MAX_NAME 8 +struct intel_engine_execlists_stats { + /** + * @active: Number of contexts currently scheduled in. + */ + unsigned int active; + + /** + * @lock: Lock protecting the below fields. + */ + seqcount_t lock; + + /** + * @total: Total time this engine was busy. + * + * Accumulated time not counting the most recent block in cases where + * engine is currently busy (active > 0). + */ + ktime_t total; + + /** + * @start: Timestamp of the last idle to active transition. + * + * Idle is defined as active == 0, active is active > 0. + */ + ktime_t start; +}; + +struct intel_engine_guc_stats { + /** + * @running: Active state of the engine when busyness was last sampled. + */ + bool running; + + /** + * @prev_total: Previous value of total runtime clock cycles. + */ + u32 prev_total; + + /** + * @total_gt_clks: Total gt clock cycles this engine was busy. + */ + u64 total_gt_clks; + + /** + * @start_gt_clk: GT clock time of last idle to active transition. + */ + u64 start_gt_clk; +}; + struct intel_engine_cs { struct drm_i915_private *i915; struct intel_gt *gt; @@ -269,6 +318,7 @@ struct intel_engine_cs { unsigned int guc_id; intel_engine_mask_t mask; + u32 reset_domain; /** * @logical_mask: logical mask of engine, reported to user space via * query IOCTL and used to communicate with the GuC in logical space. @@ -439,6 +489,12 @@ struct intel_engine_cs { void (*add_active_request)(struct i915_request *rq); void (*remove_active_request)(struct i915_request *rq); + /* + * Get engine busyness and the time at which the busyness was sampled. + */ + ktime_t (*busyness)(struct intel_engine_cs *engine, + ktime_t *now); + struct intel_engine_execlists execlists; /* @@ -488,30 +544,10 @@ struct intel_engine_cs { u32 (*get_cmd_length_mask)(u32 cmd_header); struct { - /** - * @active: Number of contexts currently scheduled in. - */ - unsigned int active; - - /** - * @lock: Lock protecting the below fields. - */ - seqcount_t lock; - - /** - * @total: Total time this engine was busy. - * - * Accumulated time not counting the most recent block in cases - * where engine is currently busy (active > 0). - */ - ktime_t total; - - /** - * @start: Timestamp of the last idle to active transition. - * - * Idle is defined as active == 0, active is active > 0. - */ - ktime_t start; + union { + struct intel_engine_execlists_stats execlists; + struct intel_engine_guc_stats guc; + }; /** * @rps: Utilisation at last RPS sampling. 
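For reference, busyness accounting is now kept per submission backend in the union above, and readers sample it locklessly. A minimal sketch of the seqcount read pattern the execlists flavour relies on (names taken from the struct added above; the driver's actual reader is execlists_engine_busyness() in the next hunk):

static ktime_t sample_busyness(struct intel_engine_execlists_stats *stats)
{
	unsigned int seq;
	ktime_t total;

	/* Retry the read section if a writer updated the stats meanwhile. */
	do {
		seq = read_seqcount_begin(&stats->lock);
		total = stats->total;
		/* Add the still-running busy interval, if any. */
		if (READ_ONCE(stats->active))
			total = ktime_add(total,
					  ktime_sub(ktime_get(), stats->start));
	} while (read_seqcount_retry(&stats->lock, seq));

	return total;
}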
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c index bedb80057046..a69df5e9e77a 100644 --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c @@ -2186,7 +2186,8 @@ struct execlists_capture { static void execlists_capture_work(struct work_struct *work) { struct execlists_capture *cap = container_of(work, typeof(*cap), work); - const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN; + const gfp_t gfp = __GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | + __GFP_NOWARN; struct intel_engine_cs *engine = cap->rq->engine; struct intel_gt_coredump *gt = cap->error->gt; struct intel_engine_capture_vma *vma; @@ -3293,6 +3294,38 @@ static void execlists_release(struct intel_engine_cs *engine) lrc_fini_wa_ctx(engine); } +static ktime_t __execlists_engine_busyness(struct intel_engine_cs *engine, + ktime_t *now) +{ + struct intel_engine_execlists_stats *stats = &engine->stats.execlists; + ktime_t total = stats->total; + + /* + * If the engine is executing something at the moment + * add it to the total. + */ + *now = ktime_get(); + if (READ_ONCE(stats->active)) + total = ktime_add(total, ktime_sub(*now, stats->start)); + + return total; +} + +static ktime_t execlists_engine_busyness(struct intel_engine_cs *engine, + ktime_t *now) +{ + struct intel_engine_execlists_stats *stats = &engine->stats.execlists; + unsigned int seq; + ktime_t total; + + do { + seq = read_seqcount_begin(&stats->lock); + total = __execlists_engine_busyness(engine, now); + } while (read_seqcount_retry(&stats->lock, seq)); + + return total; +} + static void logical_ring_default_vfuncs(struct intel_engine_cs *engine) { @@ -3349,6 +3382,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) engine->emit_bb_start = gen8_emit_bb_start; else engine->emit_bb_start = gen8_emit_bb_start_noarb; + + engine->busyness = execlists_engine_busyness; } static void logical_ring_default_irqs(struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index 57c97554393b..cbc6d2b1fd9e 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -3,12 +3,14 @@ * Copyright © 2020 Intel Corporation */ +#include <linux/agp_backend.h> #include <linux/stop_machine.h> #include <asm/set_memory.h> #include <asm/smp.h> #include <drm/i915_drm.h> +#include <drm/intel-gtt.h> #include "gem/i915_gem_lmem.h" @@ -104,7 +106,7 @@ static bool needs_idle_maps(struct drm_i915_private *i915) * Query intel_iommu to see if we need the workaround. Presumably that * was loaded first. */ - if (!intel_vtd_active()) + if (!intel_vtd_active(i915)) return false; if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915)) @@ -116,17 +118,26 @@ static bool needs_idle_maps(struct drm_i915_private *i915) return false; } -void i915_ggtt_suspend(struct i915_ggtt *ggtt) +/** + * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM + * @vm: The VM to suspend the mappings for + * + * Suspend the memory mappings for all objects mapped to HW via the GGTT or a + * DPT page table. + */ +void i915_ggtt_suspend_vm(struct i915_address_space *vm) { struct i915_vma *vma, *vn; int open; - mutex_lock(&ggtt->vm.mutex); + drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt); + + mutex_lock(&vm->mutex); /* Skip rewriting PTE on VMA unbind. 
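 * vm->open is forced to zero across the loop and restored afterwards, so the
 * unbinds below only update bookkeeping; the PTEs are cleared in one go by
 * vm->clear_range().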
*/ - open = atomic_xchg(&ggtt->vm.open, 0); + open = atomic_xchg(&vm->open, 0); - list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) { + list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) { GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); i915_vma_wait_for_bind(vma); @@ -139,11 +150,17 @@ void i915_ggtt_suspend(struct i915_ggtt *ggtt) } } - ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); - ggtt->invalidate(ggtt); - atomic_set(&ggtt->vm.open, open); + vm->clear_range(vm, 0, vm->total); - mutex_unlock(&ggtt->vm.mutex); + atomic_set(&vm->open, open); + + mutex_unlock(&vm->mutex); +} + +void i915_ggtt_suspend(struct i915_ggtt *ggtt) +{ + i915_ggtt_suspend_vm(&ggtt->vm); + ggtt->invalidate(ggtt); intel_gt_check_and_clear_faults(ggtt->vm.gt); } @@ -192,7 +209,7 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr, enum i915_cache_level level, u32 flags) { - gen8_pte_t pte = addr | _PAGE_PRESENT; + gen8_pte_t pte = addr | GEN8_PAGE_PRESENT; if (flags & PTE_LM) pte |= GEN12_GGTT_PTE_LM; @@ -1216,7 +1233,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915) if (ret) return ret; - if (intel_vtd_active()) + if (intel_vtd_active(i915)) drm_info(&i915->drm, "VT-d active for gfx access\n"); return 0; @@ -1253,37 +1270,59 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt) ggtt->invalidate(ggtt); } -void i915_ggtt_resume(struct i915_ggtt *ggtt) +/** + * i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM + * @vm: The VM to restore the mappings for + * + * Restore the memory mappings for all objects mapped to HW via the GGTT or a + * DPT page table. + * + * Returns %true if restoring the mapping for any object that was in a write + * domain before suspend. + */ +bool i915_ggtt_resume_vm(struct i915_address_space *vm) { struct i915_vma *vma; - bool flush = false; + bool write_domain_objs = false; int open; - intel_gt_check_and_clear_faults(ggtt->vm.gt); + drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt); /* First fill our portion of the GTT with scratch pages */ - ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); + vm->clear_range(vm, 0, vm->total); /* Skip rewriting PTE on VMA unbind. */ - open = atomic_xchg(&ggtt->vm.open, 0); + open = atomic_xchg(&vm->open, 0); /* clflush objects bound into the GGTT and rebind them. */ - list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) { + list_for_each_entry(vma, &vm->bound_list, vm_link) { struct drm_i915_gem_object *obj = vma->obj; unsigned int was_bound = atomic_read(&vma->flags) & I915_VMA_BIND_MASK; GEM_BUG_ON(!was_bound); - vma->ops->bind_vma(&ggtt->vm, NULL, vma, + vma->ops->bind_vma(vm, NULL, vma, obj ? 
obj->cache_level : 0, was_bound); if (obj) { /* only used during resume => exclusive access */ - flush |= fetch_and_zero(&obj->write_domain); + write_domain_objs |= fetch_and_zero(&obj->write_domain); obj->read_domains |= I915_GEM_DOMAIN_GTT; } } - atomic_set(&ggtt->vm.open, open); + atomic_set(&vm->open, open); + + return write_domain_objs; +} + +void i915_ggtt_resume(struct i915_ggtt *ggtt) +{ + bool flush; + + intel_gt_check_and_clear_faults(ggtt->vm.gt); + + flush = i915_ggtt_resume_vm(&ggtt->vm); + ggtt->invalidate(ggtt); if (flush) @@ -1388,30 +1427,39 @@ err_st_alloc: } static struct scatterlist * -remap_pages(struct drm_i915_gem_object *obj, - unsigned int offset, unsigned int alignment_pad, - unsigned int width, unsigned int height, - unsigned int src_stride, unsigned int dst_stride, - struct sg_table *st, struct scatterlist *sg) +add_padding_pages(unsigned int count, + struct sg_table *st, struct scatterlist *sg) +{ + st->nents++; + + /* + * The DE ignores the PTEs for the padding tiles, the sg entry + * here is just a convenience to indicate how many padding PTEs + * to insert at this spot. + */ + sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0); + sg_dma_address(sg) = 0; + sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE; + sg = sg_next(sg); + + return sg; +} + +static struct scatterlist * +remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj, + unsigned int offset, unsigned int alignment_pad, + unsigned int width, unsigned int height, + unsigned int src_stride, unsigned int dst_stride, + struct sg_table *st, struct scatterlist *sg, + unsigned int *gtt_offset) { unsigned int row; if (!width || !height) return sg; - if (alignment_pad) { - st->nents++; - - /* - * The DE ignores the PTEs for the padding tiles, the sg entry - * here is just a convenience to indicate how many padding PTEs - * to insert at this spot. - */ - sg_set_page(sg, NULL, alignment_pad * 4096, 0); - sg_dma_address(sg) = 0; - sg_dma_len(sg) = alignment_pad * 4096; - sg = sg_next(sg); - } + if (alignment_pad) + sg = add_padding_pages(alignment_pad, st, sg); for (row = 0; row < height; row++) { unsigned int left = width * I915_GTT_PAGE_SIZE; @@ -1448,18 +1496,98 @@ remap_pages(struct drm_i915_gem_object *obj, if (!left) continue; + sg = add_padding_pages(left >> PAGE_SHIFT, st, sg); + } + + *gtt_offset += alignment_pad + dst_stride * height; + + return sg; +} + +static struct scatterlist * +remap_contiguous_pages(struct drm_i915_gem_object *obj, + unsigned int obj_offset, + unsigned int count, + struct sg_table *st, struct scatterlist *sg) +{ + struct scatterlist *iter; + unsigned int offset; + + iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset); + GEM_BUG_ON(!iter); + + do { + unsigned int len; + + len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT), + count << PAGE_SHIFT); + sg_set_page(sg, NULL, len, 0); + sg_dma_address(sg) = + sg_dma_address(iter) + (offset << PAGE_SHIFT); + sg_dma_len(sg) = len; + st->nents++; + count -= len >> PAGE_SHIFT; + if (count == 0) + return sg; - /* - * The DE ignores the PTEs for the padding tiles, the sg entry - * here is just a conenience to indicate how many padding PTEs - * to insert at this spot. 
- */ - sg_set_page(sg, NULL, left, 0); - sg_dma_address(sg) = 0; - sg_dma_len(sg) = left; - sg = sg_next(sg); - } + sg = __sg_next(sg); + iter = __sg_next(iter); + offset = 0; + } while (1); +} + +static struct scatterlist * +remap_linear_color_plane_pages(struct drm_i915_gem_object *obj, + unsigned int obj_offset, unsigned int alignment_pad, + unsigned int size, + struct sg_table *st, struct scatterlist *sg, + unsigned int *gtt_offset) +{ + if (!size) + return sg; + + if (alignment_pad) + sg = add_padding_pages(alignment_pad, st, sg); + + sg = remap_contiguous_pages(obj, obj_offset, size, st, sg); + sg = sg_next(sg); + + *gtt_offset += alignment_pad + size; + + return sg; +} + +static struct scatterlist * +remap_color_plane_pages(const struct intel_remapped_info *rem_info, + struct drm_i915_gem_object *obj, + int color_plane, + struct sg_table *st, struct scatterlist *sg, + unsigned int *gtt_offset) +{ + unsigned int alignment_pad = 0; + + if (rem_info->plane_alignment) + alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset; + + if (rem_info->plane[color_plane].linear) + sg = remap_linear_color_plane_pages(obj, + rem_info->plane[color_plane].offset, + alignment_pad, + rem_info->plane[color_plane].size, + st, sg, + gtt_offset); + + else + sg = remap_tiled_color_plane_pages(obj, + rem_info->plane[color_plane].offset, + alignment_pad, + rem_info->plane[color_plane].width, + rem_info->plane[color_plane].height, + rem_info->plane[color_plane].src_stride, + rem_info->plane[color_plane].dst_stride, + st, sg, + gtt_offset); return sg; } @@ -1488,21 +1616,8 @@ intel_remap_pages(struct intel_remapped_info *rem_info, st->nents = 0; sg = st->sgl; - for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { - unsigned int alignment_pad = 0; - - if (rem_info->plane_alignment) - alignment_pad = ALIGN(gtt_offset, rem_info->plane_alignment) - gtt_offset; - - sg = remap_pages(obj, - rem_info->plane[i].offset, alignment_pad, - rem_info->plane[i].width, rem_info->plane[i].height, - rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride, - st, sg); - - gtt_offset += alignment_pad + - rem_info->plane[i].dst_stride * rem_info->plane[i].height; - } + for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) + sg = remap_color_plane_pages(rem_info, obj, i, st, sg, &gtt_offset); i915_sg_trim(st); @@ -1524,9 +1639,8 @@ intel_partial_pages(const struct i915_ggtt_view *view, struct drm_i915_gem_object *obj) { struct sg_table *st; - struct scatterlist *sg, *iter; + struct scatterlist *sg; unsigned int count = view->partial.size; - unsigned int offset; int ret = -ENOMEM; st = kmalloc(sizeof(*st), GFP_KERNEL); @@ -1537,34 +1651,14 @@ intel_partial_pages(const struct i915_ggtt_view *view, if (ret) goto err_sg_alloc; - iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset); - GEM_BUG_ON(!iter); - - sg = st->sgl; st->nents = 0; - do { - unsigned int len; - len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT), - count << PAGE_SHIFT); - sg_set_page(sg, NULL, len, 0); - sg_dma_address(sg) = - sg_dma_address(iter) + (offset << PAGE_SHIFT); - sg_dma_len(sg) = len; + sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl); - st->nents++; - count -= len >> PAGE_SHIFT; - if (count == 0) { - sg_mark_end(sg); - i915_sg_trim(st); /* Drop any unused tail entries. */ + sg_mark_end(sg); + i915_sg_trim(st); /* Drop any unused tail entries.
*/ - return st; - } - - sg = __sg_next(sg); - iter = __sg_next(iter); - offset = 0; - } while (1); + return st; err_sg_alloc: kfree(st); diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 1cb1948ac959..f2422d48be32 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -3,6 +3,8 @@ * Copyright © 2019 Intel Corporation */ +#include <drm/intel-gtt.h> + #include "intel_gt_debugfs.h" #include "gem/i915_gem_lmem.h" diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 524eaf678790..c0fa41e4c803 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -86,6 +86,7 @@ static int __gt_unpark(struct intel_wakeref *wf) intel_rc6_unpark(&gt->rc6); intel_rps_unpark(&gt->rps); i915_pmu_gt_unparked(i915); + intel_guc_busyness_unpark(gt); intel_gt_unpark_requests(gt); runtime_begin(gt); @@ -104,6 +105,7 @@ static int __gt_park(struct intel_wakeref *wf) runtime_end(gt); intel_gt_park_requests(gt); + intel_guc_busyness_park(gt); i915_vma_parked(gt); i915_pmu_gt_parked(i915); intel_rps_park(&gt->rps); @@ -301,7 +303,7 @@ void intel_gt_suspend_prepare(struct intel_gt *gt) user_forcewake(gt, true); wait_for_suspend(gt); - intel_pxp_suspend(&gt->pxp, false); + intel_pxp_suspend_prepare(&gt->pxp); } static suspend_state_t pm_suspend_target(void) @@ -326,6 +328,7 @@ void intel_gt_suspend_late(struct intel_gt *gt) GEM_BUG_ON(gt->awake); intel_uc_suspend(&gt->uc); + intel_pxp_suspend(&gt->pxp); /* * On disabling the device, we want to turn off HW access to memory @@ -353,7 +356,7 @@ void intel_gt_suspend_late(struct intel_gt *gt) void intel_gt_runtime_suspend(struct intel_gt *gt) { - intel_pxp_suspend(&gt->pxp, true); + intel_pxp_runtime_suspend(&gt->pxp); intel_uc_runtime_suspend(&gt->uc); GT_TRACE(gt, "\n"); @@ -371,7 +374,7 @@ int intel_gt_runtime_resume(struct intel_gt *gt) if (ret) return ret; - intel_pxp_resume(&gt->pxp); + intel_pxp_runtime_resume(&gt->pxp); return 0; } diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index 67d14afa6623..9fee968d57db 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -6,6 +6,9 @@ #include <linux/slab.h> /* fault-inject.h is not standalone!
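 * (which is why linux/slab.h is pulled in just before it)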
*/ #include <linux/fault-inject.h> +#include <linux/sched/mm.h> + +#include <drm/drm_cache.h> #include "gem/i915_gem_lmem.h" #include "i915_trace.h" @@ -273,6 +276,7 @@ static void poison_scratch_page(struct drm_i915_gem_object *scratch) val = POISON_FREE; memset(vaddr, val, scratch->base.size); + drm_clflush_virt_range(vaddr, scratch->base.size); } int setup_scratch_page(struct i915_address_space *vm) diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h index bc6750263359..51afe66d00f2 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.h +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h @@ -135,6 +135,9 @@ typedef u64 gen8_pte_t; #define GEN8_PPAT_ELLC_OVERRIDE (0<<2) #define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8)) +#define GEN8_PAGE_PRESENT BIT_ULL(0) +#define GEN8_PAGE_RW BIT_ULL(1) + #define GEN8_PDE_IPS_64K BIT(11) #define GEN8_PDE_PS_2M BIT(7) @@ -544,6 +547,8 @@ int i915_ppgtt_init_hw(struct intel_gt *gt); struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt, unsigned long lmem_pt_obj_flags); +void i915_ggtt_suspend_vm(struct i915_address_space *vm); +bool i915_ggtt_resume_vm(struct i915_address_space *vm); void i915_ggtt_suspend(struct i915_ggtt *gtt); void i915_ggtt_resume(struct i915_ggtt *ggtt); diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 56156cf18c41..b3489599e4de 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1167,6 +1167,11 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs) cs = gen12_emit_cmd_buf_wa(ce, cs); cs = gen12_emit_restore_scratch(ce, cs); + /* Wa_16013000631:dg2 */ + if (IS_DG2_GRAPHICS_STEP(ce->engine->i915, G10, STEP_B0, STEP_C0) || + IS_DG2_G11(ce->engine->i915)) + cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0); + return cs; } diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c index afb1cce9a352..19a01878fee3 100644 --- a/drivers/gpu/drm/i915/gt/intel_migrate.c +++ b/drivers/gpu/drm/i915/gt/intel_migrate.c @@ -13,7 +13,6 @@ struct insert_pte_data { u64 offset; - bool is_lmem; }; #define CHUNK_SZ SZ_8M /* ~1ms at 8GiB/s preemption delay */ @@ -40,7 +39,7 @@ static void insert_pte(struct i915_address_space *vm, struct insert_pte_data *d = data; vm->insert_page(vm, px_dma(pt), d->offset, I915_CACHE_NONE, - d->is_lmem ? PTE_LM : 0); + i915_gem_object_is_lmem(pt->base) ? 
PTE_LM : 0); d->offset += PAGE_SIZE; } @@ -134,8 +133,7 @@ static struct i915_address_space *migrate_vm(struct intel_gt *gt) goto err_vm; /* Now allow the GPU to rewrite the PTE via its own ppGTT */ - d.is_lmem = i915_gem_object_is_lmem(vm->vm.scratch[0]); - vm->vm.foreach(&vm->vm, base, base + sz, insert_pte, &d); + vm->vm.foreach(&vm->vm, base, d.offset - base, insert_pte, &d); } return &vm->vm; @@ -281,10 +279,10 @@ static int emit_pte(struct i915_request *rq, GEM_BUG_ON(GRAPHICS_VER(rq->engine->i915) < 8); /* Compute the page directory offset for the target address range */ - offset += (u64)rq->engine->instance << 32; offset >>= 12; offset *= sizeof(u64); offset += 2 * CHUNK_SZ; + offset += (u64)rq->engine->instance << 32; cs = intel_ring_begin(rq, 6); if (IS_ERR(cs)) diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index 15f9ada28a7a..9c253ba593c6 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -424,7 +424,7 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915, table->unused_entries_index = I915_MOCS_PTE; if (IS_DG2(i915)) { - if (IS_DG2_GT_STEP(i915, G10, STEP_A0, STEP_B0)) { + if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) { table->size = ARRAY_SIZE(dg2_mocs_table_g10_ax); table->table = dg2_mocs_table_g10_ax; } else { diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index 43093dd2d0c9..c3155ee58689 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -117,10 +117,17 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6) GEN6_RC_CTL_RC6_ENABLE | GEN6_RC_CTL_EI_MODE(1); - pg_enable = - GEN9_RENDER_PG_ENABLE | - GEN9_MEDIA_PG_ENABLE | - GEN11_MEDIA_SAMPLER_PG_ENABLE; + /* Wa_16011777198 - Render powergating must remain disabled */ + if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) || + IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0)) + pg_enable = + GEN9_MEDIA_PG_ENABLE | + GEN11_MEDIA_SAMPLER_PG_ENABLE; + else + pg_enable = + GEN9_RENDER_PG_ENABLE | + GEN9_MEDIA_PG_ENABLE | + GEN11_MEDIA_SAMPLER_PG_ENABLE; if (GRAPHICS_VER(gt->i915) >= 12) { for (i = 0; i < I915_MAX_VCS; i++) diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c index afb35d2e5c73..9ea49e0a27c0 100644 --- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c +++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c @@ -66,12 +66,16 @@ static void release_fake_lmem_bar(struct intel_memory_region *mem) DMA_ATTR_FORCE_CONTIGUOUS); } -static void +static int region_lmem_release(struct intel_memory_region *mem) { - intel_region_ttm_fini(mem); + int ret; + + ret = intel_region_ttm_fini(mem); io_mapping_fini(&mem->iomap); release_fake_lmem_bar(mem); + + return ret; } static int @@ -158,7 +162,7 @@ intel_gt_setup_fake_lmem(struct intel_gt *gt) static bool get_legacy_lowmem_region(struct intel_uncore *uncore, u64 *start, u32 *size) { - if (!IS_DG1_GT_STEP(uncore->i915, STEP_A0, STEP_C0)) + if (!IS_DG1_GRAPHICS_STEP(uncore->i915, STEP_A0, STEP_C0)) return false; *start = 0; @@ -231,7 +235,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt) return mem; err_region_put: - intel_memory_region_put(mem); + intel_memory_region_destroy(mem); return ERR_PTR(err); } diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 91200c43951f..7be0002d9d70 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ 
b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -6,7 +6,7 @@
 #include <linux/sched/mm.h>
 #include <linux/stop_machine.h>
-#include "display/intel_display_types.h"
+#include "display/intel_display.h"
 #include "display/intel_overlay.h"
 #include "gem/i915_gem_context.h"
@@ -297,13 +297,6 @@ static int gen6_reset_engines(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
 {
-	static const u32 hw_engine_mask[] = {
-		[RCS0]  = GEN6_GRDOM_RENDER,
-		[BCS0]  = GEN6_GRDOM_BLT,
-		[VCS0]  = GEN6_GRDOM_MEDIA,
-		[VCS1]  = GEN8_GRDOM_MEDIA2,
-		[VECS0] = GEN6_GRDOM_VECS,
-	};
 	struct intel_engine_cs *engine;
 	u32 hw_mask;
@@ -314,8 +307,7 @@ static int gen6_reset_engines(struct intel_gt *gt,
 		hw_mask = 0;
 		for_each_engine_masked(engine, gt, engine_mask, tmp) {
-			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
-			hw_mask |= hw_engine_mask[engine->id];
+			hw_mask |= engine->reset_domain;
 		}
 	}
@@ -492,22 +484,6 @@ static int gen11_reset_engines(struct intel_gt *gt,
			       intel_engine_mask_t engine_mask,
			       unsigned int retry)
 {
-	static const u32 hw_engine_mask[] = {
-		[RCS0]  = GEN11_GRDOM_RENDER,
-		[BCS0]  = GEN11_GRDOM_BLT,
-		[VCS0]  = GEN11_GRDOM_MEDIA,
-		[VCS1]  = GEN11_GRDOM_MEDIA2,
-		[VCS2]  = GEN11_GRDOM_MEDIA3,
-		[VCS3]  = GEN11_GRDOM_MEDIA4,
-		[VCS4]  = GEN11_GRDOM_MEDIA5,
-		[VCS5]  = GEN11_GRDOM_MEDIA6,
-		[VCS6]  = GEN11_GRDOM_MEDIA7,
-		[VCS7]  = GEN11_GRDOM_MEDIA8,
-		[VECS0] = GEN11_GRDOM_VECS,
-		[VECS1] = GEN11_GRDOM_VECS2,
-		[VECS2] = GEN11_GRDOM_VECS3,
-		[VECS3] = GEN11_GRDOM_VECS4,
-	};
 	struct intel_engine_cs *engine;
 	intel_engine_mask_t tmp;
 	u32 reset_mask, unlock_mask = 0;
@@ -518,8 +494,7 @@ static int gen11_reset_engines(struct intel_gt *gt,
 	} else {
 		reset_mask = 0;
 		for_each_engine_masked(engine, gt, engine_mask, tmp) {
-			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
-			reset_mask |= hw_engine_mask[engine->id];
+			reset_mask |= engine->reset_domain;
 			ret = gen11_lock_sfc(engine, &reset_mask, &unlock_mask);
 			if (ret)
 				goto sfc_unlock;
@@ -1367,20 +1342,27 @@ void intel_gt_handle_error(struct intel_gt *gt,
 	/* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
 	synchronize_rcu_expedited();
-	/* Prevent any other reset-engine attempt. */
-	for_each_engine(engine, gt, tmp) {
-		while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
-					&gt->reset.flags))
-			wait_on_bit(&gt->reset.flags,
-				    I915_RESET_ENGINE + engine->id,
-				    TASK_UNINTERRUPTIBLE);
+	/*
+	 * Prevent any other reset-engine attempt. We don't do this for GuC
+	 * submission, as the GuC owns the per-engine reset, not the i915.
+	 */
+	if (!intel_uc_uses_guc_submission(&gt->uc)) {
+		for_each_engine(engine, gt, tmp) {
+			while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
						&gt->reset.flags))
				wait_on_bit(&gt->reset.flags,
					    I915_RESET_ENGINE + engine->id,
					    TASK_UNINTERRUPTIBLE);
+		}
 	}
 	intel_gt_reset_global(gt, engine_mask, msg);
-	for_each_engine(engine, gt, tmp)
-		clear_bit_unlock(I915_RESET_ENGINE + engine->id,
-				 &gt->reset.flags);
+	if (!intel_uc_uses_guc_submission(&gt->uc)) {
+		for_each_engine(engine, gt, tmp)
+			clear_bit_unlock(I915_RESET_ENGINE + engine->id,
+					 &gt->reset.flags);
+	}
 	clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
 	smp_mb__after_atomic();
 	wake_up_all(&gt->reset.queue);
@@ -1441,6 +1423,7 @@ void intel_gt_set_wedged_on_init(struct intel_gt *gt)
 	BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES > I915_WEDGED_ON_INIT);
 	intel_gt_set_wedged(gt);
+	i915_disable_error_state(gt->i915, -ENODEV);
 	set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
 	/* Wedged on init is non-recoverable */
@@ -1450,6 +1433,7 @@ void intel_gt_set_wedged_on_fini(struct intel_gt *gt)
 {
 	intel_gt_set_wedged(gt);
+	i915_disable_error_state(gt->i915, -ENODEV);
 	set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
 	intel_gt_retire_requests(gt); /* cleanup any wedged requests */
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 586dca1731ce..3e6fac0340ef 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -1357,7 +1357,7 @@ retry:
 	err = i915_gem_object_lock(timeline->hwsp_ggtt->obj, &ww);
 	if (!err && gen7_wa_vma)
 		err = i915_gem_object_lock(gen7_wa_vma->obj, &ww);
-	if (!err && engine->legacy.ring->vma->obj)
+	if (!err)
 		err = i915_gem_object_lock(engine->legacy.ring->vma->obj, &ww);
 	if (!err)
 		err = intel_timeline_pin(timeline, &ww);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 5e275f8dda8c..07ff7ba7b2b7 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -936,8 +936,70 @@ void intel_rps_park(struct intel_rps *rps)
 	GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
 }
+u32 intel_rps_get_boost_frequency(struct intel_rps *rps)
+{
+	struct intel_guc_slpc *slpc;
+
+	if (rps_uses_slpc(rps)) {
+		slpc = rps_to_slpc(rps);
+
+		return slpc->boost_freq;
+	} else {
+		return intel_gpu_freq(rps, rps->boost_freq);
+	}
+}
+
+static int rps_set_boost_freq(struct intel_rps *rps, u32 val)
+{
+	bool boost = false;
+
+	/* Validate against (static) hardware limits */
+	val = intel_freq_opcode(rps, val);
+	if (val < rps->min_freq || val > rps->max_freq)
+		return -EINVAL;
+
+	mutex_lock(&rps->lock);
+	if (val != rps->boost_freq) {
+		rps->boost_freq = val;
+		boost = atomic_read(&rps->num_waiters);
+	}
+	mutex_unlock(&rps->lock);
+	if (boost)
+		schedule_work(&rps->work);
+
+	return 0;
+}
+
+int intel_rps_set_boost_frequency(struct intel_rps *rps, u32 freq)
+{
+	struct intel_guc_slpc *slpc;
+
+	if (rps_uses_slpc(rps)) {
+		slpc = rps_to_slpc(rps);
+
+		return intel_guc_slpc_set_boost_freq(slpc, freq);
+	} else {
+		return rps_set_boost_freq(rps, freq);
+	}
+}
+
+void intel_rps_dec_waiters(struct intel_rps *rps)
+{
+	struct intel_guc_slpc *slpc;
+
+	if (rps_uses_slpc(rps)) {
+		slpc = rps_to_slpc(rps);
+
+		intel_guc_slpc_dec_waiters(slpc);
+	} else {
+		atomic_dec(&rps->num_waiters);
+	}
+}
+
 void intel_rps_boost(struct i915_request *rq)
 {
+	struct intel_guc_slpc *slpc;
+
 	if (i915_request_signaled(rq) ||
i915_request_has_waitboost(rq)) return; @@ -945,6 +1007,16 @@ void intel_rps_boost(struct i915_request *rq) if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) { struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps; + if (rps_uses_slpc(rps)) { + slpc = rps_to_slpc(rps); + + /* Return if old value is non zero */ + if (!atomic_fetch_inc(&slpc->num_waiters)) + schedule_work(&slpc->boost_work); + + return; + } + if (atomic_fetch_inc(&rps->num_waiters)) return; diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h index 11960d64ca82..aee12f37d38a 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.h +++ b/drivers/gpu/drm/i915/gt/intel_rps.h @@ -23,6 +23,9 @@ void intel_rps_disable(struct intel_rps *rps); void intel_rps_park(struct intel_rps *rps); void intel_rps_unpark(struct intel_rps *rps); void intel_rps_boost(struct i915_request *rq); +void intel_rps_dec_waiters(struct intel_rps *rps); +u32 intel_rps_get_boost_frequency(struct intel_rps *rps); +int intel_rps_set_boost_frequency(struct intel_rps *rps, u32 freq); int intel_rps_set(struct intel_rps *rps, u8 val); void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive); diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index e1f362530889..3113266c286e 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -482,7 +482,7 @@ static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine, gen9_ctx_workarounds_init(engine, wal); /* WaToEnableHwFixForPushConstHWBug:kbl */ - if (IS_KBL_GT_STEP(i915, STEP_C0, STEP_FOREVER)) + if (IS_KBL_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER)) wa_masked_en(wal, COMMON_SLICE_CHICKEN2, GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); @@ -560,6 +560,22 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, /* * These settings aren't actually workarounds, but general tuning settings that + * need to be programmed on dg2 platform. + */ +static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) +{ + wa_write_clr_set(wal, GEN11_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK, + REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)); + wa_add(wal, + FF_MODE2, + FF_MODE2_TDS_TIMER_MASK, + FF_MODE2_TDS_TIMER_128, + 0, false); +} + +/* + * These settings aren't actually workarounds, but general tuning settings that * need to be programmed on several platforms. 
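+ *
+ * As a rough illustration of what one of these tuning entries does
+ * (semantics of the wa_* helpers assumed from the workaround framework,
+ * not stated in this patch), the GEN11_L3SQCREG5 update in
+ * dg2_ctx_gt_tuning_init() amounts to a read-modify-write of one bitfield:
+ *
+ *	u32 val = intel_uncore_read(uncore, GEN11_L3SQCREG5);
+ *	val &= ~L3_PWM_TIMER_INIT_VAL_MASK;
+ *	val |= REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f);
+ *	intel_uncore_write(uncore, GEN11_L3SQCREG5, val);
+ *
+ * with the field value re-checked on a later verification read.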
 */
 static void gen12_ctx_gt_tuning_init(struct intel_engine_cs *engine,
@@ -621,13 +637,6 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
	       FF_MODE2_GS_TIMER_MASK,
	       FF_MODE2_GS_TIMER_224,
	       0, false);
-
-	/*
-	 * Wa_14012131227:dg1
-	 * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
-	 */
-	wa_masked_en(wal, GEN7_COMMON_SLICE_CHICKEN1,
-		     GEN9_RHWO_OPTIMIZATION_DISABLE);
 }
 static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -644,6 +653,42 @@ ...
		     DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
 }
+static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
+				     struct i915_wa_list *wal)
+{
+	dg2_ctx_gt_tuning_init(engine, wal);
+
+	/* Wa_16011186671:dg2_g11 */
+	if (IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
+		wa_masked_dis(wal, VFLSKPD, DIS_MULT_MISS_RD_SQUASH);
+		wa_masked_en(wal, VFLSKPD, DIS_OVER_FETCH_CACHE);
+	}
+
+	if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) {
+		/* Wa_14010469329:dg2_g10 */
+		wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
+			     XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE);
+
+		/*
+		 * Wa_22010465075:dg2_g10
+		 * Wa_22010613112:dg2_g10
+		 * Wa_14010698770:dg2_g10
+		 */
+		wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
+			     GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
+	}
+
+	/* Wa_16013271637:dg2 */
+	wa_masked_en(wal, SLICE_COMMON_ECO_CHICKEN1,
+		     MSC_MSAA_REODER_BUF_BYPASS_DISABLE);
+
+	/* Wa_22012532006:dg2 */
+	if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_C0) ||
+	    IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0))
+		wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
+			     DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA);
+}
+
 static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
					  struct i915_wa_list *wal)
 {
@@ -730,7 +775,11 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
 	if (engine->class != RENDER_CLASS)
 		goto done;
-	if (IS_DG1(i915))
+	if (IS_DG2(i915))
+		dg2_ctx_workarounds_init(engine, wal);
+	else if (IS_XEHPSDV(i915))
+		; /* noop; none at this time */
+	else if (IS_DG1(i915))
 		dg1_ctx_workarounds_init(engine, wal);
 	else if (GRAPHICS_VER(i915) == 12)
 		gen12_ctx_workarounds_init(engine, wal);
@@ -878,10 +927,51 @@ hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
 }
 static void
+gen9_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+	const struct sseu_dev_info *sseu = &i915->gt.info.sseu;
+	unsigned int slice, subslice;
+	u32 mcr, mcr_mask;
+
+	GEM_BUG_ON(GRAPHICS_VER(i915) != 9);
+
+	/*
+	 * WaProgramMgsrForCorrectSliceSpecificMmioReads:gen9,glk,kbl,cml
+	 * Before any MMIO read into slice/subslice specific registers, MCR
+	 * packet control register needs to be programmed to point to any
+	 * enabled s/ss pair. Otherwise, incorrect values will be returned.
+	 * This means each subsequent MMIO read will be forwarded to a
+	 * specific s/ss combination, but this is OK since these registers
+	 * are consistent across s/ss in almost all cases. On the rare
+	 * occasions, such as INSTDONE, where this value is dependent
+	 * on s/ss combo, the read should be done with read_subslice_reg.
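+	 *
+	 * Worked example (illustrative values, not from this change): with
+	 * slice_mask = 0x1 and subslices 1-3 of slice 0 enabled (mask 0xe),
+	 *
+	 *	slice    = ffs(0x1) - 1;	/* = 0 */
+	 *	subslice = ffs(0xe) - 1;	/* = 1, lowest enabled subslice */
+	 *	mcr      = GEN8_MCR_SLICE(0) | GEN8_MCR_SUBSLICE(1);
+	 *
+	 * steering all subsequent slice/subslice-specific reads to s0/ss1.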
+	 */
+	slice = ffs(sseu->slice_mask) - 1;
+	GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask));
+	subslice = ffs(intel_sseu_get_subslices(sseu, slice));
+	GEM_BUG_ON(!subslice);
+	subslice--;
+
+	/*
+	 * We use GEN8_MCR..() macros to calculate the |mcr| value for
+	 * Gen9 to address WaProgramMgsrForCorrectSliceSpecificMmioReads
+	 */
+	mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
+	mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
+
+	drm_dbg(&i915->drm, "MCR slice:%d/subslice:%d = %x\n", slice, subslice, mcr);
+
+	wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
+}
+
+static void
 gen9_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
 {
 	struct drm_i915_private *i915 = gt->i915;
+	/* WaProgramMgsrForCorrectSliceSpecificMmioReads:glk,kbl,cml,gen9 */
+	gen9_wa_init_mcr(i915, wal);
+
 	/* WaDisableKillLogic:bxt,skl,kbl */
 	if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915))
 		wa_write_or(wal,
@@ -916,7 +1006,7 @@ skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
 		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
 	/* WaInPlaceDecompressionHang:skl */
-	if (IS_SKL_GT_STEP(gt->i915, STEP_A0, STEP_H0))
+	if (IS_SKL_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0))
 		wa_write_or(wal,
 			    GEN9_GAMT_ECO_REG_RW_IA,
 			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
@@ -928,7 +1018,7 @@ kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
 	gen9_gt_workarounds_init(gt, wal);
 	/* WaDisableDynamicCreditSharing:kbl */
-	if (IS_KBL_GT_STEP(gt->i915, 0, STEP_C0))
+	if (IS_KBL_GRAPHICS_STEP(gt->i915, 0, STEP_C0))
 		wa_write_or(wal,
 			    GAMT_CHKN_BIT_REG,
 			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
@@ -1134,9 +1224,18 @@ icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
 		    GAMT_CHKN_BIT_REG,
 		    GAMT_CHKN_DISABLE_L3_COH_PIPE);
+	/* Wa_1407352427:icl,ehl */
+	wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+		    PSDUNIT_CLKGATE_DIS);
+
+	/* Wa_1406680159:icl,ehl */
+	wa_write_or(wal,
+		    SUBSLICE_UNIT_LEVEL_CLKGATE,
+		    GWUNIT_CLKGATE_DIS);
+
 	/* Wa_1607087056:icl,ehl,jsl */
 	if (IS_ICELAKE(i915) ||
-	    IS_JSL_EHL_GT_STEP(i915, STEP_A0, STEP_B0))
+	    IS_JSL_EHL_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
 		wa_write_or(wal,
 			    SLICE_UNIT_LEVEL_CLKGATE,
 			    L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
@@ -1190,19 +1289,19 @@ tgl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
 	gen12_gt_workarounds_init(gt, wal);
 	/* Wa_1409420604:tgl */
-	if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0))
+	if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
 		wa_write_or(wal,
 			    SUBSLICE_UNIT_LEVEL_CLKGATE2,
 			    CPSSUNIT_CLKGATE_DIS);
 	/* Wa_1607087056:tgl also known as BUG:1409180338 */
-	if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0))
+	if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
 		wa_write_or(wal,
 			    SLICE_UNIT_LEVEL_CLKGATE,
 			    L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
 	/* Wa_1408615072:tgl[a0] */
-	if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0))
+	if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
 		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
 			    VSUNIT_CLKGATE_DIS_TGL);
 }
@@ -1215,7 +1314,7 @@ dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
 	gen12_gt_workarounds_init(gt, wal);
 	/* Wa_1607087056:dg1 */
-	if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0))
+	if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
 		wa_write_or(wal,
 			    SLICE_UNIT_LEVEL_CLKGATE,
 			    L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
@@ -1236,7 +1335,179 @@ static void
 xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
 {
+	struct drm_i915_private *i915 = gt->i915;
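+
+	/*
+	 * Note on the stepping checks used here and below: the
+	 * IS_*_GRAPHICS_STEP(i915, from, until) helpers are assumed to
+	 * match a half-open range [from, until), so e.g. STEP_A0..STEP_B0
+	 * covers A0 steppings only, while STEP_FOREVER leaves the range
+	 * open-ended.
+	 */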
+ + xehp_init_mcr(gt, wal); + + /* Wa_1409757795:xehpsdv */ + wa_write_or(wal, SCCGCTL94DC, CG3DDISURB); + + /* Wa_18011725039:xehpsdv */ + if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) { + wa_masked_dis(wal, MLTICTXCTL, TDONRENDER); + wa_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH); + } + + /* Wa_16011155590:xehpsdv */ + if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, + TSGUNIT_CLKGATE_DIS); + + /* Wa_14011780169:xehpsdv */ + if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_B0, STEP_FOREVER)) { + wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS | + GAMTLBVDBOX7_CLKGATE_DIS | + GAMTLBVDBOX6_CLKGATE_DIS | + GAMTLBVDBOX5_CLKGATE_DIS | + GAMTLBVDBOX4_CLKGATE_DIS | + GAMTLBVDBOX3_CLKGATE_DIS | + GAMTLBVDBOX2_CLKGATE_DIS | + GAMTLBVDBOX1_CLKGATE_DIS | + GAMTLBVDBOX0_CLKGATE_DIS | + GAMTLBKCR_CLKGATE_DIS | + GAMTLBGUC_CLKGATE_DIS | + GAMTLBBLT_CLKGATE_DIS); + wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS | + GAMTLBGFXA1_CLKGATE_DIS | + GAMTLBCOMPA0_CLKGATE_DIS | + GAMTLBCOMPA1_CLKGATE_DIS | + GAMTLBCOMPB0_CLKGATE_DIS | + GAMTLBCOMPB1_CLKGATE_DIS | + GAMTLBCOMPC0_CLKGATE_DIS | + GAMTLBCOMPC1_CLKGATE_DIS | + GAMTLBCOMPD0_CLKGATE_DIS | + GAMTLBCOMPD1_CLKGATE_DIS | + GAMTLBMERT_CLKGATE_DIS | + GAMTLBVEBOX3_CLKGATE_DIS | + GAMTLBVEBOX2_CLKGATE_DIS | + GAMTLBVEBOX1_CLKGATE_DIS | + GAMTLBVEBOX0_CLKGATE_DIS); + } + + /* Wa_14012362059:xehpsdv */ + wa_write_or(wal, GEN12_MERT_MOD_CTRL, FORCE_MISS_FTLB); + + /* Wa_16012725990:xehpsdv */ + if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_FOREVER)) + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, VFUNIT_CLKGATE_DIS); + + /* Wa_14011060649:xehpsdv */ + wa_14011060649(gt, wal); + + /* Wa_14014368820:xehpsdv */ + wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS | + GLOBAL_INVALIDATION_MODE); +} + +static void +dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) +{ + struct intel_engine_cs *engine; + int id; + xehp_init_mcr(gt, wal); + + /* Wa_14011060649:dg2 */ + wa_14011060649(gt, wal); + + /* + * Although there are per-engine instances of these registers, + * they technically exist outside the engine itself and are not + * impacted by engine resets. Furthermore, they're part of the + * GuC blacklist so trying to treat them as engine workarounds + * will result in GuC initialization failure and a wedged GPU. 
+ */ + for_each_engine(engine, gt, id) { + if (engine->class != VIDEO_DECODE_CLASS) + continue; + + /* Wa_16010515920:dg2_g10 */ + if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0)) + wa_write_or(wal, VDBOX_CGCTL3F18(engine->mmio_base), + ALNUNIT_CLKGATE_DIS); + } + + if (IS_DG2_G10(gt->i915)) { + /* Wa_22010523718:dg2 */ + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, + CG3DDISCFEG_CLKGATE_DIS); + + /* Wa_14011006942:dg2 */ + wa_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE, + DSS_ROUTER_CLKGATE_DIS); + } + + if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0)) { + /* Wa_14010680813:dg2_g10 */ + wa_write_or(wal, GEN12_GAMSTLB_CTRL, CONTROL_BLOCK_CLKGATE_DIS | + EGRESS_BLOCK_CLKGATE_DIS | TAG_BLOCK_CLKGATE_DIS); + + /* Wa_14010948348:dg2_g10 */ + wa_write_or(wal, UNSLCGCTL9430, MSQDUNIT_CLKGATE_DIS); + + /* Wa_14011037102:dg2_g10 */ + wa_write_or(wal, UNSLCGCTL9444, LTCDD_CLKGATE_DIS); + + /* Wa_14011371254:dg2_g10 */ + wa_write_or(wal, SLICE_UNIT_LEVEL_CLKGATE, NODEDSS_CLKGATE_DIS); + + /* Wa_14011431319:dg2_g10 */ + wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS | + GAMTLBVDBOX7_CLKGATE_DIS | + GAMTLBVDBOX6_CLKGATE_DIS | + GAMTLBVDBOX5_CLKGATE_DIS | + GAMTLBVDBOX4_CLKGATE_DIS | + GAMTLBVDBOX3_CLKGATE_DIS | + GAMTLBVDBOX2_CLKGATE_DIS | + GAMTLBVDBOX1_CLKGATE_DIS | + GAMTLBVDBOX0_CLKGATE_DIS | + GAMTLBKCR_CLKGATE_DIS | + GAMTLBGUC_CLKGATE_DIS | + GAMTLBBLT_CLKGATE_DIS); + wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS | + GAMTLBGFXA1_CLKGATE_DIS | + GAMTLBCOMPA0_CLKGATE_DIS | + GAMTLBCOMPA1_CLKGATE_DIS | + GAMTLBCOMPB0_CLKGATE_DIS | + GAMTLBCOMPB1_CLKGATE_DIS | + GAMTLBCOMPC0_CLKGATE_DIS | + GAMTLBCOMPC1_CLKGATE_DIS | + GAMTLBCOMPD0_CLKGATE_DIS | + GAMTLBCOMPD1_CLKGATE_DIS | + GAMTLBMERT_CLKGATE_DIS | + GAMTLBVEBOX3_CLKGATE_DIS | + GAMTLBVEBOX2_CLKGATE_DIS | + GAMTLBVEBOX1_CLKGATE_DIS | + GAMTLBVEBOX0_CLKGATE_DIS); + + /* Wa_14010569222:dg2_g10 */ + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, + GAMEDIA_CLKGATE_DIS); + + /* Wa_14011028019:dg2_g10 */ + wa_write_or(wal, SSMCGCTL9530, RTFUNIT_CLKGATE_DIS); + } + + if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0) || + IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0)) { + /* Wa_14012362059:dg2 */ + wa_write_or(wal, GEN12_MERT_MOD_CTRL, FORCE_MISS_FTLB); + } + + /* Wa_1509235366:dg2 */ + wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS | + GLOBAL_INVALIDATION_MODE); + + /* Wa_14014830051:dg2 */ + wa_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN); + + /* + * The following are not actually "workarounds" but rather + * recommended tuning settings documented in the bspec's + * performance guide section. 
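+	 *
+	 * (Reading aid, assumed from the workaround framework rather than
+	 * this patch: wa_write_or(wal, reg, bits) is shorthand for
+	 * wa_write_clr_set(wal, reg, bits, bits), i.e. an unmasked
+	 * read-modify-write that ORs 'bits' into the register and expects
+	 * them to stick on readback.)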
+ */ + wa_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS); + wa_write_or(wal, GEN12_SQCM, EN_32B_ACCESS); } static void @@ -1244,7 +1515,9 @@ gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal) { struct drm_i915_private *i915 = gt->i915; - if (IS_XEHPSDV(i915)) + if (IS_DG2(i915)) + dg2_gt_workarounds_init(gt, wal); + else if (IS_XEHPSDV(i915)) xehpsdv_gt_workarounds_init(gt, wal); else if (IS_DG1(i915)) dg1_gt_workarounds_init(gt, wal); @@ -1518,7 +1791,7 @@ static void cfl_whitelist_build(struct intel_engine_cs *engine) RING_FORCE_TO_NONPRIV_RANGE_4); } -static void cml_whitelist_build(struct intel_engine_cs *engine) +static void allow_read_ctx_timestamp(struct intel_engine_cs *engine) { struct i915_wa_list *w = &engine->whitelist; @@ -1526,6 +1799,11 @@ static void cml_whitelist_build(struct intel_engine_cs *engine) whitelist_reg_ext(w, RING_CTX_TIMESTAMP(engine->mmio_base), RING_FORCE_TO_NONPRIV_ACCESS_RD); +} + +static void cml_whitelist_build(struct intel_engine_cs *engine) +{ + allow_read_ctx_timestamp(engine); cfl_whitelist_build(engine); } @@ -1534,6 +1812,8 @@ static void icl_whitelist_build(struct intel_engine_cs *engine) { struct i915_wa_list *w = &engine->whitelist; + allow_read_ctx_timestamp(engine); + switch (engine->class) { case RENDER_CLASS: /* WaAllowUMDToModifyHalfSliceChicken7:icl */ @@ -1569,15 +1849,9 @@ static void icl_whitelist_build(struct intel_engine_cs *engine) /* hucStatus2RegOffset */ whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base), RING_FORCE_TO_NONPRIV_ACCESS_RD); - whitelist_reg_ext(w, - RING_CTX_TIMESTAMP(engine->mmio_base), - RING_FORCE_TO_NONPRIV_ACCESS_RD); break; default: - whitelist_reg_ext(w, - RING_CTX_TIMESTAMP(engine->mmio_base), - RING_FORCE_TO_NONPRIV_ACCESS_RD); break; } } @@ -1586,6 +1860,8 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine) { struct i915_wa_list *w = &engine->whitelist; + allow_read_ctx_timestamp(engine); + switch (engine->class) { case RENDER_CLASS: /* @@ -1602,16 +1878,17 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine) RING_FORCE_TO_NONPRIV_ACCESS_RD | RING_FORCE_TO_NONPRIV_RANGE_4); - /* Wa_1808121037:tgl */ + /* + * Wa_1808121037:tgl + * Wa_14012131227:dg1 + * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p + */ whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1); /* Wa_1806527549:tgl */ whitelist_reg(w, HIZ_CHICKEN); break; default: - whitelist_reg_ext(w, - RING_CTX_TIMESTAMP(engine->mmio_base), - RING_FORCE_TO_NONPRIV_ACCESS_RD); break; } } @@ -1623,13 +1900,46 @@ static void dg1_whitelist_build(struct intel_engine_cs *engine) tgl_whitelist_build(engine); /* GEN:BUG:1409280441:dg1 */ - if (IS_DG1_GT_STEP(engine->i915, STEP_A0, STEP_B0) && + if (IS_DG1_GRAPHICS_STEP(engine->i915, STEP_A0, STEP_B0) && (engine->class == RENDER_CLASS || engine->class == COPY_ENGINE_CLASS)) whitelist_reg_ext(w, RING_ID(engine->mmio_base), RING_FORCE_TO_NONPRIV_ACCESS_RD); } +static void xehpsdv_whitelist_build(struct intel_engine_cs *engine) +{ + allow_read_ctx_timestamp(engine); +} + +static void dg2_whitelist_build(struct intel_engine_cs *engine) +{ + struct i915_wa_list *w = &engine->whitelist; + + allow_read_ctx_timestamp(engine); + + switch (engine->class) { + case RENDER_CLASS: + /* + * Wa_1507100340:dg2_g10 + * + * This covers 4 registers which are next to one another : + * - PS_INVOCATION_COUNT + * - PS_INVOCATION_COUNT_UDW + * - PS_DEPTH_COUNT + * - PS_DEPTH_COUNT_UDW + */ + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) + whitelist_reg_ext(w, PS_INVOCATION_COUNT, + 
RING_FORCE_TO_NONPRIV_ACCESS_RD |
+					   RING_FORCE_TO_NONPRIV_RANGE_4);
+
+		break;
+	default:
+		break;
+	}
+}
+
 void intel_engine_init_whitelist(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *i915 = engine->i915;
@@ -1637,7 +1947,11 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
 	wa_init_start(w, "whitelist", engine->name);
-	if (IS_DG1(i915))
+	if (IS_DG2(i915))
+		dg2_whitelist_build(engine);
+	else if (IS_XEHPSDV(i915))
+		xehpsdv_whitelist_build(engine);
+	else if (IS_DG1(i915))
 		dg1_whitelist_build(engine);
 	else if (GRAPHICS_VER(i915) == 12)
 		tgl_whitelist_build(engine);
@@ -1711,13 +2025,119 @@ engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
				   CMD_CCTL_MOCS_OVERRIDE(mocs, mocs));
 	}
 }
+
+static bool needs_wa_1308578152(struct intel_engine_cs *engine)
+{
+	u64 dss_mask = intel_sseu_get_subslices(&engine->gt->info.sseu, 0);
+
+	return (dss_mask & GENMASK(GEN_DSS_PER_GSLICE - 1, 0)) == 0;
+}
+
 static void
 rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 {
 	struct drm_i915_private *i915 = engine->i915;
-	if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0) ||
-	    IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0)) {
+	if (IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
+		/* Wa_14013392000:dg2_g11 */
+		wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_ENABLE_LARGE_GRF_MODE);
+
+		/* Wa_16011620976:dg2_g11 */
+		wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
+	}
+
+	if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0) ||
+	    IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
+		/* Wa_14012419201:dg2 */
+		wa_masked_en(wal, GEN9_ROW_CHICKEN4,
+			     GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX);
+	}
+
+	if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_C0) ||
+	    IS_DG2_G11(engine->i915)) {
+		/*
+		 * Wa_22012826095:dg2
+		 * Wa_22013059131:dg2
+		 */
+		wa_write_clr_set(wal, LSC_CHICKEN_BIT_0_UDW,
+				 MAXREQS_PER_BANK,
+				 REG_FIELD_PREP(MAXREQS_PER_BANK, 2));
+
+		/* Wa_22013059131:dg2 */
+		wa_write_or(wal, LSC_CHICKEN_BIT_0,
+			    FORCE_1_SUB_MESSAGE_PER_FRAGMENT);
+	}
+
+	/* Wa_1308578152:dg2_g10 when first gslice is fused off */
+	if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_C0) &&
+	    needs_wa_1308578152(engine)) {
+		wa_masked_dis(wal, GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON,
+			      GEN12_REPLAY_MODE_GRANULARITY);
+	}
+
+	if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_FOREVER) ||
+	    IS_DG2_G11(engine->i915)) {
+		/* Wa_22013037850:dg2 */
+		wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
+			    DISABLE_128B_EVICTION_COMMAND_UDW);
+
+		/* Wa_22012856258:dg2 */
+		wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+			     GEN12_DISABLE_READ_SUPPRESSION);
+
+		/*
+		 * Wa_22010960976:dg2
+		 * Wa_14013347512:dg2
+		 */
+		wa_masked_dis(wal, GEN12_HDC_CHICKEN0,
+			      LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK);
+	}
+
+	if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) {
+		/*
+		 * Wa_1608949956:dg2_g10
+		 * Wa_14010198302:dg2_g10
+		 */
+		wa_masked_en(wal, GEN8_ROW_CHICKEN,
+			     MDQ_ARBITRATION_MODE | UGM_BACKUP_MODE);
+
+		/*
+		 * Wa_14010918519:dg2_g10
+		 *
+		 * LSC_CHICKEN_BIT_0 always reads back as 0 in this stepping,
+		 * so ignoring verification.
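+		 *
+		 * The trailing wa_add() arguments below are assumed to be
+		 * (clear, set, read_mask, masked_reg); read_mask = 0 records
+		 * the write but verifies no bits afterwards, which is the
+		 * point here, since a readback of this register cannot
+		 * succeed.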
+ */ + wa_add(wal, LSC_CHICKEN_BIT_0_UDW, 0, + FORCE_SLM_FENCE_SCOPE_TO_TILE | FORCE_UGM_FENCE_SCOPE_TO_TILE, + 0, false); + } + + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) { + /* Wa_22010430635:dg2 */ + wa_masked_en(wal, + GEN9_ROW_CHICKEN4, + GEN12_DISABLE_GRF_CLEAR); + + /* Wa_14010648519:dg2 */ + wa_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE); + } + + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_C0) || + IS_DG2_G11(engine->i915)) { + /* Wa_22012654132:dg2 */ + wa_add(wal, GEN10_CACHE_MODE_SS, 0, + _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC), + 0 /* write-only, so skip validation */, + true); + } + + /* Wa_14013202645:dg2 */ + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_C0) || + IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) + wa_write_or(wal, RT_CTRL, DIS_NULL_QUERY); + + if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) || + IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) { /* * Wa_1607138336:tgl[a0],dg1[a0] * Wa_1607063988:tgl[a0],dg1[a0] @@ -1727,7 +2147,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN12_DISABLE_POSH_BUSY_FF_DOP_CG); } - if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0)) { + if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) { /* * Wa_1606679103:tgl * (see also Wa_1606682166:icl) @@ -1762,7 +2182,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) } if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || - IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0) || + IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { /* Wa_1409804808:tgl,rkl,dg1[a0],adl-s,adl-p */ wa_masked_en(wal, GEN7_ROW_CHICKEN2, @@ -1775,8 +2195,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH); } - - if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0) || + if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { /* * Wa_1607030317:tgl @@ -1859,15 +2278,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS); - /* Wa_1407352427:icl,ehl */ - wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, - PSDUNIT_CLKGATE_DIS); - - /* Wa_1406680159:icl,ehl */ - wa_write_or(wal, - SUBSLICE_UNIT_LEVEL_CLKGATE, - GWUNIT_CLKGATE_DIS); - /* * Wa_1408767742:icl[a2..forever],ehl[all] * Wa_1605460711:icl[a0..c0] @@ -2138,7 +2548,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) struct drm_i915_private *i915 = engine->i915; /* WaKBLVECSSemaphoreWaitPoll:kbl */ - if (IS_KBL_GT_STEP(i915, STEP_A0, STEP_F0)) { + if (IS_KBL_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) { wa_write(wal, RING_SEMA_WAIT_POLL(engine->mmio_base), 1); diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index 8b89215afe46..bb99fc03f503 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -35,9 +35,31 @@ static void mock_timeline_unpin(struct intel_timeline *tl) atomic_dec(&tl->pin_count); } +static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size) +{ + struct i915_address_space *vm = &ggtt->vm; + struct drm_i915_private *i915 = vm->i915; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + + obj = i915_gem_object_create_internal(i915, size); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + vma = i915_vma_instance(obj, vm, NULL); + if 
(IS_ERR(vma)) + goto err; + + return vma; + +err: + i915_gem_object_put(obj); + return vma; +} + static struct intel_ring *mock_ring(struct intel_engine_cs *engine) { - const unsigned long sz = PAGE_SIZE / 2; + const unsigned long sz = PAGE_SIZE; struct intel_ring *ring; ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL); @@ -50,15 +72,11 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine) ring->vaddr = (void *)(ring + 1); atomic_set(&ring->pin_count, 1); - ring->vma = i915_vma_alloc(); - if (!ring->vma) { + ring->vma = create_ring_vma(engine->gt->ggtt, PAGE_SIZE); + if (IS_ERR(ring->vma)) { kfree(ring); return NULL; } - i915_active_init(&ring->vma->active, NULL, NULL, 0); - __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(ring->vma)); - __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &ring->vma->node.flags); - ring->vma->node.size = sz; intel_ring_update_space(ring); @@ -67,8 +85,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine) static void mock_ring_free(struct intel_ring *ring) { - i915_active_fini(&ring->vma->active); - i915_vma_free(ring->vma); + i915_vma_put(ring->vma); kfree(ring); } @@ -125,6 +142,7 @@ static void mock_context_unpin(struct intel_context *ce) static void mock_context_post_unpin(struct intel_context *ce) { + i915_vma_unpin(ce->ring->vma); } static void mock_context_destroy(struct kref *ref) @@ -169,7 +187,7 @@ static int mock_context_alloc(struct intel_context *ce) static int mock_context_pre_pin(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **unused) { - return 0; + return i915_vma_pin_ww(ce->ring->vma, ww, 0, 0, PIN_GLOBAL | PIN_HIGH); } static int mock_context_pin(struct intel_context *ce, void *unused) diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c index 75569666105d..75f6efc9882f 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c @@ -214,6 +214,31 @@ static int live_engine_timestamps(void *arg) return 0; } +static int __spin_until_busier(struct intel_engine_cs *engine, ktime_t busyness) +{ + ktime_t start, unused, dt; + + if (!intel_engine_uses_guc(engine)) + return 0; + + /* + * In GuC mode of submission, the busyness stats may get updated after + * the batch starts running. Poll for a change in busyness and timeout + * after 500 us. 
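+	 * Both dt and the 500000 cutoff below are in ktime_t units,
+	 * i.e. nanoseconds: 500000 ns == 500 us.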
+	 */
+	start = ktime_get();
+	while (intel_engine_get_busy_time(engine, &unused) == busyness) {
+		dt = ktime_get() - start;
+		if (dt > 500000) {
+			pr_err("active wait timed out %lld\n", dt);
+			ENGINE_TRACE(engine, "active wait time out %lld\n", dt);
+			return -ETIME;
+		}
+	}
+
+	return 0;
+}
+
 static int live_engine_busy_stats(void *arg)
 {
 	struct intel_gt *gt = arg;
@@ -232,6 +257,7 @@ static int live_engine_busy_stats(void *arg)
 	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
 	for_each_engine(engine, gt, id) {
 		struct i915_request *rq;
+		ktime_t busyness, dummy;
 		ktime_t de, dt;
 		ktime_t t[2];
@@ -274,16 +300,23 @@ static int live_engine_busy_stats(void *arg)
 		}
 		i915_request_add(rq);
+		busyness = intel_engine_get_busy_time(engine, &dummy);
 		if (!igt_wait_for_spinner(&spin, rq)) {
 			intel_gt_set_wedged(engine->gt);
 			err = -ETIME;
 			goto end;
 		}
+		err = __spin_until_busier(engine, busyness);
+		if (err) {
+			GEM_TRACE_DUMP();
+			goto end;
+		}
+
 		ENGINE_TRACE(engine, "measuring busy time\n");
 		preempt_disable();
 		de = intel_engine_get_busy_time(engine, &t[0]);
-		udelay(100);
+		mdelay(10);
 		de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de);
 		preempt_enable();
 		dt = ktime_sub(t[1], t[0]);
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index b9441217ca3d..55c5cdb99f45 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -43,7 +43,7 @@ static void measure_clocks(struct intel_engine_cs *engine,
 	int i;
 	for (i = 0; i < 5; i++) {
-		preempt_disable();
+		local_irq_disable();
 		cycles[i] = -ENGINE_READ_FW(engine, RING_TIMESTAMP);
 		dt[i] = ktime_get();
@@ -51,7 +51,7 @@ static void measure_clocks(struct intel_engine_cs *engine,
 		dt[i] = ktime_sub(ktime_get(), dt[i]);
 		cycles[i] += ENGINE_READ_FW(engine, RING_TIMESTAMP);
-		preempt_enable();
+		local_irq_enable();
 	}
 	/* Use the median of both cycle/dt; close enough */
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 7e2d99dd012d..e5ad4d5a91c0 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -471,7 +471,8 @@ static int igt_reset_nop_engine(void *arg)
 		count = 0;
 		st_engine_heartbeat_disable(engine);
-		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+		GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
+					    &gt->reset.flags));
 		do {
 			int i;
@@ -528,7 +529,7 @@ static int igt_reset_nop_engine(void *arg)
 				break;
 			}
 		} while (time_before(jiffies, end_time));
-		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+		clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
 		st_engine_heartbeat_enable(engine);
 		pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
@@ -582,7 +583,8 @@ static int igt_reset_fail_engine(void *arg)
 		}
 		st_engine_heartbeat_disable(engine);
-		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+		GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
+					    &gt->reset.flags));
 		force_reset_timeout(engine);
 		err = intel_engine_reset(engine, NULL);
@@ -679,7 +681,7 @@ out:
 		pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
 skip:
-		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+		clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
 		st_engine_heartbeat_enable(engine);
 		intel_context_put(ce);
@@ -734,7 +736,8 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
 		reset_engine_count = i915_reset_engine_count(global, engine);
 		st_engine_heartbeat_disable(engine);
-		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+		GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
+					    &gt->reset.flags));
 		count = 0;
 		do {
 			struct i915_request *rq = NULL;
@@ -824,7 +827,7 @@ restore:
 			if (err)
 				break;
 		} while (time_before(jiffies, end_time));
-		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+		clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
 		st_engine_heartbeat_enable(engine);
 		pr_info("%s: Completed %lu %s resets\n",
 			engine->name, count, active ? "active" : "idle");
@@ -1042,7 +1045,8 @@ static int __igt_reset_engines(struct intel_gt *gt,
 		yield(); /* start all threads before we begin */
 		st_engine_heartbeat_disable_no_pm(engine);
-		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+		GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
+					    &gt->reset.flags));
 		do {
 			struct i915_request *rq = NULL;
 			struct intel_selftest_saved_policy saved;
@@ -1165,7 +1169,7 @@ restore:
 			if (err)
 				break;
 		} while (time_before(jiffies, end_time));
-		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+		clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
 		st_engine_heartbeat_enable_no_pm(engine);
 		pr_info("i915_reset_engine(%s:%s): %lu resets\n",
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index 12ef2837c89b..e21787301bbd 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -49,6 +49,7 @@ static int copy(struct intel_migrate *migrate,
 	if (IS_ERR(src))
 		return 0;
+	sz = src->base.size;
 	dst = i915_gem_object_create_internal(i915, sz);
 	if (IS_ERR(dst))
 		goto err_free_src;
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
index ba10bd374cee..fe5d7d261797 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
@@ -144,6 +144,7 @@ enum intel_guc_action {
 	INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600,
 	INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
 	INTEL_GUC_ACTION_RESET_CLIENT = 0x5507,
+	INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
 	INTEL_GUC_ACTION_LIMIT
 };
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 31cf9fb48c7e..1cb46098030d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -138,6 +138,8 @@ struct intel_guc {
 	u32 ads_regset_size;
 	/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
 	u32 ads_golden_ctxt_size;
+	/** @ads_engine_usage_size: size of engine usage in the ADS */
+	u32 ads_engine_usage_size;
 	/** @lrc_desc_pool: object allocated to hold the GuC LRC descriptor pool */
 	struct i915_vma *lrc_desc_pool;
@@ -172,6 +174,34 @@ struct intel_guc {
 	/** @send_mutex: used to serialize the intel_guc_send actions */
 	struct mutex send_mutex;
+
+	/**
+	 * @timestamp: GT timestamp object that stores a copy of the timestamp
+	 * and adjusts it for overflow using a worker.
+	 */
+	struct {
+		/**
+		 * @lock: Lock protecting the below fields and the engine stats.
+		 */
+		spinlock_t lock;
+
+		/**
+		 * @gt_stamp: 64 bit extended value of the GT timestamp.
+		 */
+		u64 gt_stamp;
+
+		/**
+		 * @ping_delay: Period for polling the GT timestamp for
+		 * overflow.
+		 */
+		unsigned long ping_delay;
+
+		/**
+		 * @work: Periodic work to adjust GT timestamp, engine and
+		 * context usage for overflows.
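+		 * Back-of-the-envelope check, assuming the 19.2 MHz
+		 * reference clock quoted in intel_guc_submission.c: the
+		 * 32 bit timestamp wraps every U32_MAX / 19.2e6 ~= 223 s,
+		 * so polling at 1/8th of that gives a period of roughly
+		 * 28 s.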
+ */ + struct delayed_work work; + } timestamp; }; static inline struct intel_guc *log_to_guc(struct intel_guc_log *log) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index 621c893a009f..1a1edae67e4e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -26,6 +26,8 @@ * | guc_policies | * +---------------------------------------+ * | guc_gt_system_info | + * +---------------------------------------+ + * | guc_engine_usage | * +---------------------------------------+ <== static * | guc_mmio_reg[countA] (engine 0.0) | * | guc_mmio_reg[countB] (engine 0.1) | @@ -47,6 +49,7 @@ struct __guc_ads_blob { struct guc_ads ads; struct guc_policies policies; struct guc_gt_system_info system_info; + struct guc_engine_usage engine_usage; /* From here on, location is dynamic! Refer to above diagram. */ struct guc_mmio_reg regset[0]; } __packed; @@ -628,3 +631,21 @@ void intel_guc_ads_reset(struct intel_guc *guc) guc_ads_private_data_reset(guc); } + +u32 intel_guc_engine_usage_offset(struct intel_guc *guc) +{ + struct __guc_ads_blob *blob = guc->ads_blob; + u32 base = intel_guc_ggtt_offset(guc, guc->ads_vma); + u32 offset = base + ptr_offset(blob, engine_usage); + + return offset; +} + +struct guc_engine_usage_record *intel_guc_engine_usage(struct intel_engine_cs *engine) +{ + struct intel_guc *guc = &engine->gt->uc.guc; + struct __guc_ads_blob *blob = guc->ads_blob; + u8 guc_class = engine_class_to_guc_class(engine->class); + + return &blob->engine_usage.engines[guc_class][ilog2(engine->logical_mask)]; +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h index 3d85051d57e4..e74c110facff 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h @@ -6,8 +6,11 @@ #ifndef _INTEL_GUC_ADS_H_ #define _INTEL_GUC_ADS_H_ +#include <linux/types.h> + struct intel_guc; struct drm_printer; +struct intel_engine_cs; int intel_guc_ads_create(struct intel_guc *guc); void intel_guc_ads_destroy(struct intel_guc *guc); @@ -15,5 +18,7 @@ void intel_guc_ads_init_late(struct intel_guc *guc); void intel_guc_ads_reset(struct intel_guc *guc); void intel_guc_ads_print_policy_info(struct intel_guc *guc, struct drm_printer *p); +struct guc_engine_usage_record *intel_guc_engine_usage(struct intel_engine_cs *engine); +u32 intel_guc_engine_usage_offset(struct intel_guc *guc); #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h index 722933e26347..7072e30e99f4 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -294,6 +294,19 @@ struct guc_ads { u32 reserved[15]; } __packed; +/* Engine usage stats */ +struct guc_engine_usage_record { + u32 current_context_index; + u32 last_switch_in_stamp; + u32 reserved0; + u32 total_runtime; + u32 reserved1[4]; +} __packed; + +struct guc_engine_usage { + struct guc_engine_usage_record engines[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS]; +} __packed; + /* GuC logging structures */ enum guc_log_buffer_type { diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c index 65a3e7fdb2b2..22c1c12369f2 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c @@ -79,29 +79,6 @@ static void slpc_mem_set_disabled(struct slpc_shared_data *data, slpc_mem_set_param(data, enable_id, 0); } -int 
intel_guc_slpc_init(struct intel_guc_slpc *slpc) -{ - struct intel_guc *guc = slpc_to_guc(slpc); - struct drm_i915_private *i915 = slpc_to_i915(slpc); - u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data)); - int err; - - GEM_BUG_ON(slpc->vma); - - err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr); - if (unlikely(err)) { - drm_err(&i915->drm, - "Failed to allocate SLPC struct (err=%pe)\n", - ERR_PTR(err)); - return err; - } - - slpc->max_freq_softlimit = 0; - slpc->min_freq_softlimit = 0; - - return err; -} - static u32 slpc_get_state(struct intel_guc_slpc *slpc) { struct slpc_shared_data *data; @@ -203,6 +180,86 @@ static int slpc_unset_param(struct intel_guc_slpc *slpc, return guc_action_slpc_unset_param(guc, id); } +static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq) +{ + struct drm_i915_private *i915 = slpc_to_i915(slpc); + struct intel_guc *guc = slpc_to_guc(slpc); + intel_wakeref_t wakeref; + int ret = 0; + + lockdep_assert_held(&slpc->lock); + + if (!intel_guc_is_ready(guc)) + return -ENODEV; + + /* + * This function is a little different as compared to + * intel_guc_slpc_set_min_freq(). Softlimit will not be updated + * here since this is used to temporarily change min freq, + * for example, during a waitboost. Caller is responsible for + * checking bounds. + */ + + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { + ret = slpc_set_param(slpc, + SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ, + freq); + if (ret) + drm_err(&i915->drm, "Unable to force min freq to %u: %d", + freq, ret); + } + + return ret; +} + +static void slpc_boost_work(struct work_struct *work) +{ + struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work); + + /* + * Raise min freq to boost. It's possible that + * this is greater than current max. But it will + * certainly be limited by RP0. An error setting + * the min param is not fatal. 
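+	 *
+	 * Pieced together from this series, the boost flow is:
+	 * intel_rps_boost() increments slpc->num_waiters and schedules
+	 * this worker only on the 0 -> 1 transition; the worker then
+	 * forces the min frequency up to slpc->boost_freq, and
+	 * intel_guc_slpc_dec_waiters() restores the softlimit once the
+	 * last waiter is gone.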
+ */ + mutex_lock(&slpc->lock); + if (atomic_read(&slpc->num_waiters)) { + slpc_force_min_freq(slpc, slpc->boost_freq); + slpc->num_boosts++; + } + mutex_unlock(&slpc->lock); +} + +int intel_guc_slpc_init(struct intel_guc_slpc *slpc) +{ + struct intel_guc *guc = slpc_to_guc(slpc); + struct drm_i915_private *i915 = slpc_to_i915(slpc); + u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data)); + int err; + + GEM_BUG_ON(slpc->vma); + + err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr); + if (unlikely(err)) { + drm_err(&i915->drm, + "Failed to allocate SLPC struct (err=%pe)\n", + ERR_PTR(err)); + return err; + } + + slpc->max_freq_softlimit = 0; + slpc->min_freq_softlimit = 0; + + slpc->boost_freq = 0; + atomic_set(&slpc->num_waiters, 0); + slpc->num_boosts = 0; + + mutex_init(&slpc->lock); + INIT_WORK(&slpc->boost_work, slpc_boost_work); + + return err; +} + static const char *slpc_global_state_to_string(enum slpc_global_state state) { switch (state) { @@ -393,7 +450,11 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val) val > slpc->max_freq_softlimit) return -EINVAL; + /* Need a lock now since waitboost can be modifying min as well */ + mutex_lock(&slpc->lock); + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { + ret = slpc_set_param(slpc, SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ, val); @@ -406,6 +467,8 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val) if (!ret) slpc->min_freq_softlimit = val; + mutex_unlock(&slpc->lock); + return ret; } @@ -522,6 +585,9 @@ static void slpc_get_rp_values(struct intel_guc_slpc *slpc) GT_FREQUENCY_MULTIPLIER; slpc->min_freq = REG_FIELD_GET(RPN_CAP_MASK, rp_state_cap) * GT_FREQUENCY_MULTIPLIER; + + if (!slpc->boost_freq) + slpc->boost_freq = slpc->rp0_freq; } /* @@ -588,6 +654,47 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc) return 0; } +int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val) +{ + int ret = 0; + + if (val < slpc->min_freq || val > slpc->rp0_freq) + return -EINVAL; + + mutex_lock(&slpc->lock); + + if (slpc->boost_freq != val) { + /* Apply only if there are active waiters */ + if (atomic_read(&slpc->num_waiters)) { + ret = slpc_force_min_freq(slpc, val); + if (ret) { + ret = -EIO; + goto done; + } + } + + slpc->boost_freq = val; + } + +done: + mutex_unlock(&slpc->lock); + return ret; +} + +void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc) +{ + /* + * Return min back to the softlimit. + * This is called during request retire, + * so we don't need to fail that if the + * set_param fails. 
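+	 * For example, with two overlapping waitboosts: inc 0 -> 1
+	 * triggers the boost, inc 1 -> 2 is a no-op, dec 2 -> 1 is a
+	 * no-op, and only the final dec 1 -> 0 (atomic_dec_and_test()
+	 * returning true) drops min back to min_freq_softlimit.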
+ */ + mutex_lock(&slpc->lock); + if (atomic_dec_and_test(&slpc->num_waiters)) + slpc_force_min_freq(slpc, slpc->min_freq_softlimit); + mutex_unlock(&slpc->lock); +} + int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p) { struct drm_i915_private *i915 = slpc_to_i915(slpc); @@ -611,6 +718,8 @@ int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p slpc_decode_max_freq(slpc)); drm_printf(p, "\tMin freq: %u MHz\n", slpc_decode_min_freq(slpc)); + drm_printf(p, "\twaitboosts: %u\n", + slpc->num_boosts); } } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h index e45054d5b9b4..0caa8fee3c04 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h @@ -34,9 +34,12 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc); void intel_guc_slpc_fini(struct intel_guc_slpc *slpc); int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val); int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val); +int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val); int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val); int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val); int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p); void intel_guc_pm_intrmsk_enable(struct intel_gt *gt); +void intel_guc_slpc_boost(struct intel_guc_slpc *slpc); +void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc); #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h index 41d13527666f..bf5b9a563c09 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h @@ -6,6 +6,9 @@ #ifndef _INTEL_GUC_SLPC_TYPES_H_ #define _INTEL_GUC_SLPC_TYPES_H_ +#include <linux/atomic.h> +#include <linux/workqueue.h> +#include <linux/mutex.h> #include <linux/types.h> #define SLPC_RESET_TIMEOUT_MS 5 @@ -20,10 +23,20 @@ struct intel_guc_slpc { u32 min_freq; u32 rp0_freq; u32 rp1_freq; + u32 boost_freq; /* frequency softlimits */ u32 min_freq_softlimit; u32 max_freq_softlimit; + + /* Protects set/reset of boost freq + * and value of num_waiters + */ + struct mutex lock; + + struct work_struct boost_work; + atomic_t num_waiters; + u32 num_boosts; }; #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 38b47e73e35d..1f9d4fde421f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -13,6 +13,7 @@ #include "gt/intel_engine_heartbeat.h" #include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" +#include "gt/intel_gt_clock_utils.h" #include "gt/intel_gt_irq.h" #include "gt/intel_gt_pm.h" #include "gt/intel_gt_requests.h" @@ -21,6 +22,7 @@ #include "gt/intel_mocs.h" #include "gt/intel_ring.h" +#include "intel_guc_ads.h" #include "intel_guc_submission.h" #include "i915_drv.h" @@ -1077,6 +1079,271 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc) xa_unlock_irqrestore(&guc->context_lookup, flags); } +/* + * GuC stores busyness stats for each engine at context in/out boundaries. A + * context 'in' logs execution start time, 'out' adds in -> out delta to total. + * i915/kmd accesses 'start', 'total' and 'context id' from memory shared with + * GuC. + * + * __i915_pmu_event_read samples engine busyness. 
When sampling, if context id + * is valid (!= ~0) and start is non-zero, the engine is considered to be + * active. For an active engine, total busyness = total + (now - start), where + * 'now' is the time at which the busyness is sampled. For an inactive engine, + * total busyness = total. + * + * All times are captured from GUCPMTIMESTAMP reg and are in gt clock domain. + * + * The start and total values provided by GuC are 32 bits and wrap around in a + * few minutes. Since perf pmu provides busyness as 64 bit monotonically + * increasing ns values, there is a need for this implementation to account for + * overflows and extend the GuC provided values to 64 bits before returning + * busyness to the user. In order to do that, a worker runs periodically with + * a period of 1/8th of the time it takes for the timestamp to wrap (i.e. once in + * 27 seconds for a gt clock frequency of 19.2 MHz). + */ + +#define WRAP_TIME_CLKS U32_MAX +#define POLL_TIME_CLKS (WRAP_TIME_CLKS >> 3) + +static void +__extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start) +{ + u32 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp); + u32 gt_stamp_last = lower_32_bits(guc->timestamp.gt_stamp); + + if (new_start == lower_32_bits(*prev_start)) + return; + + if (new_start < gt_stamp_last && + (new_start - gt_stamp_last) <= POLL_TIME_CLKS) + gt_stamp_hi++; + + if (new_start > gt_stamp_last && + (gt_stamp_last - new_start) <= POLL_TIME_CLKS && gt_stamp_hi) + gt_stamp_hi--; + + *prev_start = ((u64)gt_stamp_hi << 32) | new_start; +} + +static void guc_update_engine_gt_clks(struct intel_engine_cs *engine) +{ + struct guc_engine_usage_record *rec = intel_guc_engine_usage(engine); + struct intel_engine_guc_stats *stats = &engine->stats.guc; + struct intel_guc *guc = &engine->gt->uc.guc; + u32 last_switch = rec->last_switch_in_stamp; + u32 ctx_id = rec->current_context_index; + u32 total = rec->total_runtime; + + lockdep_assert_held(&guc->timestamp.lock); + + stats->running = ctx_id != ~0U && last_switch; + if (stats->running) + __extend_last_switch(guc, &stats->start_gt_clk, last_switch); + + /* + * Instead of adjusting the total for overflow, just add the + * difference from the previous sample to stats->total_gt_clks + */ + if (total && total != ~0U) { + stats->total_gt_clks += (u32)(total - stats->prev_total); + stats->prev_total = total; + } +} + +static void guc_update_pm_timestamp(struct intel_guc *guc, + struct intel_engine_cs *engine, + ktime_t *now) +{ + u32 gt_stamp_now, gt_stamp_hi; + + lockdep_assert_held(&guc->timestamp.lock); + + gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp); + gt_stamp_now = intel_uncore_read(engine->uncore, + RING_TIMESTAMP(engine->mmio_base)); + *now = ktime_get(); + + if (gt_stamp_now < lower_32_bits(guc->timestamp.gt_stamp)) + gt_stamp_hi++; + + guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_now; +} + +/* + * Unlike the execlist mode of submission, total and active times are in terms of + * gt clocks. The *now parameter is retained to return the cpu time at which the + * busyness was sampled.
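+ *
+ * As a sketch of the math (names as above), the value returned below
+ * is roughly:
+ *
+ *   total = intel_gt_clock_interval_to_ns(gt, stats->total_gt_clks);
+ *   if (stats->running)
+ *           total += intel_gt_clock_interval_to_ns(gt,
+ *                   guc->timestamp.gt_stamp - stats->start_gt_clk);
+ *
+ * i.e. the accumulated busyness plus the currently running slice,
+ * both converted from gt clocks to ns.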
+ */ +static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now) +{ + struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc; + struct i915_gpu_error *gpu_error = &engine->i915->gpu_error; + struct intel_gt *gt = engine->gt; + struct intel_guc *guc = &gt->uc.guc; + u64 total, gt_stamp_saved; + unsigned long flags; + u32 reset_count; + bool in_reset; + + spin_lock_irqsave(&guc->timestamp.lock, flags); + + /* + * If a reset happened, we risk reading partially updated engine + * busyness from GuC, so we just use the driver stored copy of busyness. + * Synchronize with gt reset using reset_count and the + * I915_RESET_BACKOFF flag. Note that reset flow updates the reset_count + * after I915_RESET_BACKOFF flag, so ensure that the reset_count is + * usable by checking the flag afterwards. + */ + reset_count = i915_reset_count(gpu_error); + in_reset = test_bit(I915_RESET_BACKOFF, &gt->reset.flags); + + *now = ktime_get(); + + /* + * The active busyness depends on start_gt_clk and gt_stamp. + * gt_stamp is updated by i915 only when gt is awake and the + * start_gt_clk is derived from GuC state. To get a consistent + * view of activity, we query the GuC state only if gt is awake. + */ + if (intel_gt_pm_get_if_awake(gt) && !in_reset) { + stats_saved = *stats; + gt_stamp_saved = guc->timestamp.gt_stamp; + guc_update_engine_gt_clks(engine); + guc_update_pm_timestamp(guc, engine, now); + intel_gt_pm_put_async(gt); + if (i915_reset_count(gpu_error) != reset_count) { + *stats = stats_saved; + guc->timestamp.gt_stamp = gt_stamp_saved; + } + } + + total = intel_gt_clock_interval_to_ns(gt, stats->total_gt_clks); + if (stats->running) { + u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk; + + total += intel_gt_clock_interval_to_ns(gt, clk); + } + + spin_unlock_irqrestore(&guc->timestamp.lock, flags); + + return ns_to_ktime(total); +} + +static void __reset_guc_busyness_stats(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + struct intel_engine_cs *engine; + enum intel_engine_id id; + unsigned long flags; + ktime_t unused; + + cancel_delayed_work_sync(&guc->timestamp.work); + + spin_lock_irqsave(&guc->timestamp.lock, flags); + + for_each_engine(engine, gt, id) { + guc_update_pm_timestamp(guc, engine, &unused); + guc_update_engine_gt_clks(engine); + engine->stats.guc.prev_total = 0; + } + + spin_unlock_irqrestore(&guc->timestamp.lock, flags); +} + +static void __update_guc_busyness_stats(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + struct intel_engine_cs *engine; + enum intel_engine_id id; + unsigned long flags; + ktime_t unused; + + spin_lock_irqsave(&guc->timestamp.lock, flags); + for_each_engine(engine, gt, id) { + guc_update_pm_timestamp(guc, engine, &unused); + guc_update_engine_gt_clks(engine); + } + spin_unlock_irqrestore(&guc->timestamp.lock, flags); +} + +static void guc_timestamp_ping(struct work_struct *wrk) +{ + struct intel_guc *guc = container_of(wrk, typeof(*guc), + timestamp.work.work); + struct intel_uc *uc = container_of(guc, typeof(*uc), guc); + struct intel_gt *gt = guc_to_gt(guc); + intel_wakeref_t wakeref; + int srcu, ret; + + /* + * Synchronize with gt reset to make sure the worker does not + * corrupt the engine/guc stats.
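+ *
+ * For reference, the ping period programmed in
+ * intel_guc_submission_init() below is
+ * (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ, i.e. 1/8th of the
+ * 32-bit timestamp wrap time (roughly 27 seconds at a 19.2 MHz gt
+ * clock), so successive samples are taken well within one wrap.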
+ */ + ret = intel_gt_reset_trylock(gt, &srcu); + if (ret) + return; + + with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) + __update_guc_busyness_stats(guc); + + intel_gt_reset_unlock(gt, srcu); + + mod_delayed_work(system_highpri_wq, &guc->timestamp.work, + guc->timestamp.ping_delay); +} + +static int guc_action_enable_usage_stats(struct intel_guc *guc) +{ + u32 offset = intel_guc_engine_usage_offset(guc); + u32 action[] = { + INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF, + offset, + 0, + }; + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} + +static void guc_init_engine_stats(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + intel_wakeref_t wakeref; + + mod_delayed_work(system_highpri_wq, &guc->timestamp.work, + guc->timestamp.ping_delay); + + with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) { + int ret = guc_action_enable_usage_stats(guc); + + if (ret) + drm_err(&gt->i915->drm, + "Failed to enable usage stats: %d!\n", ret); + } +} + +void intel_guc_busyness_park(struct intel_gt *gt) +{ + struct intel_guc *guc = &gt->uc.guc; + + if (!guc_submission_initialized(guc)) + return; + + cancel_delayed_work(&guc->timestamp.work); + __update_guc_busyness_stats(guc); +} + +void intel_guc_busyness_unpark(struct intel_gt *gt) +{ + struct intel_guc *guc = &gt->uc.guc; + + if (!guc_submission_initialized(guc)) + return; + + mod_delayed_work(system_highpri_wq, &guc->timestamp.work, + guc->timestamp.ping_delay); +} + static inline bool submission_disabled(struct intel_guc *guc) { @@ -1138,6 +1405,7 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc) intel_gt_park_heartbeats(guc_to_gt(guc)); disable_submission(guc); guc->interrupts.disable(guc); + __reset_guc_busyness_stats(guc); /* Flush IRQ handler */ spin_lock_irq(&guc_to_gt(guc)->irq_lock); @@ -1484,6 +1752,7 @@ static void destroyed_worker_func(struct work_struct *w); */ int intel_guc_submission_init(struct intel_guc *guc) { + struct intel_gt *gt = guc_to_gt(guc); int ret; if (guc->lrc_desc_pool) @@ -1512,6 +1781,10 @@ int intel_guc_submission_init(struct intel_guc *guc) if (!guc->submission_state.guc_ids_bitmap) return -ENOMEM; + spin_lock_init(&guc->timestamp.lock); + INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping); + guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ; + return 0; } @@ -3080,8 +3353,8 @@ guc_create_parallel(struct intel_engine_cs **engines, ce = intel_engine_create_virtual(siblings, num_siblings, FORCE_VIRTUAL); - if (!ce) { - err = ERR_PTR(-ENOMEM); + if (IS_ERR(ce)) { + err = ERR_CAST(ce); goto unwind; } @@ -3369,7 +3642,9 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine) engine->emit_flush = gen12_emit_flush_xcs; } engine->set_default_submission = guc_set_default_submission; + engine->busyness = guc_engine_busyness; + engine->flags |= I915_ENGINE_SUPPORTS_STATS; engine->flags |= I915_ENGINE_HAS_PREEMPTION; engine->flags |= I915_ENGINE_HAS_TIMESLICES; @@ -3468,6 +3743,7 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine) void intel_guc_submission_enable(struct intel_guc *guc) { guc_init_lrc_mapping(guc); + guc_init_engine_stats(guc); } void intel_guc_submission_disable(struct intel_guc *guc) @@ -3695,6 +3971,7 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc, const u32 *msg, u32 len) { struct intel_context *ce; + unsigned long flags; int desc_idx; if (unlikely(len != 1)) { @@ -3703,11 +3980,24 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc, } desc_idx = msg[0]; + + /* + * The context lookup uses
the xarray but lookups only require an RCU lock, + * not the full spinlock. So take the lock explicitly and keep it until the + * context has been reference count locked to ensure it can't be destroyed + * asynchronously until the reset is done. + */ + xa_lock_irqsave(&guc->context_lookup, flags); ce = g2h_context_lookup(guc, desc_idx); + if (ce) + intel_context_get(ce); + xa_unlock_irqrestore(&guc->context_lookup, flags); + if (unlikely(!ce)) return -EPROTO; guc_handle_context_reset(guc, ce); + intel_context_put(ce); return 0; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h index c7ef44fa0c36..5a95a9f0a8e3 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h @@ -28,6 +28,8 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc, void intel_guc_dump_active_requests(struct intel_engine_cs *engine, struct i915_request *hung_rq, struct drm_printer *m); +void intel_guc_busyness_park(struct intel_gt *gt); +void intel_guc_busyness_unpark(struct intel_gt *gt); bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 2fef3b0bbe95..8f17005ce85f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -35,7 +35,7 @@ static void uc_expand_default_options(struct intel_uc *uc) } /* Intermediate platforms are HuC authentication only */ - if (IS_ALDERLAKE_S(i915)) { + if (IS_ALDERLAKE_S(i915) && !IS_ADLS_RPLS(i915)) { i915->params.enable_guc = ENABLE_GUC_LOAD_HUC; return; } diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 11a8baba6822..9ec064199364 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -427,7 +427,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu, plane->tiled = !!(val & SPRITE_TILED); color_order = !!(val & SPRITE_RGB_ORDER_RGBX); - yuv_order = (val & SPRITE_YUV_BYTE_ORDER_MASK) >> + yuv_order = (val & SPRITE_YUV_ORDER_MASK) >> _SPRITE_YUV_ORDER_SHIFT; fmt = (val & SPRITE_PIXFORMAT_MASK) >> _SPRITE_FMT_SHIFT; diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 53d0cb327539..99d1781fa5f0 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -446,17 +446,17 @@ static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e) || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) return (e->val64 != 0); else - return (e->val64 & _PAGE_PRESENT); + return (e->val64 & GEN8_PAGE_PRESENT); } static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e) { - e->val64 &= ~_PAGE_PRESENT; + e->val64 &= ~GEN8_PAGE_PRESENT; } static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e) { - e->val64 |= _PAGE_PRESENT; + e->val64 |= GEN8_PAGE_PRESENT; } static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e) @@ -2439,7 +2439,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, /* The entry parameters like present/writeable/cache type * are set to the same as i915's scratch page tree.
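 *
 * GEN8_PAGE_PRESENT and GEN8_PAGE_RW are the i915-local GTT PTE bits
 * for present and read/write, the same bits the
 * gen8_gtt_test_present() and gtt_entry_set_present() helpers above
 * key on.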
*/ - se.val64 |= _PAGE_PRESENT | _PAGE_RW; + se.val64 |= GEN8_PAGE_PRESENT | GEN8_PAGE_RW; if (type == GTT_TYPE_PPGTT_PDE_PT) se.val64 |= PPAT_CACHED; @@ -2896,7 +2896,7 @@ void intel_gvt_restore_ggtt(struct intel_gvt *gvt) offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT; for (idx = 0; idx < num_low; idx++) { pte = mm->ggtt_mm.host_ggtt_aperture[idx]; - if (pte & _PAGE_PRESENT) + if (pte & GEN8_PAGE_PRESENT) write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte); } @@ -2904,7 +2904,7 @@ void intel_gvt_restore_ggtt(struct intel_gvt *gvt) offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT; for (idx = 0; idx < num_hi; idx++) { pte = mm->ggtt_mm.host_ggtt_hidden[idx]; - if (pte & _PAGE_PRESENT) + if (pte & GEN8_PAGE_PRESENT) write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte); } } diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h index c149f348a972..b02a78ac87db 100644 --- a/drivers/gpu/drm/i915/i915_active_types.h +++ b/drivers/gpu/drm/i915/i915_active_types.h @@ -15,8 +15,6 @@ #include <linux/rcupdate.h> #include <linux/workqueue.h> -#include "i915_utils.h" - struct i915_active_fence { struct dma_fence __rcu *fence; struct dma_fence_cb cb; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index fe638b5da7c0..bafb902269de 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -48,7 +48,6 @@ #include "i915_debugfs_params.h" #include "i915_irq.h" #include "i915_scheduler.h" -#include "i915_trace.h" #include "intel_pm.h" static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) @@ -65,6 +64,7 @@ static int i915_capabilities(struct seq_file *m, void *data) intel_device_info_print_static(INTEL_INFO(i915), &p); intel_device_info_print_runtime(RUNTIME_INFO(i915), &p); + i915_print_iommu_status(i915, &p); intel_gt_info_print(&i915->gt.info, &p); intel_driver_caps_print(&i915->caps, &p); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_driver.c index b18a250e5d2e..e9125f14b3d1 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -29,8 +29,8 @@ #include <linux/acpi.h> #include <linux/device.h> -#include <linux/oom.h> #include <linux/module.h> +#include <linux/oom.h> #include <linux/pci.h> #include <linux/pm.h> #include <linux/pm_runtime.h> @@ -48,12 +48,14 @@ #include "display/intel_acpi.h" #include "display/intel_bw.h" #include "display/intel_cdclk.h" -#include "display/intel_dmc.h" #include "display/intel_display_types.h" +#include "display/intel_dmc.h" #include "display/intel_dp.h" +#include "display/intel_dpt.h" #include "display/intel_fbdev.h" #include "display/intel_hotplug.h" #include "display/intel_overlay.h" +#include "display/intel_pch_refclk.h" #include "display/intel_pipe_crc.h" #include "display/intel_pps.h" #include "display/intel_sprite.h" @@ -70,6 +72,7 @@ #include "pxp/intel_pxp_pm.h" #include "i915_debugfs.h" +#include "i915_driver.h" #include "i915_drv.h" #include "i915_ioc32.h" #include "i915_irq.h" @@ -79,7 +82,6 @@ #include "i915_suspend.h" #include "i915_switcheroo.h" #include "i915_sysfs.h" -#include "i915_trace.h" #include "i915_vgpu.h" #include "intel_dram.h" #include "intel_gvt.h" @@ -89,7 +91,7 @@ #include "intel_region_ttm.h" #include "vlv_suspend.h" -static const struct drm_driver driver; +static const struct drm_driver i915_drm_driver; static int i915_get_bridge_dev(struct drm_i915_private *dev_priv) { @@ -322,7 +324,7 @@ static int i915_driver_early_probe(struct 
drm_i915_private *dev_priv) mutex_init(&dev_priv->sb_lock); cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE); - mutex_init(&dev_priv->av_mutex); + mutex_init(&dev_priv->audio.mutex); mutex_init(&dev_priv->wm.wm_mutex); mutex_init(&dev_priv->pps_mutex); mutex_init(&dev_priv->hdcp_comp_mutex); @@ -415,10 +417,14 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) if (ret < 0) return ret; - ret = intel_uncore_init_mmio(&dev_priv->uncore); + ret = intel_uncore_setup_mmio(&dev_priv->uncore); if (ret < 0) goto err_bridge; + ret = intel_uncore_init_mmio(&dev_priv->uncore); + if (ret) + goto err_mmio; + /* Try to make sure MCHBAR is enabled before poking at it */ intel_setup_mchbar(dev_priv); intel_device_info_runtime_init(dev_priv); @@ -435,6 +441,8 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) err_uncore: intel_teardown_mchbar(dev_priv); intel_uncore_fini_mmio(&dev_priv->uncore); +err_mmio: + intel_uncore_cleanup_mmio(&dev_priv->uncore); err_bridge: pci_dev_put(dev_priv->bridge_dev); @@ -449,6 +457,7 @@ static void i915_driver_mmio_release(struct drm_i915_private *dev_priv) { intel_teardown_mchbar(dev_priv); intel_uncore_fini_mmio(&dev_priv->uncore); + intel_uncore_cleanup_mmio(&dev_priv->uncore); pci_dev_put(dev_priv->bridge_dev); } @@ -731,6 +740,12 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv) i915_gem_driver_unregister(dev_priv); } +void +i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p) +{ + drm_printf(p, "iommu: %s\n", enableddisabled(intel_vtd_active(i915))); +} + static void i915_welcome_messages(struct drm_i915_private *dev_priv) { if (drm_debug_enabled(DRM_UT_DRIVER)) { @@ -746,6 +761,7 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv) intel_device_info_print_static(INTEL_INFO(dev_priv), &p); intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p); + i915_print_iommu_status(dev_priv, &p); intel_gt_info_print(&dev_priv->gt.info, &p); } @@ -766,7 +782,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) struct intel_device_info *device_info; struct drm_i915_private *i915; - i915 = devm_drm_dev_alloc(&pdev->dev, &driver, + i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver, struct drm_i915_private, drm); if (IS_ERR(i915)) return i915; @@ -807,7 +823,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return PTR_ERR(i915); /* Disable nuclear pageflip by default on pre-ILK */ - if (!i915->params.nuclear_pageflip && match_info->graphics_ver < 5) + if (!i915->params.nuclear_pageflip && match_info->graphics.ver < 5) i915->drm.driver_features &= ~DRIVER_ATOMIC; /* @@ -1127,6 +1143,8 @@ static int i915_drm_suspend(struct drm_device *dev) intel_suspend_hw(dev_priv); + /* Must be called before GGTT is suspended. */ + intel_dpt_suspend(dev_priv); i915_ggtt_suspend(&dev_priv->ggtt); i915_save_display(dev_priv); @@ -1183,6 +1201,14 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) goto out; } + /* + * FIXME: Temporary hammer to avoid freezing the machine on our DGFX + * This should be totally removed when we handle the pci states properly + * on runtime PM and on s2idle cases. 
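+ *
+ * The pci_d3cold_disable() below is paired with the
+ * pci_d3cold_enable() call in i915_drm_resume_early(); the runtime PM
+ * suspend/resume hooks later in this file do the same disable/enable
+ * dance.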
+ */ + if (suspend_to_idle(dev_priv)) + pci_d3cold_disable(pdev); + pci_disable_device(pdev); /* * During hibernation on some platforms the BIOS may try to access @@ -1207,7 +1233,8 @@ out: return ret; } -int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state) +int i915_driver_suspend_switcheroo(struct drm_i915_private *i915, + pm_message_t state) { int error; @@ -1243,6 +1270,8 @@ static int i915_drm_resume(struct drm_device *dev) drm_err(&dev_priv->drm, "failed to re-enable GGTT\n"); i915_ggtt_resume(&dev_priv->ggtt); + /* Must be called after GGTT is resumed. */ + intel_dpt_resume(dev_priv); intel_dmc_ucode_resume(dev_priv); @@ -1344,6 +1373,8 @@ static int i915_drm_resume_early(struct drm_device *dev) pci_set_master(pdev); + pci_d3cold_enable(pdev); + disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); ret = vlv_resume_prepare(dev_priv, false); @@ -1364,7 +1395,7 @@ static int i915_drm_resume_early(struct drm_device *dev) return ret; } -int i915_resume_switcheroo(struct drm_i915_private *i915) +int i915_driver_resume_switcheroo(struct drm_i915_private *i915) { int ret; @@ -1520,6 +1551,7 @@ static int intel_runtime_suspend(struct device *kdev) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); int ret; if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) @@ -1565,6 +1597,12 @@ static int intel_runtime_suspend(struct device *kdev) drm_err(&dev_priv->drm, "Unclaimed access detected prior to suspending\n"); + /* + * FIXME: Temporary hammer to avoid freezing the machine on our DGFX + * This should be totally removed when we handle the pci states properly + * on runtime PM and on s2idle cases. + */ + pci_d3cold_disable(pdev); rpm->suspended = true; /* @@ -1603,6 +1641,7 @@ static int intel_runtime_resume(struct device *kdev) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); int ret; if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) @@ -1615,6 +1654,7 @@ static int intel_runtime_resume(struct device *kdev) intel_opregion_notify_adapter(dev_priv, PCI_D0); rpm->suspended = false; + pci_d3cold_enable(pdev); if (intel_uncore_unclaimed_mmio(&dev_priv->uncore)) drm_dbg(&dev_priv->drm, "Unclaimed access during suspend, bios?\n"); @@ -1777,7 +1817,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW), }; -static const struct drm_driver driver = { +static const struct drm_driver i915_drm_driver = { /* Don't use MTRRs here; the Xserver or userspace app should * deal with them for Intel hardware. 
*/ diff --git a/drivers/gpu/drm/i915/i915_driver.h b/drivers/gpu/drm/i915/i915_driver.h new file mode 100644 index 000000000000..9ef8db4aa0a6 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_driver.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __I915_DRIVER_H__ +#define __I915_DRIVER_H__ + +#include <linux/pm.h> + +struct pci_dev; +struct pci_device_id; +struct drm_i915_private; + +extern const struct dev_pm_ops i915_pm_ops; + +int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +void i915_driver_remove(struct drm_i915_private *i915); +void i915_driver_shutdown(struct drm_i915_private *i915); + +int i915_driver_resume_switcheroo(struct drm_i915_private *i915); +int i915_driver_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state); + +#endif /* __I915_DRIVER_H__ */ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 12256218634f..58aa55bc7461 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -50,7 +50,6 @@ #include <linux/stackdepot.h> #include <linux/xarray.h> -#include <drm/intel-gtt.h> #include <drm/drm_gem.h> #include <drm/drm_auth.h> #include <drm/drm_cache.h> @@ -90,6 +89,7 @@ #include "intel_device_info.h" #include "intel_memory_region.h" #include "intel_pch.h" +#include "intel_pm_types.h" #include "intel_runtime_pm.h" #include "intel_step.h" #include "intel_uncore.h" @@ -117,30 +117,6 @@ struct drm_i915_gem_object; -enum hpd_pin { - HPD_NONE = 0, - HPD_TV = HPD_NONE, /* TV is known to be unreliable */ - HPD_CRT, - HPD_SDVO_B, - HPD_SDVO_C, - HPD_PORT_A, - HPD_PORT_B, - HPD_PORT_C, - HPD_PORT_D, - HPD_PORT_E, - HPD_PORT_TC1, - HPD_PORT_TC2, - HPD_PORT_TC3, - HPD_PORT_TC4, - HPD_PORT_TC5, - HPD_PORT_TC6, - - HPD_NUM_PINS -}; - -#define for_each_hpd_pin(__pin) \ - for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++) - /* Threshold == 5 for long IRQs, 50 for short */ #define HPD_STORM_DEFAULT_THRESHOLD 50 @@ -191,8 +167,6 @@ struct i915_hotplug { I915_GEM_DOMAIN_VERTEX) struct drm_i915_private; -struct i915_mm_struct; -struct i915_mmu_object; struct drm_i915_file_private { struct drm_i915_private *dev_priv; @@ -364,15 +338,6 @@ struct intel_color_funcs { void (*read_luts)(struct intel_crtc_state *crtc_state); }; -struct intel_audio_funcs { - void (*audio_codec_enable)(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state); - void (*audio_codec_disable)(struct intel_encoder *encoder, - const struct intel_crtc_state *old_crtc_state, - const struct drm_connector_state *old_conn_state); -}; - struct intel_cdclk_funcs { void (*get_cdclk)(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config); @@ -411,102 +376,8 @@ struct drm_i915_display_funcs { void (*commit_modeset_enables)(struct intel_atomic_state *state); }; - #define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */ -struct intel_fbc { - /* This is always the inner lock when overlapping with struct_mutex and - * it's the outer lock when overlapping with stolen_lock. 
*/ - struct mutex lock; - unsigned int possible_framebuffer_bits; - unsigned int busy_bits; - struct intel_crtc *crtc; - - struct drm_mm_node compressed_fb; - struct drm_mm_node compressed_llb; - - u8 limit; - - bool false_color; - - bool active; - bool activated; - bool flip_pending; - - bool underrun_detected; - struct work_struct underrun_work; - - /* - * Due to the atomic rules we can't access some structures without the - * appropriate locking, so we cache information here in order to avoid - * these problems. - */ - struct intel_fbc_state_cache { - struct { - unsigned int mode_flags; - u32 hsw_bdw_pixel_rate; - } crtc; - - struct { - unsigned int rotation; - int src_w; - int src_h; - bool visible; - /* - * Display surface base address adjustement for - * pageflips. Note that on gen4+ this only adjusts up - * to a tile, offsets within a tile are handled in - * the hw itself (with the TILEOFF register). - */ - int adjusted_x; - int adjusted_y; - - u16 pixel_blend_mode; - } plane; - - struct { - const struct drm_format_info *format; - unsigned int stride; - u64 modifier; - } fb; - - unsigned int fence_y_offset; - u16 interval; - s8 fence_id; - bool psr2_active; - } state_cache; - - /* - * This structure contains everything that's relevant to program the - * hardware registers. When we want to figure out if we need to disable - * and re-enable FBC for a new configuration we just check if there's - * something different in the struct. The genx_fbc_activate functions - * are supposed to read from it in order to program the registers. - */ - struct intel_fbc_reg_params { - struct { - enum pipe pipe; - enum i9xx_plane_id i9xx_plane; - } crtc; - - struct { - const struct drm_format_info *format; - unsigned int stride; - u64 modifier; - } fb; - - unsigned int cfb_stride; - unsigned int cfb_size; - unsigned int fence_y_offset; - u16 override_cfb_stride; - u16 interval; - s8 fence_id; - bool plane_visible; - } params; - - const char *no_fbc_reason; -}; - /* * HIGH_RR is the highest eDP panel refresh rate read from EDID * LOW_RR is the lowest eDP panel refresh rate found from EDID @@ -543,7 +414,6 @@ struct i915_drrs { #define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1<<8) struct intel_fbdev; -struct intel_fbc_work; struct intel_gmbus { struct i2c_adapter adapter; @@ -738,69 +608,6 @@ struct intel_vbt_data { struct sdvo_device_mapping sdvo_mappings[2]; }; -enum intel_ddb_partitioning { - INTEL_DDB_PART_1_2, - INTEL_DDB_PART_5_6, /* IVB+ */ -}; - -struct ilk_wm_values { - u32 wm_pipe[3]; - u32 wm_lp[3]; - u32 wm_lp_spr[3]; - bool enable_fbc_wm; - enum intel_ddb_partitioning partitioning; -}; - -struct g4x_pipe_wm { - u16 plane[I915_MAX_PLANES]; - u16 fbc; -}; - -struct g4x_sr_wm { - u16 plane; - u16 cursor; - u16 fbc; -}; - -struct vlv_wm_ddl_values { - u8 plane[I915_MAX_PLANES]; -}; - -struct vlv_wm_values { - struct g4x_pipe_wm pipe[3]; - struct g4x_sr_wm sr; - struct vlv_wm_ddl_values ddl[3]; - u8 level; - bool cxsr; -}; - -struct g4x_wm_values { - struct g4x_pipe_wm pipe[2]; - struct g4x_sr_wm sr; - struct g4x_sr_wm hpll; - bool cxsr; - bool hpll_en; - bool fbc_en; -}; - -struct skl_ddb_entry { - u16 start, end; /* in number of blocks, 'end' is exclusive */ -}; - -static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry) -{ - return entry->end - entry->start; -} - -static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1, - const struct skl_ddb_entry *e2) -{ - if (e1->start == e2->start && e1->end == e2->end) - return true; - - return false; -} - struct 
i915_frontbuffer_tracking { spinlock_t lock; @@ -828,6 +635,30 @@ struct i915_selftest_stash { struct ida mock_region_instances; }; +/* intel_audio.c private */ +struct intel_audio_funcs; +struct intel_audio_private { + /* Display internal audio functions */ + const struct intel_audio_funcs *funcs; + + /* hda/i915 audio component */ + struct i915_audio_component *component; + bool component_registered; + /* mutex for audio/video sync */ + struct mutex mutex; + int power_refcount; + u32 freq_cntrl; + + /* Used to save the pipe-to-encoder mapping for audio */ + struct intel_encoder *encoder_map[I915_MAX_PIPES]; + + /* necessary resource sharing with HDMI LPE audio driver. */ + struct { + struct platform_device *platdev; + int irq; + } lpe; +}; + struct drm_i915_private { struct drm_device drm; @@ -918,7 +749,7 @@ struct drm_i915_private { u32 pipestat_irq_mask[I915_MAX_PIPES]; struct i915_hotplug hotplug; - struct intel_fbc fbc; + struct intel_fbc *fbc; struct i915_drrs drrs; struct intel_opregion opregion; struct intel_vbt_data vbt; @@ -995,9 +826,6 @@ struct drm_i915_private { /* Display internal color functions */ const struct intel_color_funcs *color_funcs; - /* Display internal audio functions */ - const struct intel_audio_funcs *audio_funcs; - /* Display CDCLK functions */ const struct intel_cdclk_funcs *cdclk_funcs; @@ -1016,9 +844,6 @@ struct drm_i915_private { /* Kernel Modesetting */ - struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; - struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES]; - /** * dpll and cdclk state is protected by connection_mutex * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll. @@ -1084,17 +909,6 @@ struct drm_i915_private { struct drm_property *broadcast_rgb_property; struct drm_property *force_audio_property; - /* hda/i915 audio component */ - struct i915_audio_component *audio_component; - bool audio_component_registered; - /** - * av_mutex - mutex for audio/video sync - * - */ - struct mutex av_mutex; - int audio_power_refcount; - u32 audio_freq_cntrl; - u32 fdi_rx_config; /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */ @@ -1227,14 +1041,7 @@ struct drm_i915_private { bool ipc_enabled; - /* Used to save the pipe-to-encoder mapping for audio */ - struct intel_encoder *av_enc_map[I915_MAX_PIPES]; - - /* necessary resource sharing with HDMI LPE audio driver. 
*/ - struct { - struct platform_device *platdev; - int irq; - } lpe_audio; + struct intel_audio_private audio; struct i915_pmu pmu; @@ -1327,15 +1134,15 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev) #define IP_VER(ver, rel) ((ver) << 8 | (rel)) -#define GRAPHICS_VER(i915) (INTEL_INFO(i915)->graphics_ver) -#define GRAPHICS_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->graphics_ver, \ - INTEL_INFO(i915)->graphics_rel) +#define GRAPHICS_VER(i915) (INTEL_INFO(i915)->graphics.ver) +#define GRAPHICS_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->graphics.ver, \ + INTEL_INFO(i915)->graphics.rel) #define IS_GRAPHICS_VER(i915, from, until) \ (GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until)) -#define MEDIA_VER(i915) (INTEL_INFO(i915)->media_ver) -#define MEDIA_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->media_ver, \ - INTEL_INFO(i915)->media_rel) +#define MEDIA_VER(i915) (INTEL_INFO(i915)->media.ver) +#define MEDIA_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->media.arch, \ + INTEL_INFO(i915)->media.rel) #define IS_MEDIA_VER(i915, from, until) \ (MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until)) @@ -1348,15 +1155,20 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev) #define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb) #define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step) -#define INTEL_GT_STEP(__i915) (RUNTIME_INFO(__i915)->step.gt_step) +#define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step) +#define INTEL_MEDIA_STEP(__i915) (RUNTIME_INFO(__i915)->step.media_step) #define IS_DISPLAY_STEP(__i915, since, until) \ (drm_WARN_ON(&(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \ INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until)) -#define IS_GT_STEP(__i915, since, until) \ - (drm_WARN_ON(&(__i915)->drm, INTEL_GT_STEP(__i915) == STEP_NONE), \ - INTEL_GT_STEP(__i915) >= (since) && INTEL_GT_STEP(__i915) < (until)) +#define IS_GRAPHICS_STEP(__i915, since, until) \ + (drm_WARN_ON(&(__i915)->drm, INTEL_GRAPHICS_STEP(__i915) == STEP_NONE), \ + INTEL_GRAPHICS_STEP(__i915) >= (since) && INTEL_GRAPHICS_STEP(__i915) < (until)) + +#define IS_MEDIA_STEP(__i915, since, until) \ + (drm_WARN_ON(&(__i915)->drm, INTEL_MEDIA_STEP(__i915) == STEP_NONE), \ + INTEL_MEDIA_STEP(__i915) >= (since) && INTEL_MEDIA_STEP(__i915) < (until)) static __always_inline unsigned int __platform_mask_index(const struct intel_runtime_info *info, @@ -1455,7 +1267,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE) #define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE) #define IS_COMETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COMETLAKE) -#define IS_CANNONLAKE(dev_priv) 0 #define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE) #define IS_JSL_EHL(dev_priv) (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE) || \ IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)) @@ -1470,6 +1281,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G10) #define IS_DG2_G11(dev_priv) \ IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G11) +#define IS_ADLS_RPLS(dev_priv) \ + IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL_S) #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) #define IS_BDW_ULT(dev_priv) \ @@ -1530,15 +1343,15 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define 
IS_TGL_Y(dev_priv) \ IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULX) -#define IS_SKL_GT_STEP(p, since, until) (IS_SKYLAKE(p) && IS_GT_STEP(p, since, until)) +#define IS_SKL_GRAPHICS_STEP(p, since, until) (IS_SKYLAKE(p) && IS_GRAPHICS_STEP(p, since, until)) -#define IS_KBL_GT_STEP(dev_priv, since, until) \ - (IS_KABYLAKE(dev_priv) && IS_GT_STEP(dev_priv, since, until)) +#define IS_KBL_GRAPHICS_STEP(dev_priv, since, until) \ + (IS_KABYLAKE(dev_priv) && IS_GRAPHICS_STEP(dev_priv, since, until)) #define IS_KBL_DISPLAY_STEP(dev_priv, since, until) \ (IS_KABYLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, since, until)) -#define IS_JSL_EHL_GT_STEP(p, since, until) \ - (IS_JSL_EHL(p) && IS_GT_STEP(p, since, until)) +#define IS_JSL_EHL_GRAPHICS_STEP(p, since, until) \ + (IS_JSL_EHL(p) && IS_GRAPHICS_STEP(p, since, until)) #define IS_JSL_EHL_DISPLAY_STEP(p, since, until) \ (IS_JSL_EHL(p) && IS_DISPLAY_STEP(p, since, until)) @@ -1546,19 +1359,19 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, (IS_TIGERLAKE(__i915) && \ IS_DISPLAY_STEP(__i915, since, until)) -#define IS_TGL_UY_GT_STEP(__i915, since, until) \ +#define IS_TGL_UY_GRAPHICS_STEP(__i915, since, until) \ ((IS_TGL_U(__i915) || IS_TGL_Y(__i915)) && \ - IS_GT_STEP(__i915, since, until)) + IS_GRAPHICS_STEP(__i915, since, until)) -#define IS_TGL_GT_STEP(__i915, since, until) \ +#define IS_TGL_GRAPHICS_STEP(__i915, since, until) \ (IS_TIGERLAKE(__i915) && !(IS_TGL_U(__i915) || IS_TGL_Y(__i915)) && \ - IS_GT_STEP(__i915, since, until)) + IS_GRAPHICS_STEP(__i915, since, until)) #define IS_RKL_DISPLAY_STEP(p, since, until) \ (IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until)) -#define IS_DG1_GT_STEP(p, since, until) \ - (IS_DG1(p) && IS_GT_STEP(p, since, until)) +#define IS_DG1_GRAPHICS_STEP(p, since, until) \ + (IS_DG1(p) && IS_GRAPHICS_STEP(p, since, until)) #define IS_DG1_DISPLAY_STEP(p, since, until) \ (IS_DG1(p) && IS_DISPLAY_STEP(p, since, until)) @@ -1566,20 +1379,20 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, (IS_ALDERLAKE_S(__i915) && \ IS_DISPLAY_STEP(__i915, since, until)) -#define IS_ADLS_GT_STEP(__i915, since, until) \ +#define IS_ADLS_GRAPHICS_STEP(__i915, since, until) \ (IS_ALDERLAKE_S(__i915) && \ - IS_GT_STEP(__i915, since, until)) + IS_GRAPHICS_STEP(__i915, since, until)) #define IS_ADLP_DISPLAY_STEP(__i915, since, until) \ (IS_ALDERLAKE_P(__i915) && \ IS_DISPLAY_STEP(__i915, since, until)) -#define IS_ADLP_GT_STEP(__i915, since, until) \ +#define IS_ADLP_GRAPHICS_STEP(__i915, since, until) \ (IS_ALDERLAKE_P(__i915) && \ - IS_GT_STEP(__i915, since, until)) + IS_GRAPHICS_STEP(__i915, since, until)) -#define IS_XEHPSDV_GT_STEP(__i915, since, until) \ - (IS_XEHPSDV(__i915) && IS_GT_STEP(__i915, since, until)) +#define IS_XEHPSDV_GRAPHICS_STEP(__i915, since, until) \ + (IS_XEHPSDV(__i915) && IS_GRAPHICS_STEP(__i915, since, until)) /* * DG2 hardware steppings are a bit unusual. The hardware design was forked @@ -1595,11 +1408,11 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, * and stepping-specific logic will be applied with a general DG2-wide stepping * number. 
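 *
 * As an illustrative use (the STEP_* values come from intel_step.h):
 *
 *   IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)
 *
 * is true only on G10 parts whose graphics stepping is at least A0
 * and below B0.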
*/ -#define IS_DG2_GT_STEP(__i915, variant, since, until) \ +#define IS_DG2_GRAPHICS_STEP(__i915, variant, since, until) \ (IS_SUBPLATFORM(__i915, INTEL_DG2, INTEL_SUBPLATFORM_##variant) && \ - IS_GT_STEP(__i915, since, until)) + IS_GRAPHICS_STEP(__i915, since, until)) -#define IS_DG2_DISP_STEP(__i915, since, until) \ +#define IS_DG2_DISPLAY_STEP(__i915, since, until) \ (IS_DG2(__i915) && \ IS_DISPLAY_STEP(__i915, since, until)) @@ -1696,7 +1509,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_PSR_HW_TRACKING(dev_priv) \ (INTEL_INFO(dev_priv)->display.has_psr_hw_tracking) #define HAS_PSR2_SEL_FETCH(dev_priv) (GRAPHICS_VER(dev_priv) >= 12) -#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0) +#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->display.cpu_transcoder_mask & BIT(trans)) != 0) #define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6) #define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p) @@ -1741,11 +1554,11 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define GT_FREQUENCY_MULTIPLIER 50 #define GEN9_FREQ_SCALER 3 -#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->pipe_mask)) +#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->display.pipe_mask)) -#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0) +#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->display.pipe_mask != 0) -#define HAS_VRR(i915) (GRAPHICS_VER(i915) >= 12) +#define HAS_VRR(i915) (GRAPHICS_VER(i915) >= 11) #define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5) @@ -1761,26 +1574,27 @@ static inline bool run_as_guest(void) #define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \ IS_ALDERLAKE_S(dev_priv)) -static inline bool intel_vtd_active(void) +static inline bool intel_vtd_active(struct drm_i915_private *i915) { -#ifdef CONFIG_INTEL_IOMMU - if (intel_iommu_gfx_mapped) + if (device_iommu_mapped(i915->drm.dev)) return true; -#endif /* Running as a guest, we assume the host is enforcing VT'd */ return run_as_guest(); } +void +i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p); + static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv) { - return GRAPHICS_VER(dev_priv) >= 6 && intel_vtd_active(); + return GRAPHICS_VER(dev_priv) >= 6 && intel_vtd_active(dev_priv); } static inline bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915) { - return IS_BROXTON(i915) && intel_vtd_active(); + return IS_BROXTON(i915) && intel_vtd_active(i915); } static inline bool @@ -1789,16 +1603,7 @@ intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915) return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915); } -/* i915_drv.c */ -extern const struct dev_pm_ops i915_pm_ops; - -int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent); -void i915_driver_remove(struct drm_i915_private *i915); -void i915_driver_shutdown(struct drm_i915_private *i915); - -int i915_resume_switcheroo(struct drm_i915_private *i915); -int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state); - +/* i915_getparam.c */ int i915_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -1819,6 +1624,7 @@ static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915) */ while (atomic_read(&i915->mm.free_count)) { flush_work(&i915->mm.free_work); + flush_delayed_work(&i915->bdev.wq); rcu_barrier(); } } @@ -1933,6 +1739,10 @@ int 
i915_gem_evict_vm(struct i915_address_space *vm); struct drm_i915_gem_object * i915_gem_object_create_internal(struct drm_i915_private *dev_priv, phys_addr_t size); +struct drm_i915_gem_object * +__i915_gem_object_create_internal(struct drm_i915_private *dev_priv, + const struct drm_i915_gem_object_ops *ops, + phys_addr_t size); /* i915_gem_tiling.c */ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) @@ -1972,14 +1782,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv) int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file); -/* i915_mm.c */ -int remap_io_mapping(struct vm_area_struct *vma, - unsigned long addr, unsigned long pfn, unsigned long size, - struct io_mapping *iomap); -int remap_io_sg(struct vm_area_struct *vma, - unsigned long addr, unsigned long size, - struct scatterlist *sgl, resource_size_t iobase); - static inline int intel_hws_csb_write_index(struct drm_i915_private *i915) { if (GRAPHICS_VER(i915) >= 11) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 981e383d1a5d..527228d4da7e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -764,7 +764,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, * perspective, requiring manual detiling by the client. */ if (!i915_gem_object_has_struct_page(obj) || - cpu_write_needs_clflush(obj)) + i915_gem_cpu_write_needs_clflush(obj)) /* Note that the gtt paths might fail with non-page-backed user * pointers (e.g. gtt mappings when moving data between * textures). Fallback to the shmem path in that case. @@ -1005,7 +1005,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, obj->ops->adjust_lru(obj); } - if (i915_gem_object_has_pages(obj)) { + if (i915_gem_object_has_pages(obj) || + i915_gem_object_has_self_managed_shrink_list(obj)) { unsigned long flags; spin_lock_irqsave(&i915->mm.obj_lock, flags); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 2a2d7643b551..96d2d99f5b98 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -48,8 +48,9 @@ #include "i915_gpu_error.h" #include "i915_memcpy.h" #include "i915_scatterlist.h" +#include "i915_vma_snapshot.h" -#define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) +#define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) #define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN) static void __sg_set_buf(struct scatterlist *sg, @@ -275,16 +276,16 @@ static bool compress_start(struct i915_vma_compress *c) static void *compress_next_page(struct i915_vma_compress *c, struct i915_vma_coredump *dst) { - void *page; + void *page_addr; + struct page *page; - if (dst->page_count >= dst->num_pages) - return ERR_PTR(-ENOSPC); - - page = pool_alloc(&c->pool, ALLOW_FAIL); - if (!page) + page_addr = pool_alloc(&c->pool, ALLOW_FAIL); + if (!page_addr) return ERR_PTR(-ENOMEM); - return dst->pages[dst->page_count++] = page; + page = virt_to_page(page_addr); + list_add_tail(&page->lru, &dst->page_list); + return page_addr; } static int compress_page(struct i915_vma_compress *c, @@ -397,7 +398,7 @@ static int compress_page(struct i915_vma_compress *c, if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE))) memcpy(ptr, src, PAGE_SIZE); - dst->pages[dst->page_count++] = ptr; + list_add_tail(&virt_to_page(ptr)->lru, &dst->page_list); cond_resched(); return 0; @@ -614,7 +615,7 @@ static void print_error_vma(struct drm_i915_error_state_buf 
*m, const struct i915_vma_coredump *vma) { char out[ASCII85_BUFSZ]; - int page; + struct page *page; if (!vma) return; @@ -628,16 +629,17 @@ static void print_error_vma(struct drm_i915_error_state_buf *m, err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes); err_compression_marker(m); - for (page = 0; page < vma->page_count; page++) { + list_for_each_entry(page, &vma->page_list, lru) { int i, len; + const u32 *addr = page_address(page); len = PAGE_SIZE; - if (page == vma->page_count - 1) + if (page == list_last_entry(&vma->page_list, typeof(*page), lru)) len -= vma->unused; len = ascii85_encode_len(len); for (i = 0; i < len; i++) - err_puts(m, ascii85_encode(vma->pages[page][i], out)); + err_puts(m, ascii85_encode(addr[i], out)); } err_puts(m, "\n"); } @@ -946,10 +948,12 @@ static void i915_vma_coredump_free(struct i915_vma_coredump *vma) { while (vma) { struct i915_vma_coredump *next = vma->next; - int page; + struct page *page, *n; - for (page = 0; page < vma->page_count; page++) - free_page((unsigned long)vma->pages[page]); + list_for_each_entry_safe(page, n, &vma->page_list, lru) { + list_del_init(&page->lru); + __free_page(page); + } kfree(vma); vma = next; @@ -1009,25 +1013,21 @@ void __i915_gpu_coredump_free(struct kref *error_ref) static struct i915_vma_coredump * i915_vma_coredump_create(const struct intel_gt *gt, - const struct i915_vma *vma, - const char *name, + const struct i915_vma_snapshot *vsnap, struct i915_vma_compress *compress) { struct i915_ggtt *ggtt = gt->ggtt; const u64 slot = ggtt->error_capture.start; struct i915_vma_coredump *dst; - unsigned long num_pages; struct sgt_iter iter; int ret; might_sleep(); - if (!vma || !vma->pages || !compress) + if (!vsnap || !vsnap->pages || !compress) return NULL; - num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT; - num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */ - dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ALLOW_FAIL); + dst = kmalloc(sizeof(*dst), ALLOW_FAIL); if (!dst) return NULL; @@ -1036,14 +1036,13 @@ i915_vma_coredump_create(const struct intel_gt *gt, return NULL; } - strcpy(dst->name, name); + INIT_LIST_HEAD(&dst->page_list); + strcpy(dst->name, vsnap->name); dst->next = NULL; - dst->gtt_offset = vma->node.start; - dst->gtt_size = vma->node.size; - dst->gtt_page_sizes = vma->page_sizes.gtt; - dst->num_pages = num_pages; - dst->page_count = 0; + dst->gtt_offset = vsnap->gtt_offset; + dst->gtt_size = vsnap->gtt_size; + dst->gtt_page_sizes = vsnap->page_sizes; dst->unused = 0; ret = -EINVAL; @@ -1051,7 +1050,7 @@ i915_vma_coredump_create(const struct intel_gt *gt, void __iomem *s; dma_addr_t dma; - for_each_sgt_daddr(dma, iter, vma->pages) { + for_each_sgt_daddr(dma, iter, vsnap->pages) { mutex_lock(&ggtt->error_mutex); ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0); @@ -1069,11 +1068,11 @@ i915_vma_coredump_create(const struct intel_gt *gt, if (ret) break; } - } else if (__i915_gem_object_is_lmem(vma->obj)) { - struct intel_memory_region *mem = vma->obj->mm.region; + } else if (vsnap->mr && vsnap->mr->type != INTEL_MEMORY_SYSTEM) { + struct intel_memory_region *mem = vsnap->mr; dma_addr_t dma; - for_each_sgt_daddr(dma, iter, vma->pages) { + for_each_sgt_daddr(dma, iter, vsnap->pages) { void __iomem *s; s = io_mapping_map_wc(&mem->iomap, @@ -1089,7 +1088,7 @@ i915_vma_coredump_create(const struct intel_gt *gt, } else { struct page *page; - for_each_sgt_page(page, iter, vma->pages) { + for_each_sgt_page(page, iter, vsnap->pages) { void *s; 
drm_clflush_pages(&page, 1); @@ -1106,8 +1105,13 @@ i915_vma_coredump_create(const struct intel_gt *gt, } if (ret || compress_flush(compress, dst)) { - while (dst->page_count--) - pool_free(&compress->pool, dst->pages[dst->page_count]); + struct page *page, *n; + + list_for_each_entry_safe_reverse(page, n, &dst->page_list, lru) { + list_del_init(&page->lru); + pool_free(&compress->pool, page_address(page)); + } + kfree(dst); dst = NULL; } @@ -1320,38 +1324,72 @@ static bool record_context(struct i915_gem_context_coredump *e, struct intel_engine_capture_vma { struct intel_engine_capture_vma *next; - struct i915_vma *vma; + struct i915_vma_snapshot *vsnap; char name[16]; + bool lockdep_cookie; }; static struct intel_engine_capture_vma * -capture_vma(struct intel_engine_capture_vma *next, - struct i915_vma *vma, - const char *name, - gfp_t gfp) +capture_vma_snapshot(struct intel_engine_capture_vma *next, + struct i915_vma_snapshot *vsnap, + gfp_t gfp) { struct intel_engine_capture_vma *c; - if (!vma) + if (!i915_vma_snapshot_present(vsnap)) return next; c = kmalloc(sizeof(*c), gfp); if (!c) return next; - if (!i915_active_acquire_if_busy(&vma->active)) { + if (!i915_vma_snapshot_resource_pin(vsnap, &c->lockdep_cookie)) { kfree(c); return next; } - strcpy(c->name, name); - c->vma = vma; /* reference held while active */ + strcpy(c->name, vsnap->name); + c->vsnap = vsnap; + i915_vma_snapshot_get(vsnap); c->next = next; return c; } static struct intel_engine_capture_vma * +capture_vma(struct intel_engine_capture_vma *next, + struct i915_vma *vma, + const char *name, + gfp_t gfp) +{ + struct i915_vma_snapshot *vsnap; + + if (!vma) + return next; + + /* + * If the vma isn't pinned, then the vma should be snapshotted + * to a struct i915_vma_snapshot at command submission time. + * Not here. + */ + GEM_WARN_ON(!i915_vma_is_pinned(vma)); + if (!i915_vma_is_pinned(vma)) + return next; + + vsnap = i915_vma_snapshot_alloc(gfp); + if (!vsnap) + return next; + + i915_vma_snapshot_init(vsnap, vma, name); + next = capture_vma_snapshot(next, vsnap, gfp); + + /* FIXME: Replace on async unbind. 
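+ *
+ * Dropping the local reference here is safe: capture_vma_snapshot()
+ * above took its own reference via i915_vma_snapshot_get() for the
+ * entry it queued.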
*/ + i915_vma_snapshot_put(vsnap); + + return next; +} + +static struct intel_engine_capture_vma * capture_user(struct intel_engine_capture_vma *capture, const struct i915_request *rq, gfp_t gfp) @@ -1359,7 +1397,7 @@ capture_user(struct intel_engine_capture_vma *capture, struct i915_capture_list *c; for (c = rq->capture_list; c; c = c->next) - capture = capture_vma(capture, c->vma, "user", gfp); + capture = capture_vma_snapshot(capture, c->vma_snapshot, gfp); return capture; } @@ -1373,6 +1411,36 @@ static void add_vma(struct intel_engine_coredump *ee, } } +static struct i915_vma_coredump * +create_vma_coredump(const struct intel_gt *gt, struct i915_vma *vma, + const char *name, struct i915_vma_compress *compress) +{ + struct i915_vma_coredump *ret = NULL; + struct i915_vma_snapshot tmp; + bool lockdep_cookie; + + if (!vma) + return NULL; + + i915_vma_snapshot_init_onstack(&tmp, vma, name); + if (i915_vma_snapshot_resource_pin(&tmp, &lockdep_cookie)) { + ret = i915_vma_coredump_create(gt, &tmp, compress); + i915_vma_snapshot_resource_unpin(&tmp, lockdep_cookie); + } + i915_vma_snapshot_put_onstack(&tmp); + + return ret; +} + +static void add_vma_coredump(struct intel_engine_coredump *ee, + const struct intel_gt *gt, + struct i915_vma *vma, + const char *name, + struct i915_vma_compress *compress) +{ + add_vma(ee, create_vma_coredump(gt, vma, name, compress)); +} + struct intel_engine_coredump * intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp) { @@ -1406,7 +1474,7 @@ intel_engine_coredump_add_request(struct intel_engine_coredump *ee, * as the simplest method to avoid being overwritten * by userspace. */ - vma = capture_vma(vma, rq->batch, "batch", gfp); + vma = capture_vma_snapshot(vma, &rq->batch_snapshot, gfp); vma = capture_user(vma, rq, gfp); vma = capture_vma(vma, rq->ring->vma, "ring", gfp); vma = capture_vma(vma, rq->context->state, "HW context", gfp); @@ -1427,30 +1495,24 @@ intel_engine_coredump_add_vma(struct intel_engine_coredump *ee, while (capture) { struct intel_engine_capture_vma *this = capture; - struct i915_vma *vma = this->vma; + struct i915_vma_snapshot *vsnap = this->vsnap; add_vma(ee, i915_vma_coredump_create(engine->gt, - vma, this->name, - compress)); + vsnap, compress)); - i915_active_release(&vma->active); + i915_vma_snapshot_resource_unpin(vsnap, this->lockdep_cookie); + i915_vma_snapshot_put(vsnap); capture = this->next; kfree(this); } - add_vma(ee, - i915_vma_coredump_create(engine->gt, - engine->status_page.vma, - "HW Status", - compress)); + add_vma_coredump(ee, engine->gt, engine->status_page.vma, + "HW Status", compress); - add_vma(ee, - i915_vma_coredump_create(engine->gt, - engine->wa_ctx.vma, - "WA context", - compress)); + add_vma_coredump(ee, engine->gt, engine->wa_ctx.vma, + "WA context", compress); } static struct intel_engine_coredump * @@ -1486,17 +1548,25 @@ capture_engine(struct intel_engine_cs *engine, } } if (rq) - capture = intel_engine_coredump_add_request(ee, rq, - ATOMIC_MAYFAIL); + rq = i915_request_get_rcu(rq); + + if (!rq) + goto no_request_capture; + + capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL); if (!capture) { -no_request_capture: - kfree(ee); - return NULL; + i915_request_put(rq); + goto no_request_capture; } intel_engine_coredump_add_vma(ee, capture, compress); + i915_request_put(rq); return ee; + +no_request_capture: + kfree(ee); + return NULL; } static void @@ -1550,10 +1620,8 @@ gt_record_uc(struct intel_gt_coredump *gt, */ error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL); 
error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL); - error_uc->guc_log = - i915_vma_coredump_create(gt->_gt, - uc->guc.log.vma, "GuC log buffer", - compress); + error_uc->guc_log = create_vma_coredump(gt->_gt, uc->guc.log.vma, + "GuC log buffer", compress); return error_uc; } @@ -1750,10 +1818,7 @@ static void capture_gen(struct i915_gpu_coredump *error) error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count); error->suspended = i915->runtime_pm.suspended; - error->iommu = -1; -#ifdef CONFIG_INTEL_IOMMU - error->iommu = intel_iommu_gfx_mapped; -#endif + error->iommu = intel_vtd_active(i915); error->reset_count = i915_reset_count(&i915->gpu_error); error->suspend_count = i915->suspend_count; @@ -1839,8 +1904,8 @@ void i915_vma_capture_finish(struct intel_gt_coredump *gt, kfree(compress); } -struct i915_gpu_coredump * -i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask) +static struct i915_gpu_coredump * +__i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask) { struct drm_i915_private *i915 = gt->i915; struct i915_gpu_coredump *error; @@ -1881,6 +1946,22 @@ i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask) return error; } +struct i915_gpu_coredump * +i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask) +{ + static DEFINE_MUTEX(capture_mutex); + int ret = mutex_lock_interruptible(&capture_mutex); + struct i915_gpu_coredump *dump; + + if (ret) + return ERR_PTR(ret); + + dump = __i915_gpu_coredump(gt, engine_mask); + mutex_unlock(&capture_mutex); + + return dump; +} + void i915_error_state_store(struct i915_gpu_coredump *error) { struct drm_i915_private *i915; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index b98d8cdbe4f2..5aedf5129814 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -39,10 +39,8 @@ struct i915_vma_coredump { u64 gtt_size; u32 gtt_page_sizes; - int num_pages; - int page_count; int unused; - u32 *pages[]; + struct list_head page_list; }; struct i915_request_coredump { diff --git a/drivers/gpu/drm/i915/i915_iosf_mbi.h b/drivers/gpu/drm/i915/i915_iosf_mbi.h new file mode 100644 index 000000000000..8f81b7603d37 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_iosf_mbi.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __I915_IOSF_MBI_H__ +#define __I915_IOSF_MBI_H__ + +#if IS_ENABLED(CONFIG_IOSF_MBI) +#include <asm/iosf_mbi.h> +#else + +/* Stubs to compile for all non-x86 archs */ +#define MBI_PMIC_BUS_ACCESS_BEGIN 1 +#define MBI_PMIC_BUS_ACCESS_END 2 + +struct notifier_block; + +static inline void iosf_mbi_punit_acquire(void) {} +static inline void iosf_mbi_punit_release(void) {} +static inline void iosf_mbi_assert_punit_acquired(void) {} + +static inline +int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline int +iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(struct notifier_block *nb) +{ + return 0; +} + +static inline +int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb) +{ + return 0; +} +#endif + +#endif /* __I915_IOSF_MBI_H__ */ diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 77680bca46ee..5b98fb0532b5 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -35,6 +35,7 @@ #include <drm/drm_drv.h> #include "display/intel_de.h" +#include "display/intel_display_trace.h" #include 
"display/intel_display_types.h" #include "display/intel_fifo_underrun.h" #include "display/intel_hotplug.h" @@ -49,7 +50,6 @@ #include "i915_drv.h" #include "i915_irq.h" -#include "i915_trace.h" #include "intel_pm.h" /** @@ -224,7 +224,7 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) static void intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); drm_crtc_handle_vblank(&crtc->base); } @@ -1318,7 +1318,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, u32 crc2, u32 crc3, u32 crc4) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 }; @@ -1357,7 +1357,7 @@ display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, static void flip_done_handler(struct drm_i915_private *i915, enum pipe pipe) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe); struct drm_crtc_state *crtc_state = crtc->base.state; struct drm_pending_vblank_event *e = crtc_state->event; struct drm_device *dev = &i915->drm; @@ -2772,7 +2772,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg) { struct drm_i915_private * const i915 = arg; struct intel_gt *gt = &i915->gt; - void __iomem * const regs = i915->uncore.regs; + void __iomem * const regs = gt->uncore->regs; u32 master_tile_ctl, master_ctl; u32 gu_misc_iir; @@ -3016,7 +3016,7 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) if (IS_CHERRYVIEW(dev_priv)) intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV); else - intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK); + intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV); i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT)); @@ -3173,11 +3173,12 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv) static void gen11_irq_reset(struct drm_i915_private *dev_priv) { - struct intel_uncore *uncore = &dev_priv->uncore; + struct intel_gt *gt = &dev_priv->gt; + struct intel_uncore *uncore = gt->uncore; gen11_master_intr_disable(dev_priv->uncore.regs); - gen11_gt_irq_reset(&dev_priv->gt); + gen11_gt_irq_reset(gt); gen11_display_irq_reset(dev_priv); GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_); @@ -3186,11 +3187,12 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv) static void dg1_irq_reset(struct drm_i915_private *dev_priv) { - struct intel_uncore *uncore = &dev_priv->uncore; + struct intel_gt *gt = &dev_priv->gt; + struct intel_uncore *uncore = gt->uncore; dg1_master_intr_disable(dev_priv->uncore.regs); - gen11_gt_irq_reset(&dev_priv->gt); + gen11_gt_irq_reset(gt); gen11_display_irq_reset(dev_priv); GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_); @@ -3869,13 +3871,14 @@ static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv) static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) { - struct intel_uncore *uncore = &dev_priv->uncore; + struct intel_gt *gt = &dev_priv->gt; + struct intel_uncore *uncore = gt->uncore; u32 gu_misc_masked = GEN11_GU_MISC_GSE; if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) icp_irq_postinstall(dev_priv); - 
gen11_gt_irq_postinstall(&dev_priv->gt); + gen11_gt_irq_postinstall(gt); gen11_de_irq_postinstall(dev_priv); GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); @@ -3886,10 +3889,11 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) static void dg1_irq_postinstall(struct drm_i915_private *dev_priv) { - struct intel_uncore *uncore = &dev_priv->uncore; + struct intel_gt *gt = &dev_priv->gt; + struct intel_uncore *uncore = gt->uncore; u32 gu_misc_masked = GEN11_GU_MISC_GSE; - gen11_gt_irq_postinstall(&dev_priv->gt); + gen11_gt_irq_postinstall(gt); GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); @@ -3900,8 +3904,8 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv) GEN11_DISPLAY_IRQ_ENABLE); } - dg1_master_intr_enable(dev_priv->uncore.regs); - intel_uncore_posting_read(&dev_priv->uncore, DG1_MSTR_TILE_INTR); + dg1_master_intr_enable(uncore->regs); + intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR); } static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c index 666808cb3a32..7998bc74ab49 100644 --- a/drivers/gpu/drm/i915/i915_mm.c +++ b/drivers/gpu/drm/i915/i915_mm.c @@ -27,6 +27,7 @@ #include "i915_drv.h" +#include "i915_mm.h" struct remap_pfn { struct mm_struct *mm; @@ -37,17 +38,6 @@ struct remap_pfn { resource_size_t iobase; }; -static int remap_pfn(pte_t *pte, unsigned long addr, void *data) -{ - struct remap_pfn *r = data; - - /* Special PTE are not associated with any struct page */ - set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot))); - r->pfn++; - - return 0; -} - #define use_dma(io) ((io) != -1) static inline unsigned long sgt_pfn(const struct remap_pfn *r) @@ -77,6 +67,20 @@ static int remap_sg(pte_t *pte, unsigned long addr, void *data) return 0; } +#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP) + +#if IS_ENABLED(CONFIG_X86) +static int remap_pfn(pte_t *pte, unsigned long addr, void *data) +{ + struct remap_pfn *r = data; + + /* Special PTE are not associated with any struct page */ + set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot))); + r->pfn++; + + return 0; +} + /** * remap_io_mapping - remap an IO mapping to userspace * @vma: user vma to map to @@ -94,7 +98,6 @@ int remap_io_mapping(struct vm_area_struct *vma, struct remap_pfn r; int err; -#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP) GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS); /* We rely on prevalidation of the io-mapping to skip track_pfn(). 
*/ @@ -111,6 +114,7 @@ int remap_io_mapping(struct vm_area_struct *vma, return 0; } +#endif /** * remap_io_sg - remap an IO mapping to userspace diff --git a/drivers/gpu/drm/i915/i915_mm.h b/drivers/gpu/drm/i915/i915_mm.h new file mode 100644 index 000000000000..76f1d53bdf34 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_mm.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __I915_MM_H__ +#define __I915_MM_H__ + +#include <linux/types.h> + +struct vm_area_struct; +struct io_mapping; +struct scatterlist; + +#if IS_ENABLED(CONFIG_X86) +int remap_io_mapping(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn, unsigned long size, + struct io_mapping *iomap); +#else +static inline +int remap_io_mapping(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn, unsigned long size, + struct io_mapping *iomap) +{ + pr_err("Architecture has no %s() and shouldn't be calling this function\n", __func__); + WARN_ON_ONCE(1); + return 0; +} +#endif + +int remap_io_sg(struct vm_area_struct *vma, + unsigned long addr, unsigned long size, + struct scatterlist *sgl, resource_size_t iobase); + +#endif /* __I915_MM_H__ */ diff --git a/drivers/gpu/drm/i915/i915_module.c b/drivers/gpu/drm/i915/i915_module.c index ab2295dd4500..f6bcd2f89257 100644 --- a/drivers/gpu/drm/i915/i915_module.c +++ b/drivers/gpu/drm/i915/i915_module.c @@ -4,7 +4,7 @@ * Copyright © 2021 Intel Corporation */ -#include <linux/console.h> +#include <drm/drm_drv.h> #include "gem/i915_gem_context.h" #include "gem/i915_gem_object.h" @@ -24,14 +24,14 @@ static int i915_check_nomodeset(void) /* * Enable KMS by default, unless explicitly overridden by - * either the i915.modeset prarameter or by the - * vga_text_mode_force boot option. + * either the i915.modeset parameter or by the + * nomodeset boot option.
*/ if (i915_modparams.modeset == 0) use_kms = false; - if (vgacon_text_force() && i915_modparams.modeset == -1) + if (drm_firmware_drivers_only() && i915_modparams.modeset == -1) use_kms = false; if (!use_kms) { diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 169837de395d..ae36dfd77dcf 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -22,18 +22,17 @@ * */ -#include <linux/vga_switcheroo.h> - #include <drm/drm_drv.h> #include <drm/i915_pciids.h> +#include "i915_driver.h" #include "i915_drv.h" #include "i915_pci.h" #define PLATFORM(x) .platform = (x) #define GEN(x) \ - .graphics_ver = (x), \ - .media_ver = (x), \ + .graphics.ver = (x), \ + .media.ver = (x), \ .display.ver = (x) #define I845_PIPE_OFFSETS \ @@ -145,6 +144,12 @@ .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \ DRM_COLOR_LUT_EQUAL_CHANNELS, \ } +#define ICL_COLORS \ + .color = { .degamma_lut_size = 33, .gamma_lut_size = 262145, \ + .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \ + DRM_COLOR_LUT_EQUAL_CHANNELS, \ + .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \ + } /* Keep in gen based order, and chronological order within a gen */ @@ -157,8 +162,8 @@ #define I830_FEATURES \ GEN(2), \ .is_mobile = 1, \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_overlay = 1, \ .display.cursor_needs_physical = 1, \ .display.overlay_needs_physical = 1, \ @@ -178,8 +183,8 @@ #define I845_FEATURES \ GEN(2), \ - .pipe_mask = BIT(PIPE_A), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A), \ + .display.pipe_mask = BIT(PIPE_A), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A), \ .display.has_overlay = 1, \ .display.overlay_needs_physical = 1, \ .display.has_gmch = 1, \ @@ -220,8 +225,8 @@ static const struct intel_device_info i865g_info = { #define GEN3_FEATURES \ GEN(3), \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_gmch = 1, \ .gpu_reset_clobbers_display = true, \ .platform_engine_mask = BIT(RCS0), \ @@ -310,8 +315,8 @@ static const struct intel_device_info pnv_m_info = { #define GEN4_FEATURES \ GEN(4), \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_hotplug = 1, \ .display.has_gmch = 1, \ .gpu_reset_clobbers_display = true, \ @@ -363,8 +368,8 @@ static const struct intel_device_info gm45_info = { #define GEN5_FEATURES \ GEN(5), \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_hotplug = 1, \ .platform_engine_mask = BIT(RCS0) | BIT(VCS0), \ .has_snoop = true, \ @@ -393,8 +398,8 @@ static const struct intel_device_info ilk_m_info = { #define GEN6_FEATURES \ GEN(6), \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .display.cpu_transcoder_mask = 
BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_hotplug = 1, \ .display.has_fbc = 1, \ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ @@ -444,8 +449,8 @@ static const struct intel_device_info snb_m_gt2_info = { #define GEN7_FEATURES \ GEN(7), \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \ .display.has_hotplug = 1, \ .display.has_fbc = 1, \ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ @@ -499,8 +504,8 @@ static const struct intel_device_info ivb_q_info = { GEN7_FEATURES, PLATFORM(INTEL_IVYBRIDGE), .gt = 2, - .pipe_mask = 0, /* legal, last one wins */ - .cpu_transcoder_mask = 0, + .display.pipe_mask = 0, /* legal, last one wins */ + .display.cpu_transcoder_mask = 0, .has_l3_dpf = 1, }; @@ -508,8 +513,8 @@ static const struct intel_device_info vlv_info = { PLATFORM(INTEL_VALLEYVIEW), GEN(7), .is_lp = 1, - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), .has_runtime_pm = 1, .has_rc6 = 1, .has_reset_engine = true, @@ -533,7 +538,7 @@ static const struct intel_device_info vlv_info = { #define G75_FEATURES \ GEN7_FEATURES, \ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \ .display.has_ddi = 1, \ .display.has_fpga_dbg = 1, \ @@ -603,8 +608,8 @@ static const struct intel_device_info bdw_gt3_info = { static const struct intel_device_info chv_info = { PLATFORM(INTEL_CHERRYVIEW), GEN(8), - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), .display.has_hotplug = 1, .is_lp = 1, .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), @@ -681,8 +686,8 @@ static const struct intel_device_info skl_gt4_info = { .dbuf.slice_mask = BIT(DBUF_S1), \ .display.has_hotplug = 1, \ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \ BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \ .has_64bit_reloc = 1, \ @@ -790,8 +795,8 @@ static const struct intel_device_info cml_gt2_info = { #define GEN11_FEATURES \ GEN9_FEATURES, \ GEN11_DEFAULT_PAGE_SIZES, \ - .abox_mask = BIT(0), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + .display.abox_mask = BIT(0), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \ .pipe_offsets = { \ @@ -811,7 +816,7 @@ static const struct intel_device_info cml_gt2_info = { [TRANSCODER_DSI_1] = 
TRANSCODER_DSI1_OFFSET, \ }, \ GEN(11), \ - .color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }, \ + ICL_COLORS, \ .dbuf.size = 2048, \ .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \ .display.has_dsc = 1, \ @@ -842,9 +847,9 @@ static const struct intel_device_info jsl_info = { #define GEN12_FEATURES \ GEN11_FEATURES, \ GEN(12), \ - .abox_mask = GENMASK(2, 1), \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + .display.abox_mask = GENMASK(2, 1), \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | \ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \ .pipe_offsets = { \ @@ -866,7 +871,7 @@ static const struct intel_device_info jsl_info = { TGL_CURSOR_OFFSETS, \ .has_global_mocs = 1, \ .has_pxp = 1, \ - .display.has_dsb = 1 + .display.has_dsb = 0 /* FIXME: LUT load is broken with DSB */ static const struct intel_device_info tgl_info = { GEN12_FEATURES, @@ -879,9 +884,9 @@ static const struct intel_device_info tgl_info = { static const struct intel_device_info rkl_info = { GEN12_FEATURES, PLATFORM(INTEL_ROCKETLAKE), - .abox_mask = BIT(0), - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + .display.abox_mask = BIT(0), + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), .display.has_hti = 1, .display.has_psr_hw_tracking = 0, @@ -899,9 +904,9 @@ static const struct intel_device_info rkl_info = { static const struct intel_device_info dg1_info = { GEN12_FEATURES, DGFX_FEATURES, - .graphics_rel = 10, + .graphics.rel = 10, PLATFORM(INTEL_DG1), - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), .require_force_probe = 1, .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | @@ -913,7 +918,7 @@ static const struct intel_device_info dg1_info = { static const struct intel_device_info adl_s_info = { GEN12_FEATURES, PLATFORM(INTEL_ALDERLAKE_S), - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), .display.has_hti = 1, .display.has_psr_hw_tracking = 0, .platform_engine_mask = @@ -930,10 +935,11 @@ static const struct intel_device_info adl_s_info = { } #define XE_LPD_FEATURES \ - .abox_mask = GENMASK(1, 0), \ - .color = { .degamma_lut_size = 0, .gamma_lut_size = 0 }, \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ - BIT(TRANSCODER_C) | BIT(TRANSCODER_D), \ + .display.abox_mask = GENMASK(1, 0), \ + .color = { .degamma_lut_size = 128, .gamma_lut_size = 1024, \ + .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \ + DRM_COLOR_LUT_EQUAL_CHANNELS, \ + }, \ .dbuf.size = 4096, \ .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | \ BIT(DBUF_S4), \ @@ -949,18 +955,22 @@ static const struct intel_device_info adl_s_info = { .display.has_ipc = 1, \ .display.has_psr = 1, \ .display.ver = 13, \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ .pipe_offsets = { \ [TRANSCODER_A] = PIPE_A_OFFSET, \ [TRANSCODER_B] = PIPE_B_OFFSET, \ [TRANSCODER_C] = PIPE_C_OFFSET, \ [TRANSCODER_D] 
= PIPE_D_OFFSET, \ + [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \ + [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \ }, \ .trans_offsets = { \ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ [TRANSCODER_D] = TRANSCODER_D_OFFSET, \ + [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \ + [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \ }, \ XE_LPD_CURSOR_OFFSETS @@ -968,7 +978,9 @@ static const struct intel_device_info adl_p_info = { GEN12_FEATURES, XE_LPD_FEATURES, PLATFORM(INTEL_ALDERLAKE_P), - .require_force_probe = 1, + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | + BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), .display.has_cdclk_crawl = 1, .display.has_modular_fia = 1, .display.has_psr_hw_tracking = 0, @@ -986,8 +998,8 @@ static const struct intel_device_info adl_p_info = { I915_GTT_PAGE_SIZE_2M #define XE_HP_FEATURES \ - .graphics_ver = 12, \ - .graphics_rel = 50, \ + .graphics.ver = 12, \ + .graphics.rel = 50, \ XE_HP_PAGE_SIZES, \ .dma_mask_size = 46, \ .has_64bit_reloc = 1, \ @@ -1005,8 +1017,8 @@ static const struct intel_device_info adl_p_info = { .ppgtt_type = INTEL_PPGTT_FULL #define XE_HPM_FEATURES \ - .media_ver = 12, \ - .media_rel = 50 + .media.ver = 12, \ + .media.rel = 50 __maybe_unused static const struct intel_device_info xehpsdv_info = { @@ -1015,7 +1027,6 @@ static const struct intel_device_info xehpsdv_info = { DGFX_FEATURES, PLATFORM(INTEL_XEHPSDV), .display = { }, - .pipe_mask = 0, .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) | @@ -1030,14 +1041,16 @@ static const struct intel_device_info dg2_info = { XE_HPM_FEATURES, XE_LPD_FEATURES, DGFX_FEATURES, - .graphics_rel = 55, - .media_rel = 55, + .graphics.rel = 55, + .media.rel = 55, PLATFORM(INTEL_DG2), .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VECS1) | BIT(VCS0) | BIT(VCS2), .require_force_probe = 1, + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + BIT(TRANSCODER_C) | BIT(TRANSCODER_D), }; #undef PLATFORM @@ -1117,6 +1130,7 @@ static const struct pci_device_id pciidlist[] = { INTEL_ADLS_IDS(&adl_s_info), INTEL_ADLP_IDS(&adl_p_info), INTEL_DG1_IDS(&dg1_info), + INTEL_RPLS_IDS(&adl_s_info), {0, 0, 0} }; MODULE_DEVICE_TABLE(pci, pciidlist); @@ -1189,11 +1203,8 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (PCI_FUNC(pdev->devfn)) return -ENODEV; - /* - * apple-gmux is needed on dual GPU MacBook Pro - * to probe the panel if we're the inactive GPU. 
- */ - if (vga_switcheroo_client_probe_defer(pdev)) + /* Detect if we need to wait for other drivers early on */ + if (intel_modeset_probe_defer(pdev)) return -EPROBE_DEFER; err = i915_driver_probe(pdev, ent); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index da9055c3ebf0..d27ba273cc68 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -371,6 +371,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define VLV_G3DCTL _MMIO(0x9024) #define VLV_GSCKGCTL _MMIO(0x9028) +#define FBC_LLC_READ_CTRL _MMIO(0x9044) +#define FBC_LLC_FULLY_OPEN REG_BIT(30) + #define GEN6_MBCTL _MMIO(0x0907c) #define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4) #define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3) @@ -498,6 +501,18 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define ECOBITS_PPGTT_CACHE64B (3 << 8) #define ECOBITS_PPGTT_CACHE4B (0 << 8) +#define GEN12_GAMCNTRL_CTRL _MMIO(0xcf54) +#define INVALIDATION_BROADCAST_MODE_DIS REG_BIT(12) +#define GLOBAL_INVALIDATION_MODE REG_BIT(2) + +#define GEN12_GAMSTLB_CTRL _MMIO(0xcf4c) +#define CONTROL_BLOCK_CLKGATE_DIS REG_BIT(12) +#define EGRESS_BLOCK_CLKGATE_DIS REG_BIT(11) +#define TAG_BLOCK_CLKGATE_DIS REG_BIT(7) + +#define GEN12_MERT_MOD_CTRL _MMIO(0xcf28) +#define FORCE_MISS_FTLB REG_BIT(3) + #define GAB_CTL _MMIO(0x24000) #define GAB_CTL_CONT_AFTER_PAGEFAULT (1 << 8) @@ -719,6 +734,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN12_OA_TLB_INV_CR _MMIO(0xceec) +#define GEN12_SQCM _MMIO(0x8724) +#define EN_32B_ACCESS REG_BIT(30) + /* Gen12 OAR unit */ #define GEN12_OAR_OACONTROL _MMIO(0x2960) #define GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT 1 @@ -770,6 +788,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define EU_PERF_CNTL5 _MMIO(0xe55c) #define EU_PERF_CNTL6 _MMIO(0xe65c) +#define RT_CTRL _MMIO(0xe530) +#define DIS_NULL_QUERY REG_BIT(10) + /* * OA Boolean state */ @@ -2244,6 +2265,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define SNPS_PHY_MPLLB_DP2_MODE REG_BIT(9) #define SNPS_PHY_MPLLB_WORD_DIV2_EN REG_BIT(8) #define SNPS_PHY_MPLLB_TX_CLK_DIV REG_GENMASK(7, 5) +#define SNPS_PHY_MPLLB_SHIM_DIV32_CLK_SEL REG_BIT(0) #define SNPS_PHY_MPLLB_FRACN1(phy) _MMIO_SNPS(phy, 0x168008) #define SNPS_PHY_MPLLB_FRACN_EN REG_BIT(31) @@ -2662,6 +2684,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */ #define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */ +#define GUCPMTIMESTAMP _MMIO(0xC3E8) + /* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */ #define GEN8_RING_CS_GPR(base, n) _MMIO((base) + 0x600 + (n) * 8) #define GEN8_RING_CS_GPR_UDW(base, n) _MMIO((base) + 0x600 + (n) * 8 + 4) @@ -2772,6 +2796,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define VDBOX_CGCTL3F10(base) _MMIO((base) + 0x3f10) #define IECPUNIT_CLKGATE_DIS REG_BIT(22) +#define VDBOX_CGCTL3F18(base) _MMIO((base) + 0x3f18) +#define ALNUNIT_CLKGATE_DIS REG_BIT(13) + #define ERROR_GEN6 _MMIO(0x40a0) #define GEN7_ERR_INT _MMIO(0x44040) #define ERR_INT_POISON (1 << 31) @@ -2795,12 +2822,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN12_AUX_ERR_DBG _MMIO(0x43f4) #define FPGA_DBG _MMIO(0x42300) -#define FPGA_DBG_RM_NOCLAIM (1 << 31) +#define FPGA_DBG_RM_NOCLAIM REG_BIT(31) #define CLAIM_ER _MMIO(VLV_DISPLAY_BASE + 0x2028) -#define CLAIM_ER_CLR (1 << 31) -#define CLAIM_ER_OVERFLOW (1 << 16) -#define CLAIM_ER_CTR_MASK 0xffff +#define CLAIM_ER_CLR REG_BIT(31) +#define 
CLAIM_ER_OVERFLOW REG_BIT(16) +#define CLAIM_ER_CTR_MASK REG_GENMASK(15, 0) #define DERRMR _MMIO(0x44050) /* Note that HBLANK events are reserved on bdw+ */ @@ -2870,6 +2897,15 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2) #define GEN11_ENABLE_32_PLANE_MODE (1 << 7) +#define SCCGCTL94DC _MMIO(0x94dc) +#define CG3DDISURB REG_BIT(14) + +#define MLTICTXCTL _MMIO(0xb170) +#define TDONRENDER REG_BIT(2) + +#define L3SQCREG1_CCS0 _MMIO(0xb200) +#define FLUSHALLNONCOH REG_BIT(5) + /* WaClearTdlStateAckDirtyBits */ #define GEN8_STATE_ACK _MMIO(0x20F0) #define GEN9_STATE_ACK_SLICE1 _MMIO(0x20F8) @@ -3106,7 +3142,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN9_RCS_FE_FSM2 _MMIO(0x22a4) #define GEN10_CACHE_MODE_SS _MMIO(0xe420) -#define FLOAT_BLEND_OPTIMIZATION_ENABLE (1 << 4) +#define ENABLE_PREFETCH_INTO_IC REG_BIT(3) +#define FLOAT_BLEND_OPTIMIZATION_ENABLE REG_BIT(4) /* Fuse readout registers for GT */ #define HSW_PAVP_FUSE1 _MMIO(0x911C) @@ -3307,93 +3344,98 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define FBC_CFB_BASE _MMIO(0x3200) /* 4k page aligned */ #define FBC_LL_BASE _MMIO(0x3204) /* 4k page aligned */ #define FBC_CONTROL _MMIO(0x3208) -#define FBC_CTL_EN REG_BIT(31) -#define FBC_CTL_PERIODIC REG_BIT(30) -#define FBC_CTL_INTERVAL_MASK REG_GENMASK(29, 16) -#define FBC_CTL_INTERVAL(x) REG_FIELD_PREP(FBC_CTL_INTERVAL_MASK, (x)) -#define FBC_CTL_STOP_ON_MOD REG_BIT(15) -#define FBC_CTL_UNCOMPRESSIBLE REG_BIT(14) /* i915+ */ -#define FBC_CTL_C3_IDLE REG_BIT(13) /* i945gm */ -#define FBC_CTL_STRIDE_MASK REG_GENMASK(12, 5) -#define FBC_CTL_STRIDE(x) REG_FIELD_PREP(FBC_CTL_STRIDE_MASK, (x)) -#define FBC_CTL_FENCENO_MASK REG_GENMASK(3, 0) -#define FBC_CTL_FENCENO(x) REG_FIELD_PREP(FBC_CTL_FENCENO_MASK, (x)) +#define FBC_CTL_EN REG_BIT(31) +#define FBC_CTL_PERIODIC REG_BIT(30) +#define FBC_CTL_INTERVAL_MASK REG_GENMASK(29, 16) +#define FBC_CTL_INTERVAL(x) REG_FIELD_PREP(FBC_CTL_INTERVAL_MASK, (x)) +#define FBC_CTL_STOP_ON_MOD REG_BIT(15) +#define FBC_CTL_UNCOMPRESSIBLE REG_BIT(14) /* i915+ */ +#define FBC_CTL_C3_IDLE REG_BIT(13) /* i945gm only */ +#define FBC_CTL_STRIDE_MASK REG_GENMASK(12, 5) +#define FBC_CTL_STRIDE(x) REG_FIELD_PREP(FBC_CTL_STRIDE_MASK, (x)) +#define FBC_CTL_FENCENO_MASK REG_GENMASK(3, 0) +#define FBC_CTL_FENCENO(x) REG_FIELD_PREP(FBC_CTL_FENCENO_MASK, (x)) #define FBC_COMMAND _MMIO(0x320c) -#define FBC_CMD_COMPRESS (1 << 0) +#define FBC_CMD_COMPRESS REG_BIT(0) #define FBC_STATUS _MMIO(0x3210) -#define FBC_STAT_COMPRESSING (1 << 31) -#define FBC_STAT_COMPRESSED (1 << 30) -#define FBC_STAT_MODIFIED (1 << 29) -#define FBC_STAT_CURRENT_LINE_SHIFT (0) -#define FBC_CONTROL2 _MMIO(0x3214) -#define FBC_CTL_FENCE_DBL (0 << 4) -#define FBC_CTL_IDLE_IMM (0 << 2) -#define FBC_CTL_IDLE_FULL (1 << 2) -#define FBC_CTL_IDLE_LINE (2 << 2) -#define FBC_CTL_IDLE_DEBUG (3 << 2) -#define FBC_CTL_CPU_FENCE (1 << 1) -#define FBC_CTL_PLANE(plane) ((plane) << 0) -#define FBC_FENCE_OFF _MMIO(0x3218) /* BSpec typo has 321Bh */ -#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4) +#define FBC_STAT_COMPRESSING REG_BIT(31) +#define FBC_STAT_COMPRESSED REG_BIT(30) +#define FBC_STAT_MODIFIED REG_BIT(29) +#define FBC_STAT_CURRENT_LINE_MASK REG_GENMASK(10, 0) +#define FBC_CONTROL2 _MMIO(0x3214) /* i965gm only */ +#define FBC_CTL_FENCE_DBL REG_BIT(4) +#define FBC_CTL_IDLE_MASK REG_GENMASK(3, 2) +#define FBC_CTL_IDLE_IMM REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 0) +#define FBC_CTL_IDLE_FULL 
REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 1) +#define FBC_CTL_IDLE_LINE REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 2) +#define FBC_CTL_IDLE_DEBUG REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 3) +#define FBC_CTL_CPU_FENCE_EN REG_BIT(1) +#define FBC_CTL_PLANE_MASK REG_GENMASK(1, 0) +#define FBC_CTL_PLANE(i9xx_plane) REG_FIELD_PREP(FBC_CTL_PLANE_MASK, (i9xx_plane)) +#define FBC_FENCE_OFF _MMIO(0x3218) /* i965gm only, BSpec typo has 321Bh */ +#define FBC_MOD_NUM _MMIO(0x3220) /* i965gm only */ +#define FBC_MOD_NUM_MASK REG_GENMASK(31, 1) +#define FBC_MOD_NUM_VALID REG_BIT(0) +#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4) /* 49 registers */ +#define FBC_TAG_MASK REG_GENMASK(1, 0) /* 16 tags per register */ +#define FBC_TAG_MODIFIED REG_FIELD_PREP(FBC_TAG_MASK, 0) +#define FBC_TAG_UNCOMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 1) +#define FBC_TAG_UNCOMPRESSIBLE REG_FIELD_PREP(FBC_TAG_MASK, 2) +#define FBC_TAG_COMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 3) #define FBC_LL_SIZE (1536) -#define FBC_LLC_READ_CTRL _MMIO(0x9044) -#define FBC_LLC_FULLY_OPEN (1 << 30) - /* Framebuffer compression for GM45+ */ #define DPFC_CB_BASE _MMIO(0x3200) +#define ILK_DPFC_CB_BASE _MMIO(0x43200) #define DPFC_CONTROL _MMIO(0x3208) -#define DPFC_CTL_EN (1 << 31) -#define DPFC_CTL_PLANE(plane) ((plane) << 30) -#define IVB_DPFC_CTL_PLANE(plane) ((plane) << 29) -#define DPFC_CTL_FENCE_EN (1 << 29) -#define IVB_DPFC_CTL_FENCE_EN (1 << 28) -#define DPFC_CTL_PERSISTENT_MODE (1 << 25) -#define DPFC_SR_EN (1 << 10) -#define DPFC_CTL_LIMIT_1X (0 << 6) -#define DPFC_CTL_LIMIT_2X (1 << 6) -#define DPFC_CTL_LIMIT_4X (2 << 6) +#define ILK_DPFC_CONTROL _MMIO(0x43208) +#define DPFC_CTL_EN REG_BIT(31) +#define DPFC_CTL_PLANE_MASK_G4X REG_BIT(30) /* g4x-snb */ +#define DPFC_CTL_PLANE_G4X(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_G4X, (i9xx_plane)) +#define DPFC_CTL_FENCE_EN_G4X REG_BIT(29) /* g4x-snb */ +#define DPFC_CTL_PLANE_MASK_IVB REG_GENMASK(30, 29) /* ivb only */ +#define DPFC_CTL_PLANE_IVB(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_IVB, (i9xx_plane)) +#define DPFC_CTL_FENCE_EN_IVB REG_BIT(28) /* ivb+ */ +#define DPFC_CTL_PERSISTENT_MODE REG_BIT(25) /* g4x-snb */ +#define DPFC_CTL_FALSE_COLOR REG_BIT(10) /* ivb+ */ +#define DPFC_CTL_SR_EN REG_BIT(10) /* g4x only */ +#define DPFC_CTL_SR_EXIT_DIS REG_BIT(9) /* g4x only */ +#define DPFC_CTL_LIMIT_MASK REG_GENMASK(7, 6) +#define DPFC_CTL_LIMIT_1X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 0) +#define DPFC_CTL_LIMIT_2X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 1) +#define DPFC_CTL_LIMIT_4X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 2) +#define DPFC_CTL_FENCENO_MASK REG_GENMASK(3, 0) +#define DPFC_CTL_FENCENO(fence) REG_FIELD_PREP(DPFC_CTL_FENCENO_MASK, (fence)) #define DPFC_RECOMP_CTL _MMIO(0x320c) -#define DPFC_RECOMP_STALL_EN (1 << 27) -#define DPFC_RECOMP_STALL_WM_SHIFT (16) -#define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000) -#define DPFC_RECOMP_TIMER_COUNT_SHIFT (0) -#define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f) +#define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c) +#define DPFC_RECOMP_STALL_EN REG_BIT(27) +#define DPFC_RECOMP_STALL_WM_MASK REG_GENMASK(26, 16) +#define DPFC_RECOMP_TIMER_COUNT_MASK REG_GENMASK(5, 0) #define DPFC_STATUS _MMIO(0x3210) -#define DPFC_INVAL_SEG_SHIFT (16) -#define DPFC_INVAL_SEG_MASK (0x07ff0000) -#define DPFC_COMP_SEG_SHIFT (0) -#define DPFC_COMP_SEG_MASK (0x000007ff) +#define ILK_DPFC_STATUS _MMIO(0x43210) +#define DPFC_INVAL_SEG_MASK REG_GENMASK(26, 16) +#define DPFC_COMP_SEG_MASK REG_GENMASK(10, 0) #define DPFC_STATUS2 _MMIO(0x3214) +#define ILK_DPFC_STATUS2 _MMIO(0x43214) +#define DPFC_COMP_SEG_MASK_IVB
REG_GENMASK(11, 0) #define DPFC_FENCE_YOFF _MMIO(0x3218) -#define DPFC_CHICKEN _MMIO(0x3224) -#define DPFC_HT_MODIFY (1 << 31) - -/* Framebuffer compression for Ironlake */ -#define ILK_DPFC_CB_BASE _MMIO(0x43200) -#define ILK_DPFC_CONTROL _MMIO(0x43208) -#define FBC_CTL_FALSE_COLOR (1 << 10) -/* The bit 28-8 is reserved */ -#define DPFC_RESERVED (0x1FFFFF00) -#define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c) -#define ILK_DPFC_STATUS _MMIO(0x43210) -#define ILK_DPFC_COMP_SEG_MASK 0x7ff -#define IVB_FBC_STATUS2 _MMIO(0x43214) -#define IVB_FBC_COMP_SEG_MASK 0x7ff -#define BDW_FBC_COMP_SEG_MASK 0xfff #define ILK_DPFC_FENCE_YOFF _MMIO(0x43218) +#define DPFC_CHICKEN _MMIO(0x3224) #define ILK_DPFC_CHICKEN _MMIO(0x43224) -#define ILK_DPFC_DISABLE_DUMMY0 (1 << 8) -#define ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL (1 << 14) -#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1 << 23) +#define DPFC_HT_MODIFY REG_BIT(31) /* pre-ivb */ +#define DPFC_NUKE_ON_ANY_MODIFICATION REG_BIT(23) /* bdw+ */ +#define DPFC_CHICKEN_COMP_DUMMY_PIXEL REG_BIT(14) /* glk+ */ +#define DPFC_DISABLE_DUMMY0 REG_BIT(8) /* ivb+ */ + #define GLK_FBC_STRIDE _MMIO(0x43228) #define FBC_STRIDE_OVERRIDE REG_BIT(15) #define FBC_STRIDE_MASK REG_GENMASK(14, 0) #define FBC_STRIDE(x) REG_FIELD_PREP(FBC_STRIDE_MASK, (x)) + #define ILK_FBC_RT_BASE _MMIO(0x2128) -#define ILK_FBC_RT_VALID (1 << 0) -#define SNB_FBC_FRONT_BUFFER (1 << 1) +#define ILK_FBC_RT_VALID REG_BIT(0) +#define SNB_FBC_FRONT_BUFFER REG_BIT(1) #define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000) #define ILK_FBCQ_DIS (1 << 22) @@ -3417,8 +3459,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) * The following two registers are of type GTTMMADR */ #define SNB_DPFC_CTL_SA _MMIO(0x100100) -#define SNB_CPU_FENCE_ENABLE (1 << 29) -#define DPFC_CPU_FENCE_OFFSET _MMIO(0x100104) +#define SNB_DPFC_FENCE_EN REG_BIT(29) +#define SNB_DPFC_FENCENO_MASK REG_GENMASK(4, 0) +#define SNB_DPFC_FENCENO(fence) REG_FIELD_PREP(SNB_DPFC_FENCENO_MASK, (fence)) +#define SNB_DPFC_CPU_FENCE_OFFSET _MMIO(0x100104) /* Framebuffer compression for Ivybridge */ #define IVB_FBC_RT_BASE _MMIO(0x7020) @@ -3428,8 +3472,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define IPS_ENABLE (1 << 31) #define MSG_FBC_REND_STATE _MMIO(0x50380) -#define FBC_REND_NUKE (1 << 2) -#define FBC_REND_CACHE_CLEAN (1 << 1) +#define FBC_REND_NUKE REG_BIT(2) +#define FBC_REND_CACHE_CLEAN REG_BIT(1) /* * GPIO regs @@ -4278,21 +4322,62 @@ enum { /* * GEN10 clock gating regs */ + +#define UNSLCGCTL9440 _MMIO(0x9440) +#define GAMTLBOACS_CLKGATE_DIS REG_BIT(28) +#define GAMTLBVDBOX5_CLKGATE_DIS REG_BIT(27) +#define GAMTLBVDBOX6_CLKGATE_DIS REG_BIT(26) +#define GAMTLBVDBOX3_CLKGATE_DIS REG_BIT(24) +#define GAMTLBVDBOX4_CLKGATE_DIS REG_BIT(23) +#define GAMTLBVDBOX7_CLKGATE_DIS REG_BIT(22) +#define GAMTLBVDBOX2_CLKGATE_DIS REG_BIT(21) +#define GAMTLBVDBOX0_CLKGATE_DIS REG_BIT(17) +#define GAMTLBKCR_CLKGATE_DIS REG_BIT(16) +#define GAMTLBGUC_CLKGATE_DIS REG_BIT(15) +#define GAMTLBBLT_CLKGATE_DIS REG_BIT(14) +#define GAMTLBVDBOX1_CLKGATE_DIS REG_BIT(6) + +#define UNSLCGCTL9444 _MMIO(0x9444) +#define GAMTLBGFXA0_CLKGATE_DIS REG_BIT(30) +#define GAMTLBGFXA1_CLKGATE_DIS REG_BIT(29) +#define GAMTLBCOMPA0_CLKGATE_DIS REG_BIT(28) +#define GAMTLBCOMPA1_CLKGATE_DIS REG_BIT(27) +#define GAMTLBCOMPB0_CLKGATE_DIS REG_BIT(26) +#define GAMTLBCOMPB1_CLKGATE_DIS REG_BIT(25) +#define GAMTLBCOMPC0_CLKGATE_DIS REG_BIT(24) +#define GAMTLBCOMPC1_CLKGATE_DIS REG_BIT(23) +#define GAMTLBCOMPD0_CLKGATE_DIS REG_BIT(22) +#define GAMTLBCOMPD1_CLKGATE_DIS REG_BIT(21) 
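Most of the churn in these register headers is mechanical: open-coded (1 << n) bits and magic hex masks become REG_BIT()/REG_GENMASK()/REG_FIELD_PREP(), so a field's position is encoded exactly once, in its mask. Below is a minimal stand-alone sketch of the arithmetic those helpers perform; the in-kernel versions (i915_reg.h wrapping linux/bits.h and linux/bitfield.h) add compile-time mask validation that this sketch omits, and __builtin_ctz is used here as a GCC/Clang stand-in for the kernel's field-shift derivation.

```c
/*
 * Stand-alone sketch of the REG_BIT()/REG_GENMASK()/REG_FIELD_PREP()
 * helpers these definitions are being converted to.  The real versions
 * add compile-time mask validation; this only shows the arithmetic.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define REG_BIT(n)        ((uint32_t)1 << (n))
#define REG_GENMASK(h, l) ((~(uint32_t)0 >> (31 - (h))) & (~(uint32_t)0 << (l)))
/* __builtin_ctz (GCC/Clang) derives the field's shift from its mask. */
#define REG_FIELD_PREP(mask, v) (((uint32_t)(v) << __builtin_ctz(mask)) & (mask))

int main(void)
{
	/* DPFC_CTL_LIMIT_2X: the old (1 << 6) encoding, derived from the mask. */
	assert(REG_FIELD_PREP(REG_GENMASK(7, 6), 1) == (1u << 6));
	/* DPINVGTT_EN_MASK_VLV: same value as the old magic 0xff0000. */
	assert(REG_GENMASK(23, 16) == 0x00ff0000);
	printf("REG_GENMASK(7, 6) = %#x\n", (unsigned)REG_GENMASK(7, 6));
	return 0;
}
```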
+#define GAMTLBMERT_CLKGATE_DIS REG_BIT(20) +#define GAMTLBVEBOX3_CLKGATE_DIS REG_BIT(19) +#define GAMTLBVEBOX2_CLKGATE_DIS REG_BIT(18) +#define GAMTLBVEBOX1_CLKGATE_DIS REG_BIT(17) +#define GAMTLBVEBOX0_CLKGATE_DIS REG_BIT(16) +#define LTCDD_CLKGATE_DIS REG_BIT(10) + #define SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4) #define SARBUNIT_CLKGATE_DIS (1 << 5) #define RCCUNIT_CLKGATE_DIS (1 << 7) #define MSCUNIT_CLKGATE_DIS (1 << 10) +#define NODEDSS_CLKGATE_DIS REG_BIT(12) #define L3_CLKGATE_DIS REG_BIT(16) #define L3_CR2X_CLKGATE_DIS REG_BIT(17) #define SUBSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9524) -#define GWUNIT_CLKGATE_DIS (1 << 16) +#define DSS_ROUTER_CLKGATE_DIS REG_BIT(28) +#define GWUNIT_CLKGATE_DIS REG_BIT(16) #define SUBSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x9528) #define CPSSUNIT_CLKGATE_DIS REG_BIT(9) +#define SSMCGCTL9530 _MMIO(0x9530) +#define RTFUNIT_CLKGATE_DIS REG_BIT(18) + #define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434) #define VFUNIT_CLKGATE_DIS REG_BIT(20) +#define TSGUNIT_CLKGATE_DIS REG_BIT(17) /* XEHPSDV */ +#define CG3DDISCFEG_CLKGATE_DIS REG_BIT(17) /* DG2 */ +#define GAMEDIA_CLKGATE_DIS REG_BIT(11) #define HSUNIT_CLKGATE_DIS REG_BIT(8) #define VSUNIT_CLKGATE_DIS REG_BIT(3) @@ -4309,47 +4394,52 @@ enum { /* Pipe A CRC regs */ #define _PIPE_CRC_CTL_A 0x60050 -#define PIPE_CRC_ENABLE (1 << 31) +#define PIPE_CRC_ENABLE REG_BIT(31) /* skl+ source selection */ -#define PIPE_CRC_SOURCE_PLANE_1_SKL (0 << 28) -#define PIPE_CRC_SOURCE_PLANE_2_SKL (2 << 28) -#define PIPE_CRC_SOURCE_DMUX_SKL (4 << 28) -#define PIPE_CRC_SOURCE_PLANE_3_SKL (6 << 28) -#define PIPE_CRC_SOURCE_PLANE_4_SKL (7 << 28) -#define PIPE_CRC_SOURCE_PLANE_5_SKL (5 << 28) -#define PIPE_CRC_SOURCE_PLANE_6_SKL (3 << 28) -#define PIPE_CRC_SOURCE_PLANE_7_SKL (1 << 28) +#define PIPE_CRC_SOURCE_MASK_SKL REG_GENMASK(30, 28) +#define PIPE_CRC_SOURCE_PLANE_1_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 0) +#define PIPE_CRC_SOURCE_PLANE_2_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 2) +#define PIPE_CRC_SOURCE_DMUX_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 4) +#define PIPE_CRC_SOURCE_PLANE_3_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 6) +#define PIPE_CRC_SOURCE_PLANE_4_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 7) +#define PIPE_CRC_SOURCE_PLANE_5_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 5) +#define PIPE_CRC_SOURCE_PLANE_6_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 3) +#define PIPE_CRC_SOURCE_PLANE_7_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 1) /* ivb+ source selection */ -#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29) -#define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29) -#define PIPE_CRC_SOURCE_PF_IVB (2 << 29) +#define PIPE_CRC_SOURCE_MASK_IVB REG_GENMASK(30, 29) +#define PIPE_CRC_SOURCE_PRIMARY_IVB REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_IVB, 0) +#define PIPE_CRC_SOURCE_SPRITE_IVB REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_IVB, 1) +#define PIPE_CRC_SOURCE_PF_IVB REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_IVB, 2) /* ilk+ source selection */ -#define PIPE_CRC_SOURCE_PRIMARY_ILK (0 << 28) -#define PIPE_CRC_SOURCE_SPRITE_ILK (1 << 28) -#define PIPE_CRC_SOURCE_PIPE_ILK (2 << 28) -/* embedded DP port on the north display block, reserved on ivb */ -#define PIPE_CRC_SOURCE_PORT_A_ILK (4 << 28) -#define PIPE_CRC_SOURCE_FDI_ILK (5 << 28) /* reserved on ivb */ +#define PIPE_CRC_SOURCE_MASK_ILK REG_GENMASK(30, 28) +#define PIPE_CRC_SOURCE_PRIMARY_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 0) +#define PIPE_CRC_SOURCE_SPRITE_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 1) +#define PIPE_CRC_SOURCE_PIPE_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 2) +/* 
embedded DP port on the north display block */ +#define PIPE_CRC_SOURCE_PORT_A_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 4) +#define PIPE_CRC_SOURCE_FDI_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 5) /* vlv source selection */ -#define PIPE_CRC_SOURCE_PIPE_VLV (0 << 27) -#define PIPE_CRC_SOURCE_HDMIB_VLV (1 << 27) -#define PIPE_CRC_SOURCE_HDMIC_VLV (2 << 27) +#define PIPE_CRC_SOURCE_MASK_VLV REG_GENMASK(30, 27) +#define PIPE_CRC_SOURCE_PIPE_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 0) +#define PIPE_CRC_SOURCE_HDMIB_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 1) +#define PIPE_CRC_SOURCE_HDMIC_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 2) /* with DP port the pipe source is invalid */ -#define PIPE_CRC_SOURCE_DP_D_VLV (3 << 27) -#define PIPE_CRC_SOURCE_DP_B_VLV (6 << 27) -#define PIPE_CRC_SOURCE_DP_C_VLV (7 << 27) +#define PIPE_CRC_SOURCE_DP_D_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 3) +#define PIPE_CRC_SOURCE_DP_B_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 6) +#define PIPE_CRC_SOURCE_DP_C_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 7) /* gen3+ source selection */ -#define PIPE_CRC_SOURCE_PIPE_I9XX (0 << 28) -#define PIPE_CRC_SOURCE_SDVOB_I9XX (1 << 28) -#define PIPE_CRC_SOURCE_SDVOC_I9XX (2 << 28) +#define PIPE_CRC_SOURCE_MASK_I9XX REG_GENMASK(30, 28) +#define PIPE_CRC_SOURCE_PIPE_I9XX REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 0) +#define PIPE_CRC_SOURCE_SDVOB_I9XX REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 1) +#define PIPE_CRC_SOURCE_SDVOC_I9XX REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 2) /* with DP/TV port the pipe source is invalid */ -#define PIPE_CRC_SOURCE_DP_D_G4X (3 << 28) -#define PIPE_CRC_SOURCE_TV_PRE (4 << 28) -#define PIPE_CRC_SOURCE_TV_POST (5 << 28) -#define PIPE_CRC_SOURCE_DP_B_G4X (6 << 28) -#define PIPE_CRC_SOURCE_DP_C_G4X (7 << 28) +#define PIPE_CRC_SOURCE_DP_D_G4X REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 3) +#define PIPE_CRC_SOURCE_TV_PRE REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 4) +#define PIPE_CRC_SOURCE_TV_POST REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 5) +#define PIPE_CRC_SOURCE_DP_B_G4X REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 6) +#define PIPE_CRC_SOURCE_DP_C_G4X REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 7) /* gen2 doesn't have source selection bits */ -#define PIPE_CRC_INCLUDE_BORDER_I8XX (1 << 30) +#define PIPE_CRC_INCLUDE_BORDER_I8XX REG_BIT(30) #define _PIPE_CRC_RES_1_A_IVB 0x60064 #define _PIPE_CRC_RES_2_A_IVB 0x60068 @@ -4698,11 +4788,11 @@ enum { #define PSR_EVENT_LPSP_MODE_EXIT (1 << 1) #define PSR_EVENT_PSR_DISABLE (1 << 0) -#define _PSR2_STATUS_A 0x60940 -#define _PSR2_STATUS_EDP 0x6f940 -#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A) -#define EDP_PSR2_STATUS_STATE_MASK (0xf << 28) -#define EDP_PSR2_STATUS_STATE_SHIFT 28 +#define _PSR2_STATUS_A 0x60940 +#define _PSR2_STATUS_EDP 0x6f940 +#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A) +#define EDP_PSR2_STATUS_STATE_MASK REG_GENMASK(31, 28) +#define EDP_PSR2_STATUS_STATE_DEEP_SLEEP REG_FIELD_PREP(EDP_PSR2_STATUS_STATE_MASK, 0x8) #define _PSR2_SU_STATUS_A 0x60914 #define _PSR2_SU_STATUS_EDP 0x6f914 @@ -4999,9 +5089,9 @@ enum { #define PORT_DFT2_G4X _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61154) #define DC_BALANCE_RESET_VLV (1 << 31) #define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0)) -#define PIPE_C_SCRAMBLE_RESET (1 << 14) /* chv */ -#define PIPE_B_SCRAMBLE_RESET (1 << 1) -#define PIPE_A_SCRAMBLE_RESET (1 << 0) +#define PIPE_C_SCRAMBLE_RESET REG_BIT(14) /* chv */ +#define PIPE_B_SCRAMBLE_RESET REG_BIT(1) +#define PIPE_A_SCRAMBLE_RESET REG_BIT(0) /* 
Gen 3 SDVO bits: */ #define SDVO_ENABLE (1 << 31) @@ -6266,55 +6356,55 @@ enum { #define PIPE_STATUS_PORT_UNDERRUN_XELPD REG_BIT(26) #define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028) -#define PIPEB_LINE_COMPARE_INT_EN (1 << 29) -#define PIPEB_HLINE_INT_EN (1 << 28) -#define PIPEB_VBLANK_INT_EN (1 << 27) -#define SPRITED_FLIP_DONE_INT_EN (1 << 26) -#define SPRITEC_FLIP_DONE_INT_EN (1 << 25) -#define PLANEB_FLIP_DONE_INT_EN (1 << 24) -#define PIPE_PSR_INT_EN (1 << 22) -#define PIPEA_LINE_COMPARE_INT_EN (1 << 21) -#define PIPEA_HLINE_INT_EN (1 << 20) -#define PIPEA_VBLANK_INT_EN (1 << 19) -#define SPRITEB_FLIP_DONE_INT_EN (1 << 18) -#define SPRITEA_FLIP_DONE_INT_EN (1 << 17) -#define PLANEA_FLIPDONE_INT_EN (1 << 16) -#define PIPEC_LINE_COMPARE_INT_EN (1 << 13) -#define PIPEC_HLINE_INT_EN (1 << 12) -#define PIPEC_VBLANK_INT_EN (1 << 11) -#define SPRITEF_FLIPDONE_INT_EN (1 << 10) -#define SPRITEE_FLIPDONE_INT_EN (1 << 9) -#define PLANEC_FLIPDONE_INT_EN (1 << 8) +#define PIPEB_LINE_COMPARE_INT_EN REG_BIT(29) +#define PIPEB_HLINE_INT_EN REG_BIT(28) +#define PIPEB_VBLANK_INT_EN REG_BIT(27) +#define SPRITED_FLIP_DONE_INT_EN REG_BIT(26) +#define SPRITEC_FLIP_DONE_INT_EN REG_BIT(25) +#define PLANEB_FLIP_DONE_INT_EN REG_BIT(24) +#define PIPE_PSR_INT_EN REG_BIT(22) +#define PIPEA_LINE_COMPARE_INT_EN REG_BIT(21) +#define PIPEA_HLINE_INT_EN REG_BIT(20) +#define PIPEA_VBLANK_INT_EN REG_BIT(19) +#define SPRITEB_FLIP_DONE_INT_EN REG_BIT(18) +#define SPRITEA_FLIP_DONE_INT_EN REG_BIT(17) +#define PLANEA_FLIPDONE_INT_EN REG_BIT(16) +#define PIPEC_LINE_COMPARE_INT_EN REG_BIT(13) +#define PIPEC_HLINE_INT_EN REG_BIT(12) +#define PIPEC_VBLANK_INT_EN REG_BIT(11) +#define SPRITEF_FLIPDONE_INT_EN REG_BIT(10) +#define SPRITEE_FLIPDONE_INT_EN REG_BIT(9) +#define PLANEC_FLIPDONE_INT_EN REG_BIT(8) #define DPINVGTT _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */ -#define SPRITEF_INVALID_GTT_INT_EN (1 << 27) -#define SPRITEE_INVALID_GTT_INT_EN (1 << 26) -#define PLANEC_INVALID_GTT_INT_EN (1 << 25) -#define CURSORC_INVALID_GTT_INT_EN (1 << 24) -#define CURSORB_INVALID_GTT_INT_EN (1 << 23) -#define CURSORA_INVALID_GTT_INT_EN (1 << 22) -#define SPRITED_INVALID_GTT_INT_EN (1 << 21) -#define SPRITEC_INVALID_GTT_INT_EN (1 << 20) -#define PLANEB_INVALID_GTT_INT_EN (1 << 19) -#define SPRITEB_INVALID_GTT_INT_EN (1 << 18) -#define SPRITEA_INVALID_GTT_INT_EN (1 << 17) -#define PLANEA_INVALID_GTT_INT_EN (1 << 16) -#define DPINVGTT_EN_MASK 0xff0000 -#define DPINVGTT_EN_MASK_CHV 0xfff0000 -#define SPRITEF_INVALID_GTT_STATUS (1 << 11) -#define SPRITEE_INVALID_GTT_STATUS (1 << 10) -#define PLANEC_INVALID_GTT_STATUS (1 << 9) -#define CURSORC_INVALID_GTT_STATUS (1 << 8) -#define CURSORB_INVALID_GTT_STATUS (1 << 7) -#define CURSORA_INVALID_GTT_STATUS (1 << 6) -#define SPRITED_INVALID_GTT_STATUS (1 << 5) -#define SPRITEC_INVALID_GTT_STATUS (1 << 4) -#define PLANEB_INVALID_GTT_STATUS (1 << 3) -#define SPRITEB_INVALID_GTT_STATUS (1 << 2) -#define SPRITEA_INVALID_GTT_STATUS (1 << 1) -#define PLANEA_INVALID_GTT_STATUS (1 << 0) -#define DPINVGTT_STATUS_MASK 0xff -#define DPINVGTT_STATUS_MASK_CHV 0xfff +#define DPINVGTT_EN_MASK_CHV REG_GENMASK(27, 16) +#define DPINVGTT_EN_MASK_VLV REG_GENMASK(23, 16) +#define SPRITEF_INVALID_GTT_INT_EN REG_BIT(27) +#define SPRITEE_INVALID_GTT_INT_EN REG_BIT(26) +#define PLANEC_INVALID_GTT_INT_EN REG_BIT(25) +#define CURSORC_INVALID_GTT_INT_EN REG_BIT(24) +#define CURSORB_INVALID_GTT_INT_EN REG_BIT(23) +#define CURSORA_INVALID_GTT_INT_EN REG_BIT(22) +#define SPRITED_INVALID_GTT_INT_EN REG_BIT(21) 
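The DPINVGTT rework in this hunk replaces the bare 0xff0000/0xfff0000 enable masks and 0xff/0xfff status masks with REG_GENMASK() forms that make the layout explicit: each status bit in the low bits has its enable bit 16 positions above it. A hypothetical, self-contained model of servicing such a split enable/status register follows; the write-1-to-clear behaviour, the fake backing store, and the helper names are assumptions for illustration, not the driver's code.

```c
/* Toy model of a combined enable/status register laid out like the
 * reworked DPINVGTT (VLV): enable bits in [23:16], latched status bits
 * in [7:0].  Write-1-to-clear ack semantics are assumed here. */
#include <assert.h>
#include <stdint.h>

#define DPINVGTT_EN_MASK_VLV     (0xffu << 16)
#define DPINVGTT_STATUS_MASK_VLV 0xffu

static uint32_t fake_dpinvgtt;            /* stand-in for the MMIO register */

static uint32_t reg_read(void) { return fake_dpinvgtt; }
static void reg_write(uint32_t val)
{
	/* Model write-1-to-clear for the status byte only. */
	fake_dpinvgtt &= ~(val & DPINVGTT_STATUS_MASK_VLV);
}

int main(void)
{
	fake_dpinvgtt = (0x03u << 16) | 0x07; /* two sources enabled, three latched */

	uint32_t v = reg_read();
	/* Service only status bits whose enable bit (16 positions up) is set. */
	uint32_t pending = v & DPINVGTT_STATUS_MASK_VLV & (v >> 16);

	assert(pending == 0x03);
	reg_write(pending);                   /* ack what was handled */
	assert((reg_read() & DPINVGTT_STATUS_MASK_VLV) == 0x04);
	return 0;
}
```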
+#define SPRITEC_INVALID_GTT_INT_EN REG_BIT(20) +#define PLANEB_INVALID_GTT_INT_EN REG_BIT(19) +#define SPRITEB_INVALID_GTT_INT_EN REG_BIT(18) +#define SPRITEA_INVALID_GTT_INT_EN REG_BIT(17) +#define PLANEA_INVALID_GTT_INT_EN REG_BIT(16) +#define DPINVGTT_STATUS_MASK_CHV REG_GENMASK(11, 0) +#define DPINVGTT_STATUS_MASK_VLV REG_GENMASK(7, 0) +#define SPRITEF_INVALID_GTT_STATUS REG_BIT(11) +#define SPRITEE_INVALID_GTT_STATUS REG_BIT(10) +#define PLANEC_INVALID_GTT_STATUS REG_BIT(9) +#define CURSORC_INVALID_GTT_STATUS REG_BIT(8) +#define CURSORB_INVALID_GTT_STATUS REG_BIT(7) +#define CURSORA_INVALID_GTT_STATUS REG_BIT(6) +#define SPRITED_INVALID_GTT_STATUS REG_BIT(5) +#define SPRITEC_INVALID_GTT_STATUS REG_BIT(4) +#define PLANEB_INVALID_GTT_STATUS REG_BIT(3) +#define SPRITEB_INVALID_GTT_STATUS REG_BIT(2) +#define SPRITEA_INVALID_GTT_STATUS REG_BIT(1) +#define PLANEA_INVALID_GTT_STATUS REG_BIT(0) #define DSPARB _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70030) #define DSPARB_CSTART_MASK (0x7f << 7) @@ -6877,7 +6967,7 @@ enum { #define DVS_SOURCE_KEY (1 << 22) #define DVS_RGB_ORDER_XBGR (1 << 20) #define DVS_YUV_FORMAT_BT709 (1 << 18) -#define DVS_YUV_BYTE_ORDER_MASK (3 << 16) +#define DVS_YUV_ORDER_MASK (3 << 16) #define DVS_YUV_ORDER_YUYV (0 << 16) #define DVS_YUV_ORDER_UYVY (1 << 16) #define DVS_YUV_ORDER_YVYU (2 << 16) @@ -6956,7 +7046,7 @@ enum { #define SPRITE_RGB_ORDER_RGBX (1 << 20) /* only for 888 and 161616 */ #define SPRITE_YUV_TO_RGB_CSC_DISABLE (1 << 19) #define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18) /* 0 is BT601 */ -#define SPRITE_YUV_BYTE_ORDER_MASK (3 << 16) +#define SPRITE_YUV_ORDER_MASK (3 << 16) #define SPRITE_YUV_ORDER_YUYV (0 << 16) #define SPRITE_YUV_ORDER_UYVY (1 << 16) #define SPRITE_YUV_ORDER_YVYU (2 << 16) @@ -7041,7 +7131,7 @@ enum { #define SP_ALPHA_PREMULTIPLY (1 << 23) /* CHV pipe B */ #define SP_SOURCE_KEY (1 << 22) #define SP_YUV_FORMAT_BT709 (1 << 18) -#define SP_YUV_BYTE_ORDER_MASK (3 << 16) +#define SP_YUV_ORDER_MASK (3 << 16) #define SP_YUV_ORDER_YUYV (0 << 16) #define SP_YUV_ORDER_UYVY (1 << 16) #define SP_YUV_ORDER_YVYU (2 << 16) @@ -7182,10 +7272,10 @@ enum { #define PLANE_CTL_YUV420_Y_PLANE (1 << 19) #define PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18) #define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16) -#define PLANE_CTL_YUV422_YUYV (0 << 16) -#define PLANE_CTL_YUV422_UYVY (1 << 16) -#define PLANE_CTL_YUV422_YVYU (2 << 16) -#define PLANE_CTL_YUV422_VYUY (3 << 16) +#define PLANE_CTL_YUV422_ORDER_YUYV (0 << 16) +#define PLANE_CTL_YUV422_ORDER_UYVY (1 << 16) +#define PLANE_CTL_YUV422_ORDER_YVYU (2 << 16) +#define PLANE_CTL_YUV422_ORDER_VYUY (3 << 16) #define PLANE_CTL_RENDER_DECOMPRESSION_ENABLE (1 << 15) #define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14) #define PLANE_CTL_CLEAR_COLOR_DISABLE (1 << 13) /* TGL+ */ @@ -7239,10 +7329,10 @@ enum { #define _PLANE_CUS_CTL_1_A 0x701c8 #define _PLANE_CUS_CTL_2_A 0x702c8 #define PLANE_CUS_ENABLE (1 << 31) -#define PLANE_CUS_PLANE_4_RKL (0 << 30) -#define PLANE_CUS_PLANE_5_RKL (1 << 30) -#define PLANE_CUS_PLANE_6 (0 << 30) -#define PLANE_CUS_PLANE_7 (1 << 30) +#define PLANE_CUS_Y_PLANE_4_RKL (0 << 30) +#define PLANE_CUS_Y_PLANE_5_RKL (1 << 30) +#define PLANE_CUS_Y_PLANE_6_ICL (0 << 30) +#define PLANE_CUS_Y_PLANE_7_ICL (1 << 30) #define PLANE_CUS_HPHASE_SIGN_NEGATIVE (1 << 19) #define PLANE_CUS_HPHASE_0 (0 << 16) #define PLANE_CUS_HPHASE_0_25 (1 << 16) @@ -7274,12 +7364,12 @@ enum { #define _PLANE_NV12_BUF_CFG_1_A 0x70278 #define _PLANE_NV12_BUF_CFG_2_A 0x70378 -#define _PLANE_CC_VAL_1_B 0x711b4 -#define 
_PLANE_CC_VAL_2_B 0x712b4 -#define _PLANE_CC_VAL_1(pipe) _PIPE(pipe, _PLANE_CC_VAL_1_A, _PLANE_CC_VAL_1_B) -#define _PLANE_CC_VAL_2(pipe) _PIPE(pipe, _PLANE_CC_VAL_2_A, _PLANE_CC_VAL_2_B) -#define PLANE_CC_VAL(pipe, plane) \ - _MMIO_PLANE(plane, _PLANE_CC_VAL_1(pipe), _PLANE_CC_VAL_2(pipe)) +#define _PLANE_CC_VAL_1_B 0x711b4 +#define _PLANE_CC_VAL_2_B 0x712b4 +#define _PLANE_CC_VAL_1(pipe, dw) (_PIPE(pipe, _PLANE_CC_VAL_1_A, _PLANE_CC_VAL_1_B) + (dw) * 4) +#define _PLANE_CC_VAL_2(pipe, dw) (_PIPE(pipe, _PLANE_CC_VAL_2_A, _PLANE_CC_VAL_2_B) + (dw) * 4) +#define PLANE_CC_VAL(pipe, plane, dw) \ + _MMIO_PLANE((plane), _PLANE_CC_VAL_1((pipe), (dw)), _PLANE_CC_VAL_2((pipe), (dw))) /* Input CSC Register Definitions */ #define _PLANE_INPUT_CSC_RY_GY_1_A 0x701E0 @@ -8263,7 +8353,7 @@ enum { /* * The below are numbered starting from "S1" on gen11/gen12, but starting - * with gen13 display, the bspec switches to a 0-based numbering scheme + * with display 13, the bspec switches to a 0-based numbering scheme * (although the addresses stay the same so new S0 = old S1, new S1 = old S2). * We'll just use the 0-based numbering here for all platforms since it's the * way things will be named by the hardware team going forward, plus it's more @@ -8308,9 +8398,10 @@ enum { #define RESET_PCH_HANDSHAKE_ENABLE (1 << 4) #define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430) -#define SKL_SELECT_ALTERNATE_DC_EXIT (1 << 30) -#define ICL_DELAY_PMRSP (1 << 22) -#define MASK_WAKEMEM (1 << 13) +#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30) +#define ICL_DELAY_PMRSP REG_BIT(22) +#define DISABLE_FLR_SRC REG_BIT(15) +#define MASK_WAKEMEM REG_BIT(13) #define GEN11_CHICKEN_DCPR_2 _MMIO(0x46434) #define DCPR_MASK_MAXLATENCY_MEMUP_CLR REG_BIT(27) @@ -8351,6 +8442,9 @@ enum { #define GEN9_CTX_PREEMPT_REG _MMIO(0x2248) #define GEN12_DISABLE_POSH_BUSY_FF_DOP_CG REG_BIT(11) +#define GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON _MMIO(0x20EC) +#define GEN12_REPLAY_MODE_GRANULARITY REG_BIT(0) + #define GEN8_CS_CHICKEN1 _MMIO(0x2580) #define GEN9_PREEMPT_3D_OBJECT_LEVEL (1 << 0) #define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1)) @@ -8374,9 +8468,10 @@ enum { #define GEN8_ERRDETBCTRL (1 << 9) #define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304) - #define DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN REG_BIT(12) - #define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC REG_BIT(11) - #define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE REG_BIT(9) +#define DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN REG_BIT(12) +#define XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE REG_BIT(12) +#define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC REG_BIT(11) +#define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE REG_BIT(9) #define HIZ_CHICKEN _MMIO(0x7018) # define CHV_HZ_8X8_MODE_IN_1X REG_BIT(15) @@ -8430,6 +8525,12 @@ enum { #define GEN8_LQSC_FLUSH_COHERENT_LINES (1 << 21) #define GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(22) +#define GEN11_L3SQCREG5 _MMIO(0xb158) +#define L3_PWM_TIMER_INIT_VAL_MASK REG_GENMASK(9, 0) + +#define XEHP_L3SCQREG7 _MMIO(0xb188) +#define BLEND_FILL_CACHING_OPT_DIS REG_BIT(3) + /* GEN8 chicken */ #define HDC_CHICKEN0 _MMIO(0x7300) #define ICL_HDC_MODE _MMIO(0xE5F4) @@ -8440,6 +8541,12 @@ enum { #define HDC_FORCE_NON_COHERENT (1 << 4) #define HDC_BARRIER_PERFORMANCE_DISABLE (1 << 10) +#define GEN12_HDC_CHICKEN0 _MMIO(0xE5F0) +#define LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK REG_GENMASK(13, 11) + +#define SARB_CHICKEN1 _MMIO(0xe90c) +#define COMP_CKN_IN REG_GENMASK(30, 29) + #define GEN8_HDC_CHICKEN1 _MMIO(0x7304) /* GEN9 chicken */ @@ -8467,8 +8574,13 @@ enum { _PIPEB_CHICKEN) 
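The PLANE_CC_VAL() change earlier in this hunk adds a dword index, so the 64-bit plane clear-color value can be addressed as two consecutive 32-bit registers spaced (dw) * 4 bytes apart. A small sketch of the access pattern that enables is below; the zero register base, the fake backing store, and reg_read() are hypothetical, only the dword offsetting mirrors the macro change.

```c
/* Sketch: compose a 64-bit clear-color value from two dword registers,
 * using the same (dw) * 4 offsetting as the reworked PLANE_CC_VAL(). */
#include <assert.h>
#include <stdint.h>

#define PLANE_CC_VAL(base, dw) ((base) + (dw) * 4) /* 4 bytes per dword */

static const uint32_t fake_mmio[2] = { 0x89abcdefu, 0x01234567u };

static uint32_t reg_read(uint32_t offset)
{
	return fake_mmio[offset / 4];         /* stand-in for an MMIO read */
}

int main(void)
{
	uint32_t base = 0;                    /* hypothetical register base */
	uint64_t cc = reg_read(PLANE_CC_VAL(base, 0)) |
		      (uint64_t)reg_read(PLANE_CC_VAL(base, 1)) << 32;

	assert(cc == 0x0123456789abcdefull);
	return 0;
}
```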
#define UNDERRUN_RECOVERY_DISABLE_ADLP REG_BIT(30) #define UNDERRUN_RECOVERY_ENABLE_DG2 REG_BIT(30) -#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU (1 << 15) -#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7) +#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU REG_BIT(15) +#define DG2_RENDER_CCSTAG_4_3_EN REG_BIT(12) +#define PER_PIXEL_ALPHA_BYPASS_EN REG_BIT(7) + +#define VFLSKPD _MMIO(0x62a8) +#define DIS_OVER_FETCH_CACHE REG_BIT(1) +#define DIS_MULT_MISS_RD_SQUASH REG_BIT(0) #define FF_MODE2 _MMIO(0x6604) #define FF_MODE2_GS_TIMER_MASK REG_GENMASK(31, 24) @@ -9293,6 +9405,9 @@ enum { #define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1 << 14) #define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1 << 28) +#define UNSLCGCTL9430 _MMIO(0x9430) +#define MSQDUNIT_CLKGATE_DIS REG_BIT(3) + #define GEN6_GFXPAUSE _MMIO(0xA000) #define GEN6_RPNSWREQ _MMIO(0xA008) #define GEN6_TURBO_DISABLE (1 << 31) @@ -9608,24 +9723,39 @@ enum { #define GEN9_CCS_TLB_PREFETCH_ENABLE (1 << 3) #define GEN8_ROW_CHICKEN _MMIO(0xe4f0) -#define FLOW_CONTROL_ENABLE (1 << 15) -#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1 << 8) -#define STALL_DOP_GATING_DISABLE (1 << 5) -#define THROTTLE_12_5 (7 << 2) -#define DISABLE_EARLY_EOT (1 << 1) +#define FLOW_CONTROL_ENABLE REG_BIT(15) +#define UGM_BACKUP_MODE REG_BIT(13) +#define MDQ_ARBITRATION_MODE REG_BIT(12) +#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE REG_BIT(8) +#define STALL_DOP_GATING_DISABLE REG_BIT(5) +#define THROTTLE_12_5 REG_GENMASK(4, 2) +#define DISABLE_EARLY_EOT REG_BIT(1) #define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4) +#define GEN12_DISABLE_READ_SUPPRESSION REG_BIT(15) #define GEN12_DISABLE_EARLY_READ REG_BIT(14) +#define GEN12_ENABLE_LARGE_GRF_MODE REG_BIT(12) #define GEN12_PUSH_CONST_DEREF_HOLD_DIS REG_BIT(8) +#define LSC_CHICKEN_BIT_0 _MMIO(0xe7c8) +#define FORCE_1_SUB_MESSAGE_PER_FRAGMENT REG_BIT(15) +#define LSC_CHICKEN_BIT_0_UDW _MMIO(0xe7c8 + 4) +#define DIS_CHAIN_2XSIMD8 REG_BIT(55 - 32) +#define FORCE_SLM_FENCE_SCOPE_TO_TILE REG_BIT(42 - 32) +#define FORCE_UGM_FENCE_SCOPE_TO_TILE REG_BIT(41 - 32) +#define MAXREQS_PER_BANK REG_GENMASK(39 - 32, 37 - 32) +#define DISABLE_128B_EVICTION_COMMAND_UDW REG_BIT(36 - 32) + #define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4) #define DOP_CLOCK_GATING_DISABLE (1 << 0) #define PUSH_CONSTANT_DEREF_DISABLE (1 << 8) #define GEN11_TDL_CLOCK_GATING_FIX_DISABLE (1 << 1) -#define GEN9_ROW_CHICKEN4 _MMIO(0xe48c) -#define GEN12_DISABLE_TDL_PUSH REG_BIT(9) -#define GEN11_DIS_PICK_2ND_EU REG_BIT(7) +#define GEN9_ROW_CHICKEN4 _MMIO(0xe48c) +#define GEN12_DISABLE_GRF_CLEAR REG_BIT(13) +#define GEN12_DISABLE_TDL_PUSH REG_BIT(9) +#define GEN11_DIS_PICK_2ND_EU REG_BIT(7) +#define GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX REG_BIT(4) #define HSW_ROW_CHICKEN3 _MMIO(0xe49c) #define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6) @@ -9640,9 +9770,10 @@ enum { #define GEN8_SAMPLER_POWER_BYPASS_DIS (1 << 1) #define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194) -#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR (1 << 8) -#define GEN9_ENABLE_YV12_BUGFIX (1 << 4) -#define GEN9_ENABLE_GPGPU_PREEMPTION (1 << 2) +#define DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA REG_BIT(15) +#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR REG_BIT(8) +#define GEN9_ENABLE_YV12_BUGFIX REG_BIT(4) +#define GEN9_ENABLE_GPGPU_PREEMPTION REG_BIT(2) /* Audio */ #define G4X_AUD_VID_DID _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x62020) @@ -9781,6 +9912,10 @@ enum { #define AUD_PIN_BUF_CTL _MMIO(0x48414) #define AUD_PIN_BUF_ENABLE REG_BIT(31) +#define AUD_TS_CDCLK_M _MMIO(0x65ea0) +#define AUD_TS_CDCLK_M_EN REG_BIT(31) +#define 
AUD_TS_CDCLK_N _MMIO(0x65ea4) + /* Display Audio Config Reg */ #define AUD_CONFIG_BE _MMIO(0x65ef0) #define HBLANK_EARLY_ENABLE_ICL(pipe) (0x1 << (20 - (pipe))) @@ -10212,8 +10347,6 @@ enum skl_power_gate { #define TGL_TRANS_DDI_PORT_MASK (0xf << TGL_TRANS_DDI_PORT_SHIFT) #define TRANS_DDI_SELECT_PORT(x) ((x) << TRANS_DDI_PORT_SHIFT) #define TGL_TRANS_DDI_SELECT_PORT(x) (((x) + 1) << TGL_TRANS_DDI_PORT_SHIFT) -#define TRANS_DDI_FUNC_CTL_VAL_TO_PORT(val) (((val) & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT) -#define TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(val) ((((val) & TGL_TRANS_DDI_PORT_MASK) >> TGL_TRANS_DDI_PORT_SHIFT) - 1) #define TRANS_DDI_MODE_SELECT_MASK (7 << 24) #define TRANS_DDI_MODE_SELECT_HDMI (0 << 24) #define TRANS_DDI_MODE_SELECT_DVI (1 << 24) @@ -10523,6 +10656,14 @@ enum skl_power_gate { #define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1 << 16) #define CDCLK_FREQ_DECIMAL_MASK (0x7ff) +/* CDCLK_SQUASH_CTL */ +#define CDCLK_SQUASH_CTL _MMIO(0x46008) +#define CDCLK_SQUASH_ENABLE REG_BIT(31) +#define CDCLK_SQUASH_WINDOW_SIZE_MASK REG_GENMASK(27, 24) +#define CDCLK_SQUASH_WINDOW_SIZE(x) REG_FIELD_PREP(CDCLK_SQUASH_WINDOW_SIZE_MASK, (x)) +#define CDCLK_SQUASH_WAVEFORM_MASK REG_GENMASK(15, 0) +#define CDCLK_SQUASH_WAVEFORM(x) REG_FIELD_PREP(CDCLK_SQUASH_WAVEFORM_MASK, (x)) + /* LCPLL_CTL */ #define LCPLL1_CTL _MMIO(0x46010) #define LCPLL2_CTL _MMIO(0x46014) @@ -11717,7 +11858,9 @@ enum skl_power_gate { #define TGL_DSI_CHKN_REG(port) _MMIO_PORT(port, \ _TGL_DSI_CHKN_REG_0, \ _TGL_DSI_CHKN_REG_1) -#define TGL_DSI_CHKN_LSHS_GB REG_GENMASK(15, 12) +#define TGL_DSI_CHKN_LSHS_GB_MASK REG_GENMASK(15, 12) +#define TGL_DSI_CHKN_LSHS_GB(byte_clocks) REG_FIELD_PREP(TGL_DSI_CHKN_LSHS_GB_MASK, \ + (byte_clocks)) /* Display Stream Splitter Control */ #define DSS_CTL1 _MMIO(0x67400) @@ -12464,11 +12607,19 @@ enum skl_power_gate { #define PMFLUSH_GAPL3UNBLOCK (1 << 21) #define PMFLUSHDONE_LNEBLK (1 << 22) +#define XEHP_L3NODEARBCFG _MMIO(0xb0b4) +#define XEHP_LNESPARE REG_BIT(19) + #define GEN12_GLOBAL_MOCS(i) _MMIO(0x4000 + (i) * 4) /* Global MOCS regs */ #define GEN12_GSMBASE _MMIO(0x108100) #define GEN12_DSMBASE _MMIO(0x1080C0) +#define XEHP_CLOCK_GATE_DIS _MMIO(0x101014) +#define SGSI_SIDECLK_DIS REG_BIT(17) +#define SGGI_DIS REG_BIT(15) +#define SGR_DIS REG_BIT(13) + /* gamt regs */ #define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4) #define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW 0x67F1427F /* max/min for LRA1/2 */ @@ -12845,4 +12996,7 @@ enum skl_power_gate { #define CLKGATE_DIS_MISC _MMIO(0x46534) #define CLKGATE_DIS_MISC_DMASC_GATING_DIS REG_BIT(21) +#define SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731C) +#define MSC_MSAA_REODER_BUF_BYPASS_DISABLE REG_BIT(14) + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 820a1f38b271..ad175d662b4e 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -29,6 +29,7 @@ #include <linux/sched.h> #include <linux/sched/clock.h> #include <linux/sched/signal.h> +#include <linux/sched/mm.h> #include "gem/i915_gem_context.h" #include "gt/intel_breadcrumbs.h" @@ -96,9 +97,9 @@ static signed long i915_fence_wait(struct dma_fence *fence, bool interruptible, signed long timeout) { - return i915_request_wait(to_request(fence), - interruptible | I915_WAIT_PRIORITY, - timeout); + return i915_request_wait_timeout(to_request(fence), + interruptible | I915_WAIT_PRIORITY, + timeout); } struct kmem_cache *i915_request_slab_cache(void) @@ -113,6 +114,10 @@ static void i915_fence_release(struct 
dma_fence *fence) GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT && rq->guc_prio != GUC_PRIO_FINI); + i915_request_free_capture_list(fetch_and_zero(&rq->capture_list)); + if (i915_vma_snapshot_present(&rq->batch_snapshot)) + i915_vma_snapshot_put_onstack(&rq->batch_snapshot); + /* * The request is put onto a RCU freelist (i.e. the address * is immediately reused), mark the fences as being freed now. @@ -186,19 +191,6 @@ void i915_request_notify_execute_cb_imm(struct i915_request *rq) __notify_execute_cb(rq, irq_work_imm); } -static void free_capture_list(struct i915_request *request) -{ - struct i915_capture_list *capture; - - capture = fetch_and_zero(&request->capture_list); - while (capture) { - struct i915_capture_list *next = capture->next; - - kfree(capture); - capture = next; - } -} - static void __i915_request_fill(struct i915_request *rq, u8 val) { void *vaddr = rq->ring->vaddr; @@ -303,6 +295,37 @@ static void __rq_cancel_watchdog(struct i915_request *rq) i915_request_put(rq); } +#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) + +/** + * i915_request_free_capture_list - Free a capture list + * @capture: Pointer to the first list item or NULL + * + */ +void i915_request_free_capture_list(struct i915_capture_list *capture) +{ + while (capture) { + struct i915_capture_list *next = capture->next; + + i915_vma_snapshot_put(capture->vma_snapshot); + capture = next; + } +} + +#define assert_capture_list_is_null(_rq) GEM_BUG_ON((_rq)->capture_list) + +#define clear_capture_list(_rq) ((_rq)->capture_list = NULL) + +#else + +#define i915_request_free_capture_list(_a) do {} while (0) + +#define assert_capture_list_is_null(_a) do {} while (0) + +#define clear_capture_list(_rq) do {} while (0) + +#endif + bool i915_request_retire(struct i915_request *rq) { if (!__i915_request_is_complete(rq)) @@ -339,7 +362,7 @@ bool i915_request_retire(struct i915_request *rq) } if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) - atomic_dec(&rq->engine->gt->rps.num_waiters); + intel_rps_dec_waiters(&rq->engine->gt->rps); /* * We only loosely track inflight requests across preemption, @@ -359,7 +382,6 @@ bool i915_request_retire(struct i915_request *rq) intel_context_exit(rq->context); intel_context_unpin(rq->context); - free_capture_list(rq); i915_sched_node_fini(&rq->sched); i915_request_put(rq); @@ -719,7 +741,7 @@ void i915_request_cancel(struct i915_request *rq, int error) intel_context_cancel_request(rq->context, rq); } -static int __i915_sw_fence_call +static int submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { struct i915_request *request = @@ -755,7 +777,7 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) return NOTIFY_DONE; } -static int __i915_sw_fence_call +static int semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { struct i915_request *rq = container_of(fence, typeof(*rq), semaphore); @@ -829,11 +851,18 @@ static void __i915_request_ctor(void *arg) i915_sw_fence_init(&rq->submit, submit_notify); i915_sw_fence_init(&rq->semaphore, semaphore_notify); - rq->capture_list = NULL; + clear_capture_list(rq); + rq->batch_snapshot.present = false; init_llist_head(&rq->execute_cb); } +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#define clear_batch_ptr(_rq) ((_rq)->batch = NULL) +#else +#define clear_batch_ptr(_a) do {} while (0) +#endif + struct i915_request * __i915_request_create(struct intel_context *ce, gfp_t gfp) { @@ -925,10 +954,11 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) 
i915_sched_node_reinit(&rq->sched); /* No zalloc, everything must be cleared after use */ - rq->batch = NULL; + clear_batch_ptr(rq); __rq_init_watchdog(rq); - GEM_BUG_ON(rq->capture_list); + assert_capture_list_is_null(rq); GEM_BUG_ON(!llist_empty(&rq->execute_cb)); + GEM_BUG_ON(i915_vma_snapshot_present(&rq->batch_snapshot)); /* * Reserve space in the ring buffer for all the commands required to @@ -1857,23 +1887,27 @@ static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb) } /** - * i915_request_wait - wait until execution of request has finished + * i915_request_wait_timeout - wait until execution of request has finished * @rq: the request to wait upon * @flags: how to wait * @timeout: how long to wait in jiffies * - * i915_request_wait() waits for the request to be completed, for a + * i915_request_wait_timeout() waits for the request to be completed, for a * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an * unbounded wait). * * Returns the remaining time (in jiffies) if the request completed, which may - * be zero or -ETIME if the request is unfinished after the timeout expires. + * be zero if the request is unfinished after the timeout expires. + * If the timeout is 0, it will return 1 if the fence is signaled. + * * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is * pending before the request completes. + * + * NOTE: This function has the same wait semantics as dma-fence. */ -long i915_request_wait(struct i915_request *rq, - unsigned int flags, - long timeout) +long i915_request_wait_timeout(struct i915_request *rq, + unsigned int flags, + long timeout) { const int state = flags & I915_WAIT_INTERRUPTIBLE ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; @@ -1883,7 +1917,7 @@ long i915_request_wait(struct i915_request *rq, GEM_BUG_ON(timeout < 0); if (dma_fence_is_signaled(&rq->fence)) - return timeout; + return timeout ?: 1; if (!timeout) return -ETIME; @@ -1992,6 +2026,39 @@ out: return timeout; } +/** + * i915_request_wait - wait until execution of request has finished + * @rq: the request to wait upon + * @flags: how to wait + * @timeout: how long to wait in jiffies + * + * i915_request_wait() waits for the request to be completed, for a + * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an + * unbounded wait). + * + * Returns the remaining time (in jiffies) if the request completed, which may + * be zero or -ETIME if the request is unfinished after the timeout expires. + * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is + * pending before the request completes. + * + * NOTE: This function behaves differently from dma-fence wait semantics for + * timeout = 0. It returns 0 on success, and -ETIME if not signaled.
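+ * + * A minimal polling sketch of that timeout = 0 convention (the request handle and the surrounding caller here are hypothetical): + * + * ret = i915_request_wait(rq, 0, 0); + * if (ret == -ETIME) + * ; /* not signaled yet, poll again later */ + * else if (ret == 0) + * ; /* request already completed */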
+ */ +long i915_request_wait(struct i915_request *rq, + unsigned int flags, + long timeout) +{ + long ret = i915_request_wait_timeout(rq, flags, timeout); + + if (!ret) + return -ETIME; + + if (ret > 0 && !timeout) + return 0; + + return ret; +} + static int print_sched_attr(const struct i915_sched_attr *attr, char *buf, int x, int len) { diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index dc359242d1ae..0ed01979491b 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -40,6 +40,7 @@ #include "i915_scheduler.h" #include "i915_selftest.h" #include "i915_sw_fence.h" +#include "i915_vma_snapshot.h" #include <uapi/drm/i915_drm.h> @@ -48,11 +49,17 @@ struct drm_i915_gem_object; struct drm_printer; struct i915_request; +#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) struct i915_capture_list { + struct i915_vma_snapshot *vma_snapshot; struct i915_capture_list *next; - struct i915_vma *vma; }; +void i915_request_free_capture_list(struct i915_capture_list *capture); +#else +#define i915_request_free_capture_list(_a) do {} while (0) +#endif + #define RQ_TRACE(rq, fmt, ...) do { \ const struct i915_request *rq__ = (rq); \ ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt, \ @@ -289,10 +296,12 @@ struct i915_request { /** Preallocate space in the ring for emitting the request */ u32 reserved_space; - /** Batch buffer related to this request if any (used for * error state dump only). */ - struct i915_vma *batch; + /** Batch buffer pointer for selftest internal use. */ + I915_SELFTEST_DECLARE(struct i915_vma *batch); + + struct i915_vma_snapshot batch_snapshot; + +#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) /** * Additional buffers requested by userspace to be captured upon * a GPU hang. The vma/obj on this list are protected by their * on the active_list (of their final request). */ struct i915_capture_list *capture_list; +#endif /** Time at which this request was emitted, in jiffies. */ unsigned long emitted_jiffies; @@ -414,6 +424,11 @@ void i915_request_unsubmit(struct i915_request *request); void i915_request_cancel(struct i915_request *rq, int error); +long i915_request_wait_timeout(struct i915_request *rq, + unsigned int flags, + long timeout) + __attribute__((nonnull(1))); + long i915_request_wait(struct i915_request *rq, unsigned int flags, long timeout) diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c b/drivers/gpu/drm/i915/i915_scatterlist.c index 4a6712dca838..41f2adb6a583 100644 --- a/drivers/gpu/drm/i915/i915_scatterlist.c +++ b/drivers/gpu/drm/i915/i915_scatterlist.c @@ -41,8 +41,32 @@ bool i915_sg_trim(struct sg_table *orig_st) return true; } +static void i915_refct_sgt_release(struct kref *ref) +{ + struct i915_refct_sgt *rsgt = + container_of(ref, typeof(*rsgt), kref); + + sg_free_table(&rsgt->table); + kfree(rsgt); +} + +static const struct i915_refct_sgt_ops rsgt_ops = { + .release = i915_refct_sgt_release +}; + +/** + * i915_refct_sgt_init - Initialize a struct i915_refct_sgt with default ops + * @rsgt: The struct i915_refct_sgt to initialize. + * @size: The size of the underlying memory buffer. + */ +void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size) +{ + __i915_refct_sgt_init(rsgt, size, &rsgt_ops); +} + /** - * i915_sg_from_mm_node - Create an sg_table from a struct drm_mm_node + * i915_rsgt_from_mm_node - Create a refcounted sg_table from a struct + * drm_mm_node * @node: The drm_mm_node.
* @region_start: An offset to add to the dma addresses of the sg list. * @@ -50,25 +74,28 @@ bool i915_sg_trim(struct sg_table *orig_st) * taking a maximum segment length into account, splitting into segments * if necessary. * - * Return: A pointer to a kmalloced struct sg_table on success, negative + * Return: A pointer to a kmalloced struct i915_refct_sgt on success, negative * error code cast to an error pointer on failure. */ -struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node, - u64 region_start) +struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node, + u64 region_start) { const u64 max_segment = SZ_1G; /* Do we have a limit on this? */ u64 segment_pages = max_segment >> PAGE_SHIFT; u64 block_size, offset, prev_end; + struct i915_refct_sgt *rsgt; struct sg_table *st; struct scatterlist *sg; - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (!st) + rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL); + if (!rsgt) return ERR_PTR(-ENOMEM); + i915_refct_sgt_init(rsgt, node->size << PAGE_SHIFT); + st = &rsgt->table; if (sg_alloc_table(st, DIV_ROUND_UP(node->size, segment_pages), GFP_KERNEL)) { - kfree(st); + i915_refct_sgt_put(rsgt); return ERR_PTR(-ENOMEM); } @@ -104,11 +131,11 @@ struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node, sg_mark_end(sg); i915_sg_trim(st); - return st; + return rsgt; } /** - * i915_sg_from_buddy_resource - Create an sg_table from a struct + * i915_rsgt_from_buddy_resource - Create a refcounted sg_table from a struct * i915_buddy_block list * @res: The struct i915_ttm_buddy_resource. * @region_start: An offset to add to the dma addresses of the sg list. @@ -117,11 +144,11 @@ struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node, * taking a maximum segment length into account, splitting into segments * if necessary. * - * Return: A pointer to a kmalloced struct sg_table on success, negative + * Return: A pointer to a kmalloced struct i915_refct_sgt on success, negative * error code cast to an error pointer on failure.
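 * * A minimal usage sketch, assuming a caller that owns @res (the region_start of 0 is illustrative): * * struct i915_refct_sgt *rsgt = i915_rsgt_from_buddy_resource(res, 0); * * if (IS_ERR(rsgt)) * return PTR_ERR(rsgt); * ... use rsgt->table ... * i915_refct_sgt_put(rsgt);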
*/ -struct sg_table *i915_sg_from_buddy_resource(struct ttm_resource *res, - u64 region_start) +struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res, + u64 region_start) { struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res); const u64 size = res->num_pages << PAGE_SHIFT; @@ -129,18 +156,21 @@ struct sg_table *i915_sg_from_buddy_resource(struct ttm_resource *res, struct i915_buddy_mm *mm = bman_res->mm; struct list_head *blocks = &bman_res->blocks; struct i915_buddy_block *block; + struct i915_refct_sgt *rsgt; struct scatterlist *sg; struct sg_table *st; resource_size_t prev_end; GEM_BUG_ON(list_empty(blocks)); - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (!st) + rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL); + if (!rsgt) return ERR_PTR(-ENOMEM); + i915_refct_sgt_init(rsgt, size); + st = &rsgt->table; if (sg_alloc_table(st, res->num_pages, GFP_KERNEL)) { - kfree(st); + i915_refct_sgt_put(rsgt); return ERR_PTR(-ENOMEM); } @@ -181,7 +211,7 @@ struct sg_table *i915_sg_from_buddy_resource(struct ttm_resource *res, sg_mark_end(sg); i915_sg_trim(st); - return st; + return rsgt; } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h index b8bd5925b03f..12c6a1684081 100644 --- a/drivers/gpu/drm/i915/i915_scatterlist.h +++ b/drivers/gpu/drm/i915/i915_scatterlist.h @@ -144,10 +144,78 @@ static inline unsigned int i915_sg_segment_size(void) bool i915_sg_trim(struct sg_table *orig_st); -struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node, - u64 region_start); +/** + * struct i915_refct_sgt_ops - Operations structure for struct i915_refct_sgt + */ +struct i915_refct_sgt_ops { + /** + * release() - Free the memory of the struct i915_refct_sgt + * @ref: struct kref that is embedded in the struct i915_refct_sgt + */ + void (*release)(struct kref *ref); +}; + +/** + * struct i915_refct_sgt - A refcounted scatter-gather table + * @kref: struct kref for refcounting + * @table: struct sg_table holding the scatter-gather table itself. Note that + * @table->sgl = NULL can be used to determine whether a scatter-gather table + * is present or not. + * @size: The size in bytes of the underlying memory buffer + * @ops: The operations structure. + */ +struct i915_refct_sgt { + struct kref kref; + struct sg_table table; + size_t size; + const struct i915_refct_sgt_ops *ops; }; + +/** + * i915_refct_sgt_put - Put a refcounted sg-table + * @rsgt: The struct i915_refct_sgt to put. + */ +static inline void i915_refct_sgt_put(struct i915_refct_sgt *rsgt) +{ + if (rsgt) + kref_put(&rsgt->kref, rsgt->ops->release); +} + +/** + * i915_refct_sgt_get - Get a refcounted sg-table + * @rsgt: The struct i915_refct_sgt to get. + */ +static inline struct i915_refct_sgt * +i915_refct_sgt_get(struct i915_refct_sgt *rsgt) +{ + kref_get(&rsgt->kref); + return rsgt; +} + +/** + * __i915_refct_sgt_init - Initialize a refcounted sg-list with a custom + * operations structure + * @rsgt: The struct i915_refct_sgt to initialize. + * @size: Size in bytes of the underlying memory buffer. + * @ops: A customized operations structure in case the refcounted sg-list + * is embedded into another structure.
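+ * + * A minimal sketch of that embedding case; the containing structure, release callback and ops instance below are hypothetical: + * + * struct my_buf { + * struct i915_refct_sgt rsgt; + * }; + * + * static void my_buf_release(struct kref *ref) + * { + * struct i915_refct_sgt *rsgt = container_of(ref, typeof(*rsgt), kref); + * struct my_buf *buf = container_of(rsgt, typeof(*buf), rsgt); + * + * sg_free_table(&rsgt->table); + * kfree(buf); + * } + * + * static const struct i915_refct_sgt_ops my_buf_ops = { .release = my_buf_release }; + * + * __i915_refct_sgt_init(&buf->rsgt, size, &my_buf_ops);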
+ */ +static inline void __i915_refct_sgt_init(struct i915_refct_sgt *rsgt, + size_t size, + const struct i915_refct_sgt_ops *ops) +{ + kref_init(&rsgt->kref); + rsgt->table.sgl = NULL; + rsgt->size = size; + rsgt->ops = ops; +} + +void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size); + +struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node, + u64 region_start); -struct sg_table *i915_sg_from_buddy_resource(struct ttm_resource *res, - u64 region_start); +struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res, + u64 region_start); #endif diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index c589a681da77..2a74a9a1cafe 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -18,7 +18,9 @@ #define I915_SW_FENCE_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) #endif +#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG static DEFINE_SPINLOCK(i915_sw_fence_lock); +#endif #define WQ_FLAG_BITS \ BITS_PER_TYPE(typeof_member(struct wait_queue_entry, flags)) @@ -34,7 +36,7 @@ enum { static void *i915_sw_fence_debug_hint(void *addr) { - return (void *)(((struct i915_sw_fence *)addr)->flags & I915_SW_FENCE_MASK); + return (void *)(((struct i915_sw_fence *)addr)->fn); } #ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS @@ -126,10 +128,7 @@ static inline void debug_fence_assert(struct i915_sw_fence *fence) static int __i915_sw_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { - i915_sw_fence_notify_t fn; - - fn = (i915_sw_fence_notify_t)(fence->flags & I915_SW_FENCE_MASK); - return fn(fence, state); + return fence->fn(fence, state); } #ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS @@ -242,10 +241,13 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence, const char *name, struct lock_class_key *key) { - BUG_ON(!fn || (unsigned long)fn & ~I915_SW_FENCE_MASK); + BUG_ON(!fn); __init_waitqueue_head(&fence->wait, name, key); - fence->flags = (unsigned long)fn; + fence->fn = fn; +#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG + fence->flags = 0; +#endif i915_sw_fence_reinit(fence); } @@ -257,7 +259,6 @@ void i915_sw_fence_reinit(struct i915_sw_fence *fence) atomic_set(&fence->pending, 1); fence->error = 0; - I915_SW_FENCE_BUG_ON(!fence->flags); I915_SW_FENCE_BUG_ON(!list_empty(&fence->wait.head)); } @@ -279,6 +280,7 @@ static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, return 0; } +#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence, const struct i915_sw_fence * const signaler) { @@ -322,9 +324,6 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence, unsigned long flags; bool err; - if (!IS_ENABLED(CONFIG_DRM_I915_SW_FENCE_CHECK_DAG)) - return false; - spin_lock_irqsave(&i915_sw_fence_lock, flags); err = __i915_sw_fence_check_if_after(fence, signaler); __i915_sw_fence_clear_checked_bit(fence); @@ -332,6 +331,13 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence, return err; } +#else +static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence, + const struct i915_sw_fence * const signaler) +{ + return false; +} +#endif static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, struct i915_sw_fence *signaler, @@ -572,56 +578,25 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, unsigned long timeout, gfp_t gfp) { - struct dma_fence *excl; + struct dma_resv_iter cursor; + struct dma_fence *f; int ret = 0, 
pending; debug_fence_assert(fence); might_sleep_if(gfpflags_allow_blocking(gfp)); - if (write) { - struct dma_fence **shared; - unsigned int count, i; - - ret = dma_resv_get_fences(resv, &excl, &count, &shared); - if (ret) - return ret; - - for (i = 0; i < count; i++) { - if (shared[i]->ops == exclude) - continue; - - pending = i915_sw_fence_await_dma_fence(fence, - shared[i], - timeout, - gfp); - if (pending < 0) { - ret = pending; - break; - } - - ret |= pending; - } - - for (i = 0; i < count; i++) - dma_fence_put(shared[i]); - kfree(shared); - } else { - excl = dma_resv_get_excl_unlocked(resv); - } - - if (ret >= 0 && excl && excl->ops != exclude) { - pending = i915_sw_fence_await_dma_fence(fence, - excl, - timeout, + dma_resv_iter_begin(&cursor, resv, write); + dma_resv_for_each_fence_unlocked(&cursor, f) { + pending = i915_sw_fence_await_dma_fence(fence, f, timeout, gfp); - if (pending < 0) + if (pending < 0) { ret = pending; - else - ret |= pending; - } - - dma_fence_put(excl); + break; + } + ret |= pending; + } + dma_resv_iter_end(&cursor); return ret; } diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h index 30a863353ee6..a7c603bc1b01 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.h +++ b/drivers/gpu/drm/i915/i915_sw_fence.h @@ -17,26 +17,27 @@ struct completion; struct dma_resv; +struct i915_sw_fence; + +enum i915_sw_fence_notify { + FENCE_COMPLETE, + FENCE_FREE +}; + +typedef int (*i915_sw_fence_notify_t)(struct i915_sw_fence *, + enum i915_sw_fence_notify state); struct i915_sw_fence { wait_queue_head_t wait; + i915_sw_fence_notify_t fn; +#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG unsigned long flags; +#endif atomic_t pending; int error; }; #define I915_SW_FENCE_CHECKED_BIT 0 /* used internally for DAG checking */ -#define I915_SW_FENCE_PRIVATE_BIT 1 /* available for use by owner */ -#define I915_SW_FENCE_MASK (~3) - -enum i915_sw_fence_notify { - FENCE_COMPLETE, - FENCE_FREE -}; - -typedef int (*i915_sw_fence_notify_t)(struct i915_sw_fence *, - enum i915_sw_fence_notify state); -#define __i915_sw_fence_call __aligned(4) void __i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn, diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c index 5b33ef23d54c..d2e56b387993 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence_work.c +++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c @@ -23,7 +23,7 @@ static void fence_work(struct work_struct *work) dma_fence_put(&f->dma); } -static int __i915_sw_fence_call +static int fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { struct dma_fence_work *f = container_of(fence, typeof(*f), chain); diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c index de0e224b56ce..23777d500cdf 100644 --- a/drivers/gpu/drm/i915/i915_switcheroo.c +++ b/drivers/gpu/drm/i915/i915_switcheroo.c @@ -5,6 +5,7 @@ #include <linux/vga_switcheroo.h> +#include "i915_driver.h" #include "i915_drv.h" #include "i915_switcheroo.h" @@ -24,12 +25,12 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING; /* i915 resume handler doesn't set to D0 */ pci_set_power_state(pdev, PCI_D0); - i915_resume_switcheroo(i915); + i915_driver_resume_switcheroo(i915); i915->drm.switch_power_state = DRM_SWITCH_POWER_ON; } else { drm_info(&i915->drm, "switched off\n"); i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING; - i915_suspend_switcheroo(i915, pmm); + 
i915_driver_suspend_switcheroo(i915, pmm); i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF; } } diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 1804f4142740..59d441cedc75 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -279,7 +279,7 @@ static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribu struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); struct intel_rps *rps = &i915->gt.rps; - return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->boost_freq)); + return sysfs_emit(buf, "%d\n", intel_rps_get_boost_frequency(rps)); } static ssize_t gt_boost_freq_mhz_store(struct device *kdev, @@ -288,7 +288,6 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev, { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); struct intel_rps *rps = &dev_priv->gt.rps; - bool boost = false; ssize_t ret; u32 val; @@ -296,21 +295,9 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev, if (ret) return ret; - /* Validate against (static) hardware limits */ - val = intel_freq_opcode(rps, val); - if (val < rps->min_freq || val > rps->max_freq) - return -EINVAL; - - mutex_lock(&rps->lock); - if (val != rps->boost_freq) { - rps->boost_freq = val; - boost = atomic_read(&rps->num_waiters); - } - mutex_unlock(&rps->lock); - if (boost) - schedule_work(&rps->work); + ret = intel_rps_set_boost_frequency(rps, val); - return count; + return ret ?: count; } static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 8104981a6604..37b5c9e9d260 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -1,4 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM i915 + #if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) #define _I915_TRACE_H_ @@ -8,511 +12,11 @@ #include <drm/drm_drv.h> -#include "display/intel_crtc.h" -#include "display/intel_display_types.h" #include "gt/intel_engine.h" #include "i915_drv.h" #include "i915_irq.h" -#undef TRACE_SYSTEM -#define TRACE_SYSTEM i915 -#define TRACE_INCLUDE_FILE i915_trace - -/* watermark/fifo updates */ - -TRACE_EVENT(intel_pipe_enable, - TP_PROTO(struct intel_crtc *crtc), - TP_ARGS(crtc), - - TP_STRUCT__entry( - __array(u32, frame, 3) - __array(u32, scanline, 3) - __field(enum pipe, pipe) - ), - TP_fast_assign( - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_crtc *it__; - for_each_intel_crtc(&dev_priv->drm, it__) { - __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); - __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__); - } - __entry->pipe = crtc->pipe; - ), - - TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", - pipe_name(__entry->pipe), - __entry->frame[PIPE_A], __entry->scanline[PIPE_A], - __entry->frame[PIPE_B], __entry->scanline[PIPE_B], - __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) -); - -TRACE_EVENT(intel_pipe_disable, - TP_PROTO(struct intel_crtc *crtc), - TP_ARGS(crtc), - - TP_STRUCT__entry( - __array(u32, frame, 3) - __array(u32, scanline, 3) - __field(enum pipe, pipe) - ), - - TP_fast_assign( - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_crtc *it__; - for_each_intel_crtc(&dev_priv->drm, it__) { - __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); - __entry->scanline[it__->pipe] = 
intel_get_crtc_scanline(it__); - } - __entry->pipe = crtc->pipe; - ), - - TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", - pipe_name(__entry->pipe), - __entry->frame[PIPE_A], __entry->scanline[PIPE_A], - __entry->frame[PIPE_B], __entry->scanline[PIPE_B], - __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) -); - -TRACE_EVENT(intel_pipe_crc, - TP_PROTO(struct intel_crtc *crtc, const u32 *crcs), - TP_ARGS(crtc, crcs), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - __array(u32, crcs, 5) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - memcpy(__entry->crcs, crcs, sizeof(__entry->crcs)); - ), - - TP_printk("pipe %c, frame=%u, scanline=%u crc=%08x %08x %08x %08x %08x", - pipe_name(__entry->pipe), __entry->frame, __entry->scanline, - __entry->crcs[0], __entry->crcs[1], __entry->crcs[2], - __entry->crcs[3], __entry->crcs[4]) -); - -TRACE_EVENT(intel_cpu_fifo_underrun, - TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe), - TP_ARGS(dev_priv, pipe), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - ), - - TP_fast_assign( - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - __entry->pipe = pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - ), - - TP_printk("pipe %c, frame=%u, scanline=%u", - pipe_name(__entry->pipe), - __entry->frame, __entry->scanline) -); - -TRACE_EVENT(intel_pch_fifo_underrun, - TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pch_transcoder), - TP_ARGS(dev_priv, pch_transcoder), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - ), - - TP_fast_assign( - enum pipe pipe = pch_transcoder; - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - __entry->pipe = pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - ), - - TP_printk("pch transcoder %c, frame=%u, scanline=%u", - pipe_name(__entry->pipe), - __entry->frame, __entry->scanline) -); - -TRACE_EVENT(intel_memory_cxsr, - TP_PROTO(struct drm_i915_private *dev_priv, bool old, bool new), - TP_ARGS(dev_priv, old, new), - - TP_STRUCT__entry( - __array(u32, frame, 3) - __array(u32, scanline, 3) - __field(bool, old) - __field(bool, new) - ), - - TP_fast_assign( - struct intel_crtc *crtc; - for_each_intel_crtc(&dev_priv->drm, crtc) { - __entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc); - __entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc); - } - __entry->old = old; - __entry->new = new; - ), - - TP_printk("%s->%s, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", - onoff(__entry->old), onoff(__entry->new), - __entry->frame[PIPE_A], __entry->scanline[PIPE_A], - __entry->frame[PIPE_B], __entry->scanline[PIPE_B], - __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) -); - -TRACE_EVENT(g4x_wm, - TP_PROTO(struct intel_crtc *crtc, const struct g4x_wm_values *wm), - TP_ARGS(crtc, wm), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - __field(u16, primary) - __field(u16, sprite) - __field(u16, cursor) - __field(u16, sr_plane) - __field(u16, sr_cursor) - __field(u16, sr_fbc) - __field(u16, hpll_plane) - __field(u16, 
hpll_cursor) - __field(u16, hpll_fbc) - __field(bool, cxsr) - __field(bool, hpll) - __field(bool, fbc) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY]; - __entry->sprite = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0]; - __entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR]; - __entry->sr_plane = wm->sr.plane; - __entry->sr_cursor = wm->sr.cursor; - __entry->sr_fbc = wm->sr.fbc; - __entry->hpll_plane = wm->hpll.plane; - __entry->hpll_cursor = wm->hpll.cursor; - __entry->hpll_fbc = wm->hpll.fbc; - __entry->cxsr = wm->cxsr; - __entry->hpll = wm->hpll_en; - __entry->fbc = wm->fbc_en; - ), - - TP_printk("pipe %c, frame=%u, scanline=%u, wm %d/%d/%d, sr %s/%d/%d/%d, hpll %s/%d/%d/%d, fbc %s", - pipe_name(__entry->pipe), __entry->frame, __entry->scanline, - __entry->primary, __entry->sprite, __entry->cursor, - yesno(__entry->cxsr), __entry->sr_plane, __entry->sr_cursor, __entry->sr_fbc, - yesno(__entry->hpll), __entry->hpll_plane, __entry->hpll_cursor, __entry->hpll_fbc, - yesno(__entry->fbc)) -); - -TRACE_EVENT(vlv_wm, - TP_PROTO(struct intel_crtc *crtc, const struct vlv_wm_values *wm), - TP_ARGS(crtc, wm), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - __field(u32, level) - __field(u32, cxsr) - __field(u32, primary) - __field(u32, sprite0) - __field(u32, sprite1) - __field(u32, cursor) - __field(u32, sr_plane) - __field(u32, sr_cursor) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - __entry->level = wm->level; - __entry->cxsr = wm->cxsr; - __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY]; - __entry->sprite0 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0]; - __entry->sprite1 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE1]; - __entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR]; - __entry->sr_plane = wm->sr.plane; - __entry->sr_cursor = wm->sr.cursor; - ), - - TP_printk("pipe %c, frame=%u, scanline=%u, level=%d, cxsr=%d, wm %d/%d/%d/%d, sr %d/%d", - pipe_name(__entry->pipe), __entry->frame, - __entry->scanline, __entry->level, __entry->cxsr, - __entry->primary, __entry->sprite0, __entry->sprite1, __entry->cursor, - __entry->sr_plane, __entry->sr_cursor) -); - -TRACE_EVENT(vlv_fifo_size, - TP_PROTO(struct intel_crtc *crtc, u32 sprite0_start, u32 sprite1_start, u32 fifo_size), - TP_ARGS(crtc, sprite0_start, sprite1_start, fifo_size), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - __field(u32, sprite0_start) - __field(u32, sprite1_start) - __field(u32, fifo_size) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - __entry->sprite0_start = sprite0_start; - __entry->sprite1_start = sprite1_start; - __entry->fifo_size = fifo_size; - ), - - TP_printk("pipe %c, frame=%u, scanline=%u, %d/%d/%d", - pipe_name(__entry->pipe), __entry->frame, - __entry->scanline, __entry->sprite0_start, - __entry->sprite1_start, __entry->fifo_size) -); - -/* plane updates */ - -TRACE_EVENT(intel_update_plane, - TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc), - TP_ARGS(plane, crtc), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - __array(int, src, 4) - 
__array(int, dst, 4) - __string(name, plane->name) - ), - - TP_fast_assign( - __assign_str(name, plane->name); - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - memcpy(__entry->src, &plane->state->src, sizeof(__entry->src)); - memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst)); - ), - - TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT, - pipe_name(__entry->pipe), __get_str(name), - __entry->frame, __entry->scanline, - DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src), - DRM_RECT_ARG((const struct drm_rect *)__entry->dst)) -); - -TRACE_EVENT(intel_disable_plane, - TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc), - TP_ARGS(plane, crtc), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - __string(name, plane->name) - ), - - TP_fast_assign( - __assign_str(name, plane->name); - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - ), - - TP_printk("pipe %c, plane %s, frame=%u, scanline=%u", - pipe_name(__entry->pipe), __get_str(name), - __entry->frame, __entry->scanline) -); - -/* fbc */ - -TRACE_EVENT(intel_fbc_activate, - TP_PROTO(struct intel_crtc *crtc), - TP_ARGS(crtc), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - ), - - TP_printk("pipe %c, frame=%u, scanline=%u", - pipe_name(__entry->pipe), __entry->frame, __entry->scanline) -); - -TRACE_EVENT(intel_fbc_deactivate, - TP_PROTO(struct intel_crtc *crtc), - TP_ARGS(crtc), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - ), - - TP_printk("pipe %c, frame=%u, scanline=%u", - pipe_name(__entry->pipe), __entry->frame, __entry->scanline) -); - -TRACE_EVENT(intel_fbc_nuke, - TP_PROTO(struct intel_crtc *crtc), - TP_ARGS(crtc), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - ), - - TP_printk("pipe %c, frame=%u, scanline=%u", - pipe_name(__entry->pipe), __entry->frame, __entry->scanline) -); - -/* pipe updates */ - -TRACE_EVENT(intel_pipe_update_start, - TP_PROTO(struct intel_crtc *crtc), - TP_ARGS(crtc), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - __field(u32, min) - __field(u32, max) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - __entry->min = crtc->debug.min_vbl; - __entry->max = crtc->debug.max_vbl; - ), - - TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u", - pipe_name(__entry->pipe), __entry->frame, - __entry->scanline, __entry->min, __entry->max) -); - -TRACE_EVENT(intel_pipe_update_vblank_evaded, - TP_PROTO(struct intel_crtc *crtc), - TP_ARGS(crtc), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - 
__field(u32, min) - __field(u32, max) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = crtc->debug.start_vbl_count; - __entry->scanline = crtc->debug.scanline_start; - __entry->min = crtc->debug.min_vbl; - __entry->max = crtc->debug.max_vbl; - ), - - TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u", - pipe_name(__entry->pipe), __entry->frame, - __entry->scanline, __entry->min, __entry->max) -); - -TRACE_EVENT(intel_pipe_update_end, - TP_PROTO(struct intel_crtc *crtc, u32 frame, int scanline_end), - TP_ARGS(crtc, frame, scanline_end), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = frame; - __entry->scanline = scanline_end; - ), - - TP_printk("pipe %c, frame=%u, scanline=%u", - pipe_name(__entry->pipe), __entry->frame, - __entry->scanline) -); - -/* frontbuffer tracking */ - -TRACE_EVENT(intel_frontbuffer_invalidate, - TP_PROTO(unsigned int frontbuffer_bits, unsigned int origin), - TP_ARGS(frontbuffer_bits, origin), - - TP_STRUCT__entry( - __field(unsigned int, frontbuffer_bits) - __field(unsigned int, origin) - ), - - TP_fast_assign( - __entry->frontbuffer_bits = frontbuffer_bits; - __entry->origin = origin; - ), - - TP_printk("frontbuffer_bits=0x%08x, origin=%u", - __entry->frontbuffer_bits, __entry->origin) -); - -TRACE_EVENT(intel_frontbuffer_flush, - TP_PROTO(unsigned int frontbuffer_bits, unsigned int origin), - TP_ARGS(frontbuffer_bits, origin), - - TP_STRUCT__entry( - __field(unsigned int, frontbuffer_bits) - __field(unsigned int, origin) - ), - - TP_fast_assign( - __entry->frontbuffer_bits = frontbuffer_bits; - __entry->origin = origin; - ), - - TP_printk("frontbuffer_bits=0x%08x, origin=%u", - __entry->frontbuffer_bits, __entry->origin) -); - /* object tracking */ TRACE_EVENT(i915_gem_object_create, @@ -1260,5 +764,7 @@ DEFINE_EVENT(i915_context, i915_context_free, /* This part must be outside protection */ #undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915 +#define TRACE_INCLUDE_FILE i915_trace #include <trace/define_trace.h> diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index bef795e265a6..927f0d4f8e11 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -40,12 +40,12 @@ static struct kmem_cache *slab_vmas; -struct i915_vma *i915_vma_alloc(void) +static struct i915_vma *i915_vma_alloc(void) { return kmem_cache_zalloc(slab_vmas, GFP_KERNEL); } -void i915_vma_free(struct i915_vma *vma) +static void i915_vma_free(struct i915_vma *vma) { return kmem_cache_free(slab_vmas, vma); } @@ -113,7 +113,6 @@ vma_create(struct drm_i915_gem_object *obj, vma->vm = i915_vm_get(vm); vma->ops = &vm->vma_ops; vma->obj = obj; - vma->resv = obj->base.resv; vma->size = obj->base.size; vma->display_alignment = I915_GTT_MIN_ALIGNMENT; @@ -346,7 +345,7 @@ int i915_vma_wait_for_bind(struct i915_vma *vma) fence = dma_fence_get_rcu_safe(&vma->active.excl.fence); rcu_read_unlock(); if (fence) { - err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT); + err = dma_fence_wait(fence, true); dma_fence_put(fence); } } @@ -354,6 +353,32 @@ int i915_vma_wait_for_bind(struct i915_vma *vma) return err; } +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) +static int i915_vma_verify_bind_complete(struct i915_vma *vma) +{ + int err = 0; + + if (i915_active_has_exclusive(&vma->active)) { + struct dma_fence *fence = + i915_active_fence_get(&vma->active.excl); + 
+ if (!fence) + return 0; + + if (dma_fence_is_signaled(fence)) + err = fence->error; + else + err = -EBUSY; + + dma_fence_put(fence); + } + + return err; +} +#else +#define i915_vma_verify_bind_complete(_vma) 0 +#endif + /** * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space. * @vma: VMA to map @@ -423,11 +448,16 @@ int i915_vma_bind(struct i915_vma *vma, work->base.dma.error = 0; /* enable the queue_work() */ + __i915_gem_object_pin_pages(vma->obj); + work->pinned = i915_gem_object_get(vma->obj); + } else { if (vma->obj) { - __i915_gem_object_pin_pages(vma->obj); - work->pinned = i915_gem_object_get(vma->obj); + int ret; + + ret = i915_gem_object_wait_moving_fence(vma->obj, true); + if (ret) + return ret; } - } else { vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags); } @@ -449,6 +479,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) GEM_BUG_ON(!i915_vma_is_ggtt(vma)); GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)); + GEM_BUG_ON(i915_vma_verify_bind_complete(vma)); ptr = READ_ONCE(vma->iomap); if (ptr == NULL) { @@ -667,7 +698,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) } color = 0; - if (vma->obj && i915_vm_has_cache_coloring(vma->vm)) + if (i915_vm_has_cache_coloring(vma->vm)) color = vma->obj->cache_level; if (flags & PIN_OFFSET_FIXED) { @@ -792,17 +823,14 @@ unpinned: static int vma_get_pages(struct i915_vma *vma) { int err = 0; - bool pinned_pages = false; + bool pinned_pages = true; if (atomic_add_unless(&vma->pages_count, 1, 0)) return 0; - if (vma->obj) { - err = i915_gem_object_pin_pages(vma->obj); - if (err) - return err; - pinned_pages = true; - } + err = i915_gem_object_pin_pages(vma->obj); + if (err) + return err; /* Allocations ahoy! */ if (mutex_lock_interruptible(&vma->pages_mutex)) { @@ -835,8 +863,8 @@ static void __vma_put_pages(struct i915_vma *vma, unsigned int count) if (atomic_sub_return(count, &vma->pages_count) == 0) { vma->ops->clear_pages(vma); GEM_BUG_ON(vma->pages); - if (vma->obj) - i915_gem_object_unpin_pages(vma->obj); + + i915_gem_object_unpin_pages(vma->obj); } mutex_unlock(&vma->pages_mutex); } @@ -867,12 +895,13 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, u64 size, u64 alignment, u64 flags) { struct i915_vma_work *work = NULL; + struct dma_fence *moving = NULL; intel_wakeref_t wakeref = 0; unsigned int bound; int err; #ifdef CONFIG_PROVE_LOCKING - if (debug_locks && !WARN_ON(!ww) && vma->resv) + if (debug_locks && !WARN_ON(!ww)) assert_vma_held(vma); #endif @@ -892,7 +921,8 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, if (flags & PIN_GLOBAL) wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm); - if (flags & vma->vm->bind_async_flags) { + moving = vma->obj ? i915_gem_object_get_moving_fence(vma->obj) : NULL; + if (flags & vma->vm->bind_async_flags || moving) { /* lock VM */ err = i915_vm_lock_objects(vma->vm, ww); if (err) @@ -906,6 +936,8 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, work->vm = i915_vm_get(vma->vm); + dma_fence_work_chain(&work->base, moving); + /* Allocate enough page directories to cover the used PTEs */ if (vma->vm->allocate_va_range) { err = i915_vm_alloc_pt_stash(vma->vm, @@ -980,7 +1012,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, GEM_BUG_ON(!vma->pages); err = i915_vma_bind(vma, - vma->obj ?
vma->obj->cache_level : 0, + vma->obj->cache_level, flags, work); if (err) goto err_remove; @@ -1010,7 +1042,10 @@ err_fence: err_rpm: if (wakeref) intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref); + if (moving) + dma_fence_put(moving); vma_put_pages(vma); + return err; } @@ -1034,7 +1069,7 @@ int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, GEM_BUG_ON(!i915_vma_is_ggtt(vma)); #ifdef CONFIG_LOCKDEP - WARN_ON(!ww && vma->resv && dma_resv_held(vma->resv)); + WARN_ON(!ww && dma_resv_held(vma->obj->base.resv)); #endif do { @@ -1113,6 +1148,7 @@ void i915_vma_reopen(struct i915_vma *vma) void i915_vma_release(struct kref *ref) { struct i915_vma *vma = container_of(ref, typeof(*vma), ref); + struct drm_i915_gem_object *obj = vma->obj; if (drm_mm_node_allocated(&vma->node)) { mutex_lock(&vma->vm->mutex); @@ -1123,15 +1159,11 @@ void i915_vma_release(struct kref *ref) } GEM_BUG_ON(i915_vma_is_active(vma)); - if (vma->obj) { - struct drm_i915_gem_object *obj = vma->obj; - - spin_lock(&obj->vma.lock); - list_del(&vma->obj_link); - if (!RB_EMPTY_NODE(&vma->obj_node)) - rb_erase(&vma->obj_node, &obj->vma.tree); - spin_unlock(&obj->vma.lock); - } + spin_lock(&obj->vma.lock); + list_del(&vma->obj_link); + if (!RB_EMPTY_NODE(&vma->obj_node)) + rb_erase(&vma->obj_node, &obj->vma.tree); + spin_unlock(&obj->vma.lock); __i915_vma_remove_closed(vma); i915_vm_put(vma->vm); @@ -1256,19 +1288,19 @@ int _i915_vma_move_to_active(struct i915_vma *vma, } if (fence) { - dma_resv_add_excl_fence(vma->resv, fence); + dma_resv_add_excl_fence(vma->obj->base.resv, fence); obj->write_domain = I915_GEM_DOMAIN_RENDER; obj->read_domains = 0; } } else { if (!(flags & __EXEC_OBJECT_NO_RESERVE)) { - err = dma_resv_reserve_shared(vma->resv, 1); + err = dma_resv_reserve_shared(vma->obj->base.resv, 1); if (unlikely(err)) return err; } if (fence) { - dma_resv_add_shared_fence(vma->resv, fence); + dma_resv_add_shared_fence(vma->obj->base.resv, fence); obj->write_domain = 0; } } diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 648dbe744c96..4033aa08d5e4 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -234,16 +234,16 @@ static inline void __i915_vma_put(struct i915_vma *vma) kref_put(&vma->ref, i915_vma_release); } -#define assert_vma_held(vma) dma_resv_assert_held((vma)->resv) +#define assert_vma_held(vma) dma_resv_assert_held((vma)->obj->base.resv) static inline void i915_vma_lock(struct i915_vma *vma) { - dma_resv_lock(vma->resv, NULL); + dma_resv_lock(vma->obj->base.resv, NULL); } static inline void i915_vma_unlock(struct i915_vma *vma) { - dma_resv_unlock(vma->resv); + dma_resv_unlock(vma->obj->base.resv); } int __must_check @@ -418,9 +418,6 @@ static inline void i915_vma_clear_scanout(struct i915_vma *vma) list_for_each_entry(V, &(OBJ)->vma.list, obj_link) \ for_each_until(!i915_vma_is_ggtt(V)) -struct i915_vma *i915_vma_alloc(void); -void i915_vma_free(struct i915_vma *vma); - struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma); void i915_vma_make_shrinkable(struct i915_vma *vma); void i915_vma_make_purgeable(struct i915_vma *vma); diff --git a/drivers/gpu/drm/i915/i915_vma_snapshot.c b/drivers/gpu/drm/i915/i915_vma_snapshot.c new file mode 100644 index 000000000000..2949ceea9884 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_vma_snapshot.c @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include "i915_vma_snapshot.h" +#include "i915_vma_types.h" +#include 
"i915_vma.h" + +/** + * i915_vma_snapshot_init - Initialize a struct i915_vma_snapshot from + * a struct i915_vma. + * @vsnap: The i915_vma_snapshot to init. + * @vma: A struct i915_vma used to initialize @vsnap. + * @name: Name associated with the snapshot. The character pointer needs to + * stay alive over the lifitime of the shapsot + */ +void i915_vma_snapshot_init(struct i915_vma_snapshot *vsnap, + struct i915_vma *vma, + const char *name) +{ + if (!i915_vma_is_pinned(vma)) + assert_object_held(vma->obj); + + vsnap->name = name; + vsnap->size = vma->size; + vsnap->obj_size = vma->obj->base.size; + vsnap->gtt_offset = vma->node.start; + vsnap->gtt_size = vma->node.size; + vsnap->page_sizes = vma->page_sizes.gtt; + vsnap->pages = vma->pages; + vsnap->pages_rsgt = NULL; + vsnap->mr = NULL; + if (vma->obj->mm.rsgt) + vsnap->pages_rsgt = i915_refct_sgt_get(vma->obj->mm.rsgt); + vsnap->mr = vma->obj->mm.region; + kref_init(&vsnap->kref); + vsnap->vma_resource = &vma->active; + vsnap->onstack = false; + vsnap->present = true; +} + +/** + * i915_vma_snapshot_init_onstack - Initialize a struct i915_vma_snapshot from + * a struct i915_vma, but avoid kfreeing it on last put. + * @vsnap: The i915_vma_snapshot to init. + * @vma: A struct i915_vma used to initialize @vsnap. + * @name: Name associated with the snapshot. The character pointer needs to + * stay alive over the lifitime of the shapsot + */ +void i915_vma_snapshot_init_onstack(struct i915_vma_snapshot *vsnap, + struct i915_vma *vma, + const char *name) +{ + i915_vma_snapshot_init(vsnap, vma, name); + vsnap->onstack = true; +} + +static void vma_snapshot_release(struct kref *ref) +{ + struct i915_vma_snapshot *vsnap = + container_of(ref, typeof(*vsnap), kref); + + vsnap->present = false; + if (vsnap->pages_rsgt) + i915_refct_sgt_put(vsnap->pages_rsgt); + if (!vsnap->onstack) + kfree(vsnap); +} + +/** + * i915_vma_snapshot_put - Put an i915_vma_snapshot pointer reference + * @vsnap: The pointer reference + */ +void i915_vma_snapshot_put(struct i915_vma_snapshot *vsnap) +{ + kref_put(&vsnap->kref, vma_snapshot_release); +} + +/** + * i915_vma_snapshot_put_onstack - Put an onstcak i915_vma_snapshot pointer + * reference and varify that the structure is released + * @vsnap: The pointer reference + * + * This function is intended to be paired with a i915_vma_init_onstack() + * and should be called before exiting the scope that declared or + * freeing the structure that embedded @vsnap to verify that all references + * have been released. + */ +void i915_vma_snapshot_put_onstack(struct i915_vma_snapshot *vsnap) +{ + if (!kref_put(&vsnap->kref, vma_snapshot_release)) + GEM_BUG_ON(1); +} + +/** + * i915_vma_snapshot_resource_pin - Temporarily block the memory the + * vma snapshot is pointing to from being released. + * @vsnap: The vma snapshot. + * @lockdep_cookie: Pointer to bool needed for lockdep support. This needs + * to be passed to the paired i915_vma_snapshot_resource_unpin. + * + * This function will temporarily try to hold up a fence or similar structure + * and will therefore enter a fence signaling critical section. + * + * Return: true if we succeeded in blocking the memory from being released, + * false otherwise. 
+ */ +bool i915_vma_snapshot_resource_pin(struct i915_vma_snapshot *vsnap, + bool *lockdep_cookie) +{ + bool pinned = i915_active_acquire_if_busy(vsnap->vma_resource); + + if (pinned) + *lockdep_cookie = dma_fence_begin_signalling(); + + return pinned; +} + +/** + * i915_vma_snapshot_resource_unpin - Unblock vma snapshot memory from + * being released. + * @vsnap: The vma snapshot. + * @lockdep_cookie: Cookie returned from matching i915_vma_snapshot_resource_pin(). + * + * Might leave a fence signalling critical section and signal a fence. + */ +void i915_vma_snapshot_resource_unpin(struct i915_vma_snapshot *vsnap, + bool lockdep_cookie) +{ + dma_fence_end_signalling(lockdep_cookie); + + return i915_active_release(vsnap->vma_resource); +} diff --git a/drivers/gpu/drm/i915/i915_vma_snapshot.h b/drivers/gpu/drm/i915/i915_vma_snapshot.h new file mode 100644 index 000000000000..940581df4622 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_vma_snapshot.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ +#ifndef _I915_VMA_SNAPSHOT_H_ +#define _I915_VMA_SNAPSHOT_H_ + +#include <linux/kref.h> +#include <linux/slab.h> +#include <linux/types.h> + +struct i915_active; +struct i915_refct_sgt; +struct i915_vma; +struct intel_memory_region; +struct sg_table; + +/** + * DOC: Simple utilities for snapshotting GPU vma metadata, later used for + * error capture. We use a separate header for this to avoid issues due to + * recursive header includes. + */ + +/** + * struct i915_vma_snapshot - Snapshot of vma metadata. + * @name: Name associated with the snapshot. + * @size: The vma size in bytes. + * @obj_size: The size of the underlying object in bytes. + * @gtt_offset: The gtt offset the vma is bound to. + * @gtt_size: The size in bytes allocated for the vma in the GTT. + * @pages: The struct sg_table pointing to the pages bound. + * @pages_rsgt: The refcounted sg_table holding the reference for @pages if any. + * @mr: The memory region pointed for the pages bound. + * @kref: Reference for this structure. + * @vma_resource: FIXME: A means to keep the unbind fence from signaling. + * Temporarily while we have only sync unbinds, and still use the vma + * active, we use that. With async unbinding we need a signaling refcount + * for the unbind fence. + * @page_sizes: The vma GTT page sizes information. + * @onstack: Whether the structure shouldn't be freed on final put. + * @present: Whether the structure is present and initialized. + */ +struct i915_vma_snapshot { + const char *name; + size_t size; + size_t obj_size; + size_t gtt_offset; + size_t gtt_size; + struct sg_table *pages; + struct i915_refct_sgt *pages_rsgt; + struct intel_memory_region *mr; + struct kref kref; + struct i915_active *vma_resource; + u32 page_sizes; + bool onstack:1; + bool present:1; +}; + +void i915_vma_snapshot_init(struct i915_vma_snapshot *vsnap, + struct i915_vma *vma, + const char *name); + +void i915_vma_snapshot_init_onstack(struct i915_vma_snapshot *vsnap, + struct i915_vma *vma, + const char *name); + +void i915_vma_snapshot_put(struct i915_vma_snapshot *vsnap); + +void i915_vma_snapshot_put_onstack(struct i915_vma_snapshot *vsnap); + +bool i915_vma_snapshot_resource_pin(struct i915_vma_snapshot *vsnap, + bool *lockdep_cookie); + +void i915_vma_snapshot_resource_unpin(struct i915_vma_snapshot *vsnap, + bool lockdep_cookie); + +/** + * i915_vma_snapshot_alloc - Allocate a struct i915_vma_snapshot + * @gfp: Allocation mode. + * + * Return: A pointer to a struct i915_vma_snapshot if successful. + * NULL otherwise.
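+ * + * A minimal lifecycle sketch, assuming a pinned @vma and a name string that outlives the snapshot: + * + * struct i915_vma_snapshot *vsnap = i915_vma_snapshot_alloc(GFP_KERNEL); + * + * if (!vsnap) + * return -ENOMEM; + * i915_vma_snapshot_init(vsnap, vma, "batch"); + * ... + * i915_vma_snapshot_put(vsnap);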
+ */ +static inline struct i915_vma_snapshot *i915_vma_snapshot_alloc(gfp_t gfp) +{ + return kmalloc(sizeof(struct i915_vma_snapshot), gfp); +} + +/** + * i915_vma_snapshot_get - Take a reference on a struct i915_vma_snapshot + * + * Return: A pointer to a struct i915_vma_snapshot. + */ +static inline struct i915_vma_snapshot * +i915_vma_snapshot_get(struct i915_vma_snapshot *vsnap) +{ + kref_get(&vsnap->kref); + return vsnap; +} + +/** + * i915_vma_snapshot_present - Whether a struct i915_vma_snapshot is + * present and initialized. + * + * Return: true if present and initialized; false otherwise. + */ +static inline bool +i915_vma_snapshot_present(const struct i915_vma_snapshot *vsnap) +{ + return vsnap && vsnap->present; +} + +#endif diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h index 80e93bf00f2e..f03fa96a1701 100644 --- a/drivers/gpu/drm/i915/i915_vma_types.h +++ b/drivers/gpu/drm/i915/i915_vma_types.h @@ -97,11 +97,20 @@ enum i915_cache_level; struct intel_remapped_plane_info { /* in gtt pages */ - u32 offset; - u16 width; - u16 height; - u16 src_stride; - u16 dst_stride; + u32 offset:31; + u32 linear:1; + union { + /* in gtt pages for !linear */ + struct { + u16 width; + u16 height; + u16 src_stride; + u16 dst_stride; + }; + + /* in gtt pages for linear */ + u32 size; + }; } __packed; struct intel_remapped_info { @@ -178,7 +187,6 @@ struct i915_vma { const struct i915_vma_ops *ops; struct drm_i915_gem_object *obj; - struct dma_resv *resv; /** Alias of obj->resv */ struct sg_table *pages; void __iomem *iomap; diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 305facedd284..04fd266d70e2 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -83,33 +83,26 @@ const char *intel_platform_name(enum intel_platform platform) return platform_names[platform]; } -static const char *iommu_name(void) -{ - const char *msg = "n/a"; - -#ifdef CONFIG_INTEL_IOMMU - msg = enableddisabled(intel_iommu_gfx_mapped); -#endif - - return msg; -} - void intel_device_info_print_static(const struct intel_device_info *info, struct drm_printer *p) { - if (info->graphics_rel) - drm_printf(p, "graphics version: %u.%02u\n", info->graphics_ver, info->graphics_rel); + if (info->graphics.rel) + drm_printf(p, "graphics version: %u.%02u\n", info->graphics.ver, + info->graphics.rel); else - drm_printf(p, "graphics version: %u\n", info->graphics_ver); + drm_printf(p, "graphics version: %u\n", info->graphics.ver); - if (info->media_rel) - drm_printf(p, "media version: %u.%02u\n", info->media_ver, info->media_rel); + if (info->media.rel) + drm_printf(p, "media version: %u.%02u\n", info->media.ver, info->media.rel); else - drm_printf(p, "media version: %u\n", info->media_ver); + drm_printf(p, "media version: %u\n", info->media.ver); + + if (info->display.rel) + drm_printf(p, "display version: %u.%02u\n", info->display.ver, info->display.rel); + else + drm_printf(p, "display version: %u\n", info->display.ver); - drm_printf(p, "display version: %u\n", info->display.ver); drm_printf(p, "gt: %d\n", info->gt); - drm_printf(p, "iommu: %s\n", iommu_name()); drm_printf(p, "memory-regions: %x\n", info->memory_regions); drm_printf(p, "page-sizes: %x\n", info->page_sizes); drm_printf(p, "platform: %s\n", intel_platform_name(info->platform)); @@ -177,6 +170,10 @@ static const u16 subplatform_portf_ids[] = { INTEL_ICL_PORT_F_IDS(0), }; +static const u16 subplatform_rpls_ids[] = { + 
INTEL_RPLS_IDS(0), +}; + static bool find_devid(u16 id, const u16 *p, unsigned int num) { for (; num; num--, p++) { @@ -213,6 +210,9 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915) } else if (find_devid(devid, subplatform_portf_ids, ARRAY_SIZE(subplatform_portf_ids))) { mask = BIT(INTEL_SUBPLATFORM_PORTF); + } else if (find_devid(devid, subplatform_rpls_ids, + ARRAY_SIZE(subplatform_rpls_ids))) { + mask = BIT(INTEL_SUBPLATFORM_RPL_S); } if (IS_TIGERLAKE(i915)) { @@ -326,33 +326,33 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) { drm_info(&dev_priv->drm, "Display fused off, disabling\n"); - info->pipe_mask = 0; - info->cpu_transcoder_mask = 0; + info->display.pipe_mask = 0; + info->display.cpu_transcoder_mask = 0; } else if (fuse_strap & IVB_PIPE_C_DISABLE) { drm_info(&dev_priv->drm, "PipeC fused off\n"); - info->pipe_mask &= ~BIT(PIPE_C); - info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C); + info->display.pipe_mask &= ~BIT(PIPE_C); + info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_C); } } else if (HAS_DISPLAY(dev_priv) && DISPLAY_VER(dev_priv) >= 9) { u32 dfsm = intel_de_read(dev_priv, SKL_DFSM); if (dfsm & SKL_DFSM_PIPE_A_DISABLE) { - info->pipe_mask &= ~BIT(PIPE_A); - info->cpu_transcoder_mask &= ~BIT(TRANSCODER_A); + info->display.pipe_mask &= ~BIT(PIPE_A); + info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_A); } if (dfsm & SKL_DFSM_PIPE_B_DISABLE) { - info->pipe_mask &= ~BIT(PIPE_B); - info->cpu_transcoder_mask &= ~BIT(TRANSCODER_B); + info->display.pipe_mask &= ~BIT(PIPE_B); + info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_B); } if (dfsm & SKL_DFSM_PIPE_C_DISABLE) { - info->pipe_mask &= ~BIT(PIPE_C); - info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C); + info->display.pipe_mask &= ~BIT(PIPE_C); + info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_C); } if (DISPLAY_VER(dev_priv) >= 12 && (dfsm & TGL_DFSM_PIPE_D_DISABLE)) { - info->pipe_mask &= ~BIT(PIPE_D); - info->cpu_transcoder_mask &= ~BIT(TRANSCODER_D); + info->display.pipe_mask &= ~BIT(PIPE_D); + info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_D); } if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE) @@ -369,7 +369,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) info->display.has_dsc = 0; } - if (GRAPHICS_VER(dev_priv) == 6 && intel_vtd_active()) { + if (GRAPHICS_VER(dev_priv) == 6 && intel_vtd_active(dev_priv)) { drm_info(&dev_priv->drm, "Disabling ppGTT for VT-d support\n"); info->ppgtt_type = INTEL_PPGTT_NONE; diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 8e6f48d1eb7b..d9ac6540d058 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -110,6 +110,9 @@ enum intel_platform { #define INTEL_SUBPLATFORM_G10 0 #define INTEL_SUBPLATFORM_G11 1 +/* ADL-S */ +#define INTEL_SUBPLATFORM_RPL_S 0 + enum intel_ppgtt_type { INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE, INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING, @@ -166,11 +169,14 @@ enum intel_ppgtt_type { func(overlay_needs_physical); \ func(supports_tv); +struct ip_version { + u8 ver; + u8 rel; +}; + struct intel_device_info { - u8 graphics_ver; - u8 graphics_rel; - u8 media_ver; - u8 media_rel; + struct ip_version graphics; + struct ip_version media; intel_engine_mask_t platform_engine_mask; /* Engines supported by the HW */ @@ -189,17 +195,17 @@ struct intel_device_info { u8 gt; /* GT number, 0 if undefined */ - u8 pipe_mask; - u8 cpu_transcoder_mask; - - 
u8 abox_mask; - #define DEFINE_FLAG(name) u8 name:1 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG); #undef DEFINE_FLAG struct { u8 ver; + u8 rel; + + u8 pipe_mask; + u8 cpu_transcoder_mask; + u8 abox_mask; #define DEFINE_FLAG(name) u8 name:1 DEV_INFO_DISPLAY_FOR_EACH_FLAG(DEFINE_FLAG); diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c index e7f7e6627750..b43121609e25 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.c +++ b/drivers/gpu/drm/i915/intel_memory_region.c @@ -126,7 +126,6 @@ intel_memory_region_create(struct drm_i915_private *i915, goto err_free; } - kref_init(&mem->kref); return mem; err_free: @@ -144,28 +143,17 @@ void intel_memory_region_set_name(struct intel_memory_region *mem, va_end(ap); } -static void __intel_memory_region_destroy(struct kref *kref) +void intel_memory_region_destroy(struct intel_memory_region *mem) { - struct intel_memory_region *mem = - container_of(kref, typeof(*mem), kref); + int ret = 0; if (mem->ops->release) - mem->ops->release(mem); + ret = mem->ops->release(mem); + GEM_WARN_ON(!list_empty_careful(&mem->objects.list)); mutex_destroy(&mem->objects.lock); - kfree(mem); -} - -struct intel_memory_region * -intel_memory_region_get(struct intel_memory_region *mem) -{ - kref_get(&mem->kref); - return mem; -} - -void intel_memory_region_put(struct intel_memory_region *mem) -{ - kref_put(&mem->kref, __intel_memory_region_destroy); + if (!ret) + kfree(mem); } /* Global memory region registration -- only slight layer inversions! */ @@ -234,7 +222,7 @@ void intel_memory_regions_driver_release(struct drm_i915_private *i915) fetch_and_zero(&i915->mm.regions[i]); if (region) - intel_memory_region_put(region); + intel_memory_region_destroy(region); } } diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h index 3feae3353d33..5625c9c38993 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.h +++ b/drivers/gpu/drm/i915/intel_memory_region.h @@ -6,7 +6,6 @@ #ifndef __INTEL_MEMORY_REGION_H__ #define __INTEL_MEMORY_REGION_H__ -#include <linux/kref.h> #include <linux/ioport.h> #include <linux/mutex.h> #include <linux/io-mapping.h> @@ -51,7 +50,7 @@ struct intel_memory_region_ops { unsigned int flags; int (*init)(struct intel_memory_region *mem); - void (*release)(struct intel_memory_region *mem); + int (*release)(struct intel_memory_region *mem); int (*init_object)(struct intel_memory_region *mem, struct drm_i915_gem_object *obj, @@ -71,8 +70,6 @@ struct intel_memory_region { /* For fake LMEM */ struct drm_mm_node fake_mappable; - struct kref kref; - resource_size_t io_start; resource_size_t min_page_size; resource_size_t total; @@ -110,9 +107,7 @@ intel_memory_region_create(struct drm_i915_private *i915, u16 instance, const struct intel_memory_region_ops *ops); -struct intel_memory_region * -intel_memory_region_get(struct intel_memory_region *mem); -void intel_memory_region_put(struct intel_memory_region *mem); +void intel_memory_region_destroy(struct intel_memory_region *mem); int intel_memory_regions_hw_probe(struct drm_i915_private *i915); void intel_memory_regions_driver_release(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c index d1d4b97b86f5..da8f82c2342f 100644 --- a/drivers/gpu/drm/i915/intel_pch.c +++ b/drivers/gpu/drm/i915/intel_pch.c @@ -129,6 +129,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) return PCH_JSP; case INTEL_PCH_ADP_DEVICE_ID_TYPE: case 
INTEL_PCH_ADP2_DEVICE_ID_TYPE: + case INTEL_PCH_ADP3_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Alder Lake PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv) && !IS_ALDERLAKE_P(dev_priv)); diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h index 7c0d83d292dc..6bff77521094 100644 --- a/drivers/gpu/drm/i915/intel_pch.h +++ b/drivers/gpu/drm/i915/intel_pch.h @@ -57,6 +57,7 @@ enum intel_pch { #define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880 #define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80 #define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180 +#define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ecbb3d141632..434b1f8b7fe3 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -36,7 +36,9 @@ #include "display/intel_atomic_plane.h" #include "display/intel_bw.h" #include "display/intel_de.h" +#include "display/intel_display_trace.h" #include "display/intel_display_types.h" +#include "display/intel_fb.h" #include "display/intel_fbc.h" #include "display/intel_sprite.h" #include "display/skl_universal_plane.h" @@ -46,7 +48,6 @@ #include "i915_drv.h" #include "i915_fixed.h" #include "i915_irq.h" -#include "i915_trace.h" #include "intel_pcode.h" #include "intel_pm.h" #include "vlv_sideband.h" @@ -97,7 +98,7 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) * "Plane N strech max must be programmed to 11b (x1) * when Async flips are enabled on that plane." */ - if (!IS_GEMINILAKE(dev_priv) && intel_vtd_active()) + if (!IS_GEMINILAKE(dev_priv) && intel_vtd_active(dev_priv)) intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe), SKL_PLANE1_STRETCH_MAX_MASK, SKL_PLANE1_STRETCH_MAX_X1); } @@ -160,7 +161,7 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv) * Display WA #0883: bxt */ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | - ILK_DPFC_DISABLE_DUMMY0); + DPFC_DISABLE_DUMMY0); } static void glk_init_clock_gating(struct drm_i915_private *dev_priv) @@ -988,7 +989,7 @@ static void g4x_write_wm_values(struct drm_i915_private *dev_priv, enum pipe pipe; for_each_pipe(dev_priv, pipe) - trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm); + trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm); intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(wm->sr.plane, SR) | @@ -1020,7 +1021,7 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv, enum pipe pipe; for_each_pipe(dev_priv, pipe) { - trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm); + trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm); intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe), (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) | @@ -2335,6 +2336,20 @@ static void i965_update_wm(struct drm_i915_private *dev_priv) #undef FW_WM +static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915, + enum i9xx_plane_id i9xx_plane) +{ + struct intel_plane *plane; + + for_each_intel_plane(&i915->drm, plane) { + if (plane->id == PLANE_PRIMARY && + plane->i9xx_plane == i9xx_plane) + return intel_crtc_for_pipe(i915, plane->pipe); + } + + return NULL; +} + static void i9xx_update_wm(struct drm_i915_private *dev_priv) { const struct intel_watermark_params *wm_info; @@ -2356,7 +2371,7 @@ static void 
i9xx_update_wm(struct drm_i915_private *dev_priv) fifo_size = i830_get_fifo_size(dev_priv, PLANE_A); else fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A); - crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A); + crtc = intel_crtc_for_plane(dev_priv, PLANE_A); if (intel_crtc_active(crtc)) { const struct drm_display_mode *pipe_mode = &crtc->config->hw.pipe_mode; @@ -2386,7 +2401,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) fifo_size = i830_get_fifo_size(dev_priv, PLANE_B); else fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B); - crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B); + crtc = intel_crtc_for_plane(dev_priv, PLANE_B); if (intel_crtc_active(crtc)) { const struct drm_display_mode *pipe_mode = &crtc->config->hw.pipe_mode; @@ -3062,9 +3077,9 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) * The BIOS provided WM memory latency values are often * inadequate for high resolution displays. Adjust them. */ - changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) | - ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) | - ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); + changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12); + changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12); + changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); if (!changed) return; @@ -3368,13 +3383,8 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv, } /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */ - /* - * FIXME this is racy. FBC might get enabled later. - * What we should check here is whether FBC can be - * enabled sometime later. - */ - if (DISPLAY_VER(dev_priv) == 5 && !merged->fbc_wm_enabled && - intel_fbc_is_active(dev_priv)) { + if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) && + dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) { for (level = 2; level <= max_level; level++) { struct intel_wm_level *wm = &merged->wm[level]; @@ -5094,6 +5104,18 @@ skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm, } } +static bool icl_need_wm1_wa(struct drm_i915_private *i915, + enum plane_id plane_id) +{ + /* + * Wa_1408961008:icl, ehl + * Wa_14012656716:tgl, adl + * Underruns with WM1+ disabled + */ + return DISPLAY_VER(i915) == 11 || + (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR); +} + static int skl_allocate_plane_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc) @@ -5264,11 +5286,7 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state, skl_check_nv12_wm_level(&wm->wm[level], &wm->uv_wm[level], total[plane_id], uv_total[plane_id]); - /* - * Wa_1408961008:icl, ehl - * Underruns with WM1+ disabled - */ - if (DISPLAY_VER(dev_priv) == 11 && + if (icl_need_wm1_wa(dev_priv, plane_id) && level == 1 && wm->wm[0].enable) { wm->wm[level].blocks = wm->wm[0].blocks; wm->wm[level].lines = wm->wm[0].lines; @@ -6900,7 +6918,7 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv) for_each_intel_plane(&dev_priv->drm, plane) { struct intel_crtc *crtc = - intel_get_crtc_for_pipe(dev_priv, plane->pipe); + intel_crtc_for_pipe(dev_priv, plane->pipe); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane_state *plane_state = @@ -7056,7 +7074,7 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv) for_each_intel_plane(&dev_priv->drm, plane) { struct intel_crtc *crtc = - intel_get_crtc_for_pipe(dev_priv, plane->pipe); + 
intel_crtc_for_pipe(dev_priv, plane->pipe); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane_state *plane_state = @@ -7434,7 +7452,7 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv) { /* Wa_1409120013:icl,ehl */ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, - ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); + DPFC_CHICKEN_COMP_DUMMY_PIXEL); /*Wa_14010594013:icl, ehl */ intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1, @@ -7443,11 +7461,11 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv) static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv) { - /* Wa_1409120013:tgl,rkl,adl-s,dg1 */ + /* Wa_1409120013:tgl,rkl,adl-s,dg1,dg2 */ if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv) || - IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv)) + IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv) || IS_DG2(dev_priv)) intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, - ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); + DPFC_CHICKEN_COMP_DUMMY_PIXEL); /* Wa_1409825376:tgl (pre-prod)*/ if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) @@ -7473,11 +7491,34 @@ static void dg1_init_clock_gating(struct drm_i915_private *dev_priv) gen12lp_init_clock_gating(dev_priv); /* Wa_1409836686:dg1[a0] */ - if (IS_DG1_GT_STEP(dev_priv, STEP_A0, STEP_B0)) + if (IS_DG1_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0)) intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) | DPT_GATING_DIS); } +static void xehpsdv_init_clock_gating(struct drm_i915_private *dev_priv) +{ + /* Wa_22010146351:xehpsdv */ + if (IS_XEHPSDV_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0)) + intel_uncore_rmw(&dev_priv->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS); +} + +static void dg2_init_clock_gating(struct drm_i915_private *i915) +{ + /* Wa_22010954014:dg2_g10 */ + if (IS_DG2_G10(i915)) + intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0, + SGSI_SIDECLK_DIS); + + /* + * Wa_14010733611:dg2_g10 + * Wa_22010146351:dg2_g10 + */ + if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) + intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0, + SGR_DIS | SGGI_DIS); +} + static void cnp_init_clock_gating(struct drm_i915_private *dev_priv) { if (!HAS_PCH_CNP(dev_priv)) @@ -7509,7 +7550,7 @@ static void cfl_init_clock_gating(struct drm_i915_private *dev_priv) * Display WA #0873: cfl */ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | - ILK_DPFC_NUKE_ON_ANY_MODIFICATION); + DPFC_NUKE_ON_ANY_MODIFICATION); } static void kbl_init_clock_gating(struct drm_i915_private *dev_priv) @@ -7521,12 +7562,12 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv) FBC_LLC_FULLY_OPEN); /* WaDisableSDEUnitClockGating:kbl */ - if (IS_KBL_GT_STEP(dev_priv, 0, STEP_C0)) + if (IS_KBL_GRAPHICS_STEP(dev_priv, 0, STEP_C0)) intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); /* WaDisableGamClockGating:kbl */ - if (IS_KBL_GT_STEP(dev_priv, 0, STEP_C0)) + if (IS_KBL_GRAPHICS_STEP(dev_priv, 0, STEP_C0)) intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) | GEN6_GAMUNIT_CLOCK_GATE_DISABLE); @@ -7542,7 +7583,7 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv) * Display WA #0873: kbl */ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | - 
ILK_DPFC_NUKE_ON_ANY_MODIFICATION); + DPFC_NUKE_ON_ANY_MODIFICATION); } static void skl_init_clock_gating(struct drm_i915_private *dev_priv) @@ -7569,14 +7610,14 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv) * Display WA #0873: skl */ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | - ILK_DPFC_NUKE_ON_ANY_MODIFICATION); + DPFC_NUKE_ON_ANY_MODIFICATION); /* * WaFbcHighMemBwCorruptionAvoidance:skl * Display WA #0883: skl */ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | - ILK_DPFC_DISABLE_DUMMY0); + DPFC_DISABLE_DUMMY0); } static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) @@ -7888,6 +7929,8 @@ static const struct drm_i915_clock_gating_funcs platform##_clock_gating_funcs = .init_clock_gating = platform##_init_clock_gating, \ } +CG_FUNCS(dg2); +CG_FUNCS(xehpsdv); CG_FUNCS(adlp); CG_FUNCS(dg1); CG_FUNCS(gen12lp); @@ -7924,7 +7967,11 @@ CG_FUNCS(nop); */ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) { - if (IS_ALDERLAKE_P(dev_priv)) + if (IS_DG2(dev_priv)) + dev_priv->clock_gating_funcs = &dg2_clock_gating_funcs; + else if (IS_XEHPSDV(dev_priv)) + dev_priv->clock_gating_funcs = &xehpsdv_clock_gating_funcs; + else if (IS_ALDERLAKE_P(dev_priv)) dev_priv->clock_gating_funcs = &adlp_clock_gating_funcs; else if (IS_DG1(dev_priv)) dev_priv->clock_gating_funcs = &dg1_clock_gating_funcs; diff --git a/drivers/gpu/drm/i915/intel_pm_types.h b/drivers/gpu/drm/i915/intel_pm_types.h new file mode 100644 index 000000000000..211632f58751 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_pm_types.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __INTEL_PM_TYPES_H__ +#define __INTEL_PM_TYPES_H__ + +#include <linux/types.h> + +#include "display/intel_display.h" + +enum intel_ddb_partitioning { + INTEL_DDB_PART_1_2, + INTEL_DDB_PART_5_6, /* IVB+ */ +}; + +struct ilk_wm_values { + u32 wm_pipe[3]; + u32 wm_lp[3]; + u32 wm_lp_spr[3]; + bool enable_fbc_wm; + enum intel_ddb_partitioning partitioning; +}; + +struct g4x_pipe_wm { + u16 plane[I915_MAX_PLANES]; + u16 fbc; +}; + +struct g4x_sr_wm { + u16 plane; + u16 cursor; + u16 fbc; +}; + +struct vlv_wm_ddl_values { + u8 plane[I915_MAX_PLANES]; +}; + +struct vlv_wm_values { + struct g4x_pipe_wm pipe[3]; + struct g4x_sr_wm sr; + struct vlv_wm_ddl_values ddl[3]; + u8 level; + bool cxsr; +}; + +struct g4x_wm_values { + struct g4x_pipe_wm pipe[2]; + struct g4x_sr_wm sr; + struct g4x_sr_wm hpll; + bool cxsr; + bool hpll_en; + bool fbc_en; +}; + +struct skl_ddb_entry { + u16 start, end; /* in number of blocks, 'end' is exclusive */ +}; + +static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry) +{ + return entry->end - entry->start; +} + +static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1, + const struct skl_ddb_entry *e2) +{ + if (e1->start == e2->start && e1->end == e2->end) + return true; + + return false; +} + +#endif /* __INTEL_PM_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c index 98c7339bf8ba..f2b888c16958 100644 --- a/drivers/gpu/drm/i915/intel_region_ttm.c +++ b/drivers/gpu/drm/i915/intel_region_ttm.c @@ -104,19 +104,50 @@ int intel_region_ttm_init(struct intel_memory_region *mem) * memory region, and if it was registered with the TTM device, * removes that registration. 
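+ *
+ * Return: 0 on successful teardown, a negative error code otherwise
+ * (-EBUSY when objects remain in the region after flushing and retrying,
+ * as can be seen in the implementation below); on failure the caller must
+ * not free the region, since that would invite a use-after-free.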
 */ -void intel_region_ttm_fini(struct intel_memory_region *mem) +int intel_region_ttm_fini(struct intel_memory_region *mem) { - int ret; + struct ttm_resource_manager *man = mem->region_private; + int ret = -EBUSY; + int count; + + /* + * Put the region's move fences. This releases requests that + * may hold on to contexts and vms that may hold on to buffer + * objects placed in this region. + */ + if (man) + ttm_resource_manager_cleanup(man); + + /* Flush objects from region. */ + for (count = 0; count < 10; ++count) { + i915_gem_flush_free_objects(mem->i915); + + mutex_lock(&mem->objects.lock); + if (list_empty(&mem->objects.list)) + ret = 0; + mutex_unlock(&mem->objects.lock); + if (!ret) + break; + + msleep(20); + flush_delayed_work(&mem->i915->bdev.wq); + } + + /* If we leaked objects, don't free the region; that would cause a use-after-free */ + if (ret || !man) + return ret; ret = i915_ttm_buddy_man_fini(&mem->i915->bdev, intel_region_to_ttm_type(mem)); GEM_WARN_ON(ret); mem->region_private = NULL; + + return ret; } /** - * intel_region_ttm_resource_to_st - Convert an opaque TTM resource manager resource - * to an sg_table. + * intel_region_ttm_resource_to_rsgt - + * Convert an opaque TTM resource manager resource to a refcounted sg_table. * @mem: The memory region. * @res: The resource manager resource obtained from the TTM resource manager. * * @@ -126,17 +157,18 @@ void intel_region_ttm_fini(struct intel_memory_region *mem) * * Return: A malloced struct i915_refct_sgt on success, an error pointer on failure. */ -struct sg_table *intel_region_ttm_resource_to_st(struct intel_memory_region *mem, - struct ttm_resource *res) +struct i915_refct_sgt * +intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem, + struct ttm_resource *res) { if (mem->is_range_manager) { struct ttm_range_mgr_node *range_node = to_ttm_range_mgr_node(res); - return i915_sg_from_mm_node(&range_node->mm_nodes[0], - mem->region.start); + return i915_rsgt_from_mm_node(&range_node->mm_nodes[0], + mem->region.start); } else { - return i915_sg_from_buddy_resource(res, mem->region.start); + return i915_rsgt_from_buddy_resource(res, mem->region.start); } } diff --git a/drivers/gpu/drm/i915/intel_region_ttm.h b/drivers/gpu/drm/i915/intel_region_ttm.h index 6f44075920f2..fdee5e7bd46c 100644 --- a/drivers/gpu/drm/i915/intel_region_ttm.h +++ b/drivers/gpu/drm/i915/intel_region_ttm.h @@ -20,10 +20,11 @@ void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv); int intel_region_ttm_init(struct intel_memory_region *mem); -void intel_region_ttm_fini(struct intel_memory_region *mem); +int intel_region_ttm_fini(struct intel_memory_region *mem); -struct sg_table *intel_region_ttm_resource_to_st(struct intel_memory_region *mem, - struct ttm_resource *res); +struct i915_refct_sgt * +intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem, + struct ttm_resource *res); void intel_region_ttm_resource_free(struct intel_memory_region *mem, struct ttm_resource *res); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 0d85f3c5c526..22dab36afcb6 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -590,6 +590,9 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm) pm_runtime_use_autosuspend(kdev); } + /* Enable by default */ + pm_runtime_allow(kdev); + /* * The core calls the driver load handler with an RPM reference held.
* We drop that here and will reacquire it during unloading in diff --git a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c index 6cf967631395..a4b16b9e2e55 100644 --- a/drivers/gpu/drm/i915/intel_step.c +++ b/drivers/gpu/drm/i915/intel_step.c @@ -23,7 +23,8 @@ * use a macro to define these to make it easier to identify the platforms * where the two steppings can deviate. */ -#define COMMON_STEP(x) .gt_step = STEP_##x, .display_step = STEP_##x +#define COMMON_STEP(x) .graphics_step = STEP_##x, .display_step = STEP_##x, .media_step = STEP_##x +#define COMMON_GT_MEDIA_STEP(x) .graphics_step = STEP_##x, .media_step = STEP_##x static const struct intel_step_info skl_revids[] = { [0x6] = { COMMON_STEP(G0) }, @@ -33,13 +34,13 @@ static const struct intel_step_info skl_revids[] = { }; static const struct intel_step_info kbl_revids[] = { - [1] = { .gt_step = STEP_B0, .display_step = STEP_B0 }, - [2] = { .gt_step = STEP_C0, .display_step = STEP_B0 }, - [3] = { .gt_step = STEP_D0, .display_step = STEP_B0 }, - [4] = { .gt_step = STEP_F0, .display_step = STEP_C0 }, - [5] = { .gt_step = STEP_C0, .display_step = STEP_B1 }, - [6] = { .gt_step = STEP_D1, .display_step = STEP_B1 }, - [7] = { .gt_step = STEP_G0, .display_step = STEP_C0 }, + [1] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 }, + [2] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_B0 }, + [3] = { COMMON_GT_MEDIA_STEP(D0), .display_step = STEP_B0 }, + [4] = { COMMON_GT_MEDIA_STEP(F0), .display_step = STEP_C0 }, + [5] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_B1 }, + [6] = { COMMON_GT_MEDIA_STEP(D1), .display_step = STEP_B1 }, + [7] = { COMMON_GT_MEDIA_STEP(G0), .display_step = STEP_C0 }, }; static const struct intel_step_info bxt_revids[] = { @@ -63,16 +64,16 @@ static const struct intel_step_info jsl_ehl_revids[] = { }; static const struct intel_step_info tgl_uy_revids[] = { - [0] = { .gt_step = STEP_A0, .display_step = STEP_A0 }, - [1] = { .gt_step = STEP_B0, .display_step = STEP_C0 }, - [2] = { .gt_step = STEP_B1, .display_step = STEP_C0 }, - [3] = { .gt_step = STEP_C0, .display_step = STEP_D0 }, + [0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 }, + [1] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_C0 }, + [2] = { COMMON_GT_MEDIA_STEP(B1), .display_step = STEP_C0 }, + [3] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_D0 }, }; /* Same GT stepping between tgl_uy_revids and tgl_revids don't mean the same HW */ static const struct intel_step_info tgl_revids[] = { - [0] = { .gt_step = STEP_A0, .display_step = STEP_B0 }, - [1] = { .gt_step = STEP_B0, .display_step = STEP_D0 }, + [0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_B0 }, + [1] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_D0 }, }; static const struct intel_step_info rkl_revids[] = { @@ -87,38 +88,38 @@ static const struct intel_step_info dg1_revids[] = { }; static const struct intel_step_info adls_revids[] = { - [0x0] = { .gt_step = STEP_A0, .display_step = STEP_A0 }, - [0x1] = { .gt_step = STEP_A0, .display_step = STEP_A2 }, - [0x4] = { .gt_step = STEP_B0, .display_step = STEP_B0 }, - [0x8] = { .gt_step = STEP_C0, .display_step = STEP_B0 }, - [0xC] = { .gt_step = STEP_D0, .display_step = STEP_C0 }, + [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 }, + [0x1] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A2 }, + [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 }, + [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_B0 }, + [0xC] = { COMMON_GT_MEDIA_STEP(D0), .display_step = 
STEP_C0 }, }; static const struct intel_step_info adlp_revids[] = { - [0x0] = { .gt_step = STEP_A0, .display_step = STEP_A0 }, - [0x4] = { .gt_step = STEP_B0, .display_step = STEP_B0 }, - [0x8] = { .gt_step = STEP_C0, .display_step = STEP_C0 }, - [0xC] = { .gt_step = STEP_C0, .display_step = STEP_D0 }, + [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 }, + [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 }, + [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_C0 }, + [0xC] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_D0 }, }; static const struct intel_step_info xehpsdv_revids[] = { - [0x0] = { .gt_step = STEP_A0 }, - [0x1] = { .gt_step = STEP_A1 }, - [0x4] = { .gt_step = STEP_B0 }, - [0x8] = { .gt_step = STEP_C0 }, + [0x0] = { COMMON_GT_MEDIA_STEP(A0) }, + [0x1] = { COMMON_GT_MEDIA_STEP(A1) }, + [0x4] = { COMMON_GT_MEDIA_STEP(B0) }, + [0x8] = { COMMON_GT_MEDIA_STEP(C0) }, }; static const struct intel_step_info dg2_g10_revid_step_tbl[] = { - [0x0] = { .gt_step = STEP_A0, .display_step = STEP_A0 }, - [0x1] = { .gt_step = STEP_A1, .display_step = STEP_A0 }, - [0x4] = { .gt_step = STEP_B0, .display_step = STEP_B0 }, - [0x8] = { .gt_step = STEP_C0, .display_step = STEP_C0 }, + [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 }, + [0x1] = { COMMON_GT_MEDIA_STEP(A1), .display_step = STEP_A0 }, + [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 }, + [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_C0 }, }; static const struct intel_step_info dg2_g11_revid_step_tbl[] = { - [0x0] = { .gt_step = STEP_A0, .display_step = STEP_B0 }, - [0x4] = { .gt_step = STEP_B0, .display_step = STEP_C0 }, - [0x5] = { .gt_step = STEP_B1, .display_step = STEP_C0 }, + [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_B0 }, + [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_C0 }, + [0x5] = { COMMON_GT_MEDIA_STEP(B1), .display_step = STEP_C0 }, }; void intel_step_init(struct drm_i915_private *i915) @@ -179,7 +180,7 @@ void intel_step_init(struct drm_i915_private *i915) if (!revids) return; - if (revid < size && revids[revid].gt_step != STEP_NONE) { + if (revid < size && revids[revid].graphics_step != STEP_NONE) { step = revids[revid]; } else { drm_warn(&i915->drm, "Unknown revid 0x%02x\n", revid); @@ -192,7 +193,7 @@ void intel_step_init(struct drm_i915_private *i915) * steppings in the array are not monotonically increasing, but * it's better than defaulting to 0. 
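+ *
+ * For example, with a hypothetical revid table populated only at [0] and
+ * [4], a revid of 2 resolves to the stepping stored at [4], the next
+ * higher entry present in the table.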
*/ - while (revid < size && revids[revid].gt_step == STEP_NONE) + while (revid < size && revids[revid].graphics_step == STEP_NONE) revid++; if (revid < size) { @@ -201,12 +202,12 @@ void intel_step_init(struct drm_i915_private *i915) step = revids[revid]; } else { drm_dbg(&i915->drm, "Using future steppings\n"); - step.gt_step = STEP_FUTURE; + step.graphics_step = STEP_FUTURE; step.display_step = STEP_FUTURE; } } - if (drm_WARN_ON(&i915->drm, step.gt_step == STEP_NONE)) + if (drm_WARN_ON(&i915->drm, step.graphics_step == STEP_NONE)) return; RUNTIME_INFO(i915)->step = step; diff --git a/drivers/gpu/drm/i915/intel_step.h b/drivers/gpu/drm/i915/intel_step.h index f6641e2a3c77..d71a99bd5179 100644 --- a/drivers/gpu/drm/i915/intel_step.h +++ b/drivers/gpu/drm/i915/intel_step.h @@ -11,8 +11,9 @@ struct drm_i915_private; struct intel_step_info { - u8 gt_step; + u8 graphics_step; u8 display_step; + u8 media_step; }; #define STEP_ENUM_VAL(name) STEP_##name, diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index e072054adac5..abdac78d3976 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -22,11 +22,11 @@ */ #include <linux/pm_runtime.h> -#include <asm/iosf_mbi.h> #include "gt/intel_lrc_reg.h" /* for shadow reg list */ #include "i915_drv.h" +#include "i915_iosf_mbi.h" #include "i915_trace.h" #include "i915_vgpu.h" #include "intel_pm.h" @@ -2020,7 +2020,7 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb, return NOTIFY_OK; } -static int uncore_mmio_setup(struct intel_uncore *uncore) +int intel_uncore_setup_mmio(struct intel_uncore *uncore) { struct drm_i915_private *i915 = uncore->i915; struct pci_dev *pdev = to_pci_dev(i915->drm.dev); @@ -2053,7 +2053,7 @@ static int uncore_mmio_setup(struct intel_uncore *uncore) return 0; } -static void uncore_mmio_cleanup(struct intel_uncore *uncore) +void intel_uncore_cleanup_mmio(struct intel_uncore *uncore) { struct pci_dev *pdev = to_pci_dev(uncore->i915->drm.dev); @@ -2146,10 +2146,6 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) struct drm_i915_private *i915 = uncore->i915; int ret; - ret = uncore_mmio_setup(uncore); - if (ret) - return ret; - /* * The boot firmware initializes local memory and assesses its health. 
* If memory training fails, the punit will have been instructed to @@ -2170,7 +2166,7 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) } else { ret = uncore_forcewake_init(uncore); if (ret) - goto out_mmio_cleanup; + return ret; } /* make sure fw funcs are set if and only if we have fw*/ @@ -2192,11 +2188,6 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n"); return 0; - -out_mmio_cleanup: - uncore_mmio_cleanup(uncore); - - return ret; } /* @@ -2261,8 +2252,6 @@ void intel_uncore_fini_mmio(struct intel_uncore *uncore) intel_uncore_fw_domains_fini(uncore); iosf_mbi_punit_release(); } - - uncore_mmio_cleanup(uncore); } static const struct reg_whitelist { diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index 3248e4e2c540..d1d17b04e29f 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -218,11 +218,13 @@ void intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug); void intel_uncore_init_early(struct intel_uncore *uncore, struct drm_i915_private *i915); +int intel_uncore_setup_mmio(struct intel_uncore *uncore); int intel_uncore_init_mmio(struct intel_uncore *uncore); void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore, struct intel_gt *gt); bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore); bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore); +void intel_uncore_cleanup_mmio(struct intel_uncore *uncore); void intel_uncore_fini_mmio(struct intel_uncore *uncore); void intel_uncore_suspend(struct intel_uncore *uncore); void intel_uncore_resume_early(struct intel_uncore *uncore); diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c index e2314ad9546d..15311eaed848 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c @@ -44,6 +44,11 @@ struct intel_gt *pxp_to_gt(const struct intel_pxp *pxp) return container_of(pxp, struct intel_gt, pxp); } +bool intel_pxp_is_enabled(const struct intel_pxp *pxp) +{ + return pxp->ce; +} + bool intel_pxp_is_active(const struct intel_pxp *pxp) { return pxp->arb_is_valid; diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.h b/drivers/gpu/drm/i915/pxp/intel_pxp.h index aa262258d4d4..73847e535cab 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.h @@ -6,17 +6,15 @@ #ifndef __INTEL_PXP_H__ #define __INTEL_PXP_H__ -#include "intel_pxp_types.h" +#include <linux/errno.h> +#include <linux/types.h> +struct intel_pxp; struct drm_i915_gem_object; -static inline bool intel_pxp_is_enabled(const struct intel_pxp *pxp) -{ - return pxp->ce; -} - #ifdef CONFIG_DRM_I915_PXP struct intel_gt *pxp_to_gt(const struct intel_pxp *pxp); +bool intel_pxp_is_enabled(const struct intel_pxp *pxp); bool intel_pxp_is_active(const struct intel_pxp *pxp); void intel_pxp_init(struct intel_pxp *pxp); @@ -48,6 +46,11 @@ static inline int intel_pxp_start(struct intel_pxp *pxp) return -ENODEV; } +static inline bool intel_pxp_is_enabled(const struct intel_pxp *pxp) +{ + return false; +} + static inline bool intel_pxp_is_active(const struct intel_pxp *pxp) { return false; diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c index 23fd86de5a24..6a7d4e2ee138 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c @@ -7,26 +7,29 @@ #include "intel_pxp_irq.h" #include 
"intel_pxp_pm.h" #include "intel_pxp_session.h" +#include "i915_drv.h" -void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime) +void intel_pxp_suspend_prepare(struct intel_pxp *pxp) { if (!intel_pxp_is_enabled(pxp)) return; pxp->arb_is_valid = false; - /* - * Contexts using protected objects keep a runtime PM reference, so we - * can only runtime suspend when all of them have been either closed - * or banned. Therefore, there is no need to invalidate in that - * scenario. - */ - if (!runtime) - intel_pxp_invalidate(pxp); + intel_pxp_invalidate(pxp); +} - intel_pxp_fini_hw(pxp); +void intel_pxp_suspend(struct intel_pxp *pxp) +{ + intel_wakeref_t wakeref; - pxp->hw_state_invalidated = false; + if (!intel_pxp_is_enabled(pxp)) + return; + + with_intel_runtime_pm(&pxp_to_gt(pxp)->i915->runtime_pm, wakeref) { + intel_pxp_fini_hw(pxp); + pxp->hw_state_invalidated = false; + } } void intel_pxp_resume(struct intel_pxp *pxp) @@ -44,3 +47,15 @@ void intel_pxp_resume(struct intel_pxp *pxp) intel_pxp_init_hw(pxp); } + +void intel_pxp_runtime_suspend(struct intel_pxp *pxp) +{ + if (!intel_pxp_is_enabled(pxp)) + return; + + pxp->arb_is_valid = false; + + intel_pxp_fini_hw(pxp); + + pxp->hw_state_invalidated = false; +} diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h index c89e97a0c3d0..16990a3f2f85 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h @@ -9,16 +9,29 @@ #include "intel_pxp_types.h" #ifdef CONFIG_DRM_I915_PXP -void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime); +void intel_pxp_suspend_prepare(struct intel_pxp *pxp); +void intel_pxp_suspend(struct intel_pxp *pxp); void intel_pxp_resume(struct intel_pxp *pxp); +void intel_pxp_runtime_suspend(struct intel_pxp *pxp); #else -static inline void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime) +static inline void intel_pxp_suspend_prepare(struct intel_pxp *pxp) +{ +} + +static inline void intel_pxp_suspend(struct intel_pxp *pxp) { } static inline void intel_pxp_resume(struct intel_pxp *pxp) { } -#endif +static inline void intel_pxp_runtime_suspend(struct intel_pxp *pxp) +{ +} +#endif +static inline void intel_pxp_runtime_resume(struct intel_pxp *pxp) +{ + intel_pxp_resume(pxp); +} #endif /* __INTEL_PXP_PM_H__ */ diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c index d02732f04757..598840b73dfa 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c @@ -3,7 +3,8 @@ * Copyright(c) 2020, Intel Corporation. All rights reserved. 
*/ -#include "drm/i915_drm.h" +#include <drm/i915_drm.h> + #include "i915_drv.h" #include "intel_pxp.h" diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c index 49508f31dcb7..5d169624ad60 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c @@ -4,8 +4,10 @@ */ #include <linux/component.h> -#include "drm/i915_pxp_tee_interface.h" -#include "drm/i915_component.h" + +#include <drm/i915_pxp_tee_interface.h> +#include <drm/i915_component.h> + #include "i915_drv.h" #include "intel_pxp.h" #include "intel_pxp_session.h" diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h index 73ef7d1754e1..7ce5f37ee12e 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h @@ -7,9 +7,7 @@ #define __INTEL_PXP_TYPES_H__ #include <linux/completion.h> -#include <linux/list.h> #include <linux/mutex.h> -#include <linux/spinlock.h> #include <linux/types.h> #include <linux/workqueue.h> diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index f99bb0113726..7e0658a77659 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -442,6 +442,7 @@ static int igt_evict_contexts(void *arg) /* Overfill the GGTT with context objects and so try to evict one. */ for_each_engine(engine, gt, id) { struct i915_sw_fence fence; + struct i915_request *last = NULL; count = 0; onstack_fence_init(&fence); @@ -479,6 +480,9 @@ static int igt_evict_contexts(void *arg) i915_request_add(rq); count++; + if (last) + i915_request_put(last); + last = i915_request_get(rq); err = 0; } while(1); onstack_fence_fini(&fence); @@ -486,6 +490,21 @@ static int igt_evict_contexts(void *arg) count, engine->name); if (err) break; + if (last) { + if (i915_request_wait(last, 0, HZ) < 0) { + err = -EIO; + i915_request_put(last); + pr_err("Failed waiting for last request (on %s)", + engine->name); + break; + } + i915_request_put(last); + } + err = intel_gt_wait_for_idle(engine->gt, HZ * 3); + if (err) { + pr_err("Failed to idle GT (on %s)", engine->name); + break; + } } mutex_lock(&ggtt->vm.mutex); diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index d67710d10615..9979ef9197cd 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -209,6 +209,10 @@ static int igt_request_rewind(void *arg) int err = -EINVAL; ctx[0] = mock_context(i915, "A"); + if (!ctx[0]) { + err = -ENOMEM; + goto err_ctx_0; + } ce = i915_gem_context_get_engine(ctx[0], RCS0); GEM_BUG_ON(IS_ERR(ce)); @@ -223,6 +227,10 @@ static int igt_request_rewind(void *arg) i915_request_add(request); ctx[1] = mock_context(i915, "B"); + if (!ctx[1]) { + err = -ENOMEM; + goto err_ctx_1; + } ce = i915_gem_context_get_engine(ctx[1], RCS0); GEM_BUG_ON(IS_ERR(ce)); @@ -261,9 +269,11 @@ err: i915_request_put(vip); err_context_1: mock_context_close(ctx[1]); +err_ctx_1: i915_request_put(request); err_context_0: mock_context_close(ctx[0]); +err_ctx_0: mock_device_flush(i915); return err; } @@ -2805,7 +2815,7 @@ static int p_sync0(void *arg) i915_request_add(rq); err = 0; - if (i915_request_wait(rq, 0, HZ / 5) < 0) + if (i915_request_wait(rq, 0, HZ) < 0) err = -ETIME; i915_request_put(rq); if (err) @@ -2876,7 +2886,7 @@ static int p_sync1(void *arg) i915_request_add(rq); err = 0; - if (prev && 
i915_request_wait(prev, 0, HZ / 5) < 0) + if (prev && i915_request_wait(prev, 0, HZ) < 0) err = -ETIME; i915_request_put(prev); prev = rq; diff --git a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c index cbf45d85cbff..daa985e5a19b 100644 --- a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c @@ -28,7 +28,7 @@ #include "../i915_selftest.h" -static int __i915_sw_fence_call +static int fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { switch (state) { diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c index 9f8590b868a9..a2838c65f8a5 100644 --- a/drivers/gpu/drm/i915/selftests/igt_reset.c +++ b/drivers/gpu/drm/i915/selftests/igt_reset.c @@ -36,7 +36,7 @@ void igt_global_reset_unlock(struct intel_gt *gt) enum intel_engine_id id; for_each_engine(engine, gt, id) - clear_bit(I915_RESET_ENGINE + id, >->reset.flags); + clear_and_wake_up_bit(I915_RESET_ENGINE + id, >->reset.flags); clear_bit(I915_RESET_BACKOFF, >->reset.flags); wake_up_all(>->reset.queue); diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c index 418caae84759..0d5df0dc7212 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -225,7 +225,7 @@ static int igt_mock_reserve(void *arg) out_close: close_objects(mem, &objects); - intel_memory_region_put(mem); + intel_memory_region_destroy(mem); out_free_order: kfree(order); return err; @@ -439,7 +439,7 @@ static int igt_mock_splintered_region(void *arg) out_close: close_objects(mem, &objects); out_put: - intel_memory_region_put(mem); + intel_memory_region_destroy(mem); return err; } @@ -507,7 +507,7 @@ static int igt_mock_max_segment(void *arg) out_close: close_objects(mem, &objects); out_put: - intel_memory_region_put(mem); + intel_memory_region_destroy(mem); return err; } @@ -1196,7 +1196,7 @@ int intel_memory_region_mock_selftests(void) err = i915_subtests(tests, mem); - intel_memory_region_put(mem); + intel_memory_region_destroy(mem); out_unref: mock_destroy_device(i915); return err; diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c index 080b90b63d16..bf2752cc1e0b 100644 --- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c +++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c @@ -26,7 +26,7 @@ /* Small library of different fence types useful for writing tests */ -static int __i915_sw_fence_call +static int nop_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { return NOTIFY_DONE; @@ -41,12 +41,12 @@ void __onstack_fence_init(struct i915_sw_fence *fence, __init_waitqueue_head(&fence->wait, name, key); atomic_set(&fence->pending, 1); fence->error = 0; - fence->flags = (unsigned long)nop_fence_notify; + fence->fn = nop_fence_notify; } void onstack_fence_fini(struct i915_sw_fence *fence) { - if (!fence->flags) + if (!fence->fn) return; i915_sw_fence_commit(fence); @@ -89,7 +89,7 @@ struct heap_fence { }; }; -static int __i915_sw_fence_call +static int heap_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { struct heap_fence *h = container_of(fence, typeof(*h), fence); diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 4f8180146888..d0e2e61de8d4 100644 --- 
a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -165,7 +165,7 @@ struct drm_i915_private *mock_gem_device(void) /* Using the global GTT may ask questions about KMS users, so prepare */ drm_mode_config_init(&i915->drm); - mkwrite_device_info(i915)->graphics_ver = -1; + mkwrite_device_info(i915)->graphics.ver = -1; mkwrite_device_info(i915)->page_sizes = I915_GTT_PAGE_SIZE_4K | @@ -177,6 +177,8 @@ struct drm_i915_private *mock_gem_device(void) mock_uncore_init(&i915->uncore, i915); + spin_lock_init(&i915->gpu_error.lock); + i915_gem_init__mm(i915); intel_gt_init_early(&i915->gt, i915); atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */ diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c index 75793008c4ef..19bff8afcaaa 100644 --- a/drivers/gpu/drm/i915/selftests/mock_region.c +++ b/drivers/gpu/drm/i915/selftests/mock_region.c @@ -15,9 +15,9 @@ static void mock_region_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) { + i915_refct_sgt_put(obj->mm.rsgt); + obj->mm.rsgt = NULL; intel_region_ttm_resource_free(obj->mm.region, obj->mm.res); - sg_free_table(pages); - kfree(pages); } static int mock_region_get_pages(struct drm_i915_gem_object *obj) @@ -36,12 +36,14 @@ static int mock_region_get_pages(struct drm_i915_gem_object *obj) if (IS_ERR(obj->mm.res)) return PTR_ERR(obj->mm.res); - pages = intel_region_ttm_resource_to_st(obj->mm.region, obj->mm.res); - if (IS_ERR(pages)) { - err = PTR_ERR(pages); + obj->mm.rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region, + obj->mm.res); + if (IS_ERR(obj->mm.rsgt)) { + err = PTR_ERR(obj->mm.rsgt); goto err_free_resource; } + pages = &obj->mm.rsgt->table; __i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl)); return 0; @@ -82,13 +84,16 @@ static int mock_object_init(struct intel_memory_region *mem, return 0; } -static void mock_region_fini(struct intel_memory_region *mem) +static int mock_region_fini(struct intel_memory_region *mem) { struct drm_i915_private *i915 = mem->i915; int instance = mem->instance; + int ret; - intel_region_ttm_fini(mem); + ret = intel_region_ttm_fini(mem); ida_free(&i915->selftest.mock_region_instances, instance); + + return ret; } static const struct intel_memory_region_ops mock_region_ops = { diff --git a/drivers/gpu/drm/i915/vlv_sideband.c b/drivers/gpu/drm/i915/vlv_sideband.c index 35380738a951..ed2ac5752ac4 100644 --- a/drivers/gpu/drm/i915/vlv_sideband.c +++ b/drivers/gpu/drm/i915/vlv_sideband.c @@ -3,9 +3,8 @@ * Copyright © 2013-2021 Intel Corporation */ -#include <asm/iosf_mbi.h> - #include "i915_drv.h" +#include "i915_iosf_mbi.h" #include "vlv_sideband.h" /* diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig index b5fa0e45a839..bb9738c7c825 100644 --- a/drivers/gpu/drm/imx/Kconfig +++ b/drivers/gpu/drm/imx/Kconfig @@ -4,7 +4,7 @@ config DRM_IMX select DRM_KMS_HELPER select VIDEOMODE_HELPERS select DRM_GEM_CMA_HELPER - select DRM_KMS_CMA_HELPER + select DRM_KMS_HELPER depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM || COMPILE_TEST) depends on IMX_IPUV3_CORE help diff --git a/drivers/gpu/drm/imx/dcss/Kconfig b/drivers/gpu/drm/imx/dcss/Kconfig index 2b17a964ff05..7374f1952762 100644 --- a/drivers/gpu/drm/imx/dcss/Kconfig +++ b/drivers/gpu/drm/imx/dcss/Kconfig @@ -1,7 +1,7 @@ config DRM_IMX_DCSS tristate "i.MX8MQ DCSS" select IMX_IRQSTEER - select DRM_KMS_CMA_HELPER + select DRM_KMS_HELPER select VIDEOMODE_HELPERS depends on DRM && ARCH_MXC 
&& ARM64 help diff --git a/drivers/gpu/drm/ingenic/Kconfig b/drivers/gpu/drm/ingenic/Kconfig index 3b57f8be007c..001f59fb06d5 100644 --- a/drivers/gpu/drm/ingenic/Kconfig +++ b/drivers/gpu/drm/ingenic/Kconfig @@ -8,7 +8,6 @@ config DRM_INGENIC select DRM_BRIDGE select DRM_PANEL_BRIDGE select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE help diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index a5df1c8d34cd..b4943a56be09 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -21,6 +21,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> +#include <drm/drm_bridge_connector.h> #include <drm/drm_color_mgmt.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> @@ -41,6 +42,8 @@ #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> +#define HWDESC_PALETTE 2 + struct ingenic_dma_hwdesc { u32 next; u32 addr; @@ -49,9 +52,7 @@ struct ingenic_dma_hwdesc { } __aligned(16); struct ingenic_dma_hwdescs { - struct ingenic_dma_hwdesc hwdesc_f0; - struct ingenic_dma_hwdesc hwdesc_f1; - struct ingenic_dma_hwdesc hwdesc_pal; + struct ingenic_dma_hwdesc hwdesc[3]; u16 palette[256] __aligned(16); }; @@ -64,6 +65,11 @@ struct jz_soc_info { unsigned int num_formats_f0, num_formats_f1; }; +struct ingenic_drm_private_state { + struct drm_private_state base; + bool use_palette; +}; + struct ingenic_drm { struct drm_device drm; /* @@ -99,8 +105,53 @@ struct ingenic_drm { struct mutex clk_mutex; bool update_clk_rate; struct notifier_block clock_nb; + + struct drm_private_obj private_obj; }; +struct ingenic_drm_bridge { + struct drm_encoder encoder; + struct drm_bridge bridge, *next_bridge; + + struct drm_bus_cfg bus_cfg; +}; + +static inline struct ingenic_drm_bridge * +to_ingenic_drm_bridge(struct drm_encoder *encoder) +{ + return container_of(encoder, struct ingenic_drm_bridge, encoder); +} + +static inline struct ingenic_drm_private_state * +to_ingenic_drm_priv_state(struct drm_private_state *state) +{ + return container_of(state, struct ingenic_drm_private_state, base); +} + +static struct ingenic_drm_private_state * +ingenic_drm_get_priv_state(struct ingenic_drm *priv, struct drm_atomic_state *state) +{ + struct drm_private_state *priv_state; + + priv_state = drm_atomic_get_private_obj_state(state, &priv->private_obj); + if (IS_ERR(priv_state)) + return ERR_CAST(priv_state); + + return to_ingenic_drm_priv_state(priv_state); +} + +static struct ingenic_drm_private_state * +ingenic_drm_get_new_priv_state(struct ingenic_drm *priv, struct drm_atomic_state *state) +{ + struct drm_private_state *priv_state; + + priv_state = drm_atomic_get_new_private_obj_state(state, &priv->private_obj); + if (!priv_state) + return NULL; + + return to_ingenic_drm_priv_state(priv_state); +} + static bool ingenic_drm_writeable_reg(struct device *dev, unsigned int reg) { switch (reg) { @@ -141,6 +192,14 @@ static inline struct ingenic_drm *drm_nb_get_priv(struct notifier_block *nb) return container_of(nb, struct ingenic_drm, clock_nb); } +static inline dma_addr_t dma_hwdesc_addr(const struct ingenic_drm *priv, + unsigned int idx) +{ + u32 offset = offsetof(struct ingenic_dma_hwdescs, hwdesc[idx]); + + return priv->dma_hwdescs_phys + offset; +} + static int ingenic_drm_update_pixclk(struct notifier_block *nb, unsigned long action, void *data) @@ -163,9 +222,20 @@ static void 
ingenic_drm_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct ingenic_drm *priv = drm_crtc_get_priv(crtc); + struct ingenic_drm_private_state *priv_state; + unsigned int next_id; + + priv_state = ingenic_drm_get_priv_state(priv, state); + if (WARN_ON(IS_ERR(priv_state))) + return; regmap_write(priv->map, JZ_REG_LCD_STATE, 0); + /* Set addresses of our DMA descriptor chains */ + next_id = priv_state->use_palette ? HWDESC_PALETTE : 0; + regmap_write(priv->map, JZ_REG_LCD_DA0, dma_hwdesc_addr(priv, next_id)); + regmap_write(priv->map, JZ_REG_LCD_DA1, dma_hwdesc_addr(priv, 1)); + regmap_update_bits(priv->map, JZ_REG_LCD_CTRL, JZ_LCD_CTRL_ENABLE | JZ_LCD_CTRL_DISABLE, JZ_LCD_CTRL_ENABLE); @@ -369,6 +439,7 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); struct ingenic_drm *priv = drm_device_get_priv(plane->dev); + struct ingenic_drm_private_state *priv_state; struct drm_crtc_state *crtc_state; struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc; int ret; @@ -381,6 +452,10 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane, if (WARN_ON(!crtc_state)) return -EINVAL; + priv_state = ingenic_drm_get_priv_state(priv, state); + if (IS_ERR(priv_state)) + return PTR_ERR(priv_state); + ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, @@ -399,6 +474,9 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane, (new_plane_state->src_h >> 16) != new_plane_state->crtc_h)) return -EINVAL; + priv_state->use_palette = new_plane_state->fb && + new_plane_state->fb->format->format == DRM_FORMAT_C8; + /* * Require full modeset if enabling or disabling a plane, or changing * its position, size or depth. @@ -558,9 +636,10 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane, struct ingenic_drm *priv = drm_device_get_priv(plane->dev); struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state, plane); struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state, plane); + unsigned int width, height, cpp, next_id, plane_id; + struct ingenic_drm_private_state *priv_state; struct drm_crtc_state *crtc_state; struct ingenic_dma_hwdesc *hwdesc; - unsigned int width, height, cpp, offset; dma_addr_t addr; u32 fourcc; @@ -569,32 +648,26 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane, drm_fb_cma_sync_non_coherent(&priv->drm, oldstate, newstate); crtc_state = newstate->crtc->state; + plane_id = !!(priv->soc_info->has_osd && plane != &priv->f0); addr = drm_fb_cma_get_gem_addr(newstate->fb, newstate, 0); width = newstate->src_w >> 16; height = newstate->src_h >> 16; cpp = newstate->fb->format->cpp[0]; - if (!priv->soc_info->has_osd || plane == &priv->f0) - hwdesc = &priv->dma_hwdescs->hwdesc_f0; - else - hwdesc = &priv->dma_hwdescs->hwdesc_f1; + priv_state = ingenic_drm_get_new_priv_state(priv, state); + next_id = (priv_state && priv_state->use_palette) ? 
HWDESC_PALETTE : plane_id; + hwdesc = &priv->dma_hwdescs->hwdesc[plane_id]; hwdesc->addr = addr; hwdesc->cmd = JZ_LCD_CMD_EOF_IRQ | (width * height * cpp / 4); + hwdesc->next = dma_hwdesc_addr(priv, next_id); if (drm_atomic_crtc_needs_modeset(crtc_state)) { fourcc = newstate->fb->format->format; ingenic_drm_plane_config(priv->dev, plane, fourcc); - if (fourcc == DRM_FORMAT_C8) - offset = offsetof(struct ingenic_dma_hwdescs, hwdesc_pal); - else - offset = offsetof(struct ingenic_dma_hwdescs, hwdesc_f0); - - priv->dma_hwdescs->hwdesc_f0.next = priv->dma_hwdescs_phys + offset; - crtc_state->color_mgmt_changed = fourcc == DRM_FORMAT_C8; } @@ -609,11 +682,10 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder, { struct ingenic_drm *priv = drm_device_get_priv(encoder->dev); struct drm_display_mode *mode = &crtc_state->adjusted_mode; - struct drm_connector *conn = conn_state->connector; - struct drm_display_info *info = &conn->display_info; + struct ingenic_drm_bridge *bridge = to_ingenic_drm_bridge(encoder); unsigned int cfg, rgbcfg = 0; - priv->panel_is_sharp = info->bus_flags & DRM_BUS_FLAG_SHARP_SIGNALS; + priv->panel_is_sharp = bridge->bus_cfg.flags & DRM_BUS_FLAG_SHARP_SIGNALS; if (priv->panel_is_sharp) { cfg = JZ_LCD_CFG_MODE_SPECIAL_TFT_1 | JZ_LCD_CFG_REV_POLARITY; @@ -626,19 +698,19 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder, cfg |= JZ_LCD_CFG_HSYNC_ACTIVE_LOW; if (mode->flags & DRM_MODE_FLAG_NVSYNC) cfg |= JZ_LCD_CFG_VSYNC_ACTIVE_LOW; - if (info->bus_flags & DRM_BUS_FLAG_DE_LOW) + if (bridge->bus_cfg.flags & DRM_BUS_FLAG_DE_LOW) cfg |= JZ_LCD_CFG_DE_ACTIVE_LOW; - if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE) + if (bridge->bus_cfg.flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE) cfg |= JZ_LCD_CFG_PCLK_FALLING_EDGE; if (!priv->panel_is_sharp) { - if (conn->connector_type == DRM_MODE_CONNECTOR_TV) { + if (conn_state->connector->connector_type == DRM_MODE_CONNECTOR_TV) { if (mode->flags & DRM_MODE_FLAG_INTERLACE) cfg |= JZ_LCD_CFG_MODE_TV_OUT_I; else cfg |= JZ_LCD_CFG_MODE_TV_OUT_P; } else { - switch (*info->bus_formats) { + switch (bridge->bus_cfg.format) { case MEDIA_BUS_FMT_RGB565_1X16: cfg |= JZ_LCD_CFG_MODE_GENERIC_16BIT; break; @@ -664,20 +736,29 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder, regmap_write(priv->map, JZ_REG_LCD_RGBC, rgbcfg); } -static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state) +static int ingenic_drm_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct ingenic_drm_bridge *ib = to_ingenic_drm_bridge(bridge->encoder); + + return drm_bridge_attach(bridge->encoder, ib->next_bridge, + &ib->bridge, flags); +} + +static int ingenic_drm_bridge_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) { - struct drm_display_info *info = &conn_state->connector->display_info; struct drm_display_mode *mode = &crtc_state->adjusted_mode; + struct ingenic_drm_bridge *ib = to_ingenic_drm_bridge(bridge->encoder); - if (info->num_bus_formats != 1) - return -EINVAL; + ib->bus_cfg = bridge_state->output_bus_cfg; if (conn_state->connector->connector_type == DRM_MODE_CONNECTOR_TV) return 0; - switch (*info->bus_formats) { + switch (bridge_state->output_bus_cfg.format) { case MEDIA_BUS_FMT_RGB888_3X8: case MEDIA_BUS_FMT_RGB888_3X8_DELTA: /* @@ -764,6 
+845,28 @@ ingenic_drm_gem_create_object(struct drm_device *drm, size_t size) return &obj->base; } +static struct drm_private_state * +ingenic_drm_duplicate_state(struct drm_private_obj *obj) +{ + struct ingenic_drm_private_state *state = to_ingenic_drm_priv_state(obj->state); + + state = kmemdup(state, sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); + + return &state->base; +} + +static void ingenic_drm_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + struct ingenic_drm_private_state *priv_state = to_ingenic_drm_priv_state(state); + + kfree(priv_state); +} + DEFINE_DRM_GEM_CMA_FOPS(ingenic_drm_fops); static const struct drm_driver ingenic_drm_driver_data = { @@ -819,8 +922,16 @@ static const struct drm_crtc_helper_funcs ingenic_drm_crtc_helper_funcs = { }; static const struct drm_encoder_helper_funcs ingenic_drm_encoder_helper_funcs = { - .atomic_mode_set = ingenic_drm_encoder_atomic_mode_set, - .atomic_check = ingenic_drm_encoder_atomic_check, + .atomic_mode_set = ingenic_drm_encoder_atomic_mode_set, +}; + +static const struct drm_bridge_funcs ingenic_drm_bridge_funcs = { + .attach = ingenic_drm_bridge_attach, + .atomic_check = ingenic_drm_bridge_atomic_check, + .atomic_reset = drm_atomic_helper_bridge_reset, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt, }; static const struct drm_mode_config_funcs ingenic_drm_mode_config_funcs = { @@ -834,6 +945,11 @@ static struct drm_mode_config_helper_funcs ingenic_drm_mode_config_helpers = { .atomic_commit_tail = drm_atomic_helper_commit_tail, }; +static const struct drm_private_state_funcs ingenic_drm_private_state_funcs = { + .atomic_duplicate_state = ingenic_drm_duplicate_state, + .atomic_destroy_state = ingenic_drm_destroy_state, +}; + static void ingenic_drm_unbind_all(void *d) { struct ingenic_drm *priv = d; @@ -846,21 +962,57 @@ static void __maybe_unused ingenic_drm_release_rmem(void *d) of_reserved_mem_device_release(d); } +static void ingenic_drm_configure_hwdesc(struct ingenic_drm *priv, + unsigned int hwdesc, + unsigned int next_hwdesc, u32 id) +{ + struct ingenic_dma_hwdesc *desc = &priv->dma_hwdescs->hwdesc[hwdesc]; + + desc->next = dma_hwdesc_addr(priv, next_hwdesc); + desc->id = id; +} + +static void ingenic_drm_configure_hwdesc_palette(struct ingenic_drm *priv) +{ + struct ingenic_dma_hwdesc *desc; + + ingenic_drm_configure_hwdesc(priv, HWDESC_PALETTE, 0, 0xc0); + + desc = &priv->dma_hwdescs->hwdesc[HWDESC_PALETTE]; + desc->addr = priv->dma_hwdescs_phys + + offsetof(struct ingenic_dma_hwdescs, palette); + desc->cmd = JZ_LCD_CMD_ENABLE_PAL + | (sizeof(priv->dma_hwdescs->palette) / 4); +} + +static void ingenic_drm_configure_hwdesc_plane(struct ingenic_drm *priv, + unsigned int plane) +{ + ingenic_drm_configure_hwdesc(priv, plane, plane, 0xf0 | plane); +} + +static void ingenic_drm_atomic_private_obj_fini(struct drm_device *drm, void *private_obj) +{ + drm_atomic_private_obj_fini(private_obj); +} + static int ingenic_drm_bind(struct device *dev, bool has_components) { struct platform_device *pdev = to_platform_device(dev); + struct ingenic_drm_private_state *private_state; const struct jz_soc_info *soc_info; struct ingenic_drm *priv; struct clk *parent_clk; struct drm_plane *primary; struct drm_bridge *bridge; struct drm_panel *panel; + struct drm_connector 
*connector; struct drm_encoder *encoder; + struct ingenic_drm_bridge *ib; struct drm_device *drm; void __iomem *base; long parent_rate; unsigned int i, clone_mask = 0; - dma_addr_t dma_hwdesc_phys_f0, dma_hwdesc_phys_f1; int ret, irq; soc_info = of_device_get_match_data(dev); @@ -942,27 +1094,14 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) if (!priv->dma_hwdescs) return -ENOMEM; - /* Configure DMA hwdesc for foreground0 plane */ - dma_hwdesc_phys_f0 = priv->dma_hwdescs_phys - + offsetof(struct ingenic_dma_hwdescs, hwdesc_f0); - priv->dma_hwdescs->hwdesc_f0.next = dma_hwdesc_phys_f0; - priv->dma_hwdescs->hwdesc_f0.id = 0xf0; + ingenic_drm_configure_hwdesc_plane(priv, 0); /* Configure DMA hwdesc for foreground1 plane */ - dma_hwdesc_phys_f1 = priv->dma_hwdescs_phys - + offsetof(struct ingenic_dma_hwdescs, hwdesc_f1); - priv->dma_hwdescs->hwdesc_f1.next = dma_hwdesc_phys_f1; - priv->dma_hwdescs->hwdesc_f1.id = 0xf1; + ingenic_drm_configure_hwdesc_plane(priv, 1); /* Configure DMA hwdesc for palette */ - priv->dma_hwdescs->hwdesc_pal.next = priv->dma_hwdescs_phys - + offsetof(struct ingenic_dma_hwdescs, hwdesc_f0); - priv->dma_hwdescs->hwdesc_pal.id = 0xc0; - priv->dma_hwdescs->hwdesc_pal.addr = priv->dma_hwdescs_phys - + offsetof(struct ingenic_dma_hwdescs, palette); - priv->dma_hwdescs->hwdesc_pal.cmd = JZ_LCD_CMD_ENABLE_PAL - | (sizeof(priv->dma_hwdescs->palette) / 4); + ingenic_drm_configure_hwdesc_palette(priv); primary = priv->soc_info->has_osd ? &priv->f1 : &priv->f0; @@ -1046,20 +1185,36 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) bridge = devm_drm_panel_bridge_add_typed(dev, panel, DRM_MODE_CONNECTOR_DPI); - encoder = drmm_plain_encoder_alloc(drm, NULL, DRM_MODE_ENCODER_DPI, NULL); - if (IS_ERR(encoder)) { - ret = PTR_ERR(encoder); + ib = drmm_encoder_alloc(drm, struct ingenic_drm_bridge, encoder, + NULL, DRM_MODE_ENCODER_DPI, NULL); + if (IS_ERR(ib)) { + ret = PTR_ERR(ib); dev_err(dev, "Failed to init encoder: %d\n", ret); return ret; } - encoder->possible_crtcs = 1; + encoder = &ib->encoder; + encoder->possible_crtcs = drm_crtc_mask(&priv->crtc); drm_encoder_helper_add(encoder, &ingenic_drm_encoder_helper_funcs); - ret = drm_bridge_attach(encoder, bridge, NULL, 0); - if (ret) + ib->bridge.funcs = &ingenic_drm_bridge_funcs; + ib->next_bridge = bridge; + + ret = drm_bridge_attach(encoder, &ib->bridge, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) { + dev_err(dev, "Unable to attach bridge\n"); return ret; + } + + connector = drm_bridge_connector_init(drm, encoder); + if (IS_ERR(connector)) { + dev_err(dev, "Unable to init connector\n"); + return PTR_ERR(connector); + } + + drm_connector_attach_encoder(connector, encoder); } drm_for_each_encoder(encoder, drm) { @@ -1112,10 +1267,6 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) } } - /* Set address of our DMA descriptor chain */ - regmap_write(priv->map, JZ_REG_LCD_DA0, dma_hwdesc_phys_f0); - regmap_write(priv->map, JZ_REG_LCD_DA1, dma_hwdesc_phys_f1); - /* Enable OSD if available */ if (soc_info->has_osd) regmap_write(priv->map, JZ_REG_LCD_OSDC, JZ_LCD_OSDC_OSDEN); @@ -1130,6 +1281,20 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) goto err_devclk_disable; } + private_state = kzalloc(sizeof(*private_state), GFP_KERNEL); + if (!private_state) { + ret = -ENOMEM; + goto err_clk_notifier_unregister; + } + + drm_atomic_private_obj_init(drm, &priv->private_obj, &private_state->base, + &ingenic_drm_private_state_funcs); + + ret = 
drmm_add_action_or_reset(drm, ingenic_drm_atomic_private_obj_fini, + &priv->private_obj); + if (ret) + goto err_private_state_free; + ret = drm_dev_register(drm, 0); if (ret) { dev_err(dev, "Failed to register DRM driver\n"); @@ -1140,6 +1305,8 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) return 0; +err_private_state_free: + kfree(private_state); err_clk_notifier_unregister: clk_notifier_unregister(parent_clk, &priv->clock_nb); err_devclk_disable: diff --git a/drivers/gpu/drm/ingenic/ingenic-ipu.c b/drivers/gpu/drm/ingenic/ingenic-ipu.c index aeb8a757d213..2737fc521e15 100644 --- a/drivers/gpu/drm/ingenic/ingenic-ipu.c +++ b/drivers/gpu/drm/ingenic/ingenic-ipu.c @@ -45,6 +45,12 @@ struct soc_info { unsigned int weight, unsigned int offset); }; +struct ingenic_ipu_private_state { + struct drm_private_state base; + + unsigned int num_w, num_h, denom_w, denom_h; +}; + struct ingenic_ipu { struct drm_plane plane; struct drm_device *drm; @@ -54,12 +60,12 @@ struct ingenic_ipu { const struct soc_info *soc_info; bool clk_enabled; - unsigned int num_w, num_h, denom_w, denom_h; - dma_addr_t addr_y, addr_u, addr_v; struct drm_property *sharpness_prop; unsigned int sharpness; + + struct drm_private_obj private_obj; }; /* Signed 15.16 fixed-point math (for bicubic scaling coefficients) */ @@ -73,6 +79,36 @@ static inline struct ingenic_ipu *plane_to_ingenic_ipu(struct drm_plane *plane) return container_of(plane, struct ingenic_ipu, plane); } +static inline struct ingenic_ipu_private_state * +to_ingenic_ipu_priv_state(struct drm_private_state *state) +{ + return container_of(state, struct ingenic_ipu_private_state, base); +} + +static struct ingenic_ipu_private_state * +ingenic_ipu_get_priv_state(struct ingenic_ipu *priv, struct drm_atomic_state *state) +{ + struct drm_private_state *priv_state; + + priv_state = drm_atomic_get_private_obj_state(state, &priv->private_obj); + if (IS_ERR(priv_state)) + return ERR_CAST(priv_state); + + return to_ingenic_ipu_priv_state(priv_state); +} + +static struct ingenic_ipu_private_state * +ingenic_ipu_get_new_priv_state(struct ingenic_ipu *priv, struct drm_atomic_state *state) +{ + struct drm_private_state *priv_state; + + priv_state = drm_atomic_get_new_private_obj_state(state, &priv->private_obj); + if (!priv_state) + return NULL; + + return to_ingenic_ipu_priv_state(priv_state); +} + /* * Apply conventional cubic convolution kernel. Both parameters * and return value are 15.16 signed fixed-point. 
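The IPU hunks above and below operate on signed 15.16 fixed-point values: a 32-bit signed integer with 16 fractional bits, so 1.0 is stored as 65536 (1 << 16). A minimal, self-contained sketch of the convention in plain C; it is illustrative only and not taken from the driver:

    #include <stdint.h>
    #include <stdio.h>

    typedef int32_t s15p16;

    static s15p16 s15p16_from_double(double x) { return (s15p16)(x * 65536.0); }
    static double s15p16_to_double(s15p16 x) { return x / 65536.0; }

    /* Multiply through a 64-bit intermediate so the 48-bit product cannot
     * overflow before the >> 16 rescale; like kernel code, this relies on
     * arithmetic right shift of negative values. */
    static s15p16 s15p16_mul(s15p16 a, s15p16 b)
    {
            return (s15p16)(((int64_t)a * b) >> 16);
    }

    int main(void)
    {
            s15p16 a = s15p16_from_double(1.5);   /* 0x00018000 */
            s15p16 b = s15p16_from_double(-0.25); /* -16384, i.e. 0xffffc000 */
            printf("%f\n", s15p16_to_double(s15p16_mul(a, b))); /* prints -0.375000 */
            return 0;
    }

Keeping the convolution weights in this format is the standard way to evaluate a bicubic kernel with integer arithmetic only, since floating point is unavailable in this kernel context.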
@@ -293,11 +329,16 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane, const struct drm_format_info *finfo; u32 ctrl, stride = 0, coef_index = 0, format = 0; bool needs_modeset, upscaling_w, upscaling_h; + struct ingenic_ipu_private_state *ipu_state; int err; if (!newstate || !newstate->fb) return; + ipu_state = ingenic_ipu_get_new_priv_state(ipu, state); + if (WARN_ON(!ipu_state)) + return; + finfo = drm_format_info(newstate->fb->format->format); if (!ipu->clk_enabled) { @@ -470,27 +511,27 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane, if (ipu->soc_info->has_bicubic) ctrl |= JZ_IPU_CTRL_ZOOM_SEL; - upscaling_w = ipu->num_w > ipu->denom_w; + upscaling_w = ipu_state->num_w > ipu_state->denom_w; if (upscaling_w) ctrl |= JZ_IPU_CTRL_HSCALE; - if (ipu->num_w != 1 || ipu->denom_w != 1) { + if (ipu_state->num_w != 1 || ipu_state->denom_w != 1) { if (!ipu->soc_info->has_bicubic && !upscaling_w) - coef_index |= (ipu->denom_w - 1) << 16; + coef_index |= (ipu_state->denom_w - 1) << 16; else - coef_index |= (ipu->num_w - 1) << 16; + coef_index |= (ipu_state->num_w - 1) << 16; ctrl |= JZ_IPU_CTRL_HRSZ_EN; } - upscaling_h = ipu->num_h > ipu->denom_h; + upscaling_h = ipu_state->num_h > ipu_state->denom_h; if (upscaling_h) ctrl |= JZ_IPU_CTRL_VSCALE; - if (ipu->num_h != 1 || ipu->denom_h != 1) { + if (ipu_state->num_h != 1 || ipu_state->denom_h != 1) { if (!ipu->soc_info->has_bicubic && !upscaling_h) - coef_index |= ipu->denom_h - 1; + coef_index |= ipu_state->denom_h - 1; else - coef_index |= ipu->num_h - 1; + coef_index |= ipu_state->num_h - 1; ctrl |= JZ_IPU_CTRL_VRSZ_EN; } @@ -501,13 +542,13 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane, /* Set the LUT index register */ regmap_write(ipu->map, JZ_REG_IPU_RSZ_COEF_INDEX, coef_index); - if (ipu->num_w != 1 || ipu->denom_w != 1) + if (ipu_state->num_w != 1 || ipu_state->denom_w != 1) ingenic_ipu_set_coefs(ipu, JZ_REG_IPU_HRSZ_COEF_LUT, - ipu->num_w, ipu->denom_w); + ipu_state->num_w, ipu_state->denom_w); - if (ipu->num_h != 1 || ipu->denom_h != 1) + if (ipu_state->num_h != 1 || ipu_state->denom_h != 1) ingenic_ipu_set_coefs(ipu, JZ_REG_IPU_VRSZ_COEF_LUT, - ipu->num_h, ipu->denom_h); + ipu_state->num_h, ipu_state->denom_h); /* Clear STATUS register */ regmap_write(ipu->map, JZ_REG_IPU_STATUS, 0); @@ -519,7 +560,8 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane, dev_dbg(ipu->dev, "Scaling %ux%u to %ux%u (%u:%u horiz, %u:%u vert)\n", newstate->src_w >> 16, newstate->src_h >> 16, newstate->crtc_w, newstate->crtc_h, - ipu->num_w, ipu->denom_w, ipu->num_h, ipu->denom_h); + ipu_state->num_w, ipu_state->denom_w, + ipu_state->num_h, ipu_state->denom_h); } static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane, @@ -533,6 +575,7 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane, struct ingenic_ipu *ipu = plane_to_ingenic_ipu(plane); struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc; struct drm_crtc_state *crtc_state; + struct ingenic_ipu_private_state *ipu_state; if (!crtc) return 0; @@ -541,6 +584,10 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane, if (WARN_ON(!crtc_state)) return -EINVAL; + ipu_state = ingenic_ipu_get_priv_state(ipu, state); + if (IS_ERR(ipu_state)) + return PTR_ERR(ipu_state); + /* Request a full modeset if we are enabling or disabling the IPU. 
*/ if (!old_plane_state->crtc ^ !new_plane_state->crtc) crtc_state->mode_changed = true; @@ -593,10 +640,10 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane, if (num_h > max_h) return -EINVAL; - ipu->num_w = num_w; - ipu->num_h = num_h; - ipu->denom_w = denom_w; - ipu->denom_h = denom_h; + ipu_state->num_w = num_w; + ipu_state->num_h = num_h; + ipu_state->denom_w = denom_w; + ipu_state->denom_h = denom_h; out_check_damage: if (ingenic_drm_map_noncoherent(ipu->master)) @@ -679,6 +726,33 @@ static const struct drm_plane_funcs ingenic_ipu_plane_funcs = { .atomic_set_property = ingenic_ipu_plane_atomic_set_property, }; +static struct drm_private_state * +ingenic_ipu_duplicate_state(struct drm_private_obj *obj) +{ + struct ingenic_ipu_private_state *state = to_ingenic_ipu_priv_state(obj->state); + + state = kmemdup(state, sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); + + return &state->base; +} + +static void ingenic_ipu_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + struct ingenic_ipu_private_state *priv_state = to_ingenic_ipu_priv_state(state); + + kfree(priv_state); +} + +static const struct drm_private_state_funcs ingenic_ipu_private_state_funcs = { + .atomic_duplicate_state = ingenic_ipu_duplicate_state, + .atomic_destroy_state = ingenic_ipu_destroy_state, +}; + static irqreturn_t ingenic_ipu_irq_handler(int irq, void *arg) { struct ingenic_ipu *ipu = arg; @@ -717,6 +791,7 @@ static const struct regmap_config ingenic_ipu_regmap_config = { static int ingenic_ipu_bind(struct device *dev, struct device *master, void *d) { struct platform_device *pdev = to_platform_device(dev); + struct ingenic_ipu_private_state *private_state; const struct soc_info *soc_info; struct drm_device *drm = d; struct drm_plane *plane; @@ -810,7 +885,20 @@ static int ingenic_ipu_bind(struct device *dev, struct device *master, void *d) return err; } + private_state = kzalloc(sizeof(*private_state), GFP_KERNEL); + if (!private_state) { + err = -ENOMEM; + goto err_clk_unprepare; + } + + drm_atomic_private_obj_init(drm, &ipu->private_obj, &private_state->base, + &ingenic_ipu_private_state_funcs); + return 0; + +err_clk_unprepare: + clk_unprepare(ipu->clk); + return err; } static void ingenic_ipu_unbind(struct device *dev, @@ -818,6 +906,7 @@ static void ingenic_ipu_unbind(struct device *dev, { struct ingenic_ipu *ipu = dev_get_drvdata(dev); + drm_atomic_private_obj_fini(&ipu->private_obj); clk_unprepare(ipu->clk); } diff --git a/drivers/gpu/drm/kmb/Kconfig b/drivers/gpu/drm/kmb/Kconfig index bc4cb5e1cd8a..5fdd43dad507 100644 --- a/drivers/gpu/drm/kmb/Kconfig +++ b/drivers/gpu/drm/kmb/Kconfig @@ -3,7 +3,6 @@ config DRM_KMB_DISPLAY depends on DRM depends on ARCH_KEEMBAY || COMPILE_TEST select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER select DRM_MIPI_DSI help diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c index 961ac6fb5fcf..ed2424350773 100644 --- a/drivers/gpu/drm/kmb/kmb_drv.c +++ b/drivers/gpu/drm/kmb/kmb_drv.c @@ -15,6 +15,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_probe_helper.h> @@ -176,6 +177,7 @@ static int kmb_setup_mode_config(struct drm_device *drm) drm->mode_config.min_height = KMB_FB_MIN_HEIGHT; drm->mode_config.max_width = KMB_FB_MAX_WIDTH; drm->mode_config.max_height 
= KMB_FB_MAX_HEIGHT; + drm->mode_config.preferred_depth = 24; drm->mode_config.funcs = &kmb_mode_config_funcs; ret = kmb_setup_crtc(drm); @@ -559,6 +561,8 @@ static int kmb_probe(struct platform_device *pdev) if (ret) goto err_register; + drm_fbdev_generic_setup(&kmb->drm, 0); + return 0; err_register: diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c index 65fdca366e41..02cef0cea657 100644 --- a/drivers/gpu/drm/lima/lima_device.c +++ b/drivers/gpu/drm/lima/lima_device.c @@ -4,6 +4,7 @@ #include <linux/regulator/consumer.h> #include <linux/reset.h> #include <linux/clk.h> +#include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> @@ -357,6 +358,7 @@ int lima_device_init(struct lima_device *ldev) int err, i; dma_set_coherent_mask(ldev->dev, DMA_BIT_MASK(32)); + dma_set_max_seg_size(ldev->dev, UINT_MAX); err = lima_clk_init(ldev); if (err) diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index 640acc060467..f9a9198ef198 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -127,7 +127,7 @@ int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file, if (err) goto out; } else { - struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(obj); + struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(shmem); if (IS_ERR(sgt)) { err = PTR_ERR(sgt); @@ -151,7 +151,7 @@ static void lima_gem_free_object(struct drm_gem_object *obj) if (!list_empty(&bo->va)) dev_err(obj->dev->dev, "lima gem free bo still has va\n"); - drm_gem_shmem_free_object(obj); + drm_gem_shmem_free(&bo->base); } static int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file) @@ -179,7 +179,7 @@ static int lima_gem_pin(struct drm_gem_object *obj) if (bo->heap_size) return -EINVAL; - return drm_gem_shmem_pin(obj); + return drm_gem_shmem_pin(&bo->base); } static int lima_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) @@ -189,7 +189,7 @@ static int lima_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) if (bo->heap_size) return -EINVAL; - return drm_gem_shmem_vmap(obj, map); + return drm_gem_shmem_vmap(&bo->base, map); } static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) @@ -199,19 +199,19 @@ static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) if (bo->heap_size) return -EINVAL; - return drm_gem_shmem_mmap(obj, vma); + return drm_gem_shmem_mmap(&bo->base, vma); } static const struct drm_gem_object_funcs lima_gem_funcs = { .free = lima_gem_free_object, .open = lima_gem_object_open, .close = lima_gem_object_close, - .print_info = drm_gem_shmem_print_info, + .print_info = drm_gem_shmem_object_print_info, .pin = lima_gem_pin, - .unpin = drm_gem_shmem_unpin, - .get_sg_table = drm_gem_shmem_get_sg_table, + .unpin = drm_gem_shmem_object_unpin, + .get_sg_table = drm_gem_shmem_object_get_sg_table, .vmap = lima_gem_vmap, - .vunmap = drm_gem_shmem_vunmap, + .vunmap = drm_gem_shmem_object_vunmap, .mmap = lima_gem_mmap, }; @@ -221,7 +221,7 @@ struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t siz bo = kzalloc(sizeof(*bo), GFP_KERNEL); if (!bo) - return NULL; + return ERR_PTR(-ENOMEM); mutex_init(&bo->lock); INIT_LIST_HEAD(&bo->va); diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index 99d5f6f1a882..5612d73f238f 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -371,7 +371,7 @@ static void 
lima_sched_build_error_task_list(struct lima_sched_task *task) } else { buffer_chunk->size = lima_bo_size(bo); - ret = drm_gem_shmem_vmap(&bo->base.base, &map); + ret = drm_gem_shmem_vmap(&bo->base, &map); if (ret) { kvfree(et); goto out; @@ -379,7 +379,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task) memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size); - drm_gem_shmem_vunmap(&bo->base.base, &map); + drm_gem_shmem_vunmap(&bo->base, &map); } buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size; diff --git a/drivers/gpu/drm/mcde/Kconfig b/drivers/gpu/drm/mcde/Kconfig index 71c689b573c9..d0bf1bc8da3f 100644 --- a/drivers/gpu/drm/mcde/Kconfig +++ b/drivers/gpu/drm/mcde/Kconfig @@ -10,7 +10,6 @@ config DRM_MCDE select DRM_BRIDGE select DRM_PANEL_BRIDGE select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE help diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c index 141cb36b9c07..3a53ebc4e172 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c @@ -205,9 +205,15 @@ static const struct mtk_disp_ccorr_data mt8183_ccorr_driver_data = { .matrix_bits = 10, }; +static const struct mtk_disp_ccorr_data mt8192_ccorr_driver_data = { + .matrix_bits = 11, +}; + static const struct of_device_id mtk_disp_ccorr_driver_dt_match[] = { { .compatible = "mediatek,mt8183-disp-ccorr", .data = &mt8183_ccorr_driver_data}, + { .compatible = "mediatek,mt8192-disp-ccorr", + .data = &mt8192_ccorr_driver_data}, {}, }; MODULE_DEVICE_TABLE(of, mtk_disp_ccorr_driver_dt_match); diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 5326989d5206..2146299e5f52 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -456,6 +456,22 @@ static const struct mtk_disp_ovl_data mt8183_ovl_2l_driver_data = { .fmt_rgb565_is_0 = true, }; +static const struct mtk_disp_ovl_data mt8192_ovl_driver_data = { + .addr = DISP_REG_OVL_ADDR_MT8173, + .gmc_bits = 10, + .layer_nr = 4, + .fmt_rgb565_is_0 = true, + .smi_id_en = true, +}; + +static const struct mtk_disp_ovl_data mt8192_ovl_2l_driver_data = { + .addr = DISP_REG_OVL_ADDR_MT8173, + .gmc_bits = 10, + .layer_nr = 2, + .fmt_rgb565_is_0 = true, + .smi_id_en = true, +}; + static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = { { .compatible = "mediatek,mt2701-disp-ovl", .data = &mt2701_ovl_driver_data}, @@ -465,6 +481,10 @@ static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = { .data = &mt8183_ovl_driver_data}, { .compatible = "mediatek,mt8183-disp-ovl-2l", .data = &mt8183_ovl_2l_driver_data}, + { .compatible = "mediatek,mt8192-disp-ovl", + .data = &mt8192_ovl_driver_data}, + { .compatible = "mediatek,mt8192-disp-ovl-2l", + .data = &mt8192_ovl_2l_driver_data}, {}, }; MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match); diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c index 75d7f45579e2..d41a3970b944 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c @@ -353,6 +353,10 @@ static const struct mtk_disp_rdma_data mt8183_rdma_driver_data = { .fifo_size = 5 * SZ_1K, }; +static const struct mtk_disp_rdma_data mt8192_rdma_driver_data = { + .fifo_size = 5 * SZ_1K, +}; + static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = { { .compatible = 
"mediatek,mt2701-disp-rdma", .data = &mt2701_rdma_driver_data}, @@ -360,6 +364,8 @@ static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = { .data = &mt8173_rdma_driver_data}, { .compatible = "mediatek,mt8183-disp-rdma", .data = &mt8183_rdma_driver_data}, + { .compatible = "mediatek,mt8192-disp-rdma", + .data = &mt8192_rdma_driver_data}, {}, }; MODULE_DEVICE_TABLE(of, mtk_disp_rdma_driver_dt_match); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index a4e80e499674..d661edf7e0fe 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -4,6 +4,8 @@ */ #include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/mailbox_controller.h> #include <linux/pm_runtime.h> #include <linux/soc/mediatek/mtk-cmdq.h> #include <linux/soc/mediatek/mtk-mmsys.h> @@ -50,8 +52,10 @@ struct mtk_drm_crtc { bool pending_async_planes; #if IS_REACHABLE(CONFIG_MTK_CMDQ) - struct cmdq_client *cmdq_client; + struct cmdq_client cmdq_client; + struct cmdq_pkt cmdq_handle; u32 cmdq_event; + u32 cmdq_vblank_cnt; #endif struct device *mmsys_dev; @@ -104,12 +108,60 @@ static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc) } } +#if IS_REACHABLE(CONFIG_MTK_CMDQ) +static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt, + size_t size) +{ + struct device *dev; + dma_addr_t dma_addr; + + pkt->va_base = kzalloc(size, GFP_KERNEL); + if (!pkt->va_base) { + kfree(pkt); + return -ENOMEM; + } + pkt->buf_size = size; + pkt->cl = (void *)client; + + dev = client->chan->mbox->dev; + dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_addr)) { + dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size); + kfree(pkt->va_base); + kfree(pkt); + return -ENOMEM; + } + + pkt->pa_base = dma_addr; + + return 0; +} + +static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt) +{ + struct cmdq_client *client = (struct cmdq_client *)pkt->cl; + + dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size, + DMA_TO_DEVICE); + kfree(pkt->va_base); + kfree(pkt); +} +#endif + static void mtk_drm_crtc_destroy(struct drm_crtc *crtc) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); mtk_mutex_put(mtk_crtc->mutex); +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + mtk_drm_cmdq_pkt_destroy(&mtk_crtc->cmdq_handle); + if (mtk_crtc->cmdq_client.chan) { + mbox_free_channel(mtk_crtc->cmdq_client.chan); + mtk_crtc->cmdq_client.chan = NULL; + } +#endif drm_crtc_cleanup(crtc); } @@ -222,9 +274,46 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc, } #if IS_REACHABLE(CONFIG_MTK_CMDQ) -static void ddp_cmdq_cb(struct cmdq_cb_data data) +static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg) { - cmdq_pkt_destroy(data.data); + struct cmdq_cb_data *data = mssg; + struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client, client); + struct mtk_drm_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client); + struct mtk_crtc_state *state; + unsigned int i; + + if (data->sta < 0) + return; + + state = to_mtk_crtc_state(mtk_crtc->base.state); + + state->pending_config = false; + + if (mtk_crtc->pending_planes) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *plane_state; + + plane_state = to_mtk_plane_state(plane->state); + + plane_state->pending.config = false; + } + mtk_crtc->pending_planes = false; + } + + if (mtk_crtc->pending_async_planes) 
{ + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *plane_state; + + plane_state = to_mtk_plane_state(plane->state); + + plane_state->pending.async_config = false; + } + mtk_crtc->pending_async_planes = false; + } + + mtk_crtc->cmdq_vblank_cnt = 0; } #endif @@ -378,7 +467,8 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc, state->pending_vrefresh, 0, cmdq_handle); - state->pending_config = false; + if (!cmdq_handle) + state->pending_config = false; } if (mtk_crtc->pending_planes) { @@ -398,9 +488,12 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc, mtk_ddp_comp_layer_config(comp, local_layer, plane_state, cmdq_handle); - plane_state->pending.config = false; + if (!cmdq_handle) + plane_state->pending.config = false; } - mtk_crtc->pending_planes = false; + + if (!cmdq_handle) + mtk_crtc->pending_planes = false; } if (mtk_crtc->pending_async_planes) { @@ -420,9 +513,12 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc, mtk_ddp_comp_layer_config(comp, local_layer, plane_state, cmdq_handle); - plane_state->pending.async_config = false; + if (!cmdq_handle) + plane_state->pending.async_config = false; } - mtk_crtc->pending_async_planes = false; + + if (!cmdq_handle) + mtk_crtc->pending_async_planes = false; } } @@ -430,7 +526,7 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc, bool needs_vblank) { #if IS_REACHABLE(CONFIG_MTK_CMDQ) - struct cmdq_pkt *cmdq_handle; + struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle; #endif struct drm_crtc *crtc = &mtk_crtc->base; struct mtk_drm_private *priv = crtc->dev->dev_private; @@ -468,14 +564,28 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc, mtk_mutex_release(mtk_crtc->mutex); } #if IS_REACHABLE(CONFIG_MTK_CMDQ) - if (mtk_crtc->cmdq_client) { - mbox_flush(mtk_crtc->cmdq_client->chan, 2000); - cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE); + if (mtk_crtc->cmdq_client.chan) { + mbox_flush(mtk_crtc->cmdq_client.chan, 2000); + cmdq_handle->cmd_buf_size = 0; cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event); cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false); mtk_crtc_ddp_config(crtc, cmdq_handle); cmdq_pkt_finalize(cmdq_handle); - cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle); + dma_sync_single_for_device(mtk_crtc->cmdq_client.chan->mbox->dev, + cmdq_handle->pa_base, + cmdq_handle->cmd_buf_size, + DMA_TO_DEVICE); + /* + * The CMDQ command should execute within the next three vblanks: + * occasionally one vblank interrupt fires before the message is sent, + * and one more fires after the command completes, so the timeout is + * set to three vblank interrupts. + * If the command has not executed within three vblanks, it timed out. 
+ */ + mtk_crtc->cmdq_vblank_cnt = 3; + + mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle); + mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0); } #endif mtk_crtc->config_updating = false; @@ -489,12 +599,15 @@ static void mtk_crtc_ddp_irq(void *data) struct mtk_drm_private *priv = crtc->dev->dev_private; #if IS_REACHABLE(CONFIG_MTK_CMDQ) - if (!priv->data->shadow_register && !mtk_crtc->cmdq_client) + if (!priv->data->shadow_register && !mtk_crtc->cmdq_client.chan) + mtk_crtc_ddp_config(crtc, NULL); + else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0) + DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n", + drm_crtc_index(&mtk_crtc->base)); #else if (!priv->data->shadow_register) -#endif mtk_crtc_ddp_config(crtc, NULL); - +#endif mtk_drm_finish_page_flip(mtk_crtc); } @@ -829,16 +942,20 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, mutex_init(&mtk_crtc->hw_lock); #if IS_REACHABLE(CONFIG_MTK_CMDQ) - mtk_crtc->cmdq_client = - cmdq_mbox_create(mtk_crtc->mmsys_dev, - drm_crtc_index(&mtk_crtc->base)); - if (IS_ERR(mtk_crtc->cmdq_client)) { + mtk_crtc->cmdq_client.client.dev = mtk_crtc->mmsys_dev; + mtk_crtc->cmdq_client.client.tx_block = false; + mtk_crtc->cmdq_client.client.knows_txdone = true; + mtk_crtc->cmdq_client.client.rx_callback = ddp_cmdq_cb; + mtk_crtc->cmdq_client.chan = + mbox_request_channel(&mtk_crtc->cmdq_client.client, + drm_crtc_index(&mtk_crtc->base)); + if (IS_ERR(mtk_crtc->cmdq_client.chan)) { dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n", drm_crtc_index(&mtk_crtc->base)); - mtk_crtc->cmdq_client = NULL; + mtk_crtc->cmdq_client.chan = NULL; } - if (mtk_crtc->cmdq_client) { + if (mtk_crtc->cmdq_client.chan) { ret = of_property_read_u32_index(priv->mutex_node, "mediatek,gce-events", drm_crtc_index(&mtk_crtc->base), @@ -846,8 +963,18 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, if (ret) { dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n", drm_crtc_index(&mtk_crtc->base)); - cmdq_mbox_destroy(mtk_crtc->cmdq_client); - mtk_crtc->cmdq_client = NULL; + mbox_free_channel(mtk_crtc->cmdq_client.chan); + mtk_crtc->cmdq_client.chan = NULL; + } else { + ret = mtk_drm_cmdq_pkt_create(&mtk_crtc->cmdq_client, + &mtk_crtc->cmdq_handle, + PAGE_SIZE); + if (ret) { + dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n", + drm_crtc_index(&mtk_crtc->base)); + mbox_free_channel(mtk_crtc->cmdq_client.chan); + mtk_crtc->cmdq_client.chan = NULL; + } } } #endif diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c index 99cbf44463e4..b4b682bc1991 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c @@ -20,45 +20,39 @@ #include "mtk_drm_ddp_comp.h" #include "mtk_drm_crtc.h" -#define DISP_OD_EN 0x0000 -#define DISP_OD_INTEN 0x0008 -#define DISP_OD_INTSTA 0x000c -#define DISP_OD_CFG 0x0020 -#define DISP_OD_SIZE 0x0030 -#define DISP_DITHER_5 0x0114 -#define DISP_DITHER_7 0x011c -#define DISP_DITHER_15 0x013c -#define DISP_DITHER_16 0x0140 -#define DISP_REG_UFO_START 0x0000 - -#define DISP_DITHER_EN 0x0000 +#define DISP_REG_DITHER_EN 0x0000 #define DITHER_EN BIT(0) -#define DISP_DITHER_CFG 0x0020 +#define DISP_REG_DITHER_CFG 0x0020 #define DITHER_RELAY_MODE BIT(0) #define DITHER_ENGINE_EN BIT(1) -#define DISP_DITHER_SIZE 0x0030 - -#define LUT_10BIT_MASK 0x03ff - -#define OD_RELAYMODE BIT(0) - -#define UFO_BYPASS BIT(2) - #define DISP_DITHERING BIT(2) +#define 
DISP_REG_DITHER_SIZE 0x0030 +#define DISP_REG_DITHER_5 0x0114 +#define DISP_REG_DITHER_7 0x011c +#define DISP_REG_DITHER_15 0x013c #define DITHER_LSB_ERR_SHIFT_R(x) (((x) & 0x7) << 28) -#define DITHER_OVFLW_BIT_R(x) (((x) & 0x7) << 24) #define DITHER_ADD_LSHIFT_R(x) (((x) & 0x7) << 20) -#define DITHER_ADD_RSHIFT_R(x) (((x) & 0x7) << 16) #define DITHER_NEW_BIT_MODE BIT(0) +#define DISP_REG_DITHER_16 0x0140 #define DITHER_LSB_ERR_SHIFT_B(x) (((x) & 0x7) << 28) -#define DITHER_OVFLW_BIT_B(x) (((x) & 0x7) << 24) #define DITHER_ADD_LSHIFT_B(x) (((x) & 0x7) << 20) -#define DITHER_ADD_RSHIFT_B(x) (((x) & 0x7) << 16) #define DITHER_LSB_ERR_SHIFT_G(x) (((x) & 0x7) << 12) -#define DITHER_OVFLW_BIT_G(x) (((x) & 0x7) << 8) #define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4) -#define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0) + +#define DISP_REG_OD_EN 0x0000 +#define DISP_REG_OD_CFG 0x0020 +#define OD_RELAYMODE BIT(0) +#define DISP_REG_OD_SIZE 0x0030 + +#define DISP_REG_POSTMASK_EN 0x0000 +#define POSTMASK_EN BIT(0) +#define DISP_REG_POSTMASK_CFG 0x0020 +#define POSTMASK_RELAY_MODE BIT(0) +#define DISP_REG_POSTMASK_SIZE 0x0030 + +#define DISP_REG_UFO_START 0x0000 +#define UFO_BYPASS BIT(2) struct mtk_ddp_comp_dev { struct clk *clk; @@ -134,25 +128,52 @@ void mtk_dither_set_common(void __iomem *regs, struct cmdq_client_reg *cmdq_reg, return; if (bpc >= MTK_MIN_BPC) { - mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_DITHER_5); - mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_DITHER_7); + mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_REG_DITHER_5); + mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_REG_DITHER_7); mtk_ddp_write(cmdq_pkt, DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) | DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) | DITHER_NEW_BIT_MODE, - cmdq_reg, regs, DISP_DITHER_15); + cmdq_reg, regs, DISP_REG_DITHER_15); mtk_ddp_write(cmdq_pkt, DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) | DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) | DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) | DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc), - cmdq_reg, regs, DISP_DITHER_16); + cmdq_reg, regs, DISP_REG_DITHER_16); mtk_ddp_write(cmdq_pkt, dither_en, cmdq_reg, regs, cfg); } } +static void mtk_dither_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE); + mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs, + DISP_REG_DITHER_CFG); + mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_REG_DITHER_CFG, + DITHER_ENGINE_EN, cmdq_pkt); +} + +static void mtk_dither_start(struct device *dev) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + writel(DITHER_EN, priv->regs + DISP_REG_DITHER_EN); +} + +static void mtk_dither_stop(struct device *dev) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + writel_relaxed(0x0, priv->regs + DISP_REG_DITHER_EN); +} + static void mtk_dither_set(struct device *dev, unsigned int bpc, - unsigned int cfg, struct cmdq_pkt *cmdq_pkt) + unsigned int cfg, struct cmdq_pkt *cmdq_pkt) { struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); @@ -166,49 +187,49 @@ static void mtk_od_config(struct device *dev, unsigned int w, { struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_OD_SIZE); - mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, &priv->cmdq_reg, priv->regs, DISP_OD_CFG); - 
mtk_dither_set(dev, bpc, DISP_OD_CFG, cmdq_pkt); + mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_REG_OD_SIZE); + mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, &priv->cmdq_reg, priv->regs, DISP_REG_OD_CFG); + mtk_dither_set(dev, bpc, DISP_REG_OD_CFG, cmdq_pkt); } static void mtk_od_start(struct device *dev) { struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - writel(1, priv->regs + DISP_OD_EN); + writel(1, priv->regs + DISP_REG_OD_EN); } -static void mtk_ufoe_start(struct device *dev) +static void mtk_postmask_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) { struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - writel(UFO_BYPASS, priv->regs + DISP_REG_UFO_START); + mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, + DISP_REG_POSTMASK_SIZE); + mtk_ddp_write(cmdq_pkt, POSTMASK_RELAY_MODE, &priv->cmdq_reg, + priv->regs, DISP_REG_POSTMASK_CFG); } -static void mtk_dither_config(struct device *dev, unsigned int w, - unsigned int h, unsigned int vrefresh, - unsigned int bpc, struct cmdq_pkt *cmdq_pkt) +static void mtk_postmask_start(struct device *dev) { struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs, DISP_DITHER_SIZE); - mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs, DISP_DITHER_CFG); - mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_DITHER_CFG, - DITHER_ENGINE_EN, cmdq_pkt); + writel(POSTMASK_EN, priv->regs + DISP_REG_POSTMASK_EN); } -static void mtk_dither_start(struct device *dev) +static void mtk_postmask_stop(struct device *dev) { struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - writel(DITHER_EN, priv->regs + DISP_DITHER_EN); + writel_relaxed(0x0, priv->regs + DISP_REG_POSTMASK_EN); } -static void mtk_dither_stop(struct device *dev) +static void mtk_ufoe_start(struct device *dev) { struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - writel_relaxed(0x0, priv->regs + DISP_DITHER_EN); + writel(UFO_BYPASS, priv->regs + DISP_REG_UFO_START); } static const struct mtk_ddp_comp_funcs ddp_aal = { @@ -286,6 +307,14 @@ static const struct mtk_ddp_comp_funcs ddp_ovl = { .bgclr_in_off = mtk_ovl_bgclr_in_off, }; +static const struct mtk_ddp_comp_funcs ddp_postmask = { + .clk_enable = mtk_ddp_clk_enable, + .clk_disable = mtk_ddp_clk_disable, + .config = mtk_postmask_config, + .start = mtk_postmask_start, + .stop = mtk_postmask_stop, +}; + static const struct mtk_ddp_comp_funcs ddp_rdma = { .clk_enable = mtk_rdma_clk_enable, .clk_disable = mtk_rdma_clk_disable, @@ -305,22 +334,23 @@ static const struct mtk_ddp_comp_funcs ddp_ufoe = { }; static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = { + [MTK_DISP_AAL] = "aal", + [MTK_DISP_BLS] = "bls", + [MTK_DISP_CCORR] = "ccorr", + [MTK_DISP_COLOR] = "color", + [MTK_DISP_DITHER] = "dither", + [MTK_DISP_GAMMA] = "gamma", + [MTK_DISP_MUTEX] = "mutex", + [MTK_DISP_OD] = "od", [MTK_DISP_OVL] = "ovl", [MTK_DISP_OVL_2L] = "ovl-2l", + [MTK_DISP_POSTMASK] = "postmask", + [MTK_DISP_PWM] = "pwm", [MTK_DISP_RDMA] = "rdma", - [MTK_DISP_WDMA] = "wdma", - [MTK_DISP_COLOR] = "color", - [MTK_DISP_CCORR] = "ccorr", - [MTK_DISP_AAL] = "aal", - [MTK_DISP_GAMMA] = "gamma", - [MTK_DISP_DITHER] = "dither", [MTK_DISP_UFOE] = "ufoe", - [MTK_DSI] = "dsi", + [MTK_DISP_WDMA] = "wdma", [MTK_DPI] = "dpi", - [MTK_DISP_PWM] = "pwm", - [MTK_DISP_MUTEX] = "mutex", - [MTK_DISP_OD] = "od", - [MTK_DISP_BLS] = "bls", + [MTK_DSI] = "dsi", }; 
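Taken together, the mtk_ddp_comp_stem table above and the mtk_ddp_matches table below turn an abstract DDP_COMPONENT_* ID into a component type, an alias index, and a set of function pointers. A stripped-down, self-contained sketch of the same designated-initializer lookup pattern (simplified types and names, not the driver's actual structures):

    #include <stdio.h>

    enum comp_type { TYPE_OVL, TYPE_OVL_2L, TYPE_RDMA, TYPE_MAX };
    enum comp_id { ID_OVL0, ID_OVL_2L2, ID_RDMA4, ID_MAX };

    static const char * const stem[TYPE_MAX] = {
            [TYPE_OVL]    = "ovl",
            [TYPE_OVL_2L] = "ovl-2l",
            [TYPE_RDMA]   = "rdma",
    };

    struct match { enum comp_type type; int alias_id; };

    static const struct match matches[ID_MAX] = {
            [ID_OVL0]    = { TYPE_OVL,    0 },
            [ID_OVL_2L2] = { TYPE_OVL_2L, 2 },
            [ID_RDMA4]   = { TYPE_RDMA,   4 }, /* cf. the new MT8192 external-path RDMA */
    };

    int main(void)
    {
            enum comp_id id = ID_RDMA4;
            printf("%s%d\n", stem[matches[id].type], matches[id].alias_id); /* "rdma4" */
            return 0;
    }

Because designated initializers are order-independent, the hunk below can re-sort both tables alphabetically and add the new MT8192 entries without changing the meaning of any existing entry.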
struct mtk_ddp_comp_match { @@ -330,35 +360,38 @@ struct mtk_ddp_comp_match { }; static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = { - [DDP_COMPONENT_AAL0] = { MTK_DISP_AAL, 0, &ddp_aal }, - [DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal }, - [DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL }, - [DDP_COMPONENT_CCORR] = { MTK_DISP_CCORR, 0, &ddp_ccorr }, - [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color }, - [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color }, - [DDP_COMPONENT_DITHER] = { MTK_DISP_DITHER, 0, &ddp_dither }, - [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, &ddp_dpi }, - [DDP_COMPONENT_DPI1] = { MTK_DPI, 1, &ddp_dpi }, - [DDP_COMPONENT_DSI0] = { MTK_DSI, 0, &ddp_dsi }, - [DDP_COMPONENT_DSI1] = { MTK_DSI, 1, &ddp_dsi }, - [DDP_COMPONENT_DSI2] = { MTK_DSI, 2, &ddp_dsi }, - [DDP_COMPONENT_DSI3] = { MTK_DSI, 3, &ddp_dsi }, - [DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, &ddp_gamma }, - [DDP_COMPONENT_OD0] = { MTK_DISP_OD, 0, &ddp_od }, - [DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od }, - [DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, &ddp_ovl }, - [DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, &ddp_ovl }, - [DDP_COMPONENT_OVL_2L0] = { MTK_DISP_OVL_2L, 0, &ddp_ovl }, - [DDP_COMPONENT_OVL_2L1] = { MTK_DISP_OVL_2L, 1, &ddp_ovl }, - [DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL }, - [DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL }, - [DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL }, - [DDP_COMPONENT_RDMA0] = { MTK_DISP_RDMA, 0, &ddp_rdma }, - [DDP_COMPONENT_RDMA1] = { MTK_DISP_RDMA, 1, &ddp_rdma }, - [DDP_COMPONENT_RDMA2] = { MTK_DISP_RDMA, 2, &ddp_rdma }, - [DDP_COMPONENT_UFOE] = { MTK_DISP_UFOE, 0, &ddp_ufoe }, - [DDP_COMPONENT_WDMA0] = { MTK_DISP_WDMA, 0, NULL }, - [DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL }, + [DDP_COMPONENT_AAL0] = { MTK_DISP_AAL, 0, &ddp_aal }, + [DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal }, + [DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL }, + [DDP_COMPONENT_CCORR] = { MTK_DISP_CCORR, 0, &ddp_ccorr }, + [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color }, + [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color }, + [DDP_COMPONENT_DITHER] = { MTK_DISP_DITHER, 0, &ddp_dither }, + [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, &ddp_dpi }, + [DDP_COMPONENT_DPI1] = { MTK_DPI, 1, &ddp_dpi }, + [DDP_COMPONENT_DSI0] = { MTK_DSI, 0, &ddp_dsi }, + [DDP_COMPONENT_DSI1] = { MTK_DSI, 1, &ddp_dsi }, + [DDP_COMPONENT_DSI2] = { MTK_DSI, 2, &ddp_dsi }, + [DDP_COMPONENT_DSI3] = { MTK_DSI, 3, &ddp_dsi }, + [DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, &ddp_gamma }, + [DDP_COMPONENT_OD0] = { MTK_DISP_OD, 0, &ddp_od }, + [DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od }, + [DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, &ddp_ovl }, + [DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, &ddp_ovl }, + [DDP_COMPONENT_OVL_2L0] = { MTK_DISP_OVL_2L, 0, &ddp_ovl }, + [DDP_COMPONENT_OVL_2L1] = { MTK_DISP_OVL_2L, 1, &ddp_ovl }, + [DDP_COMPONENT_OVL_2L2] = { MTK_DISP_OVL_2L, 2, &ddp_ovl }, + [DDP_COMPONENT_POSTMASK0] = { MTK_DISP_POSTMASK, 0, &ddp_postmask }, + [DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL }, + [DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL }, + [DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL }, + [DDP_COMPONENT_RDMA0] = { MTK_DISP_RDMA, 0, &ddp_rdma }, + [DDP_COMPONENT_RDMA1] = { MTK_DISP_RDMA, 1, &ddp_rdma }, + [DDP_COMPONENT_RDMA2] = { MTK_DISP_RDMA, 2, &ddp_rdma }, + [DDP_COMPONENT_RDMA4] = { MTK_DISP_RDMA, 4, &ddp_rdma }, + [DDP_COMPONENT_UFOE] = { MTK_DISP_UFOE, 0, &ddp_ufoe }, + [DDP_COMPONENT_WDMA0] = { MTK_DISP_WDMA, 0, NULL }, + 
[DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL }, }; static bool mtk_drm_find_comp_in_ddp(struct device *dev, @@ -475,12 +508,12 @@ int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp, type == MTK_DISP_CCORR || type == MTK_DISP_COLOR || type == MTK_DISP_GAMMA || - type == MTK_DPI || - type == MTK_DSI || type == MTK_DISP_OVL || type == MTK_DISP_OVL_2L || type == MTK_DISP_PWM || - type == MTK_DISP_RDMA) + type == MTK_DISP_RDMA || + type == MTK_DPI || + type == MTK_DSI) return 0; priv = devm_kzalloc(comp->dev, sizeof(*priv), GFP_KERNEL); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h index bb914d976cf5..4c6a98662305 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h @@ -18,22 +18,23 @@ struct mtk_plane_state; struct drm_crtc_state; enum mtk_ddp_comp_type { - MTK_DISP_OVL, - MTK_DISP_OVL_2L, - MTK_DISP_RDMA, - MTK_DISP_WDMA, - MTK_DISP_COLOR, + MTK_DISP_AAL, + MTK_DISP_BLS, MTK_DISP_CCORR, + MTK_DISP_COLOR, MTK_DISP_DITHER, - MTK_DISP_AAL, MTK_DISP_GAMMA, - MTK_DISP_UFOE, - MTK_DSI, - MTK_DPI, - MTK_DISP_PWM, MTK_DISP_MUTEX, MTK_DISP_OD, - MTK_DISP_BLS, + MTK_DISP_OVL, + MTK_DISP_OVL_2L, + MTK_DISP_POSTMASK, + MTK_DISP_PWM, + MTK_DISP_RDMA, + MTK_DISP_UFOE, + MTK_DISP_WDMA, + MTK_DPI, + MTK_DSI, MTK_DDP_COMP_TYPE_MAX, }; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index aec39724ebeb..56ff8c57ef8f 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -158,6 +158,25 @@ static const enum mtk_ddp_comp_id mt8183_mtk_ddp_ext[] = { DDP_COMPONENT_DPI0, }; +static const enum mtk_ddp_comp_id mt8192_mtk_ddp_main[] = { + DDP_COMPONENT_OVL0, + DDP_COMPONENT_OVL_2L0, + DDP_COMPONENT_RDMA0, + DDP_COMPONENT_COLOR0, + DDP_COMPONENT_CCORR, + DDP_COMPONENT_AAL0, + DDP_COMPONENT_GAMMA, + DDP_COMPONENT_POSTMASK0, + DDP_COMPONENT_DITHER, + DDP_COMPONENT_DSI0, +}; + +static const enum mtk_ddp_comp_id mt8192_mtk_ddp_ext[] = { + DDP_COMPONENT_OVL_2L2, + DDP_COMPONENT_RDMA4, + DDP_COMPONENT_DPI0, +}; + static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = { .main_path = mt2701_mtk_ddp_main, .main_len = ARRAY_SIZE(mt2701_mtk_ddp_main), @@ -202,6 +221,13 @@ static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = { .ext_len = ARRAY_SIZE(mt8183_mtk_ddp_ext), }; +static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = { + .main_path = mt8192_mtk_ddp_main, + .main_len = ARRAY_SIZE(mt8192_mtk_ddp_main), + .ext_path = mt8192_mtk_ddp_ext, + .ext_len = ARRAY_SIZE(mt8192_mtk_ddp_ext), +}; + static int mtk_drm_kms_init(struct drm_device *drm) { struct mtk_drm_private *private = drm->dev_private; @@ -397,68 +423,36 @@ static const struct component_master_ops mtk_drm_ops = { }; static const struct of_device_id mtk_ddp_comp_dt_ids[] = { - { .compatible = "mediatek,mt2701-disp-ovl", - .data = (void *)MTK_DISP_OVL }, - { .compatible = "mediatek,mt8167-disp-ovl", - .data = (void *)MTK_DISP_OVL }, - { .compatible = "mediatek,mt8173-disp-ovl", - .data = (void *)MTK_DISP_OVL }, - { .compatible = "mediatek,mt8183-disp-ovl", - .data = (void *)MTK_DISP_OVL }, - { .compatible = "mediatek,mt8183-disp-ovl-2l", - .data = (void *)MTK_DISP_OVL_2L }, - { .compatible = "mediatek,mt2701-disp-rdma", - .data = (void *)MTK_DISP_RDMA }, - { .compatible = "mediatek,mt8167-disp-rdma", - .data = (void *)MTK_DISP_RDMA }, - { .compatible = "mediatek,mt8173-disp-rdma", - .data = (void 
*)MTK_DISP_RDMA }, - { .compatible = "mediatek,mt8183-disp-rdma", - .data = (void *)MTK_DISP_RDMA }, - { .compatible = "mediatek,mt8173-disp-wdma", - .data = (void *)MTK_DISP_WDMA }, + { .compatible = "mediatek,mt8167-disp-aal", + .data = (void *)MTK_DISP_AAL}, + { .compatible = "mediatek,mt8173-disp-aal", + .data = (void *)MTK_DISP_AAL}, + { .compatible = "mediatek,mt8183-disp-aal", + .data = (void *)MTK_DISP_AAL}, + { .compatible = "mediatek,mt8192-disp-aal", + .data = (void *)MTK_DISP_AAL}, { .compatible = "mediatek,mt8167-disp-ccorr", .data = (void *)MTK_DISP_CCORR }, { .compatible = "mediatek,mt8183-disp-ccorr", .data = (void *)MTK_DISP_CCORR }, + { .compatible = "mediatek,mt8192-disp-ccorr", + .data = (void *)MTK_DISP_CCORR }, { .compatible = "mediatek,mt2701-disp-color", .data = (void *)MTK_DISP_COLOR }, { .compatible = "mediatek,mt8167-disp-color", .data = (void *)MTK_DISP_COLOR }, { .compatible = "mediatek,mt8173-disp-color", .data = (void *)MTK_DISP_COLOR }, - { .compatible = "mediatek,mt8167-disp-aal", - .data = (void *)MTK_DISP_AAL}, - { .compatible = "mediatek,mt8173-disp-aal", - .data = (void *)MTK_DISP_AAL}, - { .compatible = "mediatek,mt8183-disp-aal", - .data = (void *)MTK_DISP_AAL}, + { .compatible = "mediatek,mt8167-disp-dither", + .data = (void *)MTK_DISP_DITHER }, + { .compatible = "mediatek,mt8183-disp-dither", + .data = (void *)MTK_DISP_DITHER }, { .compatible = "mediatek,mt8167-disp-gamma", .data = (void *)MTK_DISP_GAMMA, }, { .compatible = "mediatek,mt8173-disp-gamma", .data = (void *)MTK_DISP_GAMMA, }, { .compatible = "mediatek,mt8183-disp-gamma", .data = (void *)MTK_DISP_GAMMA, }, - { .compatible = "mediatek,mt8167-disp-dither", - .data = (void *)MTK_DISP_DITHER }, - { .compatible = "mediatek,mt8183-disp-dither", - .data = (void *)MTK_DISP_DITHER }, - { .compatible = "mediatek,mt8173-disp-ufoe", - .data = (void *)MTK_DISP_UFOE }, - { .compatible = "mediatek,mt2701-dsi", - .data = (void *)MTK_DSI }, - { .compatible = "mediatek,mt8167-dsi", - .data = (void *)MTK_DSI }, - { .compatible = "mediatek,mt8173-dsi", - .data = (void *)MTK_DSI }, - { .compatible = "mediatek,mt8183-dsi", - .data = (void *)MTK_DSI }, - { .compatible = "mediatek,mt2701-dpi", - .data = (void *)MTK_DPI }, - { .compatible = "mediatek,mt8173-dpi", - .data = (void *)MTK_DPI }, - { .compatible = "mediatek,mt8183-dpi", - .data = (void *)MTK_DPI }, { .compatible = "mediatek,mt2701-disp-mutex", .data = (void *)MTK_DISP_MUTEX }, { .compatible = "mediatek,mt2712-disp-mutex", @@ -469,14 +463,60 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = { .data = (void *)MTK_DISP_MUTEX }, { .compatible = "mediatek,mt8183-disp-mutex", .data = (void *)MTK_DISP_MUTEX }, + { .compatible = "mediatek,mt8192-disp-mutex", + .data = (void *)MTK_DISP_MUTEX }, + { .compatible = "mediatek,mt8173-disp-od", + .data = (void *)MTK_DISP_OD }, + { .compatible = "mediatek,mt2701-disp-ovl", + .data = (void *)MTK_DISP_OVL }, + { .compatible = "mediatek,mt8167-disp-ovl", + .data = (void *)MTK_DISP_OVL }, + { .compatible = "mediatek,mt8173-disp-ovl", + .data = (void *)MTK_DISP_OVL }, + { .compatible = "mediatek,mt8183-disp-ovl", + .data = (void *)MTK_DISP_OVL }, + { .compatible = "mediatek,mt8192-disp-ovl", + .data = (void *)MTK_DISP_OVL }, + { .compatible = "mediatek,mt8183-disp-ovl-2l", + .data = (void *)MTK_DISP_OVL_2L }, + { .compatible = "mediatek,mt8192-disp-ovl-2l", + .data = (void *)MTK_DISP_OVL_2L }, + { .compatible = "mediatek,mt8192-disp-postmask", + .data = (void *)MTK_DISP_POSTMASK }, { .compatible = 
"mediatek,mt2701-disp-pwm", .data = (void *)MTK_DISP_BLS }, { .compatible = "mediatek,mt8167-disp-pwm", .data = (void *)MTK_DISP_PWM }, { .compatible = "mediatek,mt8173-disp-pwm", .data = (void *)MTK_DISP_PWM }, - { .compatible = "mediatek,mt8173-disp-od", - .data = (void *)MTK_DISP_OD }, + { .compatible = "mediatek,mt2701-disp-rdma", + .data = (void *)MTK_DISP_RDMA }, + { .compatible = "mediatek,mt8167-disp-rdma", + .data = (void *)MTK_DISP_RDMA }, + { .compatible = "mediatek,mt8173-disp-rdma", + .data = (void *)MTK_DISP_RDMA }, + { .compatible = "mediatek,mt8183-disp-rdma", + .data = (void *)MTK_DISP_RDMA }, + { .compatible = "mediatek,mt8192-disp-rdma", + .data = (void *)MTK_DISP_RDMA }, + { .compatible = "mediatek,mt8173-disp-ufoe", + .data = (void *)MTK_DISP_UFOE }, + { .compatible = "mediatek,mt8173-disp-wdma", + .data = (void *)MTK_DISP_WDMA }, + { .compatible = "mediatek,mt2701-dpi", + .data = (void *)MTK_DPI }, + { .compatible = "mediatek,mt8167-dsi", + .data = (void *)MTK_DSI }, + { .compatible = "mediatek,mt8173-dpi", + .data = (void *)MTK_DPI }, + { .compatible = "mediatek,mt8183-dpi", + .data = (void *)MTK_DPI }, + { .compatible = "mediatek,mt2701-dsi", + .data = (void *)MTK_DSI }, + { .compatible = "mediatek,mt8173-dsi", + .data = (void *)MTK_DSI }, + { .compatible = "mediatek,mt8183-dsi", + .data = (void *)MTK_DSI }, { } }; @@ -493,6 +533,8 @@ static const struct of_device_id mtk_drm_of_ids[] = { .data = &mt8173_mmsys_driver_data}, { .compatible = "mediatek,mt8183-mmsys", .data = &mt8183_mmsys_driver_data}, + { .compatible = "mediatek,mt8192-mmsys", + .data = &mt8192_mmsys_driver_data}, { } }; MODULE_DEVICE_TABLE(of, mtk_drm_of_ids); @@ -568,8 +610,8 @@ static int mtk_drm_probe(struct platform_device *pdev) comp_type == MTK_DISP_OVL || comp_type == MTK_DISP_OVL_2L || comp_type == MTK_DISP_RDMA || - comp_type == MTK_DSI || - comp_type == MTK_DPI) { + comp_type == MTK_DPI || + comp_type == MTK_DSI) { dev_info(dev, "Adding component match for %pOF\n", node); drm_of_component_match_add(dev, &match, compare_of, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c index 734a1fb052df..075747a6d4aa 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c @@ -44,9 +44,10 @@ static void mtk_plane_reset(struct drm_plane *plane) state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) return; - plane->state = &state->base; } + __drm_atomic_helper_plane_reset(plane, &state->base); + state->base.plane = plane; state->pending.format = DRM_FORMAT_RGB565; } diff --git a/drivers/gpu/drm/meson/Kconfig b/drivers/gpu/drm/meson/Kconfig index 9f9281dd49f8..6c70fc3214af 100644 --- a/drivers/gpu/drm/meson/Kconfig +++ b/drivers/gpu/drm/meson/Kconfig @@ -4,11 +4,12 @@ config DRM_MESON depends on DRM && OF && (ARM || ARM64) depends on ARCH_MESON || COMPILE_TEST select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER + select DRM_DISPLAY_CONNECTOR select VIDEOMODE_HELPERS select REGMAP_MMIO select MESON_CANVAS + select CEC_CORE if CEC_NOTIFIER config DRM_MESON_DW_HDMI tristate "HDMI Synopsys Controller support for Amlogic Meson Display" diff --git a/drivers/gpu/drm/meson/Makefile b/drivers/gpu/drm/meson/Makefile index 28a519cdf66b..3afa31bdc950 100644 --- a/drivers/gpu/drm/meson/Makefile +++ b/drivers/gpu/drm/meson/Makefile @@ -1,7 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only -meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o +meson-drm-y := meson_drv.o meson_plane.o 
meson_crtc.o meson_encoder_cvbs.o meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_overlay.o meson-drm-y += meson_rdma.o meson_osd_afbcd.o +meson-drm-y += meson_encoder_hdmi.o obj-$(CONFIG_DRM_MESON) += meson-drm.o obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 7f41a33592c8..80f1d439841a 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -31,7 +31,8 @@ #include "meson_plane.h" #include "meson_osd_afbcd.h" #include "meson_registers.h" -#include "meson_venc_cvbs.h" +#include "meson_encoder_cvbs.h" +#include "meson_encoder_hdmi.h" #include "meson_viu.h" #include "meson_vpp.h" #include "meson_rdma.h" @@ -306,7 +307,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) /* Encoder Initialization */ - ret = meson_venc_cvbs_create(priv); + ret = meson_encoder_cvbs_init(priv); if (ret) goto free_drm; @@ -318,6 +319,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) } } + ret = meson_encoder_hdmi_init(priv); + if (ret) + goto free_drm; + ret = meson_plane_create(priv); if (ret) goto free_drm; @@ -426,46 +431,6 @@ static int compare_of(struct device *dev, void *data) return dev->of_node == data; } -/* Possible connectors nodes to ignore */ -static const struct of_device_id connectors_match[] = { - { .compatible = "composite-video-connector" }, - { .compatible = "svideo-connector" }, - { .compatible = "hdmi-connector" }, - { .compatible = "dvi-connector" }, - {} -}; - -static int meson_probe_remote(struct platform_device *pdev, - struct component_match **match, - struct device_node *parent, - struct device_node *remote) -{ - struct device_node *ep, *remote_node; - int count = 1; - - /* If node is a connector, return and do not add to match table */ - if (of_match_node(connectors_match, remote)) - return 1; - - component_match_add(&pdev->dev, match, compare_of, remote); - - for_each_endpoint_of_node(remote, ep) { - remote_node = of_graph_get_remote_port_parent(ep); - if (!remote_node || - remote_node == parent || /* Ignore parent endpoint */ - !of_device_is_available(remote_node)) { - of_node_put(remote_node); - continue; - } - - count += meson_probe_remote(pdev, match, remote, remote_node); - - of_node_put(remote_node); - } - - return count; -} - static void meson_drv_shutdown(struct platform_device *pdev) { struct meson_drm *priv = dev_get_drvdata(&pdev->dev); @@ -477,6 +442,13 @@ static void meson_drv_shutdown(struct platform_device *pdev) drm_atomic_helper_shutdown(priv->drm); } +/* Possible connectors nodes to ignore */ +static const struct of_device_id connectors_match[] = { + { .compatible = "composite-video-connector" }, + { .compatible = "svideo-connector" }, + {} +}; + static int meson_drv_probe(struct platform_device *pdev) { struct component_match *match = NULL; @@ -491,8 +463,21 @@ static int meson_drv_probe(struct platform_device *pdev) continue; } - count += meson_probe_remote(pdev, &match, np, remote); + /* If an analog connector is detected, count it as an output */ + if (of_match_node(connectors_match, remote)) { + ++count; + of_node_put(remote); + continue; + } + + dev_dbg(&pdev->dev, "parent %pOF remote match add %pOF parent %s\n", + np, remote, dev_name(&pdev->dev)); + + component_match_add(&pdev->dev, &match, compare_of, remote); + of_node_put(remote); + + ++count; } if (count && !match) diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c 
index 0afbd1e70bfc..5cd2b2ebbbd3 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ -22,14 +22,11 @@ #include <drm/drm_probe_helper.h> #include <drm/drm_print.h> -#include <linux/media-bus-format.h> #include <linux/videodev2.h> #include "meson_drv.h" #include "meson_dw_hdmi.h" #include "meson_registers.h" -#include "meson_vclk.h" -#include "meson_venc.h" #define DRIVER_NAME "meson-dw-hdmi" #define DRIVER_DESC "Amlogic Meson HDMI-TX DRM driver" @@ -135,8 +132,6 @@ struct meson_dw_hdmi_data { }; struct meson_dw_hdmi { - struct drm_encoder encoder; - struct drm_bridge bridge; struct dw_hdmi_plat_data dw_plat_data; struct meson_drm *priv; struct device *dev; @@ -148,12 +143,8 @@ struct meson_dw_hdmi { struct regulator *hdmi_supply; u32 irq_stat; struct dw_hdmi *hdmi; - unsigned long output_bus_fmt; + struct drm_bridge *bridge; }; -#define encoder_to_meson_dw_hdmi(x) \ - container_of(x, struct meson_dw_hdmi, encoder) -#define bridge_to_meson_dw_hdmi(x) \ - container_of(x, struct meson_dw_hdmi, bridge) static inline int dw_hdmi_is_compatible(struct meson_dw_hdmi *dw_hdmi, const char *compat) @@ -295,14 +286,14 @@ static inline void dw_hdmi_dwc_write_bits(struct meson_dw_hdmi *dw_hdmi, /* Setup PHY bandwidth modes */ static void meson_hdmi_phy_setup_mode(struct meson_dw_hdmi *dw_hdmi, - const struct drm_display_mode *mode) + const struct drm_display_mode *mode, + bool mode_is_420) { struct meson_drm *priv = dw_hdmi->priv; unsigned int pixel_clock = mode->clock; /* For 420, pixel clock is half unlike venc clock */ - if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) - pixel_clock /= 2; + if (mode_is_420) pixel_clock /= 2; if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") || dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi")) { @@ -374,68 +365,25 @@ static inline void meson_dw_hdmi_phy_reset(struct meson_dw_hdmi *dw_hdmi) mdelay(2); } -static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi, - const struct drm_display_mode *mode) -{ - struct meson_drm *priv = dw_hdmi->priv; - int vic = drm_match_cea_mode(mode); - unsigned int phy_freq; - unsigned int vclk_freq; - unsigned int venc_freq; - unsigned int hdmi_freq; - - vclk_freq = mode->clock; - - /* For 420, pixel clock is half unlike venc clock */ - if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) - vclk_freq /= 2; - - /* TMDS clock is pixel_clock * 10 */ - phy_freq = vclk_freq * 10; - - if (!vic) { - meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, phy_freq, - vclk_freq, vclk_freq, vclk_freq, false); - return; - } - - /* 480i/576i needs global pixel doubling */ - if (mode->flags & DRM_MODE_FLAG_DBLCLK) - vclk_freq *= 2; - - venc_freq = vclk_freq; - hdmi_freq = vclk_freq; - - /* VENC double pixels for 1080i, 720p and YUV420 modes */ - if (meson_venc_hdmi_venc_repeat(vic) || - dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) - venc_freq *= 2; - - vclk_freq = max(venc_freq, hdmi_freq); - - if (mode->flags & DRM_MODE_FLAG_DBLCLK) - venc_freq /= 2; - - DRM_DEBUG_DRIVER("vclk:%d phy=%d venc=%d hdmi=%d enci=%d\n", - phy_freq, vclk_freq, venc_freq, hdmi_freq, - priv->venc.hdmi_use_enci); - - meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, phy_freq, vclk_freq, - venc_freq, hdmi_freq, priv->venc.hdmi_use_enci); -} - static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data, const struct drm_display_info *display, const struct drm_display_mode *mode) { struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data; + bool is_hdmi2_sink = 
display->hdmi.scdc.supported; struct meson_drm *priv = dw_hdmi->priv; unsigned int wr_clk = readl_relaxed(priv->io_base + _REG(VPU_HDMI_SETTING)); + bool mode_is_420 = false; DRM_DEBUG_DRIVER("\"%s\" div%d\n", mode->name, mode->clock > 340000 ? 40 : 10); + if (drm_mode_is_420_only(display, mode) || + (!is_hdmi2_sink && + drm_mode_is_420_also(display, mode))) + mode_is_420 = true; + /* Enable clocks */ regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100); @@ -457,8 +405,7 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data, dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12)); /* TMDS pattern setup */ - if (mode->clock > 340000 && - dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_YUV8_1X24) { + if (mode->clock > 340000 && !mode_is_420) { dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01, 0); dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23, @@ -476,7 +423,7 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data, dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_CNTL, 0x2); /* Setup PHY parameters */ - meson_hdmi_phy_setup_mode(dw_hdmi, mode); + meson_hdmi_phy_setup_mode(dw_hdmi, mode, mode_is_420); /* Setup PHY */ regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, @@ -622,214 +569,15 @@ static irqreturn_t dw_hdmi_top_thread_irq(int irq, void *dev_id) dw_hdmi_setup_rx_sense(dw_hdmi->hdmi, hpd_connected, hpd_connected); - drm_helper_hpd_irq_event(dw_hdmi->encoder.dev); + drm_helper_hpd_irq_event(dw_hdmi->bridge->dev); + drm_bridge_hpd_notify(dw_hdmi->bridge, + hpd_connected ? connector_status_connected + : connector_status_disconnected); } return IRQ_HANDLED; } -static enum drm_mode_status -dw_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data, - const struct drm_display_info *display_info, - const struct drm_display_mode *mode) -{ - struct meson_dw_hdmi *dw_hdmi = data; - struct meson_drm *priv = dw_hdmi->priv; - bool is_hdmi2_sink = display_info->hdmi.scdc.supported; - unsigned int phy_freq; - unsigned int vclk_freq; - unsigned int venc_freq; - unsigned int hdmi_freq; - int vic = drm_match_cea_mode(mode); - enum drm_mode_status status; - - DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); - - /* If sink does not support 540MHz, reject the non-420 HDMI2 modes */ - if (display_info->max_tmds_clock && - mode->clock > display_info->max_tmds_clock && - !drm_mode_is_420_only(display_info, mode) && - !drm_mode_is_420_also(display_info, mode)) - return MODE_BAD; - - /* Check against non-VIC supported modes */ - if (!vic) { - status = meson_venc_hdmi_supported_mode(mode); - if (status != MODE_OK) - return status; - - return meson_vclk_dmt_supported_freq(priv, mode->clock); - /* Check against supported VIC modes */ - } else if (!meson_venc_hdmi_supported_vic(vic)) - return MODE_BAD; - - vclk_freq = mode->clock; - - /* For 420, pixel clock is half unlike venc clock */ - if (drm_mode_is_420_only(display_info, mode) || - (!is_hdmi2_sink && - drm_mode_is_420_also(display_info, mode))) - vclk_freq /= 2; - - /* TMDS clock is pixel_clock * 10 */ - phy_freq = vclk_freq * 10; - - /* 480i/576i needs global pixel doubling */ - if (mode->flags & DRM_MODE_FLAG_DBLCLK) - vclk_freq *= 2; - - venc_freq = vclk_freq; - hdmi_freq = vclk_freq; - - /* VENC double pixels for 1080i, 720p and YUV420 modes */ - if (meson_venc_hdmi_venc_repeat(vic) || - drm_mode_is_420_only(display_info, mode) || - (!is_hdmi2_sink && - drm_mode_is_420_also(display_info, mode))) - venc_freq *= 2; - - vclk_freq = max(venc_freq, hdmi_freq); - - if (mode->flags 
& DRM_MODE_FLAG_DBLCLK) - venc_freq /= 2; - - dev_dbg(dw_hdmi->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n", - __func__, phy_freq, vclk_freq, venc_freq, hdmi_freq); - - return meson_vclk_vic_supported_freq(priv, phy_freq, vclk_freq); -} - -/* Encoder */ - -static const u32 meson_dw_hdmi_out_bus_fmts[] = { - MEDIA_BUS_FMT_YUV8_1X24, - MEDIA_BUS_FMT_UYYVYY8_0_5X24, -}; - -static void meson_venc_hdmi_encoder_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); -} - -static const struct drm_encoder_funcs meson_venc_hdmi_encoder_funcs = { - .destroy = meson_venc_hdmi_encoder_destroy, -}; - -static u32 * -meson_venc_hdmi_encoder_get_inp_bus_fmts(struct drm_bridge *bridge, - struct drm_bridge_state *bridge_state, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state, - u32 output_fmt, - unsigned int *num_input_fmts) -{ - u32 *input_fmts = NULL; - int i; - - *num_input_fmts = 0; - - for (i = 0 ; i < ARRAY_SIZE(meson_dw_hdmi_out_bus_fmts) ; ++i) { - if (output_fmt == meson_dw_hdmi_out_bus_fmts[i]) { - *num_input_fmts = 1; - input_fmts = kcalloc(*num_input_fmts, - sizeof(*input_fmts), - GFP_KERNEL); - if (!input_fmts) - return NULL; - - input_fmts[0] = output_fmt; - - break; - } - } - - return input_fmts; -} - -static int meson_venc_hdmi_encoder_atomic_check(struct drm_bridge *bridge, - struct drm_bridge_state *bridge_state, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state) -{ - struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge); - - dw_hdmi->output_bus_fmt = bridge_state->output_bus_cfg.format; - - DRM_DEBUG_DRIVER("output_bus_fmt %lx\n", dw_hdmi->output_bus_fmt); - - return 0; -} - -static void meson_venc_hdmi_encoder_disable(struct drm_bridge *bridge) -{ - struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge); - struct meson_drm *priv = dw_hdmi->priv; - - DRM_DEBUG_DRIVER("\n"); - - writel_bits_relaxed(0x3, 0, - priv->io_base + _REG(VPU_HDMI_SETTING)); - - writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN)); - writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN)); -} - -static void meson_venc_hdmi_encoder_enable(struct drm_bridge *bridge) -{ - struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge); - struct meson_drm *priv = dw_hdmi->priv; - - DRM_DEBUG_DRIVER("%s\n", priv->venc.hdmi_use_enci ? 
"VENCI" : "VENCP"); - - if (priv->venc.hdmi_use_enci) - writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN)); - else - writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN)); -} - -static void meson_venc_hdmi_encoder_mode_set(struct drm_bridge *bridge, - const struct drm_display_mode *mode, - const struct drm_display_mode *adjusted_mode) -{ - struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge); - struct meson_drm *priv = dw_hdmi->priv; - int vic = drm_match_cea_mode(mode); - unsigned int ycrcb_map = VPU_HDMI_OUTPUT_CBYCR; - bool yuv420_mode = false; - - DRM_DEBUG_DRIVER("\"%s\" vic %d\n", mode->name, vic); - - if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) { - ycrcb_map = VPU_HDMI_OUTPUT_CRYCB; - yuv420_mode = true; - } - - /* VENC + VENC-DVI Mode setup */ - meson_venc_hdmi_mode_set(priv, vic, ycrcb_map, yuv420_mode, mode); - - /* VCLK Set clock */ - dw_hdmi_set_vclk(dw_hdmi, mode); - - if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) - /* Setup YUV420 to HDMI-TX, no 10bit diphering */ - writel_relaxed(2 | (2 << 2), - priv->io_base + _REG(VPU_HDMI_FMT_CTRL)); - else - /* Setup YUV444 to HDMI-TX, no 10bit diphering */ - writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL)); -} - -static const struct drm_bridge_funcs meson_venc_hdmi_encoder_bridge_funcs = { - .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, - .atomic_get_input_bus_fmts = meson_venc_hdmi_encoder_get_inp_bus_fmts, - .atomic_reset = drm_atomic_helper_bridge_reset, - .atomic_check = meson_venc_hdmi_encoder_atomic_check, - .enable = meson_venc_hdmi_encoder_enable, - .disable = meson_venc_hdmi_encoder_disable, - .mode_set = meson_venc_hdmi_encoder_mode_set, -}; - /* DW HDMI Regmap */ static int meson_dw_hdmi_reg_read(void *context, unsigned int reg, @@ -876,28 +624,6 @@ static const struct meson_dw_hdmi_data meson_dw_hdmi_g12a_data = { .dwc_write = dw_hdmi_g12a_dwc_write, }; -static bool meson_hdmi_connector_is_available(struct device *dev) -{ - struct device_node *ep, *remote; - - /* HDMI Connector is on the second port, first endpoint */ - ep = of_graph_get_endpoint_by_regs(dev->of_node, 1, 0); - if (!ep) - return false; - - /* If the endpoint node exists, consider it enabled */ - remote = of_graph_get_remote_port(ep); - if (remote) { - of_node_put(ep); - return true; - } - - of_node_put(ep); - of_node_put(remote); - - return false; -} - static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi) { struct meson_drm *priv = meson_dw_hdmi->priv; @@ -976,18 +702,11 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, struct drm_device *drm = data; struct meson_drm *priv = drm->dev_private; struct dw_hdmi_plat_data *dw_plat_data; - struct drm_bridge *next_bridge; - struct drm_encoder *encoder; int irq; int ret; DRM_DEBUG_DRIVER("\n"); - if (!meson_hdmi_connector_is_available(dev)) { - dev_info(drm->dev, "HDMI Output connector not available\n"); - return -ENODEV; - } - match = of_device_get_match_data(&pdev->dev); if (!match) { dev_err(&pdev->dev, "failed to get match data\n"); @@ -1003,7 +722,6 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, meson_dw_hdmi->dev = dev; meson_dw_hdmi->data = match; dw_plat_data = &meson_dw_hdmi->dw_plat_data; - encoder = &meson_dw_hdmi->encoder; meson_dw_hdmi->hdmi_supply = devm_regulator_get_optional(dev, "hdmi"); if (IS_ERR(meson_dw_hdmi->hdmi_supply)) { @@ -1074,34 +792,18 @@ static int meson_dw_hdmi_bind(struct 
device *dev, struct device *master, return ret; } - /* Encoder */ - - ret = drm_encoder_init(drm, encoder, &meson_venc_hdmi_encoder_funcs, - DRM_MODE_ENCODER_TMDS, "meson_hdmi"); - if (ret) { - dev_err(priv->dev, "Failed to init HDMI encoder\n"); - return ret; - } - - meson_dw_hdmi->bridge.funcs = &meson_venc_hdmi_encoder_bridge_funcs; - drm_bridge_attach(encoder, &meson_dw_hdmi->bridge, NULL, 0); - - encoder->possible_crtcs = BIT(0); - meson_dw_hdmi_init(meson_dw_hdmi); - DRM_DEBUG_DRIVER("encoder initialized\n"); - /* Bridge / Connector */ dw_plat_data->priv_data = meson_dw_hdmi; - dw_plat_data->mode_valid = dw_hdmi_mode_valid; dw_plat_data->phy_ops = &meson_dw_hdmi_phy_ops; dw_plat_data->phy_name = "meson_dw_hdmi_phy"; dw_plat_data->phy_data = meson_dw_hdmi; dw_plat_data->input_bus_encoding = V4L2_YCBCR_ENC_709; dw_plat_data->ycbcr_420_allowed = true; dw_plat_data->disable_cec = true; + dw_plat_data->output_port = 1; if (dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxl-dw-hdmi") || dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxm-dw-hdmi") || @@ -1110,15 +812,11 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, platform_set_drvdata(pdev, meson_dw_hdmi); - meson_dw_hdmi->hdmi = dw_hdmi_probe(pdev, - &meson_dw_hdmi->dw_plat_data); + meson_dw_hdmi->hdmi = dw_hdmi_probe(pdev, &meson_dw_hdmi->dw_plat_data); if (IS_ERR(meson_dw_hdmi->hdmi)) return PTR_ERR(meson_dw_hdmi->hdmi); - next_bridge = of_drm_find_bridge(pdev->dev.of_node); - if (next_bridge) - drm_bridge_attach(encoder, next_bridge, - &meson_dw_hdmi->bridge, 0); + meson_dw_hdmi->bridge = of_drm_find_bridge(pdev->dev.of_node); DRM_DEBUG_DRIVER("HDMI controller initialized\n"); diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c new file mode 100644 index 000000000000..fd8db97ba8ba --- /dev/null +++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2016 BayLibre, SAS + * Author: Neil Armstrong <narmstrong@baylibre.com> + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * Copyright (C) 2014 Endless Mobile + * + * Written by: + * Jasper St. 
Pierre <jstpierre@mecheye.net> + */ + +#include <linux/export.h> +#include <linux/of_graph.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_bridge_connector.h> +#include <drm/drm_device.h> +#include <drm/drm_edid.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> + +#include "meson_registers.h" +#include "meson_vclk.h" +#include "meson_encoder_cvbs.h" + +/* HHI VDAC Registers */ +#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */ +#define HHI_VDAC_CNTL0_G12A 0x2EC /* 0xbd offset in data sheet */ +#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */ +#define HHI_VDAC_CNTL1_G12A 0x2F0 /* 0xbe offset in data sheet */ + +struct meson_encoder_cvbs { + struct drm_encoder encoder; + struct drm_bridge bridge; + struct drm_bridge *next_bridge; + struct meson_drm *priv; +}; + +#define bridge_to_meson_encoder_cvbs(x) \ + container_of(x, struct meson_encoder_cvbs, bridge) + +/* Supported Modes */ + +struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT] = { + { /* PAL */ + .enci = &meson_cvbs_enci_pal, + .mode = { + DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, + 720, 732, 795, 864, 0, 576, 580, 586, 625, 0, + DRM_MODE_FLAG_INTERLACE), + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, + }, + }, + { /* NTSC */ + .enci = &meson_cvbs_enci_ntsc, + .mode = { + DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, + 720, 739, 801, 858, 0, 480, 488, 494, 525, 0, + DRM_MODE_FLAG_INTERLACE), + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, + }, + }, +}; + +static const struct meson_cvbs_mode * +meson_cvbs_get_mode(const struct drm_display_mode *req_mode) +{ + int i; + + for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) { + struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i]; + + if (drm_mode_match(req_mode, &meson_mode->mode, + DRM_MODE_MATCH_TIMINGS | + DRM_MODE_MATCH_CLOCK | + DRM_MODE_MATCH_FLAGS | + DRM_MODE_MATCH_3D_FLAGS)) + return meson_mode; + } + + return NULL; +} + +static int meson_encoder_cvbs_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct meson_encoder_cvbs *meson_encoder_cvbs = + bridge_to_meson_encoder_cvbs(bridge); + + return drm_bridge_attach(bridge->encoder, meson_encoder_cvbs->next_bridge, + &meson_encoder_cvbs->bridge, flags); +} + +static int meson_encoder_cvbs_get_modes(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct meson_encoder_cvbs *meson_encoder_cvbs = + bridge_to_meson_encoder_cvbs(bridge); + struct meson_drm *priv = meson_encoder_cvbs->priv; + struct drm_display_mode *mode; + int i; + + for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) { + struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i]; + + mode = drm_mode_duplicate(priv->drm, &meson_mode->mode); + if (!mode) { + dev_err(priv->dev, "Failed to create a new display mode\n"); + return 0; + } + + drm_mode_probed_add(connector, mode); + } + + return i; +} + +static enum drm_mode_status meson_encoder_cvbs_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *display_info, + const struct drm_display_mode *mode) +{ + if (meson_cvbs_get_mode(mode)) + return MODE_OK; + + return MODE_BAD; +} + +static int meson_encoder_cvbs_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + if (meson_cvbs_get_mode(&crtc_state->mode)) + return 0; + + return -EINVAL; +} + +static void meson_encoder_cvbs_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state)
+{ + struct meson_encoder_cvbs *encoder_cvbs = bridge_to_meson_encoder_cvbs(bridge); + struct drm_atomic_state *state = bridge_state->base.state; + struct meson_drm *priv = encoder_cvbs->priv; + const struct meson_cvbs_mode *meson_mode; + struct drm_connector_state *conn_state; + struct drm_crtc_state *crtc_state; + struct drm_connector *connector; + + connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); + if (WARN_ON(!connector)) + return; + + conn_state = drm_atomic_get_new_connector_state(state, connector); + if (WARN_ON(!conn_state)) + return; + + crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); + if (WARN_ON(!crtc_state)) + return; + + meson_mode = meson_cvbs_get_mode(&crtc_state->adjusted_mode); + if (WARN_ON(!meson_mode)) + return; + + meson_venci_cvbs_mode_set(priv, meson_mode->enci); + + /* Setup 27MHz vclk2 for ENCI and VDAC */ + meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, + MESON_VCLK_CVBS, MESON_VCLK_CVBS, + MESON_VCLK_CVBS, MESON_VCLK_CVBS, + true); + + /* VDAC0 source is not from ATV */ + writel_bits_relaxed(VENC_VDAC_SEL_ATV_DMD, 0, + priv->io_base + _REG(VENC_VDAC_DACSEL0)); + + if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) { + regmap_write(priv->hhi, HHI_VDAC_CNTL0, 1); + regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0); + } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) || + meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) { + regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0xf0001); + regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0); + } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) { + regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0x906001); + regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0); + } +} + +static void meson_encoder_cvbs_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state) +{ + struct meson_encoder_cvbs *meson_encoder_cvbs = + bridge_to_meson_encoder_cvbs(bridge); + struct meson_drm *priv = meson_encoder_cvbs->priv; + + /* Disable CVBS VDAC */ + if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) { + regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0); + regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0); + } else { + regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0); + regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8); + } +} + +static const struct drm_bridge_funcs meson_encoder_cvbs_bridge_funcs = { + .attach = meson_encoder_cvbs_attach, + .mode_valid = meson_encoder_cvbs_mode_valid, + .get_modes = meson_encoder_cvbs_get_modes, + .atomic_enable = meson_encoder_cvbs_atomic_enable, + .atomic_disable = meson_encoder_cvbs_atomic_disable, + .atomic_check = meson_encoder_cvbs_atomic_check, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, +}; + +int meson_encoder_cvbs_init(struct meson_drm *priv) +{ + struct drm_device *drm = priv->drm; + struct meson_encoder_cvbs *meson_encoder_cvbs; + struct drm_connector *connector; + struct device_node *remote; + int ret; + + meson_encoder_cvbs = devm_kzalloc(priv->dev, sizeof(*meson_encoder_cvbs), GFP_KERNEL); + if (!meson_encoder_cvbs) + return -ENOMEM; + + /* CVBS Connector Bridge */ + remote = of_graph_get_remote_node(priv->dev->of_node, 0, 0); + if (!remote) { + dev_info(drm->dev, "CVBS Output connector not available\n"); + return 0; + } + + meson_encoder_cvbs->next_bridge = of_drm_find_bridge(remote); + if (!meson_encoder_cvbs->next_bridge) { + dev_err(priv->dev, "Failed to find CVBS Connector bridge\n"); + 
return -EPROBE_DEFER; + } + + /* CVBS Encoder Bridge */ + meson_encoder_cvbs->bridge.funcs = &meson_encoder_cvbs_bridge_funcs; + meson_encoder_cvbs->bridge.of_node = priv->dev->of_node; + meson_encoder_cvbs->bridge.type = DRM_MODE_CONNECTOR_Composite; + meson_encoder_cvbs->bridge.ops = DRM_BRIDGE_OP_MODES; + meson_encoder_cvbs->bridge.interlace_allowed = true; + + drm_bridge_add(&meson_encoder_cvbs->bridge); + + meson_encoder_cvbs->priv = priv; + + /* Encoder */ + ret = drm_simple_encoder_init(priv->drm, &meson_encoder_cvbs->encoder, + DRM_MODE_ENCODER_TVDAC); + if (ret) { + dev_err(priv->dev, "Failed to init CVBS encoder: %d\n", ret); + return ret; + } + + meson_encoder_cvbs->encoder.possible_crtcs = BIT(0); + + /* Attach CVBS Encoder Bridge to Encoder */ + ret = drm_bridge_attach(&meson_encoder_cvbs->encoder, &meson_encoder_cvbs->bridge, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) { + dev_err(priv->dev, "Failed to attach bridge: %d\n", ret); + return ret; + } + + /* Initialize & attach Bridge Connector */ + connector = drm_bridge_connector_init(priv->drm, &meson_encoder_cvbs->encoder); + if (IS_ERR(connector)) { + dev_err(priv->dev, "Unable to create CVBS bridge connector\n"); + return PTR_ERR(connector); + } + drm_connector_attach_encoder(connector, &meson_encoder_cvbs->encoder); + + return 0; +} diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.h b/drivers/gpu/drm/meson/meson_encoder_cvbs.h index ab7f76ba469c..61d9d183ce7f 100644 --- a/drivers/gpu/drm/meson/meson_venc_cvbs.h +++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.h @@ -24,6 +24,6 @@ struct meson_cvbs_mode { /* Modes supported by the CVBS output */ extern struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT]; -int meson_venc_cvbs_create(struct meson_drm *priv); +int meson_encoder_cvbs_init(struct meson_drm *priv); #endif /* __MESON_VENC_CVBS_H */ diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c new file mode 100644 index 000000000000..5e306de6f485 --- /dev/null +++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c @@ -0,0 +1,447 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2016 BayLibre, SAS + * Author: Neil Armstrong <narmstrong@baylibre.com> + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. 
+ */ + +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of_graph.h> +#include <linux/regulator/consumer.h> +#include <linux/reset.h> + +#include <media/cec-notifier.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_bridge_connector.h> +#include <drm/drm_device.h> +#include <drm/drm_edid.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> + +#include <linux/media-bus-format.h> +#include <linux/videodev2.h> + +#include "meson_drv.h" +#include "meson_registers.h" +#include "meson_vclk.h" +#include "meson_venc.h" +#include "meson_encoder_hdmi.h" + +struct meson_encoder_hdmi { + struct drm_encoder encoder; + struct drm_bridge bridge; + struct drm_bridge *next_bridge; + struct drm_connector *connector; + struct meson_drm *priv; + unsigned long output_bus_fmt; + struct cec_notifier *cec_notifier; +}; + +#define bridge_to_meson_encoder_hdmi(x) \ + container_of(x, struct meson_encoder_hdmi, bridge) + +static int meson_encoder_hdmi_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); + + return drm_bridge_attach(bridge->encoder, encoder_hdmi->next_bridge, + &encoder_hdmi->bridge, flags); +} + +static void meson_encoder_hdmi_detach(struct drm_bridge *bridge) +{ + struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); + + cec_notifier_conn_unregister(encoder_hdmi->cec_notifier); + encoder_hdmi->cec_notifier = NULL; +} + +static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi, + const struct drm_display_mode *mode) +{ + struct meson_drm *priv = encoder_hdmi->priv; + int vic = drm_match_cea_mode(mode); + unsigned int phy_freq; + unsigned int vclk_freq; + unsigned int venc_freq; + unsigned int hdmi_freq; + + vclk_freq = mode->clock; + + /* For 420, pixel clock is half unlike venc clock */ + if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) + vclk_freq /= 2; + + /* TMDS clock is pixel_clock * 10 */ + phy_freq = vclk_freq * 10; + + if (!vic) { + meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, phy_freq, + vclk_freq, vclk_freq, vclk_freq, false); + return; + } + + /* 480i/576i needs global pixel doubling */ + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + vclk_freq *= 2; + + venc_freq = vclk_freq; + hdmi_freq = vclk_freq; + + /* VENC double pixels for 1080i, 720p and YUV420 modes */ + if (meson_venc_hdmi_venc_repeat(vic) || + encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) + venc_freq *= 2; + + vclk_freq = max(venc_freq, hdmi_freq); + + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + venc_freq /= 2; + + dev_dbg(priv->dev, "vclk:%d phy=%d venc=%d hdmi=%d enci=%d\n", + phy_freq, vclk_freq, venc_freq, hdmi_freq, + priv->venc.hdmi_use_enci); + + meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, phy_freq, vclk_freq, + venc_freq, hdmi_freq, priv->venc.hdmi_use_enci); +} + +static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *display_info, + const struct drm_display_mode *mode) +{ + struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); + struct meson_drm *priv = encoder_hdmi->priv; + bool is_hdmi2_sink = display_info->hdmi.scdc.supported; + unsigned int phy_freq; + unsigned int vclk_freq; + unsigned int venc_freq; + unsigned int hdmi_freq; + int vic = drm_match_cea_mode(mode); + enum 
drm_mode_status status; + + dev_dbg(priv->dev, "Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); + + /* If sink does not support 540MHz, reject the non-420 HDMI2 modes */ + if (display_info->max_tmds_clock && + mode->clock > display_info->max_tmds_clock && + !drm_mode_is_420_only(display_info, mode) && + !drm_mode_is_420_also(display_info, mode)) + return MODE_BAD; + + /* Check against non-VIC supported modes */ + if (!vic) { + status = meson_venc_hdmi_supported_mode(mode); + if (status != MODE_OK) + return status; + + return meson_vclk_dmt_supported_freq(priv, mode->clock); + /* Check against supported VIC modes */ + } else if (!meson_venc_hdmi_supported_vic(vic)) + return MODE_BAD; + + vclk_freq = mode->clock; + + /* For 420, pixel clock is half unlike venc clock */ + if (drm_mode_is_420_only(display_info, mode) || + (!is_hdmi2_sink && + drm_mode_is_420_also(display_info, mode))) + vclk_freq /= 2; + + /* TMDS clock is pixel_clock * 10 */ + phy_freq = vclk_freq * 10; + + /* 480i/576i needs global pixel doubling */ + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + vclk_freq *= 2; + + venc_freq = vclk_freq; + hdmi_freq = vclk_freq; + + /* VENC double pixels for 1080i, 720p and YUV420 modes */ + if (meson_venc_hdmi_venc_repeat(vic) || + drm_mode_is_420_only(display_info, mode) || + (!is_hdmi2_sink && + drm_mode_is_420_also(display_info, mode))) + venc_freq *= 2; + + vclk_freq = max(venc_freq, hdmi_freq); + + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + venc_freq /= 2; + + dev_dbg(priv->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n", + __func__, phy_freq, vclk_freq, venc_freq, hdmi_freq); + + return meson_vclk_vic_supported_freq(priv, phy_freq, vclk_freq); +} + +static void meson_encoder_hdmi_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state) +{ + struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); + struct drm_atomic_state *state = bridge_state->base.state; + unsigned int ycrcb_map = VPU_HDMI_OUTPUT_CBYCR; + struct meson_drm *priv = encoder_hdmi->priv; + struct drm_connector_state *conn_state; + const struct drm_display_mode *mode; + struct drm_crtc_state *crtc_state; + struct drm_connector *connector; + bool yuv420_mode = false; + int vic; + + connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); + if (WARN_ON(!connector)) + return; + + conn_state = drm_atomic_get_new_connector_state(state, connector); + if (WARN_ON(!conn_state)) + return; + + crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); + if (WARN_ON(!crtc_state)) + return; + + mode = &crtc_state->adjusted_mode; + + vic = drm_match_cea_mode(mode); + + dev_dbg(priv->dev, "\"%s\" vic %d\n", mode->name, vic); + + if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) { + ycrcb_map = VPU_HDMI_OUTPUT_CRYCB; + yuv420_mode = true; + } + + /* VENC + VENC-DVI Mode setup */ + meson_venc_hdmi_mode_set(priv, vic, ycrcb_map, yuv420_mode, mode); + + /* VCLK Set clock */ + meson_encoder_hdmi_set_vclk(encoder_hdmi, mode); + + if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) + /* Setup YUV420 to HDMI-TX, no 10bit diphering */ + writel_relaxed(2 | (2 << 2), + priv->io_base + _REG(VPU_HDMI_FMT_CTRL)); + else + /* Setup YUV444 to HDMI-TX, no 10bit diphering */ + writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL)); + + dev_dbg(priv->dev, "%s\n", priv->venc.hdmi_use_enci ? 
"VENCI" : "VENCP"); + + if (priv->venc.hdmi_use_enci) + writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN)); + else + writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN)); +} + +static void meson_encoder_hdmi_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state) +{ + struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); + struct meson_drm *priv = encoder_hdmi->priv; + + writel_bits_relaxed(0x3, 0, + priv->io_base + _REG(VPU_HDMI_SETTING)); + + writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN)); + writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN)); +} + +static const u32 meson_encoder_hdmi_out_bus_fmts[] = { + MEDIA_BUS_FMT_YUV8_1X24, + MEDIA_BUS_FMT_UYYVYY8_0_5X24, +}; + +static u32 * +meson_encoder_hdmi_get_inp_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts) +{ + u32 *input_fmts = NULL; + int i; + + *num_input_fmts = 0; + + for (i = 0 ; i < ARRAY_SIZE(meson_encoder_hdmi_out_bus_fmts) ; ++i) { + if (output_fmt == meson_encoder_hdmi_out_bus_fmts[i]) { + *num_input_fmts = 1; + input_fmts = kcalloc(*num_input_fmts, + sizeof(*input_fmts), + GFP_KERNEL); + if (!input_fmts) + return NULL; + + input_fmts[0] = output_fmt; + + break; + } + } + + return input_fmts; +} + +static int meson_encoder_hdmi_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); + struct drm_connector_state *old_conn_state = + drm_atomic_get_old_connector_state(conn_state->state, conn_state->connector); + struct meson_drm *priv = encoder_hdmi->priv; + + encoder_hdmi->output_bus_fmt = bridge_state->output_bus_cfg.format; + + dev_dbg(priv->dev, "output_bus_fmt %lx\n", encoder_hdmi->output_bus_fmt); + + if (!drm_connector_atomic_hdr_metadata_equal(old_conn_state, conn_state)) + crtc_state->mode_changed = true; + + return 0; +} + +static void meson_encoder_hdmi_hpd_notify(struct drm_bridge *bridge, + enum drm_connector_status status) +{ + struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); + struct edid *edid; + + if (!encoder_hdmi->cec_notifier) + return; + + if (status == connector_status_connected) { + edid = drm_bridge_get_edid(encoder_hdmi->next_bridge, encoder_hdmi->connector); + if (!edid) + return; + + cec_notifier_set_phys_addr_from_edid(encoder_hdmi->cec_notifier, edid); + } else + cec_notifier_phys_addr_invalidate(encoder_hdmi->cec_notifier); +} + +static const struct drm_bridge_funcs meson_encoder_hdmi_bridge_funcs = { + .attach = meson_encoder_hdmi_attach, + .detach = meson_encoder_hdmi_detach, + .mode_valid = meson_encoder_hdmi_mode_valid, + .hpd_notify = meson_encoder_hdmi_hpd_notify, + .atomic_enable = meson_encoder_hdmi_atomic_enable, + .atomic_disable = meson_encoder_hdmi_atomic_disable, + .atomic_get_input_bus_fmts = meson_encoder_hdmi_get_inp_bus_fmts, + .atomic_check = meson_encoder_hdmi_atomic_check, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, +}; + +int meson_encoder_hdmi_init(struct meson_drm *priv) +{ + struct meson_encoder_hdmi *meson_encoder_hdmi; + struct platform_device *pdev; + struct device_node *remote; + int ret; + + 
meson_encoder_hdmi = devm_kzalloc(priv->dev, sizeof(*meson_encoder_hdmi), GFP_KERNEL); + if (!meson_encoder_hdmi) + return -ENOMEM; + + /* HDMI Transceiver Bridge */ + remote = of_graph_get_remote_node(priv->dev->of_node, 1, 0); + if (!remote) { + dev_err(priv->dev, "HDMI transceiver device is disabled"); + return 0; + } + + meson_encoder_hdmi->next_bridge = of_drm_find_bridge(remote); + if (!meson_encoder_hdmi->next_bridge) { + dev_err(priv->dev, "Failed to find HDMI transceiver bridge\n"); + return -EPROBE_DEFER; + } + + /* HDMI Encoder Bridge */ + meson_encoder_hdmi->bridge.funcs = &meson_encoder_hdmi_bridge_funcs; + meson_encoder_hdmi->bridge.of_node = priv->dev->of_node; + meson_encoder_hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA; + meson_encoder_hdmi->bridge.interlace_allowed = true; + + drm_bridge_add(&meson_encoder_hdmi->bridge); + + meson_encoder_hdmi->priv = priv; + + /* Encoder */ + ret = drm_simple_encoder_init(priv->drm, &meson_encoder_hdmi->encoder, + DRM_MODE_ENCODER_TMDS); + if (ret) { + dev_err(priv->dev, "Failed to init HDMI encoder: %d\n", ret); + return ret; + } + + meson_encoder_hdmi->encoder.possible_crtcs = BIT(0); + + /* Attach HDMI Encoder Bridge to Encoder */ + ret = drm_bridge_attach(&meson_encoder_hdmi->encoder, &meson_encoder_hdmi->bridge, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) { + dev_err(priv->dev, "Failed to attach bridge: %d\n", ret); + return ret; + } + + /* Initialize & attach Bridge Connector */ + meson_encoder_hdmi->connector = drm_bridge_connector_init(priv->drm, + &meson_encoder_hdmi->encoder); + if (IS_ERR(meson_encoder_hdmi->connector)) { + dev_err(priv->dev, "Unable to create HDMI bridge connector\n"); + return PTR_ERR(meson_encoder_hdmi->connector); + } + drm_connector_attach_encoder(meson_encoder_hdmi->connector, + &meson_encoder_hdmi->encoder); + + /* + * We should have now in place: + * encoder->[hdmi encoder bridge]->[dw-hdmi bridge]->[display connector bridge]->[display connector] + */ + + /* + * drm_connector_attach_max_bpc_property() requires the + * connector to have a state. 
+ */ + drm_atomic_helper_connector_reset(meson_encoder_hdmi->connector); + + if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL) || + meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) || + meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) + drm_connector_attach_hdr_output_metadata_property(meson_encoder_hdmi->connector); + + drm_connector_attach_max_bpc_property(meson_encoder_hdmi->connector, 8, 8); + + /* Handle this here until handled by drm_bridge_connector_init() */ + meson_encoder_hdmi->connector->ycbcr_420_allowed = true; + + pdev = of_find_device_by_node(remote); + if (pdev) { + struct cec_connector_info conn_info; + struct cec_notifier *notifier; + + cec_fill_conn_info_from_drm(&conn_info, meson_encoder_hdmi->connector); + + notifier = cec_notifier_conn_register(&pdev->dev, NULL, &conn_info); + if (!notifier) + return -ENOMEM; + + meson_encoder_hdmi->cec_notifier = notifier; + } + + dev_dbg(priv->dev, "HDMI encoder initialized\n"); + + return 0; +} diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.h b/drivers/gpu/drm/meson/meson_encoder_hdmi.h new file mode 100644 index 000000000000..ed19494f0956 --- /dev/null +++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2021 BayLibre, SAS + * Author: Neil Armstrong <narmstrong@baylibre.com> + */ + +#ifndef __MESON_ENCODER_HDMI_H +#define __MESON_ENCODER_HDMI_H + +int meson_encoder_hdmi_init(struct meson_drm *priv); + +#endif /* __MESON_ENCODER_HDMI_H */ diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c deleted file mode 100644 index f1747fde1fe0..000000000000 --- a/drivers/gpu/drm/meson/meson_venc_cvbs.c +++ /dev/null @@ -1,293 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 2016 BayLibre, SAS - * Author: Neil Armstrong <narmstrong@baylibre.com> - * Copyright (C) 2015 Amlogic, Inc. All rights reserved. - * Copyright (C) 2014 Endless Mobile - * - * Written by: - * Jasper St. 
Pierre <jstpierre@mecheye.net> - */ - -#include <linux/export.h> -#include <linux/of_graph.h> - -#include <drm/drm_atomic_helper.h> -#include <drm/drm_device.h> -#include <drm/drm_edid.h> -#include <drm/drm_probe_helper.h> -#include <drm/drm_print.h> - -#include "meson_registers.h" -#include "meson_vclk.h" -#include "meson_venc_cvbs.h" - -/* HHI VDAC Registers */ -#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */ -#define HHI_VDAC_CNTL0_G12A 0x2EC /* 0xbd offset in data sheet */ -#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */ -#define HHI_VDAC_CNTL1_G12A 0x2F0 /* 0xbe offset in data sheet */ - -struct meson_venc_cvbs { - struct drm_encoder encoder; - struct drm_connector connector; - struct meson_drm *priv; -}; -#define encoder_to_meson_venc_cvbs(x) \ - container_of(x, struct meson_venc_cvbs, encoder) - -#define connector_to_meson_venc_cvbs(x) \ - container_of(x, struct meson_venc_cvbs, connector) - -/* Supported Modes */ - -struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT] = { - { /* PAL */ - .enci = &meson_cvbs_enci_pal, - .mode = { - DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, - 720, 732, 795, 864, 0, 576, 580, 586, 625, 0, - DRM_MODE_FLAG_INTERLACE), - .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, - }, - }, - { /* NTSC */ - .enci = &meson_cvbs_enci_ntsc, - .mode = { - DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, - 720, 739, 801, 858, 0, 480, 488, 494, 525, 0, - DRM_MODE_FLAG_INTERLACE), - .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, - }, - }, -}; - -static const struct meson_cvbs_mode * -meson_cvbs_get_mode(const struct drm_display_mode *req_mode) -{ - int i; - - for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) { - struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i]; - - if (drm_mode_match(req_mode, &meson_mode->mode, - DRM_MODE_MATCH_TIMINGS | - DRM_MODE_MATCH_CLOCK | - DRM_MODE_MATCH_FLAGS | - DRM_MODE_MATCH_3D_FLAGS)) - return meson_mode; - } - - return NULL; -} - -/* Connector */ - -static void meson_cvbs_connector_destroy(struct drm_connector *connector) -{ - drm_connector_cleanup(connector); -} - -static enum drm_connector_status -meson_cvbs_connector_detect(struct drm_connector *connector, bool force) -{ - /* FIXME: Add load-detect or jack-detect if possible */ - return connector_status_connected; -} - -static int meson_cvbs_connector_get_modes(struct drm_connector *connector) -{ - struct drm_device *dev = connector->dev; - struct drm_display_mode *mode; - int i; - - for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) { - struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i]; - - mode = drm_mode_duplicate(dev, &meson_mode->mode); - if (!mode) { - DRM_ERROR("Failed to create a new display mode\n"); - return 0; - } - - drm_mode_probed_add(connector, mode); - } - - return i; -} - -static int meson_cvbs_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - /* Validate the modes added in get_modes */ - return MODE_OK; -} - -static const struct drm_connector_funcs meson_cvbs_connector_funcs = { - .detect = meson_cvbs_connector_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = meson_cvbs_connector_destroy, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static const -struct drm_connector_helper_funcs meson_cvbs_connector_helper_funcs = { - .get_modes = meson_cvbs_connector_get_modes, - .mode_valid = 
meson_cvbs_connector_mode_valid, -}; - -/* Encoder */ - -static void meson_venc_cvbs_encoder_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); -} - -static const struct drm_encoder_funcs meson_venc_cvbs_encoder_funcs = { - .destroy = meson_venc_cvbs_encoder_destroy, -}; - -static int meson_venc_cvbs_encoder_atomic_check(struct drm_encoder *encoder, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state) -{ - if (meson_cvbs_get_mode(&crtc_state->mode)) - return 0; - - return -EINVAL; -} - -static void meson_venc_cvbs_encoder_disable(struct drm_encoder *encoder) -{ - struct meson_venc_cvbs *meson_venc_cvbs = - encoder_to_meson_venc_cvbs(encoder); - struct meson_drm *priv = meson_venc_cvbs->priv; - - /* Disable CVBS VDAC */ - if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) { - regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0); - regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0); - } else { - regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0); - regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8); - } -} - -static void meson_venc_cvbs_encoder_enable(struct drm_encoder *encoder) -{ - struct meson_venc_cvbs *meson_venc_cvbs = - encoder_to_meson_venc_cvbs(encoder); - struct meson_drm *priv = meson_venc_cvbs->priv; - - /* VDAC0 source is not from ATV */ - writel_bits_relaxed(VENC_VDAC_SEL_ATV_DMD, 0, - priv->io_base + _REG(VENC_VDAC_DACSEL0)); - - if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) { - regmap_write(priv->hhi, HHI_VDAC_CNTL0, 1); - regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0); - } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) || - meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) { - regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0xf0001); - regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0); - } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) { - regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0x906001); - regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0); - } -} - -static void meson_venc_cvbs_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - const struct meson_cvbs_mode *meson_mode = meson_cvbs_get_mode(mode); - struct meson_venc_cvbs *meson_venc_cvbs = - encoder_to_meson_venc_cvbs(encoder); - struct meson_drm *priv = meson_venc_cvbs->priv; - - if (meson_mode) { - meson_venci_cvbs_mode_set(priv, meson_mode->enci); - - /* Setup 27MHz vclk2 for ENCI and VDAC */ - meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, - MESON_VCLK_CVBS, MESON_VCLK_CVBS, - MESON_VCLK_CVBS, MESON_VCLK_CVBS, - true); - } -} - -static const struct drm_encoder_helper_funcs - meson_venc_cvbs_encoder_helper_funcs = { - .atomic_check = meson_venc_cvbs_encoder_atomic_check, - .disable = meson_venc_cvbs_encoder_disable, - .enable = meson_venc_cvbs_encoder_enable, - .mode_set = meson_venc_cvbs_encoder_mode_set, -}; - -static bool meson_venc_cvbs_connector_is_available(struct meson_drm *priv) -{ - struct device_node *remote; - - remote = of_graph_get_remote_node(priv->dev->of_node, 0, 0); - if (!remote) - return false; - - of_node_put(remote); - return true; -} - -int meson_venc_cvbs_create(struct meson_drm *priv) -{ - struct drm_device *drm = priv->drm; - struct meson_venc_cvbs *meson_venc_cvbs; - struct drm_connector *connector; - struct drm_encoder *encoder; - int ret; - - if (!meson_venc_cvbs_connector_is_available(priv)) { - dev_info(drm->dev, "CVBS Output connector not available\n"); - return 0; - } - - meson_venc_cvbs = devm_kzalloc(priv->dev, sizeof(*meson_venc_cvbs), - GFP_KERNEL); - if 
(!meson_venc_cvbs) - return -ENOMEM; - - meson_venc_cvbs->priv = priv; - encoder = &meson_venc_cvbs->encoder; - connector = &meson_venc_cvbs->connector; - - /* Connector */ - - drm_connector_helper_add(connector, - &meson_cvbs_connector_helper_funcs); - - ret = drm_connector_init(drm, connector, &meson_cvbs_connector_funcs, - DRM_MODE_CONNECTOR_Composite); - if (ret) { - dev_err(priv->dev, "Failed to init CVBS connector\n"); - return ret; - } - - connector->interlace_allowed = 1; - - /* Encoder */ - - drm_encoder_helper_add(encoder, &meson_venc_cvbs_encoder_helper_funcs); - - ret = drm_encoder_init(drm, encoder, &meson_venc_cvbs_encoder_funcs, - DRM_MODE_ENCODER_TVDAC, "meson_venc_cvbs"); - if (ret) { - dev_err(priv->dev, "Failed to init CVBS encoder\n"); - return ret; - } - - encoder->possible_crtcs = BIT(0); - - drm_connector_attach_encoder(connector, encoder); - - return 0; -} diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 6b9243713b3c..740108a006ba 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -6,7 +6,6 @@ * Dave Airlie */ -#include <linux/console.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/vmalloc.h> @@ -378,7 +377,7 @@ static struct pci_driver mgag200_pci_driver = { static int __init mgag200_init(void) { - if (vgacon_text_force() && mgag200_modeset == -1) + if (drm_firmware_drivers_only() && mgag200_modeset == -1) return -EINVAL; if (mgag200_modeset == 0) diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index fd98e8bbc550..b983541a4c53 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -847,9 +847,11 @@ static void mgag200_handle_damage(struct mga_device *mdev, struct drm_framebuffer *fb, struct drm_rect *clip, const struct dma_buf_map *map) { + void __iomem *dst = mdev->vram; void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */ - drm_fb_memcpy_dstclip(mdev->vram, fb->pitches[0], vmap, fb, clip); + dst += drm_fb_clip_offset(fb->pitches[0], fb->format, clip); + drm_fb_memcpy_toio(dst, fb->pitches[0], vmap, fb, clip); /* Always scanout image at VRAM offset 0 */ mgag200_set_startadd(mdev, (u32)0); diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index ae11061727ff..39197b4beea7 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -4,8 +4,8 @@ config DRM_MSM tristate "MSM DRM" depends on DRM depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST + depends on COMMON_CLK depends on IOMMU_SUPPORT - depends on (OF && COMMON_CLK) || COMPILE_TEST depends on QCOM_OCMEM || QCOM_OCMEM=n depends on QCOM_LLCC || QCOM_LLCC=n depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 40577f8856d8..093454457545 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -23,8 +23,10 @@ msm-y := \ hdmi/hdmi_i2c.o \ hdmi/hdmi_phy.o \ hdmi/hdmi_phy_8960.o \ + hdmi/hdmi_phy_8996.o \ hdmi/hdmi_phy_8x60.o \ hdmi/hdmi_phy_8x74.o \ + hdmi/hdmi_pll_8960.o \ edp/edp.o \ edp/edp_aux.o \ edp/edp_bridge.o \ @@ -37,6 +39,7 @@ msm-y := \ disp/mdp4/mdp4_dtv_encoder.o \ disp/mdp4/mdp4_lcdc_encoder.o \ disp/mdp4/mdp4_lvds_connector.o \ + disp/mdp4/mdp4_lvds_pll.o \ disp/mdp4/mdp4_irq.o \ disp/mdp4/mdp4_kms.o \ disp/mdp4/mdp4_plane.o \ @@ -116,9 +119,6 @@ msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \ dp/dp_audio.o msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o 
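Worth pausing on the mgag200_handle_damage() hunk above: instead of the old drm_fb_memcpy_dstclip() helper computing the destination offset internally, the caller now derives the byte offset of the damaged rectangle with drm_fb_clip_offset() and passes an adjusted pointer to the plain __iomem copy helper. A condensed sketch of that pattern follows; the function name copy_damage_to_vram() is illustrative only, the helper calls mirror the hunk.

#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_rect.h>

/* Copy only the damaged rectangle of a shadow buffer into I/O-mapped VRAM. */
static void copy_damage_to_vram(void __iomem *vram,
                                const struct drm_framebuffer *fb,
                                const struct drm_rect *clip,
                                const void *vmap)
{
        void __iomem *dst = vram;

        /* Skip whole scanlines above the clip and pixels left of it */
        dst += drm_fb_clip_offset(fb->pitches[0], fb->format, clip);

        drm_fb_memcpy_toio(dst, fb->pitches[0], vmap, fb, clip);
}

Computing the offset in the caller keeps the copy helper itself generic, which is the direction the drm_format_helper rework takes across drivers.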
-msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o -msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o -msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 267a880811d6..78aad5216a61 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -1424,17 +1424,24 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu) { struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; struct msm_gpu *gpu = &adreno_gpu->base; - u32 gpu_scid, cntl1_regval = 0; + u32 cntl1_regval = 0; if (IS_ERR(a6xx_gpu->llc_mmio)) return; if (!llcc_slice_activate(a6xx_gpu->llc_slice)) { - gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice); + u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice); gpu_scid &= 0x1f; cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) | (gpu_scid << 15) | (gpu_scid << 20); + + /* On A660, the SCID programming for UCHE traffic is done in + * A6XX_GBIF_SCACHE_CNTL0[14:10] + */ + if (adreno_is_a660_family(adreno_gpu)) + gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) | + (1 << 8), (gpu_scid << 10) | (1 << 8)); } /* @@ -1471,13 +1478,6 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu) } gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval); - - /* On A660, the SCID programming for UCHE traffic is done in - * A6XX_GBIF_SCACHE_CNTL0[14:10] - */ - if (adreno_is_a660_family(adreno_gpu)) - gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) | - (1 << 8), (gpu_scid << 10) | (1 << 8)); } static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu) @@ -1640,7 +1640,7 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu) return (unsigned long)busy_time; } -void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp) +static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index 7501849ed15d..6e90209cd543 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -777,12 +777,12 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu, struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); a6xx_state->gmu_registers = state_kcalloc(a6xx_state, - 2, sizeof(*a6xx_state->gmu_registers)); + 3, sizeof(*a6xx_state->gmu_registers)); if (!a6xx_state->gmu_registers) return; - a6xx_state->nr_gmu_registers = 2; + a6xx_state->nr_gmu_registers = 3; /* Get the CX GMU registers from AHB */ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0], diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c index eb40d8413bca..6d36f63c3338 100644 --- a/drivers/gpu/drm/msm/dp/dp_aux.c +++ b/drivers/gpu/drm/msm/dp/dp_aux.c @@ -33,6 +33,7 @@ struct dp_aux_private { bool read; bool no_send_addr; bool no_send_stop; + bool initted; u32 offset; u32 segment; @@ -331,6 +332,10 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, } mutex_lock(&aux->mutex); + if (!aux->initted) { + ret = -EIO; + goto exit; + } dp_aux_update_offset_and_segment(aux, msg); dp_aux_transfer_helper(aux, msg, true); @@ -380,6 +385,8 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, } aux->cmd_busy = false; + +exit: mutex_unlock(&aux->mutex); return ret; @@ -431,8 +438,13 @@ void dp_aux_init(struct 
drm_dp_aux *dp_aux) aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + mutex_lock(&aux->mutex); + dp_catalog_aux_enable(aux->catalog, true); aux->retry_cnt = 0; + aux->initted = true; + + mutex_unlock(&aux->mutex); } void dp_aux_deinit(struct drm_dp_aux *dp_aux) @@ -441,7 +453,12 @@ void dp_aux_deinit(struct drm_dp_aux *dp_aux) aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + mutex_lock(&aux->mutex); + + aux->initted = false; dp_catalog_aux_enable(aux->catalog, false); + + mutex_unlock(&aux->mutex); } int dp_aux_register(struct drm_dp_aux *dp_aux) diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index 75ae3008b68f..5cd230a5d5d3 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -112,18 +112,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) { struct drm_device *drm = dev_get_drvdata(master); struct msm_drm_private *priv = drm->dev_private; - struct platform_device *pdev = to_platform_device(dev); - struct msm_dsi *msm_dsi; - - DBG(""); - msm_dsi = dsi_init(pdev); - if (IS_ERR(msm_dsi)) { - /* Don't fail the bind if the dsi port is not connected */ - if (PTR_ERR(msm_dsi) == -ENODEV) - return 0; - else - return PTR_ERR(msm_dsi); - } + struct msm_dsi *msm_dsi = dev_get_drvdata(dev); priv->dsi[msm_dsi->id] = msm_dsi; @@ -136,12 +125,8 @@ static void dsi_unbind(struct device *dev, struct device *master, struct drm_device *drm = dev_get_drvdata(master); struct msm_drm_private *priv = drm->dev_private; struct msm_dsi *msm_dsi = dev_get_drvdata(dev); - int id = msm_dsi->id; - if (priv->dsi[id]) { - dsi_destroy(msm_dsi); - priv->dsi[id] = NULL; - } + priv->dsi[msm_dsi->id] = NULL; } static const struct component_ops dsi_ops = { @@ -149,15 +134,40 @@ static const struct component_ops dsi_ops = { .unbind = dsi_unbind, }; -static int dsi_dev_probe(struct platform_device *pdev) +int dsi_dev_attach(struct platform_device *pdev) { return component_add(&pdev->dev, &dsi_ops); } +void dsi_dev_detach(struct platform_device *pdev) +{ + component_del(&pdev->dev, &dsi_ops); +} + +static int dsi_dev_probe(struct platform_device *pdev) +{ + struct msm_dsi *msm_dsi; + + DBG(""); + msm_dsi = dsi_init(pdev); + if (IS_ERR(msm_dsi)) { + /* Don't fail the bind if the dsi port is not connected */ + if (PTR_ERR(msm_dsi) == -ENODEV) + return 0; + else + return PTR_ERR(msm_dsi); + } + + return 0; +} + static int dsi_dev_remove(struct platform_device *pdev) { + struct msm_dsi *msm_dsi = platform_get_drvdata(pdev); + DBG(""); - component_del(&pdev->dev, &dsi_ops); + dsi_destroy(msm_dsi); + return 0; } diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index 569c8ff062ba..bb39e7ca802d 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -118,7 +118,7 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host); unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host); struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host); -int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer); +int msm_dsi_host_register(struct mipi_dsi_host *host); void msm_dsi_host_unregister(struct mipi_dsi_host *host); int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host, struct msm_dsi_phy *src_phy); diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index f69a125f9559..a6893cc45fe4 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ 
b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -1586,6 +1586,10 @@ static int dsi_host_attach(struct mipi_dsi_host *host, if (ret) return ret; + ret = dsi_dev_attach(msm_host->pdev); + if (ret) + return ret; + DBG("id=%d", msm_host->id); if (msm_host->dev) queue_work(msm_host->workqueue, &msm_host->hpd_work); @@ -1598,6 +1602,8 @@ static int dsi_host_detach(struct mipi_dsi_host *host, { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + dsi_dev_detach(msm_host->pdev); + msm_host->device_node = NULL; DBG("id=%d", msm_host->id); @@ -1658,6 +1664,8 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host, if (!prop) { DRM_DEV_DEBUG(dev, "failed to find data lane mapping, using default\n"); + /* Set the number of data lanes to 4 by default. */ + msm_host->num_data_lanes = 4; return 0; } @@ -1931,7 +1939,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, return 0; } -int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer) +int msm_dsi_host_register(struct mipi_dsi_host *host) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); int ret; @@ -1945,20 +1953,6 @@ int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer) return ret; msm_host->registered = true; - - /* If the panel driver has not been probed after host register, - * we should defer the host's probe. - * It makes sure panel is connected when fbcon detects - * connector status and gets the proper display mode to - * create framebuffer. - * Don't try to defer if there is nothing connected to the dsi - * output - */ - if (check_defer && msm_host->device_node) { - if (IS_ERR(of_drm_find_panel(msm_host->device_node))) - if (!of_drm_find_bridge(msm_host->device_node)) - return -EPROBE_DEFER; - } } return 0; diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 20c4d650fd80..01bf8d907933 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -74,7 +74,7 @@ static int dsi_mgr_setup_components(int id) int ret; if (!IS_BONDED_DSI()) { - ret = msm_dsi_host_register(msm_dsi->host, true); + ret = msm_dsi_host_register(msm_dsi->host); if (ret) return ret; @@ -94,10 +94,10 @@ static int dsi_mgr_setup_components(int id) * because only master DSI device adds the panel to global * panel list. The panel's device is the master DSI device.
*/ - ret = msm_dsi_host_register(slave_link_dsi->host, false); + ret = msm_dsi_host_register(slave_link_dsi->host); if (ret) return ret; - ret = msm_dsi_host_register(master_link_dsi->host, true); + ret = msm_dsi_host_register(master_link_dsi->host); if (ret) return ret; diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 09d2d279c30a..dee13fedee3b 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -77,6 +77,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file) goto free_priv; pm_runtime_get_sync(&gpu->pdev->dev); + msm_gpu_hw_init(gpu); show_priv->state = gpu->funcs->gpu_state_get(gpu); pm_runtime_put_sync(&gpu->pdev->dev); diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 7936e8d498dd..892c04365239 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -967,29 +967,18 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data, return ret; } -static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, - struct drm_file *file) +static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id, + ktime_t timeout) { - struct msm_drm_private *priv = dev->dev_private; - struct drm_msm_wait_fence *args = data; - ktime_t timeout = to_ktime(args->timeout); - struct msm_gpu_submitqueue *queue; - struct msm_gpu *gpu = priv->gpu; struct dma_fence *fence; int ret; - if (args->pad) { - DRM_ERROR("invalid pad: %08x\n", args->pad); + if (fence_id > queue->last_fence) { + DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n", + fence_id, queue->last_fence); return -EINVAL; } - if (!gpu) - return 0; - - queue = msm_submitqueue_get(file->driver_priv, args->queueid); - if (!queue) - return -ENOENT; - /* * Map submitqueue scoped "seqno" (which is actually an idr key) * back to underlying dma-fence @@ -1001,7 +990,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, ret = mutex_lock_interruptible(&queue->lock); if (ret) return ret; - fence = idr_find(&queue->fence_idr, args->fence); + fence = idr_find(&queue->fence_idr, fence_id); if (fence) fence = dma_fence_get_rcu(fence); mutex_unlock(&queue->lock); @@ -1017,6 +1006,32 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, } dma_fence_put(fence); + + return ret; +} + +static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct msm_drm_private *priv = dev->dev_private; + struct drm_msm_wait_fence *args = data; + struct msm_gpu_submitqueue *queue; + int ret; + + if (args->pad) { + DRM_ERROR("invalid pad: %08x\n", args->pad); + return -EINVAL; + } + + if (!priv->gpu) + return 0; + + queue = msm_submitqueue_get(file->driver_priv, args->queueid); + if (!queue) + return -ENOENT; + + ret = wait_fence(queue, args->fence, to_ktime(args->timeout)); + msm_submitqueue_put(queue); return ret; diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 69952b239384..eb984d925f4d 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -343,6 +343,8 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev, struct msm_dsi; #ifdef CONFIG_DRM_MSM_DSI +int dsi_dev_attach(struct platform_device *pdev); +void dsi_dev_detach(struct platform_device *pdev); void __init msm_dsi_register(void); void __exit msm_dsi_unregister(void); int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, diff --git a/drivers/gpu/drm/msm/msm_gem.c 
b/drivers/gpu/drm/msm/msm_gem.c index 104fdfc14027..02b9ae65a96a 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -866,23 +866,11 @@ int msm_gem_cpu_fini(struct drm_gem_object *obj) } #ifdef CONFIG_DEBUG_FS -static void describe_fence(struct dma_fence *fence, const char *type, - struct seq_file *m) -{ - if (!dma_fence_is_signaled(fence)) - seq_printf(m, "\t%9s: %s %s seq %llu\n", type, - fence->ops->get_driver_name(fence), - fence->ops->get_timeline_name(fence), - fence->seqno); -} - void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, struct msm_gem_stats *stats) { struct msm_gem_object *msm_obj = to_msm_bo(obj); struct dma_resv *robj = obj->resv; - struct dma_resv_list *fobj; - struct dma_fence *fence; struct msm_gem_vma *vma; uint64_t off = drm_vma_node_start(&obj->vma_node); const char *madv; @@ -956,22 +944,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, seq_puts(m, "\n"); } - rcu_read_lock(); - fobj = dma_resv_shared_list(robj); - if (fobj) { - unsigned int i, shared_count = fobj->shared_count; - - for (i = 0; i < shared_count; i++) { - fence = rcu_dereference(fobj->shared[i]); - describe_fence(fence, "Shared", m); - } - } - - fence = dma_resv_excl_fence(robj); - if (fence) - describe_fence(fence, "Exclusive", m); - rcu_read_unlock(); - + dma_resv_describe(robj, m); msm_gem_unlock(obj); } @@ -1056,8 +1029,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct { struct msm_gem_object *msm_obj = to_msm_bo(obj); - vma->vm_flags &= ~VM_PFNMAP; - vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND; + vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP; vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags)); return 0; @@ -1121,7 +1093,7 @@ static int msm_gem_new_impl(struct drm_device *dev, break; fallthrough; default: - DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n", + DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n", (flags & MSM_BO_CACHE_MASK)); return -EINVAL; } diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c index 4a1420b05e97..086dacf2f26a 100644 --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c @@ -5,6 +5,7 @@ */ #include <linux/vmalloc.h> +#include <linux/sched/mm.h> #include "msm_drv.h" #include "msm_gem.h" diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 3cb029f10925..282628d6b72c 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -772,6 +772,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, args->nr_cmds); if (IS_ERR(submit)) { ret = PTR_ERR(submit); + submit = NULL; goto out_unlock; } @@ -904,6 +905,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, drm_sched_entity_push_job(&submit->base); args->fence = submit->fence_id; + queue->last_fence = submit->fence_id; msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs); msm_process_post_deps(post_deps, args->nr_out_syncobjs, diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 59cdd00b69d0..48ea2de911f1 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -359,6 +359,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio, * @ring_nr: the ringbuffer used by this submitqueue, which is determined * by the submitqueue's priority * @faults: the number of GPU hangs associated with this submitqueue + * 
@last_fence: the sequence number of the last allocated fence (for error + * checking) * @ctx: the per-drm_file context associated with the submitqueue (ie. * which set of pgtables do submits jobs associated with the * submitqueue use) @@ -374,6 +376,7 @@ struct msm_gpu_submitqueue { u32 flags; u32 ring_nr; int faults; + uint32_t last_fence; struct msm_file_private *ctx; struct list_head node; struct idr fence_idr; diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c index 8b7473f69cb8..384e90c4b2a7 100644 --- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c +++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c @@ -20,6 +20,10 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq, struct msm_gpu *gpu = dev_to_gpu(dev); struct dev_pm_opp *opp; + /* + * Note that devfreq_recommended_opp() can modify the freq + * to something that actually is in the opp table: + */ opp = devfreq_recommended_opp(dev, freq, flags); /* @@ -28,6 +32,7 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq, */ if (gpu->devfreq.idle_freq) { gpu->devfreq.idle_freq = *freq; + dev_pm_opp_put(opp); return 0; } @@ -203,9 +208,6 @@ static void msm_devfreq_idle_work(struct kthread_work *work) struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq); unsigned long idle_freq, target_freq = 0; - if (!df->devfreq) - return; - /* * Hold devfreq lock to synchronize with get_dev_status()/ * target() callbacks @@ -227,6 +229,9 @@ void msm_devfreq_idle(struct msm_gpu *gpu) { struct msm_gpu_devfreq *df = &gpu->devfreq; + if (!df->devfreq) + return; + msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1), - HRTIMER_MODE_ABS); + HRTIMER_MODE_REL); } diff --git a/drivers/gpu/drm/mxsfb/Kconfig b/drivers/gpu/drm/mxsfb/Kconfig index ee22cd25d3e3..987170e16ebd 100644 --- a/drivers/gpu/drm/mxsfb/Kconfig +++ b/drivers/gpu/drm/mxsfb/Kconfig @@ -10,7 +10,7 @@ config DRM_MXSFB depends on COMMON_CLK select DRM_MXS select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER select DRM_PANEL select DRM_PANEL_BRIDGE help diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c index 7739f46470d3..99fee4d8cd31 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c @@ -205,7 +205,7 @@ nv04_display_destroy(struct drm_device *dev) nvif_notify_dtor(&disp->flip); nouveau_display(dev)->priv = NULL; - kfree(disp); + vfree(disp); nvif_object_unmap(&drm->client.device.object); } @@ -223,7 +223,7 @@ nv04_display_create(struct drm_device *dev) struct nv04_display *disp; int i, ret; - disp = kzalloc(sizeof(*disp), GFP_KERNEL); + disp = vzalloc(sizeof(*disp)); if (!disp) return -ENOMEM; diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild index 4488e1c061b3..28be2912ff74 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/Kbuild +++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild @@ -13,6 +13,7 @@ nouveau-y += dispnv50/corec57d.o nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crc.o nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crc907d.o nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crcc37d.o +nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crcc57d.o nouveau-y += dispnv50/dac507d.o nouveau-y += dispnv50/dac907d.o diff --git a/drivers/gpu/drm/nouveau/dispnv50/base907c.c b/drivers/gpu/drm/nouveau/dispnv50/base907c.c index 5396e3707cc4..e6b0417c325b 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/base907c.c +++ b/drivers/gpu/drm/nouveau/dispnv50/base907c.c @@ -103,12 +103,9 @@ base907c_xlut_set(struct 
nv50_wndw *wndw, struct nv50_wndw_atom *asyw) return 0; } -static bool +static void base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size) { - if (size != 256 && size != 1024) - return false; - if (size == 1024) asyw->xlut.i.mode = NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE; else @@ -116,7 +113,6 @@ base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size) asyw->xlut.i.enable = NV907C_SET_BASE_LUT_LO_ENABLE_ENABLE; asyw->xlut.i.load = head907d_olut_load; - return true; } static inline u32 diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c index 75876546eac1..53b1e2a569c1 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c @@ -69,7 +69,7 @@ corec57d = { .head = &headc57d, .sor = &sorc37d, #if IS_ENABLED(CONFIG_DEBUG_FS) - .crc = &crcc37d, + .crc = &crcc57d, #endif }; diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.c b/drivers/gpu/drm/nouveau/dispnv50/crc.c index 66f32d965c72..29428e770f14 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/crc.c +++ b/drivers/gpu/drm/nouveau/dispnv50/crc.c @@ -84,7 +84,10 @@ static void nv50_crc_ctx_flip_work(struct kthread_work *base) struct nv50_crc *crc = container_of(work, struct nv50_crc, flip_work); struct nv50_head *head = container_of(crc, struct nv50_head, crc); struct drm_crtc *crtc = &head->base.base; - struct nv50_disp *disp = nv50_disp(crtc->dev); + struct drm_device *dev = crtc->dev; + struct nv50_disp *disp = nv50_disp(dev); + const uint64_t start_vbl = drm_crtc_vblank_count(crtc); + uint64_t end_vbl; u8 new_idx = crc->ctx_idx ^ 1; /* @@ -92,23 +95,24 @@ static void nv50_crc_ctx_flip_work(struct kthread_work *base) * try again for the next vblank if we don't grab the lock */ if (!mutex_trylock(&disp->mutex)) { - DRM_DEV_DEBUG_KMS(crtc->dev->dev, - "Lock contended, delaying CRC ctx flip for head-%d\n", - head->base.index); - drm_vblank_work_schedule(work, - drm_crtc_vblank_count(crtc) + 1, - true); + drm_dbg_kms(dev, "Lock contended, delaying CRC ctx flip for %s\n", crtc->name); + drm_vblank_work_schedule(work, start_vbl + 1, true); return; } - DRM_DEV_DEBUG_KMS(crtc->dev->dev, - "Flipping notifier ctx for head %d (%d -> %d)\n", - drm_crtc_index(crtc), crc->ctx_idx, new_idx); + drm_dbg_kms(dev, "Flipping notifier ctx for %s (%d -> %d)\n", + crtc->name, crc->ctx_idx, new_idx); nv50_crc_program_ctx(head, NULL); nv50_crc_program_ctx(head, &crc->ctx[new_idx]); mutex_unlock(&disp->mutex); + end_vbl = drm_crtc_vblank_count(crtc); + if (unlikely(end_vbl != start_vbl)) + NV_ERROR(nouveau_drm(dev), + "Failed to flip CRC context on %s on time (%llu > %llu)\n", + crtc->name, end_vbl, start_vbl); + spin_lock_irq(&crc->lock); crc->ctx_changed = true; spin_unlock_irq(&crc->lock); @@ -189,9 +193,9 @@ void nv50_crc_handle_vblank(struct nv50_head *head) * updates back-to-back without waiting, we'll just be * optimistic and assume we always miss exactly one frame. 
*/ - DRM_DEV_DEBUG_KMS(head->base.base.dev->dev, - "Notifier ctx flip for head-%d finished, lost CRC for frame %llu\n", - head->base.index, crc->frame); + drm_dbg_kms(head->base.base.dev, + "Notifier ctx flip for head-%d finished, lost CRC for frame %llu\n", + head->base.index, crc->frame); crc->frame++; nv50_crc_reset_ctx(ctx); @@ -347,8 +351,6 @@ int nv50_crc_atomic_check_head(struct nv50_head *head, struct nv50_head_atom *armh) { struct nv50_atom *atom = nv50_atom(asyh->state.state); - struct drm_device *dev = head->base.base.dev; - struct nv50_disp *disp = nv50_disp(dev); bool changed = armh->crc.src != asyh->crc.src; if (!armh->crc.src && !asyh->crc.src) { @@ -357,30 +359,7 @@ int nv50_crc_atomic_check_head(struct nv50_head *head, return 0; } - /* While we don't care about entry tags, Volta+ hw always needs the - * controlling wndw channel programmed to a wndw that's owned by our - * head - */ - if (asyh->crc.src && disp->disp->object.oclass >= GV100_DISP && - !(BIT(asyh->crc.wndw) & asyh->wndw.owned)) { - if (!asyh->wndw.owned) { - /* TODO: once we support flexible channel ownership, - * we should write some code here to handle attempting - * to "steal" a plane: e.g. take a plane that is - * currently not-visible and owned by another head, - * and reassign it to this head. If we fail to do so, - * we shuld reject the mode outright as CRC capture - * then becomes impossible. - */ - NV_ATOMIC(nouveau_drm(dev), - "No available wndws for CRC readback\n"); - return -EINVAL; - } - asyh->crc.wndw = ffs(asyh->wndw.owned) - 1; - } - - if (drm_atomic_crtc_needs_modeset(&asyh->state) || changed || - armh->crc.wndw != asyh->crc.wndw) { + if (drm_atomic_crtc_needs_modeset(&asyh->state) || changed) { asyh->clr.crc = armh->crc.src && armh->state.active; asyh->set.crc = asyh->crc.src && asyh->state.active; if (changed) @@ -467,9 +446,8 @@ void nv50_crc_atomic_set(struct nv50_head *head, struct nouveau_encoder *outp = nv50_real_outp(nv50_head_atom_get_encoder(asyh)); - func->set_src(head, outp->or, - nv50_crc_source_type(outp, asyh->crc.src), - &crc->ctx[crc->ctx_idx], asyh->crc.wndw); + func->set_src(head, outp->or, nv50_crc_source_type(outp, asyh->crc.src), + &crc->ctx[crc->ctx_idx]); } void nv50_crc_atomic_clr(struct nv50_head *head) @@ -477,7 +455,7 @@ void nv50_crc_atomic_clr(struct nv50_head *head) const struct nv50_crc_func *func = nv50_disp(head->base.base.dev)->core->func->crc; - func->set_src(head, 0, NV50_CRC_SOURCE_TYPE_NONE, NULL, 0); + func->set_src(head, 0, NV50_CRC_SOURCE_TYPE_NONE, NULL); } static inline int diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.h b/drivers/gpu/drm/nouveau/dispnv50/crc.h index 4fce871b04c8..4823f1fde2dd 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/crc.h +++ b/drivers/gpu/drm/nouveau/dispnv50/crc.h @@ -45,13 +45,11 @@ struct nv50_crc_notifier_ctx { struct nv50_crc_atom { enum nv50_crc_source src; - /* Only used for gv100+ */ - u8 wndw : 4; }; struct nv50_crc_func { - int (*set_src)(struct nv50_head *, int or, enum nv50_crc_source_type, - struct nv50_crc_notifier_ctx *, u32 wndw); + int (*set_src)(struct nv50_head *, int or, enum nv50_crc_source_type type, + struct nv50_crc_notifier_ctx *ctx); int (*set_ctx)(struct nv50_head *, struct nv50_crc_notifier_ctx *); u32 (*get_entry)(struct nv50_head *, struct nv50_crc_notifier_ctx *, enum nv50_crc_source, int idx); @@ -95,6 +93,7 @@ void nv50_crc_atomic_clr(struct nv50_head *); extern const struct nv50_crc_func crc907d; extern const struct nv50_crc_func crcc37d; +extern const struct nv50_crc_func crcc57d; #else 
/* IS_ENABLED(CONFIG_DEBUG_FS) */ struct nv50_crc {}; diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc907d.c b/drivers/gpu/drm/nouveau/dispnv50/crc907d.c index 0fb0fdb9f119..f9ad641555b7 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/crc907d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/crc907d.c @@ -23,9 +23,8 @@ struct crc907d_notifier { } __packed; static int -crc907d_set_src(struct nv50_head *head, int or, - enum nv50_crc_source_type source, - struct nv50_crc_notifier_ctx *ctx, u32 wndw) +crc907d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source, + struct nv50_crc_notifier_ctx *ctx) { struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push; const int i = head->base.index; @@ -33,7 +32,8 @@ crc907d_set_src(struct nv50_head *head, int or, NVDEF(NV907D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) | NVDEF(NV907D, HEAD_SET_CRC_CONTROL, TIMESTAMP_MODE, FALSE) | NVDEF(NV907D, HEAD_SET_CRC_CONTROL, SECONDARY_OUTPUT, NONE) | - NVDEF(NV907D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE); + NVDEF(NV907D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE) | + NVDEF(NV907D, HEAD_SET_CRC_CONTROL, WIDE_PIPE_CRC, ENABLE); int ret; switch (source) { diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c index 814e5bd97446..f10f6c484408 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c @@ -2,6 +2,7 @@ #include <drm/drm_crtc.h> #include "crc.h" +#include "crcc37d.h" #include "core.h" #include "disp.h" #include "head.h" @@ -10,38 +11,13 @@ #include <nvhw/class/clc37d.h> -#define CRCC37D_MAX_ENTRIES 2047 - -struct crcc37d_notifier { - u32 status; - - /* reserved */ - u32 :32; - u32 :32; - u32 :32; - u32 :32; - u32 :32; - u32 :32; - u32 :32; - - struct crcc37d_entry { - u32 status[2]; - u32 :32; /* reserved */ - u32 compositor_crc; - u32 rg_crc; - u32 output_crc[2]; - u32 :32; /* reserved */ - } entries[CRCC37D_MAX_ENTRIES]; -} __packed; - static int -crcc37d_set_src(struct nv50_head *head, int or, - enum nv50_crc_source_type source, - struct nv50_crc_notifier_ctx *ctx, u32 wndw) +crcc37d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source, + struct nv50_crc_notifier_ctx *ctx) { struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push; const int i = head->base.index; - u32 crc_args = NVVAL(NVC37D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, wndw) | + u32 crc_args = NVVAL(NVC37D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, i * 4) | NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) | NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, SECONDARY_CRC, NONE) | NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE); @@ -75,8 +51,7 @@ crcc37d_set_src(struct nv50_head *head, int or, return 0; } -static int -crcc37d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx) +int crcc37d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx) { struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push; const int i = head->base.index; @@ -89,9 +64,8 @@ crcc37d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx) return 0; } -static u32 crcc37d_get_entry(struct nv50_head *head, - struct nv50_crc_notifier_ctx *ctx, - enum nv50_crc_source source, int idx) +u32 crcc37d_get_entry(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx, + enum nv50_crc_source source, int idx) { struct crcc37d_notifier __iomem *notifier = ctx->mem.object.map.ptr; struct crcc37d_entry __iomem *entry = 
&notifier->entries[idx]; @@ -105,8 +79,7 @@ static u32 crcc37d_get_entry(struct nv50_head *head, return ioread32_native(crc_addr); } -static bool crcc37d_ctx_finished(struct nv50_head *head, - struct nv50_crc_notifier_ctx *ctx) +bool crcc37d_ctx_finished(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx) { struct nouveau_drm *drm = nouveau_drm(head->base.base.dev); struct crcc37d_notifier __iomem *notifier = ctx->mem.object.map.ptr; @@ -148,7 +121,7 @@ const struct nv50_crc_func crcc37d = { .set_ctx = crcc37d_set_ctx, .get_entry = crcc37d_get_entry, .ctx_finished = crcc37d_ctx_finished, - .flip_threshold = CRCC37D_MAX_ENTRIES - 30, + .flip_threshold = CRCC37D_FLIP_THRESHOLD, .num_entries = CRCC37D_MAX_ENTRIES, .notifier_len = sizeof(struct crcc37d_notifier), }; diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcc37d.h b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.h new file mode 100644 index 000000000000..5775137b832d --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef __CRCC37D_H__ +#define __CRCC37D_H__ + +#include <linux/types.h> + +#include "crc.h" + +#define CRCC37D_MAX_ENTRIES 2047 +#define CRCC37D_FLIP_THRESHOLD (CRCC37D_MAX_ENTRIES - 30) + +struct crcc37d_notifier { + u32 status; + + /* reserved */ + u32:32; + u32:32; + u32:32; + u32:32; + u32:32; + u32:32; + u32:32; + + struct crcc37d_entry { + u32 status[2]; + u32:32; /* reserved */ + u32 compositor_crc; + u32 rg_crc; + u32 output_crc[2]; + u32:32; /* reserved */ + } entries[CRCC37D_MAX_ENTRIES]; +} __packed; + +int crcc37d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx); +u32 crcc37d_get_entry(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx, + enum nv50_crc_source source, int idx); +bool crcc37d_ctx_finished(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx); + +#endif /* !__CRCC37D_H__ */ diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcc57d.c b/drivers/gpu/drm/nouveau/dispnv50/crcc57d.c new file mode 100644 index 000000000000..cc0130e3d496 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/crcc57d.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: MIT + +#include "crc.h" +#include "crcc37d.h" +#include "core.h" +#include "disp.h" +#include "head.h" + +#include <nvif/pushc37b.h> + +#include <nvhw/class/clc57d.h> + +static int crcc57d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source, + struct nv50_crc_notifier_ctx *ctx) +{ + struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push; + const int i = head->base.index; + u32 crc_args = NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) | + NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) | + NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, SECONDARY_CRC, NONE) | + NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE); + int ret; + + switch (source) { + case NV50_CRC_SOURCE_TYPE_SOR: + crc_args |= NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, SOR(or)); + break; + case NV50_CRC_SOURCE_TYPE_SF: + crc_args |= NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, SF); + break; + default: + break; + } + + ret = PUSH_WAIT(push, 4); + if (ret) + return ret; + + if (source) { + PUSH_MTHD(push, NVC57D, HEAD_SET_CONTEXT_DMA_CRC(i), ctx->ntfy.handle); + PUSH_MTHD(push, NVC57D, HEAD_SET_CRC_CONTROL(i), crc_args); + } else { + PUSH_MTHD(push, NVC57D, HEAD_SET_CRC_CONTROL(i), 0); + PUSH_MTHD(push, NVC57D, HEAD_SET_CONTEXT_DMA_CRC(i), 0); + } + + return 0; +} + +const struct nv50_crc_func crcc57d = { + .set_src =
crcc57d_set_src, + .set_ctx = crcc37d_set_ctx, + .get_entry = crcc37d_get_entry, + .ctx_finished = crcc37d_ctx_finished, + .flip_threshold = CRCC37D_FLIP_THRESHOLD, + .num_entries = CRCC37D_MAX_ENTRIES, + .notifier_len = sizeof(struct crcc37d_notifier), +}; diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c index 54fbd6fe751d..00e19fd959ea 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c +++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c @@ -98,6 +98,7 @@ static int curs507a_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, struct nv50_head_atom *asyh) { + struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev); struct nv50_head *head = nv50_head(asyw->state.crtc); int ret; @@ -109,8 +110,20 @@ curs507a_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, if (ret || !asyh->curs.visible) return ret; - if (asyw->image.w != asyw->image.h) + if (asyw->state.crtc_w != asyw->state.crtc_h) { + NV_ATOMIC(drm, "Plane width/height must be equal for cursors\n"); return -EINVAL; + } + + if (asyw->image.w != asyw->state.crtc_w) { + NV_ATOMIC(drm, "Plane width must be equal to fb width for cursors (height can be larger though)\n"); + return -EINVAL; + } + + if (asyw->state.src_x || asyw->state.src_y) { + NV_ATOMIC(drm, "Cursor planes do not support framebuffer offsets\n"); + return -EINVAL; + } ret = head->func->curs_layout(head, asyw, asyh); if (ret) diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 8e28403ea9b1..ae1f41205520 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -852,6 +852,9 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc, ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi, &nv_connector->base, mode); if (!ret) { + drm_hdmi_avi_infoframe_quant_range(&avi_frame.avi, + &nv_connector->base, mode, + HDMI_QUANTIZATION_RANGE_FULL); /* We have an AVI InfoFrame, populate it to the display */ args.pwr.avi_infoframe_length = hdmi_infoframe_pack(&avi_frame, args.infoframes, 17); @@ -1387,12 +1390,11 @@ nv50_mstm_cleanup(struct nv50_mstm *mstm) { struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev); struct drm_encoder *encoder; - int ret; NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name); - ret = drm_dp_check_act_status(&mstm->mgr); + drm_dp_check_act_status(&mstm->mgr); - ret = drm_dp_update_payload_part2(&mstm->mgr); + drm_dp_update_payload_part2(&mstm->mgr); drm_for_each_encoder(encoder, mstm->outp->base.base.dev) { if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) { @@ -1411,10 +1413,9 @@ nv50_mstm_prepare(struct nv50_mstm *mstm) { struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev); struct drm_encoder *encoder; - int ret; NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name); - ret = drm_dp_update_payload_part1(&mstm->mgr, 1); + drm_dp_update_payload_part1(&mstm->mgr, 1); drm_for_each_encoder(encoder, mstm->outp->base.base.dev) { if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) { diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c index 72099d1e4816..c3c57be54e1c 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head.c @@ -226,10 +226,24 @@ static int nv50_head_atomic_check_lut(struct nv50_head *head, struct nv50_head_atom *asyh) { - struct nv50_disp *disp = nv50_disp(head->base.base.dev); - struct drm_property_blob *olut = 
asyh->state.gamma_lut; + struct drm_device *dev = head->base.base.dev; + struct drm_crtc *crtc = &head->base.base; + struct nv50_disp *disp = nv50_disp(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct drm_property_blob *olut = asyh->state.gamma_lut, + *ilut = asyh->state.degamma_lut; int size; + /* Ensure that the ilut is valid */ + if (ilut) { + size = drm_color_lut_size(ilut); + if (!head->func->ilut_check(size)) { + NV_ATOMIC(drm, "Invalid size %d for degamma on [CRTC:%d:%s]\n", + size, crtc->base.id, crtc->name); + return -EINVAL; + } + } + /* Determine whether core output LUT should be enabled. */ if (olut) { /* Check if any window(s) have stolen the core output LUT @@ -256,7 +270,8 @@ nv50_head_atomic_check_lut(struct nv50_head *head, } if (!head->func->olut(head, asyh, size)) { - DRM_DEBUG_KMS("Invalid olut\n"); + NV_ATOMIC(drm, "Invalid size %d for gamma on [CRTC:%d:%s]\n", + size, crtc->base.id, crtc->name); return -EINVAL; } asyh->olut.handle = disp->core->chan.vram.handle; @@ -330,8 +345,17 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) struct drm_connector_state *conns; struct drm_connector *conn; int i, ret; + bool check_lut = asyh->state.color_mgmt_changed || + memcmp(&armh->wndw, &asyh->wndw, sizeof(asyh->wndw)); NV_ATOMIC(drm, "%s atomic_check %d\n", crtc->name, asyh->state.active); + + if (check_lut) { + ret = nv50_head_atomic_check_lut(head, asyh); + if (ret) + return ret; + } + if (asyh->state.active) { for_each_new_connector_in_state(asyh->state.state, conn, conns, i) { if (conns->crtc == crtc) { @@ -357,14 +381,8 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) if (asyh->state.mode_changed || asyh->state.connectors_changed) nv50_head_atomic_check_mode(head, asyh); - if (asyh->state.color_mgmt_changed || - memcmp(&armh->wndw, &asyh->wndw, sizeof(asyh->wndw))) { - int ret = nv50_head_atomic_check_lut(head, asyh); - if (ret) - return ret; - + if (check_lut) asyh->olut.visible = asyh->olut.handle != 0; - } if (asyc) { if (asyc->set.scaler) diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h index 0bac6be9ba34..41c8788dfb31 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.h +++ b/drivers/gpu/drm/nouveau/dispnv50/head.h @@ -29,6 +29,7 @@ struct nv50_head_func { int (*view)(struct nv50_head *, struct nv50_head_atom *); int (*mode)(struct nv50_head *, struct nv50_head_atom *); bool (*olut)(struct nv50_head *, struct nv50_head_atom *, int); + bool (*ilut_check)(int size); bool olut_identity; int olut_size; int (*olut_set)(struct nv50_head *, struct nv50_head_atom *); @@ -71,6 +72,7 @@ extern const struct nv50_head_func head907d; int head907d_view(struct nv50_head *, struct nv50_head_atom *); int head907d_mode(struct nv50_head *, struct nv50_head_atom *); bool head907d_olut(struct nv50_head *, struct nv50_head_atom *, int); +bool head907d_ilut_check(int size); int head907d_olut_set(struct nv50_head *, struct nv50_head_atom *); int head907d_olut_clr(struct nv50_head *); int head907d_core_set(struct nv50_head *, struct nv50_head_atom *); diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c index 85648d790743..18fe4c1e2d6a 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head907d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c @@ -314,6 +314,11 @@ head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size) return true; } +bool head907d_ilut_check(int size) +{ + return size == 256 || size == 
1024; +} + int head907d_mode(struct nv50_head *head, struct nv50_head_atom *asyh) { @@ -409,6 +414,7 @@ head907d = { .view = head907d_view, .mode = head907d_mode, .olut = head907d_olut, + .ilut_check = head907d_ilut_check, .olut_size = 1024, .olut_set = head907d_olut_set, .olut_clr = head907d_olut_clr, diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c b/drivers/gpu/drm/nouveau/dispnv50/head917d.c index ea9f8667305e..4ce47b55f72c 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head917d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c @@ -119,6 +119,7 @@ head917d = { .view = head907d_view, .mode = head907d_mode, .olut = head907d_olut, + .ilut_check = head907d_ilut_check, .olut_size = 1024, .olut_set = head907d_olut_set, .olut_clr = head907d_olut_clr, diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c index 63adfeba50e5..a4a3b78ea42c 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c @@ -285,6 +285,7 @@ headc37d = { .view = headc37d_view, .mode = headc37d_mode, .olut = headc37d_olut, + .ilut_check = head907d_ilut_check, .olut_size = 1024, .olut_set = headc37d_olut_set, .olut_clr = headc37d_olut_clr, diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c index fd51527b56b8..543f08ceaad6 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c @@ -169,7 +169,7 @@ headc57d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem) writew(readw(mem - 4), mem + 4); } -bool +static bool headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size) { if (size != 0 && size != 256 && size != 1024) @@ -236,6 +236,7 @@ headc57d = { .view = headc37d_view, .mode = headc57d_mode, .olut = headc57d_olut, + .ilut_check = head907d_ilut_check, .olut_identity = true, .olut_size = 1024, .olut_set = headc57d_olut_set, diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index 8d048bacd6f0..133c8736426a 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -403,10 +403,7 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw, /* Recalculate LUT state. */ memset(&asyw->xlut, 0x00, sizeof(asyw->xlut)); if ((asyw->ilut = wndw->func->ilut ? 
ilut : NULL)) { - if (!wndw->func->ilut(wndw, asyw, drm_color_lut_size(ilut))) { - DRM_DEBUG_KMS("Invalid ilut\n"); - return -EINVAL; - } + wndw->func->ilut(wndw, asyw, drm_color_lut_size(ilut)); asyw->xlut.handle = wndw->wndw.vram.handle; asyw->xlut.i.buffer = !asyw->xlut.i.buffer; asyw->set.xlut = true; @@ -539,6 +536,8 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) struct nouveau_bo *nvbo; struct nv50_head_atom *asyh; struct nv50_wndw_ctxdma *ctxdma; + struct dma_resv_iter cursor; + struct dma_fence *fence; int ret; NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, fb); @@ -561,7 +560,13 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) asyw->image.handle[0] = ctxdma->object.handle; } - asyw->state.fence = dma_resv_get_excl_unlocked(nvbo->bo.base.resv); + dma_resv_iter_begin(&cursor, nvbo->bo.base.resv, false); + dma_resv_for_each_fence_unlocked(&cursor, fence) { + /* TODO: We only use the first writer here */ + asyw->state.fence = dma_fence_get(fence); + break; + } + dma_resv_iter_end(&cursor); asyw->image.offset[0] = nvbo->offset; if (wndw->func->prepare) { diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h index f4e0c5080034..9c9f2c2a71a5 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h @@ -64,7 +64,7 @@ struct nv50_wndw_func { int (*ntfy_clr)(struct nv50_wndw *); int (*ntfy_wait_begun)(struct nouveau_bo *, u32 offset, struct nvif_device *); - bool (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *, int); + void (*ilut)(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyh, int size); void (*csc)(struct nv50_wndw *, struct nv50_wndw_atom *, const struct drm_color_ctm *); int (*csc_set)(struct nv50_wndw *, struct nv50_wndw_atom *); @@ -129,7 +129,7 @@ int wndwc37e_update(struct nv50_wndw *, u32 *); int wndwc57e_new(struct nouveau_drm *, enum drm_plane_type, int, s32, struct nv50_wndw **); -bool wndwc57e_ilut(struct nv50_wndw *, struct nv50_wndw_atom *, int); +void wndwc57e_ilut(struct nv50_wndw *, struct nv50_wndw_atom *, int); int wndwc57e_ilut_set(struct nv50_wndw *, struct nv50_wndw_atom *); int wndwc57e_ilut_clr(struct nv50_wndw *); int wndwc57e_csc_set(struct nv50_wndw *, struct nv50_wndw_atom *); diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c index 57df997c5ff3..183d2c0e65b6 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c @@ -82,18 +82,14 @@ wndwc37e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) return 0; } -static bool +static void wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size) { - if (size != 256 && size != 1024) - return false; - asyw->xlut.i.size = size == 1024 ? 
NVC37E_SET_CONTROL_INPUT_LUT_SIZE_SIZE_1025 : NVC37E_SET_CONTROL_INPUT_LUT_SIZE_SIZE_257; asyw->xlut.i.range = NVC37E_SET_CONTROL_INPUT_LUT_RANGE_UNITY; asyw->xlut.i.output_mode = NVC37E_SET_CONTROL_INPUT_LUT_OUTPUT_MODE_INTERPOLATE; asyw->xlut.i.load = head907d_olut_load; - return true; } int diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c index abdd3bb658b3..37f6da8b3f2a 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c @@ -179,11 +179,11 @@ wndwc57e_ilut_load(struct drm_color_lut *in, int size, void __iomem *mem) writew(readw(mem - 4), mem + 4); } -bool +void wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size) { - if (size = size ? size : 1024, size != 256 && size != 1024) - return false; + if (!size) + size = 1024; if (size == 256) asyw->xlut.i.mode = NVC57E_SET_ILUT_CONTROL_MODE_DIRECT8; @@ -193,7 +193,6 @@ wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size) asyw->xlut.i.size = 4 /* VSS header. */ + size + 1 /* Entries. */; asyw->xlut.i.output_mode = NVC57E_SET_ILUT_CONTROL_INTERPOLATE_DISABLE; asyw->xlut.i.load = wndwc57e_ilut_load; - return true; } /**************************************************************** diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h index 79aff6ff3138..f972ef1409f4 100644 --- a/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h +++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h @@ -246,6 +246,9 @@ #define NV907D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 5:5 #define NV907D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000) #define NV907D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001) +#define NV907D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC 6:6 +#define NV907D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_DISABLE (0x00000000) +#define NV907D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_ENABLE (0x00000001) #define NV907D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00000438 + (a)*0x00000300) #define NV907D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0 #define NV907D_HEAD_SET_OUTPUT_LUT_LO(a) (0x00000448 + (a)*0x00000300) diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc57d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc57d.h index d83ac815e06c..d4bad2da3e56 100644 --- a/drivers/gpu/drm/nouveau/include/nvhw/class/clc57d.h +++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clc57d.h @@ -265,6 +265,75 @@ #define NVC57D_HEAD_SET_RASTER_BLANK_START(a) (0x00002070 + (a)*0x00000400) #define NVC57D_HEAD_SET_RASTER_BLANK_START_X 14:0 #define NVC57D_HEAD_SET_RASTER_BLANK_START_Y 30:16 +#define NVC57D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00002180 + (a)*0x00000400) +#define NVC57D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0 +#define NVC57D_HEAD_SET_CRC_CONTROL(a) (0x00002184 + (a)*0x00000400) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 5:0 +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_0 (0x00000000) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_1 (0x00000001) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_2 (0x00000002) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_3 (0x00000003) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_4 (0x00000004) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_5 (0x00000005) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_6 (0x00000006) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_7 (0x00000007) +#define 
NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_8 (0x00000008) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_9 (0x00000009) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_10 (0x0000000A) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_11 (0x0000000B) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_12 (0x0000000C) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_13 (0x0000000D) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_14 (0x0000000E) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_15 (0x0000000F) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_16 (0x00000010) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_17 (0x00000011) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_18 (0x00000012) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_19 (0x00000013) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_20 (0x00000014) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_21 (0x00000015) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_22 (0x00000016) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_23 (0x00000017) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_24 (0x00000018) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_25 (0x00000019) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_26 (0x0000001A) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_27 (0x0000001B) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_28 (0x0000001C) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_29 (0x0000001D) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_30 (0x0000001E) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_31 (0x0000001F) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000020) +#define NVC57D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 8:8 +#define NVC57D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000) +#define NVC57D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC 19:12 +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_NONE (0x00000000) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF (0x00000030) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(i) (0x00000050 +(i)) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR__SIZE_1 8 +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR0 (0x00000050) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR1 (0x00000051) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR2 (0x00000052) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR3 (0x00000053) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR4 (0x00000054) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR5 (0x00000055) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR6 (0x00000056) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR7 (0x00000057) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC 27:20 +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_NONE (0x00000000) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SF (0x00000030) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR(i) (0x00000050 +(i)) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR__SIZE_1 8 +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR0 (0x00000050) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR1 (0x00000051) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR2 (0x00000052) +#define 
NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR3 (0x00000053) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR4 (0x00000054) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR5 (0x00000055) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR6 (0x00000056) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR7 (0x00000057) +#define NVC57D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 9:9 +#define NVC57D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001) #define NVC57D_HEAD_SET_OLUT_CONTROL(a) (0x00002280 + (a)*0x00000400) #define NVC57D_HEAD_SET_OLUT_CONTROL_INTERPOLATE 0:0 #define NVC57D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index 1cbd71abc80a..ae2f2abc8f5a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -308,7 +308,10 @@ nv50_backlight_init(struct nouveau_backlight *bl, if (ret < 0) return ret; - if (drm_edp_backlight_supported(edp_dpcd)) { + /* TODO: Add support for hybrid PWM/DPCD panels */ + if (drm_edp_backlight_supported(edp_dpcd) && + (edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) && + (edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP)) { NV_DEBUG(drm, "DPCD backlight controls supported on %s\n", nv_conn->base.name); diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index e8c445eb1100..41b78e9ecd4e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -2045,7 +2045,6 @@ nouveau_run_vbios_init(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); struct nvbios *bios = &drm->vbios; - int ret = 0; /* Reset the BIOS head to 0. 
*/ bios->state.crtchead = 0; @@ -2058,7 +2057,7 @@ nouveau_run_vbios_init(struct drm_device *dev) bios->fp.lvds_init_run = false; } - return ret; + return 0; } static bool diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 929de41c281f..2b460835a438 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -306,7 +306,7 @@ nouveau_framebuffer_new(struct drm_device *dev, struct nouveau_bo *nvbo = nouveau_gem_object(gem); struct drm_framebuffer *fb; const struct drm_format_info *info; - unsigned int width, height, i; + unsigned int height, i; uint32_t tile_mode; uint8_t kind; int ret; @@ -343,9 +343,6 @@ nouveau_framebuffer_new(struct drm_device *dev, info = drm_get_format_info(dev, mode_cmd); for (i = 0; i < info->num_planes; i++) { - width = drm_format_info_plane_width(info, - mode_cmd->width, - i); height = drm_format_info_plane_height(info, mode_cmd->height, i); diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index e7efd9ede8e4..561309d447e0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -22,7 +22,6 @@ * Authors: Ben Skeggs */ -#include <linux/console.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/pci.h> @@ -32,6 +31,7 @@ #include <drm/drm_aperture.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_drv.h> #include <drm/drm_gem_ttm_helper.h> #include <drm/drm_ioctl.h> #include <drm/drm_vblank.h> @@ -1358,7 +1358,7 @@ nouveau_drm_init(void) nouveau_display_options(); if (nouveau_modeset == -1) { - if (vgacon_text_force()) + if (drm_firmware_drivers_only()) nouveau_modeset = 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 05d0b3eb3690..26f9299df881 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -339,14 +339,15 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr) } int -nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr) +nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, + bool exclusive, bool intr) { struct nouveau_fence_chan *fctx = chan->fence; - struct dma_fence *fence; struct dma_resv *resv = nvbo->bo.base.resv; - struct dma_resv_list *fobj; + struct dma_resv_iter cursor; + struct dma_fence *fence; struct nouveau_fence *f; - int ret = 0, i; + int ret; if (!exclusive) { ret = dma_resv_reserve_shared(resv, 1); @@ -355,10 +356,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e return ret; } - fobj = dma_resv_shared_list(resv); - fence = dma_resv_excl_fence(resv); - - if (fence) { + dma_resv_for_each_fence(&cursor, resv, exclusive, fence) { struct nouveau_channel *prev = NULL; bool must_wait = true; @@ -366,41 +364,19 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e if (f) { rcu_read_lock(); prev = rcu_dereference(f->channel); - if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0)) + if (prev && (prev == chan || + fctx->sync(f, prev, chan) == 0)) must_wait = false; rcu_read_unlock(); } - if (must_wait) + if (must_wait) { ret = dma_fence_wait(fence, intr); - - return ret; - } - - if (!exclusive || !fobj) - return ret; - - for (i = 0; i < fobj->shared_count && !ret; ++i) { - struct nouveau_channel *prev = NULL; - bool must_wait = true; - - fence = rcu_dereference_protected(fobj->shared[i], - 
dma_resv_held(resv)); - - f = nouveau_local_fence(fence, chan->drm); - if (f) { - rcu_read_lock(); - prev = rcu_dereference(f->channel); - if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0)) - must_wait = false; - rcu_read_unlock(); + if (ret) + return ret; } - - if (must_wait) - ret = dma_fence_wait(fence, intr); } - - return ret; + return 0; } void diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c index ac671202919e..0c8c55c73b12 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/client.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c @@ -60,7 +60,7 @@ nvkm_uclient_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, return 0; } -const struct nvkm_sclass +static const struct nvkm_sclass nvkm_uclient_sclass = { .oclass = NVIF_CLASS_CLIENT, .minver = 0, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index b51d690f375f..88d262ba648c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -2627,6 +2627,27 @@ nv174_chipset = { }; static const struct nvkm_device_chip +nv176_chipset = { + .name = "GA106", + .bar = { 0x00000001, tu102_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .devinit = { 0x00000001, ga100_devinit_new }, + .fb = { 0x00000001, ga102_fb_new }, + .gpio = { 0x00000001, ga102_gpio_new }, + .i2c = { 0x00000001, gm200_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .mc = { 0x00000001, ga100_mc_new }, + .mmu = { 0x00000001, tu102_mmu_new }, + .pci = { 0x00000001, gp100_pci_new }, + .privring = { 0x00000001, gm200_privring_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, ga100_top_new }, + .disp = { 0x00000001, ga102_disp_new }, + .dma = { 0x00000001, gv100_dma_new }, + .fifo = { 0x00000001, ga102_fifo_new }, +}; + +static const struct nvkm_device_chip nv177_chipset = { .name = "GA107", .bar = { 0x00000001, tu102_bar_new }, @@ -3072,6 +3093,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, case 0x168: device->chip = &nv168_chipset; break; case 0x172: device->chip = &nv172_chipset; break; case 0x174: device->chip = &nv174_chipset; break; + case 0x176: device->chip = &nv176_chipset; break; case 0x177: device->chip = &nv177_chipset; break; default: if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c index f28894fdede9..113ddc103ac2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c @@ -161,8 +161,8 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size) if (imem && args->v0.ram_size > 0) args->v0.ram_user = args->v0.ram_user - imem->reserved; - strncpy(args->v0.chip, device->chip->name, sizeof(args->v0.chip)); - strncpy(args->v0.name, device->name, sizeof(args->v0.name)); + snprintf(args->v0.chip, sizeof(args->v0.chip), "%s", device->chip->name); + snprintf(args->v0.name, sizeof(args->v0.name), "%s", device->name); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c index e20a48f201f6..448a515057c7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c @@ -106,6 +106,8 @@ gv100_disp_core_mthd_head = { { 0x20a4, 0x6820a4 }, { 0x20a8, 0x6820a8 }, { 0x20ac, 0x6820ac }, + { 0x2180, 
0x682180 }, + { 0x2184, 0x682184 }, { 0x218c, 0x68218c }, { 0x2194, 0x682194 }, { 0x2198, 0x682198 }, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c index 6e3c450eaace..3ff49344abc7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c @@ -62,7 +62,6 @@ gv100_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet, nvkm_wr32(device, 0x6f0108 + hdmi, vendor_infoframe.header); nvkm_wr32(device, 0x6f010c + hdmi, vendor_infoframe.subpack0_low); nvkm_wr32(device, 0x6f0110 + hdmi, vendor_infoframe.subpack0_high); - nvkm_wr32(device, 0x6f0110 + hdmi, 0x00000000); nvkm_wr32(device, 0x6f0114 + hdmi, 0x00000000); nvkm_wr32(device, 0x6f0118 + hdmi, 0x00000000); nvkm_wr32(device, 0x6f011c + hdmi, 0x00000000); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c index e417044cc347..260b197f81bc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c @@ -49,7 +49,7 @@ tu102_fifo_runlist_commit(struct gk104_fifo *fifo, int runl, /*XXX: how to wait? can you even wait? */ } -const struct gk104_fifo_runlist_func +static const struct gk104_fifo_runlist_func tu102_fifo_runlist = { .size = 16, .cgrp = gv100_fifo_runlist_cgrp, diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c index 262641a014b0..c91130a6be2a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c @@ -117,8 +117,12 @@ nvkm_falcon_disable(struct nvkm_falcon *falcon) int nvkm_falcon_reset(struct nvkm_falcon *falcon) { - nvkm_falcon_disable(falcon); - return nvkm_falcon_enable(falcon); + if (!falcon->func->reset) { + nvkm_falcon_disable(falcon); + return nvkm_falcon_enable(falcon); + } + + return falcon->func->reset(falcon); } int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c index cdb1ead26d84..82b4c8e1457c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c @@ -207,11 +207,13 @@ int gm200_acr_wpr_parse(struct nvkm_acr *acr) { const struct wpr_header *hdr = (void *)acr->wpr_fw->data; + struct nvkm_acr_lsfw *lsfw; while (hdr->falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID) { wpr_header_dump(&acr->subdev, hdr); - if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id)) - return -ENOMEM; + lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id); + if (IS_ERR(lsfw)) + return PTR_ERR(lsfw); } return 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c index fb9132a39bb1..fd97a935a380 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c @@ -161,11 +161,13 @@ int gp102_acr_wpr_parse(struct nvkm_acr *acr) { const struct wpr_header_v1 *hdr = (void *)acr->wpr_fw->data; + struct nvkm_acr_lsfw *lsfw; while (hdr->falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID) { wpr_header_v1_dump(&acr->subdev, hdr); - if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id)) - return -ENOMEM; + lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id); + if (IS_ERR(lsfw)) + return PTR_ERR(lsfw); } return 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c index 
9de74f41dcd2..142079403864 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c @@ -401,7 +401,7 @@ init_table_(struct nvbios_init *init, u16 offset, const char *name) #define init_macro_table(b) init_table_((b), 0x04, "macro table") #define init_condition_table(b) init_table_((b), 0x06, "condition table") #define init_io_condition_table(b) init_table_((b), 0x08, "io condition table") -#define init_io_flag_condition_table(b) init_table_((b), 0x0a, "io flag conditon table") +#define init_io_flag_condition_table(b) init_table_((b), 0x0a, "io flag condition table") #define init_function_table(b) init_table_((b), 0x0c, "function table") #define init_xlat_table(b) init_table_((b), 0x10, "xlat table"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c index fb90d47e1225..a9cdf2411187 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c @@ -32,7 +32,6 @@ mcp89_devinit_disable(struct nvkm_devinit *init) struct nvkm_device *device = init->subdev.device; u32 r001540 = nvkm_rd32(device, 0x001540); u32 r00154c = nvkm_rd32(device, 0x00154c); - u64 disable = 0; if (!(r001540 & 0x40000000)) { nvkm_subdev_disable(device, NVKM_ENGINE_MSPDEC, 0); @@ -48,7 +47,7 @@ mcp89_devinit_disable(struct nvkm_devinit *init) if (!(r00154c & 0x00000200)) nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0); - return disable; + return 0; } static const struct nvkm_devinit_func diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c index 24382875fb4f..455e95a89259 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c @@ -94,20 +94,13 @@ nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend) return 0; } -static int +static void nvkm_pmu_reset(struct nvkm_pmu *pmu) { struct nvkm_device *device = pmu->subdev.device; if (!pmu->func->enabled(pmu)) - return 0; - - /* Inhibit interrupts, and wait for idle. */ - nvkm_wr32(device, 0x10a014, 0x0000ffff); - nvkm_msec(device, 2000, - if (!nvkm_rd32(device, 0x10a04c)) - break; - ); + return; /* Reset. */ if (pmu->func->reset) @@ -118,25 +111,37 @@ nvkm_pmu_reset(struct nvkm_pmu *pmu) if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006)) break; ); - - return 0; } static int nvkm_pmu_preinit(struct nvkm_subdev *subdev) { struct nvkm_pmu *pmu = nvkm_pmu(subdev); - return nvkm_pmu_reset(pmu); + nvkm_pmu_reset(pmu); + return 0; } static int nvkm_pmu_init(struct nvkm_subdev *subdev) { struct nvkm_pmu *pmu = nvkm_pmu(subdev); - int ret = nvkm_pmu_reset(pmu); - if (ret == 0 && pmu->func->init) - ret = pmu->func->init(pmu); - return ret; + struct nvkm_device *device = pmu->subdev.device; + + if (!pmu->func->init) + return 0; + + if (pmu->func->enabled(pmu)) { + /* Inhibit interrupts, and wait for idle. 
*/ + nvkm_wr32(device, 0x10a014, 0x0000ffff); + nvkm_msec(device, 2000, + if (!nvkm_rd32(device, 0x10a04c)) + break; + ); + + nvkm_pmu_reset(pmu); + } + + return pmu->func->init(pmu); } static void * diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c index 5968c7696596..40439e329aa9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c @@ -23,9 +23,38 @@ */ #include "priv.h" +static int +gm200_pmu_flcn_reset(struct nvkm_falcon *falcon) +{ + struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon); + + nvkm_falcon_wr32(falcon, 0x014, 0x0000ffff); + pmu->func->reset(pmu); + return nvkm_falcon_enable(falcon); +} + +const struct nvkm_falcon_func +gm200_pmu_flcn = { + .debug = 0xc08, + .fbif = 0xe00, + .load_imem = nvkm_falcon_v1_load_imem, + .load_dmem = nvkm_falcon_v1_load_dmem, + .read_dmem = nvkm_falcon_v1_read_dmem, + .bind_context = nvkm_falcon_v1_bind_context, + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, + .set_start_addr = nvkm_falcon_v1_set_start_addr, + .start = nvkm_falcon_v1_start, + .enable = nvkm_falcon_v1_enable, + .disable = nvkm_falcon_v1_disable, + .reset = gm200_pmu_flcn_reset, + .cmdq = { 0x4a0, 0x4b0, 4 }, + .msgq = { 0x4c8, 0x4cc, 0 }, +}; + static const struct nvkm_pmu_func gm200_pmu = { - .flcn = &gt215_pmu_flcn, + .flcn = &gm200_pmu_flcn, .enabled = gf100_pmu_enabled, .reset = gf100_pmu_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c index 148706977eec..e1772211b0a4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c @@ -211,7 +211,7 @@ gm20b_pmu_recv(struct nvkm_pmu *pmu) static const struct nvkm_pmu_func gm20b_pmu = { - .flcn = &gt215_pmu_flcn, + .flcn = &gm200_pmu_flcn, .enabled = gf100_pmu_enabled, .intr = gt215_pmu_intr, .recv = gm20b_pmu_recv, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c index 00da1b873ce8..6bf7fc1bd1e3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c @@ -39,7 +39,7 @@ gp102_pmu_enabled(struct nvkm_pmu *pmu) static const struct nvkm_pmu_func gp102_pmu = { - .flcn = &gt215_pmu_flcn, + .flcn = &gm200_pmu_flcn, .enabled = gp102_pmu_enabled, .reset = gp102_pmu_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c index 461f722656e2..ba1583bb618b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c @@ -78,7 +78,7 @@ gp10b_pmu_acr = { static const struct nvkm_pmu_func gp10b_pmu = { - .flcn = &gt215_pmu_flcn, + .flcn = &gm200_pmu_flcn, .enabled = gf100_pmu_enabled, .intr = gt215_pmu_intr, .recv = gm20b_pmu_recv, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h index e7860d177353..bcaade758ff7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h @@ -44,6 +44,8 @@ void gf100_pmu_reset(struct nvkm_pmu *); void gk110_pmu_pgob(struct nvkm_pmu *, bool); +extern const struct nvkm_falcon_func gm200_pmu_flcn; + void gm20b_pmu_acr_bld_patch(struct nvkm_acr *, u32, s64); void gm20b_pmu_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *); int gm20b_pmu_acr_boot(struct 
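
gm200_pmu_flcn_reset() above recovers the owning PMU from its embedded falcon via container_of(). A generic sketch of that pattern with hypothetical names:

#include <linux/kernel.h>

struct engine {
	int busy;
};

struct device_ctx {
	int state;
	struct engine eng;	/* embedded member */
};

static void engine_reset(struct engine *eng)
{
	/* Walk back from the embedded member to the structure
	 * that contains it. */
	struct device_ctx *ctx = container_of(eng, struct device_ctx, eng);

	ctx->state = 0;
	eng->busy = 0;
}
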
nvkm_falcon *); diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile index 21e8277ff88f..710b4e0abcf0 100644 --- a/drivers/gpu/drm/omapdrm/Makefile +++ b/drivers/gpu/drm/omapdrm/Makefile @@ -9,6 +9,7 @@ omapdrm-y := omap_drv.o \ omap_debugfs.o \ omap_crtc.o \ omap_plane.o \ + omap_overlay.o \ omap_encoder.o \ omap_fb.o \ omap_gem.o \ diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c index 5619420cc2cc..c4de142cc85b 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c @@ -92,6 +92,8 @@ struct dispc_features { u8 mgr_height_start; u16 mgr_width_max; u16 mgr_height_max; + u16 ovl_width_max; + u16 ovl_height_max; unsigned long max_lcd_pclk; unsigned long max_tv_pclk; unsigned int max_downscale; @@ -1279,8 +1281,8 @@ static u32 dispc_ovl_get_burst_size(struct dispc_device *dispc, return dispc->feat->burst_size_unit * 8; } -static bool dispc_ovl_color_mode_supported(struct dispc_device *dispc, - enum omap_plane_id plane, u32 fourcc) +bool dispc_ovl_color_mode_supported(struct dispc_device *dispc, + enum omap_plane_id plane, u32 fourcc) { const u32 *modes; unsigned int i; @@ -2487,6 +2489,11 @@ static int dispc_ovl_calc_scaling_44xx(struct dispc_device *dispc, return 0; } +enum omap_overlay_caps dispc_ovl_get_caps(struct dispc_device *dispc, enum omap_plane_id plane) +{ + return dispc->feat->overlay_caps[plane]; +} + #define DIV_FRAC(dividend, divisor) \ ((dividend) * 100 / (divisor) - ((dividend) / (divisor) * 100)) @@ -2599,6 +2606,12 @@ static int dispc_ovl_calc_scaling(struct dispc_device *dispc, return 0; } +void dispc_ovl_get_max_size(struct dispc_device *dispc, u16 *width, u16 *height) +{ + *width = dispc->feat->ovl_width_max; + *height = dispc->feat->ovl_height_max; +} + static int dispc_ovl_setup_common(struct dispc_device *dispc, enum omap_plane_id plane, enum omap_overlay_caps caps, @@ -4240,6 +4253,8 @@ static const struct dispc_features omap24xx_dispc_feats = { .mgr_height_start = 26, .mgr_width_max = 2048, .mgr_height_max = 2048, + .ovl_width_max = 2048, + .ovl_height_max = 2048, .max_lcd_pclk = 66500000, .max_downscale = 2, /* @@ -4278,6 +4293,8 @@ static const struct dispc_features omap34xx_rev1_0_dispc_feats = { .mgr_height_start = 26, .mgr_width_max = 2048, .mgr_height_max = 2048, + .ovl_width_max = 2048, + .ovl_height_max = 2048, .max_lcd_pclk = 173000000, .max_tv_pclk = 59000000, .max_downscale = 4, @@ -4313,6 +4330,8 @@ static const struct dispc_features omap34xx_rev3_0_dispc_feats = { .mgr_height_start = 26, .mgr_width_max = 2048, .mgr_height_max = 2048, + .ovl_width_max = 2048, + .ovl_height_max = 2048, .max_lcd_pclk = 173000000, .max_tv_pclk = 59000000, .max_downscale = 4, @@ -4348,6 +4367,8 @@ static const struct dispc_features omap36xx_dispc_feats = { .mgr_height_start = 26, .mgr_width_max = 2048, .mgr_height_max = 2048, + .ovl_width_max = 2048, + .ovl_height_max = 2048, .max_lcd_pclk = 173000000, .max_tv_pclk = 59000000, .max_downscale = 4, @@ -4383,6 +4404,8 @@ static const struct dispc_features am43xx_dispc_feats = { .mgr_height_start = 26, .mgr_width_max = 2048, .mgr_height_max = 2048, + .ovl_width_max = 2048, + .ovl_height_max = 2048, .max_lcd_pclk = 173000000, .max_tv_pclk = 59000000, .max_downscale = 4, @@ -4418,6 +4441,8 @@ static const struct dispc_features omap44xx_dispc_feats = { .mgr_height_start = 26, .mgr_width_max = 2048, .mgr_height_max = 2048, + .ovl_width_max = 2048, + .ovl_height_max = 2048, .max_lcd_pclk = 170000000, .max_tv_pclk = 185625000, 
.max_downscale = 4, @@ -4457,8 +4482,10 @@ static const struct dispc_features omap54xx_dispc_feats = { .mgr_height_start = 27, .mgr_width_max = 4096, .mgr_height_max = 4096, + .ovl_width_max = 2048, + .ovl_height_max = 4096, .max_lcd_pclk = 170000000, - .max_tv_pclk = 186000000, + .max_tv_pclk = 192000000, .max_downscale = 4, .max_line_width = 2048, .min_pcd = 1, @@ -4725,7 +4752,6 @@ static int dispc_bind(struct device *dev, struct device *master, void *data) struct dispc_device *dispc; u32 rev; int r = 0; - struct resource *dispc_mem; struct device_node *np = pdev->dev.of_node; dispc = kzalloc(sizeof(*dispc), GFP_KERNEL); @@ -4750,8 +4776,7 @@ static int dispc_bind(struct device *dev, struct device *master, void *data) if (r) goto err_free; - dispc_mem = platform_get_resource(dispc->pdev, IORESOURCE_MEM, 0); - dispc->base = devm_ioremap_resource(&pdev->dev, dispc_mem); + dispc->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dispc->base)) { r = PTR_ERR(dispc->base); goto err_free; @@ -4844,7 +4869,7 @@ static int dispc_remove(struct platform_device *pdev) return 0; } -static int dispc_runtime_suspend(struct device *dev) +static __maybe_unused int dispc_runtime_suspend(struct device *dev) { struct dispc_device *dispc = dev_get_drvdata(dev); @@ -4859,7 +4884,7 @@ static int dispc_runtime_suspend(struct device *dev) return 0; } -static int dispc_runtime_resume(struct device *dev) +static __maybe_unused int dispc_runtime_resume(struct device *dev) { struct dispc_device *dispc = dev_get_drvdata(dev); @@ -4887,8 +4912,7 @@ static int dispc_runtime_resume(struct device *dev) } static const struct dev_pm_ops dispc_pm_ops = { - .runtime_suspend = dispc_runtime_suspend, - .runtime_resume = dispc_runtime_resume, + SET_RUNTIME_PM_OPS(dispc_runtime_suspend, dispc_runtime_resume, NULL) SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 503b5d4bf2c2..a6845856cbce 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -4884,7 +4884,6 @@ static int dsi_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct dsi_data *dsi; struct resource *dsi_mem; - struct resource *res; unsigned int i; int r; @@ -4921,13 +4920,11 @@ static int dsi_probe(struct platform_device *pdev) if (IS_ERR(dsi->proto_base)) return PTR_ERR(dsi->proto_base); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); - dsi->phy_base = devm_ioremap_resource(dev, res); + dsi->phy_base = devm_platform_ioremap_resource_byname(pdev, "phy"); if (IS_ERR(dsi->phy_base)) return PTR_ERR(dsi->phy_base); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll"); - dsi->pll_base = devm_ioremap_resource(dev, res); + dsi->pll_base = devm_platform_ioremap_resource_byname(pdev, "pll"); if (IS_ERR(dsi->pll_base)) return PTR_ERR(dsi->pll_base); @@ -5061,7 +5058,7 @@ static int dsi_remove(struct platform_device *pdev) return 0; } -static int dsi_runtime_suspend(struct device *dev) +static __maybe_unused int dsi_runtime_suspend(struct device *dev) { struct dsi_data *dsi = dev_get_drvdata(dev); @@ -5074,7 +5071,7 @@ static int dsi_runtime_suspend(struct device *dev) return 0; } -static int dsi_runtime_resume(struct device *dev) +static __maybe_unused int dsi_runtime_resume(struct device *dev) { struct dsi_data *dsi = dev_get_drvdata(dev); @@ -5086,8 +5083,7 @@ static int dsi_runtime_resume(struct device *dev) } static const struct dev_pm_ops dsi_pm_ops = { - 
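
The dispc_bind() conversion above is part of a recurring cleanup in this series: devm_platform_ioremap_resource() folds platform_get_resource() plus devm_ioremap_resource() into a single call. A hedged sketch in a generic probe function:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* Looks up IORESOURCE_MEM index 0 and maps it with devm
	 * lifetime management; the error handling is centralized
	 * in the helper. */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}
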
.runtime_suspend = dsi_runtime_suspend, - .runtime_resume = dsi_runtime_resume, + SET_RUNTIME_PM_OPS(dsi_runtime_suspend, dsi_runtime_resume, NULL) SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index d6a5862b4dbf..69b3e15b9356 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -1424,7 +1424,6 @@ static int dss_probe(struct platform_device *pdev) const struct soc_device_attribute *soc; struct dss_component_match_data cmatch; struct component_match *match = NULL; - struct resource *dss_mem; struct dss_device *dss; int r; @@ -1452,8 +1451,7 @@ static int dss_probe(struct platform_device *pdev) dss->feat = of_match_device(dss_of_match, &pdev->dev)->data; /* Map I/O registers, get and setup clocks. */ - dss_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - dss->base = devm_ioremap_resource(&pdev->dev, dss_mem); + dss->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dss->base)) { r = PTR_ERR(dss->base); goto err_free_dss; @@ -1571,7 +1569,7 @@ static void dss_shutdown(struct platform_device *pdev) DSSDBG("shutdown\n"); } -static int dss_runtime_suspend(struct device *dev) +static __maybe_unused int dss_runtime_suspend(struct device *dev) { struct dss_device *dss = dev_get_drvdata(dev); @@ -1583,7 +1581,7 @@ static int dss_runtime_suspend(struct device *dev) return 0; } -static int dss_runtime_resume(struct device *dev) +static __maybe_unused int dss_runtime_resume(struct device *dev) { struct dss_device *dss = dev_get_drvdata(dev); int r; @@ -1606,8 +1604,7 @@ static int dss_runtime_resume(struct device *dev) } static const struct dev_pm_ops dss_pm_ops = { - .runtime_suspend = dss_runtime_suspend, - .runtime_resume = dss_runtime_resume, + SET_RUNTIME_PM_OPS(dss_runtime_suspend, dss_runtime_resume, NULL) SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h index a547527bb2f3..4ff02fbc0e71 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.h +++ b/drivers/gpu/drm/omapdrm/dss/dss.h @@ -397,6 +397,11 @@ int dispc_get_num_mgrs(struct dispc_device *dispc); const u32 *dispc_ovl_get_color_modes(struct dispc_device *dispc, enum omap_plane_id plane); +void dispc_ovl_get_max_size(struct dispc_device *dispc, u16 *width, u16 *height); +bool dispc_ovl_color_mode_supported(struct dispc_device *dispc, + enum omap_plane_id plane, u32 fourcc); +enum omap_overlay_caps dispc_ovl_get_caps(struct dispc_device *dispc, enum omap_plane_id plane); + u32 dispc_read_irqstatus(struct dispc_device *dispc); void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask); void dispc_write_irqenable(struct dispc_device *dispc, u32 mask); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c index 43592c1cf081..852987e67e40 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * HDMI CEC * @@ -10,19 +11,6 @@ * Heavily modified to use the linux CEC framework: * * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved. - * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. 
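
The dev_pm_ops conversions above pair SET_RUNTIME_PM_OPS() with __maybe_unused callbacks: when CONFIG_PM is disabled the macro expands to nothing, and the annotation keeps the now-unreferenced functions from triggering unused-function warnings. A minimal sketch:

#include <linux/pm.h>

static __maybe_unused int example_runtime_suspend(struct device *dev)
{
	return 0;	/* gate clocks, save context, ... */
}

static __maybe_unused int example_runtime_resume(struct device *dev)
{
	return 0;	/* restore context, ungate clocks, ... */
}

static const struct dev_pm_ops example_pm_ops = {
	/* Expands to nothing when CONFIG_PM=n. */
	SET_RUNTIME_PM_OPS(example_runtime_suspend, example_runtime_resume, NULL)
};
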
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include <linux/kernel.h> diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.h b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.h index 0292337c97cc..1ca5b5ca8a99 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.h +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.h @@ -1,20 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * HDMI header definition for OMAP4 HDMI CEC IP * * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved. - * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _HDMI4_CEC_H_ diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c index 35faa7f028c4..8720bf4f18fe 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c @@ -870,7 +870,6 @@ static const struct soc_device_attribute hdmi4_soc_devices[] = { int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core) { const struct hdmi4_features *features; - struct resource *res; const struct soc_device_attribute *soc; soc = soc_device_match(hdmi4_soc_devices); @@ -881,8 +880,7 @@ int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core) core->cts_swmode = features->cts_swmode; core->audio_use_mclk = features->audio_use_mclk; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core"); - core->base = devm_ioremap_resource(&pdev->dev, res); + core->base = devm_platform_ioremap_resource_byname(pdev, "core"); if (IS_ERR(core->base)) return PTR_ERR(core->base); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c index 6cc2ad7a420c..21564c38234f 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c @@ -872,10 +872,7 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp, int hdmi5_core_init(struct platform_device *pdev, struct hdmi_core_data *core) { - struct resource *res; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core"); - core->base = devm_ioremap_resource(&pdev->dev, res); + core->base = devm_platform_ioremap_resource_byname(pdev, "core"); if (IS_ERR(core->base)) return PTR_ERR(core->base); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c index 5dc200f09c3c..060e8f76f2be 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c +++ 
b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c @@ -182,15 +182,12 @@ static const struct hdmi_phy_features omap54xx_phy_feats = { int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy, unsigned int version) { - struct resource *res; - if (version == 4) phy->features = &omap44xx_phy_feats; else phy->features = &omap54xx_phy_feats; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); - phy->base = devm_ioremap_resource(&pdev->dev, res); + phy->base = devm_platform_ioremap_resource_byname(pdev, "phy"); if (IS_ERR(phy->base)) return PTR_ERR(phy->base); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c index 13bf649aba52..eea719243eaf 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c @@ -162,13 +162,11 @@ int hdmi_pll_init(struct dss_device *dss, struct platform_device *pdev, struct hdmi_pll_data *pll, struct hdmi_wp_data *wp) { int r; - struct resource *res; pll->pdev = pdev; pll->wp = wp; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll"); - pll->base = devm_ioremap_resource(&pdev->dev, res); + pll->base = devm_platform_ioremap_resource_byname(pdev, "pll"); if (IS_ERR(pll->base)) return PTR_ERR(pll->base); diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index e522c17955d0..4480b69ab5a7 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -806,7 +806,6 @@ static const struct soc_device_attribute venc_soc_devices[] = { static int venc_probe(struct platform_device *pdev) { struct venc_device *venc; - struct resource *venc_mem; int r; venc = kzalloc(sizeof(*venc), GFP_KERNEL); @@ -823,8 +822,7 @@ static int venc_probe(struct platform_device *pdev) venc->config = &venc_config_pal_trm; - venc_mem = platform_get_resource(venc->pdev, IORESOURCE_MEM, 0); - venc->base = devm_ioremap_resource(&pdev->dev, venc_mem); + venc->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(venc->base)) { r = PTR_ERR(venc->base); goto err_free; @@ -881,7 +879,7 @@ static int venc_remove(struct platform_device *pdev) return 0; } -static int venc_runtime_suspend(struct device *dev) +static __maybe_unused int venc_runtime_suspend(struct device *dev) { struct venc_device *venc = dev_get_drvdata(dev); @@ -891,7 +889,7 @@ static int venc_runtime_suspend(struct device *dev) return 0; } -static int venc_runtime_resume(struct device *dev) +static __maybe_unused int venc_runtime_resume(struct device *dev) { struct venc_device *venc = dev_get_drvdata(dev); @@ -902,8 +900,7 @@ static int venc_runtime_resume(struct device *dev) } static const struct dev_pm_ops venc_pm_ops = { - .runtime_suspend = venc_runtime_suspend, - .runtime_resume = venc_runtime_resume, + SET_RUNTIME_PM_OPS(venc_runtime_suspend, venc_runtime_resume, NULL) SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; diff --git a/drivers/gpu/drm/omapdrm/dss/video-pll.c b/drivers/gpu/drm/omapdrm/dss/video-pll.c index b72c3ffddc9a..b6b52049f753 100644 --- a/drivers/gpu/drm/omapdrm/dss/video-pll.c +++ b/drivers/gpu/drm/omapdrm/dss/video-pll.c @@ -137,7 +137,6 @@ struct dss_pll *dss_video_pll_init(struct dss_device *dss, const char * const clkctrl_name[] = { "pll1_clkctrl", "pll2_clkctrl" }; const char * const clkin_name[] = { "video1_clk", "video2_clk" }; - struct resource *res; struct dss_video_pll *vpll; void __iomem *pll_base, *clkctrl_base; struct clk *clk; @@ -146,16 +145,13 @@ struct dss_pll *dss_video_pll_init(struct 
dss_device *dss, /* PLL CONTROL */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, reg_name[id]); - pll_base = devm_ioremap_resource(&pdev->dev, res); + pll_base = devm_platform_ioremap_resource_byname(pdev, reg_name[id]); if (IS_ERR(pll_base)) return ERR_CAST(pll_base); /* CLOCK CONTROL */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - clkctrl_name[id]); - clkctrl_base = devm_ioremap_resource(&pdev->dev, res); + clkctrl_base = devm_platform_ioremap_resource_byname(pdev, clkctrl_name[id]); if (IS_ERR(clkctrl_base)) return ERR_CAST(clkctrl_base); diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h index 58a8239d3e69..2288ed56f23d 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h +++ b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h @@ -1,16 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob@ti.com> * Andy Gross <andy.gross@ti.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef OMAP_DMM_PRIV_H diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index ed770caf55c2..852e78a5f142 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -1,18 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * DMM IOMMU driver support functions for TI OMAP processors. * * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob@ti.com> * Andy Gross <andy.gross@ti.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/completion.h> diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h index 2f8918fe06d5..87a32b3cd3b0 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h @@ -1,16 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob@ti.com> * Andy Gross <andy.gross@ti.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
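
The license-boilerplate removals in the omapdrm files above and below follow the kernel's SPDX convention: a single identifier on the first line, written as a // comment in .c files and as a /* */ block comment in headers and other files where // may be unsuitable. Sketch:

// SPDX-License-Identifier: GPL-2.0-only
/* ... first line of a .c source file ... */

/* SPDX-License-Identifier: GPL-2.0-only */
/* ... first line of a .h header ... */
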
*/ #ifndef OMAP_DMM_TILER_H #define OMAP_DMM_TILER_H diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index c05d3975cb31..2720a58ccd90 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -117,6 +117,102 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state) dispc_runtime_put(priv->dispc); } +static int drm_atomic_state_normalized_zpos_cmp(const void *a, const void *b) +{ + const struct drm_plane_state *sa = *(struct drm_plane_state **)a; + const struct drm_plane_state *sb = *(struct drm_plane_state **)b; + + if (sa->normalized_zpos != sb->normalized_zpos) + return sa->normalized_zpos - sb->normalized_zpos; + else + return sa->plane->base.id - sb->plane->base.id; +} + +/* + * This replaces drm_atomic_normalize_zpos() to handle the dual overlay case. + * + * Since both halves need to appear side by side, the zpos is + * recalculated when dealing with dual overlay cases so that the other + * planes' zpos stays consistent. + */ +static int omap_atomic_update_normalize_zpos(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *old_state, *new_state; + struct drm_plane *plane; + int c, i, n, inc; + int total_planes = dev->mode_config.num_total_plane; + struct drm_plane_state **states; + int ret = 0; + + states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL); + if (!states) + return -ENOMEM; + + for_each_oldnew_crtc_in_state(state, crtc, old_state, new_state, c) { + if (old_state->plane_mask == new_state->plane_mask && + !new_state->zpos_changed) + continue; + + /* Reset plane increment and index value for every crtc */ + n = 0; + + /* + * The normalization process might create new states for planes + * whose normalized_zpos has to be recalculated. + */ + drm_for_each_plane_mask(plane, dev, new_state->plane_mask) { + struct drm_plane_state *plane_state = + drm_atomic_get_plane_state(new_state->state, + plane); + if (IS_ERR(plane_state)) { + ret = PTR_ERR(plane_state); + goto done; + } + states[n++] = plane_state; + } + + sort(states, n, sizeof(*states), + drm_atomic_state_normalized_zpos_cmp, NULL); + + for (i = 0, inc = 0; i < n; i++) { + plane = states[i]->plane; + + states[i]->normalized_zpos = i + inc; + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] updated normalized zpos value %d\n", + plane->base.id, plane->name, + states[i]->normalized_zpos); + + if (is_omap_plane_dual_overlay(states[i])) + inc++; + } + new_state->zpos_changed = true; + } + +done: + kfree(states); + return ret; +} + +static int omap_atomic_check(struct drm_device *dev, + struct drm_atomic_state *state) +{ + int ret; + + ret = drm_atomic_helper_check(dev, state); + if (ret) + return ret; + + if (dev->mode_config.normalize_zpos) { + ret = omap_atomic_update_normalize_zpos(dev, state); + if (ret) + return ret; + } + + return 0; +} + static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs = { .atomic_commit_tail = omap_atomic_commit_tail, }; @@ -124,10 +220,86 @@ static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs = static const struct drm_mode_config_funcs omap_mode_config_funcs = { .fb_create = omap_framebuffer_create, .output_poll_changed = drm_fb_helper_output_poll_changed, - .atomic_check = drm_atomic_helper_check, + .atomic_check = omap_atomic_check, .atomic_commit = drm_atomic_helper_commit, }; +/* Global/shared object state funcs */ + +/* + * This is a helper that returns the private state currently in operation. 
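
A worked example may help with the renormalization loop above (assumed plane set): planes A, B, C sorted by zpos, where B is a dual-overlay plane.

/*
 * i  plane  inc   normalized_zpos = i + inc
 * 0    A     0    0
 * 1    B     0    1   (inc becomes 1 after B)
 * 2    C     1    3
 *
 * The gap at 2 is deliberate: B's right overlay is programmed
 * with zorder = 2 (main zorder + 1) at update time.
 */
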
+ * Note that this would return the "old_state" if called in the atomic check + * path, and the "new_state" after the atomic swap has been done. + */ +struct omap_global_state * +omap_get_existing_global_state(struct omap_drm_private *priv) +{ + return to_omap_global_state(priv->glob_obj.state); +} + +/* + * This acquires the modeset lock set aside for global state, creates + * a new duplicated private object state. + */ +struct omap_global_state *__must_check +omap_get_global_state(struct drm_atomic_state *s) +{ + struct omap_drm_private *priv = s->dev->dev_private; + struct drm_private_state *priv_state; + + priv_state = drm_atomic_get_private_obj_state(s, &priv->glob_obj); + if (IS_ERR(priv_state)) + return ERR_CAST(priv_state); + + return to_omap_global_state(priv_state); +} + +static struct drm_private_state * +omap_global_duplicate_state(struct drm_private_obj *obj) +{ + struct omap_global_state *state; + + state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); + + return &state->base; +} + +static void omap_global_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + struct omap_global_state *omap_state = to_omap_global_state(state); + + kfree(omap_state); +} + +static const struct drm_private_state_funcs omap_global_state_funcs = { + .atomic_duplicate_state = omap_global_duplicate_state, + .atomic_destroy_state = omap_global_destroy_state, +}; + +static int omap_global_obj_init(struct drm_device *dev) +{ + struct omap_drm_private *priv = dev->dev_private; + struct omap_global_state *state; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + drm_atomic_private_obj_init(dev, &priv->glob_obj, &state->base, + &omap_global_state_funcs); + return 0; +} + +static void omap_global_obj_fini(struct omap_drm_private *priv) +{ + drm_atomic_private_obj_fini(&priv->glob_obj); +} + static void omap_disconnect_pipelines(struct drm_device *ddev) { struct omap_drm_private *priv = ddev->dev_private; @@ -231,8 +403,6 @@ static int omap_modeset_init(struct drm_device *dev) if (!omapdss_stack_is_ready()) return -EPROBE_DEFER; - drm_mode_config_init(dev); - ret = omap_modeset_init_properties(dev); if (ret < 0) return ret; @@ -583,10 +753,20 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) omap_gem_init(ddev); + drm_mode_config_init(ddev); + + ret = omap_global_obj_init(ddev); + if (ret) + goto err_gem_deinit; + + ret = omap_hwoverlays_init(priv); + if (ret) + goto err_free_priv_obj; + ret = omap_modeset_init(ddev); if (ret) { dev_err(priv->dev, "omap_modeset_init failed: ret=%d\n", ret); - goto err_gem_deinit; + goto err_free_overlays; } /* Initialize vblank handling, start with all CRTCs disabled. 
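
The two accessors above follow the usual DRM private-object split: omap_get_global_state() is for atomic_check paths (it takes the private object's lock and returns this transaction's duplicated copy), while omap_get_existing_global_state() serves code running after the state swap. A hedged usage sketch from a hypothetical atomic_check():

/* Sketch only; example_atomic_check() is not part of the driver. */
static int example_atomic_check(struct drm_atomic_state *s)
{
	struct omap_global_state *gs = omap_get_global_state(s);

	/* May return an error pointer, e.g. -EDEADLK for the
	 * atomic backoff/retry dance. */
	if (IS_ERR(gs))
		return PTR_ERR(gs);

	/* gs is private to this transaction; it becomes globally
	 * visible only when the state is swapped in on commit. */
	return 0;
}
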
*/ @@ -618,7 +798,12 @@ err_cleanup_helpers: omap_fbdev_fini(ddev); err_cleanup_modeset: omap_modeset_fini(ddev); +err_free_overlays: + omap_hwoverlays_destroy(priv); +err_free_priv_obj: + omap_global_obj_fini(priv); err_gem_deinit: + drm_mode_config_cleanup(ddev); omap_gem_deinit(ddev); destroy_workqueue(priv->wq); omap_disconnect_pipelines(ddev); @@ -642,6 +827,9 @@ static void omapdrm_cleanup(struct omap_drm_private *priv) drm_atomic_helper_shutdown(ddev); omap_modeset_fini(ddev); + omap_hwoverlays_destroy(priv); + omap_global_obj_fini(priv); + drm_mode_config_cleanup(ddev); omap_gem_deinit(ddev); destroy_workqueue(priv->wq); diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index 591d4c273f02..825960fd3ea9 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -14,6 +14,7 @@ #include "dss/omapdss.h" #include "dss/dss.h" +#include <drm/drm_atomic.h> #include <drm/drm_gem.h> #include <drm/omap_drm.h> @@ -24,6 +25,7 @@ #include "omap_gem.h" #include "omap_irq.h" #include "omap_plane.h" +#include "omap_overlay.h" #define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__) #define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__) /* verbose debug */ @@ -40,6 +42,19 @@ struct omap_drm_pipeline { unsigned int alias_id; }; +/* + * Global private object state for tracking resources that are shared across + * multiple kms objects (planes/crtcs/etc). + */ +#define to_omap_global_state(x) container_of(x, struct omap_global_state, base) + +struct omap_global_state { + struct drm_private_state base; + + /* global atomic state of assignment between overlays and planes */ + struct drm_plane *hwoverlay_to_plane[8]; +}; + struct omap_drm_private { struct drm_device *ddev; struct device *dev; @@ -57,6 +72,11 @@ struct omap_drm_private { unsigned int num_planes; struct drm_plane *planes[8]; + unsigned int num_ovls; + struct omap_hw_overlay *overlays[8]; + + struct drm_private_obj glob_obj; + struct drm_fb_helper *fbdev; struct workqueue_struct *wq; @@ -85,4 +105,8 @@ struct omap_drm_private { void omap_debugfs_init(struct drm_minor *minor); +struct omap_global_state * __must_check omap_get_global_state(struct drm_atomic_state *s); + +struct omap_global_state *omap_get_existing_global_state(struct omap_drm_private *priv); + #endif /* __OMAPDRM_DRV_H__ */ diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index 190afc564914..895e66b08a81 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c @@ -131,7 +131,9 @@ static u32 drm_rotation_to_tiler(unsigned int drm_rot) /* update ovl info for scanout, handles cases of multi-planar fb's, etc. 
*/ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, - struct drm_plane_state *state, struct omap_overlay_info *info) + struct drm_plane_state *state, + struct omap_overlay_info *info, + struct omap_overlay_info *r_info) { struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); const struct drm_format_info *format = omap_fb->format; @@ -218,6 +220,35 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, } else { info->p_uv_addr = 0; } + + if (r_info) { + info->width /= 2; + info->out_width /= 2; + + *r_info = *info; + + if (fb->format->is_yuv) { + if (info->width & 1) { + info->width++; + r_info->width--; + } + + if (info->out_width & 1) { + info->out_width++; + r_info->out_width--; + } + } + + r_info->pos_x = info->pos_x + info->out_width; + + r_info->paddr = get_linear_addr(fb, format, 0, + x + info->width, y); + if (fb->format->format == DRM_FORMAT_NV12) { + r_info->p_uv_addr = + get_linear_addr(fb, format, 1, + x + info->width, y); + } + } } /* pin, prepare for scanout: */ diff --git a/drivers/gpu/drm/omapdrm/omap_fb.h b/drivers/gpu/drm/omapdrm/omap_fb.h index c0e19aed8220..b75f0b5ef1d8 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.h +++ b/drivers/gpu/drm/omapdrm/omap_fb.h @@ -26,7 +26,9 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, int omap_framebuffer_pin(struct drm_framebuffer *fb); void omap_framebuffer_unpin(struct drm_framebuffer *fb); void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, - struct drm_plane_state *state, struct omap_overlay_info *info); + struct drm_plane_state *state, + struct omap_overlay_info *info, + struct omap_overlay_info *r_info); bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb); void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m); diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index 38af6195d959..b0fa17409b66 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -789,7 +789,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr) if (omap_obj->flags & OMAP_BO_TILED_MASK) { block = tiler_reserve_2d(fmt, omap_obj->width, - omap_obj->height, 0); + omap_obj->height, PAGE_SIZE); } else { block = tiler_reserve_1d(obj->size); } @@ -851,6 +851,11 @@ static void omap_gem_unpin_locked(struct drm_gem_object *obj) return; if (refcount_dec_and_test(&omap_obj->dma_addr_cnt)) { + if (omap_obj->sgt) { + sg_free_table(omap_obj->sgt); + kfree(omap_obj->sgt); + omap_obj->sgt = NULL; + } ret = tiler_unpin(omap_obj->block); if (ret) { dev_err(obj->dev->dev, @@ -963,6 +968,78 @@ int omap_gem_put_pages(struct drm_gem_object *obj) return 0; } +struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj) +{ + struct omap_gem_object *omap_obj = to_omap_bo(obj); + dma_addr_t addr; + struct sg_table *sgt; + struct scatterlist *sg; + unsigned int count, len, stride, i; + int ret; + + ret = omap_gem_pin(obj, &addr); + if (ret) + return ERR_PTR(ret); + + mutex_lock(&omap_obj->lock); + + sgt = omap_obj->sgt; + if (sgt) + goto out; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) { + ret = -ENOMEM; + goto err_unpin; + } + + if (omap_obj->flags & OMAP_BO_TILED_MASK) { + enum tiler_fmt fmt = gem2fmt(omap_obj->flags); + + len = omap_obj->width << (int)fmt; + count = omap_obj->height; + stride = tiler_stride(fmt, 0); + } else { + len = obj->size; + count = 1; + stride = 0; + } + + ret = sg_alloc_table(sgt, count, GFP_KERNEL); + if (ret) + goto err_free; + + for_each_sg(sgt->sgl, 
sg, count, i) { + sg_set_page(sg, phys_to_page(addr), len, offset_in_page(addr)); + sg_dma_address(sg) = addr; + sg_dma_len(sg) = len; + + addr += stride; + } + + omap_obj->sgt = sgt; +out: + mutex_unlock(&omap_obj->lock); + return sgt; + +err_free: + kfree(sgt); +err_unpin: + mutex_unlock(&omap_obj->lock); + omap_gem_unpin(obj); + return ERR_PTR(ret); +} + +void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt) +{ + struct omap_gem_object *omap_obj = to_omap_bo(obj); + + if (WARN_ON(omap_obj->sgt != sgt)) + return; + + omap_gem_unpin(obj); +} + #ifdef CONFIG_DRM_FBDEV_EMULATION /* * Get kernel virtual address for CPU access.. this more or less only diff --git a/drivers/gpu/drm/omapdrm/omap_gem.h b/drivers/gpu/drm/omapdrm/omap_gem.h index eda9b4839c30..19209e319663 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.h +++ b/drivers/gpu/drm/omapdrm/omap_gem.h @@ -82,5 +82,7 @@ u32 omap_gem_flags(struct drm_gem_object *obj); int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient, int x, int y, dma_addr_t *dma_addr); int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient); +struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj); +void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt); #endif /* __OMAPDRM_GEM_H__ */ diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c index 809f86cfc540..57af3d97be77 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c +++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c @@ -23,45 +23,21 @@ static struct sg_table *omap_gem_map_dma_buf( { struct drm_gem_object *obj = attachment->dmabuf->priv; struct sg_table *sg; - dma_addr_t dma_addr; - int ret; - - sg = kzalloc(sizeof(*sg), GFP_KERNEL); - if (!sg) - return ERR_PTR(-ENOMEM); - - /* camera, etc, need physically contiguous.. but we need a - * better way to know this.. 
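
To make the omap_gem_get_sg() geometry above concrete, a worked example with assumed numbers for a tiled 8-bit buffer:

/*
 * Assumed: width = 320, height = 240, fmt = TILFMT_8BIT (shift 0).
 *   len    = 320 << 0 = 320 bytes per scatterlist entry (one row)
 *   count  = 240 entries
 *   stride = tiler_stride(fmt, 0), the tiler container pitch
 * Entry i then describes [addr + i * stride, addr + i * stride + len),
 * i.e. rows that are contiguous on screen but strided in the
 * tiler's address space.
 */
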
- */ - ret = omap_gem_pin(obj, &dma_addr); - if (ret) - goto out; - - ret = sg_alloc_table(sg, 1, GFP_KERNEL); - if (ret) - goto out; - - sg_init_table(sg->sgl, 1); - sg_dma_len(sg->sgl) = obj->size; - sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(dma_addr)), obj->size, 0); - sg_dma_address(sg->sgl) = dma_addr; + sg = omap_gem_get_sg(obj); + if (IS_ERR(sg)) + return sg; /* this must be after omap_gem_pin() to ensure we have pages attached */ omap_gem_dma_sync_buffer(obj, dir); return sg; -out: - kfree(sg); - return ERR_PTR(ret); } static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *sg, enum dma_data_direction dir) { struct drm_gem_object *obj = attachment->dmabuf->priv; - omap_gem_unpin(obj); - sg_free_table(sg); - kfree(sg); + omap_gem_put_sg(obj, sg); } static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer, @@ -114,7 +90,7 @@ struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags) DEFINE_DMA_BUF_EXPORT_INFO(exp_info); exp_info.ops = &omap_dmabuf_ops; - exp_info.size = obj->size; + exp_info.size = omap_gem_mmap_size(obj); exp_info.flags = flags; exp_info.priv = obj; diff --git a/drivers/gpu/drm/omapdrm/omap_overlay.c b/drivers/gpu/drm/omapdrm/omap_overlay.c new file mode 100644 index 000000000000..10730c9b2752 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/omap_overlay.c @@ -0,0 +1,212 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ + * Author: Benoit Parrot <bparrot@ti.com> + */ + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_plane_helper.h> + +#include "omap_dmm_tiler.h" +#include "omap_drv.h" + +/* + * overlay funcs + */ +static const char * const overlay_id_to_name[] = { + [OMAP_DSS_GFX] = "gfx", + [OMAP_DSS_VIDEO1] = "vid1", + [OMAP_DSS_VIDEO2] = "vid2", + [OMAP_DSS_VIDEO3] = "vid3", +}; + +/* + * Find a free overlay with the required caps and supported fourcc + */ +static struct omap_hw_overlay * +omap_plane_find_free_overlay(struct drm_device *dev, struct drm_plane *hwoverlay_to_plane[], + u32 caps, u32 fourcc) +{ + struct omap_drm_private *priv = dev->dev_private; + int i; + + DBG("caps: %x fourcc: %x", caps, fourcc); + + for (i = 0; i < priv->num_ovls; i++) { + struct omap_hw_overlay *cur = priv->overlays[i]; + + DBG("%d: id: %d cur->caps: %x", + cur->idx, cur->id, cur->caps); + + /* skip if already in-use */ + if (hwoverlay_to_plane[cur->idx]) + continue; + + /* skip if doesn't support some required caps: */ + if (caps & ~cur->caps) + continue; + + /* check supported format */ + if (!dispc_ovl_color_mode_supported(priv->dispc, + cur->id, fourcc)) + continue; + + return cur; + } + + DBG("no match"); + return NULL; +} + +/* + * Assign a new overlay to a plane with the required caps and supported fourcc. + * If a plane needs a new overlay, the previous one should have been released + * with omap_overlay_release(). + * This should be called from the plane atomic_check() in order to prepare the + * next global overlay_map to be enabled when the atomic transaction is valid. 
+ */ +int omap_overlay_assign(struct drm_atomic_state *s, struct drm_plane *plane, + u32 caps, u32 fourcc, struct omap_hw_overlay **overlay, + struct omap_hw_overlay **r_overlay) +{ + /* Get the global state of the current atomic transaction */ + struct omap_global_state *state = omap_get_global_state(s); + struct drm_plane **overlay_map = state->hwoverlay_to_plane; + struct omap_hw_overlay *ovl, *r_ovl; + + ovl = omap_plane_find_free_overlay(s->dev, overlay_map, caps, fourcc); + if (!ovl) + return -ENOMEM; + + overlay_map[ovl->idx] = plane; + *overlay = ovl; + + if (r_overlay) { + r_ovl = omap_plane_find_free_overlay(s->dev, overlay_map, + caps, fourcc); + if (!r_ovl) { + overlay_map[ovl->idx] = NULL; + *overlay = NULL; + return -ENOMEM; + } + + overlay_map[r_ovl->idx] = plane; + *r_overlay = r_ovl; + } + + DBG("%s: assign to plane %s caps %x", ovl->name, plane->name, caps); + + if (r_overlay) { + DBG("%s: assign to right of plane %s caps %x", + r_ovl->name, plane->name, caps); + } + + return 0; +} + +/* + * Release an overlay from a plane when the plane becomes invisible or when + * it needs a new overlay because the overlay caps changed. + * This should be called from the plane atomic_check() in order to prepare the + * next global overlay_map to be enabled when the atomic transaction is valid. + */ +void omap_overlay_release(struct drm_atomic_state *s, struct omap_hw_overlay *overlay) +{ + /* Get the global state of the current atomic transaction */ + struct omap_global_state *state = omap_get_global_state(s); + struct drm_plane **overlay_map = state->hwoverlay_to_plane; + + if (!overlay) + return; + + if (WARN_ON(!overlay_map[overlay->idx])) + return; + + DBG("%s: release from plane %s", overlay->name, overlay_map[overlay->idx]->name); + + overlay_map[overlay->idx] = NULL; +} + +/* + * Update an overlay state that was attached to a plane before the current atomic state. + * This should be called from the plane atomic_update() or atomic_disable(), + * where an overlay association to a plane could have changed between the old and current + * atomic state. 
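
Putting omap_overlay_assign() and omap_overlay_release() together, a condensed sketch of the intended atomic_check flow (names from this driver; 'need_split' stands in for the new_r_hw_overlay decision made in omap_plane_atomic_check() further below):

/* Drop the previously mapped overlays, then request fresh ones. */
omap_overlay_release(state, omap_state->overlay);
omap_overlay_release(state, omap_state->r_overlay);

ret = omap_overlay_assign(state, plane, caps, fourcc, &new_ovl,
			  need_split ? &new_r_ovl : NULL);
if (ret)
	return ret;	/* no free overlay with the required caps */
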
+ */ +void omap_overlay_update_state(struct omap_drm_private *priv, + struct omap_hw_overlay *overlay) +{ + struct omap_global_state *state = omap_get_existing_global_state(priv); + struct drm_plane **overlay_map = state->hwoverlay_to_plane; + + /* Check if this overlay is not used anymore, then disable it */ + if (!overlay_map[overlay->idx]) { + DBG("%s: disabled", overlay->name); + + /* disable the overlay */ + dispc_ovl_enable(priv->dispc, overlay->id, false); + } +} + +static void omap_overlay_destroy(struct omap_hw_overlay *overlay) +{ + kfree(overlay); +} + +static struct omap_hw_overlay *omap_overlay_init(enum omap_plane_id overlay_id, + enum omap_overlay_caps caps) +{ + struct omap_hw_overlay *overlay; + + overlay = kzalloc(sizeof(*overlay), GFP_KERNEL); + if (!overlay) + return ERR_PTR(-ENOMEM); + + overlay->name = overlay_id_to_name[overlay_id]; + overlay->id = overlay_id; + overlay->caps = caps; + + return overlay; +} + +int omap_hwoverlays_init(struct omap_drm_private *priv) +{ + static const enum omap_plane_id hw_plane_ids[] = { + OMAP_DSS_GFX, OMAP_DSS_VIDEO1, + OMAP_DSS_VIDEO2, OMAP_DSS_VIDEO3, + }; + u32 num_overlays = dispc_get_num_ovls(priv->dispc); + enum omap_overlay_caps caps; + int i, ret; + + for (i = 0; i < num_overlays; i++) { + struct omap_hw_overlay *overlay; + + caps = dispc_ovl_get_caps(priv->dispc, hw_plane_ids[i]); + overlay = omap_overlay_init(hw_plane_ids[i], caps); + if (IS_ERR(overlay)) { + ret = PTR_ERR(overlay); + dev_err(priv->dev, "failed to construct overlay for %s (%d)\n", + overlay_id_to_name[i], ret); + omap_hwoverlays_destroy(priv); + return ret; + } + overlay->idx = priv->num_ovls; + priv->overlays[priv->num_ovls++] = overlay; + } + + return 0; +} + +void omap_hwoverlays_destroy(struct omap_drm_private *priv) +{ + int i; + + for (i = 0; i < priv->num_ovls; i++) { + omap_overlay_destroy(priv->overlays[i]); + priv->overlays[i] = NULL; + } + + priv->num_ovls = 0; +} diff --git a/drivers/gpu/drm/omapdrm/omap_overlay.h b/drivers/gpu/drm/omapdrm/omap_overlay.h new file mode 100644 index 000000000000..e36a43f35563 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/omap_overlay.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ + * Author: Benoit Parrot <bparrot@ti.com> + */ + +#ifndef __OMAPDRM_OVERLAY_H__ +#define __OMAPDRM_OVERLAY_H__ + +#include <linux/types.h> + +enum drm_plane_type; + +struct drm_device; +struct drm_mode_object; +struct drm_plane; + +/* Used to associate a HW overlay/plane to a plane */ +struct omap_hw_overlay { + unsigned int idx; + + const char *name; + enum omap_plane_id id; + + enum omap_overlay_caps caps; +}; + +int omap_hwoverlays_init(struct omap_drm_private *priv); +void omap_hwoverlays_destroy(struct omap_drm_private *priv); +int omap_overlay_assign(struct drm_atomic_state *s, struct drm_plane *plane, + u32 caps, u32 fourcc, struct omap_hw_overlay **overlay, + struct omap_hw_overlay **r_overlay); +void omap_overlay_release(struct drm_atomic_state *s, struct omap_hw_overlay *overlay); +void omap_overlay_update_state(struct omap_drm_private *priv, struct omap_hw_overlay *overlay); +#endif /* __OMAPDRM_OVERLAY_H__ */ diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c index 512af976b7e9..b35205c4e979 100644 --- a/drivers/gpu/drm/omapdrm/omap_plane.c +++ b/drivers/gpu/drm/omapdrm/omap_plane.c @@ -8,6 +8,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_gem_atomic_helper.h> #include 
<drm/drm_plane_helper.h> +#include <drm/drm_fourcc.h> #include "omap_dmm_tiler.h" #include "omap_drv.h" @@ -16,14 +17,30 @@ * plane funcs */ +#define to_omap_plane_state(x) container_of(x, struct omap_plane_state, base) + +struct omap_plane_state { + /* Must be first. */ + struct drm_plane_state base; + + struct omap_hw_overlay *overlay; + struct omap_hw_overlay *r_overlay; /* right overlay */ +}; + #define to_omap_plane(x) container_of(x, struct omap_plane, base) struct omap_plane { struct drm_plane base; enum omap_plane_id id; - const char *name; }; +bool is_omap_plane_dual_overlay(struct drm_plane_state *state) +{ + struct omap_plane_state *omap_state = to_omap_plane_state(state); + + return !!omap_state->r_overlay; +} + static int omap_plane_prepare_fb(struct drm_plane *plane, struct drm_plane_state *new_state) { @@ -46,13 +63,35 @@ static void omap_plane_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state) { struct omap_drm_private *priv = plane->dev->dev_private; - struct omap_plane *omap_plane = to_omap_plane(plane); struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); - struct omap_overlay_info info; + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + struct omap_plane_state *new_omap_state; + struct omap_plane_state *old_omap_state; + struct omap_overlay_info info, r_info; + enum omap_plane_id ovl_id, r_ovl_id; int ret; + bool dual_ovl; + + new_omap_state = to_omap_plane_state(new_state); + old_omap_state = to_omap_plane_state(old_state); - DBG("%s, crtc=%p fb=%p", omap_plane->name, new_state->crtc, + dual_ovl = is_omap_plane_dual_overlay(new_state); + + /* Cleanup previously held overlay if needed */ + if (old_omap_state->overlay) + omap_overlay_update_state(priv, old_omap_state->overlay); + if (old_omap_state->r_overlay) + omap_overlay_update_state(priv, old_omap_state->r_overlay); + + if (!new_omap_state->overlay) { + DBG("[PLANE:%d:%s] no overlay attached", plane->base.id, plane->name); + return; + } + + ovl_id = new_omap_state->overlay->id; + DBG("%s, crtc=%p fb=%p", plane->name, new_state->crtc, new_state->fb); memset(&info, 0, sizeof(info)); @@ -67,65 +106,155 @@ static void omap_plane_atomic_update(struct drm_plane *plane, info.color_encoding = new_state->color_encoding; info.color_range = new_state->color_range; + r_info = info; + /* update scanout: */ - omap_framebuffer_update_scanout(new_state->fb, new_state, &info); + omap_framebuffer_update_scanout(new_state->fb, new_state, &info, + dual_ovl ? 
&r_info : NULL); - DBG("%dx%d -> %dx%d (%d)", info.width, info.height, - info.out_width, info.out_height, - info.screen_width); + DBG("%s: %dx%d -> %dx%d (%d)", + new_omap_state->overlay->name, info.width, info.height, + info.out_width, info.out_height, info.screen_width); DBG("%d,%d %pad %pad", info.pos_x, info.pos_y, &info.paddr, &info.p_uv_addr); + if (dual_ovl) { + r_ovl_id = new_omap_state->r_overlay->id; + /* + * If the current plane uses 2 hw planes the very next + * zorder is used by the r_overlay so we just use the + * main overlay zorder + 1 + */ + r_info.zorder = info.zorder + 1; + + DBG("%s: %dx%d -> %dx%d (%d)", + new_omap_state->r_overlay->name, + r_info.width, r_info.height, + r_info.out_width, r_info.out_height, r_info.screen_width); + DBG("%d,%d %pad %pad", r_info.pos_x, r_info.pos_y, + &r_info.paddr, &r_info.p_uv_addr); + } + /* and finally, update omapdss: */ - ret = dispc_ovl_setup(priv->dispc, omap_plane->id, &info, + ret = dispc_ovl_setup(priv->dispc, ovl_id, &info, omap_crtc_timings(new_state->crtc), false, omap_crtc_channel(new_state->crtc)); if (ret) { dev_err(plane->dev->dev, "Failed to setup plane %s\n", - omap_plane->name); - dispc_ovl_enable(priv->dispc, omap_plane->id, false); + plane->name); + dispc_ovl_enable(priv->dispc, ovl_id, false); return; } - dispc_ovl_enable(priv->dispc, omap_plane->id, true); + dispc_ovl_enable(priv->dispc, ovl_id, true); + + if (dual_ovl) { + ret = dispc_ovl_setup(priv->dispc, r_ovl_id, &r_info, + omap_crtc_timings(new_state->crtc), false, + omap_crtc_channel(new_state->crtc)); + if (ret) { + dev_err(plane->dev->dev, "Failed to setup plane right-overlay %s\n", + plane->name); + dispc_ovl_enable(priv->dispc, r_ovl_id, false); + dispc_ovl_enable(priv->dispc, ovl_id, false); + return; + } + + dispc_ovl_enable(priv->dispc, r_ovl_id, true); + } } static void omap_plane_atomic_disable(struct drm_plane *plane, struct drm_atomic_state *state) { - struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, - plane); struct omap_drm_private *priv = plane->dev->dev_private; struct omap_plane *omap_plane = to_omap_plane(plane); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + struct omap_plane_state *new_omap_state; + struct omap_plane_state *old_omap_state; + + new_omap_state = to_omap_plane_state(new_state); + old_omap_state = to_omap_plane_state(old_state); + + if (!old_omap_state->overlay) + return; new_state->rotation = DRM_MODE_ROTATE_0; new_state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 
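
The right-overlay programming above offsets both position and stacking from the main half; a worked example with assumed numbers:

/*
 * Assumed: a 4000px-wide plane at pos_x = 100, zorder = 1, split
 * across two overlays that max out at 2048px each:
 *   left:  pos_x = 100,  out_width = 2000, zorder = 1
 *   right: pos_x = 2100, out_width = 2000, zorder = 2 (zorder + 1)
 * r_info.paddr was rebased to x + info->width by
 * omap_framebuffer_update_scanout(), so the right overlay scans
 * out the second half of the framebuffer.
 */
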
0 : omap_plane->id; - dispc_ovl_enable(priv->dispc, omap_plane->id, false); + omap_overlay_update_state(priv, old_omap_state->overlay); + new_omap_state->overlay = NULL; + + if (is_omap_plane_dual_overlay(old_state)) { + omap_overlay_update_state(priv, old_omap_state->r_overlay); + new_omap_state->r_overlay = NULL; + } } +#define FRAC_16_16(mult, div) (((mult) << 16) / (div)) + static int omap_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state) { struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); + struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, + plane); + struct omap_drm_private *priv = plane->dev->dev_private; + struct omap_plane_state *omap_state = to_omap_plane_state(new_plane_state); + struct omap_global_state *omap_overlay_global_state; struct drm_crtc_state *crtc_state; + bool new_r_hw_overlay = false; + bool new_hw_overlay = false; + u32 max_width, max_height; + struct drm_crtc *crtc; + u16 width, height; + u32 caps = 0; + u32 fourcc; + int ret; - if (!new_plane_state->fb) - return 0; + omap_overlay_global_state = omap_get_global_state(state); + if (IS_ERR(omap_overlay_global_state)) + return PTR_ERR(omap_overlay_global_state); + + dispc_ovl_get_max_size(priv->dispc, &width, &height); + max_width = width << 16; + max_height = height << 16; - /* crtc should only be NULL when disabling (i.e., !new_plane_state->fb) */ - if (WARN_ON(!new_plane_state->crtc)) + crtc = new_plane_state->crtc ? new_plane_state->crtc : plane->state->crtc; + if (!crtc) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(state, - new_plane_state->crtc); + crtc_state = drm_atomic_get_existing_crtc_state(state, crtc); /* we should have a crtc state if the plane is attached to a crtc */ if (WARN_ON(!crtc_state)) return 0; - if (!crtc_state->enable) + /* + * Note: these are just sanity checks to filter out totally bad scaling + * factors. The real limits must be calculated case by case, and + * unfortunately we currently do those checks only at the commit + * phase in dispc. + */ + ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, + FRAC_16_16(1, 8), FRAC_16_16(8, 1), + true, true); + if (ret) + return ret; + + DBG("%s: visible %d -> %d", plane->name, + old_plane_state->visible, new_plane_state->visible); + + if (!new_plane_state->visible) { + omap_overlay_release(state, omap_state->overlay); + omap_overlay_release(state, omap_state->r_overlay); + omap_state->overlay = NULL; + omap_state->r_overlay = NULL; return 0; + } if (new_plane_state->crtc_x < 0 || new_plane_state->crtc_y < 0) return -EINVAL; @@ -136,10 +265,96 @@ static int omap_plane_atomic_check(struct drm_plane *plane, if (new_plane_state->crtc_y + new_plane_state->crtc_h > crtc_state->adjusted_mode.vdisplay) return -EINVAL; + /* Make sure dimensions are within bounds. */ + if (new_plane_state->src_h > max_height || new_plane_state->crtc_h > height) + return -EINVAL; + + + if (new_plane_state->src_w > max_width || new_plane_state->crtc_w > width) { + bool is_fourcc_yuv = new_plane_state->fb->format->is_yuv; + + if (is_fourcc_yuv && (((new_plane_state->src_w >> 16) / 2 & 1) || + new_plane_state->crtc_w / 2 & 1)) { + /* + * When the calculated split overlay width + * yields an odd value we will need to adjust + * the individual width by +/- 1. 
So make sure it fits + */ + if (new_plane_state->src_w <= ((2 * width - 1) << 16) && + new_plane_state->crtc_w <= (2 * width - 1)) + new_r_hw_overlay = true; + else + return -EINVAL; + } else { + if (new_plane_state->src_w <= (2 * max_width) && + new_plane_state->crtc_w <= (2 * width)) + new_r_hw_overlay = true; + else + return -EINVAL; + } + } + if (new_plane_state->rotation != DRM_MODE_ROTATE_0 && !omap_framebuffer_supports_rotation(new_plane_state->fb)) return -EINVAL; + if ((new_plane_state->src_w >> 16) != new_plane_state->crtc_w || + (new_plane_state->src_h >> 16) != new_plane_state->crtc_h) + caps |= OMAP_DSS_OVL_CAP_SCALE; + + fourcc = new_plane_state->fb->format->format; + + /* + * (re)allocate hw overlay if we don't have one or + * there is a caps mismatch + */ + if (!omap_state->overlay || (caps & ~omap_state->overlay->caps)) { + new_hw_overlay = true; + } else { + /* check supported format */ + if (!dispc_ovl_color_mode_supported(priv->dispc, omap_state->overlay->id, + fourcc)) + new_hw_overlay = true; + } + + /* + * check if we need two overlays and only have 1 or + * if we had 2 overlays but will only need 1 + */ + if ((new_r_hw_overlay && !omap_state->r_overlay) || + (!new_r_hw_overlay && omap_state->r_overlay)) + new_hw_overlay = true; + + if (new_hw_overlay) { + struct omap_hw_overlay *old_ovl = omap_state->overlay; + struct omap_hw_overlay *old_r_ovl = omap_state->r_overlay; + struct omap_hw_overlay *new_ovl = NULL; + struct omap_hw_overlay *new_r_ovl = NULL; + + omap_overlay_release(state, old_ovl); + omap_overlay_release(state, old_r_ovl); + + ret = omap_overlay_assign(state, plane, caps, fourcc, &new_ovl, + new_r_hw_overlay ? &new_r_ovl : NULL); + if (ret) { + DBG("%s: failed to assign hw_overlay", plane->name); + omap_state->overlay = NULL; + omap_state->r_overlay = NULL; + return ret; + } + + omap_state->overlay = new_ovl; + if (new_r_hw_overlay) + omap_state->r_overlay = new_r_ovl; + else + omap_state->r_overlay = NULL; + } + + DBG("plane: %s overlay_id: %d", plane->name, omap_state->overlay->id); + + if (omap_state->r_overlay) + DBG("plane: %s r_overlay_id: %d", plane->name, omap_state->r_overlay->id); + return 0; } @@ -155,7 +370,7 @@ static void omap_plane_destroy(struct drm_plane *plane) { struct omap_plane *omap_plane = to_omap_plane(plane); - DBG("%s", omap_plane->name); + DBG("%s", plane->name); drm_plane_cleanup(plane); @@ -189,11 +404,17 @@ void omap_plane_install_properties(struct drm_plane *plane, static void omap_plane_reset(struct drm_plane *plane) { struct omap_plane *omap_plane = to_omap_plane(plane); + struct omap_plane_state *omap_state; + + if (plane->state) + drm_atomic_helper_plane_destroy_state(plane, plane->state); - drm_atomic_helper_plane_reset(plane); - if (!plane->state) + omap_state = kzalloc(sizeof(*omap_state), GFP_KERNEL); + if (!omap_state) return; + __drm_atomic_helper_plane_reset(plane, &omap_state->base); + /* * Set the zpos default depending on whether we are a primary or overlay * plane. 
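Aside (not part of the diff): the atomic_check hunk above bounds scaling with 16.16 fixed-point factors and splits an over-wide plane across two hardware overlays, with YUV sources additionally needing even per-overlay widths. Below is a minimal standalone C sketch of those two calculations; split_width() and the sample width are made up for illustration, only FRAC_16_16() comes from the patch.

```c
/*
 * Illustration only, not part of the patch. Shows how FRAC_16_16()
 * encodes scaling limits in 16.16 fixed point, and how an over-wide
 * source could be split across a main and a "right" overlay.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FRAC_16_16(mult, div) (((mult) << 16) / (div))

/*
 * Split a width across two overlays. YUV sources need even per-overlay
 * widths, so an odd half is adjusted by +/-1 between the two halves,
 * mirroring the comment in the hunk above.
 */
static void split_width(uint32_t w, bool is_yuv, uint32_t *l, uint32_t *r)
{
	uint32_t half = w / 2;

	if (is_yuv && (half & 1))
		half++;
	*l = half;
	*r = w - half;
}

int main(void)
{
	uint32_t l, r;

	/* 1/8 and 8/1 scaling bounds as 16.16 fixed point */
	printf("min 0x%x, max 0x%x\n", FRAC_16_16(1, 8), FRAC_16_16(8, 1));

	split_width(1850, true, &l, &r);	/* half 925 is odd -> 926 + 924 */
	printf("1850 -> %u + %u\n", l, r);
	return 0;
}
```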
@@ -204,6 +425,47 @@ static void omap_plane_reset(struct drm_plane *plane) plane->state->color_range = DRM_COLOR_YCBCR_FULL_RANGE; } +static struct drm_plane_state * +omap_plane_atomic_duplicate_state(struct drm_plane *plane) +{ + struct omap_plane_state *state, *current_state; + + if (WARN_ON(!plane->state)) + return NULL; + + current_state = to_omap_plane_state(plane->state); + + state = kmalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_plane_duplicate_state(plane, &state->base); + + state->overlay = current_state->overlay; + state->r_overlay = current_state->r_overlay; + + return &state->base; +} + +static void omap_plane_atomic_print_state(struct drm_printer *p, + const struct drm_plane_state *state) +{ + struct omap_plane_state *omap_state = to_omap_plane_state(state); + + if (omap_state->overlay) + drm_printf(p, "\toverlay=%s (caps=0x%x)\n", + omap_state->overlay->name, + omap_state->overlay->caps); + else + drm_printf(p, "\toverlay=None\n"); + if (omap_state->r_overlay) + drm_printf(p, "\tr_overlay=%s (caps=0x%x)\n", + omap_state->r_overlay->name, + omap_state->r_overlay->caps); + else + drm_printf(p, "\tr_overlay=None\n"); +} + static int omap_plane_atomic_set_property(struct drm_plane *plane, struct drm_plane_state *state, struct drm_property *property, @@ -239,10 +501,11 @@ static const struct drm_plane_funcs omap_plane_funcs = { .disable_plane = drm_atomic_helper_disable_plane, .reset = omap_plane_reset, .destroy = omap_plane_destroy, - .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_duplicate_state = omap_plane_atomic_duplicate_state, .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, .atomic_set_property = omap_plane_atomic_set_property, .atomic_get_property = omap_plane_atomic_get_property, + .atomic_print_state = omap_plane_atomic_print_state, }; static bool omap_plane_supports_yuv(struct drm_plane *plane) @@ -261,20 +524,6 @@ static bool omap_plane_supports_yuv(struct drm_plane *plane) return false; } -static const char *plane_id_to_name[] = { - [OMAP_DSS_GFX] = "gfx", - [OMAP_DSS_VIDEO1] = "vid1", - [OMAP_DSS_VIDEO2] = "vid2", - [OMAP_DSS_VIDEO3] = "vid3", -}; - -static const enum omap_plane_id plane_idx_to_id[] = { - OMAP_DSS_GFX, - OMAP_DSS_VIDEO1, - OMAP_DSS_VIDEO2, - OMAP_DSS_VIDEO3, -}; - /* initialize plane */ struct drm_plane *omap_plane_init(struct drm_device *dev, int idx, enum drm_plane_type type, @@ -284,27 +533,25 @@ struct drm_plane *omap_plane_init(struct drm_device *dev, unsigned int num_planes = dispc_get_num_ovls(priv->dispc); struct drm_plane *plane; struct omap_plane *omap_plane; - enum omap_plane_id id; int ret; u32 nformats; const u32 *formats; - if (WARN_ON(idx >= ARRAY_SIZE(plane_idx_to_id))) + if (WARN_ON(idx >= num_planes)) return ERR_PTR(-EINVAL); - id = plane_idx_to_id[idx]; - - DBG("%s: type=%d", plane_id_to_name[id], type); - omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL); if (!omap_plane) return ERR_PTR(-ENOMEM); - formats = dispc_ovl_get_color_modes(priv->dispc, id); + omap_plane->id = idx; + + DBG("%d: type=%d", omap_plane->id, type); + DBG(" crtc_mask: 0x%04x", possible_crtcs); + + formats = dispc_ovl_get_color_modes(priv->dispc, omap_plane->id); for (nformats = 0; formats[nformats]; ++nformats) ; - omap_plane->id = id; - omap_plane->name = plane_id_to_name[id]; plane = &omap_plane->base; @@ -334,8 +581,8 @@ struct drm_plane *omap_plane_init(struct drm_device *dev, return plane; error: - dev_err(dev->dev, "%s(): could not create plane: %s\n", - __func__, 
plane_id_to_name[id]); + dev_err(dev->dev, "%s(): could not create plane: %d\n", + __func__, omap_plane->id); kfree(omap_plane); return NULL; diff --git a/drivers/gpu/drm/omapdrm/omap_plane.h b/drivers/gpu/drm/omapdrm/omap_plane.h index 0c28fe8ffa20..a9a33e12722a 100644 --- a/drivers/gpu/drm/omapdrm/omap_plane.h +++ b/drivers/gpu/drm/omapdrm/omap_plane.h @@ -22,5 +22,6 @@ struct drm_plane *omap_plane_init(struct drm_device *dev, u32 possible_crtcs); void omap_plane_install_properties(struct drm_plane *plane, struct drm_mode_object *obj); +bool is_omap_plane_dual_overlay(struct drm_plane_state *state); #endif /* __OMAPDRM_PLANE_H__ */ diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c index 8338dc665301..fde0208ec01e 100644 --- a/drivers/gpu/drm/omapdrm/tcm-sita.c +++ b/drivers/gpu/drm/omapdrm/tcm-sita.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * SImple Tiler Allocator (SiTA): 2D and 1D allocation(reservation) algorithm * @@ -6,15 +7,6 @@ * Andy Gross <andy.gross@ti.com> * * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ - * - * This package is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED - * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. - * */ #include <linux/init.h> #include <linux/module.h> diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index cfc8d644cedf..434c2861bb40 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -37,6 +37,17 @@ config DRM_PANEL_ASUS_Z00T_TM5P5_NT35596 NT35596 1080x1920 video mode panel as found in some Asus Zenfone 2 Laser Z00T devices. +config DRM_PANEL_BOE_BF060Y8M_AJ0 + tristate "Boe BF060Y8M-AJ0 panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y here if you want to enable support for Boe BF060Y8M-AJ0 + 5.99" AMOLED modules. The panel has a 1080x2160 resolution and + uses 24 bit RGB per pixel. It provides a MIPI DSI interface to + the host and backlight is controlled through DSI commands. + config DRM_PANEL_BOE_HIMAX8279D tristate "Boe Himax8279d panel" depends on OF @@ -141,7 +152,7 @@ config DRM_PANEL_ILITEK_ILI9341 tristate "Ilitek ILI9341 240x320 QVGA panels" depends on OF && SPI depends on DRM_KMS_HELPER - depends on DRM_KMS_CMA_HELPER + depends on DRM_GEM_CMA_HELPER depends on BACKLIGHT_CLASS_DEVICE select DRM_MIPI_DBI help @@ -189,6 +200,15 @@ config DRM_PANEL_JDI_LT070ME05000 The panel has a 1200(RGB)×1920 (WUXGA) resolution and uses 24 bit per pixel. +config DRM_PANEL_JDI_R63452 + tristate "JDI R63452 Full HD DSI panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y here if you want to enable support for the JDI R63452 + DSI command mode panel as found in Xiaomi Mi 5 Devices. + config DRM_PANEL_KHADAS_TS050 tristate "Khadas TS050 panel" depends on OF @@ -272,6 +292,17 @@ config DRM_PANEL_NOVATEK_NT35510 around the Novatek NT35510 display controller, such as some Hydis panels. 
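Aside (not part of the diff): each new panel Kconfig entry in this series gates a MIPI-DSI panel driver with the same overall shape, and the ordering of drm_panel_add() versus mipi_dsi_attach() is what the probe-failure fixes further down (feiyang, innolux, jdi, kingdisplay) are about. A compressed, hypothetical skeleton follows; the panel callbacks are elided and every name is made up.

```c
/*
 * Hypothetical skeleton of the MIPI-DSI panel drivers added in this
 * series. Only the probe/remove ordering is the point; a real driver
 * also acquires regulators, a reset GPIO and a backlight.
 */
#include <linux/module.h>
#include <linux/of.h>

#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>

struct demo_panel {
	struct drm_panel panel;
	struct mipi_dsi_device *dsi;
};

static const struct drm_panel_funcs demo_panel_funcs = {
	/* .prepare / .unprepare / .get_modes elided */
};

static int demo_panel_probe(struct mipi_dsi_device *dsi)
{
	struct demo_panel *ctx;
	int ret;

	ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dsi = dsi;
	mipi_dsi_set_drvdata(dsi, ctx);

	drm_panel_init(&ctx->panel, &dsi->dev, &demo_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);
	drm_panel_add(&ctx->panel);

	/*
	 * Attach last, and unwind on failure: leaving the panel
	 * registered here is exactly the leak the mipi_dsi_attach()
	 * error-path hunks below close.
	 */
	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		drm_panel_remove(&ctx->panel);
		return ret;
	}

	return 0;
}

static int demo_panel_remove(struct mipi_dsi_device *dsi)
{
	struct demo_panel *ctx = mipi_dsi_get_drvdata(dsi);

	mipi_dsi_detach(dsi);
	drm_panel_remove(&ctx->panel);
	return 0;
}

static const struct of_device_id demo_panel_of_match[] = {
	{ .compatible = "vendor,demo-panel" },	/* made-up compatible */
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, demo_panel_of_match);

static struct mipi_dsi_driver demo_panel_driver = {
	.probe = demo_panel_probe,
	.remove = demo_panel_remove,
	.driver = {
		.name = "panel-demo",
		.of_match_table = demo_panel_of_match,
	},
};
module_mipi_dsi_driver(demo_panel_driver);

MODULE_DESCRIPTION("Illustrative panel driver skeleton");
MODULE_LICENSE("GPL v2");
```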
+config DRM_PANEL_NOVATEK_NT35950 + tristate "Novatek NT35950 DSI panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y here if you want to enable support for the panels built + around the Novatek NT35950 display controller, such as some + Sharp panels used in Sony Xperia Z5 Premium and XZ Premium + mobile phones. + config DRM_PANEL_NOVATEK_NT36672A tristate "Novatek NT36672A DSI panel" depends on OF @@ -580,6 +611,16 @@ config DRM_PANEL_SONY_ACX565AKM Say Y here if you want to enable support for the Sony ACX565AKM 800x600 3.5" panel (found on the Nokia N900). +config DRM_PANEL_SONY_TULIP_TRULY_NT35521 + tristate "Sony Tulip Truly NT35521 panel" + depends on GPIOLIB && OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y here if you want to enable support for the Sony Tulip + NT35521 1280x720 video mode panel as found on Sony Xperia M4 + Aqua phone. + config DRM_PANEL_TDO_TL070WSH30 tristate "TDO TL070WSH30 DSI panel" depends on OF diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index bca4cc1f2715..d99fbbce49d1 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -2,6 +2,7 @@ obj-$(CONFIG_DRM_PANEL_ABT_Y030XX067A) += panel-abt-y030xx067a.o obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o obj-$(CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596) += panel-asus-z00t-tm5p5-n35596.o +obj-$(CONFIG_DRM_PANEL_BOE_BF060Y8M_AJ0) += panel-boe-bf060y8m-aj0.o obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o obj-$(CONFIG_DRM_PANEL_DSI_CM) += panel-dsi-cm.o @@ -17,6 +18,7 @@ obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o obj-$(CONFIG_DRM_PANEL_INNOLUX_EJ030NA) += panel-innolux-ej030na.o obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o +obj-$(CONFIG_DRM_PANEL_JDI_R63452) += panel-jdi-fhd-r63452.o obj-$(CONFIG_DRM_PANEL_KHADAS_TS050) += panel-khadas-ts050.o obj-$(CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04) += panel-kingdisplay-kd097d04.o obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o @@ -25,6 +27,7 @@ obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35510) += panel-novatek-nt35510.o +obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35950) += panel-novatek-nt35950.o obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672A) += panel-novatek-nt36672a.o obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o obj-$(CONFIG_DRM_PANEL_MANTIX_MLAF057WE51) += panel-mantix-mlaf057we51.o @@ -59,6 +62,7 @@ obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7703) += panel-sitronix-st7703.o obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o obj-$(CONFIG_DRM_PANEL_SONY_ACX424AKP) += panel-sony-acx424akp.o obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o +obj-$(CONFIG_DRM_PANEL_SONY_TULIP_TRULY_NT35521) += panel-sony-tulip-truly-nt35521.o obj-$(CONFIG_DRM_PANEL_TDO_TL070WSH30) += panel-tdo-tl070wsh30.o obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o diff --git a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c index 3d8a9ab47cae..f043b484055b 100644 --- 
a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c +++ b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c @@ -272,16 +272,14 @@ static int y030xx067a_probe(struct spi_device *spi) return -EINVAL; priv->supply = devm_regulator_get(dev, "power"); - if (IS_ERR(priv->supply)) { - dev_err(dev, "Failed to get power supply\n"); - return PTR_ERR(priv->supply); - } + if (IS_ERR(priv->supply)) + return dev_err_probe(dev, PTR_ERR(priv->supply), + "Failed to get power supply\n"); priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); - if (IS_ERR(priv->reset_gpio)) { - dev_err(dev, "Failed to get reset GPIO\n"); - return PTR_ERR(priv->reset_gpio); - } + if (IS_ERR(priv->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(priv->reset_gpio), + "Failed to get reset GPIO\n"); drm_panel_init(&priv->panel, dev, &y030xx067a_funcs, DRM_MODE_CONNECTOR_DPI); diff --git a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c new file mode 100644 index 000000000000..ef00cd67dc40 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c @@ -0,0 +1,445 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * BOE BF060Y8M-AJ0 5.99" MIPI-DSI OLED Panel on SW43404 DriverIC + * + * Copyright (c) 2020 AngeloGioacchino Del Regno + * <angelogioacchino.delregno@somainline.org> + */ + +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/regulator/consumer.h> +#include <video/mipi_display.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> + +#define DCS_ALLOW_HBM_RANGE 0x0c +#define DCS_DISALLOW_HBM_RANGE 0x08 + +enum boe_bf060y8m_aj0_supplies { + BF060Y8M_VREG_VCC, + BF060Y8M_VREG_VDDIO, + BF060Y8M_VREG_VCI, + BF060Y8M_VREG_EL_VDD, + BF060Y8M_VREG_EL_VSS, + BF060Y8M_VREG_MAX +}; + +struct boe_bf060y8m_aj0 { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + struct regulator_bulk_data vregs[BF060Y8M_VREG_MAX]; + struct gpio_desc *reset_gpio; + bool prepared; +}; + +static inline +struct boe_bf060y8m_aj0 *to_boe_bf060y8m_aj0(struct drm_panel *panel) +{ + return container_of(panel, struct boe_bf060y8m_aj0, panel); +} + +#define dsi_dcs_write_seq(dsi, seq...) 
do { \ + static const u8 d[] = { seq }; \ + int ret; \ + ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \ + if (ret < 0) \ + return ret; \ + } while (0) + +static void boe_bf060y8m_aj0_reset(struct boe_bf060y8m_aj0 *boe) +{ + gpiod_set_value_cansleep(boe->reset_gpio, 0); + usleep_range(2000, 3000); + gpiod_set_value_cansleep(boe->reset_gpio, 1); + usleep_range(15000, 16000); + gpiod_set_value_cansleep(boe->reset_gpio, 0); + usleep_range(5000, 6000); +} + +static int boe_bf060y8m_aj0_on(struct boe_bf060y8m_aj0 *boe) +{ + struct mipi_dsi_device *dsi = boe->dsi; + struct device *dev = &dsi->dev; + int ret; + + dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00); + dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0x4c); + dsi_dcs_write_seq(dsi, MIPI_DCS_SET_3D_CONTROL, 0x10); + dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, DCS_ALLOW_HBM_RANGE); + dsi_dcs_write_seq(dsi, 0xf8, + 0x00, 0x08, 0x10, 0x00, 0x22, 0x00, 0x00, 0x2d); + + ret = mipi_dsi_dcs_exit_sleep_mode(dsi); + if (ret < 0) { + dev_err(dev, "Failed to exit sleep mode: %d\n", ret); + return ret; + } + msleep(30); + + dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00); + dsi_dcs_write_seq(dsi, 0xc0, + 0x08, 0x48, 0x65, 0x33, 0x33, 0x33, + 0x2a, 0x31, 0x39, 0x20, 0x09); + dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x00, 0x00, 0x1f, 0x1f, + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); + dsi_dcs_write_seq(dsi, 0xe2, 0x20, 0x04, 0x10, 0x12, 0x92, + 0x4f, 0x8f, 0x44, 0x84, 0x83, 0x83, 0x83, + 0x5c, 0x5c, 0x5c); + dsi_dcs_write_seq(dsi, 0xde, 0x01, 0x2c, 0x00, 0x77, 0x3e); + + msleep(30); + + ret = mipi_dsi_dcs_set_display_on(dsi); + if (ret < 0) { + dev_err(dev, "Failed to set display on: %d\n", ret); + return ret; + } + msleep(50); + + return 0; +} + +static int boe_bf060y8m_aj0_off(struct boe_bf060y8m_aj0 *boe) +{ + struct mipi_dsi_device *dsi = boe->dsi; + struct device *dev = &dsi->dev; + int ret; + + /* OFF commands sent in HS mode */ + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + ret = mipi_dsi_dcs_set_display_off(dsi); + if (ret < 0) { + dev_err(dev, "Failed to set display off: %d\n", ret); + return ret; + } + msleep(20); + + ret = mipi_dsi_dcs_enter_sleep_mode(dsi); + if (ret < 0) { + dev_err(dev, "Failed to enter sleep mode: %d\n", ret); + return ret; + } + usleep_range(1000, 2000); + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + return 0; +} + +static int boe_bf060y8m_aj0_prepare(struct drm_panel *panel) +{ + struct boe_bf060y8m_aj0 *boe = to_boe_bf060y8m_aj0(panel); + struct device *dev = &boe->dsi->dev; + int ret; + + if (boe->prepared) + return 0; + + /* + * Enable EL Driving Voltage first - doing that at the beginning + * or at the end of the power sequence doesn't matter, so enable + * it here to avoid yet another usleep at the end. 
+ */ + ret = regulator_enable(boe->vregs[BF060Y8M_VREG_EL_VDD].consumer); + if (ret) + return ret; + ret = regulator_enable(boe->vregs[BF060Y8M_VREG_EL_VSS].consumer); + if (ret) + goto err_elvss; + + ret = regulator_enable(boe->vregs[BF060Y8M_VREG_VCC].consumer); + if (ret) + goto err_vcc; + usleep_range(1000, 2000); + ret = regulator_enable(boe->vregs[BF060Y8M_VREG_VDDIO].consumer); + if (ret) + goto err_vddio; + usleep_range(500, 1000); + ret = regulator_enable(boe->vregs[BF060Y8M_VREG_VCI].consumer); + if (ret) + goto err_vci; + usleep_range(2000, 3000); + + boe_bf060y8m_aj0_reset(boe); + + ret = boe_bf060y8m_aj0_on(boe); + if (ret < 0) { + dev_err(dev, "Failed to initialize panel: %d\n", ret); + gpiod_set_value_cansleep(boe->reset_gpio, 1); + return ret; + } + + boe->prepared = true; + return 0; + +err_vci: + regulator_disable(boe->vregs[BF060Y8M_VREG_VDDIO].consumer); +err_vddio: + regulator_disable(boe->vregs[BF060Y8M_VREG_VCC].consumer); +err_vcc: + regulator_disable(boe->vregs[BF060Y8M_VREG_EL_VSS].consumer); +err_elvss: + regulator_disable(boe->vregs[BF060Y8M_VREG_EL_VDD].consumer); + return ret; +} + +static int boe_bf060y8m_aj0_unprepare(struct drm_panel *panel) +{ + struct boe_bf060y8m_aj0 *boe = to_boe_bf060y8m_aj0(panel); + struct device *dev = &boe->dsi->dev; + int ret; + + if (!boe->prepared) + return 0; + + ret = boe_bf060y8m_aj0_off(boe); + if (ret < 0) + dev_err(dev, "Failed to un-initialize panel: %d\n", ret); + + gpiod_set_value_cansleep(boe->reset_gpio, 1); + ret = regulator_bulk_disable(ARRAY_SIZE(boe->vregs), boe->vregs); + + boe->prepared = false; + return 0; +} + +static const struct drm_display_mode boe_bf060y8m_aj0_mode = { + .clock = 165268, + .hdisplay = 1080, + .hsync_start = 1080 + 36, + .hsync_end = 1080 + 36 + 24, + .htotal = 1080 + 36 + 24 + 96, + .vdisplay = 2160, + .vsync_start = 2160 + 16, + .vsync_end = 2160 + 16 + 1, + .vtotal = 2160 + 16 + 1 + 15, + .width_mm = 68, /* 68.04 mm */ + .height_mm = 136, /* 136.08 mm */ +}; + +static int boe_bf060y8m_aj0_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(connector->dev, &boe_bf060y8m_aj0_mode); + if (!mode) + return -ENOMEM; + + drm_mode_set_name(mode); + + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + connector->display_info.width_mm = mode->width_mm; + connector->display_info.height_mm = mode->height_mm; + drm_mode_probed_add(connector, mode); + + return 1; +} + +static const struct drm_panel_funcs boe_bf060y8m_aj0_panel_funcs = { + .prepare = boe_bf060y8m_aj0_prepare, + .unprepare = boe_bf060y8m_aj0_unprepare, + .get_modes = boe_bf060y8m_aj0_get_modes, +}; + +static int boe_bf060y8m_aj0_bl_update_status(struct backlight_device *bl) +{ + struct mipi_dsi_device *dsi = bl_get_data(bl); + u16 brightness = backlight_get_brightness(bl); + int ret; + + ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness); + if (ret < 0) + return ret; + + return 0; +} + +static int boe_bf060y8m_aj0_bl_get_brightness(struct backlight_device *bl) +{ + struct mipi_dsi_device *dsi = bl_get_data(bl); + u16 brightness; + int ret; + + ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness); + if (ret < 0) + return ret; + + return brightness & 0xff; +} + +static const struct backlight_ops boe_bf060y8m_aj0_bl_ops = { + .update_status = boe_bf060y8m_aj0_bl_update_status, + .get_brightness = boe_bf060y8m_aj0_bl_get_brightness, +}; + +static struct backlight_device * +boe_bf060y8m_aj0_create_backlight(struct mipi_dsi_device 
*dsi) +{ + struct device *dev = &dsi->dev; + const struct backlight_properties props = { + .type = BACKLIGHT_RAW, + .brightness = 127, + .max_brightness = 255, + .scale = BACKLIGHT_SCALE_NON_LINEAR, + }; + + return devm_backlight_device_register(dev, dev_name(dev), dev, dsi, + &boe_bf060y8m_aj0_bl_ops, &props); +} + +static int boe_bf060y8m_aj0_init_vregs(struct boe_bf060y8m_aj0 *boe, + struct device *dev) +{ + struct regulator *vreg; + int ret; + + boe->vregs[BF060Y8M_VREG_VCC].supply = "vcc"; + boe->vregs[BF060Y8M_VREG_VDDIO].supply = "vddio"; + boe->vregs[BF060Y8M_VREG_VCI].supply = "vci"; + boe->vregs[BF060Y8M_VREG_EL_VDD].supply = "elvdd"; + boe->vregs[BF060Y8M_VREG_EL_VSS].supply = "elvss"; + ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(boe->vregs), + boe->vregs); + if (ret < 0) { + dev_err(dev, "Failed to get regulators: %d\n", ret); + return ret; + } + + vreg = boe->vregs[BF060Y8M_VREG_VCC].consumer; + ret = regulator_is_supported_voltage(vreg, 2700000, 3600000); + if (!ret) + return ret; + + vreg = boe->vregs[BF060Y8M_VREG_VDDIO].consumer; + ret = regulator_is_supported_voltage(vreg, 1620000, 1980000); + if (!ret) + return ret; + + vreg = boe->vregs[BF060Y8M_VREG_VCI].consumer; + ret = regulator_is_supported_voltage(vreg, 2600000, 3600000); + if (!ret) + return ret; + + vreg = boe->vregs[BF060Y8M_VREG_EL_VDD].consumer; + ret = regulator_is_supported_voltage(vreg, 4400000, 4800000); + if (!ret) + return ret; + + /* ELVSS is negative: -5.00V to -1.40V */ + vreg = boe->vregs[BF060Y8M_VREG_EL_VSS].consumer; + ret = regulator_is_supported_voltage(vreg, 1400000, 5000000); + if (!ret) + return ret; + + /* + * Set the min/max rated current, which is known only for VCI and + * VDDIO. This step is not guaranteed to succeed on all regulator + * hardware, so on failure just go on gracefully and emit a debug + * print to inform the developer. + * In any case, these two supplies are also optional, so they may be + * fixed regulators which, at the time of writing, do not support + * fake current limiting.
+ */ + vreg = boe->vregs[BF060Y8M_VREG_VDDIO].consumer; + ret = regulator_set_current_limit(vreg, 1500, 2500); + if (ret) + dev_dbg(dev, "Current limit cannot be set on %s: %d\n", + boe->vregs[1].supply, ret); + + vreg = boe->vregs[BF060Y8M_VREG_VCI].consumer; + ret = regulator_set_current_limit(vreg, 20000, 40000); + if (ret) + dev_dbg(dev, "Current limit cannot be set on %s: %d\n", + boe->vregs[2].supply, ret); + + return 0; +} + +static int boe_bf060y8m_aj0_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct boe_bf060y8m_aj0 *boe; + int ret; + + boe = devm_kzalloc(dev, sizeof(*boe), GFP_KERNEL); + if (!boe) + return -ENOMEM; + + ret = boe_bf060y8m_aj0_init_vregs(boe, dev); + if (ret) + return dev_err_probe(dev, ret, + "Failed to initialize supplies.\n"); + + boe->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS); + if (IS_ERR(boe->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(boe->reset_gpio), + "Failed to get reset-gpios\n"); + + boe->dsi = dsi; + mipi_dsi_set_drvdata(dsi, boe); + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_NO_EOT_PACKET | + MIPI_DSI_MODE_VIDEO_SYNC_PULSE | + MIPI_DSI_CLOCK_NON_CONTINUOUS | + MIPI_DSI_MODE_LPM; + + drm_panel_init(&boe->panel, dev, &boe_bf060y8m_aj0_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + + boe->panel.backlight = boe_bf060y8m_aj0_create_backlight(dsi); + if (IS_ERR(boe->panel.backlight)) + return dev_err_probe(dev, PTR_ERR(boe->panel.backlight), + "Failed to create backlight\n"); + + drm_panel_add(&boe->panel); + + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + dev_err(dev, "Failed to attach to DSI host: %d\n", ret); + return ret; + } + + return 0; +} + +static int boe_bf060y8m_aj0_remove(struct mipi_dsi_device *dsi) +{ + struct boe_bf060y8m_aj0 *boe = mipi_dsi_get_drvdata(dsi); + int ret; + + ret = mipi_dsi_detach(dsi); + if (ret < 0) + dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret); + + drm_panel_remove(&boe->panel); + + return 0; +} + +static const struct of_device_id boe_bf060y8m_aj0_of_match[] = { + { .compatible = "boe,bf060y8m-aj0" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, boe_bf060y8m_aj0_of_match); + +static struct mipi_dsi_driver boe_bf060y8m_aj0_driver = { + .probe = boe_bf060y8m_aj0_probe, + .remove = boe_bf060y8m_aj0_remove, + .driver = { + .name = "panel-sw43404-boe-fhd-amoled", + .of_match_table = boe_bf060y8m_aj0_of_match, + }, +}; +module_mipi_dsi_driver(boe_bf060y8m_aj0_driver); + +MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@somainline.org>"); +MODULE_DESCRIPTION("BOE BF060Y8M-AJ0 MIPI-DSI OLED panel"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c index 529561b4fbbc..5fcbde789ddb 100644 --- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c +++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c @@ -84,8 +84,8 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = { _INIT_DCS_CMD(0x0D, 0x63), _INIT_DCS_CMD(0x0E, 0x91), _INIT_DCS_CMD(0x0F, 0x73), - _INIT_DCS_CMD(0x95, 0xEB), - _INIT_DCS_CMD(0x96, 0xEB), + _INIT_DCS_CMD(0x95, 0xE6), + _INIT_DCS_CMD(0x96, 0xF0), _INIT_DCS_CMD(0x30, 0x11), _INIT_DCS_CMD(0x6D, 0x66), _INIT_DCS_CMD(0x75, 0xA2), @@ -111,18 +111,18 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = { _INIT_DCS_CMD(0xB0, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1), _INIT_DCS_CMD(0xB1, 0x00, 0xD2, 0x01, 
0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29), _INIT_DCS_CMD(0xB2, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73), - _INIT_DCS_CMD(0xB3, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7), + _INIT_DCS_CMD(0xB3, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xA7, 0x03, 0xCF, 0x03, 0xDE, 0x03, 0xE0), _INIT_DCS_CMD(0xB4, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1), _INIT_DCS_CMD(0xB5, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29), _INIT_DCS_CMD(0xB6, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73), - _INIT_DCS_CMD(0xB7, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7), + _INIT_DCS_CMD(0xB7, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xA7, 0x03, 0xCF, 0x03, 0xDE, 0x03, 0xE0), _INIT_DCS_CMD(0xB8, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1), _INIT_DCS_CMD(0xB9, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29), _INIT_DCS_CMD(0xBA, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73), - _INIT_DCS_CMD(0xBB, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7), + _INIT_DCS_CMD(0xBB, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xA7, 0x03, 0xCF, 0x03, 0xDE, 0x03, 0xE0), _INIT_DCS_CMD(0xFF, 0x24), _INIT_DCS_CMD(0xFB, 0x01), @@ -225,6 +225,7 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = { _INIT_DCS_CMD(0x7F, 0x3C), _INIT_DCS_CMD(0x82, 0x04), _INIT_DCS_CMD(0x97, 0xC0), + _INIT_DCS_CMD(0xB6, 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x05, 0x00, 0x00), _INIT_DCS_CMD(0x91, 0x44), _INIT_DCS_CMD(0x92, 0xA9), @@ -332,12 +333,39 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = { _INIT_DCS_CMD(0x34, 0x78), _INIT_DCS_CMD(0x35, 0x16), _INIT_DCS_CMD(0xC8, 0x04), - _INIT_DCS_CMD(0xC9, 0x80), + _INIT_DCS_CMD(0xC9, 0x9E), _INIT_DCS_CMD(0xCA, 0x4E), _INIT_DCS_CMD(0xCB, 0x00), - _INIT_DCS_CMD(0xA9, 0x4C), - _INIT_DCS_CMD(0xAA, 0x47), + _INIT_DCS_CMD(0xA9, 0x49), + _INIT_DCS_CMD(0xAA, 0x4B), + _INIT_DCS_CMD(0xAB, 0x48), + _INIT_DCS_CMD(0xAC, 0x43), + _INIT_DCS_CMD(0xAD, 0x40), + _INIT_DCS_CMD(0xAE, 0x50), + _INIT_DCS_CMD(0xAF, 0x44), + _INIT_DCS_CMD(0xB0, 0x54), + _INIT_DCS_CMD(0xB1, 0x4E), + _INIT_DCS_CMD(0xB2, 0x4D), + _INIT_DCS_CMD(0xB3, 0x4C), + _INIT_DCS_CMD(0xB4, 0x41), + _INIT_DCS_CMD(0xB5, 0x47), + _INIT_DCS_CMD(0xB6, 0x53), + _INIT_DCS_CMD(0xB7, 0x3E), + _INIT_DCS_CMD(0xB8, 0x51), + _INIT_DCS_CMD(0xB9, 0x3C), + _INIT_DCS_CMD(0xBA, 0x3B), + _INIT_DCS_CMD(0xBB, 0x46), + _INIT_DCS_CMD(0xBC, 0x45), + _INIT_DCS_CMD(0xBD, 0x55), + _INIT_DCS_CMD(0xBE, 0x3D), + _INIT_DCS_CMD(0xBF, 0x3F), + _INIT_DCS_CMD(0xC0, 0x52), + _INIT_DCS_CMD(0xC1, 0x4A), + _INIT_DCS_CMD(0xC2, 0x39), + _INIT_DCS_CMD(0xC3, 0x4F), + _INIT_DCS_CMD(0xC4, 0x3A), + _INIT_DCS_CMD(0xC5, 0x42), _INIT_DCS_CMD(0xFF, 0x27), _INIT_DCS_CMD(0xFB, 0x01), @@ -419,7 +447,7 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = { {}, }; -static const struct panel_init_cmd inx_init_cmd[] = { +static const struct panel_init_cmd inx_hj110iz_init_cmd[] = { _INIT_DCS_CMD(0xFF, 0x20), _INIT_DCS_CMD(0xFB, 0x01), _INIT_DCS_CMD(0x05, 0xD1), @@ -428,10 +456,10 @@ static const struct panel_init_cmd inx_init_cmd[] = { _INIT_DCS_CMD(0x08, 0x4B), _INIT_DCS_CMD(0x0E, 0x91), _INIT_DCS_CMD(0x0F, 0x69), - _INIT_DCS_CMD(0x95, 0xFF), - 
_INIT_DCS_CMD(0x96, 0xFF), - _INIT_DCS_CMD(0x9D, 0x0A), - _INIT_DCS_CMD(0x9E, 0x0A), + _INIT_DCS_CMD(0x95, 0xF5), + _INIT_DCS_CMD(0x96, 0xF5), + _INIT_DCS_CMD(0x9D, 0x00), + _INIT_DCS_CMD(0x9E, 0x00), _INIT_DCS_CMD(0x69, 0x98), _INIT_DCS_CMD(0x75, 0xA2), _INIT_DCS_CMD(0x77, 0xB3), @@ -493,17 +521,17 @@ static const struct panel_init_cmd inx_init_cmd[] = { _INIT_DCS_CMD(0x2A, 0x03), _INIT_DCS_CMD(0x2B, 0x03), - _INIT_DCS_CMD(0x2F, 0x06), + _INIT_DCS_CMD(0x2F, 0x05), _INIT_DCS_CMD(0x30, 0x32), _INIT_DCS_CMD(0x31, 0x43), - _INIT_DCS_CMD(0x33, 0x06), + _INIT_DCS_CMD(0x33, 0x05), _INIT_DCS_CMD(0x34, 0x32), _INIT_DCS_CMD(0x35, 0x43), _INIT_DCS_CMD(0x37, 0x44), _INIT_DCS_CMD(0x38, 0x40), _INIT_DCS_CMD(0x39, 0x00), - _INIT_DCS_CMD(0x3A, 0x01), - _INIT_DCS_CMD(0x3B, 0x48), + _INIT_DCS_CMD(0x3A, 0x18), + _INIT_DCS_CMD(0x3B, 0x00), _INIT_DCS_CMD(0x3D, 0x93), _INIT_DCS_CMD(0xAB, 0x44), _INIT_DCS_CMD(0xAC, 0x40), @@ -520,8 +548,8 @@ static const struct panel_init_cmd inx_init_cmd[] = { _INIT_DCS_CMD(0x56, 0x08), _INIT_DCS_CMD(0x58, 0x21), _INIT_DCS_CMD(0x59, 0x40), - _INIT_DCS_CMD(0x5A, 0x09), - _INIT_DCS_CMD(0x5B, 0x48), + _INIT_DCS_CMD(0x5A, 0x00), + _INIT_DCS_CMD(0x5B, 0x2C), _INIT_DCS_CMD(0x5E, 0x00, 0x10), _INIT_DCS_CMD(0x5F, 0x00), @@ -558,33 +586,36 @@ static const struct panel_init_cmd inx_init_cmd[] = { _INIT_DCS_CMD(0xEF, 0x01), _INIT_DCS_CMD(0xF0, 0x7A), + _INIT_DCS_CMD(0xB6, 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x05, 0x00, 0x00), _INIT_DCS_CMD(0xFF, 0x25), _INIT_DCS_CMD(0xFB, 0x01), _INIT_DCS_CMD(0x05, 0x00), + _INIT_DCS_CMD(0x13, 0x02), + _INIT_DCS_CMD(0x14, 0xDF), _INIT_DCS_CMD(0xF1, 0x10), _INIT_DCS_CMD(0x1E, 0x00), - _INIT_DCS_CMD(0x1F, 0x09), - _INIT_DCS_CMD(0x20, 0x46), + _INIT_DCS_CMD(0x1F, 0x00), + _INIT_DCS_CMD(0x20, 0x2C), _INIT_DCS_CMD(0x25, 0x00), - _INIT_DCS_CMD(0x26, 0x09), - _INIT_DCS_CMD(0x27, 0x46), + _INIT_DCS_CMD(0x26, 0x00), + _INIT_DCS_CMD(0x27, 0x2C), _INIT_DCS_CMD(0x3F, 0x80), _INIT_DCS_CMD(0x40, 0x00), _INIT_DCS_CMD(0x43, 0x00), - _INIT_DCS_CMD(0x44, 0x09), - _INIT_DCS_CMD(0x45, 0x46), + _INIT_DCS_CMD(0x44, 0x18), + _INIT_DCS_CMD(0x45, 0x00), - _INIT_DCS_CMD(0x48, 0x09), - _INIT_DCS_CMD(0x49, 0x46), + _INIT_DCS_CMD(0x48, 0x00), + _INIT_DCS_CMD(0x49, 0x2C), _INIT_DCS_CMD(0x5B, 0x80), _INIT_DCS_CMD(0x5C, 0x00), - _INIT_DCS_CMD(0x5D, 0x01), - _INIT_DCS_CMD(0x5E, 0x46), - _INIT_DCS_CMD(0x61, 0x01), - _INIT_DCS_CMD(0x62, 0x46), + _INIT_DCS_CMD(0x5D, 0x00), + _INIT_DCS_CMD(0x5E, 0x00), + _INIT_DCS_CMD(0x61, 0x00), + _INIT_DCS_CMD(0x62, 0x2C), _INIT_DCS_CMD(0x68, 0x10), _INIT_DCS_CMD(0xFF, 0x26), _INIT_DCS_CMD(0xFB, 0x01), @@ -700,16 +731,22 @@ static const struct panel_init_cmd inx_init_cmd[] = { _INIT_DCS_CMD(0xA3, 0x30), _INIT_DCS_CMD(0xA4, 0xC0), _INIT_DCS_CMD(0xE8, 0x00), + _INIT_DCS_CMD(0x97, 0x3C), + _INIT_DCS_CMD(0x98, 0x02), + _INIT_DCS_CMD(0x99, 0x95), + _INIT_DCS_CMD(0x9A, 0x06), + _INIT_DCS_CMD(0x9B, 0x00), + _INIT_DCS_CMD(0x9C, 0x0B), + _INIT_DCS_CMD(0x9D, 0x0A), + _INIT_DCS_CMD(0x9E, 0x90), _INIT_DCS_CMD(0xFF, 0xF0), _INIT_DCS_CMD(0xFB, 0x01), _INIT_DCS_CMD(0x3A, 0x08), _INIT_DCS_CMD(0xFF, 0xD0), _INIT_DCS_CMD(0xFB, 0x01), _INIT_DCS_CMD(0x00, 0x33), - _INIT_DCS_CMD(0x02, 0x77), _INIT_DCS_CMD(0x08, 0x01), _INIT_DCS_CMD(0x09, 0xBF), - _INIT_DCS_CMD(0x28, 0x30), _INIT_DCS_CMD(0x2F, 0x33), _INIT_DCS_CMD(0xFF, 0x23), _INIT_DCS_CMD(0xFB, 0x01), @@ -718,6 +755,9 @@ static const struct panel_init_cmd inx_init_cmd[] = { _INIT_DCS_CMD(0xFF, 0x20), _INIT_DCS_CMD(0xFB, 0x01), _INIT_DCS_CMD(0x30, 0x00), + _INIT_DCS_CMD(0xFF, 0x24), + _INIT_DCS_CMD(0x5C, 
0x88), + _INIT_DCS_CMD(0x5D, 0x08), _INIT_DCS_CMD(0xFF, 0x10), _INIT_DCS_CMD(0xB9, 0x01), _INIT_DCS_CMD(0xFF, 0x20), @@ -1312,7 +1352,7 @@ static const struct panel_desc inx_hj110iz_desc = { | MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_VIDEO_BURST, - .init_cmds = inx_init_cmd, + .init_cmds = inx_hj110iz_init_cmd, }; static const struct drm_display_mode boe_tv101wum_nl6_default_mode = { diff --git a/drivers/gpu/drm/panel/panel-dsi-cm.c b/drivers/gpu/drm/panel/panel-dsi-cm.c index da4a69067e18..b58cb064975f 100644 --- a/drivers/gpu/drm/panel/panel-dsi-cm.c +++ b/drivers/gpu/drm/panel/panel-dsi-cm.c @@ -248,7 +248,7 @@ static ssize_t num_dsi_errors_show(struct device *dev, if (r) return r; - return snprintf(buf, PAGE_SIZE, "%d\n", errors); + return sysfs_emit(buf, "%d\n", errors); } static ssize_t hw_revision_show(struct device *dev, @@ -268,7 +268,7 @@ static ssize_t hw_revision_show(struct device *dev, if (r) return r; - return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3); + return sysfs_emit(buf, "%02x.%02x.%02x\n", id1, id2, id3); } static DEVICE_ATTR_RO(num_dsi_errors); diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index fc03046de134..176ef0c3cc1d 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -196,10 +196,10 @@ struct edp_panel_entry { /** @panel_id: 32-bit ID for panel, encoded with drm_edid_encode_panel_id(). */ u32 panel_id; - /* @delay: The power sequencing delays needed for this panel. */ + /** @delay: The power sequencing delays needed for this panel. */ const struct panel_delay *delay; - /* @name: Name of this panel (for printing to logs). */ + /** @name: Name of this panel (for printing to logs). */ const char *name; }; diff --git a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c index 2a602aee61c3..cb0bb3076099 100644 --- a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c +++ b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c @@ -456,16 +456,13 @@ static int k101_im2ba02_dsi_probe(struct mipi_dsi_device *dsi) ret = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(ctx->supplies), ctx->supplies); - if (ret < 0) { - dev_err(&dsi->dev, "Couldn't get regulators\n"); - return ret; - } + if (ret < 0) + return dev_err_probe(&dsi->dev, ret, "Couldn't get regulators\n"); ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW); - if (IS_ERR(ctx->reset)) { - dev_err(&dsi->dev, "Couldn't get our reset GPIO\n"); - return PTR_ERR(ctx->reset); - } + if (IS_ERR(ctx->reset)) + return dev_err_probe(&dsi->dev, PTR_ERR(ctx->reset), + "Couldn't get our reset GPIO\n"); drm_panel_init(&ctx->panel, &dsi->dev, &k101_im2ba02_funcs, DRM_MODE_CONNECTOR_DSI); diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c index 581661b506f8..a9cd7135cb51 100644 --- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c +++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c @@ -200,22 +200,19 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi) DRM_MODE_CONNECTOR_DSI); ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd"); - if (IS_ERR(ctx->dvdd)) { - dev_err(&dsi->dev, "Couldn't get dvdd regulator\n"); - return PTR_ERR(ctx->dvdd); - } + if (IS_ERR(ctx->dvdd)) + return dev_err_probe(&dsi->dev, PTR_ERR(ctx->dvdd), + "Couldn't get dvdd regulator\n"); ctx->avdd = devm_regulator_get(&dsi->dev, "avdd"); - if (IS_ERR(ctx->avdd)) { - dev_err(&dsi->dev, "Couldn't get 
avdd regulator\n"); - return PTR_ERR(ctx->avdd); - } + if (IS_ERR(ctx->avdd)) + return dev_err_probe(&dsi->dev, PTR_ERR(ctx->avdd), + "Couldn't get avdd regulator\n"); ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW); - if (IS_ERR(ctx->reset)) { - dev_err(&dsi->dev, "Couldn't get our reset GPIO\n"); - return PTR_ERR(ctx->reset); - } + if (IS_ERR(ctx->reset)) + return dev_err_probe(&dsi->dev, PTR_ERR(ctx->reset), + "Couldn't get our reset GPIO\n"); ret = drm_panel_of_backlight(&ctx->panel); if (ret) @@ -227,7 +224,13 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi) dsi->format = MIPI_DSI_FMT_RGB888; dsi->lanes = 4; - return mipi_dsi_attach(dsi); + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + drm_panel_remove(&ctx->panel); + return ret; + } + + return 0; } static int feiyang_dsi_remove(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c index 534dd7414d42..ba30d11547ad 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c @@ -42,6 +42,7 @@ struct ili9881c_desc { const struct ili9881c_instr *init; const size_t init_length; const struct drm_display_mode *mode; + const unsigned long mode_flags; }; struct ili9881c { @@ -51,6 +52,8 @@ struct ili9881c { struct regulator *power; struct gpio_desc *reset; + + enum drm_panel_orientation orientation; }; #define ILI9881C_SWITCH_PAGE_INSTR(_page) \ @@ -453,6 +456,213 @@ static const struct ili9881c_instr k101_im2byl02_init[] = { ILI9881C_COMMAND_INSTR(0xD3, 0x3F), /* VN0 */ }; +static const struct ili9881c_instr w552946ab_init[] = { + ILI9881C_SWITCH_PAGE_INSTR(3), + ILI9881C_COMMAND_INSTR(0x01, 0x00), + ILI9881C_COMMAND_INSTR(0x02, 0x00), + ILI9881C_COMMAND_INSTR(0x03, 0x53), + ILI9881C_COMMAND_INSTR(0x04, 0x53), + ILI9881C_COMMAND_INSTR(0x05, 0x13), + ILI9881C_COMMAND_INSTR(0x06, 0x04), + ILI9881C_COMMAND_INSTR(0x07, 0x02), + ILI9881C_COMMAND_INSTR(0x08, 0x02), + ILI9881C_COMMAND_INSTR(0x09, 0x00), + ILI9881C_COMMAND_INSTR(0x0A, 0x00), + ILI9881C_COMMAND_INSTR(0x0B, 0x00), + ILI9881C_COMMAND_INSTR(0x0C, 0x00), + ILI9881C_COMMAND_INSTR(0x0D, 0x00), + ILI9881C_COMMAND_INSTR(0x0E, 0x00), + ILI9881C_COMMAND_INSTR(0x0F, 0x00), + + ILI9881C_COMMAND_INSTR(0x10, 0x00), + ILI9881C_COMMAND_INSTR(0x11, 0x00), + ILI9881C_COMMAND_INSTR(0x12, 0x00), + ILI9881C_COMMAND_INSTR(0x13, 0x00), + ILI9881C_COMMAND_INSTR(0x14, 0x00), + ILI9881C_COMMAND_INSTR(0x15, 0x08), + ILI9881C_COMMAND_INSTR(0x16, 0x10), + ILI9881C_COMMAND_INSTR(0x17, 0x00), + ILI9881C_COMMAND_INSTR(0x18, 0x08), + ILI9881C_COMMAND_INSTR(0x19, 0x00), + ILI9881C_COMMAND_INSTR(0x1A, 0x00), + ILI9881C_COMMAND_INSTR(0x1B, 0x00), + ILI9881C_COMMAND_INSTR(0x1C, 0x00), + ILI9881C_COMMAND_INSTR(0x1D, 0x00), + ILI9881C_COMMAND_INSTR(0x1E, 0xC0), + ILI9881C_COMMAND_INSTR(0x1F, 0x80), + + ILI9881C_COMMAND_INSTR(0x20, 0x02), + ILI9881C_COMMAND_INSTR(0x21, 0x09), + ILI9881C_COMMAND_INSTR(0x22, 0x00), + ILI9881C_COMMAND_INSTR(0x23, 0x00), + ILI9881C_COMMAND_INSTR(0x24, 0x00), + ILI9881C_COMMAND_INSTR(0x25, 0x00), + ILI9881C_COMMAND_INSTR(0x26, 0x00), + ILI9881C_COMMAND_INSTR(0x27, 0x00), + ILI9881C_COMMAND_INSTR(0x28, 0x55), + ILI9881C_COMMAND_INSTR(0x29, 0x03), + ILI9881C_COMMAND_INSTR(0x2A, 0x00), + ILI9881C_COMMAND_INSTR(0x2B, 0x00), + ILI9881C_COMMAND_INSTR(0x2C, 0x00), + ILI9881C_COMMAND_INSTR(0x2D, 0x00), + ILI9881C_COMMAND_INSTR(0x2E, 0x00), + ILI9881C_COMMAND_INSTR(0x2F, 0x00), + + ILI9881C_COMMAND_INSTR(0x30, 0x00), + 
ILI9881C_COMMAND_INSTR(0x31, 0x00), + ILI9881C_COMMAND_INSTR(0x32, 0x00), + ILI9881C_COMMAND_INSTR(0x33, 0x00), + ILI9881C_COMMAND_INSTR(0x34, 0x04), + ILI9881C_COMMAND_INSTR(0x35, 0x05), + ILI9881C_COMMAND_INSTR(0x36, 0x05), + ILI9881C_COMMAND_INSTR(0x37, 0x00), + ILI9881C_COMMAND_INSTR(0x38, 0x3C), + ILI9881C_COMMAND_INSTR(0x39, 0x35), + ILI9881C_COMMAND_INSTR(0x3A, 0x00), + ILI9881C_COMMAND_INSTR(0x3B, 0x40), + ILI9881C_COMMAND_INSTR(0x3C, 0x00), + ILI9881C_COMMAND_INSTR(0x3D, 0x00), + ILI9881C_COMMAND_INSTR(0x3E, 0x00), + ILI9881C_COMMAND_INSTR(0x3F, 0x00), + + ILI9881C_COMMAND_INSTR(0x40, 0x00), + ILI9881C_COMMAND_INSTR(0x41, 0x88), + ILI9881C_COMMAND_INSTR(0x42, 0x00), + ILI9881C_COMMAND_INSTR(0x43, 0x00), + ILI9881C_COMMAND_INSTR(0x44, 0x1F), + + ILI9881C_COMMAND_INSTR(0x50, 0x01), + ILI9881C_COMMAND_INSTR(0x51, 0x23), + ILI9881C_COMMAND_INSTR(0x52, 0x45), + ILI9881C_COMMAND_INSTR(0x53, 0x67), + ILI9881C_COMMAND_INSTR(0x54, 0x89), + ILI9881C_COMMAND_INSTR(0x55, 0xaB), + ILI9881C_COMMAND_INSTR(0x56, 0x01), + ILI9881C_COMMAND_INSTR(0x57, 0x23), + ILI9881C_COMMAND_INSTR(0x58, 0x45), + ILI9881C_COMMAND_INSTR(0x59, 0x67), + ILI9881C_COMMAND_INSTR(0x5A, 0x89), + ILI9881C_COMMAND_INSTR(0x5B, 0xAB), + ILI9881C_COMMAND_INSTR(0x5C, 0xCD), + ILI9881C_COMMAND_INSTR(0x5D, 0xEF), + ILI9881C_COMMAND_INSTR(0x5E, 0x03), + ILI9881C_COMMAND_INSTR(0x5F, 0x14), + + ILI9881C_COMMAND_INSTR(0x60, 0x15), + ILI9881C_COMMAND_INSTR(0x61, 0x0C), + ILI9881C_COMMAND_INSTR(0x62, 0x0D), + ILI9881C_COMMAND_INSTR(0x63, 0x0E), + ILI9881C_COMMAND_INSTR(0x64, 0x0F), + ILI9881C_COMMAND_INSTR(0x65, 0x10), + ILI9881C_COMMAND_INSTR(0x66, 0x11), + ILI9881C_COMMAND_INSTR(0x67, 0x08), + ILI9881C_COMMAND_INSTR(0x68, 0x02), + ILI9881C_COMMAND_INSTR(0x69, 0x0A), + ILI9881C_COMMAND_INSTR(0x6A, 0x02), + ILI9881C_COMMAND_INSTR(0x6B, 0x02), + ILI9881C_COMMAND_INSTR(0x6C, 0x02), + ILI9881C_COMMAND_INSTR(0x6D, 0x02), + ILI9881C_COMMAND_INSTR(0x6E, 0x02), + ILI9881C_COMMAND_INSTR(0x6F, 0x02), + + ILI9881C_COMMAND_INSTR(0x70, 0x02), + ILI9881C_COMMAND_INSTR(0x71, 0x02), + ILI9881C_COMMAND_INSTR(0x72, 0x06), + ILI9881C_COMMAND_INSTR(0x73, 0x02), + ILI9881C_COMMAND_INSTR(0x74, 0x02), + ILI9881C_COMMAND_INSTR(0x75, 0x14), + ILI9881C_COMMAND_INSTR(0x76, 0x15), + ILI9881C_COMMAND_INSTR(0x77, 0x0F), + ILI9881C_COMMAND_INSTR(0x78, 0x0E), + ILI9881C_COMMAND_INSTR(0x79, 0x0D), + ILI9881C_COMMAND_INSTR(0x7A, 0x0C), + ILI9881C_COMMAND_INSTR(0x7B, 0x11), + ILI9881C_COMMAND_INSTR(0x7C, 0x10), + ILI9881C_COMMAND_INSTR(0x7D, 0x06), + ILI9881C_COMMAND_INSTR(0x7E, 0x02), + ILI9881C_COMMAND_INSTR(0x7F, 0x0A), + + ILI9881C_COMMAND_INSTR(0x80, 0x02), + ILI9881C_COMMAND_INSTR(0x81, 0x02), + ILI9881C_COMMAND_INSTR(0x82, 0x02), + ILI9881C_COMMAND_INSTR(0x83, 0x02), + ILI9881C_COMMAND_INSTR(0x84, 0x02), + ILI9881C_COMMAND_INSTR(0x85, 0x02), + ILI9881C_COMMAND_INSTR(0x86, 0x02), + ILI9881C_COMMAND_INSTR(0x87, 0x02), + ILI9881C_COMMAND_INSTR(0x88, 0x08), + ILI9881C_COMMAND_INSTR(0x89, 0x02), + ILI9881C_COMMAND_INSTR(0x8A, 0x02), + + ILI9881C_SWITCH_PAGE_INSTR(4), + ILI9881C_COMMAND_INSTR(0x00, 0x80), + ILI9881C_COMMAND_INSTR(0x70, 0x00), + ILI9881C_COMMAND_INSTR(0x71, 0x00), + ILI9881C_COMMAND_INSTR(0x66, 0xFE), + ILI9881C_COMMAND_INSTR(0x82, 0x15), + ILI9881C_COMMAND_INSTR(0x84, 0x15), + ILI9881C_COMMAND_INSTR(0x85, 0x15), + ILI9881C_COMMAND_INSTR(0x3a, 0x24), + ILI9881C_COMMAND_INSTR(0x32, 0xAC), + ILI9881C_COMMAND_INSTR(0x8C, 0x80), + ILI9881C_COMMAND_INSTR(0x3C, 0xF5), + ILI9881C_COMMAND_INSTR(0x88, 0x33), + + ILI9881C_SWITCH_PAGE_INSTR(1), + 
ILI9881C_COMMAND_INSTR(0x22, 0x0A), + ILI9881C_COMMAND_INSTR(0x31, 0x00), + ILI9881C_COMMAND_INSTR(0x53, 0x78), + ILI9881C_COMMAND_INSTR(0x50, 0x5B), + ILI9881C_COMMAND_INSTR(0x51, 0x5B), + ILI9881C_COMMAND_INSTR(0x60, 0x20), + ILI9881C_COMMAND_INSTR(0x61, 0x00), + ILI9881C_COMMAND_INSTR(0x62, 0x0D), + ILI9881C_COMMAND_INSTR(0x63, 0x00), + + ILI9881C_COMMAND_INSTR(0xA0, 0x00), + ILI9881C_COMMAND_INSTR(0xA1, 0x10), + ILI9881C_COMMAND_INSTR(0xA2, 0x1C), + ILI9881C_COMMAND_INSTR(0xA3, 0x13), + ILI9881C_COMMAND_INSTR(0xA4, 0x15), + ILI9881C_COMMAND_INSTR(0xA5, 0x26), + ILI9881C_COMMAND_INSTR(0xA6, 0x1A), + ILI9881C_COMMAND_INSTR(0xA7, 0x1D), + ILI9881C_COMMAND_INSTR(0xA8, 0x67), + ILI9881C_COMMAND_INSTR(0xA9, 0x1C), + ILI9881C_COMMAND_INSTR(0xAA, 0x29), + ILI9881C_COMMAND_INSTR(0xAB, 0x5B), + ILI9881C_COMMAND_INSTR(0xAC, 0x26), + ILI9881C_COMMAND_INSTR(0xAD, 0x28), + ILI9881C_COMMAND_INSTR(0xAE, 0x5C), + ILI9881C_COMMAND_INSTR(0xAF, 0x30), + ILI9881C_COMMAND_INSTR(0xB0, 0x31), + ILI9881C_COMMAND_INSTR(0xB1, 0x2E), + ILI9881C_COMMAND_INSTR(0xB2, 0x32), + ILI9881C_COMMAND_INSTR(0xB3, 0x00), + + ILI9881C_COMMAND_INSTR(0xC0, 0x00), + ILI9881C_COMMAND_INSTR(0xC1, 0x10), + ILI9881C_COMMAND_INSTR(0xC2, 0x1C), + ILI9881C_COMMAND_INSTR(0xC3, 0x13), + ILI9881C_COMMAND_INSTR(0xC4, 0x15), + ILI9881C_COMMAND_INSTR(0xC5, 0x26), + ILI9881C_COMMAND_INSTR(0xC6, 0x1A), + ILI9881C_COMMAND_INSTR(0xC7, 0x1D), + ILI9881C_COMMAND_INSTR(0xC8, 0x67), + ILI9881C_COMMAND_INSTR(0xC9, 0x1C), + ILI9881C_COMMAND_INSTR(0xCA, 0x29), + ILI9881C_COMMAND_INSTR(0xCB, 0x5B), + ILI9881C_COMMAND_INSTR(0xCC, 0x26), + ILI9881C_COMMAND_INSTR(0xCD, 0x28), + ILI9881C_COMMAND_INSTR(0xCE, 0x5C), + ILI9881C_COMMAND_INSTR(0xCF, 0x30), + ILI9881C_COMMAND_INSTR(0xD0, 0x31), + ILI9881C_COMMAND_INSTR(0xD1, 0x2E), + ILI9881C_COMMAND_INSTR(0xD2, 0x32), + ILI9881C_COMMAND_INSTR(0xD3, 0x00), + ILI9881C_SWITCH_PAGE_INSTR(0), +}; + static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel) { return container_of(panel, struct ili9881c, panel); @@ -603,6 +813,23 @@ static const struct drm_display_mode k101_im2byl02_default_mode = { .height_mm = 217, }; +static const struct drm_display_mode w552946aba_default_mode = { + .clock = 64000, + + .hdisplay = 720, + .hsync_start = 720 + 40, + .hsync_end = 720 + 40 + 10, + .htotal = 720 + 40 + 10 + 40, + + .vdisplay = 1280, + .vsync_start = 1280 + 22, + .vsync_end = 1280 + 22 + 4, + .vtotal = 1280 + 22 + 4 + 11, + + .width_mm = 68, + .height_mm = 121, +}; + static int ili9881c_get_modes(struct drm_panel *panel, struct drm_connector *connector) { @@ -626,6 +853,8 @@ static int ili9881c_get_modes(struct drm_panel *panel, connector->display_info.width_mm = mode->width_mm; connector->display_info.height_mm = mode->height_mm; + drm_connector_set_panel_orientation(connector, ctx->orientation); + return 1; } @@ -653,15 +882,20 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi) DRM_MODE_CONNECTOR_DSI); ctx->power = devm_regulator_get(&dsi->dev, "power"); - if (IS_ERR(ctx->power)) { - dev_err(&dsi->dev, "Couldn't get our power regulator\n"); - return PTR_ERR(ctx->power); - } - - ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW); - if (IS_ERR(ctx->reset)) { - dev_err(&dsi->dev, "Couldn't get our reset GPIO\n"); - return PTR_ERR(ctx->reset); + if (IS_ERR(ctx->power)) + return dev_err_probe(&dsi->dev, PTR_ERR(ctx->power), + "Couldn't get our power regulator\n"); + + ctx->reset = devm_gpiod_get_optional(&dsi->dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(ctx->reset)) + return 
dev_err_probe(&dsi->dev, PTR_ERR(ctx->reset), + "Couldn't get our reset GPIO\n"); + + ret = of_drm_get_panel_orientation(dsi->dev.of_node, &ctx->orientation); + if (ret) { + dev_err(&dsi->dev, "%pOF: failed to get orientation: %d\n", + dsi->dev.of_node, ret); + return ret; } ret = drm_panel_of_backlight(&ctx->panel); @@ -670,7 +904,7 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi) drm_panel_add(&ctx->panel); - dsi->mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE; + dsi->mode_flags = ctx->desc->mode_flags; dsi->format = MIPI_DSI_FMT_RGB888; dsi->lanes = 4; @@ -691,17 +925,28 @@ static const struct ili9881c_desc lhr050h41_desc = { .init = lhr050h41_init, .init_length = ARRAY_SIZE(lhr050h41_init), .mode = &lhr050h41_default_mode, + .mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE, }; static const struct ili9881c_desc k101_im2byl02_desc = { .init = k101_im2byl02_init, .init_length = ARRAY_SIZE(k101_im2byl02_init), .mode = &k101_im2byl02_default_mode, + .mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE, +}; + +static const struct ili9881c_desc w552946aba_desc = { + .init = w552946ab_init, + .init_length = ARRAY_SIZE(w552946ab_init), + .mode = &w552946aba_default_mode, + .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET, }; static const struct of_device_id ili9881c_of_match[] = { { .compatible = "bananapi,lhr050h41", .data = &lhr050h41_desc }, { .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc }, + { .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc }, { } }; MODULE_DEVICE_TABLE(of, ili9881c_of_match); diff --git a/drivers/gpu/drm/panel/panel-innolux-ej030na.c b/drivers/gpu/drm/panel/panel-innolux-ej030na.c index 34b98f70bd22..c558de3f99be 100644 --- a/drivers/gpu/drm/panel/panel-innolux-ej030na.c +++ b/drivers/gpu/drm/panel/panel-innolux-ej030na.c @@ -198,16 +198,14 @@ static int ej030na_probe(struct spi_device *spi) return -EINVAL; priv->supply = devm_regulator_get(dev, "power"); - if (IS_ERR(priv->supply)) { - dev_err(dev, "Failed to get power supply\n"); - return PTR_ERR(priv->supply); - } + if (IS_ERR(priv->supply)) + return dev_err_probe(dev, PTR_ERR(priv->supply), + "Failed to get power supply\n"); priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); - if (IS_ERR(priv->reset_gpio)) { - dev_err(dev, "Failed to get reset GPIO\n"); - return PTR_ERR(priv->reset_gpio); - } + if (IS_ERR(priv->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(priv->reset_gpio), + "Failed to get reset GPIO\n"); drm_panel_init(&priv->panel, dev, &ej030na_funcs, DRM_MODE_CONNECTOR_DPI); diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c index aea316225391..f194b62e290c 100644 --- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c +++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c @@ -484,6 +484,7 @@ static void innolux_panel_del(struct innolux_panel *innolux) static int innolux_panel_probe(struct mipi_dsi_device *dsi) { const struct panel_desc *desc; + struct innolux_panel *innolux; int err; desc = of_device_get_match_data(&dsi->dev); @@ -495,7 +496,14 @@ static int innolux_panel_probe(struct mipi_dsi_device *dsi) if (err < 0) return err; - return mipi_dsi_attach(dsi); + err = mipi_dsi_attach(dsi); + if (err < 0) { + innolux = mipi_dsi_get_drvdata(dsi); + innolux_panel_del(innolux); + return err; + } + + return 0; } static int innolux_panel_remove(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c 
b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c new file mode 100644 index 000000000000..31eafbc38ec0 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c @@ -0,0 +1,323 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021 Raffaele Tranquillini <raffaele.tranquillini@gmail.com> + * + * Generated using linux-mdss-dsi-panel-driver-generator from Lineage OS device tree: + * https://github.com/LineageOS/android_kernel_xiaomi_msm8996/blob/lineage-18.1/arch/arm/boot/dts/qcom/a1-msm8996-mtp.dtsi + */ + +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> + +#include <video/mipi_display.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> + +struct jdi_fhd_r63452 { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + struct gpio_desc *reset_gpio; + bool prepared; +}; + +static inline struct jdi_fhd_r63452 *to_jdi_fhd_r63452(struct drm_panel *panel) +{ + return container_of(panel, struct jdi_fhd_r63452, panel); +} + +#define dsi_generic_write_seq(dsi, seq...) do { \ + static const u8 d[] = { seq }; \ + int ret; \ + ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \ + if (ret < 0) \ + return ret; \ + } while (0) + +#define dsi_dcs_write_seq(dsi, seq...) do { \ + static const u8 d[] = { seq }; \ + int ret; \ + ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \ + if (ret < 0) \ + return ret; \ + } while (0) + +static void jdi_fhd_r63452_reset(struct jdi_fhd_r63452 *ctx) +{ + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(10000, 11000); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + usleep_range(1000, 2000); + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(10000, 11000); +} + +static int jdi_fhd_r63452_on(struct jdi_fhd_r63452 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + dsi_generic_write_seq(dsi, 0xb0, 0x00); + dsi_generic_write_seq(dsi, 0xd6, 0x01); + dsi_generic_write_seq(dsi, 0xec, + 0x64, 0xdc, 0xec, 0x3b, 0x52, 0x00, 0x0b, 0x0b, + 0x13, 0x15, 0x68, 0x0b, 0xb5); + dsi_generic_write_seq(dsi, 0xb0, 0x03); + + ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK); + if (ret < 0) { + dev_err(dev, "Failed to set tear on: %d\n", ret); + return ret; + } + + dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x00); + + ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x77); + if (ret < 0) { + dev_err(dev, "Failed to set pixel format: %d\n", ret); + return ret; + } + + ret = mipi_dsi_dcs_set_column_address(dsi, 0x0000, 0x0437); + if (ret < 0) { + dev_err(dev, "Failed to set column address: %d\n", ret); + return ret; + } + + ret = mipi_dsi_dcs_set_page_address(dsi, 0x0000, 0x077f); + if (ret < 0) { + dev_err(dev, "Failed to set page address: %d\n", ret); + return ret; + } + + ret = mipi_dsi_dcs_set_tear_scanline(dsi, 0x0000); + if (ret < 0) { + dev_err(dev, "Failed to set tear scanline: %d\n", ret); + return ret; + } + + ret = mipi_dsi_dcs_set_display_brightness(dsi, 0x00ff); + if (ret < 0) { + dev_err(dev, "Failed to set display brightness: %d\n", ret); + return ret; + } + + dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24); + dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00); + dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00); + dsi_dcs_write_seq(dsi, 0x84, 0x00); + + ret = mipi_dsi_dcs_set_display_on(dsi); + if (ret < 0) { + dev_err(dev, "Failed to set display on: %d\n", ret); + return ret; + } + 
msleep(20); + + ret = mipi_dsi_dcs_exit_sleep_mode(dsi); + if (ret < 0) { + dev_err(dev, "Failed to exit sleep mode: %d\n", ret); + return ret; + } + msleep(80); + + dsi_generic_write_seq(dsi, 0xb0, 0x04); + dsi_dcs_write_seq(dsi, 0x84, 0x00); + dsi_generic_write_seq(dsi, 0xc8, 0x11); + dsi_generic_write_seq(dsi, 0xb0, 0x03); + + return 0; +} + +static int jdi_fhd_r63452_off(struct jdi_fhd_r63452 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret; + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + dsi_generic_write_seq(dsi, 0xb0, 0x00); + dsi_generic_write_seq(dsi, 0xd6, 0x01); + dsi_generic_write_seq(dsi, 0xec, + 0x64, 0xdc, 0xec, 0x3b, 0x52, 0x00, 0x0b, 0x0b, + 0x13, 0x15, 0x68, 0x0b, 0x95); + dsi_generic_write_seq(dsi, 0xb0, 0x03); + + ret = mipi_dsi_dcs_set_display_off(dsi); + if (ret < 0) { + dev_err(dev, "Failed to set display off: %d\n", ret); + return ret; + } + usleep_range(2000, 3000); + + ret = mipi_dsi_dcs_enter_sleep_mode(dsi); + if (ret < 0) { + dev_err(dev, "Failed to enter sleep mode: %d\n", ret); + return ret; + } + msleep(120); + + return 0; +} + +static int jdi_fhd_r63452_prepare(struct drm_panel *panel) +{ + struct jdi_fhd_r63452 *ctx = to_jdi_fhd_r63452(panel); + struct device *dev = &ctx->dsi->dev; + int ret; + + if (ctx->prepared) + return 0; + + jdi_fhd_r63452_reset(ctx); + + ret = jdi_fhd_r63452_on(ctx); + if (ret < 0) { + dev_err(dev, "Failed to initialize panel: %d\n", ret); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + return ret; + } + + ctx->prepared = true; + return 0; +} + +static int jdi_fhd_r63452_unprepare(struct drm_panel *panel) +{ + struct jdi_fhd_r63452 *ctx = to_jdi_fhd_r63452(panel); + struct device *dev = &ctx->dsi->dev; + int ret; + + if (!ctx->prepared) + return 0; + + ret = jdi_fhd_r63452_off(ctx); + if (ret < 0) + dev_err(dev, "Failed to un-initialize panel: %d\n", ret); + + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + + ctx->prepared = false; + return 0; +} + +static const struct drm_display_mode jdi_fhd_r63452_mode = { + .clock = (1080 + 120 + 16 + 40) * (1920 + 4 + 2 + 4) * 60 / 1000, + .hdisplay = 1080, + .hsync_start = 1080 + 120, + .hsync_end = 1080 + 120 + 16, + .htotal = 1080 + 120 + 16 + 40, + .vdisplay = 1920, + .vsync_start = 1920 + 4, + .vsync_end = 1920 + 4 + 2, + .vtotal = 1920 + 4 + 2 + 4, + .width_mm = 64, + .height_mm = 114, +}; + +static int jdi_fhd_r63452_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(connector->dev, &jdi_fhd_r63452_mode); + if (!mode) + return -ENOMEM; + + drm_mode_set_name(mode); + + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + connector->display_info.width_mm = mode->width_mm; + connector->display_info.height_mm = mode->height_mm; + drm_mode_probed_add(connector, mode); + + return 1; +} + +static const struct drm_panel_funcs jdi_fhd_r63452_panel_funcs = { + .prepare = jdi_fhd_r63452_prepare, + .unprepare = jdi_fhd_r63452_unprepare, + .get_modes = jdi_fhd_r63452_get_modes, +}; + +static int jdi_fhd_r63452_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct jdi_fhd_r63452 *ctx; + int ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "Failed to get reset-gpios\n"); + + ctx->dsi = dsi; + mipi_dsi_set_drvdata(dsi, ctx); + + dsi->lanes = 4; 
+ dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_CLOCK_NON_CONTINUOUS; + + drm_panel_init(&ctx->panel, dev, &jdi_fhd_r63452_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + + ret = drm_panel_of_backlight(&ctx->panel); + if (ret) + return dev_err_probe(dev, ret, "Failed to get backlight\n"); + + drm_panel_add(&ctx->panel); + + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + dev_err(dev, "Failed to attach to DSI host: %d\n", ret); + return ret; + } + + return 0; +} + +static int jdi_fhd_r63452_remove(struct mipi_dsi_device *dsi) +{ + struct jdi_fhd_r63452 *ctx = mipi_dsi_get_drvdata(dsi); + int ret; + + ret = mipi_dsi_detach(dsi); + if (ret < 0) + dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret); + + drm_panel_remove(&ctx->panel); + + return 0; +} + +static const struct of_device_id jdi_fhd_r63452_of_match[] = { + { .compatible = "jdi,fhd-r63452" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, jdi_fhd_r63452_of_match); + +static struct mipi_dsi_driver jdi_fhd_r63452_driver = { + .probe = jdi_fhd_r63452_probe, + .remove = jdi_fhd_r63452_remove, + .driver = { + .name = "panel-jdi-fhd-r63452", + .of_match_table = jdi_fhd_r63452_of_match, + }, +}; +module_mipi_dsi_driver(jdi_fhd_r63452_driver); + +MODULE_AUTHOR("Raffaele Tranquillini <raffaele.tranquillini@gmail.com>"); +MODULE_DESCRIPTION("DRM driver for JDI FHD R63452 DSI panel, command mode"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c index 733010b5e4f5..3c86ad262d5e 100644 --- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c +++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c @@ -473,7 +473,13 @@ static int jdi_panel_probe(struct mipi_dsi_device *dsi) if (ret < 0) return ret; - return mipi_dsi_attach(dsi); + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + jdi_panel_del(jdi); + return ret; + } + + return 0; } static int jdi_panel_remove(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c index 86e4213e8bb1..daccb1fd5fda 100644 --- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c +++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c @@ -406,7 +406,13 @@ static int kingdisplay_panel_probe(struct mipi_dsi_device *dsi) if (err < 0) return err; - return mipi_dsi_attach(dsi); + err = mipi_dsi_attach(dsi); + if (err < 0) { + kingdisplay_panel_del(kingdisplay); + return err; + } + + return 0; } static int kingdisplay_panel_remove(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c index 59a8d99e777d..27a1c9923b09 100644 --- a/drivers/gpu/drm/panel/panel-lvds.c +++ b/drivers/gpu/drm/panel/panel-lvds.c @@ -20,6 +20,7 @@ #include <video/videomode.h> #include <drm/drm_crtc.h> +#include <drm/drm_of.h> #include <drm/drm_panel.h> struct panel_lvds { @@ -116,7 +117,6 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds) { struct device_node *np = lvds->dev->of_node; struct display_timing timing; - const char *mapping; int ret; ret = of_drm_get_panel_orientation(np, &lvds->orientation); @@ -149,24 +149,14 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds) of_property_read_string(np, "label", &lvds->label); - ret = of_property_read_string(np, "data-mapping", &mapping); + ret = drm_of_lvds_get_data_mapping(np); if (ret < 0) { dev_err(lvds->dev, "%pOF: invalid or missing %s DT property\n", np, "data-mapping"); - return -ENODEV; + return 
ret; } - if (!strcmp(mapping, "jeida-18")) { - lvds->bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG; - } else if (!strcmp(mapping, "jeida-24")) { - lvds->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA; - } else if (!strcmp(mapping, "vesa-24")) { - lvds->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG; - } else { - dev_err(lvds->dev, "%pOF: invalid or missing %s DT property\n", - np, "data-mapping"); - return -EINVAL; - } + lvds->bus_format = ret; lvds->data_mirror = of_property_read_bool(np, "data-mirror"); diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c new file mode 100644 index 000000000000..288c7fa83ecc --- /dev/null +++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c @@ -0,0 +1,702 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Novatek NT35950 DriverIC panels driver + * + * Copyright (c) 2021 AngeloGioacchino Del Regno + * <angelogioacchino.delregno@somainline.org> + */ +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of_graph.h> +#include <linux/regulator/consumer.h> + +#include <drm/drm_connector.h> +#include <drm/drm_crtc.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> + +#define MCS_CMD_MAUCCTR 0xf0 /* Manufacturer command enable */ +#define MCS_PARAM_SCALER_FUNCTION 0x58 /* Scale-up function */ +#define MCS_PARAM_SCALEUP_MODE 0xc9 + #define MCS_SCALEUP_SIMPLE 0x0 + #define MCS_SCALEUP_BILINEAR BIT(0) + #define MCS_SCALEUP_DUPLICATE (BIT(0) | BIT(4)) + +/* VESA Display Stream Compression param */ +#define MCS_PARAM_VESA_DSC_ON 0x03 + +/* Data Compression mode */ +#define MCS_PARAM_DATA_COMPRESSION 0x90 + #define MCS_DATA_COMPRESSION_NONE 0x00 + #define MCS_DATA_COMPRESSION_FBC 0x02 + #define MCS_DATA_COMPRESSION_DSC 0x03 + +/* Display Output control */ +#define MCS_PARAM_DISP_OUTPUT_CTRL 0xb4 + #define MCS_DISP_OUT_SRAM_EN BIT(0) + #define MCS_DISP_OUT_VIDEO_MODE BIT(4) + +/* VESA Display Stream Compression setting */ +#define MCS_PARAM_VESA_DSC_SETTING 0xc0 + +/* SubPixel Rendering (SPR) */ +#define MCS_PARAM_SPR_EN 0xe3 +#define MCS_PARAM_SPR_MODE 0xef + #define MCS_SPR_MODE_YYG_RAINBOW_RGB 0x01 + +#define NT35950_VREG_MAX 4 + +struct nt35950 { + struct drm_panel panel; + struct drm_connector *connector; + struct mipi_dsi_device *dsi[2]; + struct regulator_bulk_data vregs[NT35950_VREG_MAX]; + struct gpio_desc *reset_gpio; + const struct nt35950_panel_desc *desc; + + int cur_mode; + u8 last_page; + bool prepared; +}; + +struct nt35950_panel_mode { + const struct drm_display_mode mode; + + bool enable_sram; + bool is_video_mode; + u8 scaler_on; + u8 scaler_mode; + u8 compression; + u8 spr_en; + u8 spr_mode; +}; + +struct nt35950_panel_desc { + const char *model_name; + const struct mipi_dsi_device_info dsi_info; + const struct nt35950_panel_mode *mode_data; + + bool is_dual_dsi; + u8 num_lanes; + u8 num_modes; +}; + +static inline struct nt35950 *to_nt35950(struct drm_panel *panel) +{ + return container_of(panel, struct nt35950, panel); +} + +#define dsi_dcs_write_seq(dsi, seq...) 
do { \ + static const u8 d[] = { seq }; \ + int ret; \ + ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \ + if (ret < 0) \ + return ret; \ + } while (0) + +static void nt35950_reset(struct nt35950 *nt) +{ + gpiod_set_value_cansleep(nt->reset_gpio, 1); + usleep_range(12000, 13000); + gpiod_set_value_cansleep(nt->reset_gpio, 0); + usleep_range(300, 400); + gpiod_set_value_cansleep(nt->reset_gpio, 1); + usleep_range(12000, 13000); +} + +/* + * nt35950_set_cmd2_page - Select manufacturer control (CMD2) page + * @nt: Main driver structure + * @page: Page number (0-7) + * + * Return: Number of transferred bytes or negative number on error + */ +static int nt35950_set_cmd2_page(struct nt35950 *nt, u8 page) +{ + const u8 mauc_cmd2_page[] = { MCS_CMD_MAUCCTR, 0x55, 0xaa, 0x52, + 0x08, page }; + int ret; + + ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], mauc_cmd2_page, + ARRAY_SIZE(mauc_cmd2_page)); + if (ret < 0) + return ret; + + nt->last_page = page; + return 0; +} + +/* + * nt35950_set_data_compression - Set data compression mode + * @nt: Main driver structure + * @comp_mode: Compression mode + * + * Return: Number of transferred bytes or negative number on error + */ +static int nt35950_set_data_compression(struct nt35950 *nt, u8 comp_mode) +{ + u8 cmd_data_compression[] = { MCS_PARAM_DATA_COMPRESSION, comp_mode }; + u8 cmd_vesa_dsc_on[] = { MCS_PARAM_VESA_DSC_ON, !!comp_mode }; + u8 cmd_vesa_dsc_setting[] = { MCS_PARAM_VESA_DSC_SETTING, 0x03 }; + u8 last_page = nt->last_page; + int ret; + + /* Set CMD2 Page 0 if we're not there yet */ + if (last_page != 0) { + ret = nt35950_set_cmd2_page(nt, 0); + if (ret < 0) + return ret; + } + + ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_data_compression, + ARRAY_SIZE(cmd_data_compression)); + if (ret < 0) + return ret; + + ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_vesa_dsc_on, + ARRAY_SIZE(cmd_vesa_dsc_on)); + if (ret < 0) + return ret; + + /* Set the vesa dsc setting on Page 4 */ + ret = nt35950_set_cmd2_page(nt, 4); + if (ret < 0) + return ret; + + /* Display Stream Compression setting, always 0x03 */ + ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_vesa_dsc_setting, + ARRAY_SIZE(cmd_vesa_dsc_setting)); + if (ret < 0) + return ret; + + /* Get back to the previously set page */ + return nt35950_set_cmd2_page(nt, last_page); +} + +/* + * nt35950_set_scaler - Enable/disable resolution upscaling + * @nt: Main driver structure + * @scale_up: Scale up function control + * + * Return: Number of transferred bytes or negative number on error + */ +static int nt35950_set_scaler(struct nt35950 *nt, u8 scale_up) +{ + u8 cmd_scaler[] = { MCS_PARAM_SCALER_FUNCTION, scale_up }; + + return mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_scaler, + ARRAY_SIZE(cmd_scaler)); +} + +/* + * nt35950_set_scale_mode - Resolution upscaling mode + * @nt: Main driver structure + * @mode: Scaler mode (MCS_DATA_COMPRESSION_*) + * + * Return: Number of transferred bytes or negative number on error + */ +static int nt35950_set_scale_mode(struct nt35950 *nt, u8 mode) +{ + u8 cmd_scaler[] = { MCS_PARAM_SCALEUP_MODE, mode }; + + return mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_scaler, + ARRAY_SIZE(cmd_scaler)); +} + +/* + * nt35950_inject_black_image - Display a completely black image + * @nt: Main driver structure + * + * After IC setup, the attached panel may show random data + * due to driveric behavior changes (resolution, compression, + * scaling, etc). 
 * This function, called after parameter setup,
+ * makes the driver IC output a completely black image to
+ * the display.
+ * It makes sense to push a black image before sending the sleep-out
+ * and display-on commands.
+ *
+ * Return: Number of transferred bytes or negative number on error
+ */
+static int nt35950_inject_black_image(struct nt35950 *nt)
+{
+	const u8 cmd0_black_img[] = { 0x6f, 0x01 };
+	const u8 cmd1_black_img[] = { 0xf3, 0x10 };
+	u8 cmd_test[] = { 0xff, 0xaa, 0x55, 0xa5, 0x80 };
+	int ret;
+
+	/* Enable test command */
+	ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_test, ARRAY_SIZE(cmd_test));
+	if (ret < 0)
+		return ret;
+
+	/* Send a black image */
+	ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd0_black_img,
+					ARRAY_SIZE(cmd0_black_img));
+	if (ret < 0)
+		return ret;
+	ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd1_black_img,
+					ARRAY_SIZE(cmd1_black_img));
+	if (ret < 0)
+		return ret;
+
+	/* Disable test command */
+	cmd_test[ARRAY_SIZE(cmd_test) - 1] = 0x00;
+	return mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_test, ARRAY_SIZE(cmd_test));
+}
+
+/*
+ * nt35950_set_dispout - Set Display Output register parameters
+ * @nt: Main driver structure
+ *
+ * Return: Number of transferred bytes or negative number on error
+ */
+static int nt35950_set_dispout(struct nt35950 *nt)
+{
+	u8 cmd_dispout[] = { MCS_PARAM_DISP_OUTPUT_CTRL, 0x00 };
+	const struct nt35950_panel_mode *mode_data = nt->desc->mode_data;
+
+	if (mode_data[nt->cur_mode].is_video_mode)
+		cmd_dispout[1] |= MCS_DISP_OUT_VIDEO_MODE;
+	if (mode_data[nt->cur_mode].enable_sram)
+		cmd_dispout[1] |= MCS_DISP_OUT_SRAM_EN;
+
+	return mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_dispout,
+					 ARRAY_SIZE(cmd_dispout));
+}
+
+static int nt35950_get_current_mode(struct nt35950 *nt)
+{
+	struct drm_connector *connector = nt->connector;
+	struct drm_crtc_state *crtc_state;
+	int i;
+
+	/* Return the default (first) mode if no info available yet */
+	if (!connector->state || !connector->state->crtc)
+		return 0;
+
+	crtc_state = connector->state->crtc->state;
+
+	for (i = 0; i < nt->desc->num_modes; i++) {
+		if (drm_mode_match(&crtc_state->mode,
+				   &nt->desc->mode_data[i].mode,
+				   DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_CLOCK))
+			return i;
+	}
+
+	return 0;
+}
+
+static int nt35950_on(struct nt35950 *nt)
+{
+	const struct nt35950_panel_mode *mode_data = nt->desc->mode_data;
+	struct mipi_dsi_device *dsi = nt->dsi[0];
+	struct device *dev = &dsi->dev;
+	int ret;
+
+	nt->cur_mode = nt35950_get_current_mode(nt);
+	nt->dsi[0]->mode_flags |= MIPI_DSI_MODE_LPM;
+	nt->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM;
+
+	ret = nt35950_set_cmd2_page(nt, 0);
+	if (ret < 0)
+		return ret;
+
+	ret = nt35950_set_data_compression(nt, mode_data[nt->cur_mode].compression);
+	if (ret < 0)
+		return ret;
+
+	ret = nt35950_set_scale_mode(nt, mode_data[nt->cur_mode].scaler_mode);
+	if (ret < 0)
+		return ret;
+
+	ret = nt35950_set_scaler(nt, mode_data[nt->cur_mode].scaler_on);
+	if (ret < 0)
+		return ret;
+
+	ret = nt35950_set_dispout(nt);
+	if (ret < 0)
+		return ret;
+
+	ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+	if (ret < 0) {
+		dev_err(dev, "Failed to set tear on: %d\n", ret);
+		return ret;
+	}
+
+	ret = mipi_dsi_dcs_set_tear_scanline(dsi, 0);
+	if (ret < 0) {
+		dev_err(dev, "Failed to set tear scanline: %d\n", ret);
+		return ret;
+	}
+
+	/* CMD2 Page 1 */
+	ret = nt35950_set_cmd2_page(nt, 1);
+	if (ret < 0)
+		return ret;
+
+	/* Unknown command */
+	dsi_dcs_write_seq(dsi, 0xd4, 0x88, 0x88);
+
+	/* CMD2 Page 7 */
+	ret =
nt35950_set_cmd2_page(nt, 7); + if (ret < 0) + return ret; + + /* Enable SubPixel Rendering */ + dsi_dcs_write_seq(dsi, MCS_PARAM_SPR_EN, 0x01); + + /* SPR Mode: YYG Rainbow-RGB */ + dsi_dcs_write_seq(dsi, MCS_PARAM_SPR_MODE, MCS_SPR_MODE_YYG_RAINBOW_RGB); + + /* CMD3 */ + ret = nt35950_inject_black_image(nt); + if (ret < 0) + return ret; + + ret = mipi_dsi_dcs_exit_sleep_mode(dsi); + if (ret < 0) + return ret; + msleep(120); + + ret = mipi_dsi_dcs_set_display_on(dsi); + if (ret < 0) + return ret; + msleep(120); + + nt->dsi[0]->mode_flags &= ~MIPI_DSI_MODE_LPM; + nt->dsi[1]->mode_flags &= ~MIPI_DSI_MODE_LPM; + + return 0; +} + +static int nt35950_off(struct nt35950 *nt) +{ + struct device *dev = &nt->dsi[0]->dev; + int ret; + + ret = mipi_dsi_dcs_set_display_off(nt->dsi[0]); + if (ret < 0) { + dev_err(dev, "Failed to set display off: %d\n", ret); + goto set_lpm; + } + usleep_range(10000, 11000); + + ret = mipi_dsi_dcs_enter_sleep_mode(nt->dsi[0]); + if (ret < 0) { + dev_err(dev, "Failed to enter sleep mode: %d\n", ret); + goto set_lpm; + } + msleep(150); + +set_lpm: + nt->dsi[0]->mode_flags |= MIPI_DSI_MODE_LPM; + nt->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM; + + return 0; +} + +static int nt35950_sharp_init_vregs(struct nt35950 *nt, struct device *dev) +{ + int ret; + + nt->vregs[0].supply = "vddio"; + nt->vregs[1].supply = "avdd"; + nt->vregs[2].supply = "avee"; + nt->vregs[3].supply = "dvdd"; + ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(nt->vregs), + nt->vregs); + if (ret < 0) + return ret; + + ret = regulator_is_supported_voltage(nt->vregs[0].consumer, + 1750000, 1950000); + if (!ret) + return -EINVAL; + ret = regulator_is_supported_voltage(nt->vregs[1].consumer, + 5200000, 5900000); + if (!ret) + return -EINVAL; + /* AVEE is negative: -5.90V to -5.20V */ + ret = regulator_is_supported_voltage(nt->vregs[2].consumer, + 5200000, 5900000); + if (!ret) + return -EINVAL; + + ret = regulator_is_supported_voltage(nt->vregs[3].consumer, + 1300000, 1400000); + if (!ret) + return -EINVAL; + + return 0; +} + +static int nt35950_prepare(struct drm_panel *panel) +{ + struct nt35950 *nt = to_nt35950(panel); + struct device *dev = &nt->dsi[0]->dev; + int ret; + + if (nt->prepared) + return 0; + + ret = regulator_enable(nt->vregs[0].consumer); + if (ret) + return ret; + usleep_range(2000, 5000); + + ret = regulator_enable(nt->vregs[3].consumer); + if (ret) + goto end; + usleep_range(15000, 18000); + + ret = regulator_enable(nt->vregs[1].consumer); + if (ret) + goto end; + + ret = regulator_enable(nt->vregs[2].consumer); + if (ret) + goto end; + usleep_range(12000, 13000); + + nt35950_reset(nt); + + ret = nt35950_on(nt); + if (ret < 0) { + dev_err(dev, "Failed to initialize panel: %d\n", ret); + goto end; + } + nt->prepared = true; + +end: + if (ret < 0) { + regulator_bulk_disable(ARRAY_SIZE(nt->vregs), nt->vregs); + return ret; + } + + return 0; +} + +static int nt35950_unprepare(struct drm_panel *panel) +{ + struct nt35950 *nt = to_nt35950(panel); + struct device *dev = &nt->dsi[0]->dev; + int ret; + + if (!nt->prepared) + return 0; + + ret = nt35950_off(nt); + if (ret < 0) + dev_err(dev, "Failed to deinitialize panel: %d\n", ret); + + gpiod_set_value_cansleep(nt->reset_gpio, 0); + regulator_bulk_disable(ARRAY_SIZE(nt->vregs), nt->vregs); + + nt->prepared = false; + return 0; +} + +static int nt35950_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct nt35950 *nt = to_nt35950(panel); + int i; + + for (i = 0; i < nt->desc->num_modes; i++) { + struct drm_display_mode 
*mode; + + mode = drm_mode_duplicate(connector->dev, + &nt->desc->mode_data[i].mode); + if (!mode) + return -ENOMEM; + + drm_mode_set_name(mode); + + mode->type |= DRM_MODE_TYPE_DRIVER; + if (nt->desc->num_modes == 1) + mode->type |= DRM_MODE_TYPE_PREFERRED; + + drm_mode_probed_add(connector, mode); + } + + connector->display_info.bpc = 8; + connector->display_info.height_mm = nt->desc->mode_data[0].mode.height_mm; + connector->display_info.width_mm = nt->desc->mode_data[0].mode.width_mm; + nt->connector = connector; + + return nt->desc->num_modes; +} + +static const struct drm_panel_funcs nt35950_panel_funcs = { + .prepare = nt35950_prepare, + .unprepare = nt35950_unprepare, + .get_modes = nt35950_get_modes, +}; + +static int nt35950_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct device_node *dsi_r; + struct mipi_dsi_host *dsi_r_host; + struct nt35950 *nt; + const struct mipi_dsi_device_info *info; + int i, num_dsis = 1, ret; + + nt = devm_kzalloc(dev, sizeof(*nt), GFP_KERNEL); + if (!nt) + return -ENOMEM; + + ret = nt35950_sharp_init_vregs(nt, dev); + if (ret) + return dev_err_probe(dev, ret, "Regulator init failure.\n"); + + nt->desc = of_device_get_match_data(dev); + if (!nt->desc) + return -ENODEV; + + nt->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_ASIS); + if (IS_ERR(nt->reset_gpio)) { + return dev_err_probe(dev, PTR_ERR(nt->reset_gpio), + "Failed to get reset gpio\n"); + } + + /* If the panel is connected on two DSIs then DSI0 left, DSI1 right */ + if (nt->desc->is_dual_dsi) { + info = &nt->desc->dsi_info; + dsi_r = of_graph_get_remote_node(dsi->dev.of_node, 1, -1); + if (!dsi_r) { + dev_err(dev, "Cannot get secondary DSI node.\n"); + return -ENODEV; + } + dsi_r_host = of_find_mipi_dsi_host_by_node(dsi_r); + of_node_put(dsi_r); + if (!dsi_r_host) { + dev_err(dev, "Cannot get secondary DSI host\n"); + return -EPROBE_DEFER; + } + + nt->dsi[1] = mipi_dsi_device_register_full(dsi_r_host, info); + if (!nt->dsi[1]) { + dev_err(dev, "Cannot get secondary DSI node\n"); + return -ENODEV; + } + num_dsis++; + } + + nt->dsi[0] = dsi; + mipi_dsi_set_drvdata(dsi, nt); + + drm_panel_init(&nt->panel, dev, &nt35950_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + + ret = drm_panel_of_backlight(&nt->panel); + if (ret) + return dev_err_probe(dev, ret, "Failed to get backlight\n"); + + drm_panel_add(&nt->panel); + + for (i = 0; i < num_dsis; i++) { + nt->dsi[i]->lanes = nt->desc->num_lanes; + nt->dsi[i]->format = MIPI_DSI_FMT_RGB888; + + nt->dsi[i]->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS | + MIPI_DSI_MODE_LPM; + + if (nt->desc->mode_data[0].is_video_mode) + nt->dsi[i]->mode_flags |= MIPI_DSI_MODE_VIDEO; + + ret = mipi_dsi_attach(nt->dsi[i]); + if (ret < 0) { + return dev_err_probe(dev, ret, + "Cannot attach to DSI%d host.\n", i); + } + } + + /* Make sure to set RESX LOW before starting the power-on sequence */ + gpiod_set_value_cansleep(nt->reset_gpio, 0); + return 0; +} + +static int nt35950_remove(struct mipi_dsi_device *dsi) +{ + struct nt35950 *nt = mipi_dsi_get_drvdata(dsi); + int ret; + + ret = mipi_dsi_detach(nt->dsi[0]); + if (ret < 0) + dev_err(&dsi->dev, + "Failed to detach from DSI0 host: %d\n", ret); + + if (nt->dsi[1]) { + ret = mipi_dsi_detach(nt->dsi[1]); + if (ret < 0) + dev_err(&dsi->dev, + "Failed to detach from DSI1 host: %d\n", ret); + mipi_dsi_device_unregister(nt->dsi[1]); + } + + drm_panel_remove(&nt->panel); + + return 0; +} + +static const struct nt35950_panel_mode sharp_ls055d1sx04_modes[] = { + { + /* 1920x1080 60Hz no compression */ + 
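+		/*
+		 * Editor's note, not part of the upstream patch: .clock is
+		 * in kHz and is normally close to htotal * vtotal * vrefresh
+		 * / 1000. Here htotal = 1820 and vtotal = 1944, so 214537 kHz
+		 * works out to roughly 60.6 Hz rather than exactly 60 Hz.
+		 */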
.mode = { + .clock = 214537, + .hdisplay = 1080, + .hsync_start = 1080 + 400, + .hsync_end = 1080 + 400 + 40, + .htotal = 1080 + 400 + 40 + 300, + .vdisplay = 1920, + .vsync_start = 1920 + 12, + .vsync_end = 1920 + 12 + 2, + .vtotal = 1920 + 12 + 2 + 10, + .width_mm = 68, + .height_mm = 121, + }, + .compression = MCS_DATA_COMPRESSION_NONE, + .enable_sram = true, + .is_video_mode = false, + .scaler_on = 1, + .scaler_mode = MCS_SCALEUP_DUPLICATE, + }, + /* TODO: Add 2160x3840 60Hz when DSC is supported */ +}; + +static const struct nt35950_panel_desc sharp_ls055d1sx04 = { + .model_name = "Sharp LS055D1SX04", + .dsi_info = { + .type = "LS055D1SX04", + .channel = 0, + .node = NULL, + }, + .mode_data = sharp_ls055d1sx04_modes, + .num_modes = ARRAY_SIZE(sharp_ls055d1sx04_modes), + .is_dual_dsi = true, + .num_lanes = 4, +}; + +static const struct of_device_id nt35950_of_match[] = { + { .compatible = "sharp,ls055d1sx04", .data = &sharp_ls055d1sx04 }, + { } +}; +MODULE_DEVICE_TABLE(of, nt35950_of_match); + +static struct mipi_dsi_driver nt35950_driver = { + .probe = nt35950_probe, + .remove = nt35950_remove, + .driver = { + .name = "panel-novatek-nt35950", + .of_match_table = nt35950_of_match, + }, +}; +module_mipi_dsi_driver(nt35950_driver); + +MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@somainline.org>"); +MODULE_DESCRIPTION("Novatek NT35950 DriverIC panels driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c index 533cd3934b8b..231f371901e8 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c @@ -618,7 +618,7 @@ static int nt36672a_panel_add(struct nt36672a_panel *pinfo) ret = regulator_set_load(pinfo->supplies[i].consumer, nt36672a_regulator_enable_loads[i]); if (ret) - return dev_err_probe(dev, ret, "failed to set regulator enable loads\n"); + return dev_err_probe(dev, ret, "failed to set regulator enable loads\n"); } pinfo->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); @@ -656,7 +656,13 @@ static int nt36672a_panel_probe(struct mipi_dsi_device *dsi) if (err < 0) return err; - return mipi_dsi_attach(dsi); + err = mipi_dsi_attach(dsi); + if (err < 0) { + drm_panel_remove(&pinfo->base); + return err; + } + + return 0; } static int nt36672a_panel_remove(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c index f8151fe3ac9a..d036853db865 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c @@ -258,16 +258,13 @@ static int nt39016_probe(struct spi_device *spi) return -EINVAL; panel->supply = devm_regulator_get(dev, "power"); - if (IS_ERR(panel->supply)) { - dev_err(dev, "Failed to get power supply\n"); - return PTR_ERR(panel->supply); - } + if (IS_ERR(panel->supply)) + return dev_err_probe(dev, PTR_ERR(panel->supply), + "Failed to get power supply\n"); panel->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); - if (IS_ERR(panel->reset_gpio)) { - dev_err(dev, "Failed to get reset GPIO\n"); - return PTR_ERR(panel->reset_gpio); - } + if (IS_ERR(panel->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(panel->reset_gpio), "Failed to get reset GPIO\n"); spi->bits_per_word = 8; spi->mode = SPI_MODE_3 | SPI_3WIRE; @@ -287,11 +284,8 @@ static int nt39016_probe(struct spi_device *spi) DRM_MODE_CONNECTOR_DPI); err = drm_panel_of_backlight(&panel->drm_panel); - if (err) { - if 
(err != -EPROBE_DEFER) - dev_err(dev, "Failed to get backlight handle\n"); - return err; - } + if (err) + return dev_err_probe(dev, err, "Failed to get backlight handle\n"); drm_panel_add(&panel->drm_panel); diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c index 3c20beeb1781..3991f5d950af 100644 --- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c +++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c @@ -241,7 +241,13 @@ static int wuxga_nt_panel_probe(struct mipi_dsi_device *dsi) if (ret < 0) return ret; - return mipi_dsi_attach(dsi); + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + wuxga_nt_panel_del(wuxga_nt); + return ret; + } + + return 0; } static int wuxga_nt_panel_remove(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c index a3782830ae3c..1fb579a574d9 100644 --- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c +++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c @@ -199,7 +199,13 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi) dsi->format = MIPI_DSI_FMT_RGB888; dsi->lanes = 4; - return mipi_dsi_attach(dsi); + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + drm_panel_remove(&ctx->panel); + return ret; + } + + return 0; } static int rb070d30_panel_dsi_remove(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c index ccc8ed6fe3ae..e38262b67ff7 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c @@ -452,27 +452,22 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi) ctx->supplies[1].supply = "vci"; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies), ctx->supplies); - if (ret < 0) { - dev_err(dev, "failed to get regulators: %d\n", ret); - return ret; - } + if (ret < 0) + return dev_err_probe(dev, ret, "failed to get regulators\n"); ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); - if (IS_ERR(ctx->reset_gpio)) { - dev_err(dev, "cannot get reset-gpio: %ld\n", - PTR_ERR(ctx->reset_gpio)); - return PTR_ERR(ctx->reset_gpio); - } + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "cannot get reset-gpio\n"); drm_panel_init(&ctx->panel, dev, &s6e63j0x03_funcs, DRM_MODE_CONNECTOR_DSI); ctx->bl_dev = backlight_device_register("s6e63j0x03", dev, ctx, &s6e63j0x03_bl_ops, NULL); - if (IS_ERR(ctx->bl_dev)) { - dev_err(dev, "failed to register backlight device\n"); - return PTR_ERR(ctx->bl_dev); - } + if (IS_ERR(ctx->bl_dev)) + return dev_err_probe(dev, PTR_ERR(ctx->bl_dev), + "failed to register backlight device\n"); ctx->bl_dev->props.max_brightness = MAX_BRIGHTNESS; ctx->bl_dev->props.brightness = DEFAULT_BRIGHTNESS; diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c index ea63799ff2a1..29fde3823212 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c @@ -247,6 +247,7 @@ static int s6e88a0_ams452ef01_probe(struct mipi_dsi_device *dsi) ret = mipi_dsi_attach(dsi); if (ret < 0) { dev_err(dev, "Failed to attach to DSI host: %d\n", ret); + drm_panel_remove(&ctx->panel); return ret; } diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c index 8cb1853574bb..1fb37fda4ba9 100644 --- 
a/drivers/gpu/drm/panel/panel-samsung-sofef00.c +++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c @@ -270,18 +270,14 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi) } ctx->supply = devm_regulator_get(dev, "vddio"); - if (IS_ERR(ctx->supply)) { - ret = PTR_ERR(ctx->supply); - dev_err(dev, "Failed to get vddio regulator: %d\n", ret); - return ret; - } + if (IS_ERR(ctx->supply)) + return dev_err_probe(dev, PTR_ERR(ctx->supply), + "Failed to get vddio regulator\n"); ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); - if (IS_ERR(ctx->reset_gpio)) { - ret = PTR_ERR(ctx->reset_gpio); - dev_warn(dev, "Failed to get reset-gpios: %d\n", ret); - return ret; - } + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "Failed to get reset-gpios\n"); ctx->dsi = dsi; mipi_dsi_set_drvdata(dsi, ctx); @@ -302,6 +298,7 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi) ret = mipi_dsi_attach(dsi); if (ret < 0) { dev_err(dev, "Failed to attach to DSI host: %d\n", ret); + drm_panel_remove(&ctx->panel); return ret; } diff --git a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c index 94992f45113a..a07d0f6c3e69 100644 --- a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c @@ -146,22 +146,19 @@ static int ls037v7dw01_probe(struct platform_device *pdev) lcd->pdev = pdev; lcd->vdd = devm_regulator_get(&pdev->dev, "envdd"); - if (IS_ERR(lcd->vdd)) { - dev_err(&pdev->dev, "failed to get regulator\n"); - return PTR_ERR(lcd->vdd); - } + if (IS_ERR(lcd->vdd)) + return dev_err_probe(&pdev->dev, PTR_ERR(lcd->vdd), + "failed to get regulator\n"); lcd->ini_gpio = devm_gpiod_get(&pdev->dev, "enable", GPIOD_OUT_LOW); - if (IS_ERR(lcd->ini_gpio)) { - dev_err(&pdev->dev, "failed to get enable gpio\n"); - return PTR_ERR(lcd->ini_gpio); - } + if (IS_ERR(lcd->ini_gpio)) + return dev_err_probe(&pdev->dev, PTR_ERR(lcd->ini_gpio), + "failed to get enable gpio\n"); lcd->resb_gpio = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW); - if (IS_ERR(lcd->resb_gpio)) { - dev_err(&pdev->dev, "failed to get reset gpio\n"); - return PTR_ERR(lcd->resb_gpio); - } + if (IS_ERR(lcd->resb_gpio)) + return dev_err_probe(&pdev->dev, PTR_ERR(lcd->resb_gpio), + "failed to get reset gpio\n"); lcd->mo_gpio = devm_gpiod_get_index(&pdev->dev, "mode", 0, GPIOD_OUT_LOW); diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c index b937e24dac8e..25829a0a8e80 100644 --- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c +++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c @@ -296,7 +296,13 @@ static int sharp_nt_panel_probe(struct mipi_dsi_device *dsi) if (ret < 0) return ret; - return mipi_dsi_attach(dsi); + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + sharp_nt_panel_del(sharp_nt); + return ret; + } + + return 0; } static int sharp_nt_panel_remove(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index eb475a3a774b..9e46db5e359c 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -2031,6 +2031,31 @@ static const struct panel_desc innolux_g070y2_l01 = { .connector_type = DRM_MODE_CONNECTOR_LVDS, }; +static const struct drm_display_mode innolux_g070y2_t02_mode = { + .clock = 33333, + .hdisplay = 800, + .hsync_start = 800 + 210, + .hsync_end = 800 + 210 + 20, + .htotal = 800 + 210 + 20 + 46, + .vdisplay = 480, + 
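+	/*
+	 * Editor's note, not part of the upstream patch: each timing term
+	 * is active + front porch + sync length + back porch; the .vtotal
+	 * line below sums the same terms in a different order
+	 * (22 + 23 + 10), which is easy to misread as a typo.
+	 */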
.vsync_start = 480 + 22, + .vsync_end = 480 + 22 + 10, + .vtotal = 480 + 22 + 23 + 10, +}; + +static const struct panel_desc innolux_g070y2_t02 = { + .modes = &innolux_g070y2_t02_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 152, + .height = 92, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, + .connector_type = DRM_MODE_CONNECTOR_DPI, +}; + static const struct display_timing innolux_g101ice_l01_timing = { .pixelclock = { 60400000, 71100000, 74700000 }, .hactive = { 1280, 1280, 1280 }, @@ -3227,6 +3252,33 @@ static const struct panel_desc starry_kr070pe2t = { .connector_type = DRM_MODE_CONNECTOR_DPI, }; +static const struct display_timing tsd_tst043015cmhx_timing = { + .pixelclock = { 5000000, 9000000, 12000000 }, + .hactive = { 480, 480, 480 }, + .hfront_porch = { 4, 5, 65 }, + .hback_porch = { 36, 40, 255 }, + .hsync_len = { 1, 1, 1 }, + .vactive = { 272, 272, 272 }, + .vfront_porch = { 2, 8, 97 }, + .vback_porch = { 3, 8, 31 }, + .vsync_len = { 1, 1, 1 }, + + .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW | + DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE, +}; + +static const struct panel_desc tsd_tst043015cmhx = { + .timings = &tsd_tst043015cmhx_timing, + .num_timings = 1, + .bpc = 8, + .size = { + .width = 105, + .height = 67, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE, +}; + static const struct drm_display_mode tfc_s9700rtwv43tr_01b_mode = { .clock = 30000, .hdisplay = 800, @@ -3473,6 +3525,31 @@ static const struct panel_desc urt_umsh_8596md_parallel = { .bus_format = MEDIA_BUS_FMT_RGB666_1X18, }; +static const struct drm_display_mode vivax_tpc9150_panel_mode = { + .clock = 60000, + .hdisplay = 1024, + .hsync_start = 1024 + 160, + .hsync_end = 1024 + 160 + 100, + .htotal = 1024 + 160 + 100 + 60, + .vdisplay = 600, + .vsync_start = 600 + 12, + .vsync_end = 600 + 12 + 10, + .vtotal = 600 + 12 + 10 + 13, +}; + +static const struct panel_desc vivax_tpc9150_panel = { + .modes = &vivax_tpc9150_panel_mode, + .num_modes = 1, + .bpc = 6, + .size = { + .width = 200, + .height = 115, + }, + .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, + .bus_flags = DRM_BUS_FLAG_DE_HIGH, + .connector_type = DRM_MODE_CONNECTOR_LVDS, +}; + static const struct drm_display_mode vl050_8048nt_c01_mode = { .clock = 33333, .hdisplay = 800, @@ -3738,6 +3815,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "innolux,g070y2-l01", .data = &innolux_g070y2_l01, }, { + .compatible = "innolux,g070y2-t02", + .data = &innolux_g070y2_t02, + }, { .compatible = "innolux,g101ice-l01", .data = &innolux_g101ice_l01 }, { @@ -3876,6 +3956,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "starry,kr070pe2t", .data = &starry_kr070pe2t, }, { + .compatible = "team-source-display,tst043015cmhx", + .data = &tsd_tst043015cmhx, + }, { .compatible = "tfc,s9700rtwv43tr-01b", .data = &tfc_s9700rtwv43tr_01b, }, { @@ -3921,6 +4004,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "urt,umsh-8596md-20t", .data = &urt_umsh_8596md_parallel, }, { + .compatible = "vivax,tpc9150-panel", + .data = &vivax_tpc9150_panel, + }, { .compatible = "vxt,vl050-8048nt-c01", .data = &vl050_8048nt_c01, }, { diff --git a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c new file mode 100644 index 000000000000..69f07b15fca4 --- /dev/null +++ 
b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c @@ -0,0 +1,552 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, Linaro Limited + * + * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree: + * Copyright (c) 2013, The Linux Foundation. All rights reserved. + */ + +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/regulator/consumer.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> + +struct truly_nt35521 { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + struct regulator_bulk_data supplies[2]; + struct gpio_desc *reset_gpio; + struct gpio_desc *blen_gpio; + bool prepared; + bool enabled; +}; + +static inline +struct truly_nt35521 *to_truly_nt35521(struct drm_panel *panel) +{ + return container_of(panel, struct truly_nt35521, panel); +} + +#define dsi_generic_write_seq(dsi, seq...) do { \ + static const u8 d[] = { seq }; \ + int ret; \ + ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \ + if (ret < 0) \ + return ret; \ + } while (0) + +static void truly_nt35521_reset(struct truly_nt35521 *ctx) +{ + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + usleep_range(1000, 2000); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + usleep_range(10000, 11000); + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + msleep(150); +} + +static int truly_nt35521_on(struct truly_nt35521 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00); + dsi_generic_write_seq(dsi, 0xff, 0xaa, 0x55, 0xa5, 0x80); + dsi_generic_write_seq(dsi, 0x6f, 0x11, 0x00); + dsi_generic_write_seq(dsi, 0xf7, 0x20, 0x00); + dsi_generic_write_seq(dsi, 0x6f, 0x01); + dsi_generic_write_seq(dsi, 0xb1, 0x21); + dsi_generic_write_seq(dsi, 0xbd, 0x01, 0xa0, 0x10, 0x08, 0x01); + dsi_generic_write_seq(dsi, 0xb8, 0x01, 0x02, 0x0c, 0x02); + dsi_generic_write_seq(dsi, 0xbb, 0x11, 0x11); + dsi_generic_write_seq(dsi, 0xbc, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xb6, 0x02); + dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x01); + dsi_generic_write_seq(dsi, 0xb0, 0x09, 0x09); + dsi_generic_write_seq(dsi, 0xb1, 0x09, 0x09); + dsi_generic_write_seq(dsi, 0xbc, 0x8c, 0x00); + dsi_generic_write_seq(dsi, 0xbd, 0x8c, 0x00); + dsi_generic_write_seq(dsi, 0xca, 0x00); + dsi_generic_write_seq(dsi, 0xc0, 0x04); + dsi_generic_write_seq(dsi, 0xbe, 0xb5); + dsi_generic_write_seq(dsi, 0xb3, 0x35, 0x35); + dsi_generic_write_seq(dsi, 0xb4, 0x25, 0x25); + dsi_generic_write_seq(dsi, 0xb9, 0x43, 0x43); + dsi_generic_write_seq(dsi, 0xba, 0x24, 0x24); + dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x02); + dsi_generic_write_seq(dsi, 0xee, 0x03); + dsi_generic_write_seq(dsi, 0xb0, + 0x00, 0xb2, 0x00, 0xb3, 0x00, 0xb6, 0x00, 0xc3, + 0x00, 0xce, 0x00, 0xe1, 0x00, 0xf3, 0x01, 0x11); + dsi_generic_write_seq(dsi, 0xb1, + 0x01, 0x2e, 0x01, 0x5c, 0x01, 0x82, 0x01, 0xc3, + 0x01, 0xfe, 0x02, 0x00, 0x02, 0x37, 0x02, 0x77); + dsi_generic_write_seq(dsi, 0xb2, + 0x02, 0xa1, 0x02, 0xd7, 0x02, 0xfe, 0x03, 0x2c, + 0x03, 0x4b, 0x03, 0x63, 0x03, 0x8f, 0x03, 0x90); + dsi_generic_write_seq(dsi, 0xb3, 0x03, 0x96, 0x03, 0x98); + dsi_generic_write_seq(dsi, 0xb4, + 0x00, 0x81, 0x00, 0x8b, 0x00, 0x9c, 0x00, 0xa9, + 0x00, 0xb5, 0x00, 0xcb, 0x00, 0xdf, 0x01, 0x02); + dsi_generic_write_seq(dsi, 0xb5, + 0x01, 0x1f, 0x01, 
0x51, 0x01, 0x7a, 0x01, 0xbf, + 0x01, 0xfa, 0x01, 0xfc, 0x02, 0x34, 0x02, 0x76); + dsi_generic_write_seq(dsi, 0xb6, + 0x02, 0x9f, 0x02, 0xd7, 0x02, 0xfc, 0x03, 0x2c, + 0x03, 0x4a, 0x03, 0x63, 0x03, 0x8f, 0x03, 0xa2); + dsi_generic_write_seq(dsi, 0xb7, 0x03, 0xb8, 0x03, 0xba); + dsi_generic_write_seq(dsi, 0xb8, + 0x00, 0x01, 0x00, 0x02, 0x00, 0x0e, 0x00, 0x2a, + 0x00, 0x41, 0x00, 0x67, 0x00, 0x87, 0x00, 0xb9); + dsi_generic_write_seq(dsi, 0xb9, + 0x00, 0xe2, 0x01, 0x22, 0x01, 0x54, 0x01, 0xa3, + 0x01, 0xe6, 0x01, 0xe7, 0x02, 0x24, 0x02, 0x67); + dsi_generic_write_seq(dsi, 0xba, + 0x02, 0x93, 0x02, 0xcd, 0x02, 0xf6, 0x03, 0x31, + 0x03, 0x6c, 0x03, 0xe9, 0x03, 0xef, 0x03, 0xf4); + dsi_generic_write_seq(dsi, 0xbb, 0x03, 0xf6, 0x03, 0xf7); + dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x03); + dsi_generic_write_seq(dsi, 0xb0, 0x22, 0x00); + dsi_generic_write_seq(dsi, 0xb1, 0x22, 0x00); + dsi_generic_write_seq(dsi, 0xb2, 0x05, 0x00, 0x60, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xb3, 0x05, 0x00, 0x60, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xb4, 0x05, 0x00, 0x60, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xb5, 0x05, 0x00, 0x60, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xba, 0x53, 0x00, 0x60, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xbb, 0x53, 0x00, 0x60, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xbc, 0x53, 0x00, 0x60, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xbd, 0x53, 0x00, 0x60, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xc0, 0x00, 0x34, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xc1, 0x00, 0x00, 0x34, 0x00); + dsi_generic_write_seq(dsi, 0xc2, 0x00, 0x00, 0x34, 0x00); + dsi_generic_write_seq(dsi, 0xc3, 0x00, 0x00, 0x34, 0x00); + dsi_generic_write_seq(dsi, 0xc4, 0x60); + dsi_generic_write_seq(dsi, 0xc5, 0xc0); + dsi_generic_write_seq(dsi, 0xc6, 0x00); + dsi_generic_write_seq(dsi, 0xc7, 0x00); + dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x05); + dsi_generic_write_seq(dsi, 0xb0, 0x17, 0x06); + dsi_generic_write_seq(dsi, 0xb1, 0x17, 0x06); + dsi_generic_write_seq(dsi, 0xb2, 0x17, 0x06); + dsi_generic_write_seq(dsi, 0xb3, 0x17, 0x06); + dsi_generic_write_seq(dsi, 0xb4, 0x17, 0x06); + dsi_generic_write_seq(dsi, 0xb5, 0x17, 0x06); + dsi_generic_write_seq(dsi, 0xb6, 0x17, 0x06); + dsi_generic_write_seq(dsi, 0xb7, 0x17, 0x06); + dsi_generic_write_seq(dsi, 0xb8, 0x00); + dsi_generic_write_seq(dsi, 0xb9, 0x00, 0x03); + dsi_generic_write_seq(dsi, 0xba, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xbb, 0x02, 0x03); + dsi_generic_write_seq(dsi, 0xbc, 0x02, 0x03); + dsi_generic_write_seq(dsi, 0xbd, 0x03, 0x03, 0x00, 0x03, 0x03); + dsi_generic_write_seq(dsi, 0xc0, 0x0b); + dsi_generic_write_seq(dsi, 0xc1, 0x09); + dsi_generic_write_seq(dsi, 0xc2, 0xa6); + dsi_generic_write_seq(dsi, 0xc3, 0x05); + dsi_generic_write_seq(dsi, 0xc4, 0x00); + dsi_generic_write_seq(dsi, 0xc5, 0x02); + dsi_generic_write_seq(dsi, 0xc6, 0x22); + dsi_generic_write_seq(dsi, 0xc7, 0x03); + dsi_generic_write_seq(dsi, 0xc8, 0x07, 0x20); + dsi_generic_write_seq(dsi, 0xc9, 0x03, 0x20); + dsi_generic_write_seq(dsi, 0xca, 0x01, 0x60); + dsi_generic_write_seq(dsi, 0xcb, 0x01, 0x60); + dsi_generic_write_seq(dsi, 0xcc, 0x00, 0x00, 0x02); + dsi_generic_write_seq(dsi, 0xcd, 0x00, 0x00, 0x02); + dsi_generic_write_seq(dsi, 0xce, 0x00, 0x00, 0x02); + dsi_generic_write_seq(dsi, 0xcf, 0x00, 0x00, 0x02); + dsi_generic_write_seq(dsi, 0xd1, 0x00, 0x05, 0x01, 0x07, 0x10); + dsi_generic_write_seq(dsi, 0xd2, 0x10, 0x05, 0x05, 0x03, 0x10); + dsi_generic_write_seq(dsi, 0xd3, 0x20, 0x00, 0x43, 0x07, 0x10); + 
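+	/*
+	 * Editor's note, not part of the upstream patch: each
+	 * dsi_generic_write_seq() above expands to a static byte array
+	 * plus one mipi_dsi_generic_write() call and returns from the
+	 * enclosing function on the first transfer error; the byte values
+	 * are the vendor init sequence taken from the downstream device
+	 * tree, per the file header.
+	 */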
dsi_generic_write_seq(dsi, 0xd4, 0x30, 0x00, 0x43, 0x07, 0x10); + dsi_generic_write_seq(dsi, 0xd0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xd5, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xd6, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xd7, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xe5, 0x06); + dsi_generic_write_seq(dsi, 0xe6, 0x06); + dsi_generic_write_seq(dsi, 0xe7, 0x00); + dsi_generic_write_seq(dsi, 0xe8, 0x06); + dsi_generic_write_seq(dsi, 0xe9, 0x06); + dsi_generic_write_seq(dsi, 0xea, 0x06); + dsi_generic_write_seq(dsi, 0xeb, 0x00); + dsi_generic_write_seq(dsi, 0xec, 0x00); + dsi_generic_write_seq(dsi, 0xed, 0x30); + dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x06); + dsi_generic_write_seq(dsi, 0xb0, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xb1, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xb2, 0x2d, 0x2e); + dsi_generic_write_seq(dsi, 0xb3, 0x31, 0x34); + dsi_generic_write_seq(dsi, 0xb4, 0x29, 0x2a); + dsi_generic_write_seq(dsi, 0xb5, 0x12, 0x10); + dsi_generic_write_seq(dsi, 0xb6, 0x18, 0x16); + dsi_generic_write_seq(dsi, 0xb7, 0x00, 0x02); + dsi_generic_write_seq(dsi, 0xb8, 0x08, 0x31); + dsi_generic_write_seq(dsi, 0xb9, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xba, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xbb, 0x31, 0x08); + dsi_generic_write_seq(dsi, 0xbc, 0x03, 0x01); + dsi_generic_write_seq(dsi, 0xbd, 0x17, 0x19); + dsi_generic_write_seq(dsi, 0xbe, 0x11, 0x13); + dsi_generic_write_seq(dsi, 0xbf, 0x2a, 0x29); + dsi_generic_write_seq(dsi, 0xc0, 0x34, 0x31); + dsi_generic_write_seq(dsi, 0xc1, 0x2e, 0x2d); + dsi_generic_write_seq(dsi, 0xc2, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xc3, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xc4, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xc5, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xc6, 0x2e, 0x2d); + dsi_generic_write_seq(dsi, 0xc7, 0x31, 0x34); + dsi_generic_write_seq(dsi, 0xc8, 0x29, 0x2a); + dsi_generic_write_seq(dsi, 0xc9, 0x17, 0x19); + dsi_generic_write_seq(dsi, 0xca, 0x11, 0x13); + dsi_generic_write_seq(dsi, 0xcb, 0x03, 0x01); + dsi_generic_write_seq(dsi, 0xcc, 0x08, 0x31); + dsi_generic_write_seq(dsi, 0xcd, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xce, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xcf, 0x31, 0x08); + dsi_generic_write_seq(dsi, 0xd0, 0x00, 0x02); + dsi_generic_write_seq(dsi, 0xd1, 0x12, 0x10); + dsi_generic_write_seq(dsi, 0xd2, 0x18, 0x16); + dsi_generic_write_seq(dsi, 0xd3, 0x2a, 0x29); + dsi_generic_write_seq(dsi, 0xd4, 0x34, 0x31); + dsi_generic_write_seq(dsi, 0xd5, 0x2d, 0x2e); + dsi_generic_write_seq(dsi, 0xd6, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xd7, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xe5, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xe6, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xd9, 0x00, 0x00, 0x00, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xe7, 0x00); + dsi_generic_write_seq(dsi, 0x6f, 0x02); + dsi_generic_write_seq(dsi, 0xf7, 0x47); + dsi_generic_write_seq(dsi, 0x6f, 0x0a); + dsi_generic_write_seq(dsi, 0xf7, 0x02); + dsi_generic_write_seq(dsi, 0x6f, 0x17); + dsi_generic_write_seq(dsi, 0xf4, 0x60); + dsi_generic_write_seq(dsi, 0x6f, 0x01); + dsi_generic_write_seq(dsi, 0xf9, 0x46); + dsi_generic_write_seq(dsi, 0x6f, 0x11); + 
dsi_generic_write_seq(dsi, 0xf3, 0x01); + dsi_generic_write_seq(dsi, 0x35, 0x00); + dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00); + dsi_generic_write_seq(dsi, 0xd9, 0x02, 0x03, 0x00); + dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00); + dsi_generic_write_seq(dsi, 0xb1, 0x6c, 0x21); + dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0x35, 0x00); + + ret = mipi_dsi_dcs_exit_sleep_mode(dsi); + if (ret < 0) { + dev_err(dev, "Failed to exit sleep mode: %d\n", ret); + return ret; + } + msleep(120); + + ret = mipi_dsi_dcs_set_display_on(dsi); + if (ret < 0) { + dev_err(dev, "Failed to set display on: %d\n", ret); + return ret; + } + usleep_range(1000, 2000); + + dsi_generic_write_seq(dsi, 0x53, 0x24); + + return 0; +} + +static int truly_nt35521_off(struct truly_nt35521 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret; + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + ret = mipi_dsi_dcs_set_display_off(dsi); + if (ret < 0) { + dev_err(dev, "Failed to set display off: %d\n", ret); + return ret; + } + msleep(50); + + ret = mipi_dsi_dcs_enter_sleep_mode(dsi); + if (ret < 0) { + dev_err(dev, "Failed to enter sleep mode: %d\n", ret); + return ret; + } + msleep(150); + + return 0; +} + +static int truly_nt35521_prepare(struct drm_panel *panel) +{ + struct truly_nt35521 *ctx = to_truly_nt35521(panel); + struct device *dev = &ctx->dsi->dev; + int ret; + + if (ctx->prepared) + return 0; + + ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); + if (ret < 0) { + dev_err(dev, "Failed to enable regulators: %d\n", ret); + return ret; + } + + truly_nt35521_reset(ctx); + + ret = truly_nt35521_on(ctx); + if (ret < 0) { + dev_err(dev, "Failed to initialize panel: %d\n", ret); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + return ret; + } + + ctx->prepared = true; + return 0; +} + +static int truly_nt35521_unprepare(struct drm_panel *panel) +{ + struct truly_nt35521 *ctx = to_truly_nt35521(panel); + struct device *dev = &ctx->dsi->dev; + int ret; + + if (!ctx->prepared) + return 0; + + ret = truly_nt35521_off(ctx); + if (ret < 0) + dev_err(dev, "Failed to un-initialize panel: %d\n", ret); + + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), + ctx->supplies); + + ctx->prepared = false; + return 0; +} + +static int truly_nt35521_enable(struct drm_panel *panel) +{ + struct truly_nt35521 *ctx = to_truly_nt35521(panel); + + if (ctx->enabled) + return 0; + + gpiod_set_value_cansleep(ctx->blen_gpio, 1); + + ctx->enabled = true; + return 0; +} + +static int truly_nt35521_disable(struct drm_panel *panel) +{ + struct truly_nt35521 *ctx = to_truly_nt35521(panel); + + if (!ctx->enabled) + return 0; + + gpiod_set_value_cansleep(ctx->blen_gpio, 0); + + ctx->enabled = false; + return 0; +} + +static const struct drm_display_mode truly_nt35521_mode = { + .clock = (720 + 232 + 20 + 112) * (1280 + 18 + 1 + 18) * 60 / 1000, + .hdisplay = 720, + .hsync_start = 720 + 232, + .hsync_end = 720 + 232 + 20, + .htotal = 720 + 232 + 20 + 112, + .vdisplay = 1280, + .vsync_start = 1280 + 18, + .vsync_end = 1280 + 18 + 1, + .vtotal = 1280 + 18 + 1 + 18, + .width_mm = 65, + .height_mm = 116, +}; + +static int truly_nt35521_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(connector->dev, &truly_nt35521_mode); 
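+	/*
+	 * Editor's note, not part of the upstream patch: .get_modes
+	 * duplicates the static mode so the connector owns its own copy,
+	 * names it, marks it as the preferred driver-supplied mode,
+	 * reports the physical size, and returns the number of modes
+	 * added (one).
+	 */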
+ if (!mode) + return -ENOMEM; + + drm_mode_set_name(mode); + + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + connector->display_info.width_mm = mode->width_mm; + connector->display_info.height_mm = mode->height_mm; + drm_mode_probed_add(connector, mode); + + return 1; +} + +static const struct drm_panel_funcs truly_nt35521_panel_funcs = { + .prepare = truly_nt35521_prepare, + .unprepare = truly_nt35521_unprepare, + .enable = truly_nt35521_enable, + .disable = truly_nt35521_disable, + .get_modes = truly_nt35521_get_modes, +}; + +static int truly_nt35521_bl_update_status(struct backlight_device *bl) +{ + struct mipi_dsi_device *dsi = bl_get_data(bl); + u16 brightness = backlight_get_brightness(bl); + int ret; + + ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness); + if (ret < 0) + return ret; + + return 0; +} + +static int truly_nt35521_bl_get_brightness(struct backlight_device *bl) +{ + struct mipi_dsi_device *dsi = bl_get_data(bl); + u16 brightness; + int ret; + + ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness); + if (ret < 0) + return ret; + + return brightness & 0xff; +} + +static const struct backlight_ops truly_nt35521_bl_ops = { + .update_status = truly_nt35521_bl_update_status, + .get_brightness = truly_nt35521_bl_get_brightness, +}; + +static struct backlight_device * +truly_nt35521_create_backlight(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + const struct backlight_properties props = { + .type = BACKLIGHT_RAW, + .brightness = 255, + .max_brightness = 255, + }; + + return devm_backlight_device_register(dev, dev_name(dev), dev, dsi, + &truly_nt35521_bl_ops, &props); +} + +static int truly_nt35521_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct truly_nt35521 *ctx; + int ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->supplies[0].supply = "positive5"; + ctx->supplies[1].supply = "negative5"; + ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies), + ctx->supplies); + if (ret < 0) { + dev_err(dev, "Failed to get regulators: %d\n", ret); + return ret; + } + + ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "Failed to get reset-gpios\n"); + + ctx->blen_gpio = devm_gpiod_get(dev, "backlight", GPIOD_OUT_LOW); + if (IS_ERR(ctx->blen_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->blen_gpio), + "Failed to get backlight-gpios\n"); + + ctx->dsi = dsi; + mipi_dsi_set_drvdata(dsi, ctx); + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_NO_EOT_PACKET | + MIPI_DSI_CLOCK_NON_CONTINUOUS; + + drm_panel_init(&ctx->panel, dev, &truly_nt35521_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + + ctx->panel.backlight = truly_nt35521_create_backlight(dsi); + if (IS_ERR(ctx->panel.backlight)) + return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight), + "Failed to create backlight\n"); + + drm_panel_add(&ctx->panel); + + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + dev_err(dev, "Failed to attach to DSI host: %d\n", ret); + drm_panel_remove(&ctx->panel); + return ret; + } + + return 0; +} + +static int truly_nt35521_remove(struct mipi_dsi_device *dsi) +{ + struct truly_nt35521 *ctx = mipi_dsi_get_drvdata(dsi); + int ret; + + ret = mipi_dsi_detach(dsi); + if (ret < 0) + dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret); + + 
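+	/*
+	 * Editor's note, not part of the upstream patch: remove() unwinds
+	 * probe() in reverse order: detach from the DSI host first, then
+	 * drop the panel. The same pairing is why several patches in this
+	 * series add drm_panel_remove()/panel_del() calls to the
+	 * mipi_dsi_attach() error paths in probe.
+	 */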
drm_panel_remove(&ctx->panel); + + return 0; +} + +static const struct of_device_id truly_nt35521_of_match[] = { + { .compatible = "sony,tulip-truly-nt35521" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, truly_nt35521_of_match); + +static struct mipi_dsi_driver truly_nt35521_driver = { + .probe = truly_nt35521_probe, + .remove = truly_nt35521_remove, + .driver = { + .name = "panel-truly-nt35521", + .of_match_table = truly_nt35521_of_match, + }, +}; +module_mipi_dsi_driver(truly_nt35521_driver); + +MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>"); +MODULE_DESCRIPTION("DRM driver for Sony Tulip Truly NT35521 panel"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c index bacaf1b7fb70..1866cdb8f9c1 100644 --- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c @@ -431,16 +431,14 @@ static int td043mtea1_probe(struct spi_device *spi) memcpy(lcd->gamma, td043mtea1_def_gamma, sizeof(lcd->gamma)); lcd->vcc_reg = devm_regulator_get(&spi->dev, "vcc"); - if (IS_ERR(lcd->vcc_reg)) { - dev_err(&spi->dev, "failed to get VCC regulator\n"); - return PTR_ERR(lcd->vcc_reg); - } + if (IS_ERR(lcd->vcc_reg)) + return dev_err_probe(&spi->dev, PTR_ERR(lcd->vcc_reg), + "failed to get VCC regulator\n"); lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH); - if (IS_ERR(lcd->reset_gpio)) { - dev_err(&spi->dev, "failed to get reset GPIO\n"); - return PTR_ERR(lcd->reset_gpio); - } + if (IS_ERR(lcd->reset_gpio)) + return dev_err_probe(&spi->dev, PTR_ERR(lcd->reset_gpio), + "failed to get reset GPIO\n"); spi->bits_per_word = 16; spi->mode = SPI_MODE_0; diff --git a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c index d17aae8b71d7..8177f5a360fb 100644 --- a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c +++ b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c @@ -283,26 +283,19 @@ static int xpp055c272_probe(struct mipi_dsi_device *dsi) return -ENOMEM; ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); - if (IS_ERR(ctx->reset_gpio)) { - dev_err(dev, "cannot get reset gpio\n"); - return PTR_ERR(ctx->reset_gpio); - } + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "cannot get reset gpio\n"); ctx->vci = devm_regulator_get(dev, "vci"); - if (IS_ERR(ctx->vci)) { - ret = PTR_ERR(ctx->vci); - if (ret != -EPROBE_DEFER) - dev_err(dev, "Failed to request vci regulator: %d\n", ret); - return ret; - } + if (IS_ERR(ctx->vci)) + return dev_err_probe(dev, PTR_ERR(ctx->vci), + "Failed to request vci regulator\n"); ctx->iovcc = devm_regulator_get(dev, "iovcc"); - if (IS_ERR(ctx->iovcc)) { - ret = PTR_ERR(ctx->iovcc); - if (ret != -EPROBE_DEFER) - dev_err(dev, "Failed to request iovcc regulator: %d\n", ret); - return ret; - } + if (IS_ERR(ctx->iovcc)) + return dev_err_probe(dev, PTR_ERR(ctx->iovcc), + "Failed to request iovcc regulator\n"); mipi_dsi_set_drvdata(dsi, ctx); diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 82ad9a67f251..96bb5a465627 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -427,7 +427,7 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, } } - args->retained = drm_gem_shmem_madvise(gem_obj, args->madv); + args->retained = drm_gem_shmem_madvise(&bo->base, args->madv); if (args->retained) { if (args->madv == PANFROST_MADV_DONTNEED) 
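Editor's note on the panfrost hunks above and below: they track a drm_gem_shmem_helper rework in which the shmem helpers take a struct drm_gem_shmem_object * directly instead of a struct drm_gem_object *, with drm_gem_shmem_object_*() wrappers left for the drm_gem_object_funcs table. A minimal sketch of the new calling convention, assuming only the panfrost_gem_object layout visible in these diffs:

/* Sketch only: the driver BO embeds the shmem object, which embeds the GEM object. */
struct panfrost_gem_object {
	struct drm_gem_shmem_object base;	/* base.base is the struct drm_gem_object */
	/* ... driver-private state ... */
};

static int example_pin(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo =
		container_of(obj, struct panfrost_gem_object, base.base);

	/* New convention: pass the embedded shmem object, not the raw GEM object. */
	return drm_gem_shmem_pin(&bo->base);
}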
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index 23377481f4e3..ead65f5fa2bc 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -49,7 +49,7 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj) kvfree(bo->sgts); } - drm_gem_shmem_free_object(obj); + drm_gem_shmem_free(&bo->base); } struct panfrost_gem_mapping * @@ -187,23 +187,25 @@ void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv) static int panfrost_gem_pin(struct drm_gem_object *obj) { - if (to_panfrost_bo(obj)->is_heap) + struct panfrost_gem_object *bo = to_panfrost_bo(obj); + + if (bo->is_heap) return -EINVAL; - return drm_gem_shmem_pin(obj); + return drm_gem_shmem_pin(&bo->base); } static const struct drm_gem_object_funcs panfrost_gem_funcs = { .free = panfrost_gem_free_object, .open = panfrost_gem_open, .close = panfrost_gem_close, - .print_info = drm_gem_shmem_print_info, + .print_info = drm_gem_shmem_object_print_info, .pin = panfrost_gem_pin, - .unpin = drm_gem_shmem_unpin, - .get_sg_table = drm_gem_shmem_get_sg_table, - .vmap = drm_gem_shmem_vmap, - .vunmap = drm_gem_shmem_vunmap, - .mmap = drm_gem_shmem_mmap, + .unpin = drm_gem_shmem_object_unpin, + .get_sg_table = drm_gem_shmem_object_get_sg_table, + .vmap = drm_gem_shmem_object_vmap, + .vunmap = drm_gem_shmem_object_vunmap, + .mmap = drm_gem_shmem_object_mmap, }; /** @@ -221,7 +223,7 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t obj = kzalloc(sizeof(*obj), GFP_KERNEL); if (!obj) - return NULL; + return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&obj->mappings.list); mutex_init(&obj->mappings.lock); diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c index 1b9f68d8e9aa..b0142341e223 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c @@ -52,7 +52,7 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj) goto unlock_mappings; panfrost_gem_teardown_mappings_locked(bo); - drm_gem_shmem_purge_locked(obj); + drm_gem_shmem_purge_locked(&bo->base); ret = true; mutex_unlock(&shmem->pages_lock); diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index f51d3f791a17..39562f2d11a4 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -304,7 +304,8 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, int panfrost_mmu_map(struct panfrost_gem_mapping *mapping) { struct panfrost_gem_object *bo = mapping->obj; - struct drm_gem_object *obj = &bo->base.base; + struct drm_gem_shmem_object *shmem = &bo->base; + struct drm_gem_object *obj = &shmem->base; struct panfrost_device *pfdev = to_panfrost_device(obj->dev); struct sg_table *sgt; int prot = IOMMU_READ | IOMMU_WRITE; @@ -315,7 +316,7 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping) if (bo->noexec) prot |= IOMMU_NOEXEC; - sgt = drm_gem_shmem_get_pages_sgt(obj); + sgt = drm_gem_shmem_get_pages_sgt(shmem); if (WARN_ON(IS_ERR(sgt))) return PTR_ERR(sgt); diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c index e116a4d9b8e5..1d36df5af98d 100644 --- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c @@ -105,7 +105,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, goto err_close_bo; } - ret = 
drm_gem_shmem_vmap(&bo->base, &map); + ret = drm_gem_shmem_vmap(bo, &map); if (ret) goto err_put_mapping; perfcnt->buf = map.vaddr; @@ -164,7 +164,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, return 0; err_vunmap: - drm_gem_shmem_vunmap(&bo->base, &map); + drm_gem_shmem_vunmap(bo, &map); err_put_mapping: panfrost_gem_mapping_put(perfcnt->mapping); err_close_bo: @@ -194,7 +194,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF)); perfcnt->user = NULL; - drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, &map); + drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base, &map); perfcnt->buf = NULL; panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv); panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu); diff --git a/drivers/gpu/drm/pl111/Kconfig b/drivers/gpu/drm/pl111/Kconfig index 3aae387a96af..91ee05b01303 100644 --- a/drivers/gpu/drm/pl111/Kconfig +++ b/drivers/gpu/drm/pl111/Kconfig @@ -6,7 +6,6 @@ config DRM_PL111 depends on VEXPRESS_CONFIG || VEXPRESS_CONFIG=n depends on COMMON_CLK select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER select DRM_BRIDGE select DRM_PANEL_BRIDGE diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c index 1f9a59601bb1..6a36b0fd845c 100644 --- a/drivers/gpu/drm/qxl/qxl_debugfs.c +++ b/drivers/gpu/drm/qxl/qxl_debugfs.c @@ -57,13 +57,16 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data) struct qxl_bo *bo; list_for_each_entry(bo, &qdev->gem.objects, list) { - struct dma_resv_list *fobj; - int rel; - - rcu_read_lock(); - fobj = dma_resv_shared_list(bo->tbo.base.resv); - rel = fobj ? fobj->shared_count : 0; - rcu_read_unlock(); + struct dma_resv_iter cursor; + struct dma_fence *fence; + int rel = 0; + + dma_resv_iter_begin(&cursor, bo->tbo.base.resv, true); + dma_resv_for_each_fence_unlocked(&cursor, fence) { + if (dma_resv_iter_is_restarted(&cursor)) + rel = 0; + ++rel; + } seq_printf(m, "size %ld, pc %d, num releases %d\n", (unsigned long)bo->tbo.base.size, diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index fc47b0deb021..e4b16421500b 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -29,7 +29,6 @@ #include "qxl_drv.h" -#include <linux/console.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/vgaarb.h> @@ -295,7 +294,7 @@ static struct drm_driver qxl_driver = { static int __init qxl_init(void) { - if (vgacon_text_force() && qxl_modeset == -1) + if (drm_firmware_drivers_only() && qxl_modeset == -1) return -EINVAL; if (qxl_modeset == 0) diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index b74cebca1f89..956c72b5aa33 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -31,7 +31,6 @@ #include <linux/compat.h> -#include <linux/console.h> #include <linux/module.h> #include <linux/pm_runtime.h> #include <linux/vga_switcheroo.h> @@ -637,15 +636,11 @@ static struct pci_driver radeon_kms_pci_driver = { static int __init radeon_module_init(void) { - if (vgacon_text_force() && radeon_modeset == -1) { - DRM_INFO("VGACON disable radeon kernel modesetting.\n"); + if (drm_firmware_drivers_only() && radeon_modeset == -1) radeon_modeset = 0; - } - if (radeon_modeset == 0) { - DRM_ERROR("No UMS support in radeon module!\n"); + if (radeon_modeset == 0) return -EINVAL; - } DRM_INFO("radeon kernel modesetting enabled.\n"); radeon_register_atpx_handler(); 
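The qxl debugfs conversion above replaces direct access to the dma_resv shared-fence list with the dma_resv iterator API. A minimal sketch of the unlocked iteration pattern follows, assuming only a fence count is wanted (count_resv_fences is an illustrative name, not a kernel function): per-walk state must be reset when the iterator signals a restart, and dma_resv_iter_end() drops any fence reference still held by the cursor if the walk stops early.

#include <linux/dma-resv.h>

/* Hypothetical helper illustrating the unlocked dma_resv iterator. */
static unsigned int count_resv_fences(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int count = 0;

	dma_resv_iter_begin(&cursor, resv, true); /* true: include shared fences */
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		/* The walk restarts if the object was modified concurrently. */
		if (dma_resv_iter_is_restarted(&cursor))
			count = 0;
		++count;
	}
	dma_resv_iter_end(&cursor); /* drop any reference held by the cursor */

	return count;
}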
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 482fb0ae6cb5..e2488559cc9f 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -168,7 +168,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) if (!r) { acpi_status = radeon_acpi_init(rdev); if (acpi_status) - dev_dbg(dev->dev, "Error during ACPI methods call\n"); + dev_dbg(dev->dev, "Error during ACPI methods call\n"); } if (radeon_is_px(dev)) { @@ -648,6 +648,8 @@ void radeon_driver_lastclose_kms(struct drm_device *dev) int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) { struct radeon_device *rdev = dev->dev_private; + struct radeon_fpriv *fpriv; + struct radeon_vm *vm; int r; file_priv->driver_priv = NULL; @@ -660,8 +662,6 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) /* new gpu have virtual address space support */ if (rdev->family >= CHIP_CAYMAN) { - struct radeon_fpriv *fpriv; - struct radeon_vm *vm; fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); if (unlikely(!fpriv)) { @@ -672,35 +672,39 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) if (rdev->accel_working) { vm = &fpriv->vm; r = radeon_vm_init(rdev, vm); - if (r) { - kfree(fpriv); - goto out_suspend; - } + if (r) + goto out_fpriv; r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); - if (r) { - radeon_vm_fini(rdev, vm); - kfree(fpriv); - goto out_suspend; - } + if (r) + goto out_vm_fini; /* map the ib pool buffer read only into * virtual address space */ vm->ib_bo_va = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo); + if (!vm->ib_bo_va) { + r = -ENOMEM; + goto out_vm_fini; + } + r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va, RADEON_VA_IB_OFFSET, RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED); - if (r) { - radeon_vm_fini(rdev, vm); - kfree(fpriv); - goto out_suspend; - } + if (r) + goto out_vm_fini; } file_priv->driver_priv = fpriv; } + if (!r) + goto out_suspend; + +out_vm_fini: + radeon_vm_fini(rdev, vm); +out_fpriv: + kfree(fpriv); out_suspend: pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c index 9257b60144c4..b991ba1bcd51 100644 --- a/drivers/gpu/drm/radeon/radeon_sync.c +++ b/drivers/gpu/drm/radeon/radeon_sync.c @@ -91,33 +91,17 @@ int radeon_sync_resv(struct radeon_device *rdev, struct dma_resv *resv, bool shared) { - struct dma_resv_list *flist; - struct dma_fence *f; + struct dma_resv_iter cursor; struct radeon_fence *fence; - unsigned i; + struct dma_fence *f; int r = 0; - /* always sync to the exclusive fence */ - f = dma_resv_excl_fence(resv); - fence = f ? 
to_radeon_fence(f) : NULL; - if (fence && fence->rdev == rdev) - radeon_sync_fence(sync, fence); - else if (f) - r = dma_fence_wait(f, true); - - flist = dma_resv_shared_list(resv); - if (shared || !flist || r) - return r; - - for (i = 0; i < flist->shared_count; ++i) { - f = rcu_dereference_protected(flist->shared[i], - dma_resv_held(resv)); + dma_resv_for_each_fence(&cursor, resv, shared, f) { fence = to_radeon_fence(f); if (fence && fence->rdev == rdev) radeon_sync_fence(sync, fence); else r = dma_fence_wait(f, true); - if (r) break; } diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 2ea86919d953..377f9cdb5b53 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -469,7 +469,6 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, { int32_t *msg, msg_type, handle; unsigned img_size = 0; - struct dma_fence *f; void *ptr; int i, r; @@ -479,13 +478,11 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, return -EINVAL; } - f = dma_resv_excl_fence(bo->tbo.base.resv); - if (f) { - r = radeon_fence_wait((struct radeon_fence *)f, false); - if (r) { - DRM_ERROR("Failed waiting for UVD message (%d)!\n", r); - return r; - } + r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false, + MAX_SCHEDULE_TIMEOUT); + if (r <= 0) { + DRM_ERROR("Failed waiting for UVD message (%d)!\n", r); + return r ? r : -ETIME; } r = radeon_bo_kmap(bo, &ptr); diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index 511a942e851d..ca4a36464340 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c @@ -513,7 +513,7 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, * @allocated: allocated a new handle? * * Validates the handle and return the found session index or -EINVAL - * we we don't have another free session index. + * if we don't have another free session index. */ static int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle, bool *allocated) diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig index b47e74421e34..f6e6a6d5d987 100644 --- a/drivers/gpu/drm/rcar-du/Kconfig +++ b/drivers/gpu/drm/rcar-du/Kconfig @@ -4,23 +4,24 @@ config DRM_RCAR_DU depends on DRM && OF depends on ARM || ARM64 depends on ARCH_RENESAS || COMPILE_TEST - imply DRM_RCAR_CMM - imply DRM_RCAR_LVDS select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER select VIDEOMODE_HELPERS help Choose this option if you have an R-Car chipset. If M is selected the module will be called rcar-du-drm. -config DRM_RCAR_CMM - tristate "R-Car DU Color Management Module (CMM) Support" - depends on DRM && OF +config DRM_RCAR_USE_CMM + bool "R-Car DU Color Management Module (CMM) Support" depends on DRM_RCAR_DU + default DRM_RCAR_DU help Enable support for R-Car Color Management Module (CMM). +config DRM_RCAR_CMM + def_tristate DRM_RCAR_DU + depends on DRM_RCAR_USE_CMM + config DRM_RCAR_DW_HDMI tristate "R-Car Gen3 and RZ/G2 DU HDMI Encoder Support" depends on DRM && OF @@ -28,15 +29,27 @@ config DRM_RCAR_DW_HDMI help Enable support for R-Car Gen3 or RZ/G2 internal HDMI encoder. +config DRM_RCAR_USE_LVDS + bool "R-Car DU LVDS Encoder Support" + depends on DRM_BRIDGE && OF + default DRM_RCAR_DU + help + Enable support for the R-Car Display Unit embedded LVDS encoders.
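The radeon_uvd change above drops the open-coded wait on the exclusive fence in favour of dma_resv_wait_timeout(), which waits on the reservation object as a whole. Its return convention (positive remaining time on success, 0 on timeout, negative errno on error) is what forces the "r ? r : -ETIME" mapping. A short sketch of that convention, using a hypothetical helper name:

#include <linux/dma-resv.h>
#include <linux/errno.h>
#include <linux/sched.h>	/* MAX_SCHEDULE_TIMEOUT */

/* Hypothetical helper showing the dma_resv_wait_timeout() return mapping. */
static int wait_resv_idle(struct dma_resv *resv)
{
	long r;

	/* wait_all = false (exclusive fence only), intr = false */
	r = dma_resv_wait_timeout(resv, false, false, MAX_SCHEDULE_TIMEOUT);
	if (r <= 0)
		return r ? r : -ETIME;	/* 0 means timeout, < 0 is an errno */

	return 0;
}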
+ config DRM_RCAR_LVDS - tristate "R-Car DU LVDS Encoder Support" - depends on DRM && DRM_BRIDGE && OF + def_tristate DRM_RCAR_DU + depends on DRM_RCAR_USE_LVDS select DRM_KMS_HELPER select DRM_PANEL select OF_FLATTREE select OF_OVERLAY + +config DRM_RCAR_MIPI_DSI + tristate "R-Car DU MIPI DSI Encoder Support" + depends on DRM && DRM_BRIDGE && OF + select DRM_MIPI_DSI help - Enable support for the R-Car Display Unit embedded LVDS encoders. + Enable support for the R-Car Display Unit embedded MIPI DSI encoders. config DRM_RCAR_VSP bool "R-Car DU VSP Compositor Support" if ARM diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile index 4d1187ccc3e5..286bc81b3e7c 100644 --- a/drivers/gpu/drm/rcar-du/Makefile +++ b/drivers/gpu/drm/rcar-du/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_DRM_RCAR_CMM) += rcar_cmm.o obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o obj-$(CONFIG_DRM_RCAR_LVDS) += rcar_lvds.o +obj-$(CONFIG_DRM_RCAR_MIPI_DSI) += rcar_mipi_dsi.o # 'remote-endpoint' is fixed up at run-time DTC_FLAGS_rcar_du_of_lvds_r8a7790 += -Wno-graph_endpoint diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 5672830ca184..f361a604337f 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -215,6 +215,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode; struct rcar_du_device *rcdu = rcrtc->dev; unsigned long mode_clock = mode->clock * 1000; + unsigned int hdse_offset; u32 dsmr; u32 escr; @@ -261,12 +262,13 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) rcar_du_group_write(rcrtc->group, DPLLCR, dpllcr); escr = ESCR_DCLKSEL_DCLKIN | div; - } else if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index)) { + } else if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) || + rcdu->info->dsi_clk_mask & BIT(rcrtc->index)) { /* - * Use the LVDS PLL output as the dot clock when outputting to - * the LVDS encoder on an SoC that supports this clock routing - * option. We use the clock directly in that case, without any - * additional divider. + * Use the external LVDS or DSI PLL output as the dot clock when + * outputting to the LVDS or DSI encoder on an SoC that supports + * this clock routing option. We use the clock directly in that + * case, without any additional divider. 
*/ escr = ESCR_DCLKSEL_DCLKIN; } else { @@ -298,10 +300,15 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) | DSMR_DIPM_DISP | DSMR_CSPM; rcar_du_crtc_write(rcrtc, DSMR, dsmr); + hdse_offset = 19; + if (rcrtc->group->cmms_mask & BIT(rcrtc->index % 2)) + hdse_offset += 25; + /* Display timings */ - rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start - 19); + rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start - + hdse_offset); rcar_du_crtc_write(rcrtc, HDER, mode->htotal - mode->hsync_start + - mode->hdisplay - 19); + mode->hdisplay - hdse_offset); rcar_du_crtc_write(rcrtc, HSWR, mode->hsync_end - mode->hsync_start - 1); rcar_du_crtc_write(rcrtc, HCR, mode->htotal - 1); @@ -836,6 +843,7 @@ rcar_du_crtc_mode_valid(struct drm_crtc *crtc, struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); struct rcar_du_device *rcdu = rcrtc->dev; bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE; + unsigned int min_sync_porch; unsigned int vbp; if (interlaced && !rcar_du_has(rcdu, RCAR_DU_FEATURE_INTERLACED)) @@ -843,9 +851,14 @@ rcar_du_crtc_mode_valid(struct drm_crtc *crtc, /* * The hardware requires a minimum combined horizontal sync and back - * porch of 20 pixels and a minimum vertical back porch of 3 lines. + * porch of 20 pixels (when CMM isn't used) or 45 pixels (when CMM is + * used), and a minimum vertical back porch of 3 lines. */ - if (mode->htotal - mode->hsync_start < 20) + min_sync_porch = 20; + if (rcrtc->group->cmms_mask & BIT(rcrtc->index % 2)) + min_sync_porch += 25; + + if (mode->htotal - mode->hsync_start < min_sync_porch) return MODE_HBLANK_NARROW; vbp = (mode->vtotal - mode->vsync_end) / (interlaced ? 2 : 1); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 5612a9e7a905..5a8131ef81d5 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -544,10 +544,12 @@ const char *rcar_du_output_name(enum rcar_du_output output) static const char * const names[] = { [RCAR_DU_OUTPUT_DPAD0] = "DPAD0", [RCAR_DU_OUTPUT_DPAD1] = "DPAD1", - [RCAR_DU_OUTPUT_LVDS0] = "LVDS0", - [RCAR_DU_OUTPUT_LVDS1] = "LVDS1", + [RCAR_DU_OUTPUT_DSI0] = "DSI0", + [RCAR_DU_OUTPUT_DSI1] = "DSI1", [RCAR_DU_OUTPUT_HDMI0] = "HDMI0", [RCAR_DU_OUTPUT_HDMI1] = "HDMI1", + [RCAR_DU_OUTPUT_LVDS0] = "LVDS0", + [RCAR_DU_OUTPUT_LVDS1] = "LVDS1", [RCAR_DU_OUTPUT_TCON] = "TCON", }; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index eacb1f17f747..190dbb7f15dd 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -327,11 +327,11 @@ const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc) */ static const struct drm_gem_object_funcs rcar_du_gem_funcs = { - .free = drm_gem_cma_free_object, - .print_info = drm_gem_cma_print_info, - .get_sg_table = drm_gem_cma_get_sg_table, - .vmap = drm_gem_cma_vmap, - .mmap = drm_gem_cma_mmap, + .free = drm_gem_cma_object_free, + .print_info = drm_gem_cma_object_print_info, + .get_sg_table = drm_gem_cma_object_get_sg_table, + .vmap = drm_gem_cma_object_vmap, + .mmap = drm_gem_cma_object_mmap, .vm_ops = &drm_gem_cma_vm_ops, }; diff --git a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c new file mode 100644 index 000000000000..891bb956fd61 --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c @@ -0,0 +1,819 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * rcar_mipi_dsi.c -- R-Car MIPI DSI Encoder + * + * Copyright (C) 2020 Renesas 
Electronics Corporation + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_graph.h> +#include <linux/platform_device.h> +#include <linux/reset.h> +#include <linux/slab.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_of.h> +#include <drm/drm_panel.h> +#include <drm/drm_probe_helper.h> + +#include "rcar_mipi_dsi_regs.h" + +struct rcar_mipi_dsi { + struct device *dev; + const struct rcar_mipi_dsi_device_info *info; + struct reset_control *rstc; + + struct mipi_dsi_host host; + struct drm_bridge bridge; + struct drm_bridge *next_bridge; + struct drm_connector connector; + + void __iomem *mmio; + struct { + struct clk *mod; + struct clk *pll; + struct clk *dsi; + } clocks; + + enum mipi_dsi_pixel_format format; + unsigned int num_data_lanes; + unsigned int lanes; +}; + +static inline struct rcar_mipi_dsi * +bridge_to_rcar_mipi_dsi(struct drm_bridge *bridge) +{ + return container_of(bridge, struct rcar_mipi_dsi, bridge); +} + +static inline struct rcar_mipi_dsi * +host_to_rcar_mipi_dsi(struct mipi_dsi_host *host) +{ + return container_of(host, struct rcar_mipi_dsi, host); +} + +static const u32 phtw[] = { + 0x01020114, 0x01600115, /* General testing */ + 0x01030116, 0x0102011d, /* General testing */ + 0x011101a4, 0x018601a4, /* 1Gbps testing */ + 0x014201a0, 0x010001a3, /* 1Gbps testing */ + 0x0101011f, /* 1Gbps testing */ +}; + +static const u32 phtw2[] = { + 0x010c0130, 0x010c0140, /* General testing */ + 0x010c0150, 0x010c0180, /* General testing */ + 0x010c0190, + 0x010a0160, 0x010a0170, + 0x01800164, 0x01800174, /* 1Gbps testing */ +}; + +static const u32 hsfreqrange_table[][2] = { + { 80000000U, 0x00 }, { 90000000U, 0x10 }, { 100000000U, 0x20 }, + { 110000000U, 0x30 }, { 120000000U, 0x01 }, { 130000000U, 0x11 }, + { 140000000U, 0x21 }, { 150000000U, 0x31 }, { 160000000U, 0x02 }, + { 170000000U, 0x12 }, { 180000000U, 0x22 }, { 190000000U, 0x32 }, + { 205000000U, 0x03 }, { 220000000U, 0x13 }, { 235000000U, 0x23 }, + { 250000000U, 0x33 }, { 275000000U, 0x04 }, { 300000000U, 0x14 }, + { 325000000U, 0x25 }, { 350000000U, 0x35 }, { 400000000U, 0x05 }, + { 450000000U, 0x16 }, { 500000000U, 0x26 }, { 550000000U, 0x37 }, + { 600000000U, 0x07 }, { 650000000U, 0x18 }, { 700000000U, 0x28 }, + { 750000000U, 0x39 }, { 800000000U, 0x09 }, { 850000000U, 0x19 }, + { 900000000U, 0x29 }, { 950000000U, 0x3a }, { 1000000000U, 0x0a }, + { 1050000000U, 0x1a }, { 1100000000U, 0x2a }, { 1150000000U, 0x3b }, + { 1200000000U, 0x0b }, { 1250000000U, 0x1b }, { 1300000000U, 0x2b }, + { 1350000000U, 0x3c }, { 1400000000U, 0x0c }, { 1450000000U, 0x1c }, + { 1500000000U, 0x2c }, { 1550000000U, 0x3d }, { 1600000000U, 0x0d }, + { 1650000000U, 0x1d }, { 1700000000U, 0x2e }, { 1750000000U, 0x3e }, + { 1800000000U, 0x0e }, { 1850000000U, 0x1e }, { 1900000000U, 0x2f }, + { 1950000000U, 0x3f }, { 2000000000U, 0x0f }, { 2050000000U, 0x40 }, + { 2100000000U, 0x41 }, { 2150000000U, 0x42 }, { 2200000000U, 0x43 }, + { 2250000000U, 0x44 }, { 2300000000U, 0x45 }, { 2350000000U, 0x46 }, + { 2400000000U, 0x47 }, { 2450000000U, 0x48 }, { 2500000000U, 0x49 }, + { /* sentinel */ }, +}; + +struct vco_cntrl_value { + u32 min_freq; + u32 max_freq; + u16 value; +}; + +static const struct vco_cntrl_value vco_cntrl_table[] = { + { .min_freq = 40000000U, .max_freq = 55000000U, .value = 0x3f }, + 
{ .min_freq = 52500000U, .max_freq = 80000000U, .value = 0x39 }, + { .min_freq = 80000000U, .max_freq = 110000000U, .value = 0x2f }, + { .min_freq = 105000000U, .max_freq = 160000000U, .value = 0x29 }, + { .min_freq = 160000000U, .max_freq = 220000000U, .value = 0x1f }, + { .min_freq = 210000000U, .max_freq = 320000000U, .value = 0x19 }, + { .min_freq = 320000000U, .max_freq = 440000000U, .value = 0x0f }, + { .min_freq = 420000000U, .max_freq = 660000000U, .value = 0x09 }, + { .min_freq = 630000000U, .max_freq = 1149000000U, .value = 0x03 }, + { .min_freq = 1100000000U, .max_freq = 1152000000U, .value = 0x01 }, + { .min_freq = 1150000000U, .max_freq = 1250000000U, .value = 0x01 }, + { /* sentinel */ }, +}; + +static void rcar_mipi_dsi_write(struct rcar_mipi_dsi *dsi, u32 reg, u32 data) +{ + iowrite32(data, dsi->mmio + reg); +} + +static u32 rcar_mipi_dsi_read(struct rcar_mipi_dsi *dsi, u32 reg) +{ + return ioread32(dsi->mmio + reg); +} + +static void rcar_mipi_dsi_clr(struct rcar_mipi_dsi *dsi, u32 reg, u32 clr) +{ + rcar_mipi_dsi_write(dsi, reg, rcar_mipi_dsi_read(dsi, reg) & ~clr); +} + +static void rcar_mipi_dsi_set(struct rcar_mipi_dsi *dsi, u32 reg, u32 set) +{ + rcar_mipi_dsi_write(dsi, reg, rcar_mipi_dsi_read(dsi, reg) | set); +} + +static int rcar_mipi_dsi_phtw_test(struct rcar_mipi_dsi *dsi, u32 phtw) +{ + u32 status; + int ret; + + rcar_mipi_dsi_write(dsi, PHTW, phtw); + + ret = read_poll_timeout(rcar_mipi_dsi_read, status, + !(status & (PHTW_DWEN | PHTW_CWEN)), + 2000, 10000, false, dsi, PHTW); + if (ret < 0) { + dev_err(dsi->dev, "PHY test interface write timeout (0x%08x)\n", + phtw); + return ret; + } + + return ret; +} + +/* ----------------------------------------------------------------------------- + * Hardware Setup + */ + +struct dsi_setup_info { + unsigned long fout; + u16 vco_cntrl; + u16 prop_cntrl; + u16 hsfreqrange; + u16 div; + unsigned int m; + unsigned int n; +}; + +static void rcar_mipi_dsi_parameters_calc(struct rcar_mipi_dsi *dsi, + struct clk *clk, unsigned long target, + struct dsi_setup_info *setup_info) +{ + + const struct vco_cntrl_value *vco_cntrl; + unsigned long fout_target; + unsigned long fin, fout; + unsigned long hsfreq; + unsigned int best_err = -1; + unsigned int divider; + unsigned int n; + unsigned int i; + unsigned int err; + + /* + * Calculate Fout = dot clock * ColorDepth / (2 * Lane Count) + * The range of Fout is [40 - 1250] MHz + */ + fout_target = target * mipi_dsi_pixel_format_to_bpp(dsi->format) + / (2 * dsi->lanes); + if (fout_target < 40000000 || fout_target > 1250000000) + return; + + /* Find vco_cntrl */ + for (vco_cntrl = vco_cntrl_table; vco_cntrl->min_freq != 0; vco_cntrl++) { + if (fout_target > vco_cntrl->min_freq && + fout_target <= vco_cntrl->max_freq) { + setup_info->vco_cntrl = vco_cntrl->value; + if (fout_target >= 1150000000) + setup_info->prop_cntrl = 0x0c; + else + setup_info->prop_cntrl = 0x0b; + break; + } + } + + /* Add divider */ + setup_info->div = (setup_info->vco_cntrl & 0x30) >> 4; + + /* Find hsfreqrange */ + hsfreq = fout_target * 2; + for (i = 0; i < ARRAY_SIZE(hsfreqrange_table); i++) { + if (hsfreqrange_table[i][0] >= hsfreq) { + setup_info->hsfreqrange = hsfreqrange_table[i][1]; + break; + } + } + + /* + * Calculate n and m for PLL clock + * Following the HW manual, the ranges of n and m are + * n = [3-8] and m = [64-625] + */ + fin = clk_get_rate(clk); + divider = 1 << setup_info->div; + for (n = 3; n < 9; n++) { + unsigned long fpfd; + unsigned int m; + + fpfd = fin / n; + + for (m = 64; m < 626; m++) {
+ fout = fpfd * m / divider; + err = abs((long)(fout - fout_target) * 10000 / + (long)fout_target); + if (err < best_err) { + setup_info->m = m - 2; + setup_info->n = n - 1; + setup_info->fout = fout; + best_err = err; + if (err == 0) + goto done; + } + } + } + +done: + dev_dbg(dsi->dev, + "%pC %lu Hz -> Fout %lu Hz (target %lu Hz, error %d.%02u%%), PLL M/N/DIV %u/%u/%u\n", + clk, fin, setup_info->fout, fout_target, best_err / 100, + best_err % 100, setup_info->m, setup_info->n, setup_info->div); + dev_dbg(dsi->dev, + "vco_cntrl = 0x%x\tprop_cntrl = 0x%x\thsfreqrange = 0x%x\n", + setup_info->vco_cntrl, setup_info->prop_cntrl, + setup_info->hsfreqrange); +} + +static void rcar_mipi_dsi_set_display_timing(struct rcar_mipi_dsi *dsi, + const struct drm_display_mode *mode) +{ + u32 setr; + u32 vprmset0r; + u32 vprmset1r; + u32 vprmset2r; + u32 vprmset3r; + u32 vprmset4r; + + /* Configuration for Pixel Stream and Packet Header */ + if (mipi_dsi_pixel_format_to_bpp(dsi->format) == 24) + rcar_mipi_dsi_write(dsi, TXVMPSPHSETR, TXVMPSPHSETR_DT_RGB24); + else if (mipi_dsi_pixel_format_to_bpp(dsi->format) == 18) + rcar_mipi_dsi_write(dsi, TXVMPSPHSETR, TXVMPSPHSETR_DT_RGB18); + else if (mipi_dsi_pixel_format_to_bpp(dsi->format) == 16) + rcar_mipi_dsi_write(dsi, TXVMPSPHSETR, TXVMPSPHSETR_DT_RGB16); + else { + dev_warn(dsi->dev, "unsupported format"); + return; + } + + /* Configuration for Blanking sequence and Input Pixel */ + setr = TXVMSETR_HSABPEN_EN | TXVMSETR_HBPBPEN_EN + | TXVMSETR_HFPBPEN_EN | TXVMSETR_SYNSEQ_PULSES + | TXVMSETR_PIXWDTH | TXVMSETR_VSTPM; + rcar_mipi_dsi_write(dsi, TXVMSETR, setr); + + /* Configuration for Video Parameters */ + vprmset0r = (mode->flags & DRM_MODE_FLAG_PVSYNC ? + TXVMVPRMSET0R_VSPOL_HIG : TXVMVPRMSET0R_VSPOL_LOW) + | (mode->flags & DRM_MODE_FLAG_PHSYNC ? 
+ TXVMVPRMSET0R_HSPOL_HIG : TXVMVPRMSET0R_HSPOL_LOW) + | TXVMVPRMSET0R_CSPC_RGB | TXVMVPRMSET0R_BPP_24; + + vprmset1r = TXVMVPRMSET1R_VACTIVE(mode->vdisplay) + | TXVMVPRMSET1R_VSA(mode->vsync_end - mode->vsync_start); + + vprmset2r = TXVMVPRMSET2R_VFP(mode->vsync_start - mode->vdisplay) + | TXVMVPRMSET2R_VBP(mode->vtotal - mode->vsync_end); + + vprmset3r = TXVMVPRMSET3R_HACTIVE(mode->hdisplay) + | TXVMVPRMSET3R_HSA(mode->hsync_end - mode->hsync_start); + + vprmset4r = TXVMVPRMSET4R_HFP(mode->hsync_start - mode->hdisplay) + | TXVMVPRMSET4R_HBP(mode->htotal - mode->hsync_end); + + rcar_mipi_dsi_write(dsi, TXVMVPRMSET0R, vprmset0r); + rcar_mipi_dsi_write(dsi, TXVMVPRMSET1R, vprmset1r); + rcar_mipi_dsi_write(dsi, TXVMVPRMSET2R, vprmset2r); + rcar_mipi_dsi_write(dsi, TXVMVPRMSET3R, vprmset3r); + rcar_mipi_dsi_write(dsi, TXVMVPRMSET4R, vprmset4r); +} + +static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi, + const struct drm_display_mode *mode) +{ + struct dsi_setup_info setup_info = {}; + unsigned int timeout; + int ret, i; + int dsi_format; + u32 phy_setup; + u32 clockset2, clockset3; + u32 ppisetr; + u32 vclkset; + + /* Checking valid format */ + dsi_format = mipi_dsi_pixel_format_to_bpp(dsi->format); + if (dsi_format < 0) { + dev_warn(dsi->dev, "invalid format"); + return -EINVAL; + } + + /* Parameters Calculation */ + rcar_mipi_dsi_parameters_calc(dsi, dsi->clocks.pll, + mode->clock * 1000, &setup_info); + + /* LPCLK enable */ + rcar_mipi_dsi_set(dsi, LPCLKSET, LPCLKSET_CKEN); + + /* CFGCLK enabled */ + rcar_mipi_dsi_set(dsi, CFGCLKSET, CFGCLKSET_CKEN); + + rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_RSTZ); + rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ); + + rcar_mipi_dsi_set(dsi, PHTC, PHTC_TESTCLR); + rcar_mipi_dsi_clr(dsi, PHTC, PHTC_TESTCLR); + + /* PHY setting */ + phy_setup = rcar_mipi_dsi_read(dsi, PHYSETUP); + phy_setup &= ~PHYSETUP_HSFREQRANGE_MASK; + phy_setup |= PHYSETUP_HSFREQRANGE(setup_info.hsfreqrange); + rcar_mipi_dsi_write(dsi, PHYSETUP, phy_setup); + + for (i = 0; i < ARRAY_SIZE(phtw); i++) { + ret = rcar_mipi_dsi_phtw_test(dsi, phtw[i]); + if (ret < 0) + return ret; + } + + /* PLL Clock Setting */ + rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_SHADOW_CLEAR); + rcar_mipi_dsi_set(dsi, CLOCKSET1, CLOCKSET1_SHADOW_CLEAR); + rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_SHADOW_CLEAR); + + clockset2 = CLOCKSET2_M(setup_info.m) | CLOCKSET2_N(setup_info.n) + | CLOCKSET2_VCO_CNTRL(setup_info.vco_cntrl); + clockset3 = CLOCKSET3_PROP_CNTRL(setup_info.prop_cntrl) + | CLOCKSET3_INT_CNTRL(0) + | CLOCKSET3_CPBIAS_CNTRL(0x10) + | CLOCKSET3_GMP_CNTRL(1); + rcar_mipi_dsi_write(dsi, CLOCKSET2, clockset2); + rcar_mipi_dsi_write(dsi, CLOCKSET3, clockset3); + + rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL); + rcar_mipi_dsi_set(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL); + udelay(10); + rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL); + + ppisetr = PPISETR_DLEN_3 | PPISETR_CLEN; + rcar_mipi_dsi_write(dsi, PPISETR, ppisetr); + + rcar_mipi_dsi_set(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ); + rcar_mipi_dsi_set(dsi, PHYSETUP, PHYSETUP_RSTZ); + usleep_range(400, 500); + + /* Checking PPI clock status register */ + for (timeout = 10; timeout > 0; --timeout) { + if ((rcar_mipi_dsi_read(dsi, PPICLSR) & PPICLSR_STPST) && + (rcar_mipi_dsi_read(dsi, PPIDLSR) & PPIDLSR_STPST) && + (rcar_mipi_dsi_read(dsi, CLOCKSET1) & CLOCKSET1_LOCK)) + break; + + usleep_range(1000, 2000); + } + + if (!timeout) { + dev_err(dsi->dev, "failed to enable PPI clock\n"); + return -ETIMEDOUT; + } + + for (i = 0; i < 
ARRAY_SIZE(phtw2); i++) { + ret = rcar_mipi_dsi_phtw_test(dsi, phtw2[i]); + if (ret < 0) + return ret; + } + + /* Enable DOT clock */ + vclkset = VCLKSET_CKEN; + rcar_mipi_dsi_set(dsi, VCLKSET, vclkset); + + if (dsi_format == 24) + vclkset |= VCLKSET_BPP_24; + else if (dsi_format == 18) + vclkset |= VCLKSET_BPP_18; + else if (dsi_format == 16) + vclkset |= VCLKSET_BPP_16; + else { + dev_warn(dsi->dev, "unsupported format"); + return -EINVAL; + } + vclkset |= VCLKSET_COLOR_RGB | VCLKSET_DIV(setup_info.div) + | VCLKSET_LANE(dsi->lanes - 1); + + rcar_mipi_dsi_set(dsi, VCLKSET, vclkset); + + /* After setting VCLKSET register, enable VCLKEN */ + rcar_mipi_dsi_set(dsi, VCLKEN, VCLKEN_CKEN); + + dev_dbg(dsi->dev, "DSI device is started\n"); + + return 0; +} + +static void rcar_mipi_dsi_shutdown(struct rcar_mipi_dsi *dsi) +{ + rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_RSTZ); + rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ); + + dev_dbg(dsi->dev, "DSI device is shut down\n"); +} + +static int rcar_mipi_dsi_clk_enable(struct rcar_mipi_dsi *dsi) +{ + int ret; + + reset_control_deassert(dsi->rstc); + + ret = clk_prepare_enable(dsi->clocks.mod); + if (ret < 0) + goto err_reset; + + ret = clk_prepare_enable(dsi->clocks.dsi); + if (ret < 0) + goto err_clock; + + return 0; + +err_clock: + clk_disable_unprepare(dsi->clocks.mod); +err_reset: + reset_control_assert(dsi->rstc); + return ret; +} + +static void rcar_mipi_dsi_clk_disable(struct rcar_mipi_dsi *dsi) +{ + clk_disable_unprepare(dsi->clocks.dsi); + clk_disable_unprepare(dsi->clocks.mod); + + reset_control_assert(dsi->rstc); +} + +static int rcar_mipi_dsi_start_hs_clock(struct rcar_mipi_dsi *dsi) +{ + /* + * The HW manual says to check that TxDDRClkHS-Q is stable, but does + * not describe how to perform the check, so it is skipped here. + */ + u32 status; + int ret; + + /* Start HS clock. */ + rcar_mipi_dsi_set(dsi, PPICLCR, PPICLCR_TXREQHS); + + ret = read_poll_timeout(rcar_mipi_dsi_read, status, + status & PPICLSR_TOHS, + 2000, 10000, false, dsi, PPICLSR); + if (ret < 0) { + dev_err(dsi->dev, "failed to enable HS clock\n"); + return ret; + } + + rcar_mipi_dsi_set(dsi, PPICLSCR, PPICLSCR_TOHS); + + return 0; +} + +static int rcar_mipi_dsi_start_video(struct rcar_mipi_dsi *dsi) +{ + u32 status; + int ret; + + /* Wait for the link to be ready. */ + ret = read_poll_timeout(rcar_mipi_dsi_read, status, + !(status & (LINKSR_LPBUSY | LINKSR_HSBUSY)), + 2000, 10000, false, dsi, LINKSR); + if (ret < 0) { + dev_err(dsi->dev, "Link failed to become ready\n"); + return ret; + } + + /* De-assert video FIFO clear. */ + rcar_mipi_dsi_clr(dsi, TXVMCR, TXVMCR_VFCLR); + + ret = read_poll_timeout(rcar_mipi_dsi_read, status, + status & TXVMSR_VFRDY, + 2000, 10000, false, dsi, TXVMSR); + if (ret < 0) { + dev_err(dsi->dev, "Failed to de-assert video FIFO clear\n"); + return ret; + } + + /* Enable transmission in video mode.
*/ + rcar_mipi_dsi_set(dsi, TXVMCR, TXVMCR_EN_VIDEO); + + ret = read_poll_timeout(rcar_mipi_dsi_read, status, + status & TXVMSR_RDY, + 2000, 10000, false, dsi, TXVMSR); + if (ret < 0) { + dev_err(dsi->dev, "Failed to enable video transmission\n"); + return ret; + } + + return 0; +} + +/* ----------------------------------------------------------------------------- + * Bridge + */ + +static int rcar_mipi_dsi_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge); + + return drm_bridge_attach(bridge->encoder, dsi->next_bridge, bridge, + flags); +} + +static void rcar_mipi_dsi_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct drm_atomic_state *state = old_bridge_state->base.state; + struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge); + const struct drm_display_mode *mode; + struct drm_connector *connector; + struct drm_crtc *crtc; + int ret; + + connector = drm_atomic_get_new_connector_for_encoder(state, + bridge->encoder); + crtc = drm_atomic_get_new_connector_state(state, connector)->crtc; + mode = &drm_atomic_get_new_crtc_state(state, crtc)->adjusted_mode; + + ret = rcar_mipi_dsi_clk_enable(dsi); + if (ret < 0) { + dev_err(dsi->dev, "failed to enable DSI clocks\n"); + return; + } + + ret = rcar_mipi_dsi_startup(dsi, mode); + if (ret < 0) + goto err_dsi_startup; + + rcar_mipi_dsi_set_display_timing(dsi, mode); + + ret = rcar_mipi_dsi_start_hs_clock(dsi); + if (ret < 0) + goto err_dsi_start_hs; + + rcar_mipi_dsi_start_video(dsi); + + return; + +err_dsi_start_hs: + rcar_mipi_dsi_shutdown(dsi); +err_dsi_startup: + rcar_mipi_dsi_clk_disable(dsi); +} + +static void rcar_mipi_dsi_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge); + + rcar_mipi_dsi_shutdown(dsi); + rcar_mipi_dsi_clk_disable(dsi); +} + +static enum drm_mode_status +rcar_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + if (mode->clock > 297000) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static const struct drm_bridge_funcs rcar_mipi_dsi_bridge_ops = { + .attach = rcar_mipi_dsi_attach, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, + .atomic_enable = rcar_mipi_dsi_atomic_enable, + .atomic_disable = rcar_mipi_dsi_atomic_disable, + .mode_valid = rcar_mipi_dsi_bridge_mode_valid, +}; + +/* ----------------------------------------------------------------------------- + * Host setting + */ + +static int rcar_mipi_dsi_host_attach(struct mipi_dsi_host *host, + struct mipi_dsi_device *device) +{ + struct rcar_mipi_dsi *dsi = host_to_rcar_mipi_dsi(host); + int ret; + + if (device->lanes > dsi->num_data_lanes) + return -EINVAL; + + dsi->lanes = device->lanes; + dsi->format = device->format; + + dsi->next_bridge = devm_drm_of_get_bridge(dsi->dev, dsi->dev->of_node, + 1, 0); + if (IS_ERR(dsi->next_bridge)) { + ret = PTR_ERR(dsi->next_bridge); + dev_err(dsi->dev, "failed to get next bridge: %d\n", ret); + return ret; + } + + /* Initialize the DRM bridge. 
*/ + dsi->bridge.funcs = &rcar_mipi_dsi_bridge_ops; + dsi->bridge.of_node = dsi->dev->of_node; + drm_bridge_add(&dsi->bridge); + + return 0; +} + +static int rcar_mipi_dsi_host_detach(struct mipi_dsi_host *host, + struct mipi_dsi_device *device) +{ + struct rcar_mipi_dsi *dsi = host_to_rcar_mipi_dsi(host); + + drm_bridge_remove(&dsi->bridge); + + return 0; +} + +static const struct mipi_dsi_host_ops rcar_mipi_dsi_host_ops = { + .attach = rcar_mipi_dsi_host_attach, + .detach = rcar_mipi_dsi_host_detach, +}; + +/* ----------------------------------------------------------------------------- + * Probe & Remove + */ + +static int rcar_mipi_dsi_parse_dt(struct rcar_mipi_dsi *dsi) +{ + struct device_node *ep; + u32 data_lanes[4]; + int ret; + + ep = of_graph_get_endpoint_by_regs(dsi->dev->of_node, 1, 0); + if (!ep) { + dev_dbg(dsi->dev, "unconnected port@1\n"); + return -ENODEV; + } + + ret = of_property_read_variable_u32_array(ep, "data-lanes", data_lanes, + 1, 4); + of_node_put(ep); + + if (ret < 0) { + dev_err(dsi->dev, "missing or invalid data-lanes property\n"); + return -ENODEV; + } + + dsi->num_data_lanes = ret; + return 0; +} + +static struct clk *rcar_mipi_dsi_get_clock(struct rcar_mipi_dsi *dsi, + const char *name, + bool optional) +{ + struct clk *clk; + + clk = devm_clk_get(dsi->dev, name); + if (!IS_ERR(clk)) + return clk; + + if (PTR_ERR(clk) == -ENOENT && optional) + return NULL; + + dev_err_probe(dsi->dev, PTR_ERR(clk), "failed to get %s clock\n", + name ? name : "module"); + + return clk; +} + +static int rcar_mipi_dsi_get_clocks(struct rcar_mipi_dsi *dsi) +{ + dsi->clocks.mod = rcar_mipi_dsi_get_clock(dsi, NULL, false); + if (IS_ERR(dsi->clocks.mod)) + return PTR_ERR(dsi->clocks.mod); + + dsi->clocks.pll = rcar_mipi_dsi_get_clock(dsi, "pll", true); + if (IS_ERR(dsi->clocks.pll)) + return PTR_ERR(dsi->clocks.pll); + + dsi->clocks.dsi = rcar_mipi_dsi_get_clock(dsi, "dsi", true); + if (IS_ERR(dsi->clocks.dsi)) + return PTR_ERR(dsi->clocks.dsi); + + if (!dsi->clocks.pll && !dsi->clocks.dsi) { + dev_err(dsi->dev, "no input clock (pll, dsi)\n"); + return -EINVAL; + } + + return 0; +} + +static int rcar_mipi_dsi_probe(struct platform_device *pdev) +{ + struct rcar_mipi_dsi *dsi; + struct resource *mem; + int ret; + + dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL); + if (dsi == NULL) + return -ENOMEM; + + platform_set_drvdata(pdev, dsi); + + dsi->dev = &pdev->dev; + dsi->info = of_device_get_match_data(&pdev->dev); + + ret = rcar_mipi_dsi_parse_dt(dsi); + if (ret < 0) + return ret; + + /* Acquire resources. */ + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dsi->mmio = devm_ioremap_resource(dsi->dev, mem); + if (IS_ERR(dsi->mmio)) + return PTR_ERR(dsi->mmio); + + ret = rcar_mipi_dsi_get_clocks(dsi); + if (ret < 0) + return ret; + + dsi->rstc = devm_reset_control_get(dsi->dev, NULL); + if (IS_ERR(dsi->rstc)) { + dev_err(dsi->dev, "failed to get cpg reset\n"); + return PTR_ERR(dsi->rstc); + } + + /* Initialize the DSI host. 
*/ + dsi->host.dev = dsi->dev; + dsi->host.ops = &rcar_mipi_dsi_host_ops; + ret = mipi_dsi_host_register(&dsi->host); + if (ret < 0) + return ret; + + return 0; +} + +static int rcar_mipi_dsi_remove(struct platform_device *pdev) +{ + struct rcar_mipi_dsi *dsi = platform_get_drvdata(pdev); + + mipi_dsi_host_unregister(&dsi->host); + + return 0; +} + +static const struct of_device_id rcar_mipi_dsi_of_table[] = { + { .compatible = "renesas,r8a779a0-dsi-csi2-tx" }, + { } +}; + +MODULE_DEVICE_TABLE(of, rcar_mipi_dsi_of_table); + +static struct platform_driver rcar_mipi_dsi_platform_driver = { + .probe = rcar_mipi_dsi_probe, + .remove = rcar_mipi_dsi_remove, + .driver = { + .name = "rcar-mipi-dsi", + .of_match_table = rcar_mipi_dsi_of_table, + }, +}; + +module_platform_driver(rcar_mipi_dsi_platform_driver); + +MODULE_DESCRIPTION("Renesas R-Car MIPI DSI Encoder Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h new file mode 100644 index 000000000000..0e7a9274749f --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h @@ -0,0 +1,172 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * rcar_mipi_dsi_regs.h -- R-Car MIPI DSI Interface Registers Definitions + * + * Copyright (C) 2020 Renesas Electronics Corporation + */ + +#ifndef __RCAR_MIPI_DSI_REGS_H__ +#define __RCAR_MIPI_DSI_REGS_H__ + +#define LINKSR 0x010 +#define LINKSR_LPBUSY (1 << 1) +#define LINKSR_HSBUSY (1 << 0) + +/* + * Video Mode Register + */ +#define TXVMSETR 0x180 +#define TXVMSETR_SYNSEQ_PULSES (0 << 16) +#define TXVMSETR_SYNSEQ_EVENTS (1 << 16) +#define TXVMSETR_VSTPM (1 << 15) +#define TXVMSETR_PIXWDTH (1 << 8) +#define TXVMSETR_VSEN_EN (1 << 4) +#define TXVMSETR_VSEN_DIS (0 << 4) +#define TXVMSETR_HFPBPEN_EN (1 << 2) +#define TXVMSETR_HFPBPEN_DIS (0 << 2) +#define TXVMSETR_HBPBPEN_EN (1 << 1) +#define TXVMSETR_HBPBPEN_DIS (0 << 1) +#define TXVMSETR_HSABPEN_EN (1 << 0) +#define TXVMSETR_HSABPEN_DIS (0 << 0) + +#define TXVMCR 0x190 +#define TXVMCR_VFCLR (1 << 12) +#define TXVMCR_EN_VIDEO (1 << 0) + +#define TXVMSR 0x1a0 +#define TXVMSR_STR (1 << 16) +#define TXVMSR_VFRDY (1 << 12) +#define TXVMSR_ACT (1 << 8) +#define TXVMSR_RDY (1 << 0) + +#define TXVMSCR 0x1a4 +#define TXVMSCR_STR (1 << 16) + +#define TXVMPSPHSETR 0x1c0 +#define TXVMPSPHSETR_DT_RGB16 (0x0e << 16) +#define TXVMPSPHSETR_DT_RGB18 (0x1e << 16) +#define TXVMPSPHSETR_DT_RGB18_LS (0x2e << 16) +#define TXVMPSPHSETR_DT_RGB24 (0x3e << 16) +#define TXVMPSPHSETR_DT_YCBCR16 (0x2c << 16) + +#define TXVMVPRMSET0R 0x1d0 +#define TXVMVPRMSET0R_HSPOL_HIG (0 << 17) +#define TXVMVPRMSET0R_HSPOL_LOW (1 << 17) +#define TXVMVPRMSET0R_VSPOL_HIG (0 << 16) +#define TXVMVPRMSET0R_VSPOL_LOW (1 << 16) +#define TXVMVPRMSET0R_CSPC_RGB (0 << 4) +#define TXVMVPRMSET0R_CSPC_YCbCr (1 << 4) +#define TXVMVPRMSET0R_BPP_16 (0 << 0) +#define TXVMVPRMSET0R_BPP_18 (1 << 0) +#define TXVMVPRMSET0R_BPP_24 (2 << 0) + +#define TXVMVPRMSET1R 0x1d4 +#define TXVMVPRMSET1R_VACTIVE(x) (((x) & 0x7fff) << 16) +#define TXVMVPRMSET1R_VSA(x) (((x) & 0xfff) << 0) + +#define TXVMVPRMSET2R 0x1d8 +#define TXVMVPRMSET2R_VFP(x) (((x) & 0x1fff) << 16) +#define TXVMVPRMSET2R_VBP(x) (((x) & 0x1fff) << 0) + +#define TXVMVPRMSET3R 0x1dc +#define TXVMVPRMSET3R_HACTIVE(x) (((x) & 0x7fff) << 16) +#define TXVMVPRMSET3R_HSA(x) (((x) & 0xfff) << 0) + +#define TXVMVPRMSET4R 0x1e0 +#define TXVMVPRMSET4R_HFP(x) (((x) & 0x1fff) << 16) +#define TXVMVPRMSET4R_HBP(x) (((x) & 0x1fff) << 0) + +/* + * PHY-Protocol Interface (PPI) Registers + */ 
+#define PPISETR 0x700 +#define PPISETR_DLEN_0 (0x1 << 0) +#define PPISETR_DLEN_1 (0x3 << 0) +#define PPISETR_DLEN_2 (0x7 << 0) +#define PPISETR_DLEN_3 (0xf << 0) +#define PPISETR_CLEN (1 << 8) + +#define PPICLCR 0x710 +#define PPICLCR_TXREQHS (1 << 8) +#define PPICLCR_TXULPSEXT (1 << 1) +#define PPICLCR_TXULPSCLK (1 << 0) + +#define PPICLSR 0x720 +#define PPICLSR_HSTOLP (1 << 27) +#define PPICLSR_TOHS (1 << 26) +#define PPICLSR_STPST (1 << 0) + +#define PPICLSCR 0x724 +#define PPICLSCR_HSTOLP (1 << 27) +#define PPICLSCR_TOHS (1 << 26) + +#define PPIDLSR 0x760 +#define PPIDLSR_STPST (0xf << 0) + +/* + * Clocks registers + */ +#define LPCLKSET 0x1000 +#define LPCLKSET_CKEN (1 << 8) +#define LPCLKSET_LPCLKDIV(x) (((x) & 0x3f) << 0) + +#define CFGCLKSET 0x1004 +#define CFGCLKSET_CKEN (1 << 8) +#define CFGCLKSET_CFGCLKDIV(x) (((x) & 0x3f) << 0) + +#define DOTCLKDIV 0x1008 +#define DOTCLKDIV_CKEN (1 << 8) +#define DOTCLKDIV_DOTCLKDIV(x) (((x) & 0x3f) << 0) + +#define VCLKSET 0x100c +#define VCLKSET_CKEN (1 << 16) +#define VCLKSET_COLOR_RGB (0 << 8) +#define VCLKSET_COLOR_YCC (1 << 8) +#define VCLKSET_DIV(x) (((x) & 0x3) << 4) +#define VCLKSET_BPP_16 (0 << 2) +#define VCLKSET_BPP_18 (1 << 2) +#define VCLKSET_BPP_18L (2 << 2) +#define VCLKSET_BPP_24 (3 << 2) +#define VCLKSET_LANE(x) (((x) & 0x3) << 0) + +#define VCLKEN 0x1010 +#define VCLKEN_CKEN (1 << 0) + +#define PHYSETUP 0x1014 +#define PHYSETUP_HSFREQRANGE(x) (((x) & 0x7f) << 16) +#define PHYSETUP_HSFREQRANGE_MASK (0x7f << 16) +#define PHYSETUP_CFGCLKFREQRANGE(x) (((x) & 0x3f) << 8) +#define PHYSETUP_SHUTDOWNZ (1 << 1) +#define PHYSETUP_RSTZ (1 << 0) + +#define CLOCKSET1 0x101c +#define CLOCKSET1_LOCK_PHY (1 << 17) +#define CLOCKSET1_LOCK (1 << 16) +#define CLOCKSET1_CLKSEL (1 << 8) +#define CLOCKSET1_CLKINSEL_EXTAL (0 << 2) +#define CLOCKSET1_CLKINSEL_DIG (1 << 2) +#define CLOCKSET1_CLKINSEL_DU (1 << 3) +#define CLOCKSET1_SHADOW_CLEAR (1 << 1) +#define CLOCKSET1_UPDATEPLL (1 << 0) + +#define CLOCKSET2 0x1020 +#define CLOCKSET2_M(x) (((x) & 0xfff) << 16) +#define CLOCKSET2_VCO_CNTRL(x) (((x) & 0x3f) << 8) +#define CLOCKSET2_N(x) (((x) & 0xf) << 0) + +#define CLOCKSET3 0x1024 +#define CLOCKSET3_PROP_CNTRL(x) (((x) & 0x3f) << 24) +#define CLOCKSET3_INT_CNTRL(x) (((x) & 0x3f) << 16) +#define CLOCKSET3_CPBIAS_CNTRL(x) (((x) & 0x7f) << 8) +#define CLOCKSET3_GMP_CNTRL(x) (((x) & 0x3) << 0) + +#define PHTW 0x1034 +#define PHTW_DWEN (1 << 24) +#define PHTW_TESTDIN_DATA(x) (((x) & 0xff) << 16) +#define PHTW_CWEN (1 << 8) +#define PHTW_TESTDIN_CODE(x) (((x) & 0xff) << 0) + +#define PHTC 0x103c +#define PHTC_TESTCLR (1 << 0) + +#endif /* __RCAR_MIPI_DSI_REGS_H__ */ diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile index 17a9e7eb2130..1a56f696558c 100644 --- a/drivers/gpu/drm/rockchip/Makefile +++ b/drivers/gpu/drm/rockchip/Makefile @@ -5,7 +5,6 @@ rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \ rockchip_drm_gem.o rockchip_drm_vop.o rockchip_vop_reg.o -rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o rockchipdrm-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o rockchipdrm-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp-core.o cdn-dp-reg.o diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c index a9acbcc420d0..4ed7a6868197 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c @@ -267,6 +267,8 @@ struct dw_mipi_dsi_rockchip { struct dw_mipi_dsi *dmd; const struct 
rockchip_dw_dsi_chip_data *cdata; struct dw_mipi_dsi_plat_data pdata; + + bool dsi_bound; }; struct dphy_pll_parameter_map { @@ -772,10 +774,6 @@ static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder) if (mux < 0) return; - pm_runtime_get_sync(dsi->dev); - if (dsi->slave) - pm_runtime_get_sync(dsi->slave->dev); - /* * For the RK3399, the clk of grf must be enabled before writing grf * register. And for RK3288 or other soc, this grf_clk must be NULL, @@ -794,20 +792,10 @@ static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder) clk_disable_unprepare(dsi->grf_clk); } -static void dw_mipi_dsi_encoder_disable(struct drm_encoder *encoder) -{ - struct dw_mipi_dsi_rockchip *dsi = to_dsi(encoder); - - if (dsi->slave) - pm_runtime_put(dsi->slave->dev); - pm_runtime_put(dsi->dev); -} - static const struct drm_encoder_helper_funcs dw_mipi_dsi_encoder_helper_funcs = { .atomic_check = dw_mipi_dsi_encoder_atomic_check, .enable = dw_mipi_dsi_encoder_enable, - .disable = dw_mipi_dsi_encoder_disable, }; static int rockchip_dsi_drm_create_encoder(struct dw_mipi_dsi_rockchip *dsi, @@ -937,10 +925,14 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev, put_device(second); } + pm_runtime_get_sync(dsi->dev); + if (dsi->slave) + pm_runtime_get_sync(dsi->slave->dev); + ret = clk_prepare_enable(dsi->pllref_clk); if (ret) { DRM_DEV_ERROR(dev, "Failed to enable pllref_clk: %d\n", ret); - return ret; + goto out_pm_runtime; } /* @@ -952,7 +944,7 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev, ret = clk_prepare_enable(dsi->grf_clk); if (ret) { DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret); - return ret; + goto out_pll_clk; } dw_mipi_dsi_rockchip_config(dsi); @@ -964,16 +956,27 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev, ret = rockchip_dsi_drm_create_encoder(dsi, drm_dev); if (ret) { DRM_DEV_ERROR(dev, "Failed to create drm encoder\n"); - return ret; + goto out_pll_clk; } ret = dw_mipi_dsi_bind(dsi->dmd, &dsi->encoder); if (ret) { DRM_DEV_ERROR(dev, "Failed to bind: %d\n", ret); - return ret; + goto out_pll_clk; } + dsi->dsi_bound = true; + return 0; + +out_pll_clk: + clk_disable_unprepare(dsi->pllref_clk); +out_pm_runtime: + pm_runtime_put(dsi->dev); + if (dsi->slave) + pm_runtime_put(dsi->slave->dev); + + return ret; } static void dw_mipi_dsi_rockchip_unbind(struct device *dev, @@ -985,9 +988,15 @@ static void dw_mipi_dsi_rockchip_unbind(struct device *dev, if (dsi->is_slave) return; + dsi->dsi_bound = false; + dw_mipi_dsi_unbind(dsi->dmd); clk_disable_unprepare(dsi->pllref_clk); + + pm_runtime_put(dsi->dev); + if (dsi->slave) + pm_runtime_put(dsi->slave->dev); } static const struct component_ops dw_mipi_dsi_rockchip_ops = { @@ -1275,6 +1284,36 @@ static const struct phy_ops dw_mipi_dsi_dphy_ops = { .exit = dw_mipi_dsi_dphy_exit, }; +static int __maybe_unused dw_mipi_dsi_rockchip_resume(struct device *dev) +{ + struct dw_mipi_dsi_rockchip *dsi = dev_get_drvdata(dev); + int ret; + + /* + * Re-configure DSI state, if we were previously initialized. We need + * to do this before rockchip_drm_drv tries to re-enable() any panels. 
+ */ + if (dsi->dsi_bound) { + ret = clk_prepare_enable(dsi->grf_clk); + if (ret) { + DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret); + return ret; + } + + dw_mipi_dsi_rockchip_config(dsi); + if (dsi->slave) + dw_mipi_dsi_rockchip_config(dsi->slave); + + clk_disable_unprepare(dsi->grf_clk); + } + + return 0; +} + +static const struct dev_pm_ops dw_mipi_dsi_rockchip_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, dw_mipi_dsi_rockchip_resume) +}; + static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -1396,14 +1435,10 @@ static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev) if (ret != -EPROBE_DEFER) DRM_DEV_ERROR(dev, "Failed to probe dw_mipi_dsi: %d\n", ret); - goto err_clkdisable; + return ret; } return 0; - -err_clkdisable: - clk_disable_unprepare(dsi->pllref_clk); - return ret; } static int dw_mipi_dsi_rockchip_remove(struct platform_device *pdev) @@ -1592,6 +1627,7 @@ struct platform_driver dw_mipi_dsi_rockchip_driver = { .remove = dw_mipi_dsi_rockchip_remove, .driver = { .of_match_table = dw_mipi_dsi_rockchip_dt_ids, + .pm = &dw_mipi_dsi_rockchip_pm_ops, .name = "dw-mipi-dsi-rockchip", }, }; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index e4ebe60b3cc1..bec207de4544 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -26,7 +26,6 @@ #include "rockchip_drm_drv.h" #include "rockchip_drm_fb.h" -#include "rockchip_drm_fbdev.h" #include "rockchip_drm_gem.h" #define DRIVER_NAME "rockchip" @@ -159,10 +158,6 @@ static int rockchip_drm_bind(struct device *dev) drm_mode_config_reset(drm_dev); - ret = rockchip_drm_fbdev_init(drm_dev); - if (ret) - goto err_unbind_all; - /* init kms poll for handling hpd */ drm_kms_helper_poll_init(drm_dev); @@ -170,10 +165,11 @@ static int rockchip_drm_bind(struct device *dev) if (ret) goto err_kms_helper_poll_fini; + drm_fbdev_generic_setup(drm_dev, 0); + return 0; err_kms_helper_poll_fini: drm_kms_helper_poll_fini(drm_dev); - rockchip_drm_fbdev_fini(drm_dev); err_unbind_all: component_unbind_all(dev, drm_dev); err_iommu_cleanup: @@ -189,7 +185,6 @@ static void rockchip_drm_unbind(struct device *dev) drm_dev_unregister(drm_dev); - rockchip_drm_fbdev_fini(drm_dev); drm_kms_helper_poll_fini(drm_dev); drm_atomic_helper_shutdown(drm_dev); @@ -199,25 +194,15 @@ static void rockchip_drm_unbind(struct device *dev) drm_dev_put(drm_dev); } -static const struct file_operations rockchip_drm_driver_fops = { - .owner = THIS_MODULE, - .open = drm_open, - .mmap = rockchip_gem_mmap, - .poll = drm_poll, - .read = drm_read, - .unlocked_ioctl = drm_ioctl, - .compat_ioctl = drm_compat_ioctl, - .release = drm_release, -}; +DEFINE_DRM_GEM_FOPS(rockchip_drm_driver_fops); static const struct drm_driver rockchip_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, - .lastclose = drm_fb_helper_lastclose, .dumb_create = rockchip_gem_dumb_create, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = rockchip_gem_prime_import_sg_table, - .gem_prime_mmap = rockchip_gem_mmap_buf, + .gem_prime_mmap = drm_gem_prime_mmap, .fops = &rockchip_drm_driver_fops, .name = DRIVER_NAME, .desc = DRIVER_DESC, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h index aa0909e8edf9..143a48330f84 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h +++ 
b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h @@ -43,8 +43,6 @@ struct rockchip_crtc_state { * @mm_lock: protect drm_mm on multi-threads. */ struct rockchip_drm_private { - struct drm_fb_helper fbdev_helper; - struct drm_gem_object *fbdev_bo; struct iommu_domain *domain; struct mutex mm_lock; struct drm_mm mm; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c deleted file mode 100644 index 2fdc455c4ad7..000000000000 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c +++ /dev/null @@ -1,163 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd - * Author:Mark Yao <mark.yao@rock-chips.com> - */ - -#include <drm/drm.h> -#include <drm/drm_fb_helper.h> -#include <drm/drm_fourcc.h> -#include <drm/drm_probe_helper.h> - -#include "rockchip_drm_drv.h" -#include "rockchip_drm_gem.h" -#include "rockchip_drm_fb.h" -#include "rockchip_drm_fbdev.h" - -#define PREFERRED_BPP 32 -#define to_drm_private(x) \ - container_of(x, struct rockchip_drm_private, fbdev_helper) - -static int rockchip_fbdev_mmap(struct fb_info *info, - struct vm_area_struct *vma) -{ - struct drm_fb_helper *helper = info->par; - struct rockchip_drm_private *private = to_drm_private(helper); - - return rockchip_gem_mmap_buf(private->fbdev_bo, vma); -} - -static const struct fb_ops rockchip_drm_fbdev_ops = { - .owner = THIS_MODULE, - DRM_FB_HELPER_DEFAULT_OPS, - .fb_mmap = rockchip_fbdev_mmap, - .fb_fillrect = drm_fb_helper_cfb_fillrect, - .fb_copyarea = drm_fb_helper_cfb_copyarea, - .fb_imageblit = drm_fb_helper_cfb_imageblit, -}; - -static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes) -{ - struct rockchip_drm_private *private = to_drm_private(helper); - struct drm_mode_fb_cmd2 mode_cmd = { 0 }; - struct drm_device *dev = helper->dev; - struct rockchip_gem_object *rk_obj; - struct drm_framebuffer *fb; - unsigned int bytes_per_pixel; - unsigned long offset; - struct fb_info *fbi; - size_t size; - int ret; - - bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8); - - mode_cmd.width = sizes->surface_width; - mode_cmd.height = sizes->surface_height; - mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel; - mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, - sizes->surface_depth); - - size = mode_cmd.pitches[0] * mode_cmd.height; - - rk_obj = rockchip_gem_create_object(dev, size, true); - if (IS_ERR(rk_obj)) - return -ENOMEM; - - private->fbdev_bo = &rk_obj->base; - - fbi = drm_fb_helper_alloc_fbi(helper); - if (IS_ERR(fbi)) { - DRM_DEV_ERROR(dev->dev, "Failed to create framebuffer info.\n"); - ret = PTR_ERR(fbi); - goto out; - } - - helper->fb = rockchip_drm_framebuffer_init(dev, &mode_cmd, - private->fbdev_bo); - if (IS_ERR(helper->fb)) { - DRM_DEV_ERROR(dev->dev, - "Failed to allocate DRM framebuffer.\n"); - ret = PTR_ERR(helper->fb); - goto out; - } - - fbi->fbops = &rockchip_drm_fbdev_ops; - - fb = helper->fb; - drm_fb_helper_fill_info(fbi, helper, sizes); - - offset = fbi->var.xoffset * bytes_per_pixel; - offset += fbi->var.yoffset * fb->pitches[0]; - - dev->mode_config.fb_base = 0; - fbi->screen_base = rk_obj->kvaddr + offset; - fbi->screen_size = rk_obj->base.size; - fbi->fix.smem_len = rk_obj->base.size; - - DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%zu\n", - fb->width, fb->height, fb->format->depth, - rk_obj->kvaddr, - offset, size); - - return 0; - -out: - rockchip_gem_free_object(&rk_obj->base); - return ret; -} - -static 
const struct drm_fb_helper_funcs rockchip_drm_fb_helper_funcs = { - .fb_probe = rockchip_drm_fbdev_create, -}; - -int rockchip_drm_fbdev_init(struct drm_device *dev) -{ - struct rockchip_drm_private *private = dev->dev_private; - struct drm_fb_helper *helper; - int ret; - - if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector) - return -EINVAL; - - helper = &private->fbdev_helper; - - drm_fb_helper_prepare(dev, helper, &rockchip_drm_fb_helper_funcs); - - ret = drm_fb_helper_init(dev, helper); - if (ret < 0) { - DRM_DEV_ERROR(dev->dev, - "Failed to initialize drm fb helper - %d.\n", - ret); - return ret; - } - - ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP); - if (ret < 0) { - DRM_DEV_ERROR(dev->dev, - "Failed to set initial hw config - %d.\n", - ret); - goto err_drm_fb_helper_fini; - } - - return 0; - -err_drm_fb_helper_fini: - drm_fb_helper_fini(helper); - return ret; -} - -void rockchip_drm_fbdev_fini(struct drm_device *dev) -{ - struct rockchip_drm_private *private = dev->dev_private; - struct drm_fb_helper *helper; - - helper = &private->fbdev_helper; - - drm_fb_helper_unregister_fbi(helper); - - if (helper->fb) - drm_framebuffer_put(helper->fb); - - drm_fb_helper_fini(helper); -} diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h deleted file mode 100644 index 5fb7ac2371a8..000000000000 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h +++ /dev/null @@ -1,24 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd - * Author:Mark Yao <mark.yao@rock-chips.com> - */ - -#ifndef _ROCKCHIP_DRM_FBDEV_H -#define _ROCKCHIP_DRM_FBDEV_H - -#ifdef CONFIG_DRM_FBDEV_EMULATION -int rockchip_drm_fbdev_init(struct drm_device *dev); -void rockchip_drm_fbdev_fini(struct drm_device *dev); -#else -static inline int rockchip_drm_fbdev_init(struct drm_device *dev) -{ - return 0; -} - -static inline void rockchip_drm_fbdev_fini(struct drm_device *dev) -{ -} -#endif - -#endif /* _ROCKCHIP_DRM_FBDEV_H */ diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 7971f57436dd..63eb73b624aa 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -241,11 +241,21 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); /* + * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the + * whole buffer from the start. + */ + vma->vm_pgoff = 0; + + /* * We allocated a struct page table for rk_obj, so clear * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). 
*/ + vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; vma->vm_flags &= ~VM_PFNMAP; + vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + if (rk_obj->pages) ret = rockchip_drm_gem_object_mmap_iommu(obj, vma); else @@ -257,39 +267,6 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, return ret; } -int rockchip_gem_mmap_buf(struct drm_gem_object *obj, - struct vm_area_struct *vma) -{ - int ret; - - ret = drm_gem_mmap_obj(obj, obj->size, vma); - if (ret) - return ret; - - return rockchip_drm_gem_object_mmap(obj, vma); -} - -/* drm driver mmap file operations */ -int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma) -{ - struct drm_gem_object *obj; - int ret; - - ret = drm_gem_mmap(filp, vma); - if (ret) - return ret; - - /* - * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the - * whole buffer from the start. - */ - vma->vm_pgoff = 0; - - obj = vma->vm_private_data; - - return rockchip_drm_gem_object_mmap(obj, vma); -} - static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj) { drm_gem_object_release(&rk_obj->base); @@ -301,6 +278,7 @@ static const struct drm_gem_object_funcs rockchip_gem_object_funcs = { .get_sg_table = rockchip_gem_prime_get_sg_table, .vmap = rockchip_gem_prime_vmap, .vunmap = rockchip_gem_prime_vunmap, + .mmap = rockchip_drm_gem_object_mmap, .vm_ops = &drm_gem_cma_vm_ops, }; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h index 5a70a56cd406..47c1861eece0 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h @@ -34,13 +34,6 @@ rockchip_gem_prime_import_sg_table(struct drm_device *dev, int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); -/* drm driver mmap file operations */ -int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma); - -/* mmap a gem object to userspace. 
*/ -int rockchip_gem_mmap_buf(struct drm_gem_object *obj, - struct vm_area_struct *vma); - struct rockchip_gem_object * rockchip_gem_create_object(struct drm_device *drm, unsigned int size, bool alloc_kmap); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index a25b98b7f5bd..3e8d9e2d1b67 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -726,7 +726,9 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc, spin_unlock(&vop->reg_lock); - wait_for_completion(&vop->dsp_hold_completion); + if (!wait_for_completion_timeout(&vop->dsp_hold_completion, + msecs_to_jiffies(200))) + WARN(1, "%s: timed out waiting for DSP hold", crtc->name); vop_dsp_hold_valid_irq_disable(vop); diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 27e1573af96e..191c56064f19 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -190,6 +190,16 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout) } EXPORT_SYMBOL(drm_sched_entity_flush); +static void drm_sched_entity_kill_jobs_irq_work(struct irq_work *wrk) +{ + struct drm_sched_job *job = container_of(wrk, typeof(*job), work); + + drm_sched_fence_finished(job->s_fence); + WARN_ON(job->s_fence->parent); + job->sched->ops->free_job(job); +} + + /* Signal the scheduler finished fence when the entity in question is killed. */ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, struct dma_fence_cb *cb) @@ -197,9 +207,8 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, struct drm_sched_job *job = container_of(cb, struct drm_sched_job, finish_cb); - drm_sched_fence_finished(job->s_fence); - WARN_ON(job->s_fence->parent); - job->sched->ops->free_job(job); + init_irq_work(&job->work, drm_sched_entity_kill_jobs_irq_work); + irq_work_queue(&job->work); } static struct dma_fence * diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 5bc5f775abe1..f91fb31ab7a7 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -704,9 +704,13 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job, int ret; dma_resv_for_each_fence(&cursor, obj->resv, write, fence) { + /* Make sure to grab an additional ref on the added fence */ + dma_fence_get(fence); ret = drm_sched_job_add_dependency(job, fence); - if (ret) + if (ret) { + dma_fence_put(fence); return ret; + } } return 0; } diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig index e2a6c82c8252..288b838a904a 100644 --- a/drivers/gpu/drm/shmobile/Kconfig +++ b/drivers/gpu/drm/shmobile/Kconfig @@ -5,7 +5,6 @@ config DRM_SHMOBILE depends on ARCH_SHMOBILE || COMPILE_TEST select BACKLIGHT_CLASS_DEVICE select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER help Choose this option if you have an SH Mobile chipset. diff --git a/drivers/gpu/drm/sprd/Kconfig b/drivers/gpu/drm/sprd/Kconfig new file mode 100644 index 000000000000..3edeaeca0e65 --- /dev/null +++ b/drivers/gpu/drm/sprd/Kconfig @@ -0,0 +1,13 @@ +config DRM_SPRD + tristate "DRM Support for Unisoc SoCs Platform" + depends on ARCH_SPRD || COMPILE_TEST + depends on DRM && OF + select DRM_GEM_CMA_HELPER + select DRM_KMS_CMA_HELPER + select DRM_KMS_HELPER + select DRM_MIPI_DSI + select VIDEOMODE_HELPERS + help + Choose this option if you have a Unisoc chipset. 
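+	  It provides KMS atomic mode-setting for the DPU display
+	  controller and its MIPI DSI output.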
+ If M is selected the module will be called sprd_drm. + diff --git a/drivers/gpu/drm/sprd/Makefile b/drivers/gpu/drm/sprd/Makefile new file mode 100644 index 000000000000..e82e6a6f89de --- /dev/null +++ b/drivers/gpu/drm/sprd/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 + +sprd-drm-y := sprd_drm.o \ + sprd_dpu.o \ + sprd_dsi.o \ + megacores_pll.o + +obj-$(CONFIG_DRM_SPRD) += sprd-drm.o
\ No newline at end of file diff --git a/drivers/gpu/drm/sprd/megacores_pll.c b/drivers/gpu/drm/sprd/megacores_pll.c new file mode 100644 index 000000000000..3091dfdc11e3 --- /dev/null +++ b/drivers/gpu/drm/sprd/megacores_pll.c @@ -0,0 +1,305 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 Unisoc Inc. + */ + +#include <asm/div64.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/regmap.h> +#include <linux/string.h> + +#include "sprd_dsi.h" + +#define L 0 +#define H 1 +#define CLK 0 +#define DATA 1 +#define INFINITY 0xffffffff +#define MIN_OUTPUT_FREQ (100) + +#define AVERAGE(a, b) (min(a, b) + abs((b) - (a)) / 2) + +/* sharkle */ +#define VCO_BAND_LOW 750 +#define VCO_BAND_MID 1100 +#define VCO_BAND_HIGH 1500 +#define PHY_REF_CLK 26000 + +static int dphy_calc_pll_param(struct dphy_pll *pll) +{ + const u32 khz = 1000; + const u32 mhz = 1000000; + const unsigned long long factor = 100; + unsigned long long tmp; + int i; + + pll->potential_fvco = pll->freq / khz; + pll->ref_clk = PHY_REF_CLK / khz; + + for (i = 0; i < 4; ++i) { + if (pll->potential_fvco >= VCO_BAND_LOW && + pll->potential_fvco <= VCO_BAND_HIGH) { + pll->fvco = pll->potential_fvco; + pll->out_sel = BIT(i); + break; + } + pll->potential_fvco <<= 1; + } + if (pll->fvco == 0) + return -EINVAL; + + if (pll->fvco >= VCO_BAND_LOW && pll->fvco <= VCO_BAND_MID) { + /* vco band control */ + pll->vco_band = 0x0; + /* low pass filter control */ + pll->lpf_sel = 1; + } else if (pll->fvco > VCO_BAND_MID && pll->fvco <= VCO_BAND_HIGH) { + pll->vco_band = 0x1; + pll->lpf_sel = 0; + } else { + return -EINVAL; + } + + pll->nint = pll->fvco / pll->ref_clk; + tmp = pll->fvco * factor * mhz; + do_div(tmp, pll->ref_clk); + tmp = tmp - pll->nint * factor * mhz; + tmp *= BIT(20); + do_div(tmp, 100000000); + pll->kint = (u32)tmp; + pll->refin = 3; /* pre-divider bypass */ + pll->sdm_en = true; /* use fraction N PLL */ + pll->fdk_s = 0x1; /* fraction */ + pll->cp_s = 0x0; + pll->det_delay = 0x1; + + return 0; +} + +static void dphy_set_pll_reg(struct dphy_pll *pll, struct regmap *regmap) +{ + u8 reg_val[9] = {0}; + int i; + + u8 reg_addr[] = { + 0x03, 0x04, 0x06, 0x08, 0x09, + 0x0a, 0x0b, 0x0e, 0x0f + }; + + reg_val[0] = 1 | (1 << 1) | (pll->lpf_sel << 2); + reg_val[1] = pll->div | (1 << 3) | (pll->cp_s << 5) | (pll->fdk_s << 7); + reg_val[2] = pll->nint; + reg_val[3] = pll->vco_band | (pll->sdm_en << 1) | (pll->refin << 2); + reg_val[4] = pll->kint >> 12; + reg_val[5] = pll->kint >> 4; + reg_val[6] = pll->out_sel | ((pll->kint << 4) & 0xf); + reg_val[7] = 1 << 4; + reg_val[8] = pll->det_delay; + + for (i = 0; i < sizeof(reg_addr); ++i) { + regmap_write(regmap, reg_addr[i], reg_val[i]); + DRM_DEBUG("%02x: %02x\n", reg_addr[i], reg_val[i]); + } +} + +int dphy_pll_config(struct dsi_context *ctx) +{ + struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx); + struct regmap *regmap = ctx->regmap; + struct dphy_pll *pll = &ctx->pll; + int ret; + + pll->freq = dsi->slave->hs_rate; + + /* FREQ = 26M * (NINT + KINT / 2^20) / out_sel */ + ret = dphy_calc_pll_param(pll); + if (ret) { + drm_err(dsi->drm, "failed to calculate dphy pll parameters\n"); + return ret; + } + dphy_set_pll_reg(pll, regmap); + + return 0; +} + +static void dphy_set_timing_reg(struct regmap *regmap, int type, u8 val[]) +{ + switch (type) { + case REQUEST_TIME: + regmap_write(regmap, 0x31, val[CLK]); + regmap_write(regmap, 0x41, val[DATA]); + regmap_write(regmap, 0x51, val[DATA]); + regmap_write(regmap, 0x61, 
val[DATA]); + regmap_write(regmap, 0x71, val[DATA]); + + regmap_write(regmap, 0x90, val[CLK]); + regmap_write(regmap, 0xa0, val[DATA]); + regmap_write(regmap, 0xb0, val[DATA]); + regmap_write(regmap, 0xc0, val[DATA]); + regmap_write(regmap, 0xd0, val[DATA]); + break; + case PREPARE_TIME: + regmap_write(regmap, 0x32, val[CLK]); + regmap_write(regmap, 0x42, val[DATA]); + regmap_write(regmap, 0x52, val[DATA]); + regmap_write(regmap, 0x62, val[DATA]); + regmap_write(regmap, 0x72, val[DATA]); + + regmap_write(regmap, 0x91, val[CLK]); + regmap_write(regmap, 0xa1, val[DATA]); + regmap_write(regmap, 0xb1, val[DATA]); + regmap_write(regmap, 0xc1, val[DATA]); + regmap_write(regmap, 0xd1, val[DATA]); + break; + case ZERO_TIME: + regmap_write(regmap, 0x33, val[CLK]); + regmap_write(regmap, 0x43, val[DATA]); + regmap_write(regmap, 0x53, val[DATA]); + regmap_write(regmap, 0x63, val[DATA]); + regmap_write(regmap, 0x73, val[DATA]); + + regmap_write(regmap, 0x92, val[CLK]); + regmap_write(regmap, 0xa2, val[DATA]); + regmap_write(regmap, 0xb2, val[DATA]); + regmap_write(regmap, 0xc2, val[DATA]); + regmap_write(regmap, 0xd2, val[DATA]); + break; + case TRAIL_TIME: + regmap_write(regmap, 0x34, val[CLK]); + regmap_write(regmap, 0x44, val[DATA]); + regmap_write(regmap, 0x54, val[DATA]); + regmap_write(regmap, 0x64, val[DATA]); + regmap_write(regmap, 0x74, val[DATA]); + + regmap_write(regmap, 0x93, val[CLK]); + regmap_write(regmap, 0xa3, val[DATA]); + regmap_write(regmap, 0xb3, val[DATA]); + regmap_write(regmap, 0xc3, val[DATA]); + regmap_write(regmap, 0xd3, val[DATA]); + break; + case EXIT_TIME: + regmap_write(regmap, 0x36, val[CLK]); + regmap_write(regmap, 0x46, val[DATA]); + regmap_write(regmap, 0x56, val[DATA]); + regmap_write(regmap, 0x66, val[DATA]); + regmap_write(regmap, 0x76, val[DATA]); + + regmap_write(regmap, 0x95, val[CLK]); + regmap_write(regmap, 0xA5, val[DATA]); + regmap_write(regmap, 0xB5, val[DATA]); + regmap_write(regmap, 0xc5, val[DATA]); + regmap_write(regmap, 0xd5, val[DATA]); + break; + case CLKPOST_TIME: + regmap_write(regmap, 0x35, val[CLK]); + regmap_write(regmap, 0x94, val[CLK]); + break; + + /* the following just use default value */ + case SETTLE_TIME: + fallthrough; + case TA_GET: + fallthrough; + case TA_GO: + fallthrough; + case TA_SURE: + fallthrough; + default: + break; + } +} + +void dphy_timing_config(struct dsi_context *ctx) +{ + struct regmap *regmap = ctx->regmap; + struct dphy_pll *pll = &ctx->pll; + const u32 factor = 2; + const u32 scale = 100; + u32 t_ui, t_byteck, t_half_byteck; + u32 range[2], constant; + u8 val[2]; + u32 tmp = 0; + + /* t_ui: 1 ui, byteck: 8 ui, half byteck: 4 ui */ + t_ui = 1000 * scale / (pll->freq / 1000); + t_byteck = t_ui << 3; + t_half_byteck = t_ui << 2; + constant = t_ui << 1; + + /* REQUEST_TIME: HS T-LPX: LP-01 + * For T-LPX, mipi spec defined min value is 50ns, + * but maybe it shouldn't be too small, because BTA, + * LP-10, LP-00, LP-01, all of this is related to T-LPX. 
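+ * As a worked example (assuming a hypothetical 1 Gbps HS lane rate):
+ * t_ui = 100 (units of 0.01 ns), t_byteck = 800, range[L] = 50 ns -> 5000,
+ * so val[CLK] = DIV_ROUND_UP(5000 * 4, 800) - 2 = 23 byte-clock cycles.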
+ */ + range[L] = 50 * scale; + range[H] = INFINITY; + val[CLK] = DIV_ROUND_UP(range[L] * (factor << 1), t_byteck) - 2; + val[DATA] = val[CLK]; + dphy_set_timing_reg(regmap, REQUEST_TIME, val); + + /* PREPARE_TIME: HS sequence: LP-00 */ + range[L] = 38 * scale; + range[H] = 95 * scale; + tmp = AVERAGE(range[L], range[H]); + val[CLK] = DIV_ROUND_UP(AVERAGE(range[L], range[H]), t_half_byteck) - 1; + range[L] = 40 * scale + 4 * t_ui; + range[H] = 85 * scale + 6 * t_ui; + tmp |= AVERAGE(range[L], range[H]) << 16; + val[DATA] = DIV_ROUND_UP(AVERAGE(range[L], range[H]), t_half_byteck) - 1; + dphy_set_timing_reg(regmap, PREPARE_TIME, val); + + /* ZERO_TIME: HS-ZERO */ + range[L] = 300 * scale; + range[H] = INFINITY; + val[CLK] = DIV_ROUND_UP(range[L] * factor + (tmp & 0xffff) + - 525 * t_byteck / 100, t_byteck) - 2; + range[L] = 145 * scale + 10 * t_ui; + val[DATA] = DIV_ROUND_UP(range[L] * factor + + ((tmp >> 16) & 0xffff) - 525 * t_byteck / 100, + t_byteck) - 2; + dphy_set_timing_reg(regmap, ZERO_TIME, val); + + /* TRAIL_TIME: HS-TRAIL */ + range[L] = 60 * scale; + range[H] = INFINITY; + val[CLK] = DIV_ROUND_UP(range[L] * factor - constant, t_half_byteck); + range[L] = max(8 * t_ui, 60 * scale + 4 * t_ui); + val[DATA] = DIV_ROUND_UP(range[L] * 3 / 2 - constant, t_half_byteck) - 2; + dphy_set_timing_reg(regmap, TRAIL_TIME, val); + + /* EXIT_TIME: */ + range[L] = 100 * scale; + range[H] = INFINITY; + val[CLK] = DIV_ROUND_UP(range[L] * factor, t_byteck) - 2; + val[DATA] = val[CLK]; + dphy_set_timing_reg(regmap, EXIT_TIME, val); + + /* CLKPOST_TIME: */ + range[L] = 60 * scale + 52 * t_ui; + range[H] = INFINITY; + val[CLK] = DIV_ROUND_UP(range[L] * factor, t_byteck) - 2; + val[DATA] = val[CLK]; + dphy_set_timing_reg(regmap, CLKPOST_TIME, val); + + /* SETTLE_TIME: + * This time is used for receiver. So for transmitter, + * it can be ignored. + */ + + /* TA_GO: + * transmitter drives bridge state(LP-00) before releasing control, + * reg 0x1f default value: 0x04, which is good. + */ + + /* TA_SURE: + * After LP-10 state and before bridge state(LP-00), + * reg 0x20 default value: 0x01, which is good. + */ + + /* TA_GET: + * receiver drives Bridge state(LP-00) before releasing control + * reg 0x21 default value: 0x03, which is good. + */ +} diff --git a/drivers/gpu/drm/sprd/sprd_dpu.c b/drivers/gpu/drm/sprd/sprd_dpu.c new file mode 100644 index 000000000000..06a3414ee43a --- /dev/null +++ b/drivers/gpu/drm/sprd/sprd_dpu.c @@ -0,0 +1,880 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 Unisoc Inc. 
+ */ + +#include <linux/component.h> +#include <linux/delay.h> +#include <linux/dma-buf.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_graph.h> +#include <linux/of_irq.h> +#include <linux/wait.h> +#include <linux/workqueue.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_plane_helper.h> + +#include "sprd_drm.h" +#include "sprd_dpu.h" +#include "sprd_dsi.h" + +/* Global control registers */ +#define REG_DPU_CTRL 0x04 +#define REG_DPU_CFG0 0x08 +#define REG_PANEL_SIZE 0x20 +#define REG_BLEND_SIZE 0x24 +#define REG_BG_COLOR 0x2C + +/* Layer0 control registers */ +#define REG_LAY_BASE_ADDR0 0x30 +#define REG_LAY_BASE_ADDR1 0x34 +#define REG_LAY_BASE_ADDR2 0x38 +#define REG_LAY_CTRL 0x40 +#define REG_LAY_SIZE 0x44 +#define REG_LAY_PITCH 0x48 +#define REG_LAY_POS 0x4C +#define REG_LAY_ALPHA 0x50 +#define REG_LAY_CROP_START 0x5C + +/* Interrupt control registers */ +#define REG_DPU_INT_EN 0x1E0 +#define REG_DPU_INT_CLR 0x1E4 +#define REG_DPU_INT_STS 0x1E8 + +/* DPI control registers */ +#define REG_DPI_CTRL 0x1F0 +#define REG_DPI_H_TIMING 0x1F4 +#define REG_DPI_V_TIMING 0x1F8 + +/* MMU control registers */ +#define REG_MMU_EN 0x800 +#define REG_MMU_VPN_RANGE 0x80C +#define REG_MMU_PPN1 0x83C +#define REG_MMU_RANGE1 0x840 +#define REG_MMU_PPN2 0x844 +#define REG_MMU_RANGE2 0x848 + +/* Global control bits */ +#define BIT_DPU_RUN BIT(0) +#define BIT_DPU_STOP BIT(1) +#define BIT_DPU_REG_UPDATE BIT(2) +#define BIT_DPU_IF_EDPI BIT(0) + +/* Layer control bits */ +#define BIT_DPU_LAY_EN BIT(0) +#define BIT_DPU_LAY_LAYER_ALPHA (0x01 << 2) +#define BIT_DPU_LAY_COMBO_ALPHA (0x02 << 2) +#define BIT_DPU_LAY_FORMAT_YUV422_2PLANE (0x00 << 4) +#define BIT_DPU_LAY_FORMAT_YUV420_2PLANE (0x01 << 4) +#define BIT_DPU_LAY_FORMAT_YUV420_3PLANE (0x02 << 4) +#define BIT_DPU_LAY_FORMAT_ARGB8888 (0x03 << 4) +#define BIT_DPU_LAY_FORMAT_RGB565 (0x04 << 4) +#define BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3 (0x00 << 8) +#define BIT_DPU_LAY_DATA_ENDIAN_B3B2B1B0 (0x01 << 8) +#define BIT_DPU_LAY_NO_SWITCH (0x00 << 10) +#define BIT_DPU_LAY_RB_OR_UV_SWITCH (0x01 << 10) +#define BIT_DPU_LAY_MODE_BLEND_NORMAL (0x00 << 16) +#define BIT_DPU_LAY_MODE_BLEND_PREMULT (0x01 << 16) +#define BIT_DPU_LAY_ROTATION_0 (0x00 << 20) +#define BIT_DPU_LAY_ROTATION_90 (0x01 << 20) +#define BIT_DPU_LAY_ROTATION_180 (0x02 << 20) +#define BIT_DPU_LAY_ROTATION_270 (0x03 << 20) +#define BIT_DPU_LAY_ROTATION_0_M (0x04 << 20) +#define BIT_DPU_LAY_ROTATION_90_M (0x05 << 20) +#define BIT_DPU_LAY_ROTATION_180_M (0x06 << 20) +#define BIT_DPU_LAY_ROTATION_270_M (0x07 << 20) + +/* Interrupt control & status bits */ +#define BIT_DPU_INT_DONE BIT(0) +#define BIT_DPU_INT_TE BIT(1) +#define BIT_DPU_INT_ERR BIT(2) +#define BIT_DPU_INT_UPDATE_DONE BIT(4) +#define BIT_DPU_INT_VSYNC BIT(5) + +/* DPI control bits */ +#define BIT_DPU_EDPI_TE_EN BIT(8) +#define BIT_DPU_EDPI_FROM_EXTERNAL_PAD BIT(10) +#define BIT_DPU_DPI_HALT_EN BIT(16) + +static const u32 layer_fmts[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_NV12, + DRM_FORMAT_NV21, + DRM_FORMAT_NV16, + DRM_FORMAT_NV61, + DRM_FORMAT_YUV420, + DRM_FORMAT_YVU420, +}; + +struct sprd_plane { + 
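+	/* no driver-private plane fields yet; this only wraps the core drm_plane */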
struct drm_plane base; +}; + +static int dpu_wait_stop_done(struct sprd_dpu *dpu) +{ + struct dpu_context *ctx = &dpu->ctx; + int rc; + + if (ctx->stopped) + return 0; + + rc = wait_event_interruptible_timeout(ctx->wait_queue, ctx->evt_stop, + msecs_to_jiffies(500)); + ctx->evt_stop = false; + + ctx->stopped = true; + + if (!rc) { + drm_err(dpu->drm, "dpu wait for stop done time out!\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int dpu_wait_update_done(struct sprd_dpu *dpu) +{ + struct dpu_context *ctx = &dpu->ctx; + int rc; + + ctx->evt_update = false; + + rc = wait_event_interruptible_timeout(ctx->wait_queue, ctx->evt_update, + msecs_to_jiffies(500)); + + if (!rc) { + drm_err(dpu->drm, "dpu wait for reg update done time out!\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static u32 drm_format_to_dpu(struct drm_framebuffer *fb) +{ + u32 format = 0; + + switch (fb->format->format) { + case DRM_FORMAT_BGRA8888: + /* BGRA8888 -> ARGB8888 */ + format |= BIT_DPU_LAY_DATA_ENDIAN_B3B2B1B0; + format |= BIT_DPU_LAY_FORMAT_ARGB8888; + break; + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_RGBA8888: + /* RGBA8888 -> ABGR8888 */ + format |= BIT_DPU_LAY_DATA_ENDIAN_B3B2B1B0; + fallthrough; + case DRM_FORMAT_ABGR8888: + /* RB switch */ + format |= BIT_DPU_LAY_RB_OR_UV_SWITCH; + fallthrough; + case DRM_FORMAT_ARGB8888: + format |= BIT_DPU_LAY_FORMAT_ARGB8888; + break; + case DRM_FORMAT_XBGR8888: + /* RB switch */ + format |= BIT_DPU_LAY_RB_OR_UV_SWITCH; + fallthrough; + case DRM_FORMAT_XRGB8888: + format |= BIT_DPU_LAY_FORMAT_ARGB8888; + break; + case DRM_FORMAT_BGR565: + /* RB switch */ + format |= BIT_DPU_LAY_RB_OR_UV_SWITCH; + fallthrough; + case DRM_FORMAT_RGB565: + format |= BIT_DPU_LAY_FORMAT_RGB565; + break; + case DRM_FORMAT_NV12: + /* 2-Lane: Yuv420 */ + format |= BIT_DPU_LAY_FORMAT_YUV420_2PLANE; + /* Y endian */ + format |= BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3; + /* UV endian */ + format |= BIT_DPU_LAY_NO_SWITCH; + break; + case DRM_FORMAT_NV21: + /* 2-Lane: Yuv420 */ + format |= BIT_DPU_LAY_FORMAT_YUV420_2PLANE; + /* Y endian */ + format |= BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3; + /* UV endian */ + format |= BIT_DPU_LAY_RB_OR_UV_SWITCH; + break; + case DRM_FORMAT_NV16: + /* 2-Lane: Yuv422 */ + format |= BIT_DPU_LAY_FORMAT_YUV422_2PLANE; + /* Y endian */ + format |= BIT_DPU_LAY_DATA_ENDIAN_B3B2B1B0; + /* UV endian */ + format |= BIT_DPU_LAY_RB_OR_UV_SWITCH; + break; + case DRM_FORMAT_NV61: + /* 2-Lane: Yuv422 */ + format |= BIT_DPU_LAY_FORMAT_YUV422_2PLANE; + /* Y endian */ + format |= BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3; + /* UV endian */ + format |= BIT_DPU_LAY_NO_SWITCH; + break; + case DRM_FORMAT_YUV420: + format |= BIT_DPU_LAY_FORMAT_YUV420_3PLANE; + /* Y endian */ + format |= BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3; + /* UV endian */ + format |= BIT_DPU_LAY_NO_SWITCH; + break; + case DRM_FORMAT_YVU420: + format |= BIT_DPU_LAY_FORMAT_YUV420_3PLANE; + /* Y endian */ + format |= BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3; + /* UV endian */ + format |= BIT_DPU_LAY_RB_OR_UV_SWITCH; + break; + default: + break; + } + + return format; +} + +static u32 drm_rotation_to_dpu(struct drm_plane_state *state) +{ + u32 rotation = 0; + + switch (state->rotation) { + default: + case DRM_MODE_ROTATE_0: + rotation = BIT_DPU_LAY_ROTATION_0; + break; + case DRM_MODE_ROTATE_90: + rotation = BIT_DPU_LAY_ROTATION_90; + break; + case DRM_MODE_ROTATE_180: + rotation = BIT_DPU_LAY_ROTATION_180; + break; + case DRM_MODE_ROTATE_270: + rotation = BIT_DPU_LAY_ROTATION_270; + break; + case DRM_MODE_REFLECT_Y: + rotation = 
BIT_DPU_LAY_ROTATION_180_M; + break; + case (DRM_MODE_REFLECT_Y | DRM_MODE_ROTATE_90): + rotation = BIT_DPU_LAY_ROTATION_90_M; + break; + case DRM_MODE_REFLECT_X: + rotation = BIT_DPU_LAY_ROTATION_0_M; + break; + case (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90): + rotation = BIT_DPU_LAY_ROTATION_270_M; + break; + } + + return rotation; +} + +static u32 drm_blend_to_dpu(struct drm_plane_state *state) +{ + u32 blend = 0; + + switch (state->pixel_blend_mode) { + case DRM_MODE_BLEND_COVERAGE: + /* alpha mode select - combo alpha */ + blend |= BIT_DPU_LAY_COMBO_ALPHA; + /* Normal mode */ + blend |= BIT_DPU_LAY_MODE_BLEND_NORMAL; + break; + case DRM_MODE_BLEND_PREMULTI: + /* alpha mode select - combo alpha */ + blend |= BIT_DPU_LAY_COMBO_ALPHA; + /* Pre-mult mode */ + blend |= BIT_DPU_LAY_MODE_BLEND_PREMULT; + break; + case DRM_MODE_BLEND_PIXEL_NONE: + default: + /* don't do blending, maybe RGBX */ + /* alpha mode select - layer alpha */ + blend |= BIT_DPU_LAY_LAYER_ALPHA; + break; + } + + return blend; +} + +static void sprd_dpu_layer(struct sprd_dpu *dpu, struct drm_plane_state *state) +{ + struct dpu_context *ctx = &dpu->ctx; + struct drm_gem_cma_object *cma_obj; + struct drm_framebuffer *fb = state->fb; + u32 addr, size, offset, pitch, blend, format, rotation; + u32 src_x = state->src_x >> 16; + u32 src_y = state->src_y >> 16; + u32 src_w = state->src_w >> 16; + u32 src_h = state->src_h >> 16; + u32 dst_x = state->crtc_x; + u32 dst_y = state->crtc_y; + u32 alpha = state->alpha; + u32 index = state->zpos; + int i; + + offset = (dst_x & 0xffff) | (dst_y << 16); + size = (src_w & 0xffff) | (src_h << 16); + + for (i = 0; i < fb->format->num_planes; i++) { + cma_obj = drm_fb_cma_get_gem_obj(fb, i); + addr = cma_obj->paddr + fb->offsets[i]; + + if (i == 0) + layer_reg_wr(ctx, REG_LAY_BASE_ADDR0, addr, index); + else if (i == 1) + layer_reg_wr(ctx, REG_LAY_BASE_ADDR1, addr, index); + else + layer_reg_wr(ctx, REG_LAY_BASE_ADDR2, addr, index); + } + + if (fb->format->num_planes == 3) { + /* UV pitch is 1/2 of Y pitch */ + pitch = (fb->pitches[0] / fb->format->cpp[0]) | + (fb->pitches[0] / fb->format->cpp[0] << 15); + } else { + pitch = fb->pitches[0] / fb->format->cpp[0]; + } + + layer_reg_wr(ctx, REG_LAY_POS, offset, index); + layer_reg_wr(ctx, REG_LAY_SIZE, size, index); + layer_reg_wr(ctx, REG_LAY_CROP_START, + src_y << 16 | src_x, index); + layer_reg_wr(ctx, REG_LAY_ALPHA, alpha, index); + layer_reg_wr(ctx, REG_LAY_PITCH, pitch, index); + + format = drm_format_to_dpu(fb); + blend = drm_blend_to_dpu(state); + rotation = drm_rotation_to_dpu(state); + + layer_reg_wr(ctx, REG_LAY_CTRL, BIT_DPU_LAY_EN | + format | + blend | + rotation, + index); +} + +static void sprd_dpu_flip(struct sprd_dpu *dpu) +{ + struct dpu_context *ctx = &dpu->ctx; + + /* + * Make sure the dpu is in stop status. DPU has no shadow + * registers in EDPI mode. So the config registers can only be + * updated in the rising edge of DPU_RUN bit. 
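+ * With no shadow registers, the flow below is: wait for the stop
+ * interrupt, then set BIT_DPU_RUN so the already-programmed layer
+ * registers are latched on its rising edge.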
+ */ + if (ctx->if_type == SPRD_DPU_IF_EDPI) + dpu_wait_stop_done(dpu); + + /* update trigger and wait */ + if (ctx->if_type == SPRD_DPU_IF_DPI) { + if (!ctx->stopped) { + dpu_reg_set(ctx, REG_DPU_CTRL, BIT_DPU_REG_UPDATE); + dpu_wait_update_done(dpu); + } + + dpu_reg_set(ctx, REG_DPU_INT_EN, BIT_DPU_INT_ERR); + } else if (ctx->if_type == SPRD_DPU_IF_EDPI) { + dpu_reg_set(ctx, REG_DPU_CTRL, BIT_DPU_RUN); + + ctx->stopped = false; + } +} + +static void sprd_dpu_init(struct sprd_dpu *dpu) +{ + struct dpu_context *ctx = &dpu->ctx; + u32 int_mask = 0; + + writel(0x00, ctx->base + REG_BG_COLOR); + writel(0x00, ctx->base + REG_MMU_EN); + writel(0x00, ctx->base + REG_MMU_PPN1); + writel(0xffff, ctx->base + REG_MMU_RANGE1); + writel(0x00, ctx->base + REG_MMU_PPN2); + writel(0xffff, ctx->base + REG_MMU_RANGE2); + writel(0x1ffff, ctx->base + REG_MMU_VPN_RANGE); + + if (ctx->if_type == SPRD_DPU_IF_DPI) { + /* use dpi as interface */ + dpu_reg_clr(ctx, REG_DPU_CFG0, BIT_DPU_IF_EDPI); + /* disable Halt function for SPRD DSI */ + dpu_reg_clr(ctx, REG_DPI_CTRL, BIT_DPU_DPI_HALT_EN); + /* select te from external pad */ + dpu_reg_set(ctx, REG_DPI_CTRL, BIT_DPU_EDPI_FROM_EXTERNAL_PAD); + + /* enable dpu update done INT */ + int_mask |= BIT_DPU_INT_UPDATE_DONE; + /* enable dpu done INT */ + int_mask |= BIT_DPU_INT_DONE; + /* enable dpu dpi vsync */ + int_mask |= BIT_DPU_INT_VSYNC; + /* enable dpu TE INT */ + int_mask |= BIT_DPU_INT_TE; + /* enable underflow err INT */ + int_mask |= BIT_DPU_INT_ERR; + } else if (ctx->if_type == SPRD_DPU_IF_EDPI) { + /* use edpi as interface */ + dpu_reg_set(ctx, REG_DPU_CFG0, BIT_DPU_IF_EDPI); + /* use external te */ + dpu_reg_set(ctx, REG_DPI_CTRL, BIT_DPU_EDPI_FROM_EXTERNAL_PAD); + /* enable te */ + dpu_reg_set(ctx, REG_DPI_CTRL, BIT_DPU_EDPI_TE_EN); + + /* enable stop done INT */ + int_mask |= BIT_DPU_INT_DONE; + /* enable TE INT */ + int_mask |= BIT_DPU_INT_TE; + } + + writel(int_mask, ctx->base + REG_DPU_INT_EN); +} + +static void sprd_dpu_fini(struct sprd_dpu *dpu) +{ + struct dpu_context *ctx = &dpu->ctx; + + writel(0x00, ctx->base + REG_DPU_INT_EN); + writel(0xff, ctx->base + REG_DPU_INT_CLR); +} + +static void sprd_dpi_init(struct sprd_dpu *dpu) +{ + struct dpu_context *ctx = &dpu->ctx; + u32 reg_val; + u32 size; + + size = (ctx->vm.vactive << 16) | ctx->vm.hactive; + writel(size, ctx->base + REG_PANEL_SIZE); + writel(size, ctx->base + REG_BLEND_SIZE); + + if (ctx->if_type == SPRD_DPU_IF_DPI) { + /* set dpi timing */ + reg_val = ctx->vm.hsync_len << 0 | + ctx->vm.hback_porch << 8 | + ctx->vm.hfront_porch << 20; + writel(reg_val, ctx->base + REG_DPI_H_TIMING); + + reg_val = ctx->vm.vsync_len << 0 | + ctx->vm.vback_porch << 8 | + ctx->vm.vfront_porch << 20; + writel(reg_val, ctx->base + REG_DPI_V_TIMING); + } +} + +void sprd_dpu_run(struct sprd_dpu *dpu) +{ + struct dpu_context *ctx = &dpu->ctx; + + dpu_reg_set(ctx, REG_DPU_CTRL, BIT_DPU_RUN); + + ctx->stopped = false; +} + +void sprd_dpu_stop(struct sprd_dpu *dpu) +{ + struct dpu_context *ctx = &dpu->ctx; + + if (ctx->if_type == SPRD_DPU_IF_DPI) + dpu_reg_set(ctx, REG_DPU_CTRL, BIT_DPU_STOP); + + dpu_wait_stop_done(dpu); +} + +static int sprd_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_crtc_state *crtc_state; + u32 fmt; + + if (!plane_state->fb || !plane_state->crtc) + return 0; + + fmt = drm_format_to_dpu(plane_state->fb); + if (!fmt) + return -EINVAL; + + crtc_state = 
drm_atomic_get_crtc_state(plane_state->state, plane_state->crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + return drm_atomic_helper_check_plane_state(plane_state, crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + true, true); +} + +static void sprd_plane_atomic_update(struct drm_plane *drm_plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + drm_plane); + struct sprd_dpu *dpu = to_sprd_crtc(new_state->crtc); + + /* start configure dpu layers */ + sprd_dpu_layer(dpu, new_state); +} + +static void sprd_plane_atomic_disable(struct drm_plane *drm_plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + drm_plane); + struct sprd_dpu *dpu = to_sprd_crtc(old_state->crtc); + + layer_reg_wr(&dpu->ctx, REG_LAY_CTRL, 0x00, old_state->zpos); +} + +static void sprd_plane_create_properties(struct sprd_plane *plane, int index) +{ + unsigned int supported_modes = BIT(DRM_MODE_BLEND_PIXEL_NONE) | + BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE); + + /* create rotation property */ + drm_plane_create_rotation_property(&plane->base, + DRM_MODE_ROTATE_0, + DRM_MODE_ROTATE_MASK | + DRM_MODE_REFLECT_MASK); + + /* create alpha property */ + drm_plane_create_alpha_property(&plane->base); + + /* create blend mode property */ + drm_plane_create_blend_mode_property(&plane->base, supported_modes); + + /* create zpos property */ + drm_plane_create_zpos_immutable_property(&plane->base, index); +} + +static const struct drm_plane_helper_funcs sprd_plane_helper_funcs = { + .atomic_check = sprd_plane_atomic_check, + .atomic_update = sprd_plane_atomic_update, + .atomic_disable = sprd_plane_atomic_disable, +}; + +static const struct drm_plane_funcs sprd_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_cleanup, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +static struct sprd_plane *sprd_planes_init(struct drm_device *drm) +{ + struct sprd_plane *plane, *primary; + enum drm_plane_type plane_type; + int i; + + for (i = 0; i < 6; i++) { + plane_type = (i == 0) ? 
DRM_PLANE_TYPE_PRIMARY : + DRM_PLANE_TYPE_OVERLAY; + + plane = drmm_universal_plane_alloc(drm, struct sprd_plane, base, + 1, &sprd_plane_funcs, + layer_fmts, ARRAY_SIZE(layer_fmts), + NULL, plane_type, NULL); + if (IS_ERR(plane)) { + drm_err(drm, "failed to init drm plane: %d\n", i); + return plane; + } + + drm_plane_helper_add(&plane->base, &sprd_plane_helper_funcs); + + sprd_plane_create_properties(plane, i); + + if (i == 0) + primary = plane; + } + + return primary; +} + +static void sprd_crtc_mode_set_nofb(struct drm_crtc *crtc) +{ + struct sprd_dpu *dpu = to_sprd_crtc(crtc); + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + struct drm_encoder *encoder; + struct sprd_dsi *dsi; + + drm_display_mode_to_videomode(mode, &dpu->ctx.vm); + + drm_for_each_encoder_mask(encoder, crtc->dev, + crtc->state->encoder_mask) { + dsi = encoder_to_dsi(encoder); + + if (dsi->slave->mode_flags & MIPI_DSI_MODE_VIDEO) + dpu->ctx.if_type = SPRD_DPU_IF_DPI; + else + dpu->ctx.if_type = SPRD_DPU_IF_EDPI; + } + + sprd_dpi_init(dpu); +} + +static void sprd_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct sprd_dpu *dpu = to_sprd_crtc(crtc); + + sprd_dpu_init(dpu); + + drm_crtc_vblank_on(&dpu->base); +} + +static void sprd_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct sprd_dpu *dpu = to_sprd_crtc(crtc); + struct drm_device *drm = dpu->base.dev; + + drm_crtc_vblank_off(&dpu->base); + + sprd_dpu_fini(dpu); + + spin_lock_irq(&drm->event_lock); + if (crtc->state->event) { + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + } + spin_unlock_irq(&drm->event_lock); +} + +static void sprd_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) + +{ + struct sprd_dpu *dpu = to_sprd_crtc(crtc); + struct drm_device *drm = dpu->base.dev; + + sprd_dpu_flip(dpu); + + spin_lock_irq(&drm->event_lock); + if (crtc->state->event) { + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + } + spin_unlock_irq(&drm->event_lock); +} + +static int sprd_crtc_enable_vblank(struct drm_crtc *crtc) +{ + struct sprd_dpu *dpu = to_sprd_crtc(crtc); + + dpu_reg_set(&dpu->ctx, REG_DPU_INT_EN, BIT_DPU_INT_VSYNC); + + return 0; +} + +static void sprd_crtc_disable_vblank(struct drm_crtc *crtc) +{ + struct sprd_dpu *dpu = to_sprd_crtc(crtc); + + dpu_reg_clr(&dpu->ctx, REG_DPU_INT_EN, BIT_DPU_INT_VSYNC); +} + +static const struct drm_crtc_helper_funcs sprd_crtc_helper_funcs = { + .mode_set_nofb = sprd_crtc_mode_set_nofb, + .atomic_flush = sprd_crtc_atomic_flush, + .atomic_enable = sprd_crtc_atomic_enable, + .atomic_disable = sprd_crtc_atomic_disable, +}; + +static const struct drm_crtc_funcs sprd_crtc_funcs = { + .destroy = drm_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .enable_vblank = sprd_crtc_enable_vblank, + .disable_vblank = sprd_crtc_disable_vblank, +}; + +static struct sprd_dpu *sprd_crtc_init(struct drm_device *drm, + struct drm_plane *primary, struct device *dev) +{ + struct device_node *port; + struct sprd_dpu *dpu; + + dpu = drmm_crtc_alloc_with_planes(drm, struct sprd_dpu, base, + primary, NULL, + &sprd_crtc_funcs, NULL); + if (IS_ERR(dpu)) { + drm_err(drm, "failed to init crtc\n"); + return dpu; + } + drm_crtc_helper_add(&dpu->base, 
&sprd_crtc_helper_funcs);
+
+	/*
+	 * Set the crtc port so that the drm_of_find_possible_crtcs() call works.
+	 */
+	port = of_graph_get_port_by_id(dev->of_node, 0);
+	if (!port) {
+		drm_err(drm, "failed to find crtc output port for %s\n",
+			dev->of_node->full_name);
+		return ERR_PTR(-EINVAL);
+	}
+	dpu->base.port = port;
+	of_node_put(port);
+
+	return dpu;
+}
+
+static irqreturn_t sprd_dpu_isr(int irq, void *data)
+{
+	struct sprd_dpu *dpu = data;
+	struct dpu_context *ctx = &dpu->ctx;
+	u32 reg_val, int_mask = 0;
+
+	reg_val = readl(ctx->base + REG_DPU_INT_STS);
+
+	/* disable err interrupt */
+	if (reg_val & BIT_DPU_INT_ERR) {
+		int_mask |= BIT_DPU_INT_ERR;
+		drm_warn(dpu->drm, "dpu underflow!\n");
+	}
+
+	/* dpu update done isr */
+	if (reg_val & BIT_DPU_INT_UPDATE_DONE) {
+		ctx->evt_update = true;
+		wake_up_interruptible_all(&ctx->wait_queue);
+	}
+
+	/* dpu stop done isr */
+	if (reg_val & BIT_DPU_INT_DONE) {
+		ctx->evt_stop = true;
+		wake_up_interruptible_all(&ctx->wait_queue);
+	}
+
+	if (reg_val & BIT_DPU_INT_VSYNC)
+		drm_crtc_handle_vblank(&dpu->base);
+
+	writel(reg_val, ctx->base + REG_DPU_INT_CLR);
+	dpu_reg_clr(ctx, REG_DPU_INT_EN, int_mask);
+
+	return IRQ_HANDLED;
+}
+
+static int sprd_dpu_context_init(struct sprd_dpu *dpu,
+				 struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct dpu_context *ctx = &dpu->ctx;
+	struct resource *res;
+	int ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ctx->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!ctx->base) {
+		dev_err(dev, "failed to map dpu registers\n");
+		return -EFAULT;
+	}
+
+	ctx->irq = platform_get_irq(pdev, 0);
+	if (ctx->irq < 0) {
+		dev_err(dev, "failed to get dpu irq\n");
+		return ctx->irq;
+	}
+
+	/* disable and clear interrupts before registering the dpu IRQ.
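+	 * Otherwise a stale status bit could fire as soon as devm_request_irq()
+	 * enables the line, before the wait queue below is initialized.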
*/ + writel(0x00, ctx->base + REG_DPU_INT_EN); + writel(0xff, ctx->base + REG_DPU_INT_CLR); + + ret = devm_request_irq(dev, ctx->irq, sprd_dpu_isr, + IRQF_TRIGGER_NONE, "DPU", dpu); + if (ret) { + dev_err(dev, "failed to register dpu irq handler\n"); + return ret; + } + + init_waitqueue_head(&ctx->wait_queue); + + return 0; +} + +static int sprd_dpu_bind(struct device *dev, struct device *master, void *data) +{ + struct drm_device *drm = data; + struct sprd_dpu *dpu; + struct sprd_plane *plane; + int ret; + + plane = sprd_planes_init(drm); + if (IS_ERR(plane)) + return PTR_ERR(plane); + + dpu = sprd_crtc_init(drm, &plane->base, dev); + if (IS_ERR(dpu)) + return PTR_ERR(dpu); + + dpu->drm = drm; + dev_set_drvdata(dev, dpu); + + ret = sprd_dpu_context_init(dpu, dev); + if (ret) + return ret; + + return 0; +} + +static const struct component_ops dpu_component_ops = { + .bind = sprd_dpu_bind, +}; + +static const struct of_device_id dpu_match_table[] = { + { .compatible = "sprd,sharkl3-dpu" }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, dpu_match_table); + +static int sprd_dpu_probe(struct platform_device *pdev) +{ + return component_add(&pdev->dev, &dpu_component_ops); +} + +static int sprd_dpu_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &dpu_component_ops); + + return 0; +} + +struct platform_driver sprd_dpu_driver = { + .probe = sprd_dpu_probe, + .remove = sprd_dpu_remove, + .driver = { + .name = "sprd-dpu-drv", + .of_match_table = dpu_match_table, + }, +}; + +MODULE_AUTHOR("Leon He <leon.he@unisoc.com>"); +MODULE_AUTHOR("Kevin Tang <kevin.tang@unisoc.com>"); +MODULE_DESCRIPTION("Unisoc Display Controller Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/sprd/sprd_dpu.h b/drivers/gpu/drm/sprd/sprd_dpu.h new file mode 100644 index 000000000000..157a78f24dc1 --- /dev/null +++ b/drivers/gpu/drm/sprd/sprd_dpu.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 Unisoc Inc. + */ + +#ifndef __SPRD_DPU_H__ +#define __SPRD_DPU_H__ + +#include <linux/bug.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/string.h> +#include <video/videomode.h> + +#include <drm/drm_crtc.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_print.h> +#include <drm/drm_vblank.h> +#include <uapi/drm/drm_mode.h> + +/* DPU Layer registers offset */ +#define DPU_LAY_REG_OFFSET 0x30 + +enum { + SPRD_DPU_IF_DPI, + SPRD_DPU_IF_EDPI, + SPRD_DPU_IF_LIMIT +}; + +/** + * Sprd DPU context structure + * + * @base: DPU controller base address + * @irq: IRQ number to install the handler for + * @if_type: The type of DPI interface, default is DPI mode. + * @vm: videomode structure to use for DPU and DPI initialization + * @stopped: indicates whether DPU are stopped + * @wait_queue: wait queue, used to wait for DPU shadow register update done and + * DPU stop register done interrupt signal. 
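+ *              Both events are signalled from the DPU interrupt handler.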
+ * @evt_update: wait queue condition for DPU shadow register
+ * @evt_stop: wait queue condition for DPU stop register
+ */
+struct dpu_context {
+	void __iomem *base;
+	int irq;
+	u8 if_type;
+	struct videomode vm;
+	bool stopped;
+	wait_queue_head_t wait_queue;
+	bool evt_update;
+	bool evt_stop;
+};
+
+/**
+ * Sprd DPU device structure
+ *
+ * @base: base drm_crtc object
+ * @drm: a pointer to the drm device
+ * @ctx: DPU's implementation specific context object
+ */
+struct sprd_dpu {
+	struct drm_crtc base;
+	struct drm_device *drm;
+	struct dpu_context ctx;
+};
+
+static inline struct sprd_dpu *to_sprd_crtc(struct drm_crtc *crtc)
+{
+	return container_of(crtc, struct sprd_dpu, base);
+}
+
+static inline void
+dpu_reg_set(struct dpu_context *ctx, u32 offset, u32 set_bits)
+{
+	u32 bits = readl_relaxed(ctx->base + offset);
+
+	writel(bits | set_bits, ctx->base + offset);
+}
+
+static inline void
+dpu_reg_clr(struct dpu_context *ctx, u32 offset, u32 clr_bits)
+{
+	u32 bits = readl_relaxed(ctx->base + offset);
+
+	writel(bits & ~clr_bits, ctx->base + offset);
+}
+
+static inline u32
+layer_reg_rd(struct dpu_context *ctx, u32 offset, int index)
+{
+	u32 layer_offset = offset + index * DPU_LAY_REG_OFFSET;
+
+	return readl(ctx->base + layer_offset);
+}
+
+static inline void
+layer_reg_wr(struct dpu_context *ctx, u32 offset, u32 cfg_bits, int index)
+{
+	u32 layer_offset = offset + index * DPU_LAY_REG_OFFSET;
+
+	writel(cfg_bits, ctx->base + layer_offset);
+}
+
+void sprd_dpu_run(struct sprd_dpu *dpu);
+void sprd_dpu_stop(struct sprd_dpu *dpu);
+
+#endif
diff --git a/drivers/gpu/drm/sprd/sprd_drm.c b/drivers/gpu/drm/sprd/sprd_drm.c
new file mode 100644
index 000000000000..a077e2d4d721
--- /dev/null
+++ b/drivers/gpu/drm/sprd/sprd_drm.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Unisoc Inc.
+ */ + +#include <linux/component.h> +#include <linux/dma-mapping.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of_graph.h> +#include <linux/of_platform.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_of.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_vblank.h> + +#include "sprd_drm.h" + +#define DRIVER_NAME "sprd" +#define DRIVER_DESC "Spreadtrum SoCs' DRM Driver" +#define DRIVER_DATE "20200201" +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 + +static const struct drm_mode_config_helper_funcs sprd_drm_mode_config_helper = { + .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, +}; + +static const struct drm_mode_config_funcs sprd_drm_mode_config_funcs = { + .fb_create = drm_gem_fb_create, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static void sprd_drm_mode_config_init(struct drm_device *drm) +{ + drm->mode_config.min_width = 0; + drm->mode_config.min_height = 0; + drm->mode_config.max_width = 8192; + drm->mode_config.max_height = 8192; + drm->mode_config.allow_fb_modifiers = true; + + drm->mode_config.funcs = &sprd_drm_mode_config_funcs; + drm->mode_config.helper_private = &sprd_drm_mode_config_helper; +} + +DEFINE_DRM_GEM_CMA_FOPS(sprd_drm_fops); + +static struct drm_driver sprd_drm_drv = { + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, + .fops = &sprd_drm_fops, + + /* GEM Operations */ + DRM_GEM_CMA_DRIVER_OPS, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, +}; + +static int sprd_drm_bind(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct drm_device *drm; + struct sprd_drm *sprd; + int ret; + + sprd = devm_drm_dev_alloc(dev, &sprd_drm_drv, struct sprd_drm, drm); + if (IS_ERR(sprd)) + return PTR_ERR(sprd); + + drm = &sprd->drm; + platform_set_drvdata(pdev, drm); + + ret = drmm_mode_config_init(drm); + if (ret) + return ret; + + sprd_drm_mode_config_init(drm); + + /* bind and init sub drivers */ + ret = component_bind_all(drm->dev, drm); + if (ret) { + drm_err(drm, "failed to bind all component.\n"); + return ret; + } + + /* vblank init */ + ret = drm_vblank_init(drm, drm->mode_config.num_crtc); + if (ret) { + drm_err(drm, "failed to initialize vblank.\n"); + goto err_unbind_all; + } + + /* reset all the states of crtc/plane/encoder/connector */ + drm_mode_config_reset(drm); + + /* init kms poll for handling hpd */ + drm_kms_helper_poll_init(drm); + + ret = drm_dev_register(drm, 0); + if (ret < 0) + goto err_kms_helper_poll_fini; + + return 0; + +err_kms_helper_poll_fini: + drm_kms_helper_poll_fini(drm); +err_unbind_all: + component_unbind_all(drm->dev, drm); + return ret; +} + +static void sprd_drm_unbind(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + + drm_dev_unregister(drm); + + drm_kms_helper_poll_fini(drm); + + component_unbind_all(drm->dev, drm); +} + +static const struct component_master_ops drm_component_ops = { + .bind = sprd_drm_bind, + .unbind = sprd_drm_unbind, +}; + +static int compare_of(struct device *dev, void *data) +{ + return dev->of_node == data; +} + +static int sprd_drm_probe(struct platform_device *pdev) +{ + return drm_of_component_probe(&pdev->dev, compare_of, &drm_component_ops); +} + +static int sprd_drm_remove(struct platform_device *pdev) +{ + 
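+	/* remove the component master added by drm_of_component_probe() */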
component_master_del(&pdev->dev, &drm_component_ops);
+	return 0;
+}
+
+static void sprd_drm_shutdown(struct platform_device *pdev)
+{
+	struct drm_device *drm = platform_get_drvdata(pdev);
+
+	if (!drm) {
+		dev_warn(&pdev->dev, "drm device is not available, skipping shutdown\n");
+		return;
+	}
+
+	drm_atomic_helper_shutdown(drm);
+}
+
+static const struct of_device_id drm_match_table[] = {
+	{ .compatible = "sprd,display-subsystem", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, drm_match_table);
+
+static struct platform_driver sprd_drm_driver = {
+	.probe = sprd_drm_probe,
+	.remove = sprd_drm_remove,
+	.shutdown = sprd_drm_shutdown,
+	.driver = {
+		.name = "sprd-drm-drv",
+		.of_match_table = drm_match_table,
+	},
+};
+
+static struct platform_driver *sprd_drm_drivers[] = {
+	&sprd_drm_driver,
+	&sprd_dpu_driver,
+	&sprd_dsi_driver,
+};
+
+static int __init sprd_drm_init(void)
+{
+	return platform_register_drivers(sprd_drm_drivers,
+					 ARRAY_SIZE(sprd_drm_drivers));
+}
+
+static void __exit sprd_drm_exit(void)
+{
+	platform_unregister_drivers(sprd_drm_drivers,
+				    ARRAY_SIZE(sprd_drm_drivers));
+}
+
+module_init(sprd_drm_init);
+module_exit(sprd_drm_exit);
+
+MODULE_AUTHOR("Leon He <leon.he@unisoc.com>");
+MODULE_AUTHOR("Kevin Tang <kevin.tang@unisoc.com>");
+MODULE_DESCRIPTION("Unisoc DRM KMS Master Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/sprd/sprd_drm.h b/drivers/gpu/drm/sprd/sprd_drm.h
new file mode 100644
index 000000000000..95d1b972f333
--- /dev/null
+++ b/drivers/gpu/drm/sprd/sprd_drm.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Unisoc Inc.
+ */
+
+#ifndef _SPRD_DRM_H_
+#define _SPRD_DRM_H_
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_print.h>
+
+struct sprd_drm {
+	struct drm_device drm;
+};
+
+extern struct platform_driver sprd_dpu_driver;
+extern struct platform_driver sprd_dsi_driver;
+
+#endif /* _SPRD_DRM_H_ */
diff --git a/drivers/gpu/drm/sprd/sprd_dsi.c b/drivers/gpu/drm/sprd/sprd_dsi.c
new file mode 100644
index 000000000000..911b3cddc264
--- /dev/null
+++ b/drivers/gpu/drm/sprd/sprd_dsi.c
@@ -0,0 +1,1073 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Unisoc Inc.
+ */ + +#include <linux/component.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/of_graph.h> +#include <video/mipi_display.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_of.h> +#include <drm/drm_probe_helper.h> + +#include "sprd_drm.h" +#include "sprd_dpu.h" +#include "sprd_dsi.h" + +#define SOFT_RESET 0x04 +#define MASK_PROTOCOL_INT 0x0C +#define MASK_INTERNAL_INT 0x14 +#define DSI_MODE_CFG 0x18 + +#define VIRTUAL_CHANNEL_ID 0x1C +#define GEN_RX_VCID GENMASK(1, 0) +#define VIDEO_PKT_VCID GENMASK(3, 2) + +#define DPI_VIDEO_FORMAT 0x20 +#define DPI_VIDEO_MODE_FORMAT GENMASK(5, 0) +#define LOOSELY18_EN BIT(6) + +#define VIDEO_PKT_CONFIG 0x24 +#define VIDEO_PKT_SIZE GENMASK(15, 0) +#define VIDEO_LINE_CHUNK_NUM GENMASK(31, 16) + +#define VIDEO_LINE_HBLK_TIME 0x28 +#define VIDEO_LINE_HBP_TIME GENMASK(15, 0) +#define VIDEO_LINE_HSA_TIME GENMASK(31, 16) + +#define VIDEO_LINE_TIME 0x2C + +#define VIDEO_VBLK_LINES 0x30 +#define VFP_LINES GENMASK(9, 0) +#define VBP_LINES GENMASK(19, 10) +#define VSA_LINES GENMASK(29, 20) + +#define VIDEO_VACTIVE_LINES 0x34 + +#define VID_MODE_CFG 0x38 +#define VID_MODE_TYPE GENMASK(1, 0) +#define LP_VSA_EN BIT(8) +#define LP_VBP_EN BIT(9) +#define LP_VFP_EN BIT(10) +#define LP_VACT_EN BIT(11) +#define LP_HBP_EN BIT(12) +#define LP_HFP_EN BIT(13) +#define FRAME_BTA_ACK_EN BIT(14) + +#define TIMEOUT_CNT_CLK_CONFIG 0x40 +#define HTX_TO_CONFIG 0x44 +#define LRX_H_TO_CONFIG 0x48 + +#define TX_ESC_CLK_CONFIG 0x5C + +#define CMD_MODE_CFG 0x68 +#define TEAR_FX_EN BIT(0) + +#define GEN_HDR 0x6C +#define GEN_DT GENMASK(5, 0) +#define GEN_VC GENMASK(7, 6) + +#define GEN_PLD_DATA 0x70 + +#define PHY_CLK_LANE_LP_CTRL 0x74 +#define PHY_CLKLANE_TX_REQ_HS BIT(0) +#define AUTO_CLKLANE_CTRL_EN BIT(1) + +#define PHY_INTERFACE_CTRL 0x78 +#define RF_PHY_SHUTDOWN BIT(0) +#define RF_PHY_RESET_N BIT(1) +#define RF_PHY_CLK_EN BIT(2) + +#define CMD_MODE_STATUS 0x98 +#define GEN_CMD_RDATA_FIFO_EMPTY BIT(1) +#define GEN_CMD_WDATA_FIFO_EMPTY BIT(3) +#define GEN_CMD_CMD_FIFO_EMPTY BIT(5) +#define GEN_CMD_RDCMD_DONE BIT(7) + +#define PHY_STATUS 0x9C +#define PHY_LOCK BIT(1) + +#define PHY_MIN_STOP_TIME 0xA0 +#define PHY_LANE_NUM_CONFIG 0xA4 + +#define PHY_CLKLANE_TIME_CONFIG 0xA8 +#define PHY_CLKLANE_LP_TO_HS_TIME GENMASK(15, 0) +#define PHY_CLKLANE_HS_TO_LP_TIME GENMASK(31, 16) + +#define PHY_DATALANE_TIME_CONFIG 0xAC +#define PHY_DATALANE_LP_TO_HS_TIME GENMASK(15, 0) +#define PHY_DATALANE_HS_TO_LP_TIME GENMASK(31, 16) + +#define MAX_READ_TIME 0xB0 + +#define RX_PKT_CHECK_CONFIG 0xB4 +#define RX_PKT_ECC_EN BIT(0) +#define RX_PKT_CRC_EN BIT(1) + +#define TA_EN 0xB8 + +#define EOTP_EN 0xBC +#define TX_EOTP_EN BIT(0) +#define RX_EOTP_EN BIT(1) + +#define VIDEO_NULLPKT_SIZE 0xC0 +#define DCS_WM_PKT_SIZE 0xC4 + +#define VIDEO_SIG_DELAY_CONFIG 0xD0 +#define VIDEO_SIG_DELAY GENMASK(23, 0) + +#define PHY_TST_CTRL0 0xF0 +#define PHY_TESTCLR BIT(0) +#define PHY_TESTCLK BIT(1) + +#define PHY_TST_CTRL1 0xF4 +#define PHY_TESTDIN GENMASK(7, 0) +#define PHY_TESTDOUT GENMASK(15, 8) +#define PHY_TESTEN BIT(16) + +#define host_to_dsi(host) \ + container_of(host, struct sprd_dsi, host) + +static inline u32 +dsi_reg_rd(struct dsi_context *ctx, u32 offset, u32 mask, + u32 shift) +{ + return (readl(ctx->base + offset) & mask) >> shift; +} + +static inline void +dsi_reg_wr(struct dsi_context *ctx, u32 offset, u32 mask, + u32 shift, u32 val) +{ + u32 ret; + 
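+	/* read-modify-write: replace only the field selected by mask */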
+ ret = readl(ctx->base + offset); + ret &= ~mask; + ret |= (val << shift) & mask; + writel(ret, ctx->base + offset); +} + +static inline void +dsi_reg_up(struct dsi_context *ctx, u32 offset, u32 mask, + u32 val) +{ + u32 ret = readl(ctx->base + offset); + + writel((ret & ~mask) | (val & mask), ctx->base + offset); +} + +static int regmap_tst_io_write(void *context, u32 reg, u32 val) +{ + struct sprd_dsi *dsi = context; + struct dsi_context *ctx = &dsi->ctx; + + if (val > 0xff || reg > 0xff) + return -EINVAL; + + drm_dbg(dsi->drm, "reg = 0x%02x, val = 0x%02x\n", reg, val); + + dsi_reg_up(ctx, PHY_TST_CTRL1, PHY_TESTEN, PHY_TESTEN); + dsi_reg_wr(ctx, PHY_TST_CTRL1, PHY_TESTDIN, 0, reg); + dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, PHY_TESTCLK); + dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, 0); + dsi_reg_up(ctx, PHY_TST_CTRL1, PHY_TESTEN, 0); + dsi_reg_wr(ctx, PHY_TST_CTRL1, PHY_TESTDIN, 0, val); + dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, PHY_TESTCLK); + dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, 0); + + return 0; +} + +static int regmap_tst_io_read(void *context, u32 reg, u32 *val) +{ + struct sprd_dsi *dsi = context; + struct dsi_context *ctx = &dsi->ctx; + int ret; + + if (reg > 0xff) + return -EINVAL; + + dsi_reg_up(ctx, PHY_TST_CTRL1, PHY_TESTEN, PHY_TESTEN); + dsi_reg_wr(ctx, PHY_TST_CTRL1, PHY_TESTDIN, 0, reg); + dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, PHY_TESTCLK); + dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, 0); + dsi_reg_up(ctx, PHY_TST_CTRL1, PHY_TESTEN, 0); + + udelay(1); + + ret = dsi_reg_rd(ctx, PHY_TST_CTRL1, PHY_TESTDOUT, 8); + if (ret < 0) + return ret; + + *val = ret; + + drm_dbg(dsi->drm, "reg = 0x%02x, val = 0x%02x\n", reg, *val); + return 0; +} + +static struct regmap_bus regmap_tst_io = { + .reg_write = regmap_tst_io_write, + .reg_read = regmap_tst_io_read, +}; + +static const struct regmap_config byte_config = { + .reg_bits = 8, + .val_bits = 8, +}; + +static int dphy_wait_pll_locked(struct dsi_context *ctx) +{ + struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx); + int i; + + for (i = 0; i < 50000; i++) { + if (dsi_reg_rd(ctx, PHY_STATUS, PHY_LOCK, 1)) + return 0; + udelay(3); + } + + drm_err(dsi->drm, "dphy pll can not be locked\n"); + return -ETIMEDOUT; +} + +static int dsi_wait_tx_payload_fifo_empty(struct dsi_context *ctx) +{ + int i; + + for (i = 0; i < 5000; i++) { + if (dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_WDATA_FIFO_EMPTY, 3)) + return 0; + udelay(1); + } + + return -ETIMEDOUT; +} + +static int dsi_wait_tx_cmd_fifo_empty(struct dsi_context *ctx) +{ + int i; + + for (i = 0; i < 5000; i++) { + if (dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_CMD_FIFO_EMPTY, 5)) + return 0; + udelay(1); + } + + return -ETIMEDOUT; +} + +static int dsi_wait_rd_resp_completed(struct dsi_context *ctx) +{ + int i; + + for (i = 0; i < 10000; i++) { + if (dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_RDCMD_DONE, 7)) + return 0; + udelay(10); + } + + return -ETIMEDOUT; +} + +static u16 calc_bytes_per_pixel_x100(int coding) +{ + u16 bpp_x100; + + switch (coding) { + case COLOR_CODE_16BIT_CONFIG1: + case COLOR_CODE_16BIT_CONFIG2: + case COLOR_CODE_16BIT_CONFIG3: + bpp_x100 = 200; + break; + case COLOR_CODE_18BIT_CONFIG1: + case COLOR_CODE_18BIT_CONFIG2: + bpp_x100 = 225; + break; + case COLOR_CODE_24BIT: + bpp_x100 = 300; + break; + case COLOR_CODE_COMPRESSTION: + bpp_x100 = 100; + break; + case COLOR_CODE_20BIT_YCC422_LOOSELY: + bpp_x100 = 250; + break; + case COLOR_CODE_24BIT_YCC422: + bpp_x100 = 300; + break; + case COLOR_CODE_16BIT_YCC422: + bpp_x100 = 200; + break; + 
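+	/* 30 bpp = 3.75 bytes per pixel; the x100 scaling keeps it integral */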
case COLOR_CODE_30BIT: + bpp_x100 = 375; + break; + case COLOR_CODE_36BIT: + bpp_x100 = 450; + break; + case COLOR_CODE_12BIT_YCC420: + bpp_x100 = 150; + break; + default: + DRM_ERROR("invalid color coding"); + bpp_x100 = 0; + break; + } + + return bpp_x100; +} + +static u8 calc_video_size_step(int coding) +{ + u8 video_size_step; + + switch (coding) { + case COLOR_CODE_16BIT_CONFIG1: + case COLOR_CODE_16BIT_CONFIG2: + case COLOR_CODE_16BIT_CONFIG3: + case COLOR_CODE_18BIT_CONFIG1: + case COLOR_CODE_18BIT_CONFIG2: + case COLOR_CODE_24BIT: + case COLOR_CODE_COMPRESSTION: + return video_size_step = 1; + case COLOR_CODE_20BIT_YCC422_LOOSELY: + case COLOR_CODE_24BIT_YCC422: + case COLOR_CODE_16BIT_YCC422: + case COLOR_CODE_30BIT: + case COLOR_CODE_36BIT: + case COLOR_CODE_12BIT_YCC420: + return video_size_step = 2; + default: + DRM_ERROR("invalid color coding"); + return 0; + } +} + +static u16 round_video_size(int coding, u16 video_size) +{ + switch (coding) { + case COLOR_CODE_16BIT_YCC422: + case COLOR_CODE_24BIT_YCC422: + case COLOR_CODE_20BIT_YCC422_LOOSELY: + case COLOR_CODE_12BIT_YCC420: + /* round up active H pixels to a multiple of 2 */ + if ((video_size % 2) != 0) + video_size += 1; + break; + default: + break; + } + + return video_size; +} + +#define SPRD_MIPI_DSI_FMT_DSC 0xff +static u32 fmt_to_coding(u32 fmt) +{ + switch (fmt) { + case MIPI_DSI_FMT_RGB565: + return COLOR_CODE_16BIT_CONFIG1; + case MIPI_DSI_FMT_RGB666: + case MIPI_DSI_FMT_RGB666_PACKED: + return COLOR_CODE_18BIT_CONFIG1; + case MIPI_DSI_FMT_RGB888: + return COLOR_CODE_24BIT; + case SPRD_MIPI_DSI_FMT_DSC: + return COLOR_CODE_COMPRESSTION; + default: + DRM_ERROR("Unsupported format (%d)\n", fmt); + return COLOR_CODE_24BIT; + } +} + +#define ns_to_cycle(ns, byte_clk) \ + DIV_ROUND_UP((ns) * (byte_clk), 1000000) + +static void sprd_dsi_init(struct dsi_context *ctx) +{ + struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx); + u32 byte_clk = dsi->slave->hs_rate / 8; + u16 data_hs2lp, data_lp2hs, clk_hs2lp, clk_lp2hs; + u16 max_rd_time; + int div; + + writel(0, ctx->base + SOFT_RESET); + writel(0xffffffff, ctx->base + MASK_PROTOCOL_INT); + writel(0xffffffff, ctx->base + MASK_INTERNAL_INT); + writel(1, ctx->base + DSI_MODE_CFG); + dsi_reg_up(ctx, EOTP_EN, RX_EOTP_EN, 0); + dsi_reg_up(ctx, EOTP_EN, TX_EOTP_EN, 0); + dsi_reg_up(ctx, RX_PKT_CHECK_CONFIG, RX_PKT_ECC_EN, RX_PKT_ECC_EN); + dsi_reg_up(ctx, RX_PKT_CHECK_CONFIG, RX_PKT_CRC_EN, RX_PKT_CRC_EN); + writel(1, ctx->base + TA_EN); + dsi_reg_up(ctx, VIRTUAL_CHANNEL_ID, VIDEO_PKT_VCID, 0); + dsi_reg_up(ctx, VIRTUAL_CHANNEL_ID, GEN_RX_VCID, 0); + + div = DIV_ROUND_UP(byte_clk, dsi->slave->lp_rate); + writel(div, ctx->base + TX_ESC_CLK_CONFIG); + + max_rd_time = ns_to_cycle(ctx->max_rd_time, byte_clk); + writel(max_rd_time, ctx->base + MAX_READ_TIME); + + data_hs2lp = ns_to_cycle(ctx->data_hs2lp, byte_clk); + data_lp2hs = ns_to_cycle(ctx->data_lp2hs, byte_clk); + clk_hs2lp = ns_to_cycle(ctx->clk_hs2lp, byte_clk); + clk_lp2hs = ns_to_cycle(ctx->clk_lp2hs, byte_clk); + dsi_reg_wr(ctx, PHY_DATALANE_TIME_CONFIG, + PHY_DATALANE_HS_TO_LP_TIME, 16, data_hs2lp); + dsi_reg_wr(ctx, PHY_DATALANE_TIME_CONFIG, + PHY_DATALANE_LP_TO_HS_TIME, 0, data_lp2hs); + dsi_reg_wr(ctx, PHY_CLKLANE_TIME_CONFIG, + PHY_CLKLANE_HS_TO_LP_TIME, 16, clk_hs2lp); + dsi_reg_wr(ctx, PHY_CLKLANE_TIME_CONFIG, + PHY_CLKLANE_LP_TO_HS_TIME, 0, clk_lp2hs); + + writel(1, ctx->base + SOFT_RESET); +} + +/* + * Free up resources and shutdown host controller and PHY + */ +static void sprd_dsi_fini(struct 
dsi_context *ctx) +{ + writel(0xffffffff, ctx->base + MASK_PROTOCOL_INT); + writel(0xffffffff, ctx->base + MASK_INTERNAL_INT); + writel(0, ctx->base + SOFT_RESET); +} + +/* + * If not in burst mode, it will compute the video and null packet sizes + * according to necessity. + * Configure timers for data lanes and/or clock lane to return to LP when + * bandwidth is not filled by data. + */ +static int sprd_dsi_dpi_video(struct dsi_context *ctx) +{ + struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx); + struct videomode *vm = &ctx->vm; + u32 byte_clk = dsi->slave->hs_rate / 8; + u16 bpp_x100; + u16 video_size; + u32 ratio_x1000; + u16 null_pkt_size = 0; + u8 video_size_step; + u32 hs_to; + u32 total_bytes; + u32 bytes_per_chunk; + u32 chunks = 0; + u32 bytes_left = 0; + u32 chunk_overhead; + const u8 pkt_header = 6; + u8 coding; + int div; + u16 hline; + u16 byte_cycle; + + coding = fmt_to_coding(dsi->slave->format); + video_size = round_video_size(coding, vm->hactive); + bpp_x100 = calc_bytes_per_pixel_x100(coding); + video_size_step = calc_video_size_step(coding); + ratio_x1000 = byte_clk * 1000 / (vm->pixelclock / 1000); + hline = vm->hactive + vm->hsync_len + vm->hfront_porch + + vm->hback_porch; + + writel(0, ctx->base + SOFT_RESET); + dsi_reg_wr(ctx, VID_MODE_CFG, FRAME_BTA_ACK_EN, 15, ctx->frame_ack_en); + dsi_reg_wr(ctx, DPI_VIDEO_FORMAT, DPI_VIDEO_MODE_FORMAT, 0, coding); + dsi_reg_wr(ctx, VID_MODE_CFG, VID_MODE_TYPE, 0, ctx->burst_mode); + byte_cycle = 95 * hline * ratio_x1000 / 100000; + dsi_reg_wr(ctx, VIDEO_SIG_DELAY_CONFIG, VIDEO_SIG_DELAY, 0, byte_cycle); + byte_cycle = hline * ratio_x1000 / 1000; + writel(byte_cycle, ctx->base + VIDEO_LINE_TIME); + byte_cycle = vm->hsync_len * ratio_x1000 / 1000; + dsi_reg_wr(ctx, VIDEO_LINE_HBLK_TIME, VIDEO_LINE_HSA_TIME, 16, byte_cycle); + byte_cycle = vm->hback_porch * ratio_x1000 / 1000; + dsi_reg_wr(ctx, VIDEO_LINE_HBLK_TIME, VIDEO_LINE_HBP_TIME, 0, byte_cycle); + writel(vm->vactive, ctx->base + VIDEO_VACTIVE_LINES); + dsi_reg_wr(ctx, VIDEO_VBLK_LINES, VFP_LINES, 0, vm->vfront_porch); + dsi_reg_wr(ctx, VIDEO_VBLK_LINES, VBP_LINES, 10, vm->vback_porch); + dsi_reg_wr(ctx, VIDEO_VBLK_LINES, VSA_LINES, 20, vm->vsync_len); + dsi_reg_up(ctx, VID_MODE_CFG, LP_HBP_EN | LP_HFP_EN | LP_VACT_EN | + LP_VFP_EN | LP_VBP_EN | LP_VSA_EN, LP_HBP_EN | LP_HFP_EN | + LP_VACT_EN | LP_VFP_EN | LP_VBP_EN | LP_VSA_EN); + + hs_to = (hline * vm->vactive) + (2 * bpp_x100) / 100; + for (div = 0x80; (div < hs_to) && (div > 2); div--) { + if ((hs_to % div) == 0) { + writel(div, ctx->base + TIMEOUT_CNT_CLK_CONFIG); + writel(hs_to / div, ctx->base + LRX_H_TO_CONFIG); + writel(hs_to / div, ctx->base + HTX_TO_CONFIG); + break; + } + } + + if (ctx->burst_mode == VIDEO_BURST_WITH_SYNC_PULSES) { + dsi_reg_wr(ctx, VIDEO_PKT_CONFIG, VIDEO_PKT_SIZE, 0, video_size); + writel(0, ctx->base + VIDEO_NULLPKT_SIZE); + dsi_reg_up(ctx, VIDEO_PKT_CONFIG, VIDEO_LINE_CHUNK_NUM, 0); + } else { + /* non burst transmission */ + null_pkt_size = 0; + + /* bytes to be sent - first as one chunk */ + bytes_per_chunk = vm->hactive * bpp_x100 / 100 + pkt_header; + + /* hline total bytes from the DPI interface */ + total_bytes = (vm->hactive + vm->hfront_porch) * + ratio_x1000 / dsi->slave->lanes / 1000; + + /* check if the pixels actually fit on the DSI link */ + if (total_bytes < bytes_per_chunk) { + drm_err(dsi->drm, "current resolution can not be set\n"); + return -EINVAL; + } + + chunk_overhead = total_bytes - bytes_per_chunk; + + /* overhead higher than 1 -> enable multi packets */ 
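+	/*
+	 * The loop below probes chunk sizes that divide hactive evenly;
+	 * whatever line bandwidth remains after the chunks (less one
+	 * 6-byte packet header per chunk) is filled with null packets.
+	 */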
+	if (chunk_overhead > 1) {
+		/* multi packets */
+		for (video_size = video_size_step;
+		     video_size < vm->hactive;
+		     video_size += video_size_step) {
+			if (vm->hactive * 1000 / video_size % 1000)
+				continue;
+
+			chunks = vm->hactive / video_size;
+			bytes_per_chunk = bpp_x100 * video_size / 100 +
+					  pkt_header;
+			if (total_bytes >= (bytes_per_chunk * chunks)) {
+				bytes_left = total_bytes -
+					     bytes_per_chunk * chunks;
+				break;
+			}
+		}
+
+		/* prevent overflow (unsigned - unsigned) */
+		if (bytes_left > (pkt_header * chunks)) {
+			null_pkt_size = (bytes_left -
+					pkt_header * chunks) / chunks;
+			/* avoid register overflow */
+			if (null_pkt_size > 1023)
+				null_pkt_size = 1023;
+		}
+
+	} else {
+		/* single packet */
+		chunks = 1;
+
+		/* round video_size up to a multiple of video_size_step */
+		for (video_size = vm->hactive;
+		     (video_size % video_size_step) != 0;
+		     video_size++)
+			;
+	}
+
+	dsi_reg_wr(ctx, VIDEO_PKT_CONFIG, VIDEO_PKT_SIZE, 0, video_size);
+	writel(null_pkt_size, ctx->base + VIDEO_NULLPKT_SIZE);
+	dsi_reg_wr(ctx, VIDEO_PKT_CONFIG, VIDEO_LINE_CHUNK_NUM, 16, chunks);
+	}
+
+	writel(ctx->int0_mask, ctx->base + MASK_PROTOCOL_INT);
+	writel(ctx->int1_mask, ctx->base + MASK_INTERNAL_INT);
+	writel(1, ctx->base + SOFT_RESET);
+
+	return 0;
+}
+
+static void sprd_dsi_edpi_video(struct dsi_context *ctx)
+{
+	struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx);
+	const u32 fifo_depth = 1096;
+	const u32 word_length = 4;
+	u32 hactive = ctx->vm.hactive;
+	u32 bpp_x100;
+	u32 max_fifo_len;
+	u8 coding;
+
+	coding = fmt_to_coding(dsi->slave->format);
+	bpp_x100 = calc_bytes_per_pixel_x100(coding);
+	max_fifo_len = word_length * fifo_depth * 100 / bpp_x100;
+
+	writel(0, ctx->base + SOFT_RESET);
+	dsi_reg_wr(ctx, DPI_VIDEO_FORMAT, DPI_VIDEO_MODE_FORMAT, 0, coding);
+	dsi_reg_wr(ctx, CMD_MODE_CFG, TEAR_FX_EN, 0, ctx->te_ack_en);
+
+	if (max_fifo_len > hactive)
+		writel(hactive, ctx->base + DCS_WM_PKT_SIZE);
+	else
+		writel(max_fifo_len, ctx->base + DCS_WM_PKT_SIZE);
+
+	writel(ctx->int0_mask, ctx->base + MASK_PROTOCOL_INT);
+	writel(ctx->int1_mask, ctx->base + MASK_INTERNAL_INT);
+	writel(1, ctx->base + SOFT_RESET);
+}
+
+/*
+ * Send a packet on the generic interface. This function busy-waits for the
+ * buffers to clear, with the delay bounded by roughly
+ * (param_length / 4) x DSIH_FIFO_ACTIVE_WAIT register access times.
+ *
+ * Null and Blanking packets cannot be sent through this path due to a
+ * controller restriction.
+ */
+static int sprd_dsi_wr_pkt(struct dsi_context *ctx, u8 vc, u8 type,
+			   const u8 *param, u16 len)
+{
+	struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx);
+	u8 wc_lsbyte, wc_msbyte;
+	u32 payload;
+	int i, j, ret;
+
+	if (vc > 3)
+		return -EINVAL;
+
+	/* 1st: for a long packet, the payload must be configured first */
+	ret = dsi_wait_tx_payload_fifo_empty(ctx);
+	if (ret) {
+		drm_err(dsi->drm, "tx payload fifo is not empty\n");
+		return ret;
+	}
+
+	if (len > 2) {
+		for (i = 0, j = 0; i < len; i += j) {
+			payload = 0;
+			for (j = 0; (j < 4) && ((j + i) < (len)); j++)
+				payload |= param[i + j] << (j * 8);
+
+			writel(payload, ctx->base + GEN_PLD_DATA);
+		}
+		wc_lsbyte = len & 0xff;
+		wc_msbyte = len >> 8;
+	} else {
+		wc_lsbyte = (len > 0) ? param[0] : 0;
+		wc_msbyte = (len > 1) ?
param[1] : 0; + } + + /* 2nd: then set packet header */ + ret = dsi_wait_tx_cmd_fifo_empty(ctx); + if (ret) { + drm_err(dsi->drm, "tx cmd fifo is not empty\n"); + return ret; + } + + writel(type | (vc << 6) | (wc_lsbyte << 8) | (wc_msbyte << 16), + ctx->base + GEN_HDR); + + return 0; +} + +/* + * Send READ packet to peripheral using the generic interface, + * this will force command mode and stop video mode (because of BTA). + * + * This function has an active delay to wait for the buffer to clear, + * the delay is limited to 2 x DSIH_FIFO_ACTIVE_WAIT + * (waiting for command buffer, and waiting for receiving) + * @note this function will enable BTA + */ +static int sprd_dsi_rd_pkt(struct dsi_context *ctx, u8 vc, u8 type, + u8 msb_byte, u8 lsb_byte, + u8 *buffer, u8 bytes_to_read) +{ + struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx); + int i, ret; + int count = 0; + u32 temp; + + if (vc > 3) + return -EINVAL; + + /* 1st: send read command to peripheral */ + ret = dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_CMD_FIFO_EMPTY, 5); + if (!ret) + return -EIO; + + writel(type | (vc << 6) | (lsb_byte << 8) | (msb_byte << 16), + ctx->base + GEN_HDR); + + /* 2nd: wait peripheral response completed */ + ret = dsi_wait_rd_resp_completed(ctx); + if (ret) { + drm_err(dsi->drm, "wait read response time out\n"); + return ret; + } + + /* 3rd: get data from rx payload fifo */ + ret = dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_RDATA_FIFO_EMPTY, 1); + if (ret) { + drm_err(dsi->drm, "rx payload fifo empty\n"); + return -EIO; + } + + for (i = 0; i < 100; i++) { + temp = readl(ctx->base + GEN_PLD_DATA); + + if (count < bytes_to_read) + buffer[count++] = temp & 0xff; + if (count < bytes_to_read) + buffer[count++] = (temp >> 8) & 0xff; + if (count < bytes_to_read) + buffer[count++] = (temp >> 16) & 0xff; + if (count < bytes_to_read) + buffer[count++] = (temp >> 24) & 0xff; + + ret = dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_RDATA_FIFO_EMPTY, 1); + if (ret) + return count; + } + + return 0; +} + +static void sprd_dsi_set_work_mode(struct dsi_context *ctx, u8 mode) +{ + if (mode == DSI_MODE_CMD) + writel(1, ctx->base + DSI_MODE_CFG); + else + writel(0, ctx->base + DSI_MODE_CFG); +} + +static void sprd_dsi_state_reset(struct dsi_context *ctx) +{ + writel(0, ctx->base + SOFT_RESET); + udelay(100); + writel(1, ctx->base + SOFT_RESET); +} + +static int sprd_dphy_init(struct dsi_context *ctx) +{ + struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx); + int ret; + + dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_RESET_N, 0); + dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_SHUTDOWN, 0); + dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_CLK_EN, 0); + + dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLR, 0); + dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLR, PHY_TESTCLR); + dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLR, 0); + + dphy_pll_config(ctx); + dphy_timing_config(ctx); + + dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_SHUTDOWN, RF_PHY_SHUTDOWN); + dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_RESET_N, RF_PHY_RESET_N); + writel(0x1C, ctx->base + PHY_MIN_STOP_TIME); + dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_CLK_EN, RF_PHY_CLK_EN); + writel(dsi->slave->lanes - 1, ctx->base + PHY_LANE_NUM_CONFIG); + + ret = dphy_wait_pll_locked(ctx); + if (ret) { + drm_err(dsi->drm, "dphy initial failed\n"); + return ret; + } + + return 0; +} + +static void sprd_dphy_fini(struct dsi_context *ctx) +{ + dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_RESET_N, 0); + dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_SHUTDOWN, 0); + dsi_reg_up(ctx, PHY_INTERFACE_CTRL, 
RF_PHY_RESET_N, RF_PHY_RESET_N); +} + +static void sprd_dsi_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adj_mode) +{ + struct sprd_dsi *dsi = encoder_to_dsi(encoder); + + drm_display_mode_to_videomode(adj_mode, &dsi->ctx.vm); +} + +static void sprd_dsi_encoder_enable(struct drm_encoder *encoder) +{ + struct sprd_dsi *dsi = encoder_to_dsi(encoder); + struct sprd_dpu *dpu = to_sprd_crtc(encoder->crtc); + struct dsi_context *ctx = &dsi->ctx; + + if (ctx->enabled) { + drm_warn(dsi->drm, "dsi is initialized\n"); + return; + } + + sprd_dsi_init(ctx); + if (ctx->work_mode == DSI_MODE_VIDEO) + sprd_dsi_dpi_video(ctx); + else + sprd_dsi_edpi_video(ctx); + + sprd_dphy_init(ctx); + + sprd_dsi_set_work_mode(ctx, ctx->work_mode); + sprd_dsi_state_reset(ctx); + + if (dsi->slave->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) { + dsi_reg_up(ctx, PHY_CLK_LANE_LP_CTRL, AUTO_CLKLANE_CTRL_EN, + AUTO_CLKLANE_CTRL_EN); + } else { + dsi_reg_up(ctx, PHY_CLK_LANE_LP_CTRL, RF_PHY_CLK_EN, RF_PHY_CLK_EN); + dsi_reg_up(ctx, PHY_CLK_LANE_LP_CTRL, PHY_CLKLANE_TX_REQ_HS, + PHY_CLKLANE_TX_REQ_HS); + dphy_wait_pll_locked(ctx); + } + + sprd_dpu_run(dpu); + + ctx->enabled = true; +} + +static void sprd_dsi_encoder_disable(struct drm_encoder *encoder) +{ + struct sprd_dsi *dsi = encoder_to_dsi(encoder); + struct sprd_dpu *dpu = to_sprd_crtc(encoder->crtc); + struct dsi_context *ctx = &dsi->ctx; + + if (!ctx->enabled) { + drm_warn(dsi->drm, "dsi isn't initialized\n"); + return; + } + + sprd_dpu_stop(dpu); + sprd_dphy_fini(ctx); + sprd_dsi_fini(ctx); + + ctx->enabled = false; +} + +static const struct drm_encoder_helper_funcs sprd_encoder_helper_funcs = { + .mode_set = sprd_dsi_encoder_mode_set, + .enable = sprd_dsi_encoder_enable, + .disable = sprd_dsi_encoder_disable +}; + +static const struct drm_encoder_funcs sprd_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static int sprd_dsi_encoder_init(struct sprd_dsi *dsi, + struct device *dev) +{ + struct drm_encoder *encoder = &dsi->encoder; + u32 crtc_mask; + int ret; + + crtc_mask = drm_of_find_possible_crtcs(dsi->drm, dev->of_node); + if (!crtc_mask) { + drm_err(dsi->drm, "failed to find crtc mask\n"); + return -EINVAL; + } + + drm_dbg(dsi->drm, "find possible crtcs: 0x%08x\n", crtc_mask); + + encoder->possible_crtcs = crtc_mask; + ret = drm_encoder_init(dsi->drm, encoder, &sprd_encoder_funcs, + DRM_MODE_ENCODER_DSI, NULL); + if (ret) { + drm_err(dsi->drm, "failed to init dsi encoder\n"); + return ret; + } + + drm_encoder_helper_add(encoder, &sprd_encoder_helper_funcs); + + return 0; +} + +static int sprd_dsi_bridge_init(struct sprd_dsi *dsi, + struct device *dev) +{ + int ret; + + dsi->panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0); + if (IS_ERR(dsi->panel_bridge)) + return PTR_ERR(dsi->panel_bridge); + + ret = drm_bridge_attach(&dsi->encoder, dsi->panel_bridge, NULL, 0); + if (ret) + return ret; + + return 0; +} + +static int sprd_dsi_context_init(struct sprd_dsi *dsi, + struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct dsi_context *ctx = &dsi->ctx; + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ctx->base = devm_ioremap(dev, res->start, resource_size(res)); + if (!ctx->base) { + drm_err(dsi->drm, "failed to map dsi host registers\n"); + return -ENXIO; + } + + ctx->regmap = devm_regmap_init(dev, ®map_tst_io, dsi, &byte_config); + if (IS_ERR(ctx->regmap)) { + drm_err(dsi->drm, "dphy regmap init failed\n"); + return 
PTR_ERR(ctx->regmap); + } + + ctx->data_hs2lp = 120; + ctx->data_lp2hs = 500; + ctx->clk_hs2lp = 4; + ctx->clk_lp2hs = 15; + ctx->max_rd_time = 6000; + ctx->int0_mask = 0xffffffff; + ctx->int1_mask = 0xffffffff; + ctx->enabled = true; + + return 0; +} + +static int sprd_dsi_bind(struct device *dev, struct device *master, void *data) +{ + struct drm_device *drm = data; + struct sprd_dsi *dsi = dev_get_drvdata(dev); + int ret; + + dsi->drm = drm; + + ret = sprd_dsi_encoder_init(dsi, dev); + if (ret) + return ret; + + ret = sprd_dsi_bridge_init(dsi, dev); + if (ret) + return ret; + + ret = sprd_dsi_context_init(dsi, dev); + if (ret) + return ret; + + return 0; +} + +static void sprd_dsi_unbind(struct device *dev, + struct device *master, void *data) +{ + struct sprd_dsi *dsi = dev_get_drvdata(dev); + + drm_of_panel_bridge_remove(dev->of_node, 1, 0); + + drm_encoder_cleanup(&dsi->encoder); +} + +static const struct component_ops dsi_component_ops = { + .bind = sprd_dsi_bind, + .unbind = sprd_dsi_unbind, +}; + +static int sprd_dsi_host_attach(struct mipi_dsi_host *host, + struct mipi_dsi_device *slave) +{ + struct sprd_dsi *dsi = host_to_dsi(host); + struct dsi_context *ctx = &dsi->ctx; + + dsi->slave = slave; + + if (slave->mode_flags & MIPI_DSI_MODE_VIDEO) + ctx->work_mode = DSI_MODE_VIDEO; + else + ctx->work_mode = DSI_MODE_CMD; + + if (slave->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) + ctx->burst_mode = VIDEO_BURST_WITH_SYNC_PULSES; + else if (slave->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) + ctx->burst_mode = VIDEO_NON_BURST_WITH_SYNC_PULSES; + else + ctx->burst_mode = VIDEO_NON_BURST_WITH_SYNC_EVENTS; + + return component_add(host->dev, &dsi_component_ops); +} + +static int sprd_dsi_host_detach(struct mipi_dsi_host *host, + struct mipi_dsi_device *slave) +{ + component_del(host->dev, &dsi_component_ops); + + return 0; +} + +static ssize_t sprd_dsi_host_transfer(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg) +{ + struct sprd_dsi *dsi = host_to_dsi(host); + const u8 *tx_buf = msg->tx_buf; + + if (msg->rx_buf && msg->rx_len) { + u8 lsb = (msg->tx_len > 0) ? tx_buf[0] : 0; + u8 msb = (msg->tx_len > 1) ? 
tx_buf[1] : 0; + + return sprd_dsi_rd_pkt(&dsi->ctx, msg->channel, msg->type, + msb, lsb, msg->rx_buf, msg->rx_len); + } + + if (msg->tx_buf && msg->tx_len) + return sprd_dsi_wr_pkt(&dsi->ctx, msg->channel, msg->type, + tx_buf, msg->tx_len); + + return 0; +} + +static const struct mipi_dsi_host_ops sprd_dsi_host_ops = { + .attach = sprd_dsi_host_attach, + .detach = sprd_dsi_host_detach, + .transfer = sprd_dsi_host_transfer, +}; + +static const struct of_device_id dsi_match_table[] = { + { .compatible = "sprd,sharkl3-dsi-host" }, + { /* sentinel */ }, +}; + +static int sprd_dsi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sprd_dsi *dsi; + + dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL); + if (!dsi) + return -ENOMEM; + + dev_set_drvdata(dev, dsi); + + dsi->host.ops = &sprd_dsi_host_ops; + dsi->host.dev = dev; + + return mipi_dsi_host_register(&dsi->host); +} + +static int sprd_dsi_remove(struct platform_device *pdev) +{ + struct sprd_dsi *dsi = dev_get_drvdata(&pdev->dev); + + mipi_dsi_host_unregister(&dsi->host); + + return 0; +} + +struct platform_driver sprd_dsi_driver = { + .probe = sprd_dsi_probe, + .remove = sprd_dsi_remove, + .driver = { + .name = "sprd-dsi-drv", + .of_match_table = dsi_match_table, + }, +}; + +MODULE_AUTHOR("Leon He <leon.he@unisoc.com>"); +MODULE_AUTHOR("Kevin Tang <kevin.tang@unisoc.com>"); +MODULE_DESCRIPTION("Unisoc MIPI DSI HOST Controller Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/sprd/sprd_dsi.h b/drivers/gpu/drm/sprd/sprd_dsi.h new file mode 100644 index 000000000000..d858ebb11115 --- /dev/null +++ b/drivers/gpu/drm/sprd/sprd_dsi.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 Unisoc Inc. + */ + +#ifndef __SPRD_DSI_H__ +#define __SPRD_DSI_H__ + +#include <linux/of.h> +#include <linux/device.h> +#include <linux/regmap.h> +#include <video/videomode.h> + +#include <drm/drm_bridge.h> +#include <drm/drm_connector.h> +#include <drm/drm_encoder.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_print.h> +#include <drm/drm_panel.h> + +#define encoder_to_dsi(encoder) \ + container_of(encoder, struct sprd_dsi, encoder) + +enum dsi_work_mode { + DSI_MODE_CMD = 0, + DSI_MODE_VIDEO +}; + +enum video_burst_mode { + VIDEO_NON_BURST_WITH_SYNC_PULSES = 0, + VIDEO_NON_BURST_WITH_SYNC_EVENTS, + VIDEO_BURST_WITH_SYNC_PULSES +}; + +enum dsi_color_coding { + COLOR_CODE_16BIT_CONFIG1 = 0, + COLOR_CODE_16BIT_CONFIG2, + COLOR_CODE_16BIT_CONFIG3, + COLOR_CODE_18BIT_CONFIG1, + COLOR_CODE_18BIT_CONFIG2, + COLOR_CODE_24BIT, + COLOR_CODE_20BIT_YCC422_LOOSELY, + COLOR_CODE_24BIT_YCC422, + COLOR_CODE_16BIT_YCC422, + COLOR_CODE_30BIT, + COLOR_CODE_36BIT, + COLOR_CODE_12BIT_YCC420, + COLOR_CODE_COMPRESSTION, + COLOR_CODE_MAX +}; + +enum pll_timing { + NONE, + REQUEST_TIME, + PREPARE_TIME, + SETTLE_TIME, + ZERO_TIME, + TRAIL_TIME, + EXIT_TIME, + CLKPOST_TIME, + TA_GET, + TA_GO, + TA_SURE, + TA_WAIT, +}; + +struct dphy_pll { + u8 refin; /* Pre-divider control signal */ + u8 cp_s; /* 00: SDM_EN=1, 10: SDM_EN=0 */ + u8 fdk_s; /* PLL mode control: integer or fraction */ + u8 sdm_en; + u8 div; + u8 int_n; /* integer N PLL */ + u32 ref_clk; /* dphy reference clock, unit: MHz */ + u32 freq; /* panel config, unit: KHz */ + u32 fvco; + u32 potential_fvco; + u32 nint; /* sigma delta modulator NINT control */ + u32 kint; /* sigma delta modulator KINT control */ + u8 lpf_sel; /* low pass filter control */ + u8 out_sel; /* post divider control */ + u8 vco_band; /* vco range */ + u8 det_delay; +}; + 
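+/*
+ * Rough shape of how the divider fields above combine for a fractional-N
+ * PLL (an illustrative sketch only; the actual derivation is done by
+ * dphy_pll_config() in the PHY code):
+ *
+ *   fvco = freq * out_sel        - scale the panel rate into the VCO band
+ *   nint = fvco / ref_clk        - integer part of the feedback divider
+ *   kint = fractional remainder, scaled to the sigma-delta modulator width
+ *
+ * Mind the units noted above: ref_clk is in MHz while freq is in kHz, so
+ * they must be normalized before dividing.
+ */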
+struct dsi_context { + void __iomem *base; + struct regmap *regmap; + struct dphy_pll pll; + struct videomode vm; + bool enabled; + + u8 work_mode; + u8 burst_mode; + u32 int0_mask; + u32 int1_mask; + + /* maximum time (ns) for data lanes from HS to LP */ + u16 data_hs2lp; + /* maximum time (ns) for data lanes from LP to HS */ + u16 data_lp2hs; + /* maximum time (ns) for clk lanes from HS to LP */ + u16 clk_hs2lp; + /* maximum time (ns) for clk lanes from LP to HS */ + u16 clk_lp2hs; + /* maximum time (ns) for BTA operation - REQUIRED */ + u16 max_rd_time; + /* enable receiving frame ack packets - for video mode */ + bool frame_ack_en; + /* enable receiving tear effect ack packets - for cmd mode */ + bool te_ack_en; +}; + +struct sprd_dsi { + struct drm_device *drm; + struct mipi_dsi_host host; + struct mipi_dsi_device *slave; + struct drm_encoder encoder; + struct drm_bridge *panel_bridge; + struct dsi_context ctx; +}; + +int dphy_pll_config(struct dsi_context *ctx); +void dphy_timing_config(struct dsi_context *ctx); + +#endif /* __SPRD_DSI_H__ */ diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig index d0cfdd36b38f..246a94afbe74 100644 --- a/drivers/gpu/drm/sti/Kconfig +++ b/drivers/gpu/drm/sti/Kconfig @@ -5,7 +5,6 @@ config DRM_STI select RESET_CONTROLLER select DRM_KMS_HELPER select DRM_GEM_CMA_HELPER - select DRM_KMS_CMA_HELPER select DRM_PANEL select FW_LOADER select SND_SOC_HDMI_CODEC if SND_SOC diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig index b7d66915a2be..e0379488cd0d 100644 --- a/drivers/gpu/drm/stm/Kconfig +++ b/drivers/gpu/drm/stm/Kconfig @@ -4,7 +4,6 @@ config DRM_STM depends on DRM && (ARCH_STM32 || ARCH_MULTIPLATFORM) select DRM_KMS_HELPER select DRM_GEM_CMA_HELPER - select DRM_KMS_CMA_HELPER select DRM_PANEL_BRIDGE select VIDEOMODE_HELPERS select FB_PROVIDE_GET_FB_UNMAPPED_AREA if FB diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig index 5755f0432e77..befc5a80222d 100644 --- a/drivers/gpu/drm/sun4i/Kconfig +++ b/drivers/gpu/drm/sun4i/Kconfig @@ -5,7 +5,6 @@ config DRM_SUN4I depends on ARCH_SUNXI || COMPILE_TEST select DRM_GEM_CMA_HELPER select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_PANEL select REGMAP_MMIO select VIDEOMODE_HELPERS @@ -46,6 +45,7 @@ config DRM_SUN6I_DSI default MACH_SUN8I select CRC_CCITT select DRM_MIPI_DSI + select RESET_CONTROLLER select PHY_SUN6I_MIPI_DPHY help Choose this option if you want have an Allwinner SoC with diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 54dd562e294c..b630614b3d72 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -53,7 +53,7 @@ static const struct drm_driver sun4i_drv_driver = { .minor = 0, /* GEM Operations */ - DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(drm_sun4i_gem_dumb_create), + DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_sun4i_gem_dumb_create), }; static int sun4i_drv_bind(struct device *dev) diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig index 1650a448eabd..8cf5aeb9db6c 100644 --- a/drivers/gpu/drm/tegra/Kconfig +++ b/drivers/gpu/drm/tegra/Kconfig @@ -12,6 +12,9 @@ config DRM_TEGRA select INTERCONNECT select IOMMU_IOVA select CEC_CORE if CEC_NOTIFIER + select SND_SIMPLE_CARD if SND_SOC_TEGRA20_SPDIF + select SND_SOC_HDMI_CODEC if SND_SOC_TEGRA20_SPDIF + select SND_AUDIO_GRAPH_CARD if SND_SOC_TEGRA20_SPDIF help Choose this option if you have an NVIDIA Tegra SoC. 
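The sprd_dsi.h header above leans on the kernel's container_of() idiom: DRM hands callbacks a pointer to the embedded drm_encoder, and encoder_to_dsi() (like the other container_of() calls in sprd_dsi.c) recovers the enclosing sprd_dsi by subtracting the member's offset. A minimal, userspace-compilable sketch of the same pattern (the struct names are stand-ins, not the driver's types):

	#include <stddef.h>
	#include <stdio.h>

	/* simplified version of the kernel macro, without type checking */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct encoder { int id; };            /* stand-in for drm_encoder */
	struct dsi { int ctx; struct encoder encoder; };

	int main(void)
	{
		struct dsi d = { .ctx = 42 };
		struct encoder *e = &d.encoder;    /* what a callback receives */
		struct dsi *back = container_of(e, struct dsi, encoder);

		printf("ctx = %d\n", back->ctx);   /* prints ctx = 42 */
		return 0;
	}

The lookup is a compile-time constant subtraction and needs no back-pointer in the embedded structure, which is why the pattern recurs throughout the driver code below.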
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile index d801909182cf..df6cc986aeba 100644 --- a/drivers/gpu/drm/tegra/Makefile +++ b/drivers/gpu/drm/tegra/Makefile @@ -23,7 +23,8 @@ tegra-drm-y := \ gr2d.o \ gr3d.o \ falcon.o \ - vic.o + vic.o \ + nvdec.o tegra-drm-y += trace.o diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index a29d64f87563..eb70eee8992a 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -11,9 +11,12 @@ #include <linux/interconnect.h> #include <linux/module.h> #include <linux/of_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_opp.h> #include <linux/pm_runtime.h> #include <linux/reset.h> +#include <soc/tegra/common.h> #include <soc/tegra/pmc.h> #include <drm/drm_atomic.h> @@ -890,11 +893,9 @@ static int tegra_cursor_atomic_check(struct drm_plane *plane, return 0; } -static void tegra_cursor_atomic_update(struct drm_plane *plane, - struct drm_atomic_state *state) +static void __tegra_cursor_atomic_update(struct drm_plane *plane, + struct drm_plane_state *new_state) { - struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, - plane); struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state); struct tegra_dc *dc = to_tegra_dc(new_state->crtc); struct tegra_drm *tegra = plane->dev->dev_private; @@ -990,6 +991,14 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane, tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION); } +static void tegra_cursor_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); + + __tegra_cursor_atomic_update(plane, new_state); +} + static void tegra_cursor_atomic_disable(struct drm_plane *plane, struct drm_atomic_state *state) { @@ -1009,12 +1018,78 @@ static void tegra_cursor_atomic_disable(struct drm_plane *plane, tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); } +static int tegra_cursor_atomic_async_check(struct drm_plane *plane, struct drm_atomic_state *state) +{ + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); + struct drm_crtc_state *crtc_state; + int min_scale, max_scale; + int err; + + crtc_state = drm_atomic_get_existing_crtc_state(state, new_state->crtc); + if (WARN_ON(!crtc_state)) + return -EINVAL; + + if (!crtc_state->active) + return -EINVAL; + + if (plane->state->crtc != new_state->crtc || + plane->state->src_w != new_state->src_w || + plane->state->src_h != new_state->src_h || + plane->state->crtc_w != new_state->crtc_w || + plane->state->crtc_h != new_state->crtc_h || + plane->state->fb != new_state->fb || + plane->state->fb == NULL) + return -EINVAL; + + min_scale = (1 << 16) / 8; + max_scale = (8 << 16) / 1; + + err = drm_atomic_helper_check_plane_state(new_state, crtc_state, min_scale, max_scale, + true, true); + if (err < 0) + return err; + + if (new_state->visible != plane->state->visible) + return -EINVAL; + + return 0; +} + +static void tegra_cursor_atomic_async_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); + struct tegra_dc *dc = to_tegra_dc(new_state->crtc); + + plane->state->src_x = new_state->src_x; + plane->state->src_y = new_state->src_y; + plane->state->crtc_x = new_state->crtc_x; + plane->state->crtc_y = new_state->crtc_y; + + if (new_state->visible) { + struct tegra_plane *p = to_tegra_plane(plane); + u32 value; + + 
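+		/*
+		 * After the cursor registers are updated below, the two
+		 * STATE_CONTROL writes latch the new state: GENERAL_UPDATE
+		 * snapshots the assembly registers and GENERAL_ACT_REQ arms
+		 * them for the next frame; each read-back flushes the posted
+		 * write before the next step.
+		 */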
__tegra_cursor_atomic_update(plane, new_state);
+
+		value = (WIN_A_ACT_REQ << p->index) << 8 | GENERAL_UPDATE;
+		tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+		(void)tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
+
+		value = (WIN_A_ACT_REQ << p->index) | GENERAL_ACT_REQ;
+		tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+		(void)tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
+	}
+}
+
 static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
 	.prepare_fb = tegra_plane_prepare_fb,
 	.cleanup_fb = tegra_plane_cleanup_fb,
 	.atomic_check = tegra_cursor_atomic_check,
 	.atomic_update = tegra_cursor_atomic_update,
 	.atomic_disable = tegra_cursor_atomic_disable,
+	.atomic_async_check = tegra_cursor_atomic_async_check,
+	.atomic_async_update = tegra_cursor_atomic_async_update,
 };
 
 static const uint64_t linear_modifiers[] = {
@@ -1267,9 +1342,9 @@ static struct drm_plane *tegra_dc_add_planes(struct drm_device *drm,
 			err = PTR_ERR(planes[i]);
 
 			while (i--)
-				tegra_plane_funcs.destroy(planes[i]);
+				planes[i]->funcs->destroy(planes[i]);
 
-			tegra_plane_funcs.destroy(primary);
+			primary->funcs->destroy(primary);
 
 			return ERR_PTR(err);
 		}
 	}
@@ -1762,10 +1837,55 @@ int tegra_dc_state_setup_clock(struct tegra_dc *dc,
 	return 0;
 }
 
-static void tegra_dc_commit_state(struct tegra_dc *dc,
-				  struct tegra_dc_state *state)
+static void tegra_dc_update_voltage_state(struct tegra_dc *dc,
+					  struct tegra_dc_state *state)
+{
+	unsigned long rate, pstate;
+	struct dev_pm_opp *opp;
+	int err;
+
+	if (!dc->has_opp_table)
+		return;
+
+	/* calculate actual pixel clock rate which depends on internal divider */
+	rate = DIV_ROUND_UP(clk_get_rate(dc->clk) * 2, state->div + 2);
+
+	/* find suitable OPP for the rate */
+	opp = dev_pm_opp_find_freq_ceil(dc->dev, &rate);
+
+	/*
+	 * Very high resolution modes may result in a clock rate that is
+	 * above the characterized maximum. In this case it's okay to fall
+	 * back to the characterized maximum.
+	 */
+	if (opp == ERR_PTR(-ERANGE))
+		opp = dev_pm_opp_find_freq_floor(dc->dev, &rate);
+
+	if (IS_ERR(opp)) {
+		dev_err(dc->dev, "failed to find OPP for %luHz: %pe\n",
+			rate, opp);
+		return;
+	}
+
+	pstate = dev_pm_opp_get_required_pstate(opp, 0);
+	dev_pm_opp_put(opp);
+
+	/*
+	 * The minimum core voltage depends on the pixel clock rate (which
+	 * depends on the internal clock divider of the CRTC) and not on the
+	 * rate of the display controller clock. This is why we're not using
+	 * the dev_pm_opp_set_rate() API and instead controlling the power
+	 * domain directly.
+ */ + err = dev_pm_genpd_set_performance_state(dc->dev, pstate); + if (err) + dev_err(dc->dev, "failed to set power domain state to %lu: %d\n", + pstate, err); +} + +static void tegra_dc_set_clock_rate(struct tegra_dc *dc, + struct tegra_dc_state *state) { - u32 value; int err; err = clk_set_parent(dc->clk, state->clk); @@ -1797,10 +1917,7 @@ static void tegra_dc_commit_state(struct tegra_dc *dc, state->div); DRM_DEBUG_KMS("pclk: %lu\n", state->pclk); - if (!dc->soc->has_nvdisplay) { - value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1; - tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL); - } + tegra_dc_update_voltage_state(dc, state); } static void tegra_dc_stop(struct tegra_dc *dc) @@ -1991,6 +2108,13 @@ static void tegra_crtc_atomic_disable(struct drm_crtc *crtc, err = host1x_client_suspend(&dc->client); if (err < 0) dev_err(dc->dev, "failed to suspend: %d\n", err); + + if (dc->has_opp_table) { + err = dev_pm_genpd_set_performance_state(dc->dev, 0); + if (err) + dev_err(dc->dev, + "failed to clear power domain state: %d\n", err); + } } static void tegra_crtc_atomic_enable(struct drm_crtc *crtc, @@ -2002,6 +2126,9 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc, u32 value; int err; + /* apply PLL changes */ + tegra_dc_set_clock_rate(dc, crtc_state); + err = host1x_client_resume(&dc->client); if (err < 0) { dev_err(dc->dev, "failed to resume: %d\n", err); @@ -2076,8 +2203,11 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc, else tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR); - /* apply PLL and pixel clock changes */ - tegra_dc_commit_state(dc, crtc_state); + /* apply pixel clock changes */ + if (!dc->soc->has_nvdisplay) { + value = SHIFT_CLK_DIVIDER(crtc_state->div) | PIXEL_CLK_DIVIDER_PCD1; + tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL); + } /* program display mode */ tegra_dc_set_timings(dc, mode); @@ -2107,6 +2237,12 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc, tegra_dc_writel(dc, value, DC_COM_RG_UNDERFLOW); } + if (dc->rgb) { + /* XXX: parameterize? 
*/ + value = SC0_H_QUALIFIER_NONE | SC1_H_QUALIFIER_NONE; + tegra_dc_writel(dc, value, DC_DISP_SHIFT_CLOCK_OPTIONS); + } + tegra_dc_commit(dc); drm_crtc_vblank_on(crtc); @@ -2685,6 +2821,7 @@ static const struct tegra_dc_soc_info tegra20_dc_soc_info = { .has_win_b_vfilter_mem_client = true, .has_win_c_without_vert_filter = true, .plane_tiled_memory_bandwidth_x2 = false, + .has_pll_d2_out0 = false, }; static const struct tegra_dc_soc_info tegra30_dc_soc_info = { @@ -2707,6 +2844,7 @@ static const struct tegra_dc_soc_info tegra30_dc_soc_info = { .has_win_b_vfilter_mem_client = true, .has_win_c_without_vert_filter = false, .plane_tiled_memory_bandwidth_x2 = true, + .has_pll_d2_out0 = true, }; static const struct tegra_dc_soc_info tegra114_dc_soc_info = { @@ -2729,6 +2867,7 @@ static const struct tegra_dc_soc_info tegra114_dc_soc_info = { .has_win_b_vfilter_mem_client = false, .has_win_c_without_vert_filter = false, .plane_tiled_memory_bandwidth_x2 = true, + .has_pll_d2_out0 = true, }; static const struct tegra_dc_soc_info tegra124_dc_soc_info = { @@ -2751,6 +2890,7 @@ static const struct tegra_dc_soc_info tegra124_dc_soc_info = { .has_win_b_vfilter_mem_client = false, .has_win_c_without_vert_filter = false, .plane_tiled_memory_bandwidth_x2 = false, + .has_pll_d2_out0 = true, }; static const struct tegra_dc_soc_info tegra210_dc_soc_info = { @@ -2773,6 +2913,7 @@ static const struct tegra_dc_soc_info tegra210_dc_soc_info = { .has_win_b_vfilter_mem_client = false, .has_win_c_without_vert_filter = false, .plane_tiled_memory_bandwidth_x2 = false, + .has_pll_d2_out0 = true, }; static const struct tegra_windowgroup_soc tegra186_dc_wgrps[] = { @@ -2823,6 +2964,7 @@ static const struct tegra_dc_soc_info tegra186_dc_soc_info = { .wgrps = tegra186_dc_wgrps, .num_wgrps = ARRAY_SIZE(tegra186_dc_wgrps), .plane_tiled_memory_bandwidth_x2 = false, + .has_pll_d2_out0 = false, }; static const struct tegra_windowgroup_soc tegra194_dc_wgrps[] = { @@ -2873,6 +3015,7 @@ static const struct tegra_dc_soc_info tegra194_dc_soc_info = { .wgrps = tegra194_dc_wgrps, .num_wgrps = ARRAY_SIZE(tegra194_dc_wgrps), .plane_tiled_memory_bandwidth_x2 = false, + .has_pll_d2_out0 = false, }; static const struct of_device_id tegra_dc_of_match[] = { @@ -2973,6 +3116,23 @@ static int tegra_dc_couple(struct tegra_dc *dc) return 0; } +static int tegra_dc_init_opp_table(struct tegra_dc *dc) +{ + struct tegra_core_opp_params opp_params = {}; + int err; + + err = devm_tegra_core_dev_init_opp_table(dc->dev, &opp_params); + if (err && err != -ENODEV) + return err; + + if (err) + dc->has_opp_table = false; + else + dc->has_opp_table = true; + + return 0; +} + static int tegra_dc_probe(struct platform_device *pdev) { u64 dma_mask = dma_get_mask(pdev->dev.parent); @@ -3038,6 +3198,10 @@ static int tegra_dc_probe(struct platform_device *pdev) tegra_powergate_power_off(dc->powergate); } + err = tegra_dc_init_opp_table(dc); + if (err < 0) + return err; + dc->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dc->regs)) return PTR_ERR(dc->regs); diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h index 40378308d527..3f91a10ea6c7 100644 --- a/drivers/gpu/drm/tegra/dc.h +++ b/drivers/gpu/drm/tegra/dc.h @@ -76,6 +76,7 @@ struct tegra_dc_soc_info { bool has_win_b_vfilter_mem_client; bool has_win_c_without_vert_filter; bool plane_tiled_memory_bandwidth_x2; + bool has_pll_d2_out0; }; struct tegra_dc { @@ -100,6 +101,8 @@ struct tegra_dc { struct drm_info_list *debugfs_files; const struct tegra_dc_soc_info *soc; + + bool 
has_opp_table; }; static inline struct tegra_dc * diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 8d37d6b00562..e9de91a4e7e8 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -10,6 +10,7 @@ #include <linux/iommu.h> #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <drm/drm_aperture.h> #include <drm/drm_atomic.h> @@ -21,6 +22,10 @@ #include <drm/drm_prime.h> #include <drm/drm_vblank.h> +#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) +#include <asm/dma-iommu.h> +#endif + #include "dc.h" #include "drm.h" #include "gem.h" @@ -116,6 +121,7 @@ static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp) static void tegra_drm_context_free(struct tegra_drm_context *context) { context->client->ops->close_channel(context); + pm_runtime_put(context->client->base.dev); kfree(context); } @@ -427,13 +433,20 @@ static int tegra_client_open(struct tegra_drm_file *fpriv, { int err; + err = pm_runtime_resume_and_get(client->base.dev); + if (err) + return err; + err = client->ops->open_channel(client, context); - if (err < 0) + if (err < 0) { + pm_runtime_put(client->base.dev); return err; + } err = idr_alloc(&fpriv->legacy_contexts, context, 1, 0, GFP_KERNEL); if (err < 0) { client->ops->close_channel(context); + pm_runtime_put(client->base.dev); return err; } @@ -936,6 +949,17 @@ int host1x_client_iommu_attach(struct host1x_client *client) struct iommu_group *group = NULL; int err; +#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) + if (client->dev->archdata.mapping) { + struct dma_iommu_mapping *mapping = + to_dma_iommu_mapping(client->dev); + arm_iommu_detach_device(client->dev); + arm_iommu_release_mapping(mapping); + + domain = iommu_get_domain_for_dev(client->dev); + } +#endif + /* * If the host1x client is already attached to an IOMMU domain that is * not the shared IOMMU domain, don't try to attach it to a different @@ -1344,15 +1368,18 @@ static const struct of_device_id host1x_drm_subdevs[] = { { .compatible = "nvidia,tegra210-sor", }, { .compatible = "nvidia,tegra210-sor1", }, { .compatible = "nvidia,tegra210-vic", }, + { .compatible = "nvidia,tegra210-nvdec", }, { .compatible = "nvidia,tegra186-display", }, { .compatible = "nvidia,tegra186-dc", }, { .compatible = "nvidia,tegra186-sor", }, { .compatible = "nvidia,tegra186-sor1", }, { .compatible = "nvidia,tegra186-vic", }, + { .compatible = "nvidia,tegra186-nvdec", }, { .compatible = "nvidia,tegra194-display", }, { .compatible = "nvidia,tegra194-dc", }, { .compatible = "nvidia,tegra194-sor", }, { .compatible = "nvidia,tegra194-vic", }, + { .compatible = "nvidia,tegra194-nvdec", }, { /* sentinel */ } }; @@ -1376,6 +1403,7 @@ static struct platform_driver * const drivers[] = { &tegra_gr2d_driver, &tegra_gr3d_driver, &tegra_vic_driver, + &tegra_nvdec_driver, }; static int __init host1x_drm_init(void) diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index 8b28327c931c..fc0a19554eac 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -202,5 +202,6 @@ extern struct platform_driver tegra_sor_driver; extern struct platform_driver tegra_gr2d_driver; extern struct platform_driver tegra_gr3d_driver; extern struct platform_driver tegra_vic_driver; +extern struct platform_driver tegra_nvdec_driver; #endif /* HOST1X_DRM_H */ diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index d38fd7e12b57..fce0e52973c2 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ 
-23,86 +23,97 @@ MODULE_IMPORT_NS(DMA_BUF); -static void tegra_bo_put(struct host1x_bo *bo) +static unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents) { - struct tegra_bo *obj = host1x_to_tegra_bo(bo); + dma_addr_t next = ~(dma_addr_t)0; + unsigned int count = 0, i; + struct scatterlist *s; - drm_gem_object_put(&obj->gem); -} + for_each_sg(sgl, s, nents, i) { + /* sg_dma_address(s) is only valid for entries that have sg_dma_len(s) != 0. */ + if (!sg_dma_len(s)) + continue; -/* XXX move this into lib/scatterlist.c? */ -static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg, - unsigned int nents, gfp_t gfp_mask) -{ - struct scatterlist *dst; - unsigned int i; - int err; + if (sg_dma_address(s) != next) { + next = sg_dma_address(s) + sg_dma_len(s); + count++; + } + } - err = sg_alloc_table(sgt, nents, gfp_mask); - if (err < 0) - return err; + return count; +} - dst = sgt->sgl; +static inline unsigned int sgt_dma_count_chunks(struct sg_table *sgt) +{ + return sg_dma_count_chunks(sgt->sgl, sgt->nents); +} - for (i = 0; i < nents; i++) { - sg_set_page(dst, sg_page(sg), sg->length, 0); - dst = sg_next(dst); - sg = sg_next(sg); - } +static void tegra_bo_put(struct host1x_bo *bo) +{ + struct tegra_bo *obj = host1x_to_tegra_bo(bo); - return 0; + drm_gem_object_put(&obj->gem); } -static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo, - dma_addr_t *phys) +static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo, + enum dma_data_direction direction) { struct tegra_bo *obj = host1x_to_tegra_bo(bo); - struct sg_table *sgt; + struct drm_gem_object *gem = &obj->gem; + struct host1x_bo_mapping *map; int err; + map = kzalloc(sizeof(*map), GFP_KERNEL); + if (!map) + return ERR_PTR(-ENOMEM); + + kref_init(&map->ref); + map->bo = host1x_bo_get(bo); + map->direction = direction; + map->dev = dev; + /* - * If we've manually mapped the buffer object through the IOMMU, make - * sure to return the IOVA address of our mapping. - * - * Similarly, for buffers that have been allocated by the DMA API the - * physical address can be used for devices that are not attached to - * an IOMMU. For these devices, callers must pass a valid pointer via - * the @phys argument. - * - * Imported buffers were also already mapped at import time, so the - * existing mapping can be reused. + * Imported buffers need special treatment to satisfy the semantics of DMA-BUF. */ - if (phys) { - *phys = obj->iova; - return NULL; + if (gem->import_attach) { + struct dma_buf *buf = gem->import_attach->dmabuf; + + map->attach = dma_buf_attach(buf, dev); + if (IS_ERR(map->attach)) { + err = PTR_ERR(map->attach); + goto free; + } + + map->sgt = dma_buf_map_attachment(map->attach, direction); + if (IS_ERR(map->sgt)) { + dma_buf_detach(buf, map->attach); + err = PTR_ERR(map->sgt); + goto free; + } + + err = sgt_dma_count_chunks(map->sgt); + map->size = gem->size; + + goto out; } /* * If we don't have a mapping for this buffer yet, return an SG table * so that host1x can do the mapping for us via the DMA API. */ - sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); - if (!sgt) - return ERR_PTR(-ENOMEM); + map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL); + if (!map->sgt) { + err = -ENOMEM; + goto free; + } if (obj->pages) { /* * If the buffer object was allocated from the explicit IOMMU * API code paths, construct an SG table from the pages. 
*/ - err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages, - 0, obj->gem.size, GFP_KERNEL); - if (err < 0) - goto free; - } else if (obj->sgt) { - /* - * If the buffer object already has an SG table but no pages - * were allocated for it, it means the buffer was imported and - * the SG table needs to be copied to avoid overwriting any - * other potential users of the original SG table. - */ - err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, - obj->sgt->orig_nents, GFP_KERNEL); + err = sg_alloc_table_from_pages(map->sgt, obj->pages, obj->num_pages, 0, gem->size, + GFP_KERNEL); if (err < 0) goto free; } else { @@ -111,25 +122,53 @@ static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo, * not imported, it had to be allocated with the DMA API, so * the DMA API helper can be used. */ - err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova, - obj->gem.size); + err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size); if (err < 0) goto free; } - return sgt; + err = dma_map_sgtable(dev, map->sgt, direction, 0); + if (err) + goto free_sgt; + +out: + /* + * If we've manually mapped the buffer object through the IOMMU, make sure to return the + * existing IOVA address of our mapping. + */ + if (!obj->mm) { + map->phys = sg_dma_address(map->sgt->sgl); + map->chunks = err; + } else { + map->phys = obj->iova; + map->chunks = 1; + } + + map->size = gem->size; + return map; + +free_sgt: + sg_free_table(map->sgt); free: - kfree(sgt); + kfree(map->sgt); + kfree(map); return ERR_PTR(err); } -static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt) +static void tegra_bo_unpin(struct host1x_bo_mapping *map) { - if (sgt) { - sg_free_table(sgt); - kfree(sgt); + if (map->attach) { + dma_buf_unmap_attachment(map->attach, map->sgt, map->direction); + dma_buf_detach(map->attach->dmabuf, map->attach); + } else { + dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0); + sg_free_table(map->sgt); + kfree(map->sgt); } + + host1x_bo_put(map->bo); + kfree(map); } static void *tegra_bo_mmap(struct host1x_bo *bo) @@ -452,8 +491,18 @@ free: void tegra_bo_free_object(struct drm_gem_object *gem) { struct tegra_drm *tegra = gem->dev->dev_private; + struct host1x_bo_mapping *mapping, *tmp; struct tegra_bo *bo = to_tegra_bo(gem); + /* remove all mappings of this buffer object from any caches */ + list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) { + if (mapping->cache) + host1x_bo_unpin(mapping); + else + dev_err(gem->dev->dev, "mapping %p stale for device %s\n", mapping, + dev_name(mapping->dev)); + } + if (tegra->domain) tegra_bo_iommu_unmap(tegra, bo); diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c index de288cba3905..e3bb4c99ed39 100644 --- a/drivers/gpu/drm/tegra/gr2d.c +++ b/drivers/gpu/drm/tegra/gr2d.c @@ -4,14 +4,25 @@ */ #include <linux/clk.h> +#include <linux/delay.h> #include <linux/iommu.h> #include <linux/module.h> #include <linux/of_device.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> + +#include <soc/tegra/common.h> #include "drm.h" #include "gem.h" #include "gr2d.h" +enum { + RST_MC, + RST_GR2D, + RST_GR2D_MAX, +}; + struct gr2d_soc { unsigned int version; }; @@ -21,6 +32,9 @@ struct gr2d { struct host1x_channel *channel; struct clk *clk; + struct reset_control_bulk_data resets[RST_GR2D_MAX]; + unsigned int nresets; + const struct gr2d_soc *soc; DECLARE_BITMAP(addr_regs, GR2D_NUM_REGS); @@ -56,15 +70,22 @@ static int gr2d_init(struct host1x_client *client) goto free; } + 
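+	/*
+	 * Enable runtime PM with a 200 ms autosuspend delay (set just
+	 * below) so the unit can power down between job submissions
+	 * without thrashing on back-to-back jobs.
+	 */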
pm_runtime_enable(client->dev);
+	pm_runtime_use_autosuspend(client->dev);
+	pm_runtime_set_autosuspend_delay(client->dev, 200);
+
 	err = tegra_drm_register_client(dev->dev_private, drm);
 	if (err < 0) {
 		dev_err(client->dev, "failed to register client: %d\n", err);
-		goto detach;
+		goto disable_rpm;
 	}
 
 	return 0;
 
-detach:
+disable_rpm:
+	pm_runtime_dont_use_autosuspend(client->dev);
+	pm_runtime_force_suspend(client->dev);
+
 	host1x_client_iommu_detach(client);
 free:
 	host1x_syncpt_put(client->syncpts[0]);
@@ -85,10 +106,15 @@ static int gr2d_exit(struct host1x_client *client)
 	if (err < 0)
 		return err;
 
+	pm_runtime_dont_use_autosuspend(client->dev);
+	pm_runtime_force_suspend(client->dev);
+
 	host1x_client_iommu_detach(client);
 	host1x_syncpt_put(client->syncpts[0]);
 	host1x_channel_put(gr2d->channel);
 
+	gr2d->channel = NULL;
+
 	return 0;
 }
 
@@ -190,6 +216,27 @@ static const u32 gr2d_addr_regs[] = {
 	GR2D_VA_BASE_ADDR_SB,
 };
 
+static int gr2d_get_resets(struct device *dev, struct gr2d *gr2d)
+{
+	int err;
+
+	gr2d->resets[RST_MC].id = "mc";
+	gr2d->resets[RST_GR2D].id = "2d";
+	gr2d->nresets = RST_GR2D_MAX;
+
+	err = devm_reset_control_bulk_get_optional_exclusive_released(
+				dev, gr2d->nresets, gr2d->resets);
+	if (err) {
+		dev_err(dev, "failed to get reset: %d\n", err);
+		return err;
+	}
+
+	if (WARN_ON(!gr2d->resets[RST_GR2D].rstc))
+		return -ENOENT;
+
+	return 0;
+}
+
 static int gr2d_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -202,6 +249,8 @@ static int gr2d_probe(struct platform_device *pdev)
 	if (!gr2d)
 		return -ENOMEM;
 
+	platform_set_drvdata(pdev, gr2d);
+
 	gr2d->soc = of_device_get_match_data(dev);
 
 	syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
@@ -214,11 +263,9 @@ static int gr2d_probe(struct platform_device *pdev)
 		return PTR_ERR(gr2d->clk);
 	}
 
-	err = clk_prepare_enable(gr2d->clk);
-	if (err) {
-		dev_err(dev, "cannot turn on clock\n");
+	err = gr2d_get_resets(dev, gr2d);
+	if (err)
 		return err;
-	}
 
 	INIT_LIST_HEAD(&gr2d->client.base.list);
 	gr2d->client.base.ops = &gr2d_client_ops;
@@ -231,10 +278,13 @@ static int gr2d_probe(struct platform_device *pdev)
 	gr2d->client.version = gr2d->soc->version;
 	gr2d->client.ops = &gr2d_ops;
 
+	err = devm_tegra_core_dev_init_opp_table_common(dev);
+	if (err)
+		return err;
+
 	err = host1x_client_register(&gr2d->client.base);
 	if (err < 0) {
 		dev_err(dev, "failed to register host1x client: %d\n", err);
-		clk_disable_unprepare(gr2d->clk);
 		return err;
 	}
 
@@ -242,8 +292,6 @@ static int gr2d_probe(struct platform_device *pdev)
 	for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); i++)
 		set_bit(gr2d_addr_regs[i], gr2d->addr_regs);
 
-	platform_set_drvdata(pdev, gr2d);
-
 	return 0;
 }
 
@@ -259,15 +307,100 @@ static int gr2d_remove(struct platform_device *pdev)
 		return err;
 	}
 
+	return 0;
+}
+
+static int __maybe_unused gr2d_runtime_suspend(struct device *dev)
+{
+	struct gr2d *gr2d = dev_get_drvdata(dev);
+	int err;
+
+	host1x_channel_stop(gr2d->channel);
+	reset_control_bulk_release(gr2d->nresets, gr2d->resets);
+
+	/*
+	 * The GR2D module shouldn't be reset while the hardware is idling,
+	 * otherwise host1x's cmdproc will get stuck trying to access any G2
+	 * register after reset. The GR2D module can be either hot-reset or
+	 * reset after power-gating of the HEG partition. Hence we put only
+	 * the memory client part of the module into reset; the HEG GENPD
+	 * will take care of resetting the GR2D module across power-gating.
+ * + * On Tegra20 there is no HEG partition, but it's okay to have + * undetermined h/w state since userspace is expected to reprogram + * the state on each job submission anyways. + */ + err = reset_control_acquire(gr2d->resets[RST_MC].rstc); + if (err) { + dev_err(dev, "failed to acquire MC reset: %d\n", err); + goto acquire_reset; + } + + err = reset_control_assert(gr2d->resets[RST_MC].rstc); + reset_control_release(gr2d->resets[RST_MC].rstc); + if (err) { + dev_err(dev, "failed to assert MC reset: %d\n", err); + goto acquire_reset; + } + clk_disable_unprepare(gr2d->clk); return 0; + +acquire_reset: + reset_control_bulk_acquire(gr2d->nresets, gr2d->resets); + reset_control_bulk_deassert(gr2d->nresets, gr2d->resets); + + return err; } +static int __maybe_unused gr2d_runtime_resume(struct device *dev) +{ + struct gr2d *gr2d = dev_get_drvdata(dev); + int err; + + err = reset_control_bulk_acquire(gr2d->nresets, gr2d->resets); + if (err) { + dev_err(dev, "failed to acquire reset: %d\n", err); + return err; + } + + err = clk_prepare_enable(gr2d->clk); + if (err) { + dev_err(dev, "failed to enable clock: %d\n", err); + goto release_reset; + } + + usleep_range(2000, 4000); + + /* this is a reset array which deasserts both 2D MC and 2D itself */ + err = reset_control_bulk_deassert(gr2d->nresets, gr2d->resets); + if (err) { + dev_err(dev, "failed to deassert reset: %d\n", err); + goto disable_clk; + } + + return 0; + +disable_clk: + clk_disable_unprepare(gr2d->clk); +release_reset: + reset_control_bulk_release(gr2d->nresets, gr2d->resets); + + return err; +} + +static const struct dev_pm_ops tegra_gr2d_pm = { + SET_RUNTIME_PM_OPS(gr2d_runtime_suspend, gr2d_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) +}; + struct platform_driver tegra_gr2d_driver = { .driver = { .name = "tegra-gr2d", .of_match_table = gr2d_match, + .pm = &tegra_gr2d_pm, }, .probe = gr2d_probe, .remove = gr2d_remove, diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c index 24442ade0da3..a1fd3113ea96 100644 --- a/drivers/gpu/drm/tegra/gr3d.c +++ b/drivers/gpu/drm/tegra/gr3d.c @@ -5,32 +5,47 @@ */ #include <linux/clk.h> +#include <linux/delay.h> #include <linux/host1x.h> #include <linux/iommu.h> #include <linux/module.h> #include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_opp.h> +#include <linux/pm_runtime.h> #include <linux/reset.h> +#include <soc/tegra/common.h> #include <soc/tegra/pmc.h> #include "drm.h" #include "gem.h" #include "gr3d.h" +enum { + RST_MC, + RST_GR3D, + RST_MC2, + RST_GR3D2, + RST_GR3D_MAX, +}; + struct gr3d_soc { unsigned int version; + unsigned int num_clocks; + unsigned int num_resets; }; struct gr3d { struct tegra_drm_client client; struct host1x_channel *channel; - struct clk *clk_secondary; - struct clk *clk; - struct reset_control *rst_secondary; - struct reset_control *rst; const struct gr3d_soc *soc; + struct clk_bulk_data *clocks; + unsigned int nclocks; + struct reset_control_bulk_data resets[RST_GR3D_MAX]; + unsigned int nresets; DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS); }; @@ -65,15 +80,22 @@ static int gr3d_init(struct host1x_client *client) goto free; } + pm_runtime_enable(client->dev); + pm_runtime_use_autosuspend(client->dev); + pm_runtime_set_autosuspend_delay(client->dev, 200); + err = tegra_drm_register_client(dev->dev_private, drm); if (err < 0) { dev_err(client->dev, "failed to register client: %d\n", err); - goto detach; + goto disable_rpm; } return 0; 
-detach: +disable_rpm: + pm_runtime_dont_use_autosuspend(client->dev); + pm_runtime_force_suspend(client->dev); + host1x_client_iommu_detach(client); free: host1x_syncpt_put(client->syncpts[0]); @@ -93,10 +115,15 @@ static int gr3d_exit(struct host1x_client *client) if (err < 0) return err; + pm_runtime_dont_use_autosuspend(client->dev); + pm_runtime_force_suspend(client->dev); + host1x_client_iommu_detach(client); host1x_syncpt_put(client->syncpts[0]); host1x_channel_put(gr3d->channel); + gr3d->channel = NULL; + return 0; } @@ -155,14 +182,20 @@ static const struct tegra_drm_client_ops gr3d_ops = { static const struct gr3d_soc tegra20_gr3d_soc = { .version = 0x20, + .num_clocks = 1, + .num_resets = 2, }; static const struct gr3d_soc tegra30_gr3d_soc = { .version = 0x30, + .num_clocks = 2, + .num_resets = 4, }; static const struct gr3d_soc tegra114_gr3d_soc = { .version = 0x35, + .num_clocks = 1, + .num_resets = 2, }; static const struct of_device_id tegra_gr3d_match[] = { @@ -278,69 +311,216 @@ static const u32 gr3d_addr_regs[] = { GR3D_GLOBAL_SAMP23SURFADDR(15), }; -static int gr3d_probe(struct platform_device *pdev) +static int gr3d_power_up_legacy_domain(struct device *dev, const char *name, + unsigned int id) { - struct device_node *np = pdev->dev.of_node; - struct host1x_syncpt **syncpts; - struct gr3d *gr3d; + struct gr3d *gr3d = dev_get_drvdata(dev); + struct reset_control *reset; + struct clk *clk; unsigned int i; int err; - gr3d = devm_kzalloc(&pdev->dev, sizeof(*gr3d), GFP_KERNEL); - if (!gr3d) - return -ENOMEM; - - gr3d->soc = of_device_get_match_data(&pdev->dev); + /* + * Tegra20 device-tree doesn't specify 3d clock name and there is only + * one clock for Tegra20. Tegra30+ device-trees always specified names + * for the clocks. + */ + if (gr3d->nclocks == 1) { + if (id == TEGRA_POWERGATE_3D1) + return 0; + + clk = gr3d->clocks[0].clk; + } else { + for (i = 0; i < gr3d->nclocks; i++) { + if (WARN_ON(!gr3d->clocks[i].id)) + continue; + + if (!strcmp(gr3d->clocks[i].id, name)) { + clk = gr3d->clocks[i].clk; + break; + } + } - syncpts = devm_kzalloc(&pdev->dev, sizeof(*syncpts), GFP_KERNEL); - if (!syncpts) - return -ENOMEM; + if (WARN_ON(i == gr3d->nclocks)) + return -EINVAL; + } - gr3d->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(gr3d->clk)) { - dev_err(&pdev->dev, "cannot get clock\n"); - return PTR_ERR(gr3d->clk); + /* + * We use array of resets, which includes MC resets, and MC + * reset shouldn't be asserted while hardware is gated because + * MC flushing will fail for gated hardware. Hence for legacy + * PD we request the individual reset separately. + */ + reset = reset_control_get_exclusive_released(dev, name); + if (IS_ERR(reset)) + return PTR_ERR(reset); + + err = reset_control_acquire(reset); + if (err) { + dev_err(dev, "failed to acquire %s reset: %d\n", name, err); + } else { + err = tegra_powergate_sequence_power_up(id, clk, reset); + reset_control_release(reset); } - gr3d->rst = devm_reset_control_get(&pdev->dev, "3d"); - if (IS_ERR(gr3d->rst)) { - dev_err(&pdev->dev, "cannot get reset\n"); - return PTR_ERR(gr3d->rst); + reset_control_put(reset); + if (err) + return err; + + /* + * tegra_powergate_sequence_power_up() leaves clocks enabled, + * while GENPD not. Hence keep clock-enable balanced. 
+ */ + clk_disable_unprepare(clk); + + return 0; +} + +static void gr3d_del_link(void *link) +{ + device_link_del(link); +} + +static int gr3d_init_power(struct device *dev, struct gr3d *gr3d) +{ + static const char * const opp_genpd_names[] = { "3d0", "3d1", NULL }; + const u32 link_flags = DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME; + struct device **opp_virt_devs, *pd_dev; + struct device_link *link; + unsigned int i; + int err; + + err = of_count_phandle_with_args(dev->of_node, "power-domains", + "#power-domain-cells"); + if (err < 0) { + if (err != -ENOENT) + return err; + + /* + * Older device-trees don't use GENPD. In this case we should + * toggle power domain manually. + */ + err = gr3d_power_up_legacy_domain(dev, "3d", + TEGRA_POWERGATE_3D); + if (err) + return err; + + err = gr3d_power_up_legacy_domain(dev, "3d2", + TEGRA_POWERGATE_3D1); + if (err) + return err; + + return 0; } - if (of_device_is_compatible(np, "nvidia,tegra30-gr3d")) { - gr3d->clk_secondary = devm_clk_get(&pdev->dev, "3d2"); - if (IS_ERR(gr3d->clk_secondary)) { - dev_err(&pdev->dev, "cannot get secondary clock\n"); - return PTR_ERR(gr3d->clk_secondary); + /* + * The PM domain core automatically attaches a single power domain, + * otherwise it skips attaching completely. We have a single domain + * on Tegra20 and two domains on Tegra30+. + */ + if (dev->pm_domain) + return 0; + + err = devm_pm_opp_attach_genpd(dev, opp_genpd_names, &opp_virt_devs); + if (err) + return err; + + for (i = 0; opp_genpd_names[i]; i++) { + pd_dev = opp_virt_devs[i]; + if (!pd_dev) { + dev_err(dev, "failed to get %s power domain\n", + opp_genpd_names[i]); + return -EINVAL; } - gr3d->rst_secondary = devm_reset_control_get(&pdev->dev, - "3d2"); - if (IS_ERR(gr3d->rst_secondary)) { - dev_err(&pdev->dev, "cannot get secondary reset\n"); - return PTR_ERR(gr3d->rst_secondary); + link = device_link_add(dev, pd_dev, link_flags); + if (!link) { + dev_err(dev, "failed to link to %s\n", dev_name(pd_dev)); + return -EINVAL; } + + err = devm_add_action_or_reset(dev, gr3d_del_link, link); + if (err) + return err; } - err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D, gr3d->clk, - gr3d->rst); + return 0; +} + +static int gr3d_get_clocks(struct device *dev, struct gr3d *gr3d) +{ + int err; + + err = devm_clk_bulk_get_all(dev, &gr3d->clocks); if (err < 0) { - dev_err(&pdev->dev, "failed to power up 3D unit\n"); + dev_err(dev, "failed to get clock: %d\n", err); return err; } + gr3d->nclocks = err; - if (gr3d->clk_secondary) { - err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D1, - gr3d->clk_secondary, - gr3d->rst_secondary); - if (err < 0) { - dev_err(&pdev->dev, - "failed to power up secondary 3D unit\n"); - return err; - } + if (gr3d->nclocks != gr3d->soc->num_clocks) { + dev_err(dev, "invalid number of clocks: %u\n", gr3d->nclocks); + return -ENOENT; + } + + return 0; +} + +static int gr3d_get_resets(struct device *dev, struct gr3d *gr3d) +{ + int err; + + gr3d->resets[RST_MC].id = "mc"; + gr3d->resets[RST_MC2].id = "mc2"; + gr3d->resets[RST_GR3D].id = "3d"; + gr3d->resets[RST_GR3D2].id = "3d2"; + gr3d->nresets = gr3d->soc->num_resets; + + err = devm_reset_control_bulk_get_optional_exclusive_released( + dev, gr3d->nresets, gr3d->resets); + if (err) { + dev_err(dev, "failed to get reset: %d\n", err); + return err; } + if (WARN_ON(!gr3d->resets[RST_GR3D].rstc) || + WARN_ON(!gr3d->resets[RST_GR3D2].rstc && gr3d->nresets == 4)) + return -ENOENT; + + return 0; +} + +static int gr3d_probe(struct platform_device *pdev) +{ + struct 
host1x_syncpt **syncpts; + struct gr3d *gr3d; + unsigned int i; + int err; + + gr3d = devm_kzalloc(&pdev->dev, sizeof(*gr3d), GFP_KERNEL); + if (!gr3d) + return -ENOMEM; + + platform_set_drvdata(pdev, gr3d); + + gr3d->soc = of_device_get_match_data(&pdev->dev); + + syncpts = devm_kzalloc(&pdev->dev, sizeof(*syncpts), GFP_KERNEL); + if (!syncpts) + return -ENOMEM; + + err = gr3d_get_clocks(&pdev->dev, gr3d); + if (err) + return err; + + err = gr3d_get_resets(&pdev->dev, gr3d); + if (err) + return err; + + err = gr3d_init_power(&pdev->dev, gr3d); + if (err) + return err; + INIT_LIST_HEAD(&gr3d->client.base.list); gr3d->client.base.ops = &gr3d_client_ops; gr3d->client.base.dev = &pdev->dev; @@ -352,6 +532,10 @@ static int gr3d_probe(struct platform_device *pdev) gr3d->client.version = gr3d->soc->version; gr3d->client.ops = &gr3d_ops; + err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev); + if (err) + return err; + err = host1x_client_register(&gr3d->client.base); if (err < 0) { dev_err(&pdev->dev, "failed to register host1x client: %d\n", @@ -363,8 +547,6 @@ static int gr3d_probe(struct platform_device *pdev) for (i = 0; i < ARRAY_SIZE(gr3d_addr_regs); i++) set_bit(gr3d_addr_regs[i], gr3d->addr_regs); - platform_set_drvdata(pdev, gr3d); - return 0; } @@ -380,23 +562,80 @@ static int gr3d_remove(struct platform_device *pdev) return err; } - if (gr3d->clk_secondary) { - reset_control_assert(gr3d->rst_secondary); - tegra_powergate_power_off(TEGRA_POWERGATE_3D1); - clk_disable_unprepare(gr3d->clk_secondary); + return 0; +} + +static int __maybe_unused gr3d_runtime_suspend(struct device *dev) +{ + struct gr3d *gr3d = dev_get_drvdata(dev); + int err; + + host1x_channel_stop(gr3d->channel); + + err = reset_control_bulk_assert(gr3d->nresets, gr3d->resets); + if (err) { + dev_err(dev, "failed to assert reset: %d\n", err); + return err; + } + + usleep_range(10, 20); + + /* + * Older device-trees don't specify MC resets and power-gating can't + * be done safely in that case. Hence we will keep the power ungated + * for older DTBs. For newer DTBs, GENPD will perform the power-gating. 
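+	 *
+	 * A hypothetical guard for the legacy case (editor's sketch only,
+	 * the driver relies on GENPD instead) could key off the optional
+	 * MC reset obtained in gr3d_get_resets():
+	 *
+	 *   if (!gr3d->resets[RST_MC].rstc)
+	 *           dev_dbg(dev, "MC reset absent, power stays ungated\n");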
+ */ + + clk_bulk_disable_unprepare(gr3d->nclocks, gr3d->clocks); + reset_control_bulk_release(gr3d->nresets, gr3d->resets); + + return 0; +} + +static int __maybe_unused gr3d_runtime_resume(struct device *dev) +{ + struct gr3d *gr3d = dev_get_drvdata(dev); + int err; + + err = reset_control_bulk_acquire(gr3d->nresets, gr3d->resets); + if (err) { + dev_err(dev, "failed to acquire reset: %d\n", err); + return err; + } + + err = clk_bulk_prepare_enable(gr3d->nclocks, gr3d->clocks); + if (err) { + dev_err(dev, "failed to enable clock: %d\n", err); + goto release_reset; } - reset_control_assert(gr3d->rst); - tegra_powergate_power_off(TEGRA_POWERGATE_3D); - clk_disable_unprepare(gr3d->clk); + err = reset_control_bulk_deassert(gr3d->nresets, gr3d->resets); + if (err) { + dev_err(dev, "failed to deassert reset: %d\n", err); + goto disable_clk; + } return 0; + +disable_clk: + clk_bulk_disable_unprepare(gr3d->nclocks, gr3d->clocks); +release_reset: + reset_control_bulk_release(gr3d->nresets, gr3d->resets); + + return err; } +static const struct dev_pm_ops tegra_gr3d_pm = { + SET_RUNTIME_PM_OPS(gr3d_runtime_suspend, gr3d_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) +}; + struct platform_driver tegra_gr3d_driver = { .driver = { .name = "tegra-gr3d", .of_match_table = tegra_gr3d_match, + .pm = &tegra_gr3d_pm, }, .probe = gr3d_probe, .remove = gr3d_remove, diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index e5d2a4026028..8845af5d325f 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -11,10 +11,14 @@ #include <linux/math64.h> #include <linux/module.h> #include <linux/of_device.h> +#include <linux/pm_opp.h> #include <linux/pm_runtime.h> #include <linux/regulator/consumer.h> #include <linux/reset.h> +#include <soc/tegra/common.h> +#include <sound/hdmi-codec.h> + #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_debugfs.h> @@ -78,6 +82,9 @@ struct tegra_hdmi { bool dvi; struct drm_info_list *debugfs_files; + + struct platform_device *audio_pdev; + struct mutex audio_lock; }; static inline struct tegra_hdmi * @@ -360,6 +367,18 @@ static const struct tmds_config tegra124_tmds_config[] = { }, }; +static void tegra_hdmi_audio_lock(struct tegra_hdmi *hdmi) +{ + mutex_lock(&hdmi->audio_lock); + disable_irq(hdmi->irq); +} + +static void tegra_hdmi_audio_unlock(struct tegra_hdmi *hdmi) +{ + enable_irq(hdmi->irq); + mutex_unlock(&hdmi->audio_lock); +} + static int tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pix_clock, struct tegra_hdmi_audio_config *config) @@ -829,6 +848,23 @@ static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi, HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT); } +static int tegra_hdmi_reconfigure_audio(struct tegra_hdmi *hdmi) +{ + int err; + + err = tegra_hdmi_setup_audio(hdmi); + if (err < 0) { + tegra_hdmi_disable_audio_infoframe(hdmi); + tegra_hdmi_disable_audio(hdmi); + } else { + tegra_hdmi_setup_audio_infoframe(hdmi); + tegra_hdmi_enable_audio_infoframe(hdmi); + tegra_hdmi_enable_audio(hdmi); + } + + return err; +} + static bool tegra_output_is_hdmi(struct tegra_output *output) { struct edid *edid; @@ -1135,6 +1171,8 @@ static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder) u32 value; int err; + tegra_hdmi_audio_lock(hdmi); + /* * The following accesses registers of the display controller, so make * sure it's only executed when the output is attached to one. 
@@ -1159,6 +1197,10 @@ static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder) tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_ENABLE); tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_MASK); + hdmi->pixel_clock = 0; + + tegra_hdmi_audio_unlock(hdmi); + err = host1x_client_suspend(&hdmi->client); if (err < 0) dev_err(hdmi->dev, "failed to suspend: %d\n", err); @@ -1182,6 +1224,8 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder) return; } + tegra_hdmi_audio_lock(hdmi); + /* * Enable and unmask the HDA codec SCRATCH0 register interrupt. This * is used for interoperability between the HDA codec driver and the @@ -1195,7 +1239,7 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder) h_back_porch = mode->htotal - mode->hsync_end; h_front_porch = mode->hsync_start - mode->hdisplay; - err = clk_set_rate(hdmi->clk, hdmi->pixel_clock); + err = dev_pm_opp_set_rate(hdmi->dev, hdmi->pixel_clock); if (err < 0) { dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n", err); @@ -1387,6 +1431,8 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder) } /* TODO: add HDCP support */ + + tegra_hdmi_audio_unlock(hdmi); } static int @@ -1416,6 +1462,91 @@ static const struct drm_encoder_helper_funcs tegra_hdmi_encoder_helper_funcs = { .atomic_check = tegra_hdmi_encoder_atomic_check, }; +static int tegra_hdmi_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *fmt, + struct hdmi_codec_params *hparms) +{ + struct tegra_hdmi *hdmi = data; + int ret = 0; + + tegra_hdmi_audio_lock(hdmi); + + hdmi->format.sample_rate = hparms->sample_rate; + hdmi->format.channels = hparms->channels; + + if (hdmi->pixel_clock && !hdmi->dvi) + ret = tegra_hdmi_reconfigure_audio(hdmi); + + tegra_hdmi_audio_unlock(hdmi); + + return ret; +} + +static int tegra_hdmi_audio_startup(struct device *dev, void *data) +{ + struct tegra_hdmi *hdmi = data; + int ret; + + ret = host1x_client_resume(&hdmi->client); + if (ret < 0) + dev_err(hdmi->dev, "failed to resume: %d\n", ret); + + return ret; +} + +static void tegra_hdmi_audio_shutdown(struct device *dev, void *data) +{ + struct tegra_hdmi *hdmi = data; + int ret; + + tegra_hdmi_audio_lock(hdmi); + + hdmi->format.sample_rate = 0; + hdmi->format.channels = 0; + + tegra_hdmi_audio_unlock(hdmi); + + ret = host1x_client_suspend(&hdmi->client); + if (ret < 0) + dev_err(hdmi->dev, "failed to suspend: %d\n", ret); +} + +static const struct hdmi_codec_ops tegra_hdmi_codec_ops = { + .hw_params = tegra_hdmi_hw_params, + .audio_startup = tegra_hdmi_audio_startup, + .audio_shutdown = tegra_hdmi_audio_shutdown, +}; + +static int tegra_hdmi_codec_register(struct tegra_hdmi *hdmi) +{ + struct hdmi_codec_pdata codec_data = {}; + + if (hdmi->config->has_hda) + return 0; + + codec_data.ops = &tegra_hdmi_codec_ops; + codec_data.data = hdmi; + codec_data.spdif = 1; + + hdmi->audio_pdev = platform_device_register_data(hdmi->dev, + HDMI_CODEC_DRV_NAME, + PLATFORM_DEVID_AUTO, + &codec_data, + sizeof(codec_data)); + if (IS_ERR(hdmi->audio_pdev)) + return PTR_ERR(hdmi->audio_pdev); + + hdmi->format.channels = 2; + + return 0; +} + +static void tegra_hdmi_codec_unregister(struct tegra_hdmi *hdmi) +{ + if (hdmi->audio_pdev) + platform_device_unregister(hdmi->audio_pdev); +} + static int tegra_hdmi_init(struct host1x_client *client) { struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); @@ -1453,28 +1584,47 @@ static int tegra_hdmi_init(struct host1x_client *client) if (err < 0) { dev_err(client->dev, "failed to enable HDMI regulator: %d\n", 
err); - return err; + goto output_exit; } err = regulator_enable(hdmi->pll); if (err < 0) { dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err); - return err; + goto disable_hdmi; } err = regulator_enable(hdmi->vdd); if (err < 0) { dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err); - return err; + goto disable_pll; + } + + err = tegra_hdmi_codec_register(hdmi); + if (err < 0) { + dev_err(hdmi->dev, "failed to register audio codec: %d\n", err); + goto disable_vdd; } return 0; + +disable_vdd: + regulator_disable(hdmi->vdd); +disable_pll: + regulator_disable(hdmi->pll); +disable_hdmi: + regulator_disable(hdmi->hdmi); +output_exit: + tegra_output_exit(&hdmi->output); + + return err; } static int tegra_hdmi_exit(struct host1x_client *client) { struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); + tegra_hdmi_codec_unregister(hdmi); + tegra_output_exit(&hdmi->output); regulator_disable(hdmi->vdd); @@ -1599,7 +1749,6 @@ static irqreturn_t tegra_hdmi_irq(int irq, void *data) { struct tegra_hdmi *hdmi = data; u32 value; - int err; value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_INT_STATUS); tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_INT_STATUS); @@ -1614,16 +1763,7 @@ static irqreturn_t tegra_hdmi_irq(int irq, void *data) format = value & SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK; tegra_hda_parse_format(format, &hdmi->format); - - err = tegra_hdmi_setup_audio(hdmi); - if (err < 0) { - tegra_hdmi_disable_audio_infoframe(hdmi); - tegra_hdmi_disable_audio(hdmi); - } else { - tegra_hdmi_setup_audio_infoframe(hdmi); - tegra_hdmi_enable_audio_infoframe(hdmi); - tegra_hdmi_enable_audio(hdmi); - } + tegra_hdmi_reconfigure_audio(hdmi); } else { tegra_hdmi_disable_audio_infoframe(hdmi); tegra_hdmi_disable_audio(hdmi); @@ -1651,6 +1791,8 @@ static int tegra_hdmi_probe(struct platform_device *pdev) hdmi->stereo = false; hdmi->dvi = false; + mutex_init(&hdmi->audio_lock); + hdmi->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(hdmi->clk)) { dev_err(&pdev->dev, "failed to get clock\n"); @@ -1732,7 +1874,14 @@ static int tegra_hdmi_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, hdmi); - pm_runtime_enable(&pdev->dev); + + err = devm_pm_runtime_enable(&pdev->dev); + if (err) + return err; + + err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev); + if (err) + return err; INIT_LIST_HEAD(&hdmi->client.list); hdmi->client.ops = &hdmi_client_ops; @@ -1753,8 +1902,6 @@ static int tegra_hdmi_remove(struct platform_device *pdev) struct tegra_hdmi *hdmi = platform_get_drvdata(pdev); int err; - pm_runtime_disable(&pdev->dev); - err = host1x_client_unregister(&hdmi->client); if (err < 0) { dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h index 3efa1be07ff8..23c4b2115ed1 100644 --- a/drivers/gpu/drm/tegra/hub.h +++ b/drivers/gpu/drm/tegra/hub.h @@ -72,7 +72,6 @@ to_tegra_display_hub_state(struct drm_private_state *priv) return container_of(priv, struct tegra_display_hub_state, base); } -struct tegra_dc; struct tegra_plane; int tegra_display_hub_prepare(struct tegra_display_hub *hub); diff --git a/drivers/gpu/drm/tegra/nvdec.c b/drivers/gpu/drm/tegra/nvdec.c new file mode 100644 index 000000000000..79e1e88203cf --- /dev/null +++ b/drivers/gpu/drm/tegra/nvdec.c @@ -0,0 +1,466 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015-2021, NVIDIA Corporation. 
+ */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/host1x.h> +#include <linux/iommu.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> + +#include <soc/tegra/pmc.h> + +#include "drm.h" +#include "falcon.h" +#include "vic.h" + +struct nvdec_config { + const char *firmware; + unsigned int version; + bool supports_sid; +}; + +struct nvdec { + struct falcon falcon; + + void __iomem *regs; + struct tegra_drm_client client; + struct host1x_channel *channel; + struct device *dev; + struct clk *clk; + + /* Platform configuration */ + const struct nvdec_config *config; +}; + +static inline struct nvdec *to_nvdec(struct tegra_drm_client *client) +{ + return container_of(client, struct nvdec, client); +} + +static inline void nvdec_writel(struct nvdec *nvdec, u32 value, + unsigned int offset) +{ + writel(value, nvdec->regs + offset); +} + +static int nvdec_boot(struct nvdec *nvdec) +{ +#ifdef CONFIG_IOMMU_API + struct iommu_fwspec *spec = dev_iommu_fwspec_get(nvdec->dev); +#endif + int err; + +#ifdef CONFIG_IOMMU_API + if (nvdec->config->supports_sid && spec) { + u32 value; + + value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) | TRANSCFG_ATT(0, TRANSCFG_SID_HW); + nvdec_writel(nvdec, value, VIC_TFBIF_TRANSCFG); + + if (spec->num_ids > 0) { + value = spec->ids[0] & 0xffff; + + nvdec_writel(nvdec, value, VIC_THI_STREAMID0); + nvdec_writel(nvdec, value, VIC_THI_STREAMID1); + } + } +#endif + + err = falcon_boot(&nvdec->falcon); + if (err < 0) + return err; + + err = falcon_wait_idle(&nvdec->falcon); + if (err < 0) { + dev_err(nvdec->dev, "falcon boot timed out\n"); + return err; + } + + return 0; +} + +static int nvdec_init(struct host1x_client *client) +{ + struct tegra_drm_client *drm = host1x_to_drm_client(client); + struct drm_device *dev = dev_get_drvdata(client->host); + struct tegra_drm *tegra = dev->dev_private; + struct nvdec *nvdec = to_nvdec(drm); + int err; + + err = host1x_client_iommu_attach(client); + if (err < 0 && err != -ENODEV) { + dev_err(nvdec->dev, "failed to attach to domain: %d\n", err); + return err; + } + + nvdec->channel = host1x_channel_request(client); + if (!nvdec->channel) { + err = -ENOMEM; + goto detach; + } + + client->syncpts[0] = host1x_syncpt_request(client, 0); + if (!client->syncpts[0]) { + err = -ENOMEM; + goto free_channel; + } + + pm_runtime_enable(client->dev); + pm_runtime_use_autosuspend(client->dev); + pm_runtime_set_autosuspend_delay(client->dev, 500); + + err = tegra_drm_register_client(tegra, drm); + if (err < 0) + goto disable_rpm; + + /* + * Inherit the DMA parameters (such as maximum segment size) from the + * parent host1x device. 
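+	 *
+	 * Editor's sketch of the equivalent effect (illustrative only,
+	 * dma_set_max_seg_size() is not actually called here):
+	 *
+	 *   dma_set_max_seg_size(client->dev,
+	 *                        dma_get_max_seg_size(client->host));
+	 *
+	 * The pointer assignment below shares the parent's limits without
+	 * copying them.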
+ */ + client->dev->dma_parms = client->host->dma_parms; + + return 0; + +disable_rpm: + pm_runtime_dont_use_autosuspend(client->dev); + pm_runtime_force_suspend(client->dev); + + host1x_syncpt_put(client->syncpts[0]); +free_channel: + host1x_channel_put(nvdec->channel); +detach: + host1x_client_iommu_detach(client); + + return err; +} + +static int nvdec_exit(struct host1x_client *client) +{ + struct tegra_drm_client *drm = host1x_to_drm_client(client); + struct drm_device *dev = dev_get_drvdata(client->host); + struct tegra_drm *tegra = dev->dev_private; + struct nvdec *nvdec = to_nvdec(drm); + int err; + + /* avoid a dangling pointer just in case this disappears */ + client->dev->dma_parms = NULL; + + err = tegra_drm_unregister_client(tegra, drm); + if (err < 0) + return err; + + pm_runtime_dont_use_autosuspend(client->dev); + pm_runtime_force_suspend(client->dev); + + host1x_syncpt_put(client->syncpts[0]); + host1x_channel_put(nvdec->channel); + host1x_client_iommu_detach(client); + + nvdec->channel = NULL; + + if (client->group) { + dma_unmap_single(nvdec->dev, nvdec->falcon.firmware.phys, + nvdec->falcon.firmware.size, DMA_TO_DEVICE); + tegra_drm_free(tegra, nvdec->falcon.firmware.size, + nvdec->falcon.firmware.virt, + nvdec->falcon.firmware.iova); + } else { + dma_free_coherent(nvdec->dev, nvdec->falcon.firmware.size, + nvdec->falcon.firmware.virt, + nvdec->falcon.firmware.iova); + } + + return 0; +} + +static const struct host1x_client_ops nvdec_client_ops = { + .init = nvdec_init, + .exit = nvdec_exit, +}; + +static int nvdec_load_firmware(struct nvdec *nvdec) +{ + struct host1x_client *client = &nvdec->client.base; + struct tegra_drm *tegra = nvdec->client.drm; + dma_addr_t iova; + size_t size; + void *virt; + int err; + + if (nvdec->falcon.firmware.virt) + return 0; + + err = falcon_read_firmware(&nvdec->falcon, nvdec->config->firmware); + if (err < 0) + return err; + + size = nvdec->falcon.firmware.size; + + if (!client->group) { + virt = dma_alloc_coherent(nvdec->dev, size, &iova, GFP_KERNEL); + + err = dma_mapping_error(nvdec->dev, iova); + if (err < 0) + return err; + } else { + virt = tegra_drm_alloc(tegra, size, &iova); + } + + nvdec->falcon.firmware.virt = virt; + nvdec->falcon.firmware.iova = iova; + + err = falcon_load_firmware(&nvdec->falcon); + if (err < 0) + goto cleanup; + + /* + * In this case we have received an IOVA from the shared domain, so we + * need to make sure to get the physical address so that the DMA API + * knows what memory pages to flush the cache for. 
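+	 *
+	 * The matching teardown in nvdec_exit() must use the same
+	 * direction; roughly (sketch of the existing call):
+	 *
+	 *   dma_unmap_single(nvdec->dev, phys, size, DMA_TO_DEVICE);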
+ */ + if (client->group) { + dma_addr_t phys; + + phys = dma_map_single(nvdec->dev, virt, size, DMA_TO_DEVICE); + + err = dma_mapping_error(nvdec->dev, phys); + if (err < 0) + goto cleanup; + + nvdec->falcon.firmware.phys = phys; + } + + return 0; + +cleanup: + if (!client->group) + dma_free_coherent(nvdec->dev, size, virt, iova); + else + tegra_drm_free(tegra, size, virt, iova); + + return err; +} + + +static __maybe_unused int nvdec_runtime_resume(struct device *dev) +{ + struct nvdec *nvdec = dev_get_drvdata(dev); + int err; + + err = clk_prepare_enable(nvdec->clk); + if (err < 0) + return err; + + usleep_range(10, 20); + + err = nvdec_load_firmware(nvdec); + if (err < 0) + goto disable; + + err = nvdec_boot(nvdec); + if (err < 0) + goto disable; + + return 0; + +disable: + clk_disable_unprepare(nvdec->clk); + return err; +} + +static __maybe_unused int nvdec_runtime_suspend(struct device *dev) +{ + struct nvdec *nvdec = dev_get_drvdata(dev); + + host1x_channel_stop(nvdec->channel); + + clk_disable_unprepare(nvdec->clk); + + return 0; +} + +static int nvdec_open_channel(struct tegra_drm_client *client, + struct tegra_drm_context *context) +{ + struct nvdec *nvdec = to_nvdec(client); + + context->channel = host1x_channel_get(nvdec->channel); + if (!context->channel) + return -ENOMEM; + + return 0; +} + +static void nvdec_close_channel(struct tegra_drm_context *context) +{ + host1x_channel_put(context->channel); +} + +static const struct tegra_drm_client_ops nvdec_ops = { + .open_channel = nvdec_open_channel, + .close_channel = nvdec_close_channel, + .submit = tegra_drm_submit, +}; + +#define NVIDIA_TEGRA_210_NVDEC_FIRMWARE "nvidia/tegra210/nvdec.bin" + +static const struct nvdec_config nvdec_t210_config = { + .firmware = NVIDIA_TEGRA_210_NVDEC_FIRMWARE, + .version = 0x21, + .supports_sid = false, +}; + +#define NVIDIA_TEGRA_186_NVDEC_FIRMWARE "nvidia/tegra186/nvdec.bin" + +static const struct nvdec_config nvdec_t186_config = { + .firmware = NVIDIA_TEGRA_186_NVDEC_FIRMWARE, + .version = 0x18, + .supports_sid = true, +}; + +#define NVIDIA_TEGRA_194_NVDEC_FIRMWARE "nvidia/tegra194/nvdec.bin" + +static const struct nvdec_config nvdec_t194_config = { + .firmware = NVIDIA_TEGRA_194_NVDEC_FIRMWARE, + .version = 0x19, + .supports_sid = true, +}; + +static const struct of_device_id tegra_nvdec_of_match[] = { + { .compatible = "nvidia,tegra210-nvdec", .data = &nvdec_t210_config }, + { .compatible = "nvidia,tegra186-nvdec", .data = &nvdec_t186_config }, + { .compatible = "nvidia,tegra194-nvdec", .data = &nvdec_t194_config }, + { }, +}; +MODULE_DEVICE_TABLE(of, tegra_nvdec_of_match); + +static int nvdec_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct host1x_syncpt **syncpts; + struct nvdec *nvdec; + u32 host_class; + int err; + + /* inherit DMA mask from host1x parent */ + err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask); + if (err < 0) { + dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err); + return err; + } + + nvdec = devm_kzalloc(dev, sizeof(*nvdec), GFP_KERNEL); + if (!nvdec) + return -ENOMEM; + + nvdec->config = of_device_get_match_data(dev); + + syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL); + if (!syncpts) + return -ENOMEM; + + nvdec->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); + if (IS_ERR(nvdec->regs)) + return PTR_ERR(nvdec->regs); + + nvdec->clk = devm_clk_get(dev, NULL); + if (IS_ERR(nvdec->clk)) { + dev_err(&pdev->dev, "failed to get clock\n"); + return PTR_ERR(nvdec->clk); + } + + err = 
clk_set_rate(nvdec->clk, ULONG_MAX); + if (err < 0) { + dev_err(&pdev->dev, "failed to set clock rate\n"); + return err; + } + + err = of_property_read_u32(dev->of_node, "nvidia,host1x-class", &host_class); + if (err < 0) + host_class = HOST1X_CLASS_NVDEC; + + nvdec->falcon.dev = dev; + nvdec->falcon.regs = nvdec->regs; + + err = falcon_init(&nvdec->falcon); + if (err < 0) + return err; + + platform_set_drvdata(pdev, nvdec); + + INIT_LIST_HEAD(&nvdec->client.base.list); + nvdec->client.base.ops = &nvdec_client_ops; + nvdec->client.base.dev = dev; + nvdec->client.base.class = host_class; + nvdec->client.base.syncpts = syncpts; + nvdec->client.base.num_syncpts = 1; + nvdec->dev = dev; + + INIT_LIST_HEAD(&nvdec->client.list); + nvdec->client.version = nvdec->config->version; + nvdec->client.ops = &nvdec_ops; + + err = host1x_client_register(&nvdec->client.base); + if (err < 0) { + dev_err(dev, "failed to register host1x client: %d\n", err); + goto exit_falcon; + } + + return 0; + +exit_falcon: + falcon_exit(&nvdec->falcon); + + return err; +} + +static int nvdec_remove(struct platform_device *pdev) +{ + struct nvdec *nvdec = platform_get_drvdata(pdev); + int err; + + err = host1x_client_unregister(&nvdec->client.base); + if (err < 0) { + dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", + err); + return err; + } + + falcon_exit(&nvdec->falcon); + + return 0; +} + +static const struct dev_pm_ops nvdec_pm_ops = { + SET_RUNTIME_PM_OPS(nvdec_runtime_suspend, nvdec_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) +}; + +struct platform_driver tegra_nvdec_driver = { + .driver = { + .name = "tegra-nvdec", + .of_match_table = tegra_nvdec_of_match, + .pm = &nvdec_pm_ops + }, + .probe = nvdec_probe, + .remove = nvdec_remove, +}; + +#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) +MODULE_FIRMWARE(NVIDIA_TEGRA_210_NVDEC_FIRMWARE); +#endif +#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) +MODULE_FIRMWARE(NVIDIA_TEGRA_186_NVDEC_FIRMWARE); +#endif +#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) +MODULE_FIRMWARE(NVIDIA_TEGRA_194_NVDEC_FIRMWARE); +#endif diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c index 16a1cdc28657..321cb1f13da6 100644 --- a/drivers/gpu/drm/tegra/plane.c +++ b/drivers/gpu/drm/tegra/plane.c @@ -74,7 +74,7 @@ tegra_plane_atomic_duplicate_state(struct drm_plane *plane) for (i = 0; i < 3; i++) { copy->iova[i] = DMA_MAPPING_ERROR; - copy->sgt[i] = NULL; + copy->map[i] = NULL; } return ©->base; @@ -138,55 +138,37 @@ const struct drm_plane_funcs tegra_plane_funcs = { static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state) { - struct iommu_domain *domain = iommu_get_domain_for_dev(dc->dev); unsigned int i; int err; for (i = 0; i < state->base.fb->format->num_planes; i++) { struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i); - dma_addr_t phys_addr, *phys; - struct sg_table *sgt; + struct host1x_bo_mapping *map; - /* - * If we're not attached to a domain, we already stored the - * physical address when the buffer was allocated. If we're - * part of a group that's shared between all display - * controllers, we've also already mapped the framebuffer - * through the SMMU. In both cases we can short-circuit the - * code below and retrieve the stored IOV address. 
- */ - if (!domain || dc->client.group) - phys = &phys_addr; - else - phys = NULL; - - sgt = host1x_bo_pin(dc->dev, &bo->base, phys); - if (IS_ERR(sgt)) { - err = PTR_ERR(sgt); + map = host1x_bo_pin(dc->dev, &bo->base, DMA_TO_DEVICE, &dc->client.cache); + if (IS_ERR(map)) { + err = PTR_ERR(map); goto unpin; } - if (sgt) { - err = dma_map_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0); - if (err) - goto unpin; - + if (!dc->client.group) { /* * The display controller needs contiguous memory, so * fail if the buffer is discontiguous and we fail to * map its SG table to a single contiguous chunk of * I/O virtual memory. */ - if (sgt->nents > 1) { + if (map->chunks > 1) { err = -EINVAL; goto unpin; } - state->iova[i] = sg_dma_address(sgt->sgl); - state->sgt[i] = sgt; + state->iova[i] = map->phys; } else { - state->iova[i] = phys_addr; + state->iova[i] = bo->iova; } + + state->map[i] = map; } return 0; @@ -195,15 +177,9 @@ unpin: dev_err(dc->dev, "failed to map plane %u: %d\n", i, err); while (i--) { - struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i); - struct sg_table *sgt = state->sgt[i]; - - if (sgt) - dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0); - - host1x_bo_unpin(dc->dev, &bo->base, sgt); + host1x_bo_unpin(state->map[i]); state->iova[i] = DMA_MAPPING_ERROR; - state->sgt[i] = NULL; + state->map[i] = NULL; } return err; @@ -214,15 +190,9 @@ static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state) unsigned int i; for (i = 0; i < state->base.fb->format->num_planes; i++) { - struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i); - struct sg_table *sgt = state->sgt[i]; - - if (sgt) - dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0); - - host1x_bo_unpin(dc->dev, &bo->base, sgt); + host1x_bo_unpin(state->map[i]); state->iova[i] = DMA_MAPPING_ERROR; - state->sgt[i] = NULL; + state->map[i] = NULL; } } @@ -230,11 +200,14 @@ int tegra_plane_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) { struct tegra_dc *dc = to_tegra_dc(state->crtc); + int err; if (!state->fb) return 0; - drm_gem_plane_helper_prepare_fb(plane, state); + err = drm_gem_plane_helper_prepare_fb(plane, state); + if (err < 0) + return err; return tegra_dc_pin(dc, to_tegra_plane_state(state)); } diff --git a/drivers/gpu/drm/tegra/plane.h b/drivers/gpu/drm/tegra/plane.h index d9470780c803..dfb20712fbd7 100644 --- a/drivers/gpu/drm/tegra/plane.h +++ b/drivers/gpu/drm/tegra/plane.h @@ -43,7 +43,7 @@ struct tegra_plane_legacy_blending_state { struct tegra_plane_state { struct drm_plane_state base; - struct sg_table *sgt[3]; + struct host1x_bo_mapping *map[3]; dma_addr_t iova[3]; struct tegra_bo_tiling tiling; diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c index 606c78a2b988..ff8fce36d2aa 100644 --- a/drivers/gpu/drm/tegra/rgb.c +++ b/drivers/gpu/drm/tegra/rgb.c @@ -17,6 +17,8 @@ struct tegra_rgb { struct tegra_output output; struct tegra_dc *dc; + struct clk *pll_d_out0; + struct clk *pll_d2_out0; struct clk *clk_parent; struct clk *clk; }; @@ -116,13 +118,21 @@ static void tegra_rgb_encoder_enable(struct drm_encoder *encoder) DISP_ORDER_RED_BLUE; tegra_dc_writel(rgb->dc, value, DC_DISP_DISP_INTERFACE_CONTROL); - /* XXX: parameterize? 
*/ - value = SC0_H_QUALIFIER_NONE | SC1_H_QUALIFIER_NONE; - tegra_dc_writel(rgb->dc, value, DC_DISP_SHIFT_CLOCK_OPTIONS); - tegra_dc_commit(rgb->dc); } +static bool tegra_rgb_pll_rate_change_allowed(struct tegra_rgb *rgb) +{ + if (!rgb->pll_d2_out0) + return false; + + if (!clk_is_match(rgb->clk_parent, rgb->pll_d_out0) && + !clk_is_match(rgb->clk_parent, rgb->pll_d2_out0)) + return false; + + return true; +} + static int tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, @@ -151,8 +161,17 @@ tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder, * and hope that the desired frequency can be matched (or at least * matched sufficiently close that the panel will still work). */ - div = ((clk_get_rate(rgb->clk) * 2) / pclk) - 2; - pclk = 0; + if (tegra_rgb_pll_rate_change_allowed(rgb)) { + /* + * Set display controller clock to x2 of PCLK in order to + * produce higher resolution pulse positions. + */ + div = 2; + pclk *= 2; + } else { + div = ((clk_get_rate(rgb->clk) * 2) / pclk) - 2; + pclk = 0; + } err = tegra_dc_state_setup_clock(dc, crtc_state, rgb->clk_parent, pclk, div); @@ -210,6 +229,22 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc) return err; } + rgb->pll_d_out0 = clk_get_sys(NULL, "pll_d_out0"); + if (IS_ERR(rgb->pll_d_out0)) { + err = PTR_ERR(rgb->pll_d_out0); + dev_err(dc->dev, "failed to get pll_d_out0: %d\n", err); + return err; + } + + if (dc->soc->has_pll_d2_out0) { + rgb->pll_d2_out0 = clk_get_sys(NULL, "pll_d2_out0"); + if (IS_ERR(rgb->pll_d2_out0)) { + err = PTR_ERR(rgb->pll_d2_out0); + dev_err(dc->dev, "failed to get pll_d2_out0: %d\n", err); + return err; + } + } + dc->rgb = &rgb->output; return 0; @@ -217,9 +252,15 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc) int tegra_dc_rgb_remove(struct tegra_dc *dc) { + struct tegra_rgb *rgb; + if (!dc->rgb) return 0; + rgb = to_rgb(dc->rgb); + clk_put(rgb->pll_d2_out0); + clk_put(rgb->pll_d_out0); + tegra_output_remove(dc->rgb); dc->rgb = NULL; diff --git a/drivers/gpu/drm/tegra/submit.c b/drivers/gpu/drm/tegra/submit.c index 776f825df52f..6d6dd8c35475 100644 --- a/drivers/gpu/drm/tegra/submit.c +++ b/drivers/gpu/drm/tegra/submit.c @@ -64,33 +64,62 @@ static void gather_bo_put(struct host1x_bo *host_bo) kref_put(&bo->ref, gather_bo_release); } -static struct sg_table * -gather_bo_pin(struct device *dev, struct host1x_bo *host_bo, dma_addr_t *phys) +static struct host1x_bo_mapping * +gather_bo_pin(struct device *dev, struct host1x_bo *bo, enum dma_data_direction direction) { - struct gather_bo *bo = container_of(host_bo, struct gather_bo, base); - struct sg_table *sgt; + struct gather_bo *gather = container_of(bo, struct gather_bo, base); + struct host1x_bo_mapping *map; int err; - sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); - if (!sgt) + map = kzalloc(sizeof(*map), GFP_KERNEL); + if (!map) return ERR_PTR(-ENOMEM); - err = dma_get_sgtable(bo->dev, sgt, bo->gather_data, bo->gather_data_dma, - bo->gather_data_words * 4); - if (err) { - kfree(sgt); - return ERR_PTR(err); + kref_init(&map->ref); + map->bo = host1x_bo_get(bo); + map->direction = direction; + map->dev = dev; + + map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL); + if (!map->sgt) { + err = -ENOMEM; + goto free; } - return sgt; + err = dma_get_sgtable(gather->dev, map->sgt, gather->gather_data, gather->gather_data_dma, + gather->gather_data_words * 4); + if (err) + goto free_sgt; + + err = dma_map_sgtable(dev, map->sgt, direction, 0); + if (err) + goto free_sgt; + + map->phys = sg_dma_address(map->sgt->sgl); + map->size = 
gather->gather_data_words * 4; + map->chunks = err; + + return map; + +free_sgt: + sg_free_table(map->sgt); + kfree(map->sgt); +free: + kfree(map); + return ERR_PTR(err); } -static void gather_bo_unpin(struct device *dev, struct sg_table *sgt) +static void gather_bo_unpin(struct host1x_bo_mapping *map) { - if (sgt) { - sg_free_table(sgt); - kfree(sgt); - } + if (!map) + return; + + dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0); + sg_free_table(map->sgt); + kfree(map->sgt); + host1x_bo_put(map->bo); + + kfree(map); } static void *gather_bo_mmap(struct host1x_bo *host_bo) @@ -475,8 +504,8 @@ static void release_job(struct host1x_job *job) kfree(job_data->used_mappings); kfree(job_data); - if (pm_runtime_enabled(client->base.dev)) - pm_runtime_put_autosuspend(client->base.dev); + pm_runtime_mark_last_busy(client->base.dev); + pm_runtime_put_autosuspend(client->base.dev); } int tegra_drm_ioctl_channel_submit(struct drm_device *drm, void *data, @@ -560,12 +589,10 @@ int tegra_drm_ioctl_channel_submit(struct drm_device *drm, void *data, } /* Boot engine. */ - if (pm_runtime_enabled(context->client->base.dev)) { - err = pm_runtime_resume_and_get(context->client->base.dev); - if (err < 0) { - SUBMIT_ERR(context, "could not power up engine: %d", err); - goto unpin_job; - } + err = pm_runtime_resume_and_get(context->client->base.dev); + if (err < 0) { + SUBMIT_ERR(context, "could not power up engine: %d", err); + goto unpin_job; } job->user_data = job_data; diff --git a/drivers/gpu/drm/tegra/uapi.c b/drivers/gpu/drm/tegra/uapi.c index 690a339c52ec..9ab9179d2026 100644 --- a/drivers/gpu/drm/tegra/uapi.c +++ b/drivers/gpu/drm/tegra/uapi.c @@ -17,11 +17,7 @@ static void tegra_drm_mapping_release(struct kref *ref) struct tegra_drm_mapping *mapping = container_of(ref, struct tegra_drm_mapping, ref); - if (mapping->sgt) - dma_unmap_sgtable(mapping->dev, mapping->sgt, mapping->direction, - DMA_ATTR_SKIP_CPU_SYNC); - - host1x_bo_unpin(mapping->dev, mapping->bo, mapping->sgt); + host1x_bo_unpin(mapping->map); host1x_bo_put(mapping->bo); kfree(mapping); @@ -159,6 +155,7 @@ int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_f struct drm_tegra_channel_map *args = data; struct tegra_drm_mapping *mapping; struct tegra_drm_context *context; + enum dma_data_direction direction; int err = 0; if (args->flags & ~DRM_TEGRA_CHANNEL_MAP_READ_WRITE) @@ -180,68 +177,53 @@ int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_f kref_init(&mapping->ref); - mapping->dev = context->client->base.dev; mapping->bo = tegra_gem_lookup(file, args->handle); if (!mapping->bo) { err = -EINVAL; - goto unlock; + goto free; } - if (context->client->base.group) { - /* IOMMU domain managed directly using IOMMU API */ - host1x_bo_pin(mapping->dev, mapping->bo, &mapping->iova); - } else { - switch (args->flags & DRM_TEGRA_CHANNEL_MAP_READ_WRITE) { - case DRM_TEGRA_CHANNEL_MAP_READ_WRITE: - mapping->direction = DMA_BIDIRECTIONAL; - break; - - case DRM_TEGRA_CHANNEL_MAP_WRITE: - mapping->direction = DMA_FROM_DEVICE; - break; - - case DRM_TEGRA_CHANNEL_MAP_READ: - mapping->direction = DMA_TO_DEVICE; - break; + switch (args->flags & DRM_TEGRA_CHANNEL_MAP_READ_WRITE) { + case DRM_TEGRA_CHANNEL_MAP_READ_WRITE: + direction = DMA_BIDIRECTIONAL; + break; - default: - return -EINVAL; - } + case DRM_TEGRA_CHANNEL_MAP_WRITE: + direction = DMA_FROM_DEVICE; + break; - mapping->sgt = host1x_bo_pin(mapping->dev, mapping->bo, NULL); - if (IS_ERR(mapping->sgt)) { - err = PTR_ERR(mapping->sgt); - 
goto put_gem; - } + case DRM_TEGRA_CHANNEL_MAP_READ: + direction = DMA_TO_DEVICE; + break; - err = dma_map_sgtable(mapping->dev, mapping->sgt, mapping->direction, - DMA_ATTR_SKIP_CPU_SYNC); - if (err) - goto unpin; + default: + err = -EINVAL; + goto put_gem; + } - mapping->iova = sg_dma_address(mapping->sgt->sgl); + mapping->map = host1x_bo_pin(context->client->base.dev, mapping->bo, direction, NULL); + if (IS_ERR(mapping->map)) { + err = PTR_ERR(mapping->map); + goto put_gem; } + mapping->iova = mapping->map->phys; mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->gem.size; err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX), GFP_KERNEL); if (err < 0) - goto unmap; + goto unpin; mutex_unlock(&fpriv->lock); return 0; -unmap: - if (mapping->sgt) { - dma_unmap_sgtable(mapping->dev, mapping->sgt, mapping->direction, - DMA_ATTR_SKIP_CPU_SYNC); - } unpin: - host1x_bo_unpin(mapping->dev, mapping->bo, mapping->sgt); + host1x_bo_unpin(mapping->map); put_gem: host1x_bo_put(mapping->bo); +free: kfree(mapping); unlock: mutex_unlock(&fpriv->lock); diff --git a/drivers/gpu/drm/tegra/uapi.h b/drivers/gpu/drm/tegra/uapi.h index 12adad770ad3..92ff1e44ff15 100644 --- a/drivers/gpu/drm/tegra/uapi.h +++ b/drivers/gpu/drm/tegra/uapi.h @@ -27,10 +27,9 @@ struct tegra_drm_file { struct tegra_drm_mapping { struct kref ref; - struct device *dev; + struct host1x_bo_mapping *map; struct host1x_bo *bo; - struct sg_table *sgt; - enum dma_data_direction direction; + dma_addr_t iova; dma_addr_t iova_end; }; diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c index c02010ff2b7f..1e342fa3d27b 100644 --- a/drivers/gpu/drm/tegra/vic.c +++ b/drivers/gpu/drm/tegra/vic.c @@ -5,6 +5,7 @@ #include <linux/clk.h> #include <linux/delay.h> +#include <linux/dma-mapping.h> #include <linux/host1x.h> #include <linux/iommu.h> #include <linux/module.h> @@ -151,9 +152,13 @@ static int vic_init(struct host1x_client *client) goto free_channel; } + pm_runtime_enable(client->dev); + pm_runtime_use_autosuspend(client->dev); + pm_runtime_set_autosuspend_delay(client->dev, 500); + err = tegra_drm_register_client(tegra, drm); if (err < 0) - goto free_syncpt; + goto disable_rpm; /* * Inherit the DMA parameters (such as maximum segment size) from the @@ -163,7 +168,10 @@ static int vic_init(struct host1x_client *client) return 0; -free_syncpt: +disable_rpm: + pm_runtime_dont_use_autosuspend(client->dev); + pm_runtime_force_suspend(client->dev); + host1x_syncpt_put(client->syncpts[0]); free_channel: host1x_channel_put(vic->channel); @@ -188,10 +196,15 @@ static int vic_exit(struct host1x_client *client) if (err < 0) return err; + pm_runtime_dont_use_autosuspend(client->dev); + pm_runtime_force_suspend(client->dev); + host1x_syncpt_put(client->syncpts[0]); host1x_channel_put(vic->channel); host1x_client_iommu_detach(client); + vic->channel = NULL; + if (client->group) { dma_unmap_single(vic->dev, vic->falcon.firmware.phys, vic->falcon.firmware.size, DMA_TO_DEVICE); @@ -232,12 +245,12 @@ static int vic_load_firmware(struct vic *vic) if (!client->group) { virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL); - - err = dma_mapping_error(vic->dev, iova); - if (err < 0) - return err; + if (!virt) + return -ENOMEM; } else { virt = tegra_drm_alloc(tegra, size, &iova); + if (IS_ERR(virt)) + return PTR_ERR(virt); } vic->falcon.firmware.virt = virt; @@ -315,6 +328,8 @@ static int vic_runtime_suspend(struct device *dev) struct vic *vic = dev_get_drvdata(dev); int err; + 
host1x_channel_stop(vic->channel); + err = reset_control_assert(vic->rst); if (err < 0) return err; @@ -330,27 +345,17 @@ static int vic_open_channel(struct tegra_drm_client *client, struct tegra_drm_context *context) { struct vic *vic = to_vic(client); - int err; - - err = pm_runtime_resume_and_get(vic->dev); - if (err < 0) - return err; context->channel = host1x_channel_get(vic->channel); - if (!context->channel) { - pm_runtime_put(vic->dev); + if (!context->channel) return -ENOMEM; - } return 0; } static void vic_close_channel(struct tegra_drm_context *context) { - struct vic *vic = to_vic(context->client); - host1x_channel_put(context->channel); - pm_runtime_put(vic->dev); } static const struct tegra_drm_client_ops vic_ops = { @@ -441,6 +446,12 @@ static int vic_probe(struct platform_device *pdev) return PTR_ERR(vic->clk); } + err = clk_set_rate(vic->clk, ULONG_MAX); + if (err < 0) { + dev_err(&pdev->dev, "failed to set clock rate\n"); + return err; + } + if (!dev->pm_domain) { vic->rst = devm_reset_control_get(dev, "vic"); if (IS_ERR(vic->rst)) { @@ -476,17 +487,8 @@ static int vic_probe(struct platform_device *pdev) goto exit_falcon; } - pm_runtime_enable(&pdev->dev); - if (!pm_runtime_enabled(&pdev->dev)) { - err = vic_runtime_resume(&pdev->dev); - if (err < 0) - goto unregister_client; - } - return 0; -unregister_client: - host1x_client_unregister(&vic->client.base); exit_falcon: falcon_exit(&vic->falcon); @@ -505,11 +507,6 @@ static int vic_remove(struct platform_device *pdev) return err; } - if (pm_runtime_enabled(&pdev->dev)) - pm_runtime_disable(&pdev->dev); - else - vic_runtime_suspend(&pdev->dev); - falcon_exit(&vic->falcon); return 0; @@ -517,6 +514,8 @@ static int vic_remove(struct platform_device *pdev) static const struct dev_pm_ops vic_pm_ops = { SET_RUNTIME_PM_OPS(vic_runtime_suspend, vic_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) }; struct platform_driver tegra_vic_driver = { diff --git a/drivers/gpu/drm/tidss/Kconfig b/drivers/gpu/drm/tidss/Kconfig index f790a5215302..bc4fa59b6fa9 100644 --- a/drivers/gpu/drm/tidss/Kconfig +++ b/drivers/gpu/drm/tidss/Kconfig @@ -3,7 +3,6 @@ config DRM_TIDSS depends on DRM && OF depends on ARM || ARM64 || COMPILE_TEST select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER help The TI Keystone family SoCs introduced a new generation of diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c index d620f35688da..7c784e90e40e 100644 --- a/drivers/gpu/drm/tidss/tidss_drv.c +++ b/drivers/gpu/drm/tidss/tidss_drv.c @@ -88,16 +88,11 @@ static int __maybe_unused tidss_resume(struct device *dev) return drm_mode_config_helper_resume(&tidss->ddev); } -#ifdef CONFIG_PM - -static const struct dev_pm_ops tidss_pm_ops = { - .runtime_suspend = tidss_pm_runtime_suspend, - .runtime_resume = tidss_pm_runtime_resume, +static __maybe_unused const struct dev_pm_ops tidss_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(tidss_suspend, tidss_resume) + SET_RUNTIME_PM_OPS(tidss_pm_runtime_suspend, tidss_pm_runtime_resume, NULL) }; -#endif /* CONFIG_PM */ - /* DRM device Information */ static void tidss_release(struct drm_device *ddev) @@ -250,9 +245,7 @@ static struct platform_driver tidss_platform_driver = { .shutdown = tidss_shutdown, .driver = { .name = "tidss", -#ifdef CONFIG_PM - .pm = &tidss_pm_ops, -#endif + .pm = pm_ptr(&tidss_pm_ops), .of_match_table = tidss_of_table, .suppress_bind_attrs = true, }, diff --git a/drivers/gpu/drm/tilcdc/Kconfig 
b/drivers/gpu/drm/tilcdc/Kconfig index 9f505a149990..e315591eb36b 100644 --- a/drivers/gpu/drm/tilcdc/Kconfig +++ b/drivers/gpu/drm/tilcdc/Kconfig @@ -3,7 +3,6 @@ config DRM_TILCDC tristate "DRM Support for TI LCDC Display Controller" depends on DRM && OF && ARM select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER select DRM_BRIDGE select DRM_PANEL_BRIDGE diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig index 1ceb93fbdc50..712e0004e96e 100644 --- a/drivers/gpu/drm/tiny/Kconfig +++ b/drivers/gpu/drm/tiny/Kconfig @@ -3,7 +3,7 @@ config DRM_ARCPGU tristate "ARC PGU" depends on DRM && OF - select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER select DRM_KMS_HELPER help Choose this option if you have an ARC PGU controller. @@ -71,7 +71,7 @@ config TINYDRM_HX8357D tristate "DRM support for HX8357D display panels" depends on DRM && SPI select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER select DRM_MIPI_DBI select BACKLIGHT_CLASS_DEVICE help @@ -80,11 +80,24 @@ config TINYDRM_HX8357D If M is selected the module will be called hx8357d. +config TINYDRM_ILI9163 + tristate "DRM support for ILI9163 display panels" + depends on DRM && SPI + select BACKLIGHT_CLASS_DEVICE + select DRM_GEM_CMA_HELPER + select DRM_KMS_HELPER + select DRM_MIPI_DBI + help + DRM driver for the following Ilitek ILI9163 panels: + * NHD-1.8-128160EF 128x160 TFT + + If M is selected the module will be called ili9163. + config TINYDRM_ILI9225 tristate "DRM support for ILI9225 display panels" depends on DRM && SPI select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER select DRM_MIPI_DBI help DRM driver for the following Ilitek ILI9225 panels: @@ -96,7 +109,7 @@ config TINYDRM_ILI9341 tristate "DRM support for ILI9341 display panels" depends on DRM && SPI select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER select DRM_MIPI_DBI select BACKLIGHT_CLASS_DEVICE help @@ -109,7 +122,7 @@ config TINYDRM_ILI9486 tristate "DRM support for ILI9486 display panels" depends on DRM && SPI select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER select DRM_MIPI_DBI select BACKLIGHT_CLASS_DEVICE help @@ -123,7 +136,7 @@ config TINYDRM_MI0283QT tristate "DRM support for MI0283QT" depends on DRM && SPI select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER select DRM_MIPI_DBI select BACKLIGHT_CLASS_DEVICE help @@ -134,7 +147,7 @@ config TINYDRM_REPAPER tristate "DRM support for Pervasive Displays RePaper panels (V231)" depends on DRM && SPI select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER help DRM driver for the following Pervasive Displays panels: 1.44" TFT EPD Panel (E1144CS021) @@ -148,7 +161,7 @@ config TINYDRM_ST7586 tristate "DRM support for Sitronix ST7586 display panels" depends on DRM && SPI select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER select DRM_MIPI_DBI help DRM driver for the following Sitronix ST7586 panels: @@ -160,7 +173,7 @@ config TINYDRM_ST7735R tristate "DRM support for Sitronix ST7715R/ST7735R display panels" depends on DRM && SPI select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER select DRM_MIPI_DBI select BACKLIGHT_CLASS_DEVICE help diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile index e09942895c77..5d5505d40e7b 100644 --- a/drivers/gpu/drm/tiny/Makefile +++ b/drivers/gpu/drm/tiny/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o 
obj-$(CONFIG_DRM_GM12U320) += gm12u320.o obj-$(CONFIG_DRM_SIMPLEDRM) += simpledrm.o obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o +obj-$(CONFIG_TINYDRM_ILI9163) += ili9163.o obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o obj-$(CONFIG_TINYDRM_ILI9341) += ili9341.o obj-$(CONFIG_TINYDRM_ILI9486) += ili9486.o diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c index 2ce3bd903b70..fc26a1ce11ee 100644 --- a/drivers/gpu/drm/tiny/bochs.c +++ b/drivers/gpu/drm/tiny/bochs.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -#include <linux/console.h> #include <linux/pci.h> #include <drm/drm_aperture.h> @@ -719,7 +718,7 @@ static struct pci_driver bochs_pci_driver = { static int __init bochs_init(void) { - if (vgacon_text_force() && bochs_modeset == -1) + if (drm_firmware_drivers_only() && bochs_modeset == -1) return -EINVAL; if (bochs_modeset == 0) diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c index 4611ec408506..c95d9ff7d600 100644 --- a/drivers/gpu/drm/tiny/cirrus.c +++ b/drivers/gpu/drm/tiny/cirrus.c @@ -16,7 +16,6 @@ * Copyright 1999-2001 Jeff Garzik <jgarzik@pobox.com> */ -#include <linux/console.h> #include <linux/dma-buf-map.h> #include <linux/module.h> #include <linux/pci.h> @@ -317,28 +316,28 @@ static int cirrus_fb_blit_rect(struct drm_framebuffer *fb, const struct dma_buf_ struct drm_rect *rect) { struct cirrus_device *cirrus = to_cirrus(fb->dev); + void __iomem *dst = cirrus->vram; void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */ int idx; if (!drm_dev_enter(&cirrus->dev, &idx)) return -ENODEV; - if (cirrus->cpp == fb->format->cpp[0]) - drm_fb_memcpy_dstclip(cirrus->vram, fb->pitches[0], - vmap, fb, rect); + if (cirrus->cpp == fb->format->cpp[0]) { + dst += drm_fb_clip_offset(fb->pitches[0], fb->format, rect); + drm_fb_memcpy_toio(dst, fb->pitches[0], vmap, fb, rect); - else if (fb->format->cpp[0] == 4 && cirrus->cpp == 2) - drm_fb_xrgb8888_to_rgb565_dstclip(cirrus->vram, - cirrus->pitch, - vmap, fb, rect, false); + } else if (fb->format->cpp[0] == 4 && cirrus->cpp == 2) { + dst += drm_fb_clip_offset(cirrus->pitch, fb->format, rect); + drm_fb_xrgb8888_to_rgb565_toio(dst, cirrus->pitch, vmap, fb, rect, false); - else if (fb->format->cpp[0] == 4 && cirrus->cpp == 3) - drm_fb_xrgb8888_to_rgb888_dstclip(cirrus->vram, - cirrus->pitch, - vmap, fb, rect); + } else if (fb->format->cpp[0] == 4 && cirrus->cpp == 3) { + dst += drm_fb_clip_offset(cirrus->pitch, fb->format, rect); + drm_fb_xrgb8888_to_rgb888_toio(dst, cirrus->pitch, vmap, fb, rect); - else + } else { WARN_ON_ONCE("cpp mismatch"); + } drm_dev_exit(idx); @@ -636,8 +635,9 @@ static struct pci_driver cirrus_pci_driver = { static int __init cirrus_init(void) { - if (vgacon_text_force()) + if (drm_firmware_drivers_only()) return -EINVAL; + return pci_register_driver(&cirrus_pci_driver); } diff --git a/drivers/gpu/drm/tiny/ili9163.c b/drivers/gpu/drm/tiny/ili9163.c new file mode 100644 index 000000000000..bcc181351236 --- /dev/null +++ b/drivers/gpu/drm/tiny/ili9163.c @@ -0,0 +1,225 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/property.h> +#include <linux/spi/spi.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_mipi_dbi.h> +#include <drm/drm_modeset_helper.h> + +#include <video/mipi_display.h> + 
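+/*
+ * Editor's illustration, not in the original patch: the rotation
+ * handling below resolves a 90-degree rotation to the MADCTL
+ * axis-swap/mirror bits, roughly:
+ *
+ *   mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE,
+ *                    ILI9163_MADCTL_MX | ILI9163_MADCTL_MV |
+ *                    ILI9163_MADCTL_BGR);
+ */
+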
+#define ILI9163_FRMCTR1 0xb1 + +#define ILI9163_PWCTRL1 0xc0 +#define ILI9163_PWCTRL2 0xc1 +#define ILI9163_VMCTRL1 0xc5 +#define ILI9163_VMCTRL2 0xc7 +#define ILI9163_PWCTRLA 0xcb +#define ILI9163_PWCTRLB 0xcf + +#define ILI9163_EN3GAM 0xf2 + +#define ILI9163_MADCTL_BGR BIT(3) +#define ILI9163_MADCTL_MV BIT(5) +#define ILI9163_MADCTL_MX BIT(6) +#define ILI9163_MADCTL_MY BIT(7) + +static void yx240qv29_enable(struct drm_simple_display_pipe *pipe, + struct drm_crtc_state *crtc_state, + struct drm_plane_state *plane_state) +{ + struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev); + struct mipi_dbi *dbi = &dbidev->dbi; + u8 addr_mode; + int ret, idx; + + if (!drm_dev_enter(pipe->crtc.dev, &idx)) + return; + + DRM_DEBUG_KMS("\n"); + + ret = mipi_dbi_poweron_conditional_reset(dbidev); + if (ret < 0) + goto out_exit; + if (ret == 1) + goto out_enable; + + /* Gamma */ + mipi_dbi_command(dbi, MIPI_DCS_SET_GAMMA_CURVE, 0x04); + mipi_dbi_command(dbi, ILI9163_EN3GAM, 0x00); + + /* Frame Rate */ + mipi_dbi_command(dbi, ILI9163_FRMCTR1, 0x0a, 0x14); + + /* Power Control */ + mipi_dbi_command(dbi, ILI9163_PWCTRL1, 0x0a, 0x00); + mipi_dbi_command(dbi, ILI9163_PWCTRL2, 0x02); + + /* VCOM */ + mipi_dbi_command(dbi, ILI9163_VMCTRL1, 0x2f, 0x3e); + mipi_dbi_command(dbi, ILI9163_VMCTRL2, 0x40); + + /* Memory Access Control */ + mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT); + + mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE); + msleep(100); + + mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON); + msleep(100); + +out_enable: + switch (dbidev->rotation) { + default: + addr_mode = ILI9163_MADCTL_MX | ILI9163_MADCTL_MY; + break; + case 90: + addr_mode = ILI9163_MADCTL_MX | ILI9163_MADCTL_MV; + break; + case 180: + addr_mode = 0; + break; + case 270: + addr_mode = ILI9163_MADCTL_MY | ILI9163_MADCTL_MV; + break; + } + addr_mode |= ILI9163_MADCTL_BGR; + mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode); + mipi_dbi_enable_flush(dbidev, crtc_state, plane_state); +out_exit: + drm_dev_exit(idx); +} + +static const struct drm_simple_display_pipe_funcs ili9163_pipe_funcs = { + .enable = yx240qv29_enable, + .disable = mipi_dbi_pipe_disable, + .update = mipi_dbi_pipe_update, + .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, +}; + +static const struct drm_display_mode yx240qv29_mode = { + DRM_SIMPLE_MODE(128, 160, 28, 35), +}; + +DEFINE_DRM_GEM_CMA_FOPS(ili9163_fops); + +static struct drm_driver ili9163_driver = { + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, + .fops = &ili9163_fops, + DRM_GEM_CMA_DRIVER_OPS_VMAP, + .debugfs_init = mipi_dbi_debugfs_init, + .name = "ili9163", + .desc = "Ilitek ILI9163", + .date = "20210208", + .major = 1, + .minor = 0, +}; + +static const struct of_device_id ili9163_of_match[] = { + { .compatible = "newhaven,1.8-128160EF" }, + { } +}; +MODULE_DEVICE_TABLE(of, ili9163_of_match); + +static const struct spi_device_id ili9163_id[] = { + { "nhd-1.8-128160EF", 0 }, + { } +}; +MODULE_DEVICE_TABLE(spi, ili9163_id); + +static int ili9163_probe(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct mipi_dbi_dev *dbidev; + struct drm_device *drm; + struct mipi_dbi *dbi; + struct gpio_desc *dc; + u32 rotation = 0; + int ret; + + dbidev = devm_drm_dev_alloc(dev, &ili9163_driver, + struct mipi_dbi_dev, drm); + if (IS_ERR(dbidev)) + return PTR_ERR(dbidev); + + dbi = &dbidev->dbi; + drm = &dbidev->drm; + + spi_set_drvdata(spi, drm); + + dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if 
(IS_ERR(dbi->reset)) { + DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n"); + return PTR_ERR(dbi->reset); + } + + dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW); + if (IS_ERR(dc)) { + DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n"); + return PTR_ERR(dc); + } + + dbidev->backlight = devm_of_find_backlight(dev); + if (IS_ERR(dbidev->backlight)) + return PTR_ERR(dbidev->backlight); + + device_property_read_u32(dev, "rotation", &rotation); + + ret = mipi_dbi_spi_init(spi, dbi, dc); + if (ret) + return ret; + + ret = mipi_dbi_dev_init(dbidev, &ili9163_pipe_funcs, &yx240qv29_mode, rotation); + if (ret) + return ret; + + drm_mode_config_reset(drm); + + ret = drm_dev_register(drm, 0); + if (ret) + return ret; + + drm_fbdev_generic_setup(drm, 0); + + return 0; +} + +static int ili9163_remove(struct spi_device *spi) +{ + struct drm_device *drm = spi_get_drvdata(spi); + + drm_dev_unplug(drm); + drm_atomic_helper_shutdown(drm); + + return 0; +} + +static void ili9163_shutdown(struct spi_device *spi) +{ + drm_atomic_helper_shutdown(spi_get_drvdata(spi)); +} + +static struct spi_driver ili9163_spi_driver = { + .driver = { + .name = "ili9163", + .of_match_table = ili9163_of_match, + }, + .id_table = ili9163_id, + .probe = ili9163_probe, + .remove = ili9163_remove, + .shutdown = ili9163_shutdown, +}; +module_spi_driver(ili9163_spi_driver); + +MODULE_DESCRIPTION("Ilitek ILI9163 DRM driver"); +MODULE_AUTHOR("Daniel Mack <daniel@zonque.org>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c index 4d07b21a16e6..97a775c48cea 100644 --- a/drivers/gpu/drm/tiny/repaper.c +++ b/drivers/gpu/drm/tiny/repaper.c @@ -560,7 +560,7 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb) if (ret) goto out_free; - drm_fb_xrgb8888_to_gray8(buf, cma_obj->vaddr, fb, &clip); + drm_fb_xrgb8888_to_gray8(buf, 0, cma_obj->vaddr, fb, &clip); drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c index 481b48bde047..b977f5c94562 100644 --- a/drivers/gpu/drm/tiny/simpledrm.c +++ b/drivers/gpu/drm/tiny/simpledrm.c @@ -2,6 +2,7 @@ #include <linux/clk.h> #include <linux/of_clk.h> +#include <linux/minmax.h> #include <linux/platform_data/simplefb.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> @@ -570,8 +571,8 @@ static const uint32_t simpledrm_default_formats[] = { //DRM_FORMAT_XRGB1555, //DRM_FORMAT_ARGB1555, DRM_FORMAT_RGB888, - //DRM_FORMAT_XRGB2101010, - //DRM_FORMAT_ARGB2101010, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_ARGB2101010, }; static const uint64_t simpledrm_format_modifiers[] = { @@ -641,16 +642,25 @@ simpledrm_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe, struct drm_framebuffer *fb = plane_state->fb; void *vmap = shadow_plane_state->data[0].vaddr; /* TODO: Use mapping abstraction */ struct drm_device *dev = &sdev->dev; + void __iomem *dst = sdev->screen_base; + struct drm_rect src_clip, dst_clip; int idx; if (!fb) return; + drm_rect_fp_to_int(&src_clip, &plane_state->src); + + dst_clip = plane_state->dst; + if (!drm_rect_intersect(&dst_clip, &src_clip)) + return; + if (!drm_dev_enter(dev, &idx)) return; - drm_fb_blit_dstclip(sdev->screen_base, sdev->pitch, - sdev->format->format, vmap, fb); + dst += drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip); + drm_fb_blit_toio(dst, sdev->pitch, sdev->format->format, vmap, fb, &src_clip); + drm_dev_exit(idx); } @@ -680,20 +690,25 @@ simpledrm_simple_display_pipe_update(struct 
drm_simple_display_pipe *pipe, void *vmap = shadow_plane_state->data[0].vaddr; /* TODO: Use mapping abstraction */ struct drm_framebuffer *fb = plane_state->fb; struct drm_device *dev = &sdev->dev; - struct drm_rect clip; + void __iomem *dst = sdev->screen_base; + struct drm_rect src_clip, dst_clip; int idx; if (!fb) return; - if (!drm_atomic_helper_damage_merged(old_plane_state, plane_state, &clip)) + if (!drm_atomic_helper_damage_merged(old_plane_state, plane_state, &src_clip)) + return; + + dst_clip = plane_state->dst; + if (!drm_rect_intersect(&dst_clip, &src_clip)) return; if (!drm_dev_enter(dev, &idx)) return; - drm_fb_blit_rect_dstclip(sdev->screen_base, sdev->pitch, - sdev->format->format, vmap, fb, &clip); + dst += drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip); + drm_fb_blit_toio(dst, sdev->pitch, sdev->format->format, vmap, fb, &src_clip); drm_dev_exit(idx); } @@ -758,6 +773,7 @@ static int simpledrm_device_init_modeset(struct simpledrm_device *sdev) struct drm_display_mode *mode = &sdev->mode; struct drm_connector *connector = &sdev->connector; struct drm_simple_display_pipe *pipe = &sdev->pipe; + unsigned long max_width, max_height; const uint32_t *formats; size_t nformats; int ret; @@ -766,10 +782,13 @@ static int simpledrm_device_init_modeset(struct simpledrm_device *sdev) if (ret) return ret; + max_width = max_t(unsigned long, mode->hdisplay, DRM_SHADOW_PLANE_MAX_WIDTH); + max_height = max_t(unsigned long, mode->vdisplay, DRM_SHADOW_PLANE_MAX_HEIGHT); + dev->mode_config.min_width = mode->hdisplay; - dev->mode_config.max_width = mode->hdisplay; + dev->mode_config.max_width = max_width; dev->mode_config.min_height = mode->vdisplay; - dev->mode_config.max_height = mode->vdisplay; + dev->mode_config.max_height = max_height; dev->mode_config.prefer_shadow_fbdev = true; dev->mode_config.preferred_depth = sdev->format->cpp[0] * 8; dev->mode_config.funcs = &simpledrm_mode_config_funcs; @@ -788,6 +807,8 @@ static int simpledrm_device_init_modeset(struct simpledrm_device *sdev) if (ret) return ret; + drm_plane_enable_fb_damage_clips(&pipe->plane); + drm_mode_config_reset(dev); return 0; diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c index ad0faa8723c2..51b9b9fb3ead 100644 --- a/drivers/gpu/drm/tiny/st7586.c +++ b/drivers/gpu/drm/tiny/st7586.c @@ -73,7 +73,7 @@ static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr, if (!buf) return; - drm_fb_xrgb8888_to_gray8(buf, vaddr, fb, clip); + drm_fb_xrgb8888_to_gray8(buf, 0, vaddr, fb, clip); src = buf; for (y = clip->y1; y < clip->y2; y++) { diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 739f11c0109c..db3dc7ef5382 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -727,6 +727,8 @@ int ttm_mem_evict_first(struct ttm_device *bdev, ret = ttm_bo_evict(bo, ctx); if (locked) ttm_bo_unreserve(bo); + else + ttm_bo_move_to_lru_tail_unlocked(bo); ttm_bo_put(bo); return ret; @@ -1084,7 +1086,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, if (timeout == 0) return -EBUSY; - dma_resv_add_excl_fence(bo->base.resv, NULL); return 0; } EXPORT_SYMBOL(ttm_bo_wait); @@ -1103,7 +1104,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, * as an indication that we're about to swap out. 
*/ memset(&place, 0, sizeof(place)); - place.mem_type = TTM_PL_SYSTEM; + place.mem_type = bo->resource->mem_type; if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL)) return -EBUSY; @@ -1135,6 +1136,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, struct ttm_place hop; memset(&hop, 0, sizeof(hop)); + place.mem_type = TTM_PL_SYSTEM; ret = ttm_resource_alloc(bo, &place, &evict_mem); if (unlikely(ret)) goto out; diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c index 67d68a4a8640..072e0baf2ab4 100644 --- a/drivers/gpu/drm/ttm/ttm_range_manager.c +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c @@ -128,15 +128,17 @@ static const struct ttm_resource_manager_func ttm_range_manager_func = { }; /** - * ttm_range_man_init + * ttm_range_man_init_nocheck - Initialise a generic range manager for the + * selected memory type. * * @bdev: ttm device * @type: memory manager type * @use_tt: if the memory manager uses tt * @p_size: size of area to be managed in pages. * - * Initialise a generic range manager for the selected memory type. * The range manager is installed for this device in the type slot. + * + * Return: %0 on success or a negative error code on failure */ int ttm_range_man_init_nocheck(struct ttm_device *bdev, unsigned type, bool use_tt, @@ -166,12 +168,13 @@ int ttm_range_man_init_nocheck(struct ttm_device *bdev, EXPORT_SYMBOL(ttm_range_man_init_nocheck); /** - * ttm_range_man_fini + * ttm_range_man_fini_nocheck - Remove the generic range manager from a slot + * and tear it down. * * @bdev: ttm device * @type: memory manager type * - * Remove the generic range manager from a slot and tear it down. + * Return: %0 on success or a negative error code on failure */ int ttm_range_man_fini_nocheck(struct ttm_device *bdev, unsigned type) diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 7e83c00a3f48..79c870a3bef8 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -34,6 +34,7 @@ #include <linux/sched.h> #include <linux/shmem_fs.h> #include <linux/file.h> +#include <linux/module.h> #include <drm/drm_cache.h> #include <drm/ttm/ttm_bo_driver.h> diff --git a/drivers/gpu/drm/tve200/Kconfig b/drivers/gpu/drm/tve200/Kconfig index e2d163c74ed6..47a7dbe6c114 100644 --- a/drivers/gpu/drm/tve200/Kconfig +++ b/drivers/gpu/drm/tve200/Kconfig @@ -8,7 +8,6 @@ config DRM_TVE200 select DRM_BRIDGE select DRM_PANEL_BRIDGE select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE help diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c index 6a8731ab9d7d..6e3113f419f4 100644 --- a/drivers/gpu/drm/v3d/v3d_bo.c +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -47,18 +47,18 @@ void v3d_free_object(struct drm_gem_object *obj) /* GPU execution may have dirtied any pages in the BO. 
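* Marking the pages dirty on put keeps the kernel's clean-page
* accounting honest for pages the GPU wrote behind the CPU's back.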
*/ bo->base.pages_mark_dirty_on_put = true; - drm_gem_shmem_free_object(obj); + drm_gem_shmem_free(&bo->base); } static const struct drm_gem_object_funcs v3d_gem_funcs = { .free = v3d_free_object, - .print_info = drm_gem_shmem_print_info, - .pin = drm_gem_shmem_pin, - .unpin = drm_gem_shmem_unpin, - .get_sg_table = drm_gem_shmem_get_sg_table, - .vmap = drm_gem_shmem_vmap, - .vunmap = drm_gem_shmem_vunmap, - .mmap = drm_gem_shmem_mmap, + .print_info = drm_gem_shmem_object_print_info, + .pin = drm_gem_shmem_object_pin, + .unpin = drm_gem_shmem_object_unpin, + .get_sg_table = drm_gem_shmem_object_get_sg_table, + .vmap = drm_gem_shmem_object_vmap, + .vunmap = drm_gem_shmem_object_vunmap, + .mmap = drm_gem_shmem_object_mmap, }; /* gem_create_object function for allocating a BO struct and doing @@ -70,11 +70,11 @@ struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size) struct drm_gem_object *obj; if (size == 0) - return NULL; + return ERR_PTR(-EINVAL); bo = kzalloc(sizeof(*bo), GFP_KERNEL); if (!bo) - return NULL; + return ERR_PTR(-ENOMEM); obj = &bo->base.base; obj->funcs = &v3d_gem_funcs; @@ -95,7 +95,7 @@ v3d_bo_create_finish(struct drm_gem_object *obj) /* So far we pin the BO in the MMU for its lifetime, so use * shmem's helper for getting a lifetime sgt. */ - sgt = drm_gem_shmem_get_pages_sgt(&bo->base.base); + sgt = drm_gem_shmem_get_pages_sgt(&bo->base); if (IS_ERR(sgt)) return PTR_ERR(sgt); @@ -141,7 +141,7 @@ struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, return bo; free_obj: - drm_gem_shmem_free_object(&shmem_obj->base); + drm_gem_shmem_free(shmem_obj); return ERR_PTR(ret); } @@ -159,7 +159,7 @@ v3d_prime_import_sg_table(struct drm_device *dev, ret = v3d_bo_create_finish(obj); if (ret) { - drm_gem_shmem_free_object(obj); + drm_gem_shmem_free(&to_v3d_bo(obj)->base); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index e47ae40a865a..c7ed2e1cbab6 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -774,7 +774,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) { ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job), - v3d_job_free, 0, 0, V3D_CACHE_CLEAN); + v3d_job_free, 0, NULL, V3D_CACHE_CLEAN); if (ret) goto fail; @@ -1007,7 +1007,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data, goto fail; ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job), - v3d_job_free, 0, 0, V3D_CACHE_CLEAN); + v3d_job_free, 0, NULL, V3D_CACHE_CLEAN); if (ret) goto fail; diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c index a6c81af37345..f35d9e44c6b7 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_drv.c +++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c @@ -7,7 +7,6 @@ * Michael Thayer <michael.thayer@oracle.com, * Hans de Goede <hdegoede@redhat.com> */ -#include <linux/console.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/vt_kern.h> @@ -193,10 +192,8 @@ static const struct drm_driver driver = { static int __init vbox_init(void) { -#ifdef CONFIG_VGA_CONSOLE - if (vgacon_text_force() && vbox_modeset == -1) + if (drm_firmware_drivers_only() && vbox_modeset == -1) return -EINVAL; -#endif if (vbox_modeset == 0) return -EINVAL; diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c index f28779715ccd..c9e8b3a63c62 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_main.c +++ 
b/drivers/gpu/drm/vboxvideo/vbox_main.c @@ -127,8 +127,8 @@ int vbox_hw_init(struct vbox_private *vbox) /* Create guest-heap mem-pool use 2^4 = 16 byte chunks */ vbox->guest_pool = devm_gen_pool_create(vbox->ddev.dev, 4, -1, "vboxvideo-accel"); - if (!vbox->guest_pool) - return -ENOMEM; + if (IS_ERR(vbox->guest_pool)) + return PTR_ERR(vbox->guest_pool); ret = gen_pool_add_virt(vbox->guest_pool, (unsigned long)vbox->guest_heap, diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig index 345a5570a3da..de3424fed2fc 100644 --- a/drivers/gpu/drm/vc4/Kconfig +++ b/drivers/gpu/drm/vc4/Kconfig @@ -6,7 +6,6 @@ config DRM_VC4 depends on SND && SND_SOC depends on COMMON_CLK select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER select DRM_PANEL_BRIDGE select SND_PCM diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index fddaeb0b09c1..6d1281a343e9 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -177,7 +177,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo) bo->validated_shader = NULL; } - drm_gem_cma_free_object(obj); + drm_gem_cma_free(&bo->base); } static void vc4_bo_remove_from_cache(struct vc4_bo *bo) @@ -720,7 +720,7 @@ static int vc4_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct return -EINVAL; } - return drm_gem_cma_mmap(obj, vma); + return drm_gem_cma_mmap(&bo->base, vma); } static const struct vm_operations_struct vc4_vm_ops = { @@ -732,8 +732,8 @@ static const struct vm_operations_struct vc4_vm_ops = { static const struct drm_gem_object_funcs vc4_gem_object_funcs = { .free = vc4_free_object, .export = vc4_prime_export, - .get_sg_table = drm_gem_cma_get_sg_table, - .vmap = drm_gem_cma_vmap, + .get_sg_table = drm_gem_cma_object_get_sg_table, + .vmap = drm_gem_cma_object_vmap, .mmap = vc4_gem_object_mmap, .vm_ops = &vc4_vm_ops, }; diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 18f5009ce90e..287dbc89ad64 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -32,6 +32,7 @@ #include <linux/clk.h> #include <linux/component.h> #include <linux/of_device.h> +#include <linux/pm_runtime.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> @@ -42,6 +43,7 @@ #include <drm/drm_vblank.h> #include "vc4_drv.h" +#include "vc4_hdmi.h" #include "vc4_regs.h" #define HVS_FIFO_LATENCY_PIX 6 @@ -279,27 +281,15 @@ static u32 vc4_crtc_get_fifo_full_level_bits(struct vc4_crtc *vc4_crtc, * allows drivers to push pixels to more than one encoder from the * same CRTC. 
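* After this rework the encoder is derived from the CRTC state's
* encoder_mask (at most one bit set, as the WARN_ON below asserts)
* instead of walking the connector list. Callers now look roughly
* like:
*
*   encoder = vc4_get_crtc_encoder(crtc,
*                  drm_atomic_get_new_crtc_state(state, crtc));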
*/ -static struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc, - struct drm_atomic_state *state, - struct drm_connector_state *(*get_state)(struct drm_atomic_state *state, - struct drm_connector *connector)) +struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc, + struct drm_crtc_state *state) { - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; + struct drm_encoder *encoder; - drm_connector_list_iter_begin(crtc->dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - struct drm_connector_state *conn_state = get_state(state, connector); + WARN_ON(hweight32(state->encoder_mask) > 1); - if (!conn_state) - continue; - - if (conn_state->crtc == crtc) { - drm_connector_list_iter_end(&conn_iter); - return connector->encoder; - } - } - drm_connector_list_iter_end(&conn_iter); + drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask) + return encoder; return NULL; } @@ -313,12 +303,11 @@ static void vc4_crtc_pixelvalve_reset(struct drm_crtc *crtc) CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_FIFO_CLR); } -static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_atomic_state *state) +static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encoder, + struct drm_atomic_state *state) { struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); - struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, state, - drm_atomic_get_new_connector_state); struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder); struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc); @@ -496,8 +485,10 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc) enum vc4_encoder_type encoder_type; const struct vc4_pv_data *pv_data; struct drm_encoder *encoder; + struct vc4_hdmi *vc4_hdmi; unsigned encoder_sel; int channel; + int ret; if (!(of_device_is_compatible(vc4_crtc->pdev->dev.of_node, "brcm,bcm2711-pixelvalve2") || @@ -525,7 +516,20 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc) if (WARN_ON(!encoder)) return 0; - return vc4_crtc_disable(crtc, encoder, NULL, channel); + vc4_hdmi = encoder_to_vc4_hdmi(encoder); + ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev); + if (ret) + return ret; + + ret = vc4_crtc_disable(crtc, encoder, NULL, channel); + if (ret) + return ret; + + ret = pm_runtime_put(&vc4_hdmi->pdev->dev); + if (ret) + return ret; + + return 0; } static void vc4_crtc_atomic_disable(struct drm_crtc *crtc, @@ -534,10 +538,12 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc); struct vc4_crtc_state *old_vc4_state = to_vc4_crtc_state(old_state); - struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, state, - drm_atomic_get_old_connector_state); + struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, old_state); struct drm_device *dev = crtc->dev; + drm_dbg(dev, "Disabling CRTC %s (%u) connected to Encoder %s (%u)", + crtc->name, crtc->base.id, encoder->name, encoder->base.id); + require_hvs_enabled(dev); /* Disable vblank irq handling before crtc is disabled. 
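* (drm_crtc_vblank_off() will also complete any pending vblank event,
* so userspace is not left waiting on a CRTC that is going away.)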
*/ @@ -562,12 +568,16 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc, static void vc4_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state) { + struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state, + crtc); struct drm_device *dev = crtc->dev; struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); - struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, state, - drm_atomic_get_new_connector_state); + struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, new_state); struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder); + drm_dbg(dev, "Enabling CRTC %s (%u) connected to Encoder %s (%u)", + crtc->name, crtc->base.id, encoder->name, encoder->base.id); + require_hvs_enabled(dev); /* Enable vblank irq handling before crtc is started otherwise @@ -580,7 +590,7 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc, if (vc4_encoder->pre_crtc_configure) vc4_encoder->pre_crtc_configure(encoder, state); - vc4_crtc_config_pv(crtc, state); + vc4_crtc_config_pv(crtc, encoder, state); CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_EN); @@ -649,12 +659,27 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc, struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state); struct drm_connector *conn; struct drm_connector_state *conn_state; + struct drm_encoder *encoder; int ret, i; ret = vc4_hvs_atomic_check(crtc, state); if (ret) return ret; + encoder = vc4_get_crtc_encoder(crtc, crtc_state); + if (encoder) { + const struct drm_display_mode *mode = &crtc_state->adjusted_mode; + struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder); + + mode = &crtc_state->adjusted_mode; + if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) { + vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 1000, + mode->clock * 9 / 10) * 1000; + } else { + vc4_state->hvs_load = mode->clock * 1000; + } + } + for_each_new_connector_in_state(state, conn, conn_state, i) { if (conn_state->crtc != crtc) @@ -691,14 +716,14 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc) struct drm_crtc *crtc = &vc4_crtc->base; struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); - u32 chan = vc4_state->assigned_channel; + u32 chan = vc4_crtc->current_hvs_channel; unsigned long flags; spin_lock_irqsave(&dev->event_lock, flags); + spin_lock(&vc4_crtc->irq_lock); if (vc4_crtc->event && - (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)) || - vc4_state->feed_txp)) { + (vc4_crtc->current_dlist == HVS_READ(SCALER_DISPLACTX(chan)) || + vc4_crtc->feeds_txp)) { drm_crtc_send_vblank_event(crtc, vc4_crtc->event); vc4_crtc->event = NULL; drm_crtc_vblank_put(crtc); @@ -711,6 +736,7 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc) */ vc4_hvs_unmask_underrun(dev, chan); } + spin_unlock(&vc4_crtc->irq_lock); spin_unlock_irqrestore(&dev->event_lock, flags); } @@ -876,7 +902,6 @@ struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc) return NULL; old_vc4_state = to_vc4_crtc_state(crtc->state); - vc4_state->feed_txp = old_vc4_state->feed_txp; vc4_state->margins = old_vc4_state->margins; vc4_state->assigned_channel = old_vc4_state->assigned_channel; @@ -937,6 +962,7 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = { static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = { .mode_valid = vc4_crtc_mode_valid, .atomic_check = vc4_crtc_atomic_check, + .atomic_begin = vc4_hvs_atomic_begin, .atomic_flush = 
vc4_hvs_atomic_flush, .atomic_enable = vc4_crtc_atomic_enable, .atomic_disable = vc4_crtc_atomic_disable, @@ -1111,6 +1137,7 @@ int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc, return PTR_ERR(primary_plane); } + spin_lock_init(&vc4_crtc->irq_lock); drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL, crtc_funcs, NULL); drm_crtc_helper_add(crtc, crtc_helper_funcs); diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c index 6da22af4ee91..ba2d8ea562af 100644 --- a/drivers/gpu/drm/vc4/vc4_debugfs.c +++ b/drivers/gpu/drm/vc4/vc4_debugfs.c @@ -7,6 +7,7 @@ #include <linux/circ_buf.h> #include <linux/ctype.h> #include <linux/debugfs.h> +#include <linux/platform_device.h> #include "vc4_drv.h" #include "vc4_regs.h" @@ -26,8 +27,10 @@ vc4_debugfs_init(struct drm_minor *minor) struct vc4_dev *vc4 = to_vc4_dev(minor->dev); struct vc4_debugfs_info_entry *entry; - debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR, - minor->debugfs_root, &vc4->load_tracker_enabled); + if (!of_device_is_compatible(vc4->hvs->pdev->dev.of_node, + "brcm,bcm2711-vc5")) + debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR, + minor->debugfs_root, &vc4->load_tracker_enabled); list_for_each_entry(entry, &vc4->debugfs_list, link) { drm_debugfs_create_files(&entry->info, 1, diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index ef73e0aaf726..4329e09d357c 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -202,9 +202,6 @@ struct vc4_dev { int power_refcount; - /* Set to true when the load tracker is supported. */ - bool load_tracker_available; - /* Set to true when the load tracker is active. */ bool load_tracker_enabled; @@ -495,6 +492,33 @@ struct vc4_crtc { struct drm_pending_vblank_event *event; struct debugfs_regset32 regset; + + /** + * @feeds_txp: True if the CRTC feeds our writeback controller. + */ + bool feeds_txp; + + /** + * @irq_lock: Spinlock protecting the resources shared between + * the atomic code and our vblank handler. + */ + spinlock_t irq_lock; + + /** + * @current_dlist: Start offset of the display list currently + * set in the HVS for that CRTC. Protected by @irq_lock, and + * copied in vc4_hvs_update_dlist() for the CRTC interrupt + * handler to have access to that value. + */ + unsigned int current_dlist; + + /** + * @current_hvs_channel: HVS channel currently assigned to the + * CRTC. Protected by @irq_lock, and copied in + * vc4_hvs_atomic_begin() for the CRTC interrupt handler to have + * access to that value. + */ + unsigned int current_hvs_channel; }; static inline struct vc4_crtc * @@ -517,11 +541,13 @@ vc4_crtc_to_vc4_pv_data(const struct vc4_crtc *crtc) return container_of(data, struct vc4_pv_data, base); } +struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc, + struct drm_crtc_state *state); + struct vc4_crtc_state { struct drm_crtc_state base; /* Dlist area for this CRTC configuration. 
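* The start offset is also mirrored into vc4_crtc->current_dlist
* (see @current_dlist above) so the vblank interrupt handler can
* compare it against SCALER_DISPLACTXn without touching atomic state.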
*/ struct drm_mm_node mm; - bool feed_txp; bool txp_armed; unsigned int assigned_channel; @@ -532,6 +558,8 @@ struct vc4_crtc_state { unsigned int bottom; } margins; + unsigned long hvs_load; + /* Transitional state below, only valid during atomic commits */ bool update_muxing; }; @@ -908,6 +936,7 @@ extern struct platform_driver vc4_hvs_driver; void vc4_hvs_stop_channel(struct drm_device *dev, unsigned int output); int vc4_hvs_get_fifo_from_output(struct drm_device *dev, unsigned int output); int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state); +void vc4_hvs_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state); void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state); void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state); void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state); diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index b284623e2863..053fbaf765ca 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -94,6 +94,7 @@ # define VC4_HD_M_SW_RST BIT(2) # define VC4_HD_M_ENABLE BIT(0) +#define HSM_MIN_CLOCK_FREQ 120000000 #define CEC_CLOCK_FREQ 40000 #define HDMI_14_MAX_TMDS_CLK (340 * 1000 * 1000) @@ -117,6 +118,10 @@ static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused) static void vc4_hdmi_reset(struct vc4_hdmi *vc4_hdmi) { + unsigned long flags; + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + HDMI_WRITE(HDMI_M_CTL, VC4_HD_M_SW_RST); udelay(1); HDMI_WRITE(HDMI_M_CTL, 0); @@ -128,24 +133,36 @@ static void vc4_hdmi_reset(struct vc4_hdmi *vc4_hdmi) VC4_HDMI_SW_RESET_FORMAT_DETECT); HDMI_WRITE(HDMI_SW_RESET_CONTROL, 0); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } static void vc5_hdmi_reset(struct vc4_hdmi *vc4_hdmi) { + unsigned long flags; + reset_control_reset(vc4_hdmi->reset); + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + HDMI_WRITE(HDMI_DVP_CTL, 0); HDMI_WRITE(HDMI_CLOCK_STOP, HDMI_READ(HDMI_CLOCK_STOP) | VC4_DVP_HT_CLOCK_STOP_PIXEL); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } #ifdef CONFIG_DRM_VC4_HDMI_CEC static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi) { + unsigned long cec_rate = clk_get_rate(vc4_hdmi->cec_clock); + unsigned long flags; u16 clk_cnt; u32 value; + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + value = HDMI_READ(HDMI_CEC_CNTRL_1); value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK; @@ -153,27 +170,41 @@ static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi) * Set the clock divider: the hsm_clock rate and this divider * setting will give a 40 kHz CEC clock. 
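* For instance, a 120 MHz source clock gives
* clk_cnt = 120000000 / 40000 = 3000.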
*/ - clk_cnt = clk_get_rate(vc4_hdmi->cec_clock) / CEC_CLOCK_FREQ; + clk_cnt = cec_rate / CEC_CLOCK_FREQ; value |= clk_cnt << VC4_HDMI_CEC_DIV_CLK_CNT_SHIFT; HDMI_WRITE(HDMI_CEC_CNTRL_1, value); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } #else static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi) {} #endif +static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder); + static enum drm_connector_status vc4_hdmi_connector_detect(struct drm_connector *connector, bool force) { struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); bool connected = false; - if (vc4_hdmi->hpd_gpio && - gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) { - connected = true; - } else if (drm_probe_ddc(vc4_hdmi->ddc)) { - connected = true; - } else if (HDMI_READ(HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED) { - connected = true; + mutex_lock(&vc4_hdmi->mutex); + + WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev)); + + if (vc4_hdmi->hpd_gpio) { + if (gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) + connected = true; + } else { + unsigned long flags; + u32 hotplug; + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + hotplug = HDMI_READ(HDMI_HOTPLUG); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + + if (hotplug & VC4_HDMI_HOTPLUG_CONNECTED) + connected = true; } if (connected) { @@ -187,10 +218,15 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force) } } + vc4_hdmi_enable_scrambling(&vc4_hdmi->encoder.base.base); + pm_runtime_put(&vc4_hdmi->pdev->dev); + mutex_unlock(&vc4_hdmi->mutex); return connector_status_connected; } cec_phys_addr_invalidate(vc4_hdmi->cec_adap); + pm_runtime_put(&vc4_hdmi->pdev->dev); + mutex_unlock(&vc4_hdmi->mutex); return connector_status_disconnected; } @@ -207,10 +243,14 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector) int ret = 0; struct edid *edid; + mutex_lock(&vc4_hdmi->mutex); + edid = drm_get_edid(connector, vc4_hdmi->ddc); cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid); - if (!edid) - return -ENODEV; + if (!edid) { + ret = -ENODEV; + goto out; + } vc4_encoder->hdmi_monitor = drm_detect_hdmi_monitor(edid); @@ -230,6 +270,9 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector) } } +out: + mutex_unlock(&vc4_hdmi->mutex); + return ret; } @@ -364,9 +407,12 @@ static int vc4_hdmi_stop_packet(struct drm_encoder *encoder, { struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); u32 packet_id = type - 0x80; + unsigned long flags; + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, HDMI_READ(HDMI_RAM_PACKET_CONFIG) & ~BIT(packet_id)); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); if (!poll) return 0; @@ -386,6 +432,7 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder, void __iomem *base = __vc4_hdmi_get_field_base(vc4_hdmi, ram_packet_start->reg); uint8_t buffer[VC4_HDMI_PACKET_STRIDE]; + unsigned long flags; ssize_t len, i; int ret; @@ -403,6 +450,8 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder, return; } + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + for (i = 0; i < len; i += 7) { writel(buffer[i + 0] << 0 | buffer[i + 1] << 8 | @@ -420,6 +469,9 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder, HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, HDMI_READ(HDMI_RAM_PACKET_CONFIG) | BIT(packet_id)); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + ret = wait_for((HDMI_READ(HDMI_RAM_PACKET_STATUS) & BIT(packet_id)), 100); if (ret) @@ -432,11 +484,12 @@ static void 
vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder) struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); struct drm_connector *connector = &vc4_hdmi->connector; struct drm_connector_state *cstate = connector->state; - struct drm_crtc *crtc = encoder->crtc; - const struct drm_display_mode *mode = &crtc->state->adjusted_mode; + const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode; union hdmi_infoframe frame; int ret; + lockdep_assert_held(&vc4_hdmi->mutex); + ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, connector, mode); if (ret < 0) { @@ -488,6 +541,8 @@ static void vc4_hdmi_set_hdr_infoframe(struct drm_encoder *encoder) struct drm_connector_state *conn_state = connector->state; union hdmi_infoframe frame; + lockdep_assert_held(&vc4_hdmi->mutex); + if (!vc4_hdmi->variant->supports_hdr) return; @@ -504,6 +559,8 @@ static void vc4_hdmi_set_infoframes(struct drm_encoder *encoder) { struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + lockdep_assert_held(&vc4_hdmi->mutex); + vc4_hdmi_set_avi_infoframe(encoder); vc4_hdmi_set_spd_infoframe(encoder); /* @@ -523,6 +580,8 @@ static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder, struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); struct drm_display_info *display = &vc4_hdmi->connector.display_info; + lockdep_assert_held(&vc4_hdmi->mutex); + if (!vc4_encoder->hdmi_monitor) return false; @@ -537,8 +596,11 @@ static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder, static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder) { - struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode; + unsigned long flags; + + lockdep_assert_held(&vc4_hdmi->mutex); if (!vc4_hdmi_supports_scrambling(encoder, mode)) return; @@ -549,8 +611,12 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder) drm_scdc_set_high_tmds_clock_ratio(vc4_hdmi->ddc, true); drm_scdc_set_scrambling(vc4_hdmi->ddc, true); + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_SCRAMBLER_CTL, HDMI_READ(HDMI_SCRAMBLER_CTL) | VC5_HDMI_SCRAMBLER_CTL_ENABLE); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + + vc4_hdmi->scdc_enabled = true; queue_delayed_work(system_wq, &vc4_hdmi->scrambling_work, msecs_to_jiffies(SCRAMBLING_POLLING_DELAY_MS)); @@ -559,24 +625,22 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder) static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder) { struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); - struct drm_crtc *crtc = encoder->crtc; + unsigned long flags; - /* - * At boot, encoder->crtc will be NULL. Since we don't know the - * state of the scrambler and in order to avoid any - * inconsistency, let's disable it all the time. 
- */ - if (crtc && !vc4_hdmi_supports_scrambling(encoder, &crtc->mode)) - return; + lockdep_assert_held(&vc4_hdmi->mutex); - if (crtc && !vc4_hdmi_mode_needs_scrambling(&crtc->mode)) + if (!vc4_hdmi->scdc_enabled) return; + vc4_hdmi->scdc_enabled = false; + if (delayed_work_pending(&vc4_hdmi->scrambling_work)) cancel_delayed_work_sync(&vc4_hdmi->scrambling_work); + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_SCRAMBLER_CTL, HDMI_READ(HDMI_SCRAMBLER_CTL) & ~VC5_HDMI_SCRAMBLER_CTL_ENABLE); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); drm_scdc_set_scrambling(vc4_hdmi->ddc, false); drm_scdc_set_high_tmds_clock_ratio(vc4_hdmi->ddc, false); @@ -602,47 +666,73 @@ static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder, struct drm_atomic_state *state) { struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + unsigned long flags; + + mutex_lock(&vc4_hdmi->mutex); + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, 0); HDMI_WRITE(HDMI_VID_CTL, HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_CLRRGB); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + mdelay(1); + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_VID_CTL, HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + vc4_hdmi_disable_scrambling(encoder); + + mutex_unlock(&vc4_hdmi->mutex); } static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder, struct drm_atomic_state *state) { struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + unsigned long flags; int ret; + mutex_lock(&vc4_hdmi->mutex); + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_VID_CTL, HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_BLANKPIX); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); if (vc4_hdmi->variant->phy_disable) vc4_hdmi->variant->phy_disable(vc4_hdmi); clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock); - clk_disable_unprepare(vc4_hdmi->hsm_clock); clk_disable_unprepare(vc4_hdmi->pixel_clock); ret = pm_runtime_put(&vc4_hdmi->pdev->dev); if (ret < 0) DRM_ERROR("Failed to release power domain: %d\n", ret); + + mutex_unlock(&vc4_hdmi->mutex); } static void vc4_hdmi_encoder_disable(struct drm_encoder *encoder) { + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + + mutex_lock(&vc4_hdmi->mutex); + vc4_hdmi->output_enabled = false; + mutex_unlock(&vc4_hdmi->mutex); } static void vc4_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable) { + unsigned long flags; u32 csc_ctl; + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + csc_ctl = VC4_SET_FIELD(VC4_HD_CSC_CTL_ORDER_BGR, VC4_HD_CSC_CTL_ORDER); @@ -672,14 +762,19 @@ static void vc4_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable) /* The RGB order applies even when CSC is disabled. */ HDMI_WRITE(HDMI_CSC_CTL, csc_ctl); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable) { + unsigned long flags; u32 csc_ctl; csc_ctl = 0x07; /* RGB_CONVERT_MODE = custom matrix, || USE_RGB_TO_YCBCR */ + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + if (enable) { /* CEA VICs other than #1 requre limited range RGB * output unless overridden by an AVI infoframe. 
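* (Per CTA-861, VIC 1, the 640x480 "VGA" mode, defaults to full
* range RGB; every other CEA video code defaults to limited range.)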
@@ -711,6 +806,8 @@ static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable) } HDMI_WRITE(HDMI_CSC_CTL, csc_ctl); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, @@ -734,6 +831,9 @@ static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, mode->crtc_vsync_end - interlaced, VC4_HDMI_VERTB_VBP)); + unsigned long flags; + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_HORZA, (vsync_pos ? VC4_HDMI_HORZA_VPOS : 0) | @@ -757,6 +857,8 @@ static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, HDMI_WRITE(HDMI_VERTB0, vertb_even); HDMI_WRITE(HDMI_VERTB1, vertb); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, @@ -780,10 +882,13 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, mode->crtc_vsync_end - interlaced, VC4_HDMI_VERTB_VBP)); + unsigned long flags; unsigned char gcp; bool gcp_en; u32 reg; + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + HDMI_WRITE(HDMI_VEC_INTERFACE_XBAR, 0x354021); HDMI_WRITE(HDMI_HORZA, (vsync_pos ? VC5_HDMI_HORZA_VPOS : 0) | @@ -842,13 +947,18 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, HDMI_WRITE(HDMI_GCP_CONFIG, reg); HDMI_WRITE(HDMI_CLOCK_STOP, 0); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } static void vc4_hdmi_recenter_fifo(struct vc4_hdmi *vc4_hdmi) { + unsigned long flags; u32 drift; int ret; + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + drift = HDMI_READ(HDMI_FIFO_CTL); drift &= VC4_HDMI_FIFO_VALID_WRITE_MASK; @@ -856,12 +966,20 @@ static void vc4_hdmi_recenter_fifo(struct vc4_hdmi *vc4_hdmi) drift & ~VC4_HDMI_FIFO_CTL_RECENTER); HDMI_WRITE(HDMI_FIFO_CTL, drift | VC4_HDMI_FIFO_CTL_RECENTER); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + usleep_range(1000, 1100); + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + HDMI_WRITE(HDMI_FIFO_CTL, drift & ~VC4_HDMI_FIFO_CTL_RECENTER); HDMI_WRITE(HDMI_FIFO_CTL, drift | VC4_HDMI_FIFO_CTL_RECENTER); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + ret = wait_for(HDMI_READ(HDMI_FIFO_CTL) & VC4_HDMI_FIFO_CTL_RECENTER_DONE, 1); WARN_ONCE(ret, "Timeout waiting for " @@ -891,29 +1009,14 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, vc4_hdmi_encoder_get_connector_state(encoder, state); struct vc4_hdmi_connector_state *vc4_conn_state = conn_state_to_vc4_hdmi_conn_state(conn_state); - struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); - unsigned long bvb_rate, pixel_rate, hsm_rate; + struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode; + unsigned long pixel_rate = vc4_conn_state->pixel_rate; + unsigned long bvb_rate, hsm_rate; + unsigned long flags; int ret; - ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev); - if (ret < 0) { - DRM_ERROR("Failed to retain power domain: %d\n", ret); - return; - } - - pixel_rate = vc4_conn_state->pixel_rate; - ret = clk_set_rate(vc4_hdmi->pixel_clock, pixel_rate); - if (ret) { - DRM_ERROR("Failed to set pixel clock rate: %d\n", ret); - return; - } - - ret = clk_prepare_enable(vc4_hdmi->pixel_clock); - if (ret) { - DRM_ERROR("Failed to turn on pixel clock: %d\n", ret); - return; - } + mutex_lock(&vc4_hdmi->mutex); /* * As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must @@ -935,16 +1038,28 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, ret = clk_set_min_rate(vc4_hdmi->hsm_clock, hsm_rate); 
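/*
 * clk_set_min_rate() only installs a floor; the clock framework may
 * pick any supported rate at or above hsm_rate, which was derived
 * above as roughly max(HSM_MIN_CLOCK_FREQ, 101% of the pixel rate).
 */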
if (ret) { DRM_ERROR("Failed to set HSM clock rate: %d\n", ret); - return; + goto out; } - ret = clk_prepare_enable(vc4_hdmi->hsm_clock); + ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev); + if (ret < 0) { + DRM_ERROR("Failed to retain power domain: %d\n", ret); + goto out; + } + + ret = clk_set_rate(vc4_hdmi->pixel_clock, pixel_rate); if (ret) { - DRM_ERROR("Failed to turn on HSM clock: %d\n", ret); - clk_disable_unprepare(vc4_hdmi->pixel_clock); - return; + DRM_ERROR("Failed to set pixel clock rate: %d\n", ret); + goto err_put_runtime_pm; } + ret = clk_prepare_enable(vc4_hdmi->pixel_clock); + if (ret) { + DRM_ERROR("Failed to turn on pixel clock: %d\n", ret); + goto err_put_runtime_pm; + } + + vc4_hdmi_cec_update_clk_div(vc4_hdmi); if (pixel_rate > 297000000) @@ -957,37 +1072,52 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, bvb_rate); if (ret) { DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret); - clk_disable_unprepare(vc4_hdmi->hsm_clock); - clk_disable_unprepare(vc4_hdmi->pixel_clock); - return; + goto err_disable_pixel_clock; } ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock); if (ret) { DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret); - clk_disable_unprepare(vc4_hdmi->hsm_clock); - clk_disable_unprepare(vc4_hdmi->pixel_clock); - return; + goto err_disable_pixel_clock; } if (vc4_hdmi->variant->phy_init) vc4_hdmi->variant->phy_init(vc4_hdmi, vc4_conn_state); + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + HDMI_WRITE(HDMI_SCHEDULER_CONTROL, HDMI_READ(HDMI_SCHEDULER_CONTROL) | VC4_HDMI_SCHEDULER_CONTROL_MANUAL_FORMAT | VC4_HDMI_SCHEDULER_CONTROL_IGNORE_VSYNC_PREDICTS); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + if (vc4_hdmi->variant->set_timings) vc4_hdmi->variant->set_timings(vc4_hdmi, conn_state, mode); + + mutex_unlock(&vc4_hdmi->mutex); + + return; + +err_disable_pixel_clock: + clk_disable_unprepare(vc4_hdmi->pixel_clock); +err_put_runtime_pm: + pm_runtime_put(&vc4_hdmi->pdev->dev); +out: + mutex_unlock(&vc4_hdmi->mutex); + return; } static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder, struct drm_atomic_state *state) { - struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; - struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode; + struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); + unsigned long flags; + + mutex_lock(&vc4_hdmi->mutex); if (vc4_encoder->hdmi_monitor && drm_default_rgb_quant_range(mode) == HDMI_QUANTIZATION_RANGE_LIMITED) { @@ -1002,19 +1132,28 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder, vc4_encoder->limited_rgb_range = false; } + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + + mutex_unlock(&vc4_hdmi->mutex); } static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder, struct drm_atomic_state *state) { - struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode; struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC; bool vsync_pos = mode->flags & DRM_MODE_FLAG_PVSYNC; + unsigned long flags; int 
ret; + mutex_lock(&vc4_hdmi->mutex); + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + HDMI_WRITE(HDMI_VID_CTL, VC4_HD_VID_CTL_ENABLE | VC4_HD_VID_CTL_CLRRGB | @@ -1031,6 +1170,8 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder, HDMI_READ(HDMI_SCHEDULER_CONTROL) | VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + ret = wait_for(HDMI_READ(HDMI_SCHEDULER_CONTROL) & VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE, 1000); WARN_ONCE(ret, "Timeout waiting for " @@ -1043,6 +1184,8 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder, HDMI_READ(HDMI_SCHEDULER_CONTROL) & ~VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + ret = wait_for(!(HDMI_READ(HDMI_SCHEDULER_CONTROL) & VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE), 1000); WARN_ONCE(ret, "Timeout waiting for " @@ -1050,6 +1193,8 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder, } if (vc4_encoder->hdmi_monitor) { + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + WARN_ON(!(HDMI_READ(HDMI_SCHEDULER_CONTROL) & VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE)); HDMI_WRITE(HDMI_SCHEDULER_CONTROL, @@ -1059,15 +1204,37 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder, HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, VC4_HDMI_RAM_PACKET_ENABLE); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + vc4_hdmi_set_infoframes(encoder); } vc4_hdmi_recenter_fifo(vc4_hdmi); vc4_hdmi_enable_scrambling(encoder); + + mutex_unlock(&vc4_hdmi->mutex); } static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder) { + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + + mutex_lock(&vc4_hdmi->mutex); + vc4_hdmi->output_enabled = true; + mutex_unlock(&vc4_hdmi->mutex); +} + +static void vc4_hdmi_encoder_atomic_mode_set(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + + mutex_lock(&vc4_hdmi->mutex); + memcpy(&vc4_hdmi->saved_adjusted_mode, + &crtc_state->adjusted_mode, + sizeof(vc4_hdmi->saved_adjusted_mode)); + mutex_unlock(&vc4_hdmi->mutex); } #define WIFI_2_4GHz_CH1_MIN_FREQ 2400000000ULL @@ -1146,6 +1313,7 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder, static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = { .atomic_check = vc4_hdmi_encoder_atomic_check, + .atomic_mode_set = vc4_hdmi_encoder_atomic_mode_set, .mode_valid = vc4_hdmi_encoder_mode_valid, .disable = vc4_hdmi_encoder_disable, .enable = vc4_hdmi_encoder_enable, @@ -1180,6 +1348,7 @@ static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi, unsigned int samplerate) { u32 hsm_clock = clk_get_rate(vc4_hdmi->audio_clock); + unsigned long flags; unsigned long n, m; rational_best_approximation(hsm_clock, samplerate, @@ -1189,19 +1358,22 @@ static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi, VC4_HD_MAI_SMP_M_SHIFT) + 1, &n, &m); + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_MAI_SMP, VC4_SET_FIELD(n, VC4_HD_MAI_SMP_N) | VC4_SET_FIELD(m - 1, VC4_HD_MAI_SMP_M)); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } static void vc4_hdmi_set_n_cts(struct vc4_hdmi *vc4_hdmi, unsigned int samplerate) { - struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base; - struct drm_crtc *crtc = encoder->crtc; - const struct drm_display_mode *mode = &crtc->state->adjusted_mode; + const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode; u32 n, 
cts; u64 tmp; + lockdep_assert_held(&vc4_hdmi->mutex); + lockdep_assert_held(&vc4_hdmi->hw_lock); + n = 128 * samplerate / 1000; tmp = (u64)(mode->clock * 1000) * n; do_div(tmp, 128 * samplerate); @@ -1227,31 +1399,54 @@ static inline struct vc4_hdmi *dai_to_hdmi(struct snd_soc_dai *dai) return snd_soc_card_get_drvdata(card); } -static int vc4_hdmi_audio_startup(struct device *dev, void *data) +static bool vc4_hdmi_audio_can_stream(struct vc4_hdmi *vc4_hdmi) { - struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); - struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base; + lockdep_assert_held(&vc4_hdmi->mutex); + + /* + * If the controller is disabled, prevent any ALSA output. + */ + if (!vc4_hdmi->output_enabled) + return false; /* - * If the HDMI encoder hasn't probed, or the encoder is - * currently in DVI mode, treat the codec dai as missing. + * If the encoder is currently in DVI mode, treat the codec DAI + * as missing. */ - if (!encoder->crtc || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) & - VC4_HDMI_RAM_PACKET_ENABLE)) + if (!(HDMI_READ(HDMI_RAM_PACKET_CONFIG) & VC4_HDMI_RAM_PACKET_ENABLE)) + return false; + + return true; +} + +static int vc4_hdmi_audio_startup(struct device *dev, void *data) +{ + struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); + unsigned long flags; + + mutex_lock(&vc4_hdmi->mutex); + + if (!vc4_hdmi_audio_can_stream(vc4_hdmi)) { + mutex_unlock(&vc4_hdmi->mutex); return -ENODEV; + } vc4_hdmi->audio.streaming = true; + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_RESET | VC4_HD_MAI_CTL_FLUSH | VC4_HD_MAI_CTL_DLATE | VC4_HD_MAI_CTL_ERRORE | VC4_HD_MAI_CTL_ERRORF); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); if (vc4_hdmi->variant->phy_rng_enable) vc4_hdmi->variant->phy_rng_enable(vc4_hdmi); + mutex_unlock(&vc4_hdmi->mutex); + return 0; } @@ -1259,32 +1454,48 @@ static void vc4_hdmi_audio_reset(struct vc4_hdmi *vc4_hdmi) { struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base; struct device *dev = &vc4_hdmi->pdev->dev; + unsigned long flags; int ret; + lockdep_assert_held(&vc4_hdmi->mutex); + vc4_hdmi->audio.streaming = false; ret = vc4_hdmi_stop_packet(encoder, HDMI_INFOFRAME_TYPE_AUDIO, false); if (ret) dev_err(dev, "Failed to stop audio infoframe: %d\n", ret); + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_RESET); HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_ERRORF); HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_FLUSH); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } static void vc4_hdmi_audio_shutdown(struct device *dev, void *data) { struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); + unsigned long flags; + + mutex_lock(&vc4_hdmi->mutex); + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_DLATE | VC4_HD_MAI_CTL_ERRORE | VC4_HD_MAI_CTL_ERRORF); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + if (vc4_hdmi->variant->phy_rng_disable) vc4_hdmi->variant->phy_rng_disable(vc4_hdmi); vc4_hdmi->audio.streaming = false; vc4_hdmi_audio_reset(vc4_hdmi); + + mutex_unlock(&vc4_hdmi->mutex); } static int sample_rate_to_mai_fmt(int samplerate) @@ -1334,6 +1545,7 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data, struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base; unsigned int sample_rate = params->sample_rate; unsigned int channels = params->channels; + unsigned long flags; u32 audio_packet_config, channel_mask; u32 channel_map; u32 mai_audio_format; @@ -1342,14 +1554,22 @@ static int vc4_hdmi_audio_prepare(struct 
device *dev, void *data, dev_dbg(dev, "%s: %u Hz, %d bit, %d channels\n", __func__, sample_rate, params->sample_width, channels); + mutex_lock(&vc4_hdmi->mutex); + + if (!vc4_hdmi_audio_can_stream(vc4_hdmi)) { + mutex_unlock(&vc4_hdmi->mutex); + return -EINVAL; + } + + vc4_hdmi_audio_set_mai_clock(vc4_hdmi, sample_rate); + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_MAI_CTL, VC4_SET_FIELD(channels, VC4_HD_MAI_CTL_CHNUM) | VC4_HD_MAI_CTL_WHOLSMP | VC4_HD_MAI_CTL_CHALIGN | VC4_HD_MAI_CTL_ENABLE); - vc4_hdmi_audio_set_mai_clock(vc4_hdmi, sample_rate); - mai_sample_rate = sample_rate_to_mai_fmt(sample_rate); if (params->iec.status[0] & IEC958_AES0_NONAUDIO && params->channels == 8) @@ -1387,11 +1607,16 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data, channel_map = vc4_hdmi->variant->channel_map(vc4_hdmi, channel_mask); HDMI_WRITE(HDMI_MAI_CHANNEL_MAP, channel_map); HDMI_WRITE(HDMI_AUDIO_PACKET_CONFIG, audio_packet_config); + vc4_hdmi_set_n_cts(vc4_hdmi, sample_rate); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + memcpy(&vc4_hdmi->audio.infoframe, ¶ms->cea, sizeof(params->cea)); vc4_hdmi_set_audio_infoframe(encoder); + mutex_unlock(&vc4_hdmi->mutex); + return 0; } @@ -1434,7 +1659,9 @@ static int vc4_hdmi_audio_get_eld(struct device *dev, void *data, struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); struct drm_connector *connector = &vc4_hdmi->connector; + mutex_lock(&vc4_hdmi->mutex); memcpy(buf, connector->eld, min(sizeof(connector->eld), len)); + mutex_unlock(&vc4_hdmi->mutex); return 0; } @@ -1656,6 +1883,8 @@ static void vc4_cec_read_msg(struct vc4_hdmi *vc4_hdmi, u32 cntrl1) struct cec_msg *msg = &vc4_hdmi->cec_rx_msg; unsigned int i; + lockdep_assert_held(&vc4_hdmi->hw_lock); + msg->len = 1 + ((cntrl1 & VC4_HDMI_CEC_REC_WRD_CNT_MASK) >> VC4_HDMI_CEC_REC_WRD_CNT_SHIFT); @@ -1674,11 +1903,12 @@ static void vc4_cec_read_msg(struct vc4_hdmi *vc4_hdmi, u32 cntrl1) } } -static irqreturn_t vc4_cec_irq_handler_tx_bare(int irq, void *priv) +static irqreturn_t vc4_cec_irq_handler_tx_bare_locked(struct vc4_hdmi *vc4_hdmi) { - struct vc4_hdmi *vc4_hdmi = priv; u32 cntrl1; + lockdep_assert_held(&vc4_hdmi->hw_lock); + cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1); vc4_hdmi->cec_tx_ok = cntrl1 & VC4_HDMI_CEC_TX_STATUS_GOOD; cntrl1 &= ~VC4_HDMI_CEC_START_XMIT_BEGIN; @@ -1687,11 +1917,24 @@ static irqreturn_t vc4_cec_irq_handler_tx_bare(int irq, void *priv) return IRQ_WAKE_THREAD; } -static irqreturn_t vc4_cec_irq_handler_rx_bare(int irq, void *priv) +static irqreturn_t vc4_cec_irq_handler_tx_bare(int irq, void *priv) { struct vc4_hdmi *vc4_hdmi = priv; + irqreturn_t ret; + + spin_lock(&vc4_hdmi->hw_lock); + ret = vc4_cec_irq_handler_tx_bare_locked(vc4_hdmi); + spin_unlock(&vc4_hdmi->hw_lock); + + return ret; +} + +static irqreturn_t vc4_cec_irq_handler_rx_bare_locked(struct vc4_hdmi *vc4_hdmi) +{ u32 cntrl1; + lockdep_assert_held(&vc4_hdmi->hw_lock); + vc4_hdmi->cec_rx_msg.len = 0; cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1); vc4_cec_read_msg(vc4_hdmi, cntrl1); @@ -1704,6 +1947,18 @@ static irqreturn_t vc4_cec_irq_handler_rx_bare(int irq, void *priv) return IRQ_WAKE_THREAD; } +static irqreturn_t vc4_cec_irq_handler_rx_bare(int irq, void *priv) +{ + struct vc4_hdmi *vc4_hdmi = priv; + irqreturn_t ret; + + spin_lock(&vc4_hdmi->hw_lock); + ret = vc4_cec_irq_handler_rx_bare_locked(vc4_hdmi); + spin_unlock(&vc4_hdmi->hw_lock); + + return ret; +} + static irqreturn_t vc4_cec_irq_handler(int irq, void *priv) { struct vc4_hdmi *vc4_hdmi = priv; @@ -1714,69 +1969,142 @@ static 
irqreturn_t vc4_cec_irq_handler(int irq, void *priv) if (!(stat & VC4_HDMI_CPU_CEC)) return IRQ_NONE; + spin_lock(&vc4_hdmi->hw_lock); cntrl5 = HDMI_READ(HDMI_CEC_CNTRL_5); vc4_hdmi->cec_irq_was_rx = cntrl5 & VC4_HDMI_CEC_RX_CEC_INT; if (vc4_hdmi->cec_irq_was_rx) - ret = vc4_cec_irq_handler_rx_bare(irq, priv); + ret = vc4_cec_irq_handler_rx_bare_locked(vc4_hdmi); else - ret = vc4_cec_irq_handler_tx_bare(irq, priv); + ret = vc4_cec_irq_handler_tx_bare_locked(vc4_hdmi); HDMI_WRITE(HDMI_CEC_CPU_CLEAR, VC4_HDMI_CPU_CEC); + spin_unlock(&vc4_hdmi->hw_lock); + return ret; } -static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) +static int vc4_hdmi_cec_enable(struct cec_adapter *adap) { struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); /* clock period in microseconds */ const u32 usecs = 1000000 / CEC_CLOCK_FREQ; - u32 val = HDMI_READ(HDMI_CEC_CNTRL_5); + unsigned long flags; + u32 val; + int ret; + + /* + * NOTE: This function should really take vc4_hdmi->mutex, but doing so + * results in a reentrancy since cec_s_phys_addr_from_edid() called in + * .detect or .get_modes might call .adap_enable, which leads to this + * function being called with that mutex held. + * + * Concurrency is not an issue for the moment since we don't share any + * state with KMS, so we can ignore the lock for now, but we need to + * keep it in mind if we were to change that assumption. + */ + ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev); + if (ret) + return ret; + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + + val = HDMI_READ(HDMI_CEC_CNTRL_5); val &= ~(VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET | VC4_HDMI_CEC_CNT_TO_4700_US_MASK | VC4_HDMI_CEC_CNT_TO_4500_US_MASK); val |= ((4700 / usecs) << VC4_HDMI_CEC_CNT_TO_4700_US_SHIFT) | ((4500 / usecs) << VC4_HDMI_CEC_CNT_TO_4500_US_SHIFT); - if (enable) { - HDMI_WRITE(HDMI_CEC_CNTRL_5, val | - VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET); - HDMI_WRITE(HDMI_CEC_CNTRL_5, val); - HDMI_WRITE(HDMI_CEC_CNTRL_2, - ((1500 / usecs) << VC4_HDMI_CEC_CNT_TO_1500_US_SHIFT) | - ((1300 / usecs) << VC4_HDMI_CEC_CNT_TO_1300_US_SHIFT) | - ((800 / usecs) << VC4_HDMI_CEC_CNT_TO_800_US_SHIFT) | - ((600 / usecs) << VC4_HDMI_CEC_CNT_TO_600_US_SHIFT) | - ((400 / usecs) << VC4_HDMI_CEC_CNT_TO_400_US_SHIFT)); - HDMI_WRITE(HDMI_CEC_CNTRL_3, - ((2750 / usecs) << VC4_HDMI_CEC_CNT_TO_2750_US_SHIFT) | - ((2400 / usecs) << VC4_HDMI_CEC_CNT_TO_2400_US_SHIFT) | - ((2050 / usecs) << VC4_HDMI_CEC_CNT_TO_2050_US_SHIFT) | - ((1700 / usecs) << VC4_HDMI_CEC_CNT_TO_1700_US_SHIFT)); - HDMI_WRITE(HDMI_CEC_CNTRL_4, - ((4300 / usecs) << VC4_HDMI_CEC_CNT_TO_4300_US_SHIFT) | - ((3900 / usecs) << VC4_HDMI_CEC_CNT_TO_3900_US_SHIFT) | - ((3600 / usecs) << VC4_HDMI_CEC_CNT_TO_3600_US_SHIFT) | - ((3500 / usecs) << VC4_HDMI_CEC_CNT_TO_3500_US_SHIFT)); - - if (!vc4_hdmi->variant->external_irq_controller) - HDMI_WRITE(HDMI_CEC_CPU_MASK_CLEAR, VC4_HDMI_CPU_CEC); - } else { - if (!vc4_hdmi->variant->external_irq_controller) - HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, VC4_HDMI_CPU_CEC); - HDMI_WRITE(HDMI_CEC_CNTRL_5, val | - VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET); - } + HDMI_WRITE(HDMI_CEC_CNTRL_5, val | + VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET); + HDMI_WRITE(HDMI_CEC_CNTRL_5, val); + HDMI_WRITE(HDMI_CEC_CNTRL_2, + ((1500 / usecs) << VC4_HDMI_CEC_CNT_TO_1500_US_SHIFT) | + ((1300 / usecs) << VC4_HDMI_CEC_CNT_TO_1300_US_SHIFT) | + ((800 / usecs) << VC4_HDMI_CEC_CNT_TO_800_US_SHIFT) | + ((600 / usecs) << VC4_HDMI_CEC_CNT_TO_600_US_SHIFT) | + ((400 / usecs) << 
VC4_HDMI_CEC_CNT_TO_400_US_SHIFT)); + HDMI_WRITE(HDMI_CEC_CNTRL_3, + ((2750 / usecs) << VC4_HDMI_CEC_CNT_TO_2750_US_SHIFT) | + ((2400 / usecs) << VC4_HDMI_CEC_CNT_TO_2400_US_SHIFT) | + ((2050 / usecs) << VC4_HDMI_CEC_CNT_TO_2050_US_SHIFT) | + ((1700 / usecs) << VC4_HDMI_CEC_CNT_TO_1700_US_SHIFT)); + HDMI_WRITE(HDMI_CEC_CNTRL_4, + ((4300 / usecs) << VC4_HDMI_CEC_CNT_TO_4300_US_SHIFT) | + ((3900 / usecs) << VC4_HDMI_CEC_CNT_TO_3900_US_SHIFT) | + ((3600 / usecs) << VC4_HDMI_CEC_CNT_TO_3600_US_SHIFT) | + ((3500 / usecs) << VC4_HDMI_CEC_CNT_TO_3500_US_SHIFT)); + + if (!vc4_hdmi->variant->external_irq_controller) + HDMI_WRITE(HDMI_CEC_CPU_MASK_CLEAR, VC4_HDMI_CPU_CEC); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + return 0; } +static int vc4_hdmi_cec_disable(struct cec_adapter *adap) +{ + struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); + unsigned long flags; + + /* + * NOTE: This function should really take vc4_hdmi->mutex, but doing so + * results in a reentrancy since cec_s_phys_addr_from_edid() called in + * .detect or .get_modes might call .adap_enable, which leads to this + * function being called with that mutex held. + * + * Concurrency is not an issue for the moment since we don't share any + * state with KMS, so we can ignore the lock for now, but we need to + * keep it in mind if we were to change that assumption. + */ + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + + if (!vc4_hdmi->variant->external_irq_controller) + HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, VC4_HDMI_CPU_CEC); + + HDMI_WRITE(HDMI_CEC_CNTRL_5, HDMI_READ(HDMI_CEC_CNTRL_5) | + VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + + pm_runtime_put(&vc4_hdmi->pdev->dev); + + return 0; +} + +static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) +{ + if (enable) + return vc4_hdmi_cec_enable(adap); + else + return vc4_hdmi_cec_disable(adap); +} + static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr) { struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); + unsigned long flags; + + /* + * NOTE: This function should really take vc4_hdmi->mutex, but doing so + * results in a reentrancy since cec_s_phys_addr_from_edid() called in + * .detect or .get_modes might call .adap_enable, which leads to this + * function being called with that mutex held. + * + * Concurrency is not an issue for the moment since we don't share any + * state with KMS, so we can ignore the lock for now, but we need to + * keep it in mind if we were to change that assumption. + */ + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_CEC_CNTRL_1, (HDMI_READ(HDMI_CEC_CNTRL_1) & ~VC4_HDMI_CEC_ADDR_MASK) | (log_addr & 0xf) << VC4_HDMI_CEC_ADDR_SHIFT); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + return 0; } @@ -1785,14 +2113,28 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, { struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); struct drm_device *dev = vc4_hdmi->connector.dev; + unsigned long flags; u32 val; unsigned int i; + /* + * NOTE: This function should really take vc4_hdmi->mutex, but doing so + * results in a reentrancy since cec_s_phys_addr_from_edid() called in + * .detect or .get_modes might call .adap_enable, which leads to this + * function being called with that mutex held. + * + * Concurrency is not an issue for the moment since we don't share any + * state with KMS, so we can ignore the lock for now, but we need to + * keep it in mind if we were to change that assumption. 
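+ * (CEC frames are at most 16 bytes including the header block,
+ * which is what the msg->len check below enforces.)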
+ */ + if (msg->len > 16) { drm_err(dev, "Attempting to transmit too much data (%d)\n", msg->len); return -ENOMEM; } + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + for (i = 0; i < msg->len; i += 4) HDMI_WRITE(HDMI_CEC_TX_DATA_1 + (i >> 2), (msg->msg[i]) | @@ -1808,6 +2150,9 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, val |= VC4_HDMI_CEC_START_XMIT_BEGIN; HDMI_WRITE(HDMI_CEC_CNTRL_1, val); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + return 0; } @@ -1822,6 +2167,7 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi) struct cec_connector_info conn_info; struct platform_device *pdev = vc4_hdmi->pdev; struct device *dev = &pdev->dev; + unsigned long flags; u32 value; int ret; @@ -1841,10 +2187,12 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi) cec_fill_conn_info_from_drm(&conn_info, &vc4_hdmi->connector); cec_s_conn_info(vc4_hdmi->cec_adap, &conn_info); + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); value = HDMI_READ(HDMI_CEC_CNTRL_1); /* Set the logical address to Unregistered */ value |= VC4_HDMI_CEC_ADDR_MASK; HDMI_WRITE(HDMI_CEC_CNTRL_1, value); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); vc4_hdmi_cec_update_clk_div(vc4_hdmi); @@ -1863,7 +2211,9 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi) if (ret) goto err_remove_cec_rx_handler; } else { + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); ret = request_threaded_irq(platform_get_irq(pdev, 0), vc4_cec_irq_handler, @@ -2099,6 +2449,27 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi) return 0; } +static int __maybe_unused vc4_hdmi_runtime_suspend(struct device *dev) +{ + struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); + + clk_disable_unprepare(vc4_hdmi->hsm_clock); + + return 0; +} + +static int vc4_hdmi_runtime_resume(struct device *dev) +{ + struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare_enable(vc4_hdmi->hsm_clock); + if (ret) + return ret; + + return 0; +} + static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) { const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev); @@ -2112,6 +2483,8 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) vc4_hdmi = devm_kzalloc(dev, sizeof(*vc4_hdmi), GFP_KERNEL); if (!vc4_hdmi) return -ENOMEM; + mutex_init(&vc4_hdmi->mutex); + spin_lock_init(&vc4_hdmi->hw_lock); INIT_DELAYED_WORK(&vc4_hdmi->scrambling_work, vc4_hdmi_scrambling_wq); dev_set_drvdata(dev, vc4_hdmi); @@ -2125,6 +2498,14 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) vc4_hdmi->pdev = pdev; vc4_hdmi->variant = variant; + /* + * Since we don't know the state of the controller and its + * display (if any), let's assume it's always enabled. + * vc4_hdmi_disable_scrambling() will thus run at boot, make + * sure it's disabled, and avoid any inconsistency. + */ + vc4_hdmi->scdc_enabled = true; + ret = variant->init_resources(vc4_hdmi); if (ret) return ret; @@ -2162,6 +2543,31 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) vc4_hdmi->disable_4kp60 = true; } + /* + * If we boot without any cable connected to the HDMI connector, + * the firmware will skip the HSM initialization and leave it + * with a rate of 0, resulting in a bus lockup when we're + * accessing the registers even if it's enabled. 
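+ * (The HDMI register bus is itself clocked from the HSM, so a
+ * 0 Hz rate stalls every register access.)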
+ * + * Let's put a sensible default at runtime_resume so that we + * don't end up in this situation. + */ + ret = clk_set_min_rate(vc4_hdmi->hsm_clock, HSM_MIN_CLOCK_FREQ); + if (ret) + goto err_put_ddc; + + /* + * We need to have the device powered up at this point to call + * our reset hook and for the CEC init. + */ + ret = vc4_hdmi_runtime_resume(dev); + if (ret) + goto err_put_ddc; + + pm_runtime_get_noresume(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + if (vc4_hdmi->variant->reset) vc4_hdmi->variant->reset(vc4_hdmi); @@ -2173,8 +2579,6 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) clk_prepare_enable(vc4_hdmi->pixel_bvb_clock); } - pm_runtime_enable(dev); - drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(encoder, &vc4_hdmi_encoder_helper_funcs); @@ -2198,6 +2602,8 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) vc4_hdmi_debugfs_regs, vc4_hdmi); + pm_runtime_put_sync(dev); + return 0; err_free_cec: @@ -2208,6 +2614,7 @@ err_destroy_conn: vc4_hdmi_connector_destroy(&vc4_hdmi->connector); err_destroy_encoder: drm_encoder_cleanup(encoder); + pm_runtime_put_sync(dev); pm_runtime_disable(dev); err_put_ddc: put_device(&vc4_hdmi->ddc->dev); @@ -2294,7 +2701,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = { .encoder_type = VC4_ENCODER_TYPE_HDMI0, .debugfs_name = "hdmi0_regs", .card_name = "vc4-hdmi-0", - .max_pixel_clock = HDMI_14_MAX_TMDS_CLK, + .max_pixel_clock = 600000000, .registers = vc5_hdmi_hdmi0_fields, .num_registers = ARRAY_SIZE(vc5_hdmi_hdmi0_fields), .phy_lane_mapping = { @@ -2353,11 +2760,18 @@ static const struct of_device_id vc4_hdmi_dt_match[] = { {} }; +static const struct dev_pm_ops vc4_hdmi_pm_ops = { + SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend, + vc4_hdmi_runtime_resume, + NULL) +}; + struct platform_driver vc4_hdmi_driver = { .probe = vc4_hdmi_dev_probe, .remove = vc4_hdmi_dev_remove, .driver = { .name = "vc4_hdmi", .of_match_table = vc4_hdmi_dt_match, + .pm = &vc4_hdmi_pm_ops, }, }; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h index 33e9f665ab8e..36c0b082a43b 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.h +++ b/drivers/gpu/drm/vc4/vc4_hdmi.h @@ -178,6 +178,43 @@ struct vc4_hdmi { struct debugfs_regset32 hdmi_regset; struct debugfs_regset32 hd_regset; + + /** + * @hw_lock: Spinlock protecting device register access. + */ + spinlock_t hw_lock; + + /** + * @mutex: Mutex protecting the driver access across multiple + * frameworks (KMS, ALSA). + * + * NOTE: While supported, CEC has been left out since + * cec_s_phys_addr_from_edid() might call .adap_enable and lead to a + * reentrancy issue between .get_modes (or .detect) and .adap_enable. + * Since we don't share any state between the CEC hooks and KMS', it's + * not a big deal. The only trouble might come from updating the CEC + * clock divider which might be affected by a modeset, but CEC should + * be resilient to that. + */ + struct mutex mutex; + + /** + * @saved_adjusted_mode: Copy of @drm_crtc_state.adjusted_mode + * for use by ALSA hooks and interrupt handlers. Protected by @mutex. + */ + struct drm_display_mode saved_adjusted_mode; + + /** + * @output_enabled: Is the HDMI controller currently active? + * Protected by @mutex. + */ + bool output_enabled; + + /** + * @scdc_enabled: Is the HDMI controller currently running with + * the scrambler on? Protected by @mutex. 
+ */ + bool scdc_enabled; }; static inline struct vc4_hdmi * diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c index 36535480f8e2..62148f0dc284 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c @@ -130,31 +130,49 @@ void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, struct vc4_hdmi_connector_state *conn_state) { + unsigned long flags; + /* PHY should be in reset, like * vc4_hdmi_encoder_disable() does. */ + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0xf << 16); HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } void vc4_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi) { + unsigned long flags; + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0xf << 16); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } void vc4_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi) { + unsigned long flags; + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_TX_PHY_CTL_0, HDMI_READ(HDMI_TX_PHY_CTL_0) & ~VC4_HDMI_TX_PHY_RNG_PWRDN); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } void vc4_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi) { + unsigned long flags; + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_TX_PHY_CTL_0, HDMI_READ(HDMI_TX_PHY_CTL_0) | VC4_HDMI_TX_PHY_RNG_PWRDN); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } static unsigned long long @@ -336,6 +354,8 @@ phy_get_channel_settings(enum vc4_hdmi_phy_channel chan, static void vc5_hdmi_reset_phy(struct vc4_hdmi *vc4_hdmi) { + lockdep_assert_held(&vc4_hdmi->hw_lock); + HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0x0f); HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, BIT(10)); } @@ -348,10 +368,13 @@ void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, unsigned long long pixel_freq = conn_state->pixel_rate; unsigned long long vco_freq; unsigned char word_sel; + unsigned long flags; u8 vco_sel, vco_div; vco_freq = phy_get_vco_freq(pixel_freq, &vco_sel, &vco_div); + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + vc5_hdmi_reset_phy(vc4_hdmi); HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, @@ -501,23 +524,37 @@ void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, HDMI_READ(HDMI_TX_PHY_RESET_CTL) | VC4_HDMI_TX_PHY_RESET_CTL_PLL_RESETB | VC4_HDMI_TX_PHY_RESET_CTL_PLLDIV_RESETB); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } void vc5_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi) { + unsigned long flags; + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); vc5_hdmi_reset_phy(vc4_hdmi); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } void vc5_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi) { + unsigned long flags; + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, HDMI_READ(HDMI_TX_PHY_POWERDOWN_CTL) & ~VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } void vc5_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi) { + unsigned long flags; + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, HDMI_READ(HDMI_TX_PHY_POWERDOWN_CTL) | VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h index 19d2fdc446bc..fc971506bd4f 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h +++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h @@ -1,6 +1,8 @@ #ifndef _VC4_HDMI_REGS_H_ #define _VC4_HDMI_REGS_H_ +#include <linux/pm_runtime.h> + #include 
"vc4_hdmi.h" #define VC4_HDMI_PACKET_STRIDE 0x24 @@ -412,6 +414,8 @@ static inline u32 vc4_hdmi_read(struct vc4_hdmi *hdmi, const struct vc4_hdmi_variant *variant = hdmi->variant; void __iomem *base; + WARN_ON(!pm_runtime_active(&hdmi->pdev->dev)); + if (reg >= variant->num_registers) { dev_warn(&hdmi->pdev->dev, "Invalid register ID %u\n", reg); @@ -438,6 +442,10 @@ static inline void vc4_hdmi_write(struct vc4_hdmi *hdmi, const struct vc4_hdmi_variant *variant = hdmi->variant; void __iomem *base; + lockdep_assert_held(&hdmi->hw_lock); + + WARN_ON(!pm_runtime_active(&hdmi->pdev->dev)); + if (reg >= variant->num_registers) { dev_warn(&hdmi->pdev->dev, "Invalid register ID %u\n", reg); diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index c239045e05d6..604933e20e6a 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -365,17 +365,16 @@ static void vc4_hvs_update_dlist(struct drm_crtc *crtc) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); + unsigned long flags; if (crtc->state->event) { - unsigned long flags; - crtc->state->event->pipe = drm_crtc_index(crtc); WARN_ON(drm_crtc_vblank_get(crtc) != 0); spin_lock_irqsave(&dev->event_lock, flags); - if (!vc4_state->feed_txp || vc4_state->txp_armed) { + if (!vc4_crtc->feeds_txp || vc4_state->txp_armed) { vc4_crtc->event = crtc->state->event; crtc->state->event = NULL; } @@ -388,6 +387,22 @@ static void vc4_hvs_update_dlist(struct drm_crtc *crtc) HVS_WRITE(SCALER_DISPLISTX(vc4_state->assigned_channel), vc4_state->mm.start); } + + spin_lock_irqsave(&vc4_crtc->irq_lock, flags); + vc4_crtc->current_dlist = vc4_state->mm.start; + spin_unlock_irqrestore(&vc4_crtc->irq_lock, flags); +} + +void vc4_hvs_atomic_begin(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); + unsigned long flags; + + spin_lock_irqsave(&vc4_crtc->irq_lock, flags); + vc4_crtc->current_hvs_channel = vc4_state->assigned_channel; + spin_unlock_irqrestore(&vc4_crtc->irq_lock, flags); } void vc4_hvs_atomic_enable(struct drm_crtc *crtc, @@ -395,10 +410,9 @@ void vc4_hvs_atomic_enable(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); - struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); - struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(new_crtc_state); struct drm_display_mode *mode = &crtc->state->adjusted_mode; - bool oneshot = vc4_state->feed_txp; + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + bool oneshot = vc4_crtc->feeds_txp; vc4_hvs_update_dlist(crtc); vc4_hvs_init_channel(vc4, crtc, mode, oneshot); diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index f0b3e4cf5bce..24de29bc1cda 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -39,9 +39,11 @@ static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv) struct vc4_hvs_state { struct drm_private_state base; + unsigned long core_clock_rate; struct { unsigned in_use: 1; + unsigned long fifo_load; struct drm_crtc_commit *pending_commit; } fifo_state[HVS_NUM_CHANNELS]; }; @@ -233,6 +235,7 @@ static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4, unsigned int i; for_each_new_crtc_in_state(state, crtc, crtc_state, i) { + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); struct vc4_crtc_state 
*vc4_state = to_vc4_crtc_state(crtc_state); u32 dispctrl; u32 dsp3_mux; @@ -253,7 +256,7 @@ static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4, * TXP IP, and we need to disable the FIFO2 -> pixelvalve1 * route. */ - if (vc4_state->feed_txp) + if (vc4_crtc->feeds_txp) dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX); else dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX); @@ -337,12 +340,21 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) struct drm_device *dev = state->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_hvs *hvs = vc4->hvs; - struct drm_crtc_state *old_crtc_state; struct drm_crtc_state *new_crtc_state; + struct vc4_hvs_state *new_hvs_state; struct drm_crtc *crtc; struct vc4_hvs_state *old_hvs_state; + unsigned int channel; int i; + old_hvs_state = vc4_hvs_get_old_global_state(state); + if (WARN_ON(IS_ERR(old_hvs_state))) + return; + + new_hvs_state = vc4_hvs_get_new_global_state(state); + if (WARN_ON(IS_ERR(new_hvs_state))) + return; + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { struct vc4_crtc_state *vc4_crtc_state; @@ -353,30 +365,32 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel); } - if (vc4->hvs->hvs5) - clk_set_min_rate(hvs->core_clk, 500000000); - - old_hvs_state = vc4_hvs_get_old_global_state(state); - if (!old_hvs_state) - return; - - for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) { - struct vc4_crtc_state *vc4_crtc_state = - to_vc4_crtc_state(old_crtc_state); - unsigned int channel = vc4_crtc_state->assigned_channel; + for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) { + struct drm_crtc_commit *commit; int ret; - if (channel == VC4_HVS_CHANNEL_DISABLED) + if (!old_hvs_state->fifo_state[channel].in_use) continue; - if (!old_hvs_state->fifo_state[channel].in_use) + commit = old_hvs_state->fifo_state[channel].pending_commit; + if (!commit) continue; - ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[channel].pending_commit); + ret = drm_crtc_commit_wait(commit); if (ret) drm_err(dev, "Timed out waiting for commit\n"); + + drm_crtc_commit_put(commit); + old_hvs_state->fifo_state[channel].pending_commit = NULL; } + if (vc4->hvs->hvs5) { + unsigned long core_rate = max_t(unsigned long, + 500000000, + new_hvs_state->core_clock_rate); + + clk_set_min_rate(hvs->core_clk, core_rate); + } drm_atomic_helper_commit_modeset_disables(dev, state); vc4_ctm_commit(vc4, state); @@ -398,8 +412,12 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_helper_cleanup_planes(dev, state); - if (vc4->hvs->hvs5) - clk_set_min_rate(hvs->core_clk, 0); + if (vc4->hvs->hvs5) { + drm_dbg(dev, "Running the core clock at %lu Hz\n", + new_hvs_state->core_clock_rate); + + clk_set_min_rate(hvs->core_clk, new_hvs_state->core_clock_rate); + } } static int vc4_atomic_commit_setup(struct drm_atomic_state *state) @@ -410,8 +428,8 @@ static int vc4_atomic_commit_setup(struct drm_atomic_state *state) unsigned int i; hvs_state = vc4_hvs_get_new_global_state(state); - if (!hvs_state) - return -EINVAL; + if (WARN_ON(IS_ERR(hvs_state))) + return PTR_ERR(hvs_state); for_each_new_crtc_in_state(state, crtc, crtc_state, i) { struct vc4_crtc_state *vc4_crtc_state = @@ -551,9 +569,6 @@ static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state) struct drm_plane *plane; int i; - if (!vc4->load_tracker_available) - return 0; - priv_state = drm_atomic_get_private_obj_state(state, &vc4->load_tracker); if 
(IS_ERR(priv_state)) @@ -628,9 +643,6 @@ static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused) { struct vc4_dev *vc4 = to_vc4_dev(dev); - if (!vc4->load_tracker_available) - return; - drm_atomic_private_obj_fini(&vc4->load_tracker); } @@ -638,9 +650,6 @@ static int vc4_load_tracker_obj_init(struct vc4_dev *vc4) { struct vc4_load_tracker_state *load_state; - if (!vc4->load_tracker_available) - return 0; - load_state = kzalloc(sizeof(*load_state), GFP_KERNEL); if (!load_state) return -ENOMEM; @@ -665,17 +674,13 @@ vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj) __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); - for (i = 0; i < HVS_NUM_CHANNELS; i++) { state->fifo_state[i].in_use = old_state->fifo_state[i].in_use; - - if (!old_state->fifo_state[i].pending_commit) - continue; - - state->fifo_state[i].pending_commit = - drm_crtc_commit_get(old_state->fifo_state[i].pending_commit); + state->fifo_state[i].fifo_load = old_state->fifo_state[i].fifo_load; } + state->core_clock_rate = old_state->core_clock_rate; + return &state->base; } @@ -762,8 +767,8 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev, unsigned int i; hvs_new_state = vc4_hvs_get_global_state(state); - if (!hvs_new_state) - return -EINVAL; + if (IS_ERR(hvs_new_state)) + return PTR_ERR(hvs_new_state); for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++) if (!hvs_new_state->fifo_state[i].in_use) @@ -831,6 +836,76 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev, } static int +vc4_core_clock_atomic_check(struct drm_atomic_state *state) +{ + struct vc4_dev *vc4 = to_vc4_dev(state->dev); + struct drm_private_state *priv_state; + struct vc4_hvs_state *hvs_new_state; + struct vc4_load_tracker_state *load_state; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct drm_crtc *crtc; + unsigned int num_outputs; + unsigned long pixel_rate; + unsigned long cob_rate; + unsigned int i; + + priv_state = drm_atomic_get_private_obj_state(state, + &vc4->load_tracker); + if (IS_ERR(priv_state)) + return PTR_ERR(priv_state); + + load_state = to_vc4_load_tracker_state(priv_state); + + hvs_new_state = vc4_hvs_get_global_state(state); + if (IS_ERR(hvs_new_state)) + return PTR_ERR(hvs_new_state); + + for_each_oldnew_crtc_in_state(state, crtc, + old_crtc_state, + new_crtc_state, + i) { + if (old_crtc_state->active) { + struct vc4_crtc_state *old_vc4_state = + to_vc4_crtc_state(old_crtc_state); + unsigned int channel = old_vc4_state->assigned_channel; + + hvs_new_state->fifo_state[channel].fifo_load = 0; + } + + if (new_crtc_state->active) { + struct vc4_crtc_state *new_vc4_state = + to_vc4_crtc_state(new_crtc_state); + unsigned int channel = new_vc4_state->assigned_channel; + + hvs_new_state->fifo_state[channel].fifo_load = + new_vc4_state->hvs_load; + } + } + + cob_rate = 0; + num_outputs = 0; + for (i = 0; i < HVS_NUM_CHANNELS; i++) { + if (!hvs_new_state->fifo_state[i].in_use) + continue; + + num_outputs++; + cob_rate += hvs_new_state->fifo_state[i].fifo_load; + } + + pixel_rate = load_state->hvs_load; + if (num_outputs > 1) { + pixel_rate = (pixel_rate * 40) / 100; + } else { + pixel_rate = (pixel_rate * 60) / 100; + } + + hvs_new_state->core_clock_rate = max(cob_rate, pixel_rate); + + return 0; +} + + +static int vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { int ret; @@ -847,7 +922,11 @@ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) if (ret) return ret; - return 
vc4_load_tracker_atomic_check(state); + ret = vc4_load_tracker_atomic_check(state); + if (ret) + return ret; + + return vc4_core_clock_atomic_check(state); } static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = { @@ -868,9 +947,12 @@ int vc4_kms_load(struct drm_device *dev) "brcm,bcm2711-vc5"); int ret; + /* + * The limits enforced by the load tracker aren't relevant for + * the BCM2711, but the load tracker computations are used for + * the core clock rate calculation. + */ if (!is_vc5) { - vc4->load_tracker_available = true; - /* Start with the load tracker enabled. Can be * disabled through the debugfs load_tracker file. */ diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 19161b6ab27f..920a9eefe426 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -33,6 +33,7 @@ static const struct hvs_format { u32 hvs; /* HVS_FORMAT_* */ u32 pixel_order; u32 pixel_order_hvs5; + bool hvs5_only; } hvs_formats[] = { { .drm = DRM_FORMAT_XRGB8888, @@ -128,6 +129,12 @@ static const struct hvs_format { .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE, .pixel_order = HVS_PIXEL_ORDER_XYCRCB, }, + { + .drm = DRM_FORMAT_P030, + .hvs = HVS_PIXEL_FORMAT_YCBCR_10BIT, + .pixel_order = HVS_PIXEL_ORDER_XYCBCR, + .hvs5_only = true, + }, }; static const struct hvs_format *vc4_get_hvs_format(u32 drm_format) @@ -529,11 +536,6 @@ static void vc4_plane_calc_load(struct drm_plane_state *state) struct vc4_plane_state *vc4_state; struct drm_crtc_state *crtc_state; unsigned int vscale_factor; - struct vc4_dev *vc4; - - vc4 = to_vc4_dev(state->plane->dev); - if (!vc4->load_tracker_available) - return; vc4_state = to_vc4_plane_state(state); crtc_state = drm_atomic_get_existing_crtc_state(state->state, @@ -621,6 +623,51 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state) return 0; } +/* + * The colorspace conversion matrices are held in 3 entries in the dlist. + * Create an array of them, with entries for each full and limited mode, and + * each supported colorspace. + */ +static const u32 colorspace_coeffs[2][DRM_COLOR_ENCODING_MAX][3] = { + { + /* Limited range */ + { + /* BT601 */ + SCALER_CSC0_ITR_R_601_5, + SCALER_CSC1_ITR_R_601_5, + SCALER_CSC2_ITR_R_601_5, + }, { + /* BT709 */ + SCALER_CSC0_ITR_R_709_3, + SCALER_CSC1_ITR_R_709_3, + SCALER_CSC2_ITR_R_709_3, + }, { + /* BT2020 */ + SCALER_CSC0_ITR_R_2020, + SCALER_CSC1_ITR_R_2020, + SCALER_CSC2_ITR_R_2020, + } + }, { + /* Full range */ + { + /* JFIF */ + SCALER_CSC0_JPEG_JFIF, + SCALER_CSC1_JPEG_JFIF, + SCALER_CSC2_JPEG_JFIF, + }, { + /* BT709 */ + SCALER_CSC0_ITR_R_709_3_FR, + SCALER_CSC1_ITR_R_709_3_FR, + SCALER_CSC2_ITR_R_709_3_FR, + }, { + /* BT2020 */ + SCALER_CSC0_ITR_R_2020_FR, + SCALER_CSC1_ITR_R_2020_FR, + SCALER_CSC2_ITR_R_2020_FR, + } + } +}; + /* Writes out a full display list for an active plane to the plane's * private dlist state. 
*/ @@ -767,47 +814,90 @@ static int vc4_plane_mode_set(struct drm_plane *plane, case DRM_FORMAT_MOD_BROADCOM_SAND128: case DRM_FORMAT_MOD_BROADCOM_SAND256: { uint32_t param = fourcc_mod_broadcom_param(fb->modifier); - u32 tile_w, tile, x_off, pix_per_tile; - - hvs_format = HVS_PIXEL_FORMAT_H264; - - switch (base_format_mod) { - case DRM_FORMAT_MOD_BROADCOM_SAND64: - tiling = SCALER_CTL0_TILING_64B; - tile_w = 64; - break; - case DRM_FORMAT_MOD_BROADCOM_SAND128: - tiling = SCALER_CTL0_TILING_128B; - tile_w = 128; - break; - case DRM_FORMAT_MOD_BROADCOM_SAND256: - tiling = SCALER_CTL0_TILING_256B_OR_T; - tile_w = 256; - break; - default: - break; - } if (param > SCALER_TILE_HEIGHT_MASK) { - DRM_DEBUG_KMS("SAND height too large (%d)\n", param); + DRM_DEBUG_KMS("SAND height too large (%d)\n", + param); return -EINVAL; } - pix_per_tile = tile_w / fb->format->cpp[0]; - tile = vc4_state->src_x / pix_per_tile; - x_off = vc4_state->src_x % pix_per_tile; + if (fb->format->format == DRM_FORMAT_P030) { + hvs_format = HVS_PIXEL_FORMAT_YCBCR_10BIT; + tiling = SCALER_CTL0_TILING_128B; + } else { + hvs_format = HVS_PIXEL_FORMAT_H264; + + switch (base_format_mod) { + case DRM_FORMAT_MOD_BROADCOM_SAND64: + tiling = SCALER_CTL0_TILING_64B; + break; + case DRM_FORMAT_MOD_BROADCOM_SAND128: + tiling = SCALER_CTL0_TILING_128B; + break; + case DRM_FORMAT_MOD_BROADCOM_SAND256: + tiling = SCALER_CTL0_TILING_256B_OR_T; + break; + default: + return -EINVAL; + } + } /* Adjust the base pointer to the first pixel to be scanned * out. + * + * For P030, y_ptr [31:4] is the 128bit word for the start pixel + * y_ptr [3:0] is the pixel (0-11) contained within that 128bit + * word that should be taken as the first pixel. + * Ditto uv_ptr [31:4] vs [3:0], however [3:0] contains the + * element within the 128bit word, eg for pixel 3 the value + * should be 6. */ for (i = 0; i < num_planes; i++) { + u32 tile_w, tile, x_off, pix_per_tile; + + if (fb->format->format == DRM_FORMAT_P030) { + /* + * Spec says: bits [31:4] of the given address + * should point to the 128-bit word containing + * the desired starting pixel, and bits[3:0] + * should be between 0 and 11, indicating which + * of the 12-pixels in that 128-bit word is the + * first pixel to be used + */ + u32 remaining_pixels = vc4_state->src_x % 96; + u32 aligned = remaining_pixels / 12; + u32 last_bits = remaining_pixels % 12; + + x_off = aligned * 16 + last_bits; + tile_w = 128; + pix_per_tile = 96; + } else { + switch (base_format_mod) { + case DRM_FORMAT_MOD_BROADCOM_SAND64: + tile_w = 64; + break; + case DRM_FORMAT_MOD_BROADCOM_SAND128: + tile_w = 128; + break; + case DRM_FORMAT_MOD_BROADCOM_SAND256: + tile_w = 256; + break; + default: + return -EINVAL; + } + pix_per_tile = tile_w / fb->format->cpp[0]; + x_off = (vc4_state->src_x % pix_per_tile) / + (i ? h_subsample : 1) * + fb->format->cpp[i]; + } + + tile = vc4_state->src_x / pix_per_tile; + vc4_state->offsets[i] += param * tile_w * tile; vc4_state->offsets[i] += src_y / (i ? v_subsample : 1) * tile_w; - vc4_state->offsets[i] += x_off / - (i ? h_subsample : 1) * - fb->format->cpp[i]; + vc4_state->offsets[i] += x_off & ~(i ? 
1 : 0); } pitch0 = VC4_SET_FIELD(param, SCALER_TILE_HEIGHT); @@ -960,7 +1050,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane, /* Pitch word 1/2 */ for (i = 1; i < num_planes; i++) { - if (hvs_format != HVS_PIXEL_FORMAT_H264) { + if (hvs_format != HVS_PIXEL_FORMAT_H264 && + hvs_format != HVS_PIXEL_FORMAT_YCBCR_10BIT) { vc4_dlist_write(vc4_state, VC4_SET_FIELD(fb->pitches[i], SCALER_SRC_PITCH)); @@ -971,9 +1062,20 @@ static int vc4_plane_mode_set(struct drm_plane *plane, /* Colorspace conversion words */ if (vc4_state->is_yuv) { - vc4_dlist_write(vc4_state, SCALER_CSC0_ITR_R_601_5); - vc4_dlist_write(vc4_state, SCALER_CSC1_ITR_R_601_5); - vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5); + enum drm_color_encoding color_encoding = state->color_encoding; + enum drm_color_range color_range = state->color_range; + const u32 *ccm; + + if (color_encoding >= DRM_COLOR_ENCODING_MAX) + color_encoding = DRM_COLOR_YCBCR_BT601; + if (color_range >= DRM_COLOR_RANGE_MAX) + color_range = DRM_COLOR_YCBCR_LIMITED_RANGE; + + ccm = colorspace_coeffs[color_range][color_encoding]; + + vc4_dlist_write(vc4_state, ccm[0]); + vc4_dlist_write(vc4_state, ccm[1]); + vc4_dlist_write(vc4_state, ccm[2]); } vc4_state->lbm_offset = 0; @@ -1320,6 +1422,13 @@ static bool vc4_format_mod_supported(struct drm_plane *plane, default: return false; } + case DRM_FORMAT_P030: + switch (fourcc_mod_broadcom_mod(modifier)) { + case DRM_FORMAT_MOD_BROADCOM_SAND128: + return true; + default: + return false; + } case DRM_FORMAT_RGBX1010102: case DRM_FORMAT_BGRX1010102: case DRM_FORMAT_RGBA1010102: @@ -1352,8 +1461,11 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, struct drm_plane *plane = NULL; struct vc4_plane *vc4_plane; u32 formats[ARRAY_SIZE(hvs_formats)]; + int num_formats = 0; int ret = 0; unsigned i; + bool hvs5 = of_device_is_compatible(dev->dev->of_node, + "brcm,bcm2711-vc5"); static const uint64_t modifiers[] = { DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED, DRM_FORMAT_MOD_BROADCOM_SAND128, @@ -1368,13 +1480,17 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, if (!vc4_plane) return ERR_PTR(-ENOMEM); - for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) - formats[i] = hvs_formats[i].drm; + for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) { + if (!hvs_formats[i].hvs5_only || hvs5) { + formats[num_formats] = hvs_formats[i].drm; + num_formats++; + } + } plane = &vc4_plane->base; ret = drm_universal_plane_init(dev, plane, 0, &vc4_plane_funcs, - formats, ARRAY_SIZE(formats), + formats, num_formats, modifiers, type, NULL); if (ret) return ERR_PTR(ret); @@ -1388,6 +1504,15 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y); + drm_plane_create_color_properties(plane, + BIT(DRM_COLOR_YCBCR_BT601) | + BIT(DRM_COLOR_YCBCR_BT709) | + BIT(DRM_COLOR_YCBCR_BT2020), + BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | + BIT(DRM_COLOR_YCBCR_FULL_RANGE), + DRM_COLOR_YCBCR_BT709, + DRM_COLOR_YCBCR_LIMITED_RANGE); + return plane; } diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h index 489f921ef44d..7538b84a6dca 100644 --- a/drivers/gpu/drm/vc4/vc4_regs.h +++ b/drivers/gpu/drm/vc4/vc4_regs.h @@ -975,7 +975,10 @@ enum hvs_pixel_format { #define SCALER_CSC0_COEF_CR_OFS_SHIFT 0 #define SCALER_CSC0_ITR_R_601_5 0x00f00000 #define SCALER_CSC0_ITR_R_709_3 0x00f00000 +#define SCALER_CSC0_ITR_R_2020 0x00f00000 #define SCALER_CSC0_JPEG_JFIF 0x00000000 +#define SCALER_CSC0_ITR_R_709_3_FR 0x00000000 +#define SCALER_CSC0_ITR_R_2020_FR 0x00000000 /* S2.8 contribution of Cb to Green 
*/ #define SCALER_CSC1_COEF_CB_GRN_MASK VC4_MASK(31, 22) @@ -990,8 +993,11 @@ enum hvs_pixel_format { #define SCALER_CSC1_COEF_CR_BLU_MASK VC4_MASK(1, 0) #define SCALER_CSC1_COEF_CR_BLU_SHIFT 0 #define SCALER_CSC1_ITR_R_601_5 0xe73304a8 -#define SCALER_CSC1_ITR_R_709_3 0xf2b784a8 -#define SCALER_CSC1_JPEG_JFIF 0xea34a400 +#define SCALER_CSC1_ITR_R_709_3 0xf27784a8 +#define SCALER_CSC1_ITR_R_2020 0xf43594a8 +#define SCALER_CSC1_JPEG_JFIF 0xea349400 +#define SCALER_CSC1_ITR_R_709_3_FR 0xf4388400 +#define SCALER_CSC1_ITR_R_2020_FR 0xf5b6d400 /* S2.8 contribution of Cb to Red */ #define SCALER_CSC2_COEF_CB_RED_MASK VC4_MASK(29, 20) @@ -1002,9 +1008,12 @@ enum hvs_pixel_format { /* S2.8 contribution of Cb to Blue */ #define SCALER_CSC2_COEF_CB_BLU_MASK VC4_MASK(19, 10) #define SCALER_CSC2_COEF_CB_BLU_SHIFT 10 -#define SCALER_CSC2_ITR_R_601_5 0x00066204 -#define SCALER_CSC2_ITR_R_709_3 0x00072a1c -#define SCALER_CSC2_JPEG_JFIF 0x000599c5 +#define SCALER_CSC2_ITR_R_601_5 0x00066604 +#define SCALER_CSC2_ITR_R_709_3 0x00072e1d +#define SCALER_CSC2_ITR_R_2020 0x0006b624 +#define SCALER_CSC2_JPEG_JFIF 0x00059dc6 +#define SCALER_CSC2_ITR_R_709_3_FR 0x00064ddb +#define SCALER_CSC2_ITR_R_2020_FR 0x0005e5e2 #define SCALER_TPZ0_VERT_RECALC BIT(31) #define SCALER_TPZ0_SCALE_MASK VC4_MASK(28, 8) diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c index 2fc7f4b5fa09..9809ca3e2945 100644 --- a/drivers/gpu/drm/vc4/vc4_txp.c +++ b/drivers/gpu/drm/vc4/vc4_txp.c @@ -391,7 +391,6 @@ static int vc4_txp_atomic_check(struct drm_crtc *crtc, { struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); - struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state); int ret; ret = vc4_hvs_atomic_check(crtc, state); @@ -399,7 +398,6 @@ static int vc4_txp_atomic_check(struct drm_crtc *crtc, return ret; crtc_state->no_vblank = true; - vc4_state->feed_txp = true; return 0; } @@ -437,6 +435,7 @@ static void vc4_txp_atomic_disable(struct drm_crtc *crtc, static const struct drm_crtc_helper_funcs vc4_txp_crtc_helper_funcs = { .atomic_check = vc4_txp_atomic_check, + .atomic_begin = vc4_hvs_atomic_begin, .atomic_flush = vc4_hvs_atomic_flush, .atomic_enable = vc4_txp_atomic_enable, .atomic_disable = vc4_txp_atomic_disable, @@ -482,6 +481,7 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data) vc4_crtc->pdev = pdev; vc4_crtc->data = &vc4_txp_crtc_data; + vc4_crtc->feeds_txp = true; txp->pdev = pdev; diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index a87eafa89e9f..c5e3e5457737 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -97,7 +97,7 @@ static struct drm_gem_object *vgem_gem_create_object(struct drm_device *dev, siz obj = kzalloc(sizeof(*obj), GFP_KERNEL); if (!obj) - return NULL; + return ERR_PTR(-ENOMEM); /* * vgem doesn't have any begin/end cpu access ioctls, therefore must use diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index d86e1ad4a972..5f25a8d15464 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -27,7 +27,6 @@ */ #include <linux/module.h> -#include <linux/console.h> #include <linux/pci.h> #include <linux/poll.h> #include <linux/wait.h> @@ -104,7 +103,7 @@ static int virtio_gpu_probe(struct virtio_device *vdev) struct drm_device *dev; int ret; - if (vgacon_text_force() && virtio_gpu_modeset == -1) + if (drm_firmware_drivers_only() && virtio_gpu_modeset == -1) return -EINVAL; if 
(virtio_gpu_modeset == 0) @@ -157,36 +156,6 @@ static void virtio_gpu_config_changed(struct virtio_device *vdev) schedule_work(&vgdev->config_changed_work); } -static __poll_t virtio_gpu_poll(struct file *filp, - struct poll_table_struct *wait) -{ - struct drm_file *drm_file = filp->private_data; - struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv; - struct drm_device *dev = drm_file->minor->dev; - struct virtio_gpu_device *vgdev = dev->dev_private; - struct drm_pending_event *e = NULL; - __poll_t mask = 0; - - if (!vgdev->has_virgl_3d || !vfpriv || !vfpriv->ring_idx_mask) - return drm_poll(filp, wait); - - poll_wait(filp, &drm_file->event_wait, wait); - - if (!list_empty(&drm_file->event_list)) { - spin_lock_irq(&dev->event_lock); - e = list_first_entry(&drm_file->event_list, - struct drm_pending_event, link); - drm_file->event_space += e->event->length; - list_del(&e->link); - spin_unlock_irq(&dev->event_lock); - - kfree(e); - mask |= EPOLLIN | EPOLLRDNORM; - } - - return mask; -} - static struct virtio_device_id id_table[] = { { VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID }, { 0 }, @@ -226,17 +195,7 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>"); MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>"); MODULE_AUTHOR("Alon Levy"); -static const struct file_operations virtio_gpu_driver_fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .unlocked_ioctl = drm_ioctl, - .compat_ioctl = drm_compat_ioctl, - .poll = virtio_gpu_poll, - .read = drm_read, - .llseek = noop_llseek, - .mmap = drm_gem_mmap -}; +DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops); static const struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index e0265fe74aa5..0a194aaad419 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -138,7 +138,6 @@ struct virtio_gpu_fence_driver { spinlock_t lock; }; -#define VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL 0x10000000 struct virtio_gpu_fence_event { struct drm_pending_event base; struct drm_event event; diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 5618a1d5879c..c708bab555c6 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -54,7 +54,7 @@ static int virtio_gpu_fence_event_create(struct drm_device *dev, if (!e) return -ENOMEM; - e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL; + e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED; e->event.length = sizeof(e->event); ret = drm_event_reserve_init(dev, file, &e->base, &e->event); @@ -774,7 +774,7 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev, goto out_unlock; } - if ((vgdev->capset_id_mask & (1 << value)) == 0) { + if ((vgdev->capset_id_mask & (1ULL << value)) == 0) { ret = -EINVAL; goto out_unlock; } @@ -819,7 +819,7 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev, if (vfpriv->ring_idx_mask) { valid_ring_mask = 0; for (i = 0; i < vfpriv->num_rings; i++) - valid_ring_mask |= 1 << i; + valid_ring_mask |= 1ULL << i; if (~valid_ring_mask & vfpriv->ring_idx_mask) { ret = -EINVAL; diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index f648b0e24447..baef2c5f2aaf 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -79,10 +79,10 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo) 
sg_free_table(shmem->pages); kfree(shmem->pages); shmem->pages = NULL; - drm_gem_shmem_unpin(&bo->base.base); + drm_gem_shmem_unpin(&bo->base); } - drm_gem_shmem_free_object(&bo->base.base); + drm_gem_shmem_free(&bo->base); } else if (virtio_gpu_is_vram(bo)) { struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo); @@ -116,15 +116,14 @@ static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = { .free = virtio_gpu_free_object, .open = virtio_gpu_gem_object_open, .close = virtio_gpu_gem_object_close, - - .print_info = drm_gem_shmem_print_info, + .print_info = drm_gem_shmem_object_print_info, .export = virtgpu_gem_prime_export, - .pin = drm_gem_shmem_pin, - .unpin = drm_gem_shmem_unpin, - .get_sg_table = drm_gem_shmem_get_sg_table, - .vmap = drm_gem_shmem_vmap, - .vunmap = drm_gem_shmem_vunmap, - .mmap = drm_gem_shmem_mmap, + .pin = drm_gem_shmem_object_pin, + .unpin = drm_gem_shmem_object_unpin, + .get_sg_table = drm_gem_shmem_object_get_sg_table, + .vmap = drm_gem_shmem_object_vmap, + .vunmap = drm_gem_shmem_object_vunmap, + .mmap = drm_gem_shmem_object_mmap, }; bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo) @@ -140,7 +139,7 @@ struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev, shmem = kzalloc(sizeof(*shmem), GFP_KERNEL); if (!shmem) - return NULL; + return ERR_PTR(-ENOMEM); dshmem = &shmem->base.base; dshmem->base.funcs = &virtio_gpu_shmem_funcs; @@ -157,7 +156,7 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, struct scatterlist *sg; int si, ret; - ret = drm_gem_shmem_pin(&bo->base.base); + ret = drm_gem_shmem_pin(&bo->base); if (ret < 0) return -EINVAL; @@ -167,9 +166,9 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, * dma-ops. This is discouraged for other drivers, but should be fine * since virtio_gpu doesn't support dma-buf import from other devices. */ - shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base); + shmem->pages = drm_gem_shmem_get_sg_table(&bo->base); if (!shmem->pages) { - drm_gem_shmem_unpin(&bo->base.base); + drm_gem_shmem_unpin(&bo->base); return -EINVAL; } @@ -277,6 +276,6 @@ err_put_objs: err_put_id: virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle); err_free_gem: - drm_gem_shmem_free_object(&shmem_obj->base); + drm_gem_shmem_free(shmem_obj); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig index c9ce47c448e0..a4fabe208d9f 100644 --- a/drivers/gpu/drm/vmwgfx/Kconfig +++ b/drivers/gpu/drm/vmwgfx/Kconfig @@ -4,6 +4,7 @@ config DRM_VMWGFX depends on DRM && PCI && MMU depends on X86 || ARM64 select DRM_TTM + select DRM_TTM_HELPER select MAPPING_DIRTY_HELPERS # Only needed for the transitional use of drm_crtc_init - can be removed # again once vmwgfx sets up the primary plane itself. 
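A note on the `1 << value` and `1 << i` fixes in virtgpu_ioctl.c above: `capset_id_mask` and `ring_idx_mask` are 64-bit masks (hence the switch to `1ULL`), while a plain `1` has type int, so the shift is performed in 32 bits and any bit index of 31 or more is truncated or undefined behavior. The following is a minimal standalone sketch of that failure mode — it is not part of the patch, and the file and variable names are illustrative only:

	/* shift-width.c: why a 64-bit mask needs a 64-bit shift operand */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t mask = 0;
		unsigned int value = 33;	/* e.g. a bit index above 31 */

		/*
		 * Broken form: the literal 1 has type int, so the shift is
		 * done in 32 bits. A shift count >= 32 is undefined behavior
		 * in C; on common compilers it wraps modulo 32, silently
		 * setting bit 1 instead of bit 33.
		 */
		/* mask |= 1 << value; */

		/* Fixed form: force a 64-bit left operand, as the patch does. */
		mask |= 1ULL << value;

		printf("mask = 0x%016llx\n", (unsigned long long)mask);
		return 0;
	}

The same reasoning applies to the `valid_ring_mask` loop: without the `ULL` suffix, a context with more than 31 rings would build a wrong validity mask, and the `~valid_ring_mask & vfpriv->ring_idx_mask` check could then accept or reject ring indices incorrectly.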
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index bc323f7d4032..e9d13f155d8c 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ +vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_hashtab.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \ vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ vmwgfx_overlay.o vmwgfx_gmrid_manager.o vmwgfx_fence.o \ @@ -9,7 +9,8 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \ vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \ vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \ - vmwgfx_devcaps.o ttm_object.o ttm_memory.o + vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o \ + vmwgfx_gem.o vmwgfx-$(CONFIG_DRM_FBDEV_EMULATION) += vmwgfx_fb.o vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h index 945c84b27e81..d90d940ad3f4 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h @@ -1,6 +1,6 @@ -/********************************************************** +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* * Copyright 2012-2021 VMware, Inc. - * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -22,7 +22,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * - **********************************************************/ + */ /* * svga3d_cmd.h -- diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h index 379ec15c7758..815d0ab0053f 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h @@ -1,6 +1,6 @@ -/********************************************************** +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* * Copyright 1998-2021 VMware, Inc. - * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -22,7 +22,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * - **********************************************************/ + */ /* * svga3d_devcaps.h -- @@ -347,6 +347,10 @@ typedef uint32 SVGA3dDevCapIndex; #define SVGA3D_DEVCAP_SM5 258 #define SVGA3D_DEVCAP_MULTISAMPLE_8X 259 +#define SVGA3D_DEVCAP_MAX_FORCED_SAMPLE_COUNT 260 + +#define SVGA3D_DEVCAP_GL43 261 + #define SVGA3D_DEVCAP_MAX 262 #define SVGA3D_DXFMT_SUPPORTED (1 << 0) diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h index 5af442dad542..925bf4b93f01 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h @@ -1,6 +1,6 @@ -/********************************************************** +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* * Copyright 2012-2021 VMware, Inc. 
- * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -22,7 +22,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * - **********************************************************/ + */ /* * svga3d_dx.h -- @@ -508,11 +508,11 @@ typedef struct SVGA3dCmdDXSetPredication { #pragma pack(pop) #pragma pack(push, 1) -typedef struct MKS3dDXSOState { +typedef struct SVGA3dDXSOState { uint32 offset; uint32 intOffset; - uint32 vertexCount; - uint32 dead; + uint32 dead1; + uint32 dead2; } SVGA3dDXSOState; #pragma pack(pop) diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h index 35494a728c7a..6103b41fe92b 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h @@ -1,6 +1,6 @@ -/********************************************************** +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* * Copyright 2012-2021 VMware, Inc. - * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -22,7 +22,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * - **********************************************************/ + */ /* * svga3d_limits.h -- @@ -82,4 +82,6 @@ #define SVGA3D_MIN_SBX_DATA_SIZE (GBYTES_2_BYTES(1)) #define SVGA3D_MAX_SBX_DATA_SIZE (GBYTES_2_BYTES(4)) +#define SVGA3D_MIN_SBX_DATA_SIZE_DVM (MBYTES_2_BYTES(900)) +#define SVGA3D_MAX_SBX_DATA_SIZE_DVM (MBYTES_2_BYTES(910)) #endif diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h index 988d8509c472..b24b4f55c941 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h @@ -1,6 +1,6 @@ -/********************************************************** +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* * Copyright 1998-2015 VMware, Inc. - * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -22,7 +22,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * - **********************************************************/ + */ /* * svga3d_reg.h -- diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h index 70b88ee16cf6..e9219eb380a2 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h @@ -1,6 +1,6 @@ -/********************************************************** +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* * Copyright 2012-2021 VMware, Inc. - * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -22,7 +22,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* - **********************************************************/ + */ /* * svga3d_types.h -- @@ -370,7 +370,6 @@ typedef enum SVGA3dSurfaceFormat { #define SVGA3D_SURFACE_TRANSFER_FROM_BUFFER (CONST64U(1) << 30) #define SVGA3D_SURFACE_RESERVED1 (CONST64U(1) << 31) -#define SVGA3D_SURFACE_VADECODE SVGA3D_SURFACE_RESERVED1 #define SVGA3D_SURFACE_MULTISAMPLE (CONST64U(1) << 32) diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h index bf242c21f352..405f20fc26c2 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h @@ -1,6 +1,6 @@ -/********************************************************** +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* * Copyright 2007,2020 VMware, Inc. - * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -22,7 +22,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * - **********************************************************/ + */ /* * svga_escape.h -- diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h index aec17c3c6c29..691f48f77e33 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h @@ -1,6 +1,6 @@ -/********************************************************** +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* * Copyright 2007-2021 VMware, Inc. - * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -22,7 +22,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * - **********************************************************/ + */ /* * svga_overlay.h -- diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h index b3602557de2e..acabdb550c10 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h @@ -1,6 +1,6 @@ -/********************************************************** +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* * Copyright 1998-2021 VMware, Inc. - * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -22,7 +22,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* - **********************************************************/ + */ /* * svga_reg.h -- @@ -442,6 +442,7 @@ typedef struct { #define SVGA_CAP2_TRACE_FULL_FB 0x00002000 #define SVGA_CAP2_EXTRA_REGS 0x00004000 #define SVGA_CAP2_LO_STAGING 0x00008000 +#define SVGA_CAP2_VIDEO_BLT 0x00010000 #define SVGA_CAP2_RESERVED 0x80000000 typedef enum { @@ -450,9 +451,10 @@ typedef enum { SVGABackdoorCap3dHWVersion = 2, SVGABackdoorCapDeviceCaps2 = 3, SVGABackdoorCapDevelCaps = 4, - SVGABackdoorDevelRenderer = 5, - SVGABackdoorDevelUsingISB = 6, - SVGABackdoorCapMax = 7, + SVGABackdoorCapDevCaps = 5, + SVGABackdoorDevelRenderer = 6, + SVGABackdoorDevelUsingISB = 7, + SVGABackdoorCapMax = 8, } SVGABackdoorCapType; enum { diff --git a/drivers/gpu/drm/vmwgfx/ttm_memory.c b/drivers/gpu/drm/vmwgfx/ttm_memory.c deleted file mode 100644 index 7f7fe35fc21d..000000000000 --- a/drivers/gpu/drm/vmwgfx/ttm_memory.c +++ /dev/null @@ -1,683 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR MIT */ -/************************************************************************** - * - * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * - **************************************************************************/ - -#define pr_fmt(fmt) "[TTM] " fmt - -#include <linux/spinlock.h> -#include <linux/sched.h> -#include <linux/wait.h> -#include <linux/mm.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/swap.h> - -#include <drm/drm_device.h> -#include <drm/drm_file.h> -#include <drm/ttm/ttm_device.h> - -#include "ttm_memory.h" - -#define TTM_MEMORY_ALLOC_RETRIES 4 - -struct ttm_mem_global ttm_mem_glob; -EXPORT_SYMBOL(ttm_mem_glob); - -struct ttm_mem_zone { - struct kobject kobj; - struct ttm_mem_global *glob; - const char *name; - uint64_t zone_mem; - uint64_t emer_mem; - uint64_t max_mem; - uint64_t swap_limit; - uint64_t used_mem; -}; - -static struct attribute ttm_mem_sys = { - .name = "zone_memory", - .mode = S_IRUGO -}; -static struct attribute ttm_mem_emer = { - .name = "emergency_memory", - .mode = S_IRUGO | S_IWUSR -}; -static struct attribute ttm_mem_max = { - .name = "available_memory", - .mode = S_IRUGO | S_IWUSR -}; -static struct attribute ttm_mem_swap = { - .name = "swap_limit", - .mode = S_IRUGO | S_IWUSR -}; -static struct attribute ttm_mem_used = { - .name = "used_memory", - .mode = S_IRUGO -}; - -static void ttm_mem_zone_kobj_release(struct kobject *kobj) -{ - struct ttm_mem_zone *zone = - container_of(kobj, struct ttm_mem_zone, kobj); - - pr_info("Zone %7s: Used memory at exit: %llu KiB\n", - zone->name, (unsigned long long)zone->used_mem >> 10); - kfree(zone); -} - -static ssize_t ttm_mem_zone_show(struct kobject *kobj, - struct attribute *attr, - char *buffer) -{ - struct ttm_mem_zone *zone = - container_of(kobj, struct ttm_mem_zone, kobj); - uint64_t val = 0; - - spin_lock(&zone->glob->lock); - if (attr == &ttm_mem_sys) - val = zone->zone_mem; - else if (attr == &ttm_mem_emer) - val = zone->emer_mem; - else if (attr == &ttm_mem_max) - val = zone->max_mem; - else if (attr == &ttm_mem_swap) - val = zone->swap_limit; - else if (attr == &ttm_mem_used) - val = zone->used_mem; - spin_unlock(&zone->glob->lock); - - return snprintf(buffer, PAGE_SIZE, "%llu\n", - (unsigned long long) val >> 10); -} - -static void ttm_check_swapping(struct ttm_mem_global *glob); - -static ssize_t ttm_mem_zone_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t size) -{ - struct ttm_mem_zone *zone = - container_of(kobj, struct ttm_mem_zone, kobj); - int chars; - unsigned long val; - uint64_t val64; - - chars = sscanf(buffer, "%lu", &val); - if (chars == 0) - return size; - - val64 = val; - val64 <<= 10; - - spin_lock(&zone->glob->lock); - if (val64 > zone->zone_mem) - val64 = zone->zone_mem; - if (attr == &ttm_mem_emer) { - zone->emer_mem = val64; - if (zone->max_mem > val64) - zone->max_mem = val64; - } else if (attr == &ttm_mem_max) { - zone->max_mem = val64; - if (zone->emer_mem < val64) - zone->emer_mem = val64; - } else if (attr == &ttm_mem_swap) - zone->swap_limit = val64; - spin_unlock(&zone->glob->lock); - - ttm_check_swapping(zone->glob); - - return size; -} - -static struct attribute *ttm_mem_zone_attrs[] = { - &ttm_mem_sys, - &ttm_mem_emer, - &ttm_mem_max, - &ttm_mem_swap, - &ttm_mem_used, - NULL -}; - -static const struct sysfs_ops ttm_mem_zone_ops = { - .show = &ttm_mem_zone_show, - .store = &ttm_mem_zone_store -}; - -static struct kobj_type ttm_mem_zone_kobj_type = { - .release = &ttm_mem_zone_kobj_release, - .sysfs_ops = &ttm_mem_zone_ops, - .default_attrs = ttm_mem_zone_attrs, -}; - -static struct attribute ttm_mem_global_lower_mem_limit = { - .name = 
"lower_mem_limit", - .mode = S_IRUGO | S_IWUSR -}; - -static ssize_t ttm_mem_global_show(struct kobject *kobj, - struct attribute *attr, - char *buffer) -{ - struct ttm_mem_global *glob = - container_of(kobj, struct ttm_mem_global, kobj); - uint64_t val = 0; - - spin_lock(&glob->lock); - val = glob->lower_mem_limit; - spin_unlock(&glob->lock); - /* convert from number of pages to KB */ - val <<= (PAGE_SHIFT - 10); - return snprintf(buffer, PAGE_SIZE, "%llu\n", - (unsigned long long) val); -} - -static ssize_t ttm_mem_global_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t size) -{ - int chars; - uint64_t val64; - unsigned long val; - struct ttm_mem_global *glob = - container_of(kobj, struct ttm_mem_global, kobj); - - chars = sscanf(buffer, "%lu", &val); - if (chars == 0) - return size; - - val64 = val; - /* convert from KB to number of pages */ - val64 >>= (PAGE_SHIFT - 10); - - spin_lock(&glob->lock); - glob->lower_mem_limit = val64; - spin_unlock(&glob->lock); - - return size; -} - -static struct attribute *ttm_mem_global_attrs[] = { - &ttm_mem_global_lower_mem_limit, - NULL -}; - -static const struct sysfs_ops ttm_mem_global_ops = { - .show = &ttm_mem_global_show, - .store = &ttm_mem_global_store, -}; - -static struct kobj_type ttm_mem_glob_kobj_type = { - .sysfs_ops = &ttm_mem_global_ops, - .default_attrs = ttm_mem_global_attrs, -}; - -static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob, - bool from_wq, uint64_t extra) -{ - unsigned int i; - struct ttm_mem_zone *zone; - uint64_t target; - - for (i = 0; i < glob->num_zones; ++i) { - zone = glob->zones[i]; - - if (from_wq) - target = zone->swap_limit; - else if (capable(CAP_SYS_ADMIN)) - target = zone->emer_mem; - else - target = zone->max_mem; - - target = (extra > target) ? 0ULL : target; - - if (zone->used_mem > target) - return true; - } - return false; -} - -/* - * At this point we only support a single shrink callback. - * Extend this if needed, perhaps using a linked list of callbacks. - * Note that this function is reentrant: - * many threads may try to swap out at any given time. 
- */ - -static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq, - uint64_t extra, struct ttm_operation_ctx *ctx) -{ - int ret; - - spin_lock(&glob->lock); - - while (ttm_zones_above_swap_target(glob, from_wq, extra)) { - spin_unlock(&glob->lock); - ret = ttm_global_swapout(ctx, GFP_KERNEL); - spin_lock(&glob->lock); - if (unlikely(ret <= 0)) - break; - } - - spin_unlock(&glob->lock); -} - -static void ttm_shrink_work(struct work_struct *work) -{ - struct ttm_operation_ctx ctx = { - .interruptible = false, - .no_wait_gpu = false - }; - struct ttm_mem_global *glob = - container_of(work, struct ttm_mem_global, work); - - ttm_shrink(glob, true, 0ULL, &ctx); -} - -static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob, - const struct sysinfo *si) -{ - struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL); - uint64_t mem; - int ret; - - if (unlikely(!zone)) - return -ENOMEM; - - mem = si->totalram - si->totalhigh; - mem *= si->mem_unit; - - zone->name = "kernel"; - zone->zone_mem = mem; - zone->max_mem = mem >> 1; - zone->emer_mem = (mem >> 1) + (mem >> 2); - zone->swap_limit = zone->max_mem - (mem >> 3); - zone->used_mem = 0; - zone->glob = glob; - glob->zone_kernel = zone; - ret = kobject_init_and_add( - &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); - if (unlikely(ret != 0)) { - kobject_put(&zone->kobj); - return ret; - } - glob->zones[glob->num_zones++] = zone; - return 0; -} - -#ifdef CONFIG_HIGHMEM -static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob, - const struct sysinfo *si) -{ - struct ttm_mem_zone *zone; - uint64_t mem; - int ret; - - if (si->totalhigh == 0) - return 0; - - zone = kzalloc(sizeof(*zone), GFP_KERNEL); - if (unlikely(!zone)) - return -ENOMEM; - - mem = si->totalram; - mem *= si->mem_unit; - - zone->name = "highmem"; - zone->zone_mem = mem; - zone->max_mem = mem >> 1; - zone->emer_mem = (mem >> 1) + (mem >> 2); - zone->swap_limit = zone->max_mem - (mem >> 3); - zone->used_mem = 0; - zone->glob = glob; - glob->zone_highmem = zone; - ret = kobject_init_and_add( - &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", - zone->name); - if (unlikely(ret != 0)) { - kobject_put(&zone->kobj); - return ret; - } - glob->zones[glob->num_zones++] = zone; - return 0; -} -#else -static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob, - const struct sysinfo *si) -{ - struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL); - uint64_t mem; - int ret; - - if (unlikely(!zone)) - return -ENOMEM; - - mem = si->totalram; - mem *= si->mem_unit; - - /** - * No special dma32 zone needed. - */ - - if (mem <= ((uint64_t) 1ULL << 32)) { - kfree(zone); - return 0; - } - - /* - * Limit max dma32 memory to 4GB for now - * until we can figure out how big this - * zone really is. 
- */ - - mem = ((uint64_t) 1ULL << 32); - zone->name = "dma32"; - zone->zone_mem = mem; - zone->max_mem = mem >> 1; - zone->emer_mem = (mem >> 1) + (mem >> 2); - zone->swap_limit = zone->max_mem - (mem >> 3); - zone->used_mem = 0; - zone->glob = glob; - glob->zone_dma32 = zone; - ret = kobject_init_and_add( - &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); - if (unlikely(ret != 0)) { - kobject_put(&zone->kobj); - return ret; - } - glob->zones[glob->num_zones++] = zone; - return 0; -} -#endif - -int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev) -{ - struct sysinfo si; - int ret; - int i; - struct ttm_mem_zone *zone; - - spin_lock_init(&glob->lock); - glob->swap_queue = create_singlethread_workqueue("ttm_swap"); - INIT_WORK(&glob->work, ttm_shrink_work); - - ret = kobject_init_and_add(&glob->kobj, &ttm_mem_glob_kobj_type, - &dev->kobj, "memory_accounting"); - if (unlikely(ret != 0)) { - kobject_put(&glob->kobj); - return ret; - } - - si_meminfo(&si); - - spin_lock(&glob->lock); - /* set it as 0 by default to keep original behavior of OOM */ - glob->lower_mem_limit = 0; - spin_unlock(&glob->lock); - - ret = ttm_mem_init_kernel_zone(glob, &si); - if (unlikely(ret != 0)) - goto out_no_zone; -#ifdef CONFIG_HIGHMEM - ret = ttm_mem_init_highmem_zone(glob, &si); - if (unlikely(ret != 0)) - goto out_no_zone; -#else - ret = ttm_mem_init_dma32_zone(glob, &si); - if (unlikely(ret != 0)) - goto out_no_zone; -#endif - for (i = 0; i < glob->num_zones; ++i) { - zone = glob->zones[i]; - pr_info("Zone %7s: Available graphics memory: %llu KiB\n", - zone->name, (unsigned long long)zone->max_mem >> 10); - } - return 0; -out_no_zone: - ttm_mem_global_release(glob); - return ret; -} - -void ttm_mem_global_release(struct ttm_mem_global *glob) -{ - struct ttm_mem_zone *zone; - unsigned int i; - - destroy_workqueue(glob->swap_queue); - glob->swap_queue = NULL; - for (i = 0; i < glob->num_zones; ++i) { - zone = glob->zones[i]; - kobject_del(&zone->kobj); - kobject_put(&zone->kobj); - } - kobject_del(&glob->kobj); - kobject_put(&glob->kobj); - memset(glob, 0, sizeof(*glob)); -} - -static void ttm_check_swapping(struct ttm_mem_global *glob) -{ - bool needs_swapping = false; - unsigned int i; - struct ttm_mem_zone *zone; - - spin_lock(&glob->lock); - for (i = 0; i < glob->num_zones; ++i) { - zone = glob->zones[i]; - if (zone->used_mem > zone->swap_limit) { - needs_swapping = true; - break; - } - } - - spin_unlock(&glob->lock); - - if (unlikely(needs_swapping)) - (void)queue_work(glob->swap_queue, &glob->work); - -} - -static void ttm_mem_global_free_zone(struct ttm_mem_global *glob, - struct ttm_mem_zone *single_zone, - uint64_t amount) -{ - unsigned int i; - struct ttm_mem_zone *zone; - - spin_lock(&glob->lock); - for (i = 0; i < glob->num_zones; ++i) { - zone = glob->zones[i]; - if (single_zone && zone != single_zone) - continue; - zone->used_mem -= amount; - } - spin_unlock(&glob->lock); -} - -void ttm_mem_global_free(struct ttm_mem_global *glob, - uint64_t amount) -{ - return ttm_mem_global_free_zone(glob, glob->zone_kernel, amount); -} -EXPORT_SYMBOL(ttm_mem_global_free); - -/* - * check if the available mem is under lower memory limit - * - * a. if no swap disk at all or free swap space is under swap_mem_limit - * but available system mem is bigger than sys_mem_limit, allow TTM - * allocation; - * - * b. if the available system mem is less than sys_mem_limit but free - * swap disk is bigger than swap_mem_limit, allow TTM allocation. 
- */ -bool -ttm_check_under_lowerlimit(struct ttm_mem_global *glob, - uint64_t num_pages, - struct ttm_operation_ctx *ctx) -{ - int64_t available; - - /* We allow over commit during suspend */ - if (ctx->force_alloc) - return false; - - available = get_nr_swap_pages() + si_mem_available(); - available -= num_pages; - if (available < glob->lower_mem_limit) - return true; - - return false; -} - -static int ttm_mem_global_reserve(struct ttm_mem_global *glob, - struct ttm_mem_zone *single_zone, - uint64_t amount, bool reserve) -{ - uint64_t limit; - int ret = -ENOMEM; - unsigned int i; - struct ttm_mem_zone *zone; - - spin_lock(&glob->lock); - for (i = 0; i < glob->num_zones; ++i) { - zone = glob->zones[i]; - if (single_zone && zone != single_zone) - continue; - - limit = (capable(CAP_SYS_ADMIN)) ? - zone->emer_mem : zone->max_mem; - - if (zone->used_mem > limit) - goto out_unlock; - } - - if (reserve) { - for (i = 0; i < glob->num_zones; ++i) { - zone = glob->zones[i]; - if (single_zone && zone != single_zone) - continue; - zone->used_mem += amount; - } - } - - ret = 0; -out_unlock: - spin_unlock(&glob->lock); - ttm_check_swapping(glob); - - return ret; -} - - -static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob, - struct ttm_mem_zone *single_zone, - uint64_t memory, - struct ttm_operation_ctx *ctx) -{ - int count = TTM_MEMORY_ALLOC_RETRIES; - - while (unlikely(ttm_mem_global_reserve(glob, - single_zone, - memory, true) - != 0)) { - if (ctx->no_wait_gpu) - return -ENOMEM; - if (unlikely(count-- == 0)) - return -ENOMEM; - ttm_shrink(glob, false, memory + (memory >> 2) + 16, ctx); - } - - return 0; -} - -int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, - struct ttm_operation_ctx *ctx) -{ - /** - * Normal allocations of kernel memory are registered in - * the kernel zone. - */ - - return ttm_mem_global_alloc_zone(glob, glob->zone_kernel, memory, ctx); -} -EXPORT_SYMBOL(ttm_mem_global_alloc); - -int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, - struct page *page, uint64_t size, - struct ttm_operation_ctx *ctx) -{ - struct ttm_mem_zone *zone = NULL; - - /** - * Page allocations may be registed in a single zone - * only if highmem or !dma32. 
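ttm_mem_global_reserve (above) is a check-then-commit operation: under a single lock it first verifies every affected zone, and only then charges all of them, so a failed reservation never leaves a partial charge. A small userspace sketch of the pattern, with hypothetical types and a pthread mutex standing in for the kernel spinlock:

```c
/* Check-then-commit reservation across zones, as in the deleted
 * ttm_mem_global_reserve(): verify all zones first, charge only if
 * every check passed. Types and limits here are invented. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct zone {
    uint64_t used;
    uint64_t limit;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int reserve(struct zone *zones, int nzones, uint64_t amount)
{
    int i, ret = -1;    /* stands in for -ENOMEM */

    pthread_mutex_lock(&lock);
    for (i = 0; i < nzones; ++i)
        if (zones[i].used > zones[i].limit)  /* already over: refuse */
            goto out_unlock;

    for (i = 0; i < nzones; ++i)             /* commit after all checks pass */
        zones[i].used += amount;
    ret = 0;
out_unlock:
    pthread_mutex_unlock(&lock);
    return ret;
}

int main(void)
{
    struct zone zones[2] = { { 0, 1024 }, { 0, 512 } };
    int i;

    for (i = 0; i < 3; ++i)
        printf("reserve #%d: %d\n", i, reserve(zones, 2, 400));
    /* #0 and #1 succeed; #2 fails once zone 1 has gone over its limit */
    return 0;
}
```

Note that, like the deleted kernel code, the check is against current usage rather than usage plus the new amount, so each zone may overshoot its limit by one allocation before further reservations are refused.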
- */ - -#ifdef CONFIG_HIGHMEM - if (PageHighMem(page) && glob->zone_highmem != NULL) - zone = glob->zone_highmem; -#else - if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL) - zone = glob->zone_kernel; -#endif - return ttm_mem_global_alloc_zone(glob, zone, size, ctx); -} - -void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page, - uint64_t size) -{ - struct ttm_mem_zone *zone = NULL; - -#ifdef CONFIG_HIGHMEM - if (PageHighMem(page) && glob->zone_highmem != NULL) - zone = glob->zone_highmem; -#else - if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL) - zone = glob->zone_kernel; -#endif - ttm_mem_global_free_zone(glob, zone, size); -} - -size_t ttm_round_pot(size_t size) -{ - if ((size & (size - 1)) == 0) - return size; - else if (size > PAGE_SIZE) - return PAGE_ALIGN(size); - else { - size_t tmp_size = 4; - - while (tmp_size < size) - tmp_size <<= 1; - - return tmp_size; - } - return 0; -} -EXPORT_SYMBOL(ttm_round_pot); diff --git a/drivers/gpu/drm/vmwgfx/ttm_memory.h b/drivers/gpu/drm/vmwgfx/ttm_memory.h deleted file mode 100644 index c50dba774485..000000000000 --- a/drivers/gpu/drm/vmwgfx/ttm_memory.h +++ /dev/null @@ -1,96 +0,0 @@ -/************************************************************************** - * - * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - **************************************************************************/ - -#ifndef TTM_MEMORY_H -#define TTM_MEMORY_H - -#include <linux/workqueue.h> -#include <linux/spinlock.h> -#include <linux/bug.h> -#include <linux/wait.h> -#include <linux/errno.h> -#include <linux/kobject.h> -#include <linux/mm.h> - -#include <drm/ttm/ttm_bo_api.h> - -/** - * struct ttm_mem_global - Global memory accounting structure. - * - * @shrink: A single callback to shrink TTM memory usage. Extend this - * to a linked list to be able to handle multiple callbacks when needed. - * @swap_queue: A workqueue to handle shrinking in low memory situations. We - * need a separate workqueue since it will spend a lot of time waiting - * for the GPU, and this will otherwise block other workqueue tasks(?) - * At this point we use only a single-threaded workqueue. - * @work: The workqueue callback for the shrink queue. 
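The surviving ttm_round_pot() rounds small sizes up to the next power of two (starting at 4) and page-aligns anything larger than a page; `(size & (size - 1)) == 0` is the standard power-of-two test. A runnable re-creation for illustration, assuming a 4096-byte page:

```c
/* Userspace re-creation of ttm_round_pot(); PAGE_SIZE assumed 4096. */
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static size_t round_pot(size_t size)
{
    if ((size & (size - 1)) == 0)   /* already a power of two */
        return size;
    if (size > PAGE_SIZE)           /* big: just page-align */
        return PAGE_ALIGN(size);

    size_t tmp = 4;                 /* small: next power of two >= size */
    while (tmp < size)
        tmp <<= 1;
    return tmp;
}

int main(void)
{
    printf("%zu %zu %zu\n", round_pot(3), round_pot(100), round_pot(5000));
    /* prints: 4 128 8192 */
    return 0;
}
```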
- * @lock: Lock to protect the @shrink - and the memory accounting members, - * that is, essentially the whole structure with some exceptions. - * @lower_mem_limit: include lower limit of swap space and lower limit of - * system memory. - * @zones: Array of pointers to accounting zones. - * @num_zones: Number of populated entries in the @zones array. - * @zone_kernel: Pointer to the kernel zone. - * @zone_highmem: Pointer to the highmem zone if there is one. - * @zone_dma32: Pointer to the dma32 zone if there is one. - * - * Note that this structure is not per device. It should be global for all - * graphics devices. - */ - -#define TTM_MEM_MAX_ZONES 2 -struct ttm_mem_zone; -extern struct ttm_mem_global { - struct kobject kobj; - struct workqueue_struct *swap_queue; - struct work_struct work; - spinlock_t lock; - uint64_t lower_mem_limit; - struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES]; - unsigned int num_zones; - struct ttm_mem_zone *zone_kernel; -#ifdef CONFIG_HIGHMEM - struct ttm_mem_zone *zone_highmem; -#else - struct ttm_mem_zone *zone_dma32; -#endif -} ttm_mem_glob; - -int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev); -void ttm_mem_global_release(struct ttm_mem_global *glob); -int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, - struct ttm_operation_ctx *ctx); -void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount); -int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, - struct page *page, uint64_t size, - struct ttm_operation_ctx *ctx); -void ttm_mem_global_free_page(struct ttm_mem_global *glob, - struct page *page, uint64_t size); -size_t ttm_round_pot(size_t size); -bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob, uint64_t num_pages, - struct ttm_operation_ctx *ctx); -#endif diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c index 899945f54dc7..26a55fef1ab5 100644 --- a/drivers/gpu/drm/vmwgfx/ttm_object.c +++ b/drivers/gpu/drm/vmwgfx/ttm_object.c @@ -50,6 +50,7 @@ #include <linux/atomic.h> #include <linux/module.h> #include "ttm_object.h" +#include "vmwgfx_drv.h" MODULE_IMPORT_NS(DMA_BUF); @@ -73,7 +74,7 @@ struct ttm_object_file { struct ttm_object_device *tdev; spinlock_t lock; struct list_head ref_list; - struct drm_open_hash ref_hash[TTM_REF_NUM]; + struct vmwgfx_open_hash ref_hash; struct kref refcount; }; @@ -91,12 +92,10 @@ struct ttm_object_file { struct ttm_object_device { spinlock_t object_lock; - struct drm_open_hash object_hash; + struct vmwgfx_open_hash object_hash; atomic_t object_count; - struct ttm_mem_global *mem_glob; struct dma_buf_ops ops; void (*dmabuf_release)(struct dma_buf *dma_buf); - size_t dma_buf_size; struct idr idr; }; @@ -123,10 +122,9 @@ struct ttm_object_device { struct ttm_ref_object { struct rcu_head rcu_head; - struct drm_hash_item hash; + struct vmwgfx_hash_item hash; struct list_head head; struct kref kref; - enum ttm_ref_type ref_type; struct ttm_base_object *obj; struct ttm_object_file *tfile; }; @@ -162,9 +160,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile, struct ttm_base_object *base, bool shareable, enum ttm_object_type object_type, - void (*refcount_release) (struct ttm_base_object **), - void (*ref_obj_release) (struct ttm_base_object *, - enum ttm_ref_type ref_type)) + void (*refcount_release) (struct ttm_base_object **)) { struct ttm_object_device *tdev = tfile->tdev; int ret; @@ -172,7 +168,6 @@ int ttm_base_object_init(struct ttm_object_file *tfile, base->shareable = shareable; base->tfile = 
ttm_object_file_ref(tfile); base->refcount_release = refcount_release; - base->ref_obj_release = ref_obj_release; base->object_type = object_type; kref_init(&base->refcount); idr_preload(GFP_KERNEL); @@ -184,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile, return ret; base->handle = ret; - ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false); + ret = ttm_ref_object_add(tfile, base, NULL, false); if (unlikely(ret != 0)) goto out_err1; @@ -247,12 +242,12 @@ void ttm_base_object_unref(struct ttm_base_object **p_base) struct ttm_base_object * ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key) { - struct drm_hash_item *hash; - struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE]; + struct vmwgfx_hash_item *hash; + struct vmwgfx_open_hash *ht = &tfile->ref_hash; int ret; rcu_read_lock(); - ret = drm_ht_find_item_rcu(ht, key, &hash); + ret = vmwgfx_ht_find_item_rcu(ht, key, &hash); if (ret) { rcu_read_unlock(); return NULL; @@ -267,12 +262,12 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, uint32_t key) { struct ttm_base_object *base = NULL; - struct drm_hash_item *hash; - struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE]; + struct vmwgfx_hash_item *hash; + struct vmwgfx_open_hash *ht = &tfile->ref_hash; int ret; rcu_read_lock(); - ret = drm_ht_find_item_rcu(ht, key, &hash); + ret = vmwgfx_ht_find_item_rcu(ht, key, &hash); if (likely(ret == 0)) { base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj; @@ -299,64 +294,14 @@ ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key) return base; } -/** - * ttm_ref_object_exists - Check whether a caller has a valid ref object - * (has opened) a base object. - * - * @tfile: Pointer to a struct ttm_object_file identifying the caller. - * @base: Pointer to a struct base object. - * - * Checks wether the caller identified by @tfile has put a valid USAGE - * reference object on the base object identified by @base. - */ -bool ttm_ref_object_exists(struct ttm_object_file *tfile, - struct ttm_base_object *base) -{ - struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE]; - struct drm_hash_item *hash; - struct ttm_ref_object *ref; - - rcu_read_lock(); - if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0)) - goto out_false; - - /* - * Verify that the ref object is really pointing to our base object. - * Our base object could actually be dead, and the ref object pointing - * to another base object with the same handle. - */ - ref = drm_hash_entry(hash, struct ttm_ref_object, hash); - if (unlikely(base != ref->obj)) - goto out_false; - - /* - * Verify that the ref->obj pointer was actually valid! 
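The lookup paths here pair an RCU hash search with a conditional reference grab: after finding the entry, the caller must take a reference only if the object is still live (the removed ttm_ref_object_exists() does the equivalent with a kref_read() check after a barrier). A sketch of the get-unless-zero primitive using C11 atomics, illustrative rather than the kernel's kref implementation:

```c
/* get-unless-zero: take a reference only if the object is still live,
 * the guard that lock-free lookup paths rely on. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool get_unless_zero(atomic_int *refcount)
{
    int cur = atomic_load(refcount);

    while (cur != 0) {
        /* On failure the CAS reloads cur, and the loop re-checks it. */
        if (atomic_compare_exchange_weak(refcount, &cur, cur + 1))
            return true;    /* reference taken */
    }
    return false;           /* object already on its way to destruction */
}

int main(void)
{
    atomic_int live, dying;

    atomic_init(&live, 2);
    atomic_init(&dying, 0);
    printf("live: %d, dying: %d\n",
           get_unless_zero(&live), get_unless_zero(&dying));
    return 0;
}
```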
- */ - rmb(); - if (unlikely(kref_read(&ref->kref) == 0)) - goto out_false; - - rcu_read_unlock(); - return true; - - out_false: - rcu_read_unlock(); - return false; -} - int ttm_ref_object_add(struct ttm_object_file *tfile, struct ttm_base_object *base, - enum ttm_ref_type ref_type, bool *existed, + bool *existed, bool require_existed) { - struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; + struct vmwgfx_open_hash *ht = &tfile->ref_hash; struct ttm_ref_object *ref; - struct drm_hash_item *hash; - struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; - struct ttm_operation_ctx ctx = { - .interruptible = false, - .no_wait_gpu = false - }; + struct vmwgfx_hash_item *hash; int ret = -EINVAL; if (base->tfile != tfile && !base->shareable) @@ -367,7 +312,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, while (ret == -EINVAL) { rcu_read_lock(); - ret = drm_ht_find_item_rcu(ht, base->handle, &hash); + ret = vmwgfx_ht_find_item_rcu(ht, base->handle, &hash); if (ret == 0) { ref = drm_hash_entry(hash, struct ttm_ref_object, hash); @@ -381,24 +326,18 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, if (require_existed) return -EPERM; - ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), - &ctx); - if (unlikely(ret != 0)) - return ret; ref = kmalloc(sizeof(*ref), GFP_KERNEL); if (unlikely(ref == NULL)) { - ttm_mem_global_free(mem_glob, sizeof(*ref)); return -ENOMEM; } ref->hash.key = base->handle; ref->obj = base; ref->tfile = tfile; - ref->ref_type = ref_type; kref_init(&ref->kref); spin_lock(&tfile->lock); - ret = drm_ht_insert_item_rcu(ht, &ref->hash); + ret = vmwgfx_ht_insert_item_rcu(ht, &ref->hash); if (likely(ret == 0)) { list_add_tail(&ref->head, &tfile->ref_list); @@ -412,7 +351,6 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, spin_unlock(&tfile->lock); BUG_ON(ret != -EINVAL); - ttm_mem_global_free(mem_glob, sizeof(*ref)); kfree(ref); } @@ -424,35 +362,29 @@ ttm_ref_object_release(struct kref *kref) { struct ttm_ref_object *ref = container_of(kref, struct ttm_ref_object, kref); - struct ttm_base_object *base = ref->obj; struct ttm_object_file *tfile = ref->tfile; - struct drm_open_hash *ht; - struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; + struct vmwgfx_open_hash *ht; - ht = &tfile->ref_hash[ref->ref_type]; - (void)drm_ht_remove_item_rcu(ht, &ref->hash); + ht = &tfile->ref_hash; + (void)vmwgfx_ht_remove_item_rcu(ht, &ref->hash); list_del(&ref->head); spin_unlock(&tfile->lock); - if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release) - base->ref_obj_release(base, ref->ref_type); - ttm_base_object_unref(&ref->obj); - ttm_mem_global_free(mem_glob, sizeof(*ref)); kfree_rcu(ref, rcu_head); spin_lock(&tfile->lock); } int ttm_ref_object_base_unref(struct ttm_object_file *tfile, - unsigned long key, enum ttm_ref_type ref_type) + unsigned long key) { - struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; + struct vmwgfx_open_hash *ht = &tfile->ref_hash; struct ttm_ref_object *ref; - struct drm_hash_item *hash; + struct vmwgfx_hash_item *hash; int ret; spin_lock(&tfile->lock); - ret = drm_ht_find_item(ht, key, &hash); + ret = vmwgfx_ht_find_item(ht, key, &hash); if (unlikely(ret != 0)) { spin_unlock(&tfile->lock); return -EINVAL; @@ -467,7 +399,6 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile) { struct ttm_ref_object *ref; struct list_head *list; - unsigned int i; struct ttm_object_file *tfile = *p_tfile; *p_tfile = NULL; @@ -485,8 +416,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile) } spin_unlock(&tfile->lock); 
- for (i = 0; i < TTM_REF_NUM; ++i) - drm_ht_remove(&tfile->ref_hash[i]); + vmwgfx_ht_remove(&tfile->ref_hash); ttm_object_file_unref(&tfile); } @@ -495,8 +425,6 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev, unsigned int hash_order) { struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL); - unsigned int i; - unsigned int j = 0; int ret; if (unlikely(tfile == NULL)) @@ -507,18 +435,13 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev, kref_init(&tfile->refcount); INIT_LIST_HEAD(&tfile->ref_list); - for (i = 0; i < TTM_REF_NUM; ++i) { - ret = drm_ht_create(&tfile->ref_hash[i], hash_order); - if (ret) { - j = i; - goto out_err; - } - } + ret = vmwgfx_ht_create(&tfile->ref_hash, hash_order); + if (ret) + goto out_err; return tfile; out_err: - for (i = 0; i < j; ++i) - drm_ht_remove(&tfile->ref_hash[i]); + vmwgfx_ht_remove(&tfile->ref_hash); kfree(tfile); @@ -526,8 +449,7 @@ out_err: } struct ttm_object_device * -ttm_object_device_init(struct ttm_mem_global *mem_glob, - unsigned int hash_order, +ttm_object_device_init(unsigned int hash_order, const struct dma_buf_ops *ops) { struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL); @@ -536,19 +458,24 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob, if (unlikely(tdev == NULL)) return NULL; - tdev->mem_glob = mem_glob; spin_lock_init(&tdev->object_lock); atomic_set(&tdev->object_count, 0); - ret = drm_ht_create(&tdev->object_hash, hash_order); + ret = vmwgfx_ht_create(&tdev->object_hash, hash_order); if (ret != 0) goto out_no_object_hash; - idr_init_base(&tdev->idr, 1); + /* + * Our base is at VMWGFX_NUM_MOB + 1 because we want to create + * a separate namespace for GEM handles (which are + * 1..VMWGFX_NUM_MOB) and the surface handles. Some ioctls + * can take either handle as an argument so we want to + * easily be able to tell whether the handle refers to a + * GEM buffer or a surface. + */ + idr_init_base(&tdev->idr, VMWGFX_NUM_MOB + 1); tdev->ops = *ops; tdev->dmabuf_release = tdev->ops.release; tdev->ops.release = ttm_prime_dmabuf_release; - tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) + - ttm_round_pot(sizeof(struct file)); return tdev; out_no_object_hash: @@ -564,7 +491,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev) WARN_ON_ONCE(!idr_is_empty(&tdev->idr)); idr_destroy(&tdev->idr); - drm_ht_remove(&tdev->object_hash); + vmwgfx_ht_remove(&tdev->object_hash); kfree(tdev); } @@ -633,7 +560,6 @@ static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf) if (prime->dma_buf == dma_buf) prime->dma_buf = NULL; mutex_unlock(&prime->mutex); - ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size); ttm_base_object_unref(&base); } @@ -667,7 +593,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile, prime = (struct ttm_prime_object *) dma_buf->priv; base = &prime->base; *handle = base->handle; - ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false); + ret = ttm_ref_object_add(tfile, base, NULL, false); dma_buf_put(dma_buf); @@ -715,30 +641,18 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile, dma_buf = prime->dma_buf; if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) { DEFINE_DMA_BUF_EXPORT_INFO(exp_info); - struct ttm_operation_ctx ctx = { - .interruptible = true, - .no_wait_gpu = false - }; exp_info.ops = &tdev->ops; exp_info.size = prime->size; exp_info.flags = flags; exp_info.priv = prime; /* - * Need to create a new dma_buf, with memory accounting.
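The comment added above idr_init_base() describes the trick: GEM handles occupy 1..VMWGFX_NUM_MOB, this idr hands out values above that, so any handle's type can be recovered with a range check. A small sketch of such a classifier; the constant here is invented for the example and is not the driver's value:

```c
/* Sketch of splitting one handle space into two ranges, as the added
 * idr_init_base(..., VMWGFX_NUM_MOB + 1) comment describes. */
#include <stdint.h>
#include <stdio.h>

#define VMWGFX_NUM_MOB 16384u   /* hypothetical split point */

static const char *handle_kind(uint32_t handle)
{
    if (handle == 0)
        return "invalid";       /* 0 is never handed out */
    return handle <= VMWGFX_NUM_MOB ? "GEM buffer" : "surface";
}

int main(void)
{
    printf("%u: %s\n", 1u, handle_kind(1));
    printf("%u: %s\n", VMWGFX_NUM_MOB + 1, handle_kind(VMWGFX_NUM_MOB + 1));
    return 0;
}
```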
+ * Need to create a new dma_buf */ - ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size, - &ctx); - if (unlikely(ret != 0)) { - mutex_unlock(&prime->mutex); - goto out_unref; - } dma_buf = dma_buf_export(&exp_info); if (IS_ERR(dma_buf)) { ret = PTR_ERR(dma_buf); - ttm_mem_global_free(tdev->mem_glob, - tdev->dma_buf_size); mutex_unlock(&prime->mutex); goto out_unref; } @@ -773,7 +687,6 @@ out_unref: * @shareable: See ttm_base_object_init * @type: See ttm_base_object_init * @refcount_release: See ttm_base_object_init - * @ref_obj_release: See ttm_base_object_init * * Initializes an object which is compatible with the drm_prime model * for data sharing between processes and devices. @@ -781,9 +694,7 @@ out_unref: int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size, struct ttm_prime_object *prime, bool shareable, enum ttm_object_type type, - void (*refcount_release) (struct ttm_base_object **), - void (*ref_obj_release) (struct ttm_base_object *, - enum ttm_ref_type ref_type)) + void (*refcount_release) (struct ttm_base_object **)) { mutex_init(&prime->mutex); prime->size = PAGE_ALIGN(size); @@ -792,6 +703,5 @@ int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size, prime->refcount_release = refcount_release; return ttm_base_object_init(tfile, &prime->base, shareable, ttm_prime_type, - ttm_prime_refcount_release, - ref_obj_release); + ttm_prime_refcount_release); } diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.h b/drivers/gpu/drm/vmwgfx/ttm_object.h index 49b064f0cb19..4c8700027c6d 100644 --- a/drivers/gpu/drm/vmwgfx/ttm_object.h +++ b/drivers/gpu/drm/vmwgfx/ttm_object.h @@ -42,31 +42,7 @@ #include <linux/list.h> #include <linux/rcupdate.h> -#include <drm/drm_hashtab.h> - -#include "ttm_memory.h" - -/** - * enum ttm_ref_type - * - * Describes what type of reference a ref object holds. - * - * TTM_REF_USAGE is a simple refcount on a base object. - * - * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a - * buffer object. - * - * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a - * buffer object. - * - */ - -enum ttm_ref_type { - TTM_REF_USAGE, - TTM_REF_SYNCCPU_READ, - TTM_REF_SYNCCPU_WRITE, - TTM_REF_NUM -}; +#include "vmwgfx_hashtab.h" /** * enum ttm_object_type @@ -78,7 +54,6 @@ enum ttm_ref_type { enum ttm_object_type { ttm_fence_type, - ttm_buffer_type, ttm_lock_type, ttm_prime_type, ttm_driver_type0 = 256, @@ -129,8 +104,6 @@ struct ttm_base_object { struct ttm_object_file *tfile; struct kref refcount; void (*refcount_release) (struct ttm_base_object **base); - void (*ref_obj_release) (struct ttm_base_object *base, - enum ttm_ref_type ref_type); u32 handle; enum ttm_object_type object_type; u32 shareable; @@ -179,11 +152,7 @@ extern int ttm_base_object_init(struct ttm_object_file *tfile, bool shareable, enum ttm_object_type type, void (*refcount_release) (struct ttm_base_object - **), - void (*ref_obj_release) (struct ttm_base_object - *, - enum ttm_ref_type - ref_type)); + **)); /** * ttm_base_object_lookup @@ -247,12 +216,9 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base); */ extern int ttm_ref_object_add(struct ttm_object_file *tfile, struct ttm_base_object *base, - enum ttm_ref_type ref_type, bool *existed, + bool *existed, bool require_existed); -extern bool ttm_ref_object_exists(struct ttm_object_file *tfile, - struct ttm_base_object *base); - /** * ttm_ref_object_base_unref * @@ -265,8 +231,7 @@ extern bool ttm_ref_object_exists(struct ttm_object_file *tfile, * will be unreferenced. 
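struct ttm_base_object keeps a kref plus a refcount_release callback, and the final put runs the release function. A minimal userspace analogue of that pattern with C11 atomics, illustrative only:

```c
/* Minimal kref-style pattern: atomic count plus a release callback
 * invoked on the final put, as struct ttm_base_object uses. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
    atomic_int refcount;
    void (*release)(struct obj *obj);
};

static void obj_get(struct obj *o)
{
    atomic_fetch_add(&o->refcount, 1);
}

static void obj_put(struct obj *o)
{
    /* fetch_sub returns the old value; 1 means we dropped the last ref */
    if (atomic_fetch_sub(&o->refcount, 1) == 1)
        o->release(o);
}

static void obj_release(struct obj *o)
{
    printf("released\n");
    free(o);
}

int main(void)
{
    struct obj *o = malloc(sizeof(*o));

    atomic_init(&o->refcount, 1);
    o->release = obj_release;
    obj_get(o);
    obj_put(o);    /* 2 -> 1 */
    obj_put(o);    /* 1 -> 0, release runs */
    return 0;
}
```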
*/ extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile, - unsigned long key, - enum ttm_ref_type ref_type); + unsigned long key); /** * ttm_object_file_init - initialize a struct ttm_object file @@ -297,7 +262,6 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile); /** * ttm_object device init - initialize a struct ttm_object_device * - * @mem_glob: struct ttm_mem_global for memory accounting. * @hash_order: Order of hash table used to hash the base objects. * @ops: DMA buf ops for prime objects of this device. * @@ -306,8 +270,7 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile); */ extern struct ttm_object_device * -ttm_object_device_init(struct ttm_mem_global *mem_glob, - unsigned int hash_order, +ttm_object_device_init(unsigned int hash_order, const struct dma_buf_ops *ops); /** @@ -332,10 +295,7 @@ extern int ttm_prime_object_init(struct ttm_object_file *tfile, bool shareable, enum ttm_object_type type, void (*refcount_release) - (struct ttm_base_object **), - void (*ref_obj_release) - (struct ttm_base_object *, - enum ttm_ref_type ref_type)); + (struct ttm_base_object **)); static inline enum ttm_object_type ttm_base_object_type(struct ttm_base_object *base) @@ -353,13 +313,6 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile, #define ttm_prime_object_kfree(__obj, __prime) \ kfree_rcu(__obj, __prime.base.rhead) -/* - * Extra memory required by the base object's idr storage, which is allocated - * separately from the base object itself. We estimate an on-average 128 bytes - * per idr. - */ -#define TTM_OBJ_EXTRA_SIZE 128 - struct ttm_base_object * ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c index 6f27d69bad0e..ae2de914eb89 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c @@ -354,6 +354,27 @@ void vmw_binding_add(struct vmw_ctx_binding_state *cbs, } /** + * vmw_binding_cb_offset_update: Update the offset of a cb binding + * + * @cbs: Pointer to the context binding state tracker. + * @shader_slot: The shader slot of the binding. + * @slot: The slot of the binding. + * @offsetInBytes: The new offset of the binding. + * + * Updates the offset of an existing cb binding in the context binding + * state structure @cbs. + */ +void vmw_binding_cb_offset_update(struct vmw_ctx_binding_state *cbs, + u32 shader_slot, u32 slot, u32 offsetInBytes) +{ + struct vmw_ctx_bindinfo *loc = + vmw_binding_loc(cbs, vmw_ctx_binding_cb, shader_slot, slot); + struct vmw_ctx_bindinfo_cb *loc_cb = + (struct vmw_ctx_bindinfo_cb *)((u8 *) loc); + loc_cb->offset = offsetInBytes; +} + +/** * vmw_binding_add_uav_index - Add UAV index for tracking. * @cbs: Pointer to the context binding state tracker. * @slot: UAV type to which bind this index. 
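The new vmw_binding_cb_offset_update() recovers the constant-buffer binding from the generic vmw_ctx_bindinfo it embeds; the same container_of idiom appears throughout this driver. A self-contained illustration with stand-in types:

```c
/* container_of in plain C: recover the enclosing structure from a
 * pointer to one of its members, the idiom behind the cast in
 * vmw_binding_cb_offset_update(). Types here are stand-ins. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct bindinfo {               /* stand-in for vmw_ctx_bindinfo */
    int resource_id;
};

struct bindinfo_cb {            /* stand-in for vmw_ctx_bindinfo_cb */
    struct bindinfo bi;         /* embedded generic part */
    unsigned int offset;
};

int main(void)
{
    struct bindinfo_cb cb = { .bi = { .resource_id = 7 }, .offset = 0 };
    struct bindinfo *loc = &cb.bi;  /* generic pointer, as lookups return */

    /* Recover the containing cb binding and update its offset. */
    container_of(loc, struct bindinfo_cb, bi)->offset = 256;
    printf("offset = %u\n", cb.offset);
    return 0;
}
```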
@@ -1070,7 +1091,7 @@ static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs) size_t cmd_size, view_id_size; const struct vmw_resource *ctx = vmw_cbs_context(cbs); - vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS); + vmw_collect_view_ids(cbs, loc, vmw_max_num_uavs(cbs->dev_priv)); view_id_size = cbs->bind_cmd_count*sizeof(uint32); cmd_size = sizeof(*cmd) + view_id_size; cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id); @@ -1100,7 +1121,7 @@ static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs) size_t cmd_size, view_id_size; const struct vmw_resource *ctx = vmw_cbs_context(cbs); - vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS); + vmw_collect_view_ids(cbs, loc, vmw_max_num_uavs(cbs->dev_priv)); view_id_size = cbs->bind_cmd_count*sizeof(uint32); cmd_size = sizeof(*cmd) + view_id_size; cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id); @@ -1327,8 +1348,7 @@ static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind) } /** - * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with - * memory accounting. + * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state. * * @dev_priv: Pointer to a device private structure. * @@ -1338,20 +1358,9 @@ struct vmw_ctx_binding_state * vmw_binding_state_alloc(struct vmw_private *dev_priv) { struct vmw_ctx_binding_state *cbs; - struct ttm_operation_ctx ctx = { - .interruptible = false, - .no_wait_gpu = false - }; - int ret; - - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs), - &ctx); - if (ret) - return ERR_PTR(ret); cbs = vzalloc(sizeof(*cbs)); if (!cbs) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs)); return ERR_PTR(-ENOMEM); } @@ -1362,17 +1371,13 @@ vmw_binding_state_alloc(struct vmw_private *dev_priv) } /** - * vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its - * memory accounting info. + * vmw_binding_state_free - Free a struct vmw_ctx_binding_state. * * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed. */ void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs) { - struct vmw_private *dev_priv = cbs->dev_priv; - vfree(cbs); - ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs)); } /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h index dcb71fd0bb3b..85b90f7d398d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h @@ -200,7 +200,7 @@ struct vmw_dx_shader_bindings { * @splice_index: The device splice index set by user-space. */ struct vmw_ctx_bindinfo_uav { - struct vmw_ctx_bindinfo_view views[SVGA3D_MAX_UAVIEWS]; + struct vmw_ctx_bindinfo_view views[SVGA3D_DX11_1_MAX_UAVIEWS]; uint32 index; }; @@ -217,6 +217,8 @@ struct vmw_ctx_bindinfo_so { extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs, const struct vmw_ctx_bindinfo *ci, u32 shader_slot, u32 slot); +extern void vmw_binding_cb_offset_update(struct vmw_ctx_binding_state *cbs, + u32 shader_slot, u32 slot, u32 offsetInBytes); extern void vmw_binding_add_uav_index(struct vmw_ctx_binding_state *cbs, uint32 slot, uint32 splice_index); extern void diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index fd007f1c1776..15fead85450c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -33,18 +33,6 @@ /** - * struct vmw_user_buffer_object - User-space-visible buffer object - * - * @prime: The prime object providing user visibility. 
- * @vbo: The struct vmw_buffer_object - */ -struct vmw_user_buffer_object { - struct ttm_prime_object prime; - struct vmw_buffer_object vbo; -}; - - -/** * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct * vmw_buffer_object. * @@ -60,23 +48,6 @@ vmw_buffer_object(struct ttm_buffer_object *bo) /** - * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct - * vmw_user_buffer_object. - * - * @bo: Pointer to the TTM buffer object. - * Return: Pointer to the struct vmw_buffer_object embedding the TTM buffer - * object. - */ -static struct vmw_user_buffer_object * -vmw_user_buffer_object(struct ttm_buffer_object *bo) -{ - struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo); - - return container_of(vmw_bo, struct vmw_user_buffer_object, vbo); -} - - -/** * vmw_bo_pin_in_placement - Validate a buffer to placement. * * @dev_priv: Driver private. @@ -392,39 +363,6 @@ void vmw_bo_unmap(struct vmw_buffer_object *vbo) /** - * vmw_bo_acc_size - Calculate the pinned memory usage of buffers - * - * @dev_priv: Pointer to a struct vmw_private identifying the device. - * @size: The requested buffer size. - * @user: Whether this is an ordinary dma buffer or a user dma buffer. - */ -static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size, - bool user) -{ - static size_t struct_size, user_struct_size; - size_t num_pages = PFN_UP(size); - size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *)); - - if (unlikely(struct_size == 0)) { - size_t backend_size = ttm_round_pot(vmw_tt_size); - - struct_size = backend_size + - ttm_round_pot(sizeof(struct vmw_buffer_object)); - user_struct_size = backend_size + - ttm_round_pot(sizeof(struct vmw_user_buffer_object)) + - TTM_OBJ_EXTRA_SIZE; - } - - if (dev_priv->map_mode == vmw_dma_alloc_coherent) - page_array_size += - ttm_round_pot(num_pages * sizeof(dma_addr_t)); - - return ((user) ? user_struct_size : struct_size) + - page_array_size; -} - - -/** * vmw_bo_bo_free - vmw buffer object destructor * * @bo: Pointer to the embedded struct ttm_buffer_object @@ -436,27 +374,10 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo) WARN_ON(vmw_bo->dirty); WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree)); vmw_bo_unmap(vmw_bo); - dma_resv_fini(&bo->base._resv); + drm_gem_object_release(&bo->base); kfree(vmw_bo); } - -/** - * vmw_user_bo_destroy - vmw buffer object destructor - * - * @bo: Pointer to the embedded struct ttm_buffer_object - */ -static void vmw_user_bo_destroy(struct ttm_buffer_object *bo) -{ - struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo); - struct vmw_buffer_object *vbo = &vmw_user_bo->vbo; - - WARN_ON(vbo->dirty); - WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree)); - vmw_bo_unmap(vbo); - ttm_prime_object_kfree(vmw_user_bo, prime); -} - /** * vmw_bo_create_kernel - Create a pinned BO for internal kernel use. 
* @@ -471,33 +392,27 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size, struct ttm_placement *placement, struct ttm_buffer_object **p_bo) { - struct ttm_operation_ctx ctx = { false, false }; + struct ttm_operation_ctx ctx = { + .interruptible = false, + .no_wait_gpu = false + }; struct ttm_buffer_object *bo; - size_t acc_size; + struct drm_device *vdev = &dev_priv->drm; int ret; bo = kzalloc(sizeof(*bo), GFP_KERNEL); if (unlikely(!bo)) return -ENOMEM; - acc_size = ttm_round_pot(sizeof(*bo)); - acc_size += ttm_round_pot(PFN_UP(size) * sizeof(void *)); - acc_size += ttm_round_pot(sizeof(struct ttm_tt)); - - ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx); - if (unlikely(ret)) - goto error_free; - + size = ALIGN(size, PAGE_SIZE); - bo->base.size = size; - dma_resv_init(&bo->base._resv); - drm_vma_node_reset(&bo->base.vma_node); + drm_gem_private_object_init(vdev, &bo->base, size); ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size, - ttm_bo_type_device, placement, 0, + ttm_bo_type_kernel, placement, 0, &ctx, NULL, NULL, NULL); if (unlikely(ret)) - goto error_account; + goto error_free; ttm_bo_pin(bo); ttm_bo_unreserve(bo); @@ -505,14 +420,38 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size, return 0; -error_account: - ttm_mem_global_free(&ttm_mem_glob, acc_size); - error_free: kfree(bo); return ret; } +int vmw_bo_create(struct vmw_private *vmw, + size_t size, struct ttm_placement *placement, + bool interruptible, bool pin, + void (*bo_free)(struct ttm_buffer_object *bo), + struct vmw_buffer_object **p_bo) +{ + int ret; + + *p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL); + if (unlikely(!*p_bo)) { + DRM_ERROR("Failed to allocate a buffer.\n"); + return -ENOMEM; + } + + ret = vmw_bo_init(vmw, *p_bo, size, + placement, interruptible, pin, + bo_free); + if (unlikely(ret != 0)) + goto out_error; + + return ret; +out_error: + kfree(*p_bo); + *p_bo = NULL; + return ret; +} + /** * vmw_bo_init - Initialize a vmw buffer object * @@ -533,192 +472,44 @@ int vmw_bo_init(struct vmw_private *dev_priv, bool interruptible, bool pin, void (*bo_free)(struct ttm_buffer_object *bo)) { - struct ttm_operation_ctx ctx = { interruptible, false }; + struct ttm_operation_ctx ctx = { + .interruptible = interruptible, + .no_wait_gpu = false + }; struct ttm_device *bdev = &dev_priv->bdev; - size_t acc_size; + struct drm_device *vdev = &dev_priv->drm; int ret; - bool user = (bo_free == &vmw_user_bo_destroy); - - WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free))); - acc_size = vmw_bo_acc_size(dev_priv, size, user); + WARN_ON_ONCE(!bo_free); memset(vmw_bo, 0, sizeof(*vmw_bo)); BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3); vmw_bo->base.priority = 3; vmw_bo->res_tree = RB_ROOT; - ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx); - if (unlikely(ret)) - return ret; - - vmw_bo->base.base.size = size; - dma_resv_init(&vmw_bo->base.base._resv); - drm_vma_node_reset(&vmw_bo->base.base.vma_node); + size = ALIGN(size, PAGE_SIZE); + drm_gem_private_object_init(vdev, &vmw_bo->base.base, size); ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size, - ttm_bo_type_device, placement, + ttm_bo_type_device, + placement, 0, &ctx, NULL, NULL, bo_free); if (unlikely(ret)) { - ttm_mem_global_free(&ttm_mem_glob, acc_size); return ret; } if (pin) ttm_bo_pin(&vmw_bo->base); ttm_bo_unreserve(&vmw_bo->base); - return 0; -} - - -/** - * vmw_user_bo_release - TTM reference base object release callback for - * vmw user buffer objects - * - * @p_base: The TTM base object 
pointer about to be unreferenced. - * - * Clears the TTM base object pointer and drops the reference the - * base object has on the underlying struct vmw_buffer_object. - */ -static void vmw_user_bo_release(struct ttm_base_object **p_base) -{ - struct vmw_user_buffer_object *vmw_user_bo; - struct ttm_base_object *base = *p_base; - - *p_base = NULL; - - if (unlikely(base == NULL)) - return; - - vmw_user_bo = container_of(base, struct vmw_user_buffer_object, - prime.base); - ttm_bo_put(&vmw_user_bo->vbo.base); -} - - -/** - * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback - * for vmw user buffer objects - * - * @base: Pointer to the TTM base object - * @ref_type: Reference type of the reference reaching zero. - * - * Called when user-space drops its last synccpu reference on the buffer - * object, Either explicitly or as part of a cleanup file close. - */ -static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base, - enum ttm_ref_type ref_type) -{ - struct vmw_user_buffer_object *user_bo; - user_bo = container_of(base, struct vmw_user_buffer_object, prime.base); - - switch (ref_type) { - case TTM_REF_SYNCCPU_WRITE: - atomic_dec(&user_bo->vbo.cpu_writers); - break; - default: - WARN_ONCE(true, "Undefined buffer object reference release.\n"); - } -} - - -/** - * vmw_user_bo_alloc - Allocate a user buffer object - * - * @dev_priv: Pointer to a struct device private. - * @tfile: Pointer to a struct ttm_object_file on which to register the user - * object. - * @size: Size of the buffer object. - * @shareable: Boolean whether the buffer is shareable with other open files. - * @handle: Pointer to where the handle value should be assigned. - * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer - * should be assigned. - * @p_base: The TTM base object pointer about to be allocated. - * Return: Zero on success, negative error code on error. - */ -int vmw_user_bo_alloc(struct vmw_private *dev_priv, - struct ttm_object_file *tfile, - uint32_t size, - bool shareable, - uint32_t *handle, - struct vmw_buffer_object **p_vbo, - struct ttm_base_object **p_base) -{ - struct vmw_user_buffer_object *user_bo; - int ret; - - user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL); - if (unlikely(!user_bo)) { - DRM_ERROR("Failed to allocate a buffer.\n"); - return -ENOMEM; - } - - ret = vmw_bo_init(dev_priv, &user_bo->vbo, size, - (dev_priv->has_mob) ? - &vmw_sys_placement : - &vmw_vram_sys_placement, true, false, - &vmw_user_bo_destroy); - if (unlikely(ret != 0)) - return ret; - - ttm_bo_get(&user_bo->vbo.base); - ret = ttm_prime_object_init(tfile, - size, - &user_bo->prime, - shareable, - ttm_buffer_type, - &vmw_user_bo_release, - &vmw_user_bo_ref_obj_release); - if (unlikely(ret != 0)) { - ttm_bo_put(&user_bo->vbo.base); - goto out_no_base_object; - } - - *p_vbo = &user_bo->vbo; - if (p_base) { - *p_base = &user_bo->prime.base; - kref_get(&(*p_base)->refcount); - } - *handle = user_bo->prime.base.handle; - -out_no_base_object: - return ret; -} - - -/** - * vmw_user_bo_verify_access - verify access permissions on this - * buffer object. - * - * @bo: Pointer to the buffer object being accessed - * @tfile: Identifying the caller. - */ -int vmw_user_bo_verify_access(struct ttm_buffer_object *bo, - struct ttm_object_file *tfile) -{ - struct vmw_user_buffer_object *vmw_user_bo; - - if (unlikely(bo->destroy != vmw_user_bo_destroy)) - return -EPERM; - - vmw_user_bo = vmw_user_buffer_object(bo); - - /* Check that the caller has opened the object. 
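The added vmw_bo_create() follows the kernel's allocate, initialize, unwind-on-error shape: the caller's pointer is only left set on success. A compact illustration of the pattern with hypothetical types:

```c
/* Allocate-init-unwind, the shape of the added vmw_bo_create(): on any
 * failure the partially built object is freed and the caller's pointer
 * is cleared. */
#include <stdio.h>
#include <stdlib.h>

struct bo { size_t size; };

static int bo_init(struct bo *bo, size_t size)
{
    if (size == 0)
        return -22;    /* stands in for -EINVAL */
    bo->size = size;
    return 0;
}

static int bo_create(size_t size, struct bo **p_bo)
{
    int ret;

    *p_bo = malloc(sizeof(**p_bo));
    if (!*p_bo)
        return -12;    /* stands in for -ENOMEM */

    ret = bo_init(*p_bo, size);
    if (ret != 0)
        goto out_error;
    return 0;

out_error:
    free(*p_bo);
    *p_bo = NULL;      /* never hand back a half-built object */
    return ret;
}

int main(void)
{
    struct bo *bo;

    printf("create 4096: %d\n", bo_create(4096, &bo));
    free(bo);
    printf("create 0: %d\n", bo_create(0, &bo));
    return 0;
}
```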
*/ - if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base))) - return 0; - - DRM_ERROR("Could not grant buffer access.\n"); - return -EPERM; + return 0; } - /** - * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu + * vmw_user_bo_synccpu_grab - Grab a struct vmw_buffer_object for cpu * access, idling previous GPU operations on the buffer and optionally * blocking it for further command submissions. * - * @user_bo: Pointer to the buffer object being grabbed for CPU access - * @tfile: Identifying the caller. + * @vmw_bo: Pointer to the buffer object being grabbed for CPU access * @flags: Flags indicating how the grab should be performed. * Return: Zero on success, Negative error code on error. In particular, * -EBUSY will be returned if a dontblock operation is requested and the @@ -727,13 +518,11 @@ int vmw_user_bo_verify_access(struct ttm_buffer_object *bo, * * A blocking grab will be automatically released when @tfile is closed. */ -static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo, - struct ttm_object_file *tfile, +static int vmw_user_bo_synccpu_grab(struct vmw_buffer_object *vmw_bo, uint32_t flags) { bool nonblock = !!(flags & drm_vmw_synccpu_dontblock); - struct ttm_buffer_object *bo = &user_bo->vbo.base; - bool existed; + struct ttm_buffer_object *bo = &vmw_bo->base; int ret; if (flags & drm_vmw_synccpu_allow_cs) { @@ -755,17 +544,12 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo, ret = ttm_bo_wait(bo, true, nonblock); if (likely(ret == 0)) - atomic_inc(&user_bo->vbo.cpu_writers); + atomic_inc(&vmw_bo->cpu_writers); ttm_bo_unreserve(bo); if (unlikely(ret != 0)) return ret; - ret = ttm_ref_object_add(tfile, &user_bo->prime.base, - TTM_REF_SYNCCPU_WRITE, &existed, false); - if (ret != 0 || existed) - atomic_dec(&user_bo->vbo.cpu_writers); - return ret; } @@ -773,19 +557,23 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo, * vmw_user_bo_synccpu_release - Release a previous grab for CPU access, * and unblock command submission on the buffer if blocked. * + * @filp: Identifying the caller. * @handle: Handle identifying the buffer object. - * @tfile: Identifying the caller. * @flags: Flags indicating the type of release. 
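After this rework a synccpu grab simply bumps the buffer's cpu_writers counter, command submission is refused while it is non-zero, and release decrements it again. A compact sketch of that gate with C11 atomics and illustrative types:

```c
/* Writer-count gate, as the reworked synccpu grab/release uses: grab
 * increments cpu_writers, release decrements, and the submission path
 * refuses buffers with a non-zero count. */
#include <stdatomic.h>
#include <stdio.h>

struct buffer {
    atomic_int cpu_writers;
};

static void synccpu_grab(struct buffer *b)    { atomic_fetch_add(&b->cpu_writers, 1); }
static void synccpu_release(struct buffer *b) { atomic_fetch_sub(&b->cpu_writers, 1); }

static int submit(struct buffer *b)
{
    if (atomic_load(&b->cpu_writers) != 0)
        return -16;    /* stands in for -EBUSY: CPU access in progress */
    return 0;
}

int main(void)
{
    struct buffer b;

    atomic_init(&b.cpu_writers, 0);
    synccpu_grab(&b);
    printf("submit while grabbed: %d\n", submit(&b));   /* -16 */
    synccpu_release(&b);
    printf("submit after release: %d\n", submit(&b));   /* 0 */
    return 0;
}
```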
*/ -static int vmw_user_bo_synccpu_release(uint32_t handle, - struct ttm_object_file *tfile, - uint32_t flags) +static int vmw_user_bo_synccpu_release(struct drm_file *filp, + uint32_t handle, + uint32_t flags) { - if (!(flags & drm_vmw_synccpu_allow_cs)) - return ttm_ref_object_base_unref(tfile, handle, - TTM_REF_SYNCCPU_WRITE); + struct vmw_buffer_object *vmw_bo; + int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo); - return 0; + if (!ret) { + if (!(flags & drm_vmw_synccpu_allow_cs)) { + atomic_dec(&vmw_bo->cpu_writers); + } + ttm_bo_put(&vmw_bo->base); + } + + return ret; } @@ -807,9 +595,6 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data, struct drm_vmw_synccpu_arg *arg = (struct drm_vmw_synccpu_arg *) data; struct vmw_buffer_object *vbo; - struct vmw_user_buffer_object *user_bo; - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct ttm_base_object *buffer_base; int ret; if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0 @@ -822,16 +607,12 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data, switch (arg->op) { case drm_vmw_synccpu_grab: - ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo, - &buffer_base); + ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo); if (unlikely(ret != 0)) return ret; - user_bo = container_of(vbo, struct vmw_user_buffer_object, - vbo); - ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags); + ret = vmw_user_bo_synccpu_grab(vbo, arg->flags); vmw_bo_unreference(&vbo); - ttm_base_object_unref(&buffer_base); if (unlikely(ret != 0 && ret != -ERESTARTSYS && ret != -EBUSY)) { DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n", @@ -840,7 +621,8 @@ } break; case drm_vmw_synccpu_release: - ret = vmw_user_bo_synccpu_release(arg->handle, tfile, + ret = vmw_user_bo_synccpu_release(file_priv, + arg->handle, arg->flags); if (unlikely(ret != 0)) { DRM_ERROR("Failed synccpu release on handle 0x%08x.\n", @@ -856,50 +638,6 @@ return 0; } - -/** - * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object - * allocation functionality. - * - * @dev: Identifies the drm device. - * @data: Pointer to the ioctl argument. - * @file_priv: Identifies the caller. - * Return: Zero on success, negative error code on error. - * - * This function checks the ioctl arguments for validity and allocates a - * struct vmw_user_buffer_object bo. - */ -int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct vmw_private *dev_priv = vmw_priv(dev); - union drm_vmw_alloc_dmabuf_arg *arg = - (union drm_vmw_alloc_dmabuf_arg *)data; - struct drm_vmw_alloc_dmabuf_req *req = &arg->req; - struct drm_vmw_dmabuf_rep *rep = &arg->rep; - struct vmw_buffer_object *vbo; - uint32_t handle; - int ret; - - ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, - req->size, false, &handle, &vbo, - NULL); - if (unlikely(ret != 0)) - goto out_no_bo; - - rep->handle = handle; - rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node); - rep->cur_gmr_id = handle; - rep->cur_gmr_offset = 0; - - vmw_bo_unreference(&vbo); - -out_no_bo: - - return ret; -} - - /** * vmw_bo_unref_ioctl - Generic handle close ioctl.
* @@ -917,65 +655,48 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data, struct drm_vmw_unref_dmabuf_arg *arg = (struct drm_vmw_unref_dmabuf_arg *)data; - return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, - arg->handle, - TTM_REF_USAGE); + drm_gem_handle_delete(file_priv, arg->handle); + return 0; } /** * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle. * - * @tfile: The TTM object file the handle is registered with. + * @filp: The file the handle is registered with. * @handle: The user buffer object handle * @out: Pointer to a where a pointer to the embedded * struct vmw_buffer_object should be placed. - * @p_base: Pointer to where a pointer to the TTM base object should be - * placed, or NULL if no such pointer is required. * Return: Zero on success, Negative error code on error. * - * Both the output base object pointer and the vmw buffer object pointer - * will be refcounted. + * The vmw buffer object pointer will be refcounted. */ -int vmw_user_bo_lookup(struct ttm_object_file *tfile, - uint32_t handle, struct vmw_buffer_object **out, - struct ttm_base_object **p_base) +int vmw_user_bo_lookup(struct drm_file *filp, + uint32_t handle, + struct vmw_buffer_object **out) { - struct vmw_user_buffer_object *vmw_user_bo; - struct ttm_base_object *base; + struct drm_gem_object *gobj; - base = ttm_base_object_lookup(tfile, handle); - if (unlikely(base == NULL)) { + gobj = drm_gem_object_lookup(filp, handle); + if (!gobj) { DRM_ERROR("Invalid buffer object handle 0x%08lx.\n", (unsigned long)handle); return -ESRCH; } - if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) { - ttm_base_object_unref(&base); - DRM_ERROR("Invalid buffer object handle 0x%08lx.\n", - (unsigned long)handle); - return -EINVAL; - } - - vmw_user_bo = container_of(base, struct vmw_user_buffer_object, - prime.base); - ttm_bo_get(&vmw_user_bo->vbo.base); - if (p_base) - *p_base = base; - else - ttm_base_object_unref(&base); - *out = &vmw_user_bo->vbo; + *out = gem_to_vmw_bo(gobj); + ttm_bo_get(&(*out)->base); + drm_gem_object_put(gobj); return 0; } /** * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference - * @tfile: The TTM object file the handle is registered with. + * @filp: The file the handle is registered with. * @handle: The user buffer object handle. * - * This function looks up a struct vmw_user_bo and returns a pointer to the + * This function looks up a struct vmw_bo and returns a pointer to the * struct vmw_buffer_object it derives from without refcounting the pointer. * The returned pointer is only valid until vmw_user_bo_noref_release() is * called, and the object pointed to by the returned pointer may be doomed. @@ -988,52 +709,23 @@ int vmw_user_bo_lookup(struct ttm_object_file *tfile, * error pointer on failure.
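The noref lookup below signals failure through the kernel's error-pointer convention: a small negative errno encoded in the pointer value itself. A userspace re-creation of the idiom, simplified and assuming errno values stay below 4096 and an LP64 pointer size:

```c
/* The ERR_PTR/IS_ERR idiom used by vmw_user_bo_noref_lookup(): errno
 * values occupy the top 4095 values of the address space, so a pointer
 * can carry either a valid address or an error code. */
#include <stdio.h>

#define MAX_ERRNO 4095UL

static inline void *ERR_PTR(long error)    { return (void *)error; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline int   IS_ERR(const void *p)
{
    return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *lookup(int handle)
{
    static int object = 42;

    if (handle != 1)
        return ERR_PTR(-3);    /* stands in for -ESRCH: no such handle */
    return &object;
}

int main(void)
{
    void *p = lookup(7);

    if (IS_ERR(p))
        printf("lookup failed: %ld\n", PTR_ERR(p));    /* -3 */
    else
        printf("found %d\n", *(int *)p);
    return 0;
}
```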
*/ struct vmw_buffer_object * -vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle) +vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle) { - struct vmw_user_buffer_object *vmw_user_bo; - struct ttm_base_object *base; + struct vmw_buffer_object *vmw_bo; + struct ttm_buffer_object *bo; + struct drm_gem_object *gobj = drm_gem_object_lookup(filp, handle); - base = ttm_base_object_noref_lookup(tfile, handle); - if (!base) { + if (!gobj) { DRM_ERROR("Invalid buffer object handle 0x%08lx.\n", (unsigned long)handle); return ERR_PTR(-ESRCH); } + vmw_bo = gem_to_vmw_bo(gobj); + bo = ttm_bo_get_unless_zero(&vmw_bo->base); + vmw_bo = vmw_buffer_object(bo); + drm_gem_object_put(gobj); - if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) { - ttm_base_object_noref_release(); - DRM_ERROR("Invalid buffer object handle 0x%08lx.\n", - (unsigned long)handle); - return ERR_PTR(-EINVAL); - } - - vmw_user_bo = container_of(base, struct vmw_user_buffer_object, - prime.base); - return &vmw_user_bo->vbo; -} - -/** - * vmw_user_bo_reference - Open a handle to a vmw user buffer object. - * - * @tfile: The TTM object file to register the handle with. - * @vbo: The embedded vmw buffer object. - * @handle: Pointer to where the new handle should be placed. - * Return: Zero on success, Negative error code on error. - */ -int vmw_user_bo_reference(struct ttm_object_file *tfile, - struct vmw_buffer_object *vbo, - uint32_t *handle) -{ - struct vmw_user_buffer_object *user_bo; - - if (vbo->base.destroy != vmw_user_bo_destroy) - return -EINVAL; - - user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo); - - *handle = user_bo->prime.base.handle; - return ttm_ref_object_add(tfile, &user_bo->prime.base, - TTM_REF_USAGE, NULL, false); + return vmw_bo; } @@ -1087,68 +779,15 @@ int vmw_dumb_create(struct drm_file *file_priv, int ret; args->pitch = args->width * ((args->bpp + 7) / 8); - args->size = args->pitch * args->height; + args->size = ALIGN(args->pitch * args->height, PAGE_SIZE); - ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, - args->size, false, &args->handle, - &vbo, NULL); - if (unlikely(ret != 0)) - goto out_no_bo; + ret = vmw_gem_object_create_with_handle(dev_priv, file_priv, + args->size, &args->handle, + &vbo); - vmw_bo_unreference(&vbo); -out_no_bo: return ret; } - -/** - * vmw_dumb_map_offset - Return the address space offset of a dumb buffer - * - * @file_priv: Pointer to a struct drm_file identifying the caller. - * @dev: Pointer to the drm device. - * @handle: Handle identifying the dumb buffer. - * @offset: The address space offset returned. - * Return: Zero on success, negative error code on failure. - * - * This is a driver callback for the core drm dumb_map_offset functionality. - */ -int vmw_dumb_map_offset(struct drm_file *file_priv, - struct drm_device *dev, uint32_t handle, - uint64_t *offset) -{ - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct vmw_buffer_object *out_buf; - int ret; - - ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL); - if (ret != 0) - return -EINVAL; - - *offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node); - vmw_bo_unreference(&out_buf); - return 0; -} - - -/** - * vmw_dumb_destroy - Destroy a dumb boffer - * - * @file_priv: Pointer to a struct drm_file identifying the caller. - * @dev: Pointer to the drm device. - * @handle: Handle identifying the dumb buffer. - * Return: Zero on success, negative error code on failure. 
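vmw_dumb_create() now derives the allocation size from the requested geometry: pitch is width times bytes per pixel (bits rounded up to whole bytes), and the total is page-aligned. A runnable check of that arithmetic, assuming 4 KiB pages, with ALIGN mirroring the kernel macro:

```c
/* The dumb-buffer size math from vmw_dumb_create(): bits -> bytes per
 * pixel, pitch = width * bytes, size page-aligned. PAGE_SIZE assumed 4096. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    uint32_t width = 1920, height = 1080, bpp = 32;
    uint32_t pitch = width * ((bpp + 7) / 8);    /* round bits up to bytes */
    uint32_t size  = ALIGN(pitch * height, PAGE_SIZE);

    printf("pitch %u bytes, size %u bytes\n", pitch, size);
    /* 1920 * 4 = 7680; 7680 * 1080 = 8294400, already page-aligned here */
    return 0;
}
```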
- * - * This is a driver callback for the core drm dumb_destroy functionality. - */ -int vmw_dumb_destroy(struct drm_file *file_priv, - struct drm_device *dev, - uint32_t handle) -{ - return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, - handle, TTM_REF_USAGE); -} - - /** * vmw_bo_swap_notify - swapout notify callback. * * @@ -1157,8 +796,7 @@ int vmw_dumb_destroy(struct drm_file *file_priv, void vmw_bo_swap_notify(struct ttm_buffer_object *bo) { /* Is @bo embedded in a struct vmw_buffer_object? */ - if (bo->destroy != vmw_bo_bo_free && - bo->destroy != vmw_user_bo_destroy + if (!vmw_bo_is_vmw_bo(bo)) return; /* Kill any cached kernel maps before swapout */ @@ -1182,8 +820,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo, struct vmw_buffer_object *vbo; /* Make sure @bo is embedded in a struct vmw_buffer_object? */ - if (bo->destroy != vmw_bo_bo_free && - bo->destroy != vmw_user_bo_destroy + if (!vmw_bo_is_vmw_bo(bo)) return; vbo = container_of(bo, struct vmw_buffer_object, base); @@ -1204,3 +841,22 @@ if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB) vmw_resource_unbind_list(vbo); } + +/** + * vmw_bo_is_vmw_bo - check if the buffer object is a &vmw_buffer_object + * @bo: buffer object to be checked + * + * Uses destroy function associated with the object to determine if this is + * a &vmw_buffer_object. + * + * Returns: + * true if the object is of &vmw_buffer_object type, false if not. + */ +bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo) +{ + if (bo->destroy == &vmw_bo_bo_free || + bo->destroy == &vmw_gem_destroy) + return true; + + return false; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c index 67db472d3493..a3bfbb6c3e14 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c @@ -145,6 +145,13 @@ struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv) (unsigned int) max, (unsigned int) min, (unsigned int) fifo->capabilities); + + if (unlikely(min >= max)) { + drm_warn(&dev_priv->drm, + "FIFO memory is not usable. Driver failed to initialize."); + return ERR_PTR(-ENXIO); + } + return fifo; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c index 8381750db81b..415774fde796 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c @@ -42,7 +42,7 @@ */ struct vmw_cmdbuf_res { struct vmw_resource *res; - struct drm_hash_item hash; + struct vmwgfx_hash_item hash; struct list_head head; enum vmw_cmdbuf_res_state state; struct vmw_cmdbuf_res_manager *man; @@ -59,7 +59,7 @@ struct vmw_cmdbuf_res { * @resources and @list are protected by the cmdbuf mutex for now.
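The new vmw_bo_is_vmw_bo() identifies the driver's own buffer objects by comparing the destroy callback pointer, since every vmw BO is created with one of two known destructors. A small function-pointer sketch of the idea:

```c
/* Type identification via destructor pointer, as vmw_bo_is_vmw_bo()
 * does: an object created by this module carries one of its known
 * destroy callbacks. */
#include <stdbool.h>
#include <stdio.h>

struct object {
    void (*destroy)(struct object *obj);
};

static void destroy_a(struct object *o)       { (void)o; }
static void destroy_b(struct object *o)       { (void)o; }
static void destroy_foreign(struct object *o) { (void)o; }

static bool is_ours(const struct object *o)
{
    return o->destroy == &destroy_a || o->destroy == &destroy_b;
}

int main(void)
{
    struct object ours   = { .destroy = destroy_a };
    struct object theirs = { .destroy = destroy_foreign };

    printf("ours: %d, theirs: %d\n", is_ours(&ours), is_ours(&theirs));
    return 0;
}
```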
*/ struct vmw_cmdbuf_res_manager { - struct drm_open_hash resources; + struct vmwgfx_open_hash resources; struct list_head list; struct vmw_private *dev_priv; }; @@ -81,11 +81,11 @@ vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man, enum vmw_cmdbuf_res_type res_type, u32 user_key) { - struct drm_hash_item *hash; + struct vmwgfx_hash_item *hash; int ret; unsigned long key = user_key | (res_type << 24); - ret = drm_ht_find_item(&man->resources, key, &hash); + ret = vmwgfx_ht_find_item(&man->resources, key, &hash); if (unlikely(ret != 0)) return ERR_PTR(ret); @@ -105,7 +105,7 @@ static void vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man, struct vmw_cmdbuf_res *entry) { list_del(&entry->head); - WARN_ON(drm_ht_remove_item(&man->resources, &entry->hash)); + WARN_ON(vmwgfx_ht_remove_item(&man->resources, &entry->hash)); vmw_resource_unreference(&entry->res); kfree(entry); } @@ -167,7 +167,7 @@ void vmw_cmdbuf_res_revert(struct list_head *list) vmw_cmdbuf_res_free(entry->man, entry); break; case VMW_CMDBUF_RES_DEL: - ret = drm_ht_insert_item(&entry->man->resources, &entry->hash); + ret = vmwgfx_ht_insert_item(&entry->man->resources, &entry->hash); BUG_ON(ret); list_move_tail(&entry->head, &entry->man->list); entry->state = VMW_CMDBUF_RES_COMMITTED; @@ -206,7 +206,7 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man, return -ENOMEM; cres->hash.key = user_key | (res_type << 24); - ret = drm_ht_insert_item(&man->resources, &cres->hash); + ret = vmwgfx_ht_insert_item(&man->resources, &cres->hash); if (unlikely(ret != 0)) { kfree(cres); goto out_invalid_key; @@ -244,10 +244,10 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man, struct vmw_resource **res_p) { struct vmw_cmdbuf_res *entry; - struct drm_hash_item *hash; + struct vmwgfx_hash_item *hash; int ret; - ret = drm_ht_find_item(&man->resources, user_key | (res_type << 24), + ret = vmwgfx_ht_find_item(&man->resources, user_key | (res_type << 24), &hash); if (likely(ret != 0)) return -EINVAL; @@ -260,7 +260,7 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man, *res_p = NULL; break; case VMW_CMDBUF_RES_COMMITTED: - (void) drm_ht_remove_item(&man->resources, &entry->hash); + (void) vmwgfx_ht_remove_item(&man->resources, &entry->hash); list_del(&entry->head); entry->state = VMW_CMDBUF_RES_DEL; list_add_tail(&entry->head, list); @@ -295,7 +295,7 @@ vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv) man->dev_priv = dev_priv; INIT_LIST_HEAD(&man->list); - ret = drm_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER); + ret = vmwgfx_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER); if (ret == 0) return man; @@ -320,26 +320,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man) list_for_each_entry_safe(entry, next, &man->list, head) vmw_cmdbuf_res_free(man, entry); - drm_ht_remove(&man->resources); + vmwgfx_ht_remove(&man->resources); kfree(man); } -/** - * vmw_cmdbuf_res_man_size - Return the size of a command buffer managed - * resource manager - * - * Returns the approximate allocation size of a command buffer managed - * resource manager. 
- */ -size_t vmw_cmdbuf_res_man_size(void) -{ - static size_t res_man_size; - - if (unlikely(res_man_size == 0)) - res_man_size = - ttm_round_pot(sizeof(struct vmw_cmdbuf_res_manager)) + - ttm_round_pot(sizeof(struct hlist_head) << - VMW_CMDBUF_RES_MAN_HT_ORDER); - - return res_man_size; -} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c index 4446758b6880..e0f48cd9529b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c @@ -60,8 +60,6 @@ static int vmw_dx_context_unbind(struct vmw_resource *res, struct ttm_validate_buffer *val_buf); static int vmw_dx_context_destroy(struct vmw_resource *res); -static uint64_t vmw_user_context_size; - static const struct vmw_user_resource_conv user_context_conv = { .object_type = VMW_RES_CONTEXT, .base_obj_to_res = vmw_user_context_base_to_res, @@ -686,7 +684,6 @@ static void vmw_user_context_free(struct vmw_resource *res) { struct vmw_user_context *ctx = container_of(res, struct vmw_user_context, res); - struct vmw_private *dev_priv = res->dev_priv; if (ctx->cbs) vmw_binding_state_free(ctx->cbs); @@ -694,8 +691,6 @@ static void vmw_user_context_free(struct vmw_resource *res) (void) vmw_context_bind_dx_query(res, NULL); ttm_base_object_kfree(ctx, base); - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_user_context_size); } /* @@ -720,7 +715,7 @@ int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE); + return ttm_ref_object_base_unref(tfile, arg->cid); } static int vmw_context_define(struct drm_device *dev, void *data, @@ -732,10 +727,6 @@ static int vmw_context_define(struct drm_device *dev, void *data, struct vmw_resource *tmp; struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct ttm_operation_ctx ttm_opt_ctx = { - .interruptible = true, - .no_wait_gpu = false - }; int ret; if (!has_sm4_context(dev_priv) && dx) { @@ -743,25 +734,8 @@ static int vmw_context_define(struct drm_device *dev, void *data, return -EINVAL; } - if (unlikely(vmw_user_context_size == 0)) - vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + - ((dev_priv->has_mob) ? 
vmw_cmdbuf_res_man_size() : 0) + - + VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE; - - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), - vmw_user_context_size, - &ttm_opt_ctx); - if (unlikely(ret != 0)) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for context" - " creation.\n"); - goto out_ret; - } - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (unlikely(!ctx)) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_user_context_size); ret = -ENOMEM; goto out_ret; } @@ -780,7 +754,7 @@ static int vmw_context_define(struct drm_device *dev, void *data, tmp = vmw_resource_reference(&ctx->res); ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT, - &vmw_user_context_base_release, NULL); + &vmw_user_context_base_release); if (unlikely(ret != 0)) { vmw_resource_unreference(&tmp); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index 17a98db00017..16f986b6cbea 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c @@ -407,12 +407,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) * for the new COTable. Initially pin the buffer object to make sure * we can use tryreserve without failure. */ - buf = kzalloc(sizeof(*buf), GFP_KERNEL); - if (!buf) - return -ENOMEM; - - ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_placement, - true, true, vmw_bo_bo_free); + ret = vmw_bo_create(dev_priv, new_size, &vmw_mob_placement, + true, true, vmw_bo_bo_free, &buf); if (ret) { DRM_ERROR("Failed initializing new cotable MOB.\n"); return ret; @@ -546,8 +542,6 @@ static void vmw_hw_cotable_destroy(struct vmw_resource *res) (void) vmw_cotable_destroy(res); } -static size_t cotable_acc_size; - /** * vmw_cotable_free - Cotable resource destructor * @@ -555,10 +549,7 @@ static size_t cotable_acc_size; */ static void vmw_cotable_free(struct vmw_resource *res) { - struct vmw_private *dev_priv = res->dev_priv; - kfree(res); - ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size); } /** @@ -574,21 +565,9 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv, u32 type) { struct vmw_cotable *vcotbl; - struct ttm_operation_ctx ttm_opt_ctx = { - .interruptible = true, - .no_wait_gpu = false - }; int ret; u32 num_entries; - if (unlikely(cotable_acc_size == 0)) - cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable)); - - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), - cotable_acc_size, &ttm_opt_ctx); - if (unlikely(ret)) - return ERR_PTR(ret); - vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL); if (unlikely(!vcotbl)) { ret = -ENOMEM; @@ -622,7 +601,6 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv, out_no_init: kfree(vcotbl); out_no_alloc: - ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index bfd71c86faa5..2d59bdad0373 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -25,7 +25,6 @@ * **************************************************************************/ -#include <linux/console.h> #include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/pci.h> @@ -35,6 +34,7 @@ #include <drm/drm_drv.h> #include <drm/drm_ioctl.h> #include <drm/drm_sysfs.h> +#include <drm/drm_gem_ttm_helper.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_range_manager.h> #include <drm/ttm/ttm_placement.h> @@ -51,9 +51,6 @@ #define VMW_MIN_INITIAL_WIDTH 800 
#define VMW_MIN_INITIAL_HEIGHT 600 -#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE) - - /* * Fully encoded drm commands. Might move to vmw_drm.h */ @@ -166,7 +163,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { DRM_IOCTL_DEF_DRV(VMW_GET_PARAM, vmw_getparam_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl, + DRM_IOCTL_DEF_DRV(VMW_ALLOC_DMABUF, vmw_gem_object_create_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl, DRM_RENDER_ALLOW), @@ -258,8 +255,8 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { }; static const struct pci_device_id vmw_pci_id_list[] = { - { PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA2) }, - { PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA3) }, + { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, VMWGFX_PCI_ID_SVGA2) }, + { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, VMWGFX_PCI_ID_SVGA3) }, { } }; MODULE_DEVICE_TABLE(pci, vmw_pci_id_list); @@ -367,6 +364,7 @@ static void vmw_print_sm_type(struct vmw_private *dev_priv) [VMW_SM_4] = "SM4", [VMW_SM_4_1] = "SM4_1", [VMW_SM_5] = "SM_5", + [VMW_SM_5_1X] = "SM_5_1X", [VMW_SM_MAX] = "Invalid" }; BUILD_BUG_ON(ARRAY_SIZE(names) != (VMW_SM_MAX + 1)); @@ -400,13 +398,9 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) * immediately succeed. This is because we're the only * user of the bo currently. */ - vbo = kzalloc(sizeof(*vbo), GFP_KERNEL); - if (!vbo) - return -ENOMEM; - - ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE, - &vmw_sys_placement, false, true, - &vmw_bo_bo_free); + ret = vmw_bo_create(dev_priv, PAGE_SIZE, + &vmw_sys_placement, false, true, + &vmw_bo_bo_free, &vbo); if (unlikely(ret != 0)) return ret; @@ -987,8 +981,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) goto out_err0; } - dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12, - &vmw_prime_dmabuf_ops); + dev_priv->tdev = ttm_object_device_init(12, &vmw_prime_dmabuf_ops); if (unlikely(dev_priv->tdev == NULL)) { drm_err(&dev_priv->drm, @@ -1071,6 +1064,12 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) "3D will be disabled.\n"); dev_priv->has_mob = false; } + if (vmw_sys_man_init(dev_priv) != 0) { + drm_info(&dev_priv->drm, + "No MOB page table memory available. 
" + "3D will be disabled.\n"); + dev_priv->has_mob = false; + } } if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) { @@ -1078,8 +1077,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) dev_priv->sm_type = VMW_SM_4; } - vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN); - /* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */ if (has_sm4_context(dev_priv) && (dev_priv->capabilities2 & SVGA_CAP2_DX2)) { @@ -1087,8 +1084,11 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) dev_priv->sm_type = VMW_SM_4_1; if (has_sm4_1_context(dev_priv) && (dev_priv->capabilities2 & SVGA_CAP2_DX3)) { - if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM5)) + if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM5)) { dev_priv->sm_type = VMW_SM_5; + if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_GL43)) + dev_priv->sm_type = VMW_SM_5_1X; + } } } @@ -1121,8 +1121,10 @@ out_no_fifo: vmw_overlay_close(dev_priv); vmw_kms_close(dev_priv); out_no_kms: - if (dev_priv->has_mob) + if (dev_priv->has_mob) { vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB); + vmw_sys_man_fini(dev_priv); + } if (dev_priv->has_gmr) vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR); vmw_devcaps_destroy(dev_priv); @@ -1156,7 +1158,7 @@ static void vmw_driver_unload(struct drm_device *dev) unregister_pm_notifier(&dev_priv->pm_nb); if (dev_priv->ctx.res_ht_initialized) - drm_ht_remove(&dev_priv->ctx.res_ht); + vmwgfx_ht_remove(&dev_priv->ctx.res_ht); vfree(dev_priv->ctx.cmd_bounce); if (dev_priv->enable_fb) { vmw_fb_off(dev_priv); @@ -1172,8 +1174,10 @@ static void vmw_driver_unload(struct drm_device *dev) vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR); vmw_release_device_early(dev_priv); - if (dev_priv->has_mob) + if (dev_priv->has_mob) { vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB); + vmw_sys_man_fini(dev_priv); + } vmw_devcaps_destroy(dev_priv); vmw_vram_manager_fini(dev_priv); ttm_device_fini(&dev_priv->bdev); @@ -1388,7 +1392,6 @@ static void vmw_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); - ttm_mem_global_release(&ttm_mem_glob); drm_dev_unregister(dev); vmw_driver_unload(dev); } @@ -1576,7 +1579,7 @@ static const struct file_operations vmwgfx_driver_fops = { static const struct drm_driver driver = { .driver_features = - DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC, + DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM, .ioctls = vmw_ioctls, .num_ioctls = ARRAY_SIZE(vmw_ioctls), .master_set = vmw_master_set, @@ -1585,8 +1588,7 @@ static const struct drm_driver driver = { .postclose = vmw_postclose, .dumb_create = vmw_dumb_create, - .dumb_map_offset = vmw_dumb_map_offset, - .dumb_destroy = vmw_dumb_destroy, + .dumb_map_offset = drm_gem_ttm_dumb_map_offset, .prime_fd_to_handle = vmw_prime_fd_to_handle, .prime_handle_to_fd = vmw_prime_handle_to_fd, @@ -1617,41 +1619,43 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver); if (ret) - return ret; + goto out_error; ret = pcim_enable_device(pdev); if (ret) - return ret; + goto out_error; vmw = devm_drm_dev_alloc(&pdev->dev, &driver, struct vmw_private, drm); - if (IS_ERR(vmw)) - return PTR_ERR(vmw); + if (IS_ERR(vmw)) { + ret = PTR_ERR(vmw); + goto out_error; + } pci_set_drvdata(pdev, &vmw->drm); - ret = ttm_mem_global_init(&ttm_mem_glob, &pdev->dev); - if (ret) - return ret; - ret = vmw_driver_load(vmw, ent->device); if (ret) - return ret; + goto out_error; ret = drm_dev_register(&vmw->drm, 0); - if (ret) { - 
vmw_driver_unload(&vmw->drm); - return ret; - } + if (ret) + goto out_unload; + + vmw_debugfs_gem_init(vmw); return 0; +out_unload: + vmw_driver_unload(&vmw->drm); +out_error: + return ret; } static int __init vmwgfx_init(void) { int ret; - if (vgacon_text_force()) + if (drm_firmware_drivers_only()) return -EINVAL; ret = pci_register_driver(&vmw_pci_driver); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 858aff99a3fe..4ec2b99351cf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -34,7 +34,6 @@ #include <drm/drm_auth.h> #include <drm/drm_device.h> #include <drm/drm_file.h> -#include <drm/drm_hashtab.h> #include <drm/drm_rect.h> #include <drm/ttm/ttm_bo_driver.h> @@ -43,6 +42,7 @@ #include "ttm_object.h" #include "vmwgfx_fence.h" +#include "vmwgfx_hashtab.h" #include "vmwgfx_reg.h" #include "vmwgfx_validation.h" @@ -54,9 +54,9 @@ #define VMWGFX_DRIVER_NAME "vmwgfx" -#define VMWGFX_DRIVER_DATE "20210722" +#define VMWGFX_DRIVER_DATE "20211206" #define VMWGFX_DRIVER_MAJOR 2 -#define VMWGFX_DRIVER_MINOR 19 +#define VMWGFX_DRIVER_MINOR 20 #define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) #define VMWGFX_MAX_RELOCATIONS 2048 @@ -82,8 +82,9 @@ VMWGFX_NUM_GB_SURFACE +\ VMWGFX_NUM_GB_SCREEN_TARGET) -#define VMW_PL_GMR (TTM_PL_PRIV + 0) -#define VMW_PL_MOB (TTM_PL_PRIV + 1) +#define VMW_PL_GMR (TTM_PL_PRIV + 0) +#define VMW_PL_MOB (TTM_PL_PRIV + 1) +#define VMW_PL_SYSTEM (TTM_PL_PRIV + 2) #define VMW_RES_CONTEXT ttm_driver_type0 #define VMW_RES_SURFACE ttm_driver_type1 @@ -133,7 +134,7 @@ struct vmw_buffer_object { */ struct vmw_validate_buffer { struct ttm_validate_buffer base; - struct drm_hash_item hash; + struct vmwgfx_hash_item hash; bool validate_as_mob; }; @@ -332,7 +333,6 @@ struct vmw_sg_table { struct page **pages; const dma_addr_t *addrs; struct sg_table *sgt; - unsigned long num_regions; unsigned long num_pages; }; @@ -360,6 +360,19 @@ struct vmw_piter { dma_addr_t (*dma_address)(struct vmw_piter *); }; + +struct vmw_ttm_tt { + struct ttm_tt dma_ttm; + struct vmw_private *dev_priv; + int gmr_id; + struct vmw_mob *mob; + int mem_type; + struct sg_table sgt; + struct vmw_sg_table vsgt; + bool mapped; + bool bound; +}; + /* * enum vmw_display_unit_type - Describes the display unit */ @@ -406,10 +419,11 @@ struct vmw_ctx_validation_info; * @ctx: The validation context */ struct vmw_sw_context{ - struct drm_open_hash res_ht; + struct vmwgfx_open_hash res_ht; bool res_ht_initialized; bool kernel; struct vmw_fpriv *fp; + struct drm_file *filp; uint32_t *cmd_bounce; uint32_t cmd_bounce_size; struct vmw_buffer_object *cur_query_bo; @@ -473,6 +487,7 @@ enum { * @VMW_SM_4: Context support upto SM4. * @VMW_SM_4_1: Context support upto SM4_1. * @VMW_SM_5: Context support up to SM5. + * @VMW_SM_5_1X: Adds support for sm5_1 and gl43 extensions. * @VMW_SM_MAX: Should be the last. 
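+ *
+ * The enumerators are ordered by capability, so support checks reduce
+ * to a simple comparison, e.g. (the pattern used by the has_*_context()
+ * helpers in this header):
+ *
+ *	if (dev_priv->sm_type >= VMW_SM_5)
+ *		return true;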
 */
enum vmw_sm_type {
@@ -480,6 +495,7 @@ enum vmw_sm_type {
 	VMW_SM_4,
 	VMW_SM_4_1,
 	VMW_SM_5,
+	VMW_SM_5_1X,
 	VMW_SM_MAX
 };

@@ -627,9 +643,6 @@ struct vmw_private {
 	struct vmw_cmdbuf_man *cman;
 	DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);

-	/* Validation memory reservation */
-	struct vmw_validation_mem vvm;
-
 	uint32 *devcaps;

 	/*
@@ -645,6 +658,11 @@ struct vmw_private {
 #endif
 };

+static inline struct vmw_buffer_object *gem_to_vmw_bo(struct drm_gem_object *gobj)
+{
+	return container_of((gobj), struct vmw_buffer_object, base.base);
+}
+
 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
 {
 	return container_of(res, struct vmw_surface, res);
@@ -738,6 +756,24 @@ static inline bool has_sm5_context(const struct vmw_private *dev_priv)
 	return (dev_priv->sm_type >= VMW_SM_5);
 }

+/**
+ * has_gl43_context - Does the device support a GL43 context.
+ * @dev_priv: Device private.
+ *
+ * Return: True if the device supports a GL43 context, false otherwise.
+ */
+static inline bool has_gl43_context(const struct vmw_private *dev_priv)
+{
+	return (dev_priv->sm_type >= VMW_SM_5_1X);
+}
+
+
+static inline u32 vmw_max_num_uavs(struct vmw_private *dev_priv)
+{
+	return (has_gl43_context(dev_priv) ?
+		SVGA3D_DX11_1_MAX_UAVIEWS : SVGA3D_MAX_UAVIEWS);
+}
+
 extern void vmw_svga_enable(struct vmw_private *dev_priv);
 extern void vmw_svga_disable(struct vmw_private *dev_priv);

@@ -767,7 +803,7 @@ extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
 				bool no_backup);
 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
 extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
-				  struct ttm_object_file *tfile,
+				  struct drm_file *filp,
 				  uint32_t handle,
 				  struct vmw_surface **out_surf,
 				  struct vmw_buffer_object **out_buf);
@@ -833,6 +869,7 @@ static inline void vmw_user_resource_noref_release(void)
 /**
  * Buffer object helper functions - vmwgfx_bo.c
  */
+extern bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo);
 extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
 				   struct vmw_buffer_object *bo,
 				   struct ttm_placement *placement,
@@ -857,32 +894,23 @@ extern int vmw_bo_create_kernel(struct vmw_private *dev_priv,
 				unsigned long size,
 				struct ttm_placement *placement,
 				struct ttm_buffer_object **p_bo);
+extern int vmw_bo_create(struct vmw_private *dev_priv,
+			 size_t size, struct ttm_placement *placement,
+			 bool interruptible, bool pin,
+			 void (*bo_free)(struct ttm_buffer_object *bo),
+			 struct vmw_buffer_object **p_bo);
 extern int vmw_bo_init(struct vmw_private *dev_priv,
 		       struct vmw_buffer_object *vmw_bo,
 		       size_t size, struct ttm_placement *placement,
 		       bool interruptible, bool pin,
 		       void (*bo_free)(struct ttm_buffer_object *bo));
-extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
-				     struct ttm_object_file *tfile);
-extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
-			     struct ttm_object_file *tfile,
-			     uint32_t size,
-			     bool shareable,
-			     uint32_t *handle,
-			     struct vmw_buffer_object **p_dma_buf,
-			     struct ttm_base_object **p_base);
-extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
-				 struct vmw_buffer_object *dma_buf,
-				 uint32_t *handle);
-extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
-			      struct drm_file *file_priv);
 extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
-extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
-			      uint32_t id, struct vmw_buffer_object
**out, - struct ttm_base_object **base); +extern int vmw_user_bo_lookup(struct drm_file *filp, + uint32_t handle, + struct vmw_buffer_object **out); extern void vmw_bo_fence_single(struct ttm_buffer_object *bo, struct vmw_fence_obj *fence); extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo); @@ -891,16 +919,7 @@ extern void vmw_bo_move_notify(struct ttm_buffer_object *bo, struct ttm_resource *mem); extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo); extern struct vmw_buffer_object * -vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle); - -/** - * vmw_user_bo_noref_release - release a buffer object pointer looked up - * without reference - */ -static inline void vmw_user_bo_noref_release(void) -{ - ttm_base_object_noref_release(); -} +vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle); /** * vmw_bo_adjust_prio - Adjust the buffer object eviction priority @@ -952,6 +971,19 @@ static inline void vmw_bo_prio_del(struct vmw_buffer_object *vbo, int prio) } /** + * GEM related functionality - vmwgfx_gem.c + */ +extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv, + struct drm_file *filp, + uint32_t size, + uint32_t *handle, + struct vmw_buffer_object **p_vbo); +extern int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); +extern void vmw_gem_destroy(struct ttm_buffer_object *bo); +extern void vmw_debugfs_gem_init(struct vmw_private *vdev); + +/** * Misc Ioctl functionality - vmwgfx_ioctl.c */ @@ -1027,9 +1059,6 @@ vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv) extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma); -extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, - size_t gran); - /** * TTM buffer object driver - vmwgfx_ttm_buffer.c */ @@ -1039,7 +1068,6 @@ extern struct ttm_placement vmw_vram_placement; extern struct ttm_placement vmw_vram_sys_placement; extern struct ttm_placement vmw_vram_gmr_placement; extern struct ttm_placement vmw_sys_placement; -extern struct ttm_placement vmw_evictable_placement; extern struct ttm_placement vmw_srf_placement; extern struct ttm_placement vmw_mob_placement; extern struct ttm_placement vmw_nonfixed_placement; @@ -1218,13 +1246,6 @@ void vmw_kms_lost_device(struct drm_device *dev); int vmw_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); - -int vmw_dumb_map_offset(struct drm_file *file_priv, - struct drm_device *dev, uint32_t handle, - uint64_t *offset); -int vmw_dumb_destroy(struct drm_file *file_priv, - struct drm_device *dev, - uint32_t handle); extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible); extern void vmw_resource_unpin(struct vmw_resource *res); extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res); @@ -1252,6 +1273,12 @@ int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type); void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type); /** + * System memory manager + */ +int vmw_sys_man_init(struct vmw_private *dev_priv); +void vmw_sys_man_fini(struct vmw_private *dev_priv); + +/** * Prime - vmwgfx_prime.c */ @@ -1322,18 +1349,6 @@ extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -int vmw_surface_gb_priv_define(struct drm_device *dev, - uint32_t user_accounting_size, - SVGA3dSurfaceAllFlags svga3d_flags, - SVGA3dSurfaceFormat 
format, - bool for_scanout, - uint32_t num_mip_levels, - uint32_t multisample_count, - uint32_t array_size, - struct drm_vmw_size size, - SVGA3dMSPattern multisample_pattern, - SVGA3dMSQualityLevel quality_level, - struct vmw_surface **srf_out); extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -1342,7 +1357,6 @@ extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, struct drm_file *file_priv); int vmw_gb_surface_define(struct vmw_private *dev_priv, - uint32_t user_accounting_size, const struct vmw_surface_metadata *req, struct vmw_surface **srf_out); @@ -1403,7 +1417,6 @@ void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv, extern struct vmw_cmdbuf_res_manager * vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv); extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man); -extern size_t vmw_cmdbuf_res_man_size(void); extern struct vmw_resource * vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man, enum vmw_cmdbuf_res_type res_type, @@ -1600,11 +1613,6 @@ vmw_bo_reference(struct vmw_buffer_object *buf) return buf; } -static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv) -{ - return &ttm_mem_glob; -} - static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv) { atomic_inc(&dev_priv->num_fifo_resources); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 5f2ffa9de5c8..44ca23b0ea4e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -1171,14 +1171,13 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, int ret; vmw_validation_preload_bo(sw_context->ctx); - vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle); - if (IS_ERR(vmw_bo)) { + vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle); + if (IS_ERR_OR_NULL(vmw_bo)) { VMW_DEBUG_USER("Could not find or use MOB buffer.\n"); return PTR_ERR(vmw_bo); } - ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false); - vmw_user_bo_noref_release(); + ttm_bo_put(&vmw_bo->base); if (unlikely(ret != 0)) return ret; @@ -1226,14 +1225,13 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, int ret; vmw_validation_preload_bo(sw_context->ctx); - vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle); - if (IS_ERR(vmw_bo)) { + vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle); + if (IS_ERR_OR_NULL(vmw_bo)) { VMW_DEBUG_USER("Could not find or use GMR region.\n"); return PTR_ERR(vmw_bo); } - ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false); - vmw_user_bo_noref_release(); + ttm_bo_put(&vmw_bo->base); if (unlikely(ret != 0)) return ret; @@ -2166,6 +2164,44 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, } /** + * vmw_cmd_dx_set_constant_buffer_offset - Validate + * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command. + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. 
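+ *
+ * Return: Zero on success, negative error code on failure.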
+ */ +static int +vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetConstantBufferOffset); + + struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); + u32 shader_slot; + + if (!has_sm5_context(dev_priv)) + return -EINVAL; + + if (!ctx_node) + return -EINVAL; + + cmd = container_of(header, typeof(*cmd), header); + if (cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) { + VMW_DEBUG_USER("Illegal const buffer slot %u.\n", + (unsigned int) cmd->body.slot); + return -EINVAL; + } + + shader_slot = cmd->header.id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET; + vmw_binding_cb_offset_update(ctx_node->staged, shader_slot, + cmd->body.slot, cmd->body.offsetInBytes); + + return 0; +} + +/** * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES * command * @@ -2918,7 +2954,7 @@ static int vmw_cmd_set_uav(struct vmw_private *dev_priv, if (!has_sm5_context(dev_priv)) return -EINVAL; - if (num_uav > SVGA3D_MAX_UAVIEWS) { + if (num_uav > vmw_max_num_uavs(dev_priv)) { VMW_DEBUG_USER("Invalid UAV binding.\n"); return -EINVAL; } @@ -2950,7 +2986,7 @@ static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv, if (!has_sm5_context(dev_priv)) return -EINVAL; - if (num_uav > SVGA3D_MAX_UAVIEWS) { + if (num_uav > vmw_max_num_uavs(dev_priv)) { VMW_DEBUG_USER("Invalid UAV binding.\n"); return -EINVAL; } @@ -3528,6 +3564,24 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER, &vmw_cmd_dx_transfer_from_buffer, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET, + &vmw_cmd_dx_set_constant_buffer_offset, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET, + &vmw_cmd_dx_set_constant_buffer_offset, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET, + &vmw_cmd_dx_set_constant_buffer_offset, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET, + &vmw_cmd_dx_set_constant_buffer_offset, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET, + &vmw_cmd_dx_set_constant_buffer_offset, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET, + &vmw_cmd_dx_set_constant_buffer_offset, + true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy, true, false, true), @@ -3561,6 +3615,8 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { &vmw_cmd_dx_define_streamoutput, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT, &vmw_cmd_dx_bind_streamoutput, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2, + &vmw_cmd_dx_so_define, true, false, true), }; bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd) @@ -3869,8 +3925,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, fence_rep.fd = -1; } - ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle, - TTM_REF_USAGE); + ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle); VMW_DEBUG_USER("Fence copy error. 
Syncing.\n"); (void) vmw_fence_obj_wait(fence, false, false, VMW_FENCE_WAIT_TIMEOUT); @@ -4054,8 +4109,6 @@ int vmw_execbuf_process(struct drm_file *file_priv, struct sync_file *sync_file = NULL; DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1); - vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm); - if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) { out_fence_fd = get_unused_fd_flags(O_CLOEXEC); if (out_fence_fd < 0) { @@ -4101,6 +4154,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, sw_context->kernel = true; } + sw_context->filp = file_priv; sw_context->fp = vmw_fpriv(file_priv); INIT_LIST_HEAD(&sw_context->ctx_list); sw_context->cur_query_bo = dev_priv->pinned_bo; @@ -4117,7 +4171,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, vmw_binding_state_reset(sw_context->staged_bindings); if (!sw_context->res_ht_initialized) { - ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER); + ret = vmwgfx_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER); if (unlikely(ret != 0)) goto out_unlock; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index d18c6a56e3dc..8ee34576c7d0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -394,22 +394,15 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv, struct vmw_buffer_object *vmw_bo; int ret; - vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL); - if (!vmw_bo) { - ret = -ENOMEM; - goto err_unlock; - } - - ret = vmw_bo_init(vmw_priv, vmw_bo, size, + ret = vmw_bo_create(vmw_priv, size, &vmw_sys_placement, false, false, - &vmw_bo_bo_free); + &vmw_bo_bo_free, &vmw_bo); if (unlikely(ret != 0)) - goto err_unlock; /* init frees the buffer on failure */ + return ret; *out = vmw_bo; -err_unlock: return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 9fe12329a4d5..c60d395f9e2e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -37,9 +37,6 @@ struct vmw_fence_manager { spinlock_t lock; struct list_head fence_list; struct work_struct work; - u32 user_fence_size; - u32 fence_size; - u32 event_fence_action_size; bool fifo_down; struct list_head cleanup_list; uint32_t pending_actions[VMW_ACTION_MAX]; @@ -304,11 +301,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv) INIT_LIST_HEAD(&fman->cleanup_list); INIT_WORK(&fman->work, &vmw_fence_work_func); fman->fifo_down = true; - fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) + - TTM_OBJ_EXTRA_SIZE; - fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj)); - fman->event_fence_action_size = - ttm_round_pot(sizeof(struct vmw_event_fence_action)); mutex_init(&fman->goal_irq_mutex); fman->ctx = dma_fence_context_alloc(1); @@ -560,14 +552,8 @@ static void vmw_user_fence_destroy(struct vmw_fence_obj *fence) { struct vmw_user_fence *ufence = container_of(fence, struct vmw_user_fence, fence); - struct vmw_fence_manager *fman = fman_from_fence(fence); ttm_base_object_kfree(ufence, base); - /* - * Free kernel space accounting. 
- */ - ttm_mem_global_free(vmw_mem_glob(fman->dev_priv), - fman->user_fence_size); } static void vmw_user_fence_base_release(struct ttm_base_object **p_base) @@ -590,23 +576,8 @@ int vmw_user_fence_create(struct drm_file *file_priv, struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct vmw_user_fence *ufence; struct vmw_fence_obj *tmp; - struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv); - struct ttm_operation_ctx ctx = { - .interruptible = false, - .no_wait_gpu = false - }; int ret; - /* - * Kernel memory space accounting, since this object may - * be created by a user-space request. - */ - - ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size, - &ctx); - if (unlikely(ret != 0)) - return ret; - ufence = kzalloc(sizeof(*ufence), GFP_KERNEL); if (unlikely(!ufence)) { ret = -ENOMEM; @@ -625,9 +596,10 @@ int vmw_user_fence_create(struct drm_file *file_priv, * vmw_user_fence_base_release. */ tmp = vmw_fence_obj_reference(&ufence->fence); + ret = ttm_base_object_init(tfile, &ufence->base, false, VMW_RES_FENCE, - &vmw_user_fence_base_release, NULL); + &vmw_user_fence_base_release); if (unlikely(ret != 0)) { @@ -646,7 +618,6 @@ out_err: tmp = &ufence->fence; vmw_fence_obj_unreference(&tmp); out_no_object: - ttm_mem_global_free(mem_glob, fman->user_fence_size); return ret; } @@ -831,8 +802,7 @@ out: */ if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF)) - return ttm_ref_object_base_unref(tfile, arg->handle, - TTM_REF_USAGE); + return ttm_ref_object_base_unref(tfile, arg->handle); return ret; } @@ -874,8 +844,7 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data, (struct drm_vmw_fence_arg *) data; return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, - arg->handle, - TTM_REF_USAGE); + arg->handle); } /** @@ -1121,7 +1090,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, if (user_fence_rep != NULL) { ret = ttm_ref_object_add(vmw_fp->tfile, base, - TTM_REF_USAGE, NULL, false); + NULL, false); if (unlikely(ret != 0)) { DRM_ERROR("Failed to reference a fence " "object.\n"); @@ -1164,7 +1133,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, return 0; out_no_create: if (user_fence_rep != NULL) - ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE); + ttm_ref_object_base_unref(tfile, handle); out_no_ref_obj: vmw_fence_obj_unreference(&fence); return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c new file mode 100644 index 000000000000..c016c434b6cb --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c @@ -0,0 +1,294 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright 2021 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include "vmwgfx_drv.h" + +#include "drm/drm_prime.h" +#include "drm/drm_gem_ttm_helper.h" + +/** + * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct + * vmw_buffer_object. + * + * @bo: Pointer to the TTM buffer object. + * Return: Pointer to the struct vmw_buffer_object embedding the + * TTM buffer object. + */ +static struct vmw_buffer_object * +vmw_buffer_object(struct ttm_buffer_object *bo) +{ + return container_of(bo, struct vmw_buffer_object, base); +} + +static void vmw_gem_object_free(struct drm_gem_object *gobj) +{ + struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj); + if (bo) { + ttm_bo_put(bo); + } +} + +static int vmw_gem_object_open(struct drm_gem_object *obj, + struct drm_file *file_priv) +{ + return 0; +} + +static void vmw_gem_object_close(struct drm_gem_object *obj, + struct drm_file *file_priv) +{ +} + +static int vmw_gem_pin_private(struct drm_gem_object *obj, bool do_pin) +{ + struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj); + struct vmw_buffer_object *vbo = vmw_buffer_object(bo); + int ret; + + ret = ttm_bo_reserve(bo, false, false, NULL); + if (unlikely(ret != 0)) + goto err; + + vmw_bo_pin_reserved(vbo, do_pin); + + ttm_bo_unreserve(bo); + +err: + return ret; +} + + +static int vmw_gem_object_pin(struct drm_gem_object *obj) +{ + return vmw_gem_pin_private(obj, true); +} + +static void vmw_gem_object_unpin(struct drm_gem_object *obj) +{ + vmw_gem_pin_private(obj, false); +} + +static struct sg_table *vmw_gem_object_get_sg_table(struct drm_gem_object *obj) +{ + struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj); + struct vmw_ttm_tt *vmw_tt = + container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm); + + if (vmw_tt->vsgt.sgt) + return vmw_tt->vsgt.sgt; + + return drm_prime_pages_to_sg(obj->dev, vmw_tt->dma_ttm.pages, vmw_tt->dma_ttm.num_pages); +} + + +static const struct drm_gem_object_funcs vmw_gem_object_funcs = { + .free = vmw_gem_object_free, + .open = vmw_gem_object_open, + .close = vmw_gem_object_close, + .print_info = drm_gem_ttm_print_info, + .pin = vmw_gem_object_pin, + .unpin = vmw_gem_object_unpin, + .get_sg_table = vmw_gem_object_get_sg_table, + .vmap = drm_gem_ttm_vmap, + .vunmap = drm_gem_ttm_vunmap, + .mmap = drm_gem_ttm_mmap, +}; + +/** + * vmw_gem_destroy - vmw buffer object destructor + * + * @bo: Pointer to the embedded struct ttm_buffer_object + */ +void vmw_gem_destroy(struct ttm_buffer_object *bo) +{ + struct vmw_buffer_object *vbo = vmw_buffer_object(bo); + + WARN_ON(vbo->dirty); + WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree)); + vmw_bo_unmap(vbo); + drm_gem_object_release(&vbo->base.base); + kfree(vbo); +} + +int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv, + struct drm_file *filp, + uint32_t size, + uint32_t *handle, + struct vmw_buffer_object **p_vbo) +{ + int ret; + + ret = vmw_bo_create(dev_priv, size, + (dev_priv->has_mob) ? 
+				    &vmw_sys_placement :
+				    &vmw_vram_sys_placement,
+			    true, false, &vmw_gem_destroy, p_vbo);
+	if (ret != 0)
+		goto out_no_bo;
+
+	(*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
+
+	ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle);
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_put(&(*p_vbo)->base.base);
+out_no_bo:
+	return ret;
+}
+
+
+int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	union drm_vmw_alloc_dmabuf_arg *arg =
+	    (union drm_vmw_alloc_dmabuf_arg *)data;
+	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
+	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
+	struct vmw_buffer_object *vbo;
+	uint32_t handle;
+	int ret;
+
+	ret = vmw_gem_object_create_with_handle(dev_priv, filp,
+						req->size, &handle, &vbo);
+	if (ret)
+		goto out_no_bo;
+
+	rep->handle = handle;
+	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
+	rep->cur_gmr_id = handle;
+	rep->cur_gmr_offset = 0;
+out_no_bo:
+	return ret;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static void vmw_bo_print_info(int id, struct vmw_buffer_object *bo, struct seq_file *m)
+{
+	const char *placement;
+	const char *type;
+
+	switch (bo->base.resource->mem_type) {
+	case TTM_PL_SYSTEM:
+		placement = " CPU";
+		break;
+	case VMW_PL_GMR:
+		placement = " GMR";
+		break;
+	case VMW_PL_MOB:
+		placement = " MOB";
+		break;
+	case VMW_PL_SYSTEM:
+		placement = "VCPU";
+		break;
+	case TTM_PL_VRAM:
+		placement = "VRAM";
+		break;
+	default:
+		placement = "None";
+		break;
+	}
+
+	switch (bo->base.type) {
+	case ttm_bo_type_device:
+		type = "device";
+		break;
+	case ttm_bo_type_kernel:
+		type = "kernel";
+		break;
+	case ttm_bo_type_sg:
+		type = "sg    ";
+		break;
+	default:
+		type = "none  ";
+		break;
+	}
+
+	seq_printf(m, "\t\t0x%08x: %12ld bytes %s, type = %s",
+		   id, bo->base.base.size, placement, type);
+	seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d, TTM refs = %d",
+		   bo->base.priority,
+		   bo->base.pin_count,
+		   kref_read(&bo->base.base.refcount),
+		   kref_read(&bo->base.kref));
+	seq_puts(m, "\n");
+}
+
+static int vmw_debugfs_gem_info_show(struct seq_file *m, void *unused)
+{
+	struct vmw_private *vdev = (struct vmw_private *)m->private;
+	struct drm_device *dev = &vdev->drm;
+	struct drm_file *file;
+	int r;
+
+	r = mutex_lock_interruptible(&dev->filelist_mutex);
+	if (r)
+		return r;
+
+	list_for_each_entry(file, &dev->filelist, lhead) {
+		struct task_struct *task;
+		struct drm_gem_object *gobj;
+		int id;
+
+		/*
+		 * Although we have a valid reference on file->pid, that does
+		 * not guarantee that the task_struct who called get_pid() is
+		 * still alive (e.g. get_pid(current) => fork() => exit()).
+		 * Therefore, we need to protect this ->comm access using RCU.
+		 */
+		rcu_read_lock();
+		task = pid_task(file->pid, PIDTYPE_PID);
+		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
+			   task ?
task->comm : "<unknown>"); + rcu_read_unlock(); + + spin_lock(&file->table_lock); + idr_for_each_entry(&file->object_idr, gobj, id) { + struct vmw_buffer_object *bo = gem_to_vmw_bo(gobj); + + vmw_bo_print_info(id, bo, m); + } + spin_unlock(&file->table_lock); + } + + mutex_unlock(&dev->filelist_mutex); + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(vmw_debugfs_gem_info); + +#endif + +void vmw_debugfs_gem_init(struct vmw_private *vdev) +{ +#if defined(CONFIG_DEBUG_FS) + struct drm_minor *minor = vdev->drm.primary; + struct dentry *root = minor->debugfs_root; + + debugfs_create_file("vmwgfx_gem_info", 0444, root, vdev, + &vmw_debugfs_gem_info_fops); +#endif +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index b2c4af331c9d..ebb4505a31a3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -42,6 +42,7 @@ struct vmwgfx_gmrid_man { uint32_t max_gmr_ids; uint32_t max_gmr_pages; uint32_t used_gmr_pages; + uint8_t type; }; static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *man) @@ -132,6 +133,18 @@ static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man, kfree(res); } +static void vmw_gmrid_man_debug(struct ttm_resource_manager *man, + struct drm_printer *printer) +{ + struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man); + + BUG_ON(gman->type != VMW_PL_GMR && gman->type != VMW_PL_MOB); + + drm_printf(printer, "%s's used: %u pages, max: %u pages, %u id's\n", + (gman->type == VMW_PL_MOB) ? "Mob" : "GMR", + gman->used_gmr_pages, gman->max_gmr_pages, gman->max_gmr_ids); +} + static const struct ttm_resource_manager_func vmw_gmrid_manager_func; int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type) @@ -146,12 +159,12 @@ int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type) man = &gman->manager; man->func = &vmw_gmrid_manager_func; - /* TODO: This is most likely not correct */ man->use_tt = true; ttm_resource_manager_init(man, 0); spin_lock_init(&gman->lock); gman->used_gmr_pages = 0; ida_init(&gman->gmr_ida); + gman->type = type; switch (type) { case VMW_PL_GMR: @@ -190,4 +203,5 @@ void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type) static const struct ttm_resource_manager_func vmw_gmrid_manager_func = { .alloc = vmw_gmrid_man_get_node, .free = vmw_gmrid_man_put_node, + .debug = vmw_gmrid_man_debug }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c new file mode 100644 index 000000000000..06aebc12774e --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c @@ -0,0 +1,199 @@ +/* + * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * Simple open hash tab implementation. + * + * Authors: + * Thomas Hellström <thomas-at-tungstengraphics-dot-com> + */ + +#include <linux/export.h> +#include <linux/hash.h> +#include <linux/mm.h> +#include <linux/rculist.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> + +#include <drm/drm_print.h> + +#include "vmwgfx_hashtab.h" + +int vmwgfx_ht_create(struct vmwgfx_open_hash *ht, unsigned int order) +{ + unsigned int size = 1 << order; + + ht->order = order; + ht->table = NULL; + if (size <= PAGE_SIZE / sizeof(*ht->table)) + ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL); + else + ht->table = vzalloc(array_size(size, sizeof(*ht->table))); + if (!ht->table) { + DRM_ERROR("Out of memory for hash table\n"); + return -ENOMEM; + } + return 0; +} + +void vmwgfx_ht_verbose_list(struct vmwgfx_open_hash *ht, unsigned long key) +{ + struct vmwgfx_hash_item *entry; + struct hlist_head *h_list; + unsigned int hashed_key; + int count = 0; + + hashed_key = hash_long(key, ht->order); + DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key); + h_list = &ht->table[hashed_key]; + hlist_for_each_entry(entry, h_list, head) + DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key); +} + +static struct hlist_node *vmwgfx_ht_find_key(struct vmwgfx_open_hash *ht, unsigned long key) +{ + struct vmwgfx_hash_item *entry; + struct hlist_head *h_list; + unsigned int hashed_key; + + hashed_key = hash_long(key, ht->order); + h_list = &ht->table[hashed_key]; + hlist_for_each_entry(entry, h_list, head) { + if (entry->key == key) + return &entry->head; + if (entry->key > key) + break; + } + return NULL; +} + +static struct hlist_node *vmwgfx_ht_find_key_rcu(struct vmwgfx_open_hash *ht, unsigned long key) +{ + struct vmwgfx_hash_item *entry; + struct hlist_head *h_list; + unsigned int hashed_key; + + hashed_key = hash_long(key, ht->order); + h_list = &ht->table[hashed_key]; + hlist_for_each_entry_rcu(entry, h_list, head) { + if (entry->key == key) + return &entry->head; + if (entry->key > key) + break; + } + return NULL; +} + +int vmwgfx_ht_insert_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item) +{ + struct vmwgfx_hash_item *entry; + struct hlist_head *h_list; + struct hlist_node *parent; + unsigned int hashed_key; + unsigned long key = item->key; + + hashed_key = hash_long(key, ht->order); + h_list = &ht->table[hashed_key]; + parent = NULL; + hlist_for_each_entry(entry, h_list, head) { + if (entry->key == key) + return -EINVAL; + if (entry->key > key) + break; + parent = &entry->head; + } + if (parent) + hlist_add_behind_rcu(&item->head, parent); + else + hlist_add_head_rcu(&item->head, h_list); + return 0; +} + +/* + * Just insert an item and return any "bits" bit key that hasn't been + * used before. 
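+ *
+ * For illustration (the caller-side names here are hypothetical), a
+ * user that wants a previously unused 24-bit key, with no shift and no
+ * offset applied, would call:
+ *
+ *	ret = vmwgfx_ht_just_insert_please(ht, &item->hash,
+ *					   (unsigned long)item, 24, 0, 0);
+ *	if (!ret)
+ *		handle = item->hash.key;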
+ */ +int vmwgfx_ht_just_insert_please(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item, + unsigned long seed, int bits, int shift, + unsigned long add) +{ + int ret; + unsigned long mask = (1UL << bits) - 1; + unsigned long first, unshifted_key; + + unshifted_key = hash_long(seed, bits); + first = unshifted_key; + do { + item->key = (unshifted_key << shift) + add; + ret = vmwgfx_ht_insert_item(ht, item); + if (ret) + unshifted_key = (unshifted_key + 1) & mask; + } while (ret && (unshifted_key != first)); + + if (ret) { + DRM_ERROR("Available key bit space exhausted\n"); + return -EINVAL; + } + return 0; +} + +int vmwgfx_ht_find_item(struct vmwgfx_open_hash *ht, unsigned long key, + struct vmwgfx_hash_item **item) +{ + struct hlist_node *list; + + list = vmwgfx_ht_find_key_rcu(ht, key); + if (!list) + return -EINVAL; + + *item = hlist_entry(list, struct vmwgfx_hash_item, head); + return 0; +} + +int vmwgfx_ht_remove_key(struct vmwgfx_open_hash *ht, unsigned long key) +{ + struct hlist_node *list; + + list = vmwgfx_ht_find_key(ht, key); + if (list) { + hlist_del_init_rcu(list); + return 0; + } + return -EINVAL; +} + +int vmwgfx_ht_remove_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item) +{ + hlist_del_init_rcu(&item->head); + return 0; +} + +void vmwgfx_ht_remove(struct vmwgfx_open_hash *ht) +{ + if (ht->table) { + kvfree(ht->table); + ht->table = NULL; + } +} diff --git a/include/drm/drm_hashtab.h b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h index bb95ff011baf..a9ce12922e21 100644 --- a/include/drm/drm_hashtab.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h @@ -1,5 +1,4 @@ -/************************************************************************** - * +/* * Copyright 2006 Tungsten Graphics, Inc., Bismack, ND. USA. * All Rights Reserved. * @@ -22,9 +21,8 @@ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - * - **************************************************************************/ + */ + /* * Simple open hash tab implementation. * @@ -32,48 +30,54 @@ * Thomas Hellström <thomas-at-tungstengraphics-dot-com> */ -#ifndef DRM_HASHTAB_H -#define DRM_HASHTAB_H +/* + * TODO: Replace this hashtable with Linux' generic implementation + * from <linux/hashtable.h>. 
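+ *
+ * A rough sketch of that conversion, assuming the table order is known
+ * at compile time, would use the generic fixed-size table and its RCU
+ * aware helpers:
+ *
+ *	DEFINE_HASHTABLE(table, VMW_CMDBUF_RES_MAN_HT_ORDER);
+ *
+ *	hash_add_rcu(table, &item->head, item->key);
+ *	hash_for_each_possible_rcu(table, entry, head, key)
+ *		if (entry->key == key)
+ *			break;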
+ */ + +#ifndef VMWGFX_HASHTAB_H +#define VMWGFX_HASHTAB_H #include <linux/list.h> #define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) -struct drm_hash_item { +struct vmwgfx_hash_item { struct hlist_node head; unsigned long key; }; -struct drm_open_hash { +struct vmwgfx_open_hash { struct hlist_head *table; u8 order; }; -int drm_ht_create(struct drm_open_hash *ht, unsigned int order); -int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item); -int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item, - unsigned long seed, int bits, int shift, - unsigned long add); -int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item); +int vmwgfx_ht_create(struct vmwgfx_open_hash *ht, unsigned int order); +int vmwgfx_ht_insert_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item); +int vmwgfx_ht_just_insert_please(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item, + unsigned long seed, int bits, int shift, + unsigned long add); +int vmwgfx_ht_find_item(struct vmwgfx_open_hash *ht, unsigned long key, + struct vmwgfx_hash_item **item); -void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key); -int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key); -int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item); -void drm_ht_remove(struct drm_open_hash *ht); +void vmwgfx_ht_verbose_list(struct vmwgfx_open_hash *ht, unsigned long key); +int vmwgfx_ht_remove_key(struct vmwgfx_open_hash *ht, unsigned long key); +int vmwgfx_ht_remove_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item); +void vmwgfx_ht_remove(struct vmwgfx_open_hash *ht); /* * RCU-safe interface * * The user of this API needs to make sure that two or more instances of the * hash table manipulation functions are never run simultaneously. - * The lookup function drm_ht_find_item_rcu may, however, run simultaneously + * The lookup function vmwgfx_ht_find_item_rcu may, however, run simultaneously * with any of the manipulation functions as long as it's called from within * an RCU read-locked section. 
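+ *
+ * A typical lookup from a read-side critical section then looks like
+ * (sketch only):
+ *
+ *	rcu_read_lock();
+ *	if (!vmwgfx_ht_find_item_rcu(ht, key, &hash))
+ *		entry = drm_hash_entry(hash, struct vmw_cmdbuf_res, hash);
+ *	rcu_read_unlock();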
*/ -#define drm_ht_insert_item_rcu drm_ht_insert_item -#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please -#define drm_ht_remove_key_rcu drm_ht_remove_key -#define drm_ht_remove_item_rcu drm_ht_remove_item -#define drm_ht_find_item_rcu drm_ht_find_item +#define vmwgfx_ht_insert_item_rcu vmwgfx_ht_insert_item +#define vmwgfx_ht_just_insert_please_rcu vmwgfx_ht_just_insert_please +#define vmwgfx_ht_remove_key_rcu vmwgfx_ht_remove_key +#define vmwgfx_ht_remove_item_rcu vmwgfx_ht_remove_item +#define vmwgfx_ht_find_item_rcu vmwgfx_ht_find_item #endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 28af34ab6ed6..471da2b4c177 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -105,6 +105,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, case DRM_VMW_PARAM_SM5: param->value = has_sm5_context(dev_priv); break; + case DRM_VMW_PARAM_GL43: + param->value = has_gl43_context(dev_priv); + break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 74fa41909213..4e693e8de2c3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -843,8 +843,6 @@ static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) drm_framebuffer_cleanup(framebuffer); vmw_surface_unreference(&vfbs->surface); - if (vfbs->base.user_obj) - ttm_base_object_unref(&vfbs->base.user_obj); kfree(vfbs); } @@ -989,6 +987,16 @@ out_err1: * Buffer-object framebuffer code */ +static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb, + struct drm_file *file_priv, + unsigned int *handle) +{ + struct vmw_framebuffer_bo *vfbd = + vmw_framebuffer_to_vfbd(fb); + + return drm_gem_handle_create(file_priv, &vfbd->buffer->base.base, handle); +} + static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer) { struct vmw_framebuffer_bo *vfbd = @@ -996,8 +1004,6 @@ static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer) drm_framebuffer_cleanup(framebuffer); vmw_bo_unreference(&vfbd->buffer); - if (vfbd->base.user_obj) - ttm_base_object_unref(&vfbd->base.user_obj); kfree(vfbd); } @@ -1063,6 +1069,7 @@ static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer, } static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = { + .create_handle = vmw_framebuffer_bo_create_handle, .destroy = vmw_framebuffer_bo_destroy, .dirty = vmw_framebuffer_bo_dirty_ext, }; @@ -1188,7 +1195,7 @@ static int vmw_create_bo_proxy(struct drm_device *dev, metadata.base_size.depth = 1; metadata.scanout = true; - ret = vmw_gb_surface_define(vmw_priv(dev), 0, &metadata, srf_out); + ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out); if (ret) { DRM_ERROR("Failed to allocate proxy content buffer\n"); return ret; @@ -1251,6 +1258,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv, goto out_err1; } + vfbd->base.base.obj[0] = &bo->base.base; drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd); vfbd->base.bo = true; vfbd->buffer = vmw_bo_reference(bo); @@ -1368,34 +1376,13 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd) { struct vmw_private *dev_priv = vmw_priv(dev); - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct vmw_framebuffer *vfb = NULL; struct vmw_surface *surface = NULL; struct vmw_buffer_object *bo = NULL; - struct ttm_base_object 
*user_obj; int ret; - /* - * Take a reference on the user object of the resource - * backing the kms fb. This ensures that user-space handle - * lookups on that resource will always work as long as - * it's registered with a kms framebuffer. This is important, - * since vmw_execbuf_process identifies resources in the - * command stream using user-space handles. - */ - - user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]); - if (unlikely(user_obj == NULL)) { - DRM_ERROR("Could not locate requested kms frame buffer.\n"); - return ERR_PTR(-ENOENT); - } - - /** - * End conditioned code. - */ - /* returns either a bo or surface */ - ret = vmw_user_lookup_handle(dev_priv, tfile, + ret = vmw_user_lookup_handle(dev_priv, file_priv, mode_cmd->handles[0], &surface, &bo); if (ret) @@ -1428,10 +1415,8 @@ err_out: if (ret) { DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret); - ttm_base_object_unref(&user_obj); return ERR_PTR(ret); - } else - vfb->user_obj = user_obj; + } return &vfb->base; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index bbc809f7bd8a..4d36e8507380 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -219,7 +219,6 @@ struct vmw_framebuffer { int (*pin)(struct vmw_framebuffer *fb); int (*unpin)(struct vmw_framebuffer *fb); bool bo; - struct ttm_base_object *user_obj; uint32_t user_handle; }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index f9394207dd3c..2d91a44a3b22 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c @@ -146,9 +146,6 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv, if (otable->size <= PAGE_SIZE) { mob->pt_level = VMW_MOBFMT_PTDEPTH_0; mob->pt_root_page = vmw_piter_dma_addr(&iter); - } else if (vsgt->num_regions == 1) { - mob->pt_level = SVGA3D_MOBFMT_RANGE; - mob->pt_root_page = vmw_piter_dma_addr(&iter); } else { ret = vmw_mob_pt_populate(dev_priv, mob); if (unlikely(ret != 0)) @@ -413,10 +410,9 @@ struct vmw_mob *vmw_mob_create(unsigned long data_pages) * @mob: Pointer to the mob the pagetable of which we want to * populate. * - * This function allocates memory to be used for the pagetable, and - * adjusts TTM memory accounting accordingly. Returns ENOMEM if - * memory resources aren't sufficient and may cause TTM buffer objects - * to be swapped out by using the TTM memory accounting function. + * This function allocates memory to be used for the pagetable. + * Returns ENOMEM if memory resources aren't sufficient and may + * cause TTM buffer objects to be swapped out. 
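+ *
+ * Note that callers only reach this function when the MOB spans more than
+ * a single page; one-page MOBs use VMW_MOBFMT_PTDEPTH_0 with the page
+ * itself as the root and need no pagetable (see the guards in
+ * vmw_setup_otable_base() and vmw_mob_bind()).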
*/ static int vmw_mob_pt_populate(struct vmw_private *dev_priv, struct vmw_mob *mob) @@ -624,9 +620,6 @@ int vmw_mob_bind(struct vmw_private *dev_priv, if (likely(num_data_pages == 1)) { mob->pt_level = VMW_MOBFMT_PTDEPTH_0; mob->pt_root_page = vmw_piter_dma_addr(&data_iter); - } else if (vsgt->num_regions == 1) { - mob->pt_level = SVGA3D_MOBFMT_RANGE; - mob->pt_root_page = vmw_piter_dma_addr(&data_iter); } else if (unlikely(mob->pt_bo == NULL)) { ret = vmw_mob_pt_populate(dev_priv, mob); if (unlikely(ret != 0)) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index 54c5d16eb3b7..e9f5c89b4ca6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c @@ -451,7 +451,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data, goto out_unlock; } - ret = vmw_user_bo_lookup(tfile, arg->handle, &buf, NULL); + ret = vmw_user_bo_lookup(file_priv, arg->handle, &buf); if (ret) goto out_unlock; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c index 922317d1acc8..7bc99b1279f7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c @@ -57,7 +57,6 @@ enum vmw_bo_dirty_method { * @ref_count: Reference count for this structure * @bitmap_size: The size of the bitmap in bits. Typically equal to the number of pages in the bo. - * @size: The accounting size for this struct. * @bitmap: A bitmap where each bit represents a page. A set bit means a dirty page. */ @@ -68,7 +67,6 @@ struct vmw_bo_dirty { unsigned int change_count; unsigned int ref_count; unsigned long bitmap_size; - size_t size; unsigned long bitmap[]; }; @@ -233,12 +231,8 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo) { struct vmw_bo_dirty *dirty = vbo->dirty; pgoff_t num_pages = vbo->base.resource->num_pages; - size_t size, acc_size; + size_t size; int ret; - static struct ttm_operation_ctx ctx = { - .interruptible = false, - .no_wait_gpu = false - }; if (dirty) { dirty->ref_count++; @@ -246,20 +240,12 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo) } size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long); - acc_size = ttm_round_pot(size); - ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx); - if (ret) { - VMW_DEBUG_USER("Out of graphics memory for buffer object " - "dirty tracker.\n"); - return ret; - } dirty = kvzalloc(size, GFP_KERNEL); if (!dirty) { ret = -ENOMEM; goto out_no_dirty; } - dirty->size = acc_size; dirty->bitmap_size = num_pages; dirty->start = dirty->bitmap_size; dirty->end = 0; @@ -285,7 +271,6 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo) return 0; out_no_dirty: - ttm_mem_global_free(&ttm_mem_glob, acc_size); return ret; } @@ -304,10 +289,7 @@ void vmw_bo_dirty_release(struct vmw_buffer_object *vbo) struct vmw_bo_dirty *dirty = vbo->dirty; if (dirty && --dirty->ref_count == 0) { - size_t acc_size = dirty->size; - kvfree(dirty); - ttm_mem_global_free(&ttm_mem_glob, acc_size); vbo->dirty = NULL; } } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c index d9552a1efd13..2d72a5ee7c0c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c @@ -85,6 +85,5 @@ int vmw_prime_handle_to_fd(struct drm_device *dev, int *prime_fd) { struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c 
b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 8d1e869cc196..708899ba2102 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -320,11 +320,12 @@ vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv, * The pointers pointed at by out_surf and out_buf need to be NULL. */ int vmw_user_lookup_handle(struct vmw_private *dev_priv, - struct ttm_object_file *tfile, + struct drm_file *filp, uint32_t handle, struct vmw_surface **out_surf, struct vmw_buffer_object **out_buf) { + struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile; struct vmw_resource *res; int ret; @@ -339,7 +340,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv, } *out_surf = NULL; - ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL); + ret = vmw_user_bo_lookup(filp, handle, out_buf); return ret; } @@ -362,14 +363,10 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res, return 0; } - backup = kzalloc(sizeof(*backup), GFP_KERNEL); - if (unlikely(!backup)) - return -ENOMEM; - - ret = vmw_bo_init(res->dev_priv, backup, res->backup_size, - res->func->backup_placement, - interruptible, false, - &vmw_bo_bo_free); + ret = vmw_bo_create(res->dev_priv, res->backup_size, + res->func->backup_placement, + interruptible, false, + &vmw_bo_bo_free, &backup); if (unlikely(ret != 0)) goto out_no_bo; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index bd157fb21b45..3004c7a719e9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -442,19 +442,15 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, vps->bo_size = 0; } - vps->bo = kzalloc(sizeof(*vps->bo), GFP_KERNEL); - if (!vps->bo) - return -ENOMEM; - vmw_svga_enable(dev_priv); /* After we have alloced the backing store we might not be able to * resume the overlays; this is preferred to failing to alloc. */ vmw_overlay_pause_all(dev_priv); - ret = vmw_bo_init(dev_priv, vps->bo, size, - &vmw_vram_placement, - false, true, &vmw_bo_bo_free); + ret = vmw_bo_create(dev_priv, size, + &vmw_vram_placement, + false, true, &vmw_bo_bo_free, &vps->bo); vmw_overlay_resume_all(dev_priv); if (ret) { vps->bo = NULL; /* vmw_bo_create frees on error */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index b8dd62529104..108a496b5d18 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c @@ -53,10 +53,6 @@ struct vmw_dx_shader { struct list_head cotable_head; }; -static uint64_t vmw_user_shader_size; -static uint64_t vmw_shader_size; -static size_t vmw_shader_dx_size; - static void vmw_user_shader_free(struct vmw_resource *res); static struct vmw_resource * vmw_user_shader_base_to_res(struct ttm_base_object *base); @@ -79,7 +75,6 @@ static void vmw_dx_shader_commit_notify(struct vmw_resource *res, enum vmw_cmdbuf_res_state state); static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type); static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type); -static uint64_t vmw_user_shader_size; static const struct vmw_user_resource_conv user_shader_conv = { .object_type = VMW_RES_SHADER, @@ -563,16 +558,14 @@ void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv, * * @res: The shader resource * - * Frees the DX shader resource and updates memory accounting. + * Frees the DX shader resource. 
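+ * The shader's reference on its cotable is dropped and the struct
+ * vmw_dx_shader itself is kfree()d.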
*/ static void vmw_dx_shader_res_free(struct vmw_resource *res) { - struct vmw_private *dev_priv = res->dev_priv; struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res); vmw_resource_unreference(&shader->cotable); kfree(shader); - ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size); } /** @@ -594,30 +587,13 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man, struct vmw_dx_shader *shader; struct vmw_resource *res; struct vmw_private *dev_priv = ctx->dev_priv; - struct ttm_operation_ctx ttm_opt_ctx = { - .interruptible = true, - .no_wait_gpu = false - }; int ret; - if (!vmw_shader_dx_size) - vmw_shader_dx_size = ttm_round_pot(sizeof(*shader)); - if (!vmw_shader_id_ok(user_key, shader_type)) return -EINVAL; - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size, - &ttm_opt_ctx); - if (ret) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for shader " - "creation.\n"); - return ret; - } - shader = kmalloc(sizeof(*shader), GFP_KERNEL); if (!shader) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size); return -ENOMEM; } @@ -669,21 +645,15 @@ static void vmw_user_shader_free(struct vmw_resource *res) { struct vmw_user_shader *ushader = container_of(res, struct vmw_user_shader, shader.res); - struct vmw_private *dev_priv = res->dev_priv; ttm_base_object_kfree(ushader, base); - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_user_shader_size); } static void vmw_shader_free(struct vmw_resource *res) { struct vmw_shader *shader = vmw_res_to_shader(res); - struct vmw_private *dev_priv = res->dev_priv; kfree(shader); - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_shader_size); } /* @@ -706,8 +676,7 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - return ttm_ref_object_base_unref(tfile, arg->handle, - TTM_REF_USAGE); + return ttm_ref_object_base_unref(tfile, arg->handle); } static int vmw_user_shader_alloc(struct vmw_private *dev_priv, @@ -722,31 +691,10 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv, { struct vmw_user_shader *ushader; struct vmw_resource *res, *tmp; - struct ttm_operation_ctx ctx = { - .interruptible = true, - .no_wait_gpu = false - }; int ret; - if (unlikely(vmw_user_shader_size == 0)) - vmw_user_shader_size = - ttm_round_pot(sizeof(struct vmw_user_shader)) + - VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE; - - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), - vmw_user_shader_size, - &ctx); - if (unlikely(ret != 0)) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for shader " - "creation.\n"); - goto out; - } - ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); if (unlikely(!ushader)) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_user_shader_size); ret = -ENOMEM; goto out; } @@ -769,7 +717,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv, tmp = vmw_resource_reference(res); ret = ttm_base_object_init(tfile, &ushader->base, false, VMW_RES_SHADER, - &vmw_user_shader_base_release, NULL); + &vmw_user_shader_base_release); if (unlikely(ret != 0)) { vmw_resource_unreference(&tmp); @@ -793,31 +741,10 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv, { struct vmw_shader *shader; struct vmw_resource *res; - struct ttm_operation_ctx ctx = { - .interruptible = true, - .no_wait_gpu = false - }; int ret; - if (unlikely(vmw_shader_size == 0)) - vmw_shader_size = - ttm_round_pot(sizeof(struct 
vmw_shader)) + - VMW_IDA_ACC_SIZE; - - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), - vmw_shader_size, - &ctx); - if (unlikely(ret != 0)) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for shader " - "creation.\n"); - goto out_err; - } - shader = kzalloc(sizeof(*shader), GFP_KERNEL); if (unlikely(!shader)) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_shader_size); ret = -ENOMEM; goto out_err; } @@ -849,8 +776,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv, int ret; if (buffer_handle != SVGA3D_INVALID_ID) { - ret = vmw_user_bo_lookup(tfile, buffer_handle, - &buffer, NULL); + ret = vmw_user_bo_lookup(file_priv, buffer_handle, &buffer); if (unlikely(ret != 0)) { VMW_DEBUG_USER("Couldn't find buffer for shader creation.\n"); return ret; @@ -966,13 +892,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, if (!vmw_shader_id_ok(user_key, shader_type)) return -EINVAL; - /* Allocate and pin a DMA buffer */ - buf = kzalloc(sizeof(*buf), GFP_KERNEL); - if (unlikely(!buf)) - return -ENOMEM; - - ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_placement, - true, true, vmw_bo_bo_free); + ret = vmw_bo_create(dev_priv, size, &vmw_sys_placement, + true, true, vmw_bo_bo_free, &buf); if (unlikely(ret != 0)) goto out; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c index 33b69a70cfe3..483ad544ea54 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c @@ -32,12 +32,10 @@ * struct vmw_user_simple_resource - User-space simple resource struct * * @base: The TTM base object implementing user-space visibility. - * @account_size: How much memory was accounted for this object. * @simple: The embedded struct vmw_simple_resource. */ struct vmw_user_simple_resource { struct ttm_base_object base; - size_t account_size; struct vmw_simple_resource simple; /* * Nothing to be placed after @simple, since size of @simple is @@ -91,18 +89,15 @@ static int vmw_simple_resource_init(struct vmw_private *dev_priv, * * @res: The struct vmw_resource member of the simple resource object. * - * Frees memory and memory accounting for the object. + * Frees memory for the object. 
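+ * The containing struct vmw_user_simple_resource is released through
+ * ttm_base_object_kfree() on its embedded base object.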
*/ static void vmw_simple_resource_free(struct vmw_resource *res) { struct vmw_user_simple_resource *usimple = container_of(res, struct vmw_user_simple_resource, simple.res); - struct vmw_private *dev_priv = res->dev_priv; - size_t size = usimple->account_size; ttm_base_object_kfree(usimple, base); - ttm_mem_global_free(vmw_mem_glob(dev_priv), size); } /** @@ -149,39 +144,19 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data, struct vmw_resource *res; struct vmw_resource *tmp; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct ttm_operation_ctx ctx = { - .interruptible = true, - .no_wait_gpu = false - }; size_t alloc_size; - size_t account_size; int ret; alloc_size = offsetof(struct vmw_user_simple_resource, simple) + func->size; - account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE + - TTM_OBJ_EXTRA_SIZE; - - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), account_size, - &ctx); - if (ret) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for %s" - " creation.\n", func->res_func.type_name); - - goto out_ret; - } usimple = kzalloc(alloc_size, GFP_KERNEL); if (!usimple) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), - account_size); ret = -ENOMEM; goto out_ret; } usimple->simple.func = func; - usimple->account_size = account_size; res = &usimple->simple.res; usimple->base.shareable = false; usimple->base.tfile = NULL; @@ -197,7 +172,7 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data, tmp = vmw_resource_reference(res); ret = ttm_base_object_init(tfile, &usimple->base, false, func->ttm_res_type, - &vmw_simple_resource_base_release, NULL); + &vmw_simple_resource_base_release); if (ret) { vmw_resource_unreference(&tmp); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c index 9efb4463ce99..4ea32b01efc0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c @@ -279,18 +279,15 @@ static bool vmw_view_id_ok(u32 user_key, enum vmw_view_type view_type) * * @res: Pointer to a struct vmw_resource * - * Frees memory and memory accounting held by a struct vmw_view. + * Frees memory held by the struct vmw_view. 
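+ * The references on the view's cotable and source surface are dropped,
+ * and the view itself is freed with kfree_rcu() so that concurrent
+ * lockless lookups remain safe.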
*/ static void vmw_view_res_free(struct vmw_resource *res) { struct vmw_view *view = vmw_view(res); - size_t size = offsetof(struct vmw_view, cmd) + view->cmd_size; - struct vmw_private *dev_priv = res->dev_priv; vmw_resource_unreference(&view->cotable); vmw_resource_unreference(&view->srf); kfree_rcu(view, rcu); - ttm_mem_global_free(vmw_mem_glob(dev_priv), size); } /** @@ -327,10 +324,6 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man, struct vmw_private *dev_priv = ctx->dev_priv; struct vmw_resource *res; struct vmw_view *view; - struct ttm_operation_ctx ttm_opt_ctx = { - .interruptible = true, - .no_wait_gpu = false - }; size_t size; int ret; @@ -347,16 +340,8 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man, size = offsetof(struct vmw_view, cmd) + cmd_size; - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ttm_opt_ctx); - if (ret) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for view creation\n"); - return ret; - } - view = kmalloc(size, GFP_KERNEL); if (!view) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), size); return -ENOMEM; } @@ -582,4 +567,8 @@ static void vmw_so_build_asserts(void) offsetof(SVGA3dCmdDXDefineRenderTargetView, sid)); BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) != offsetof(SVGA3dCmdDXDefineDepthStencilView, sid)); + BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) != + offsetof(SVGA3dCmdDXDefineUAView, sid)); + BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) != + offsetof(SVGA3dCmdDXDefineDepthStencilView_v2, sid)); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h index f48b84bfeeac..01c701e7466e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h @@ -93,7 +93,10 @@ static inline enum vmw_view_type vmw_view_cmd_to_type(u32 id) id == SVGA_3D_CMD_DX_DESTROY_UA_VIEW) return vmw_view_ua; - if (tmp > (u32)vmw_view_max) + if (id == SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2) + return vmw_view_ds; + + if (tmp > (u32)vmw_view_ds) return vmw_view_max; return (enum vmw_view_type) tmp; @@ -123,6 +126,7 @@ static inline enum vmw_so_type vmw_so_cmd_to_type(u32 id) case SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE: return vmw_so_ds; case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE: + case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2: case SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE: return vmw_so_rs; case SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE: diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index d85310b2608d..8f0d651d0144 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -1123,7 +1123,7 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane, } if (!vps->surf) { - ret = vmw_gb_surface_define(dev_priv, 0, &metadata, + ret = vmw_gb_surface_define(dev_priv, &metadata, &vps->surf); if (ret != 0) { DRM_ERROR("Couldn't allocate STDU surface.\n"); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c index c8efa4a6c995..2de97419d5c9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c @@ -60,8 +60,6 @@ static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback, static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res, enum vmw_cmdbuf_res_state state); -static size_t vmw_streamoutput_size; - static const struct vmw_res_func vmw_dx_streamoutput_func = { .res_type = vmw_res_streamoutput, .needs_backup = true, @@ -254,12 +252,10 @@ 
vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man, static void vmw_dx_streamoutput_res_free(struct vmw_resource *res) { - struct vmw_private *dev_priv = res->dev_priv; struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res); vmw_resource_unreference(&so->cotable); kfree(so); - ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_streamoutput_size); } static void vmw_dx_streamoutput_hw_destroy(struct vmw_resource *res) @@ -284,27 +280,10 @@ int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man, struct vmw_dx_streamoutput *so; struct vmw_resource *res; struct vmw_private *dev_priv = ctx->dev_priv; - struct ttm_operation_ctx ttm_opt_ctx = { - .interruptible = true, - .no_wait_gpu = false - }; int ret; - if (!vmw_streamoutput_size) - vmw_streamoutput_size = ttm_round_pot(sizeof(*so)); - - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), - vmw_streamoutput_size, &ttm_opt_ctx); - if (ret) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for streamout.\n"); - return ret; - } - so = kmalloc(sizeof(*so), GFP_KERNEL); if (!so) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_streamoutput_size); return -ENOMEM; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 5d53a5f9d123..00e8e27e4884 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -45,16 +45,12 @@ * @prime: The TTM prime object. * @base: The TTM base object handling user-space visibility. * @srf: The surface metadata. - * @size: TTM accounting size for the surface. * @master: Master of the creating client. Used for security check. - * @backup_base: The TTM base object of the backup buffer. */ struct vmw_user_surface { struct ttm_prime_object prime; struct vmw_surface srf; - uint32_t size; struct drm_master *master; - struct ttm_base_object *backup_base; }; /** @@ -74,13 +70,11 @@ struct vmw_surface_offset { /** * struct vmw_surface_dirty - Surface dirty-tracker * @cache: Cached layout information of the surface. - * @size: Accounting size for the struct vmw_surface_dirty. * @num_subres: Number of subresources. * @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource. */ struct vmw_surface_dirty { struct vmw_surface_cache cache; - size_t size; u32 num_subres; SVGA3dBox boxes[]; }; @@ -129,9 +123,6 @@ static const struct vmw_user_resource_conv user_surface_conv = { const struct vmw_user_resource_conv *user_surface_converter = &user_surface_conv; - -static uint64_t vmw_user_surface_size; - static const struct vmw_res_func vmw_legacy_surface_func = { .res_type = vmw_res_surface, .needs_backup = false, @@ -359,7 +350,7 @@ static void vmw_surface_dma_encode(struct vmw_surface *srf, * vmw_surface. * * Destroys the device surface associated with a struct vmw_surface if - * any, and adjusts accounting and resource count accordingly. + * any, and adjusts resource count accordingly. 
*/ static void vmw_hw_surface_destroy(struct vmw_resource *res) { @@ -666,8 +657,6 @@ static void vmw_user_surface_free(struct vmw_resource *res) struct vmw_surface *srf = vmw_res_to_srf(res); struct vmw_user_surface *user_srf = container_of(srf, struct vmw_user_surface, srf); - struct vmw_private *dev_priv = srf->res.dev_priv; - uint32_t size = user_srf->size; WARN_ON_ONCE(res->dirty); if (user_srf->master) @@ -676,7 +665,6 @@ static void vmw_user_surface_free(struct vmw_resource *res) kfree(srf->metadata.sizes); kfree(srf->snooper.image); ttm_prime_object_kfree(user_srf, prime); - ttm_mem_global_free(vmw_mem_glob(dev_priv), size); } /** @@ -696,8 +684,6 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base) struct vmw_resource *res = &user_srf->srf.res; *p_base = NULL; - if (user_srf->backup_base) - ttm_base_object_unref(&user_srf->backup_base); vmw_resource_unreference(&res); } @@ -715,7 +701,7 @@ int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE); + return ttm_ref_object_base_unref(tfile, arg->sid); } /** @@ -740,23 +726,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, struct drm_vmw_surface_create_req *req = &arg->req; struct drm_vmw_surface_arg *rep = &arg->rep; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct ttm_operation_ctx ctx = { - .interruptible = true, - .no_wait_gpu = false - }; int ret; int i, j; uint32_t cur_bo_offset; struct drm_vmw_size *cur_size; struct vmw_surface_offset *cur_offset; uint32_t num_sizes; - uint32_t size; const SVGA3dSurfaceDesc *desc; - if (unlikely(vmw_user_surface_size == 0)) - vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + - VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE; - num_sizes = 0; for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS) @@ -768,10 +745,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, num_sizes == 0) return -EINVAL; - size = vmw_user_surface_size + - ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) + - ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset)); - desc = vmw_surface_get_desc(req->format); if (unlikely(desc->blockDesc == SVGA3DBLOCKDESC_NONE)) { VMW_DEBUG_USER("Invalid format %d for surface creation.\n", @@ -779,18 +752,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), - size, &ctx); - if (unlikely(ret != 0)) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for surface.\n"); - goto out_unlock; - } - user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); if (unlikely(!user_srf)) { ret = -ENOMEM; - goto out_no_user_srf; + goto out_unlock; } srf = &user_srf->srf; @@ -805,7 +770,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, memcpy(metadata->mip_levels, req->mip_levels, sizeof(metadata->mip_levels)); metadata->num_sizes = num_sizes; - user_srf->size = size; metadata->sizes = memdup_user((struct drm_vmw_size __user *)(unsigned long) req->size_addr, @@ -883,22 +847,22 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, if (dev_priv->has_mob && req->shareable) { uint32_t backup_handle; - ret = vmw_user_bo_alloc(dev_priv, tfile, - res->backup_size, - true, - &backup_handle, - &res->backup, - &user_srf->backup_base); + ret = 
vmw_gem_object_create_with_handle(dev_priv, + file_priv, + res->backup_size, + &backup_handle, + &res->backup); if (unlikely(ret != 0)) { vmw_resource_unreference(&res); goto out_unlock; } + vmw_bo_reference(res->backup); } tmp = vmw_resource_reference(&srf->res); ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, req->shareable, VMW_RES_SURFACE, - &vmw_user_surface_base_release, NULL); + &vmw_user_surface_base_release); if (unlikely(ret != 0)) { vmw_resource_unreference(&tmp); @@ -916,8 +880,6 @@ out_no_offsets: kfree(metadata->sizes); out_no_sizes: ttm_prime_object_kfree(user_srf, prime); -out_no_user_srf: - ttm_mem_global_free(vmw_mem_glob(dev_priv), size); out_unlock: return ret; } @@ -955,7 +917,6 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv, VMW_DEBUG_USER("Referenced object is not a surface.\n"); goto out_bad_resource; } - if (handle_type != DRM_VMW_HANDLE_PRIME) { bool require_exist = false; @@ -980,8 +941,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv, if (unlikely(drm_is_render_client(file_priv))) require_exist = true; - ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, - require_exist); + ret = ttm_ref_object_add(tfile, base, NULL, require_exist); if (unlikely(ret != 0)) { DRM_ERROR("Could not add a reference to a surface.\n"); goto out_bad_resource; @@ -995,7 +955,7 @@ out_bad_resource: ttm_base_object_unref(&base); out_no_lookup: if (handle_type == DRM_VMW_HANDLE_PRIME) - (void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE); + (void) ttm_ref_object_base_unref(tfile, handle); return ret; } @@ -1045,7 +1005,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, if (unlikely(ret != 0)) { VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes, srf->metadata.num_sizes); - ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE); + ttm_ref_object_base_unref(tfile, base->handle); ret = -EFAULT; } @@ -1459,7 +1419,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev, struct vmw_resource *res; struct vmw_resource *tmp; int ret = 0; - uint32_t size; uint32_t backup_handle = 0; SVGA3dSurfaceAllFlags svga3d_flags_64 = SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits, @@ -1506,12 +1465,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev, return -EINVAL; } - if (unlikely(vmw_user_surface_size == 0)) - vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + - VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE; - - size = vmw_user_surface_size; - metadata.flags = svga3d_flags_64; metadata.format = req->base.format; metadata.mip_levels[0] = req->base.mip_levels; @@ -1526,7 +1479,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev, drm_vmw_surface_flag_scanout; /* Define a surface based on the parameters. 
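+	 * A kernel-internal caller follows the same pattern, roughly (sketch
+	 * only; the field values below are illustrative, see for instance
+	 * vmw_create_bo_proxy()):
+	 *
+	 *	struct vmw_surface_metadata metadata = {0};
+	 *	struct vmw_surface *srf;
+	 *
+	 *	metadata.format = format;
+	 *	metadata.mip_levels[0] = 1;
+	 *	metadata.num_sizes = 1;
+	 *	metadata.base_size.width = width;
+	 *	metadata.base_size.height = height;
+	 *	metadata.base_size.depth = 1;
+	 *	metadata.scanout = true;
+	 *	ret = vmw_gb_surface_define(dev_priv, &metadata, &srf);
+	 *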
*/ - ret = vmw_gb_surface_define(dev_priv, size, &metadata, &srf); + ret = vmw_gb_surface_define(dev_priv, &metadata, &srf); if (ret != 0) { VMW_DEBUG_USER("Failed to define surface.\n"); return ret; @@ -1539,9 +1492,8 @@ vmw_gb_surface_define_internal(struct drm_device *dev, res = &user_srf->srf.res; if (req->base.buffer_handle != SVGA3D_INVALID_ID) { - ret = vmw_user_bo_lookup(tfile, req->base.buffer_handle, - &res->backup, - &user_srf->backup_base); + ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle, + &res->backup); if (ret == 0) { if (res->backup->base.base.size < res->backup_size) { VMW_DEBUG_USER("Surface backup buffer too small.\n"); @@ -1554,14 +1506,15 @@ vmw_gb_surface_define_internal(struct drm_device *dev, } } else if (req->base.drm_surface_flags & (drm_vmw_surface_flag_create_buffer | - drm_vmw_surface_flag_coherent)) - ret = vmw_user_bo_alloc(dev_priv, tfile, - res->backup_size, - req->base.drm_surface_flags & - drm_vmw_surface_flag_shareable, - &backup_handle, - &res->backup, - &user_srf->backup_base); + drm_vmw_surface_flag_coherent)) { + ret = vmw_gem_object_create_with_handle(dev_priv, file_priv, + res->backup_size, + &backup_handle, + &res->backup); + if (ret == 0) + vmw_bo_reference(res->backup); + + } if (unlikely(ret != 0)) { vmw_resource_unreference(&res); @@ -1593,7 +1546,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev, req->base.drm_surface_flags & drm_vmw_surface_flag_shareable, VMW_RES_SURFACE, - &vmw_user_surface_base_release, NULL); + &vmw_user_surface_base_release); if (unlikely(ret != 0)) { vmw_resource_unreference(&tmp); @@ -1613,7 +1566,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev, rep->buffer_size = 0; rep->buffer_handle = SVGA3D_INVALID_ID; } - vmw_resource_unreference(&res); out_unlock: @@ -1636,12 +1588,11 @@ vmw_gb_surface_reference_internal(struct drm_device *dev, struct drm_file *file_priv) { struct vmw_private *dev_priv = vmw_priv(dev); - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct vmw_surface *srf; struct vmw_user_surface *user_srf; struct vmw_surface_metadata *metadata; struct ttm_base_object *base; - uint32_t backup_handle; + u32 backup_handle; int ret; ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid, @@ -1658,14 +1609,12 @@ vmw_gb_surface_reference_internal(struct drm_device *dev, metadata = &srf->metadata; mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */ - ret = vmw_user_bo_reference(tfile, srf->res.backup, &backup_handle); + ret = drm_gem_handle_create(file_priv, &srf->res.backup->base.base, + &backup_handle); mutex_unlock(&dev_priv->cmdbuf_mutex); - - if (unlikely(ret != 0)) { - DRM_ERROR("Could not add a reference to a GB surface " - "backup buffer.\n"); - (void) ttm_ref_object_base_unref(tfile, base->handle, - TTM_REF_USAGE); + if (ret != 0) { + drm_err(dev, "Wasn't able to create a backing handle for surface sid = %u.\n", + req->sid); goto out_bad_resource; } @@ -1955,11 +1904,7 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res) u32 num_mip; u32 num_subres; u32 num_samples; - size_t dirty_size, acc_size; - static struct ttm_operation_ctx ctx = { - .interruptible = false, - .no_wait_gpu = false - }; + size_t dirty_size; int ret; if (metadata->array_size) @@ -1973,14 +1918,6 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res) num_subres = num_layers * num_mip; dirty_size = struct_size(dirty, boxes, num_subres); - acc_size = ttm_round_pot(dirty_size); - ret = ttm_mem_global_alloc(vmw_mem_glob(res->dev_priv), - acc_size, &ctx); 
- if (ret) { - VMW_DEBUG_USER("Out of graphics memory for surface " - "dirty tracker.\n"); - return ret; - } dirty = kvzalloc(dirty_size, GFP_KERNEL); if (!dirty) { @@ -1990,13 +1927,12 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res) num_samples = max_t(u32, 1, metadata->multisample_count); ret = vmw_surface_setup_cache(&metadata->base_size, metadata->format, - num_mip, num_layers, num_samples, - &dirty->cache); + num_mip, num_layers, num_samples, + &dirty->cache); if (ret) goto out_no_cache; dirty->num_subres = num_subres; - dirty->size = acc_size; res->dirty = (struct vmw_resource_dirty *) dirty; return 0; @@ -2004,7 +1940,6 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res) out_no_cache: kvfree(dirty); out_no_dirty: - ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size); return ret; } @@ -2015,10 +1950,8 @@ static void vmw_surface_dirty_free(struct vmw_resource *res) { struct vmw_surface_dirty *dirty = (struct vmw_surface_dirty *) res->dirty; - size_t acc_size = dirty->size; kvfree(dirty); - ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size); res->dirty = NULL; } @@ -2051,8 +1984,6 @@ static int vmw_surface_clean(struct vmw_resource *res) * vmw_gb_surface_define - Define a private GB surface * * @dev_priv: Pointer to a device private. - * @user_accounting_size: Used to track user-space memory usage, set - * to 0 for kernel mode only memory * @metadata: Metadata representing the surface to create. * @user_srf_out: allocated user_srf. Set to NULL on failure. * @@ -2062,17 +1993,12 @@ static int vmw_surface_clean(struct vmw_resource *res) * it available to user mode drivers. */ int vmw_gb_surface_define(struct vmw_private *dev_priv, - uint32_t user_accounting_size, const struct vmw_surface_metadata *req, struct vmw_surface **srf_out) { struct vmw_surface_metadata *metadata; struct vmw_user_surface *user_srf; struct vmw_surface *srf; - struct ttm_operation_ctx ctx = { - .interruptible = true, - .no_wait_gpu = false - }; u32 sample_count = 1; u32 num_layers = 1; int ret; @@ -2113,22 +2039,13 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv, if (req->sizes != NULL) return -EINVAL; - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), - user_accounting_size, &ctx); - if (ret != 0) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for surface.\n"); - goto out_unlock; - } - user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); if (unlikely(!user_srf)) { ret = -ENOMEM; - goto out_no_user_srf; + goto out_unlock; } *srf_out = &user_srf->srf; - user_srf->size = user_accounting_size; user_srf->prime.base.shareable = false; user_srf->prime.base.tfile = NULL; @@ -2179,9 +2096,6 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv, return ret; -out_no_user_srf: - ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size); - out_unlock: return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c new file mode 100644 index 000000000000..b0005b03a617 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright 2021 VMware, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include "vmwgfx_drv.h" + +#include <drm/ttm/ttm_bo_driver.h> +#include <drm/ttm/ttm_device.h> +#include <drm/ttm/ttm_placement.h> +#include <drm/ttm/ttm_resource.h> +#include <linux/slab.h> + + +static int vmw_sys_man_alloc(struct ttm_resource_manager *man, + struct ttm_buffer_object *bo, + const struct ttm_place *place, + struct ttm_resource **res) +{ + *res = kzalloc(sizeof(**res), GFP_KERNEL); + if (!*res) + return -ENOMEM; + + ttm_resource_init(bo, place, *res); + return 0; +} + +static void vmw_sys_man_free(struct ttm_resource_manager *man, + struct ttm_resource *res) +{ + kfree(res); +} + +static const struct ttm_resource_manager_func vmw_sys_manager_func = { + .alloc = vmw_sys_man_alloc, + .free = vmw_sys_man_free, +}; + +int vmw_sys_man_init(struct vmw_private *dev_priv) +{ + struct ttm_device *bdev = &dev_priv->bdev; + struct ttm_resource_manager *man = + kzalloc(sizeof(*man), GFP_KERNEL); + + if (!man) + return -ENOMEM; + + man->use_tt = true; + man->func = &vmw_sys_manager_func; + + ttm_resource_manager_init(man, 0); + ttm_set_driver_manager(bdev, VMW_PL_SYSTEM, man); + ttm_resource_manager_set_used(man, true); + return 0; +} + +void vmw_sys_man_fini(struct vmw_private *dev_priv) +{ + struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, + VMW_PL_SYSTEM); + + ttm_resource_manager_evict_all(&dev_priv->bdev, man); + + ttm_resource_manager_set_used(man, false); + ttm_resource_manager_cleanup(man); + + ttm_set_driver_manager(&dev_priv->bdev, VMW_PL_SYSTEM, NULL); + kfree(man); +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index e899a936a42a..b84ecc6d6611 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -92,6 +92,13 @@ static const struct ttm_place gmr_vram_placement_flags[] = { } }; +static const struct ttm_place vmw_sys_placement_flags = { + .fpfn = 0, + .lpfn = 0, + .mem_type = VMW_PL_SYSTEM, + .flags = 0 +}; + struct ttm_placement vmw_vram_gmr_placement = { .num_placement = 2, .placement = vram_gmr_placement_flags, @@ -113,28 +120,11 @@ struct ttm_placement vmw_sys_placement = { .busy_placement = &sys_placement_flags }; -static const struct ttm_place evictable_placement_flags[] = { - { - .fpfn = 0, - .lpfn = 0, - .mem_type = TTM_PL_SYSTEM, - .flags = 0 - }, { - .fpfn = 0, - .lpfn = 0, - .mem_type = TTM_PL_VRAM, - .flags = 0 - }, { - .fpfn = 0, - .lpfn = 0, - 
.mem_type = VMW_PL_GMR, - .flags = 0 - }, { - .fpfn = 0, - .lpfn = 0, - .mem_type = VMW_PL_MOB, - .flags = 0 - } +struct ttm_placement vmw_pt_sys_placement = { + .num_placement = 1, + .placement = &vmw_sys_placement_flags, + .num_busy_placement = 1, + .busy_placement = &vmw_sys_placement_flags }; static const struct ttm_place nonfixed_placement_flags[] = { @@ -156,13 +146,6 @@ static const struct ttm_place nonfixed_placement_flags[] = { } }; -struct ttm_placement vmw_evictable_placement = { - .num_placement = 4, - .placement = evictable_placement_flags, - .num_busy_placement = 1, - .busy_placement = &sys_placement_flags -}; - struct ttm_placement vmw_srf_placement = { .num_placement = 1, .num_busy_placement = 2, @@ -184,19 +167,6 @@ struct ttm_placement vmw_nonfixed_placement = { .busy_placement = &sys_placement_flags }; -struct vmw_ttm_tt { - struct ttm_tt dma_ttm; - struct vmw_private *dev_priv; - int gmr_id; - struct vmw_mob *mob; - int mem_type; - struct sg_table sgt; - struct vmw_sg_table vsgt; - uint64_t sg_alloc_size; - bool mapped; - bool bound; -}; - const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt); /** @@ -317,17 +287,8 @@ static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt) static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt) { struct vmw_private *dev_priv = vmw_tt->dev_priv; - struct ttm_mem_global *glob = vmw_mem_glob(dev_priv); struct vmw_sg_table *vsgt = &vmw_tt->vsgt; - struct ttm_operation_ctx ctx = { - .interruptible = true, - .no_wait_gpu = false - }; - struct vmw_piter iter; - dma_addr_t old; int ret = 0; - static size_t sgl_size; - static size_t sgt_size; if (vmw_tt->mapped) return 0; @@ -336,20 +297,12 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt) vsgt->pages = vmw_tt->dma_ttm.pages; vsgt->num_pages = vmw_tt->dma_ttm.num_pages; vsgt->addrs = vmw_tt->dma_ttm.dma_address; - vsgt->sgt = &vmw_tt->sgt; + vsgt->sgt = NULL; switch (dev_priv->map_mode) { case vmw_dma_map_bind: case vmw_dma_map_populate: - if (unlikely(!sgl_size)) { - sgl_size = ttm_round_pot(sizeof(struct scatterlist)); - sgt_size = ttm_round_pot(sizeof(struct sg_table)); - } - vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages; - ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx); - if (unlikely(ret != 0)) - return ret; - + vsgt->sgt = &vmw_tt->sgt; ret = sg_alloc_table_from_pages_segment( &vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0, (unsigned long)vsgt->num_pages << PAGE_SHIFT, @@ -357,15 +310,6 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt) if (ret) goto out_sg_alloc_fail; - if (vsgt->num_pages > vmw_tt->sgt.orig_nents) { - uint64_t over_alloc = - sgl_size * (vsgt->num_pages - - vmw_tt->sgt.orig_nents); - - ttm_mem_global_free(glob, over_alloc); - vmw_tt->sg_alloc_size -= over_alloc; - } - ret = vmw_ttm_map_for_dma(vmw_tt); if (unlikely(ret != 0)) goto out_map_fail; @@ -375,16 +319,6 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt) break; } - old = ~((dma_addr_t) 0); - vmw_tt->vsgt.num_regions = 0; - for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) { - dma_addr_t cur = vmw_piter_dma_addr(&iter); - - if (cur != old + PAGE_SIZE) - vmw_tt->vsgt.num_regions++; - old = cur; - } - vmw_tt->mapped = true; return 0; @@ -392,7 +326,6 @@ out_map_fail: sg_free_table(vmw_tt->vsgt.sgt); vmw_tt->vsgt.sgt = NULL; out_sg_alloc_fail: - ttm_mem_global_free(glob, vmw_tt->sg_alloc_size); return ret; } @@ -418,8 +351,6 @@ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt) vmw_ttm_unmap_from_dma(vmw_tt); sg_free_table(vmw_tt->vsgt.sgt); 
vmw_tt->vsgt.sgt = NULL; - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_tt->sg_alloc_size); break; default: break; @@ -484,6 +415,9 @@ static int vmw_ttm_bind(struct ttm_device *bdev, &vmw_be->vsgt, ttm->num_pages, vmw_be->gmr_id); break; + case VMW_PL_SYSTEM: + /* Nothing to be done for a system bind */ + break; default: BUG(); } @@ -507,6 +441,8 @@ static void vmw_ttm_unbind(struct ttm_device *bdev, case VMW_PL_MOB: vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob); break; + case VMW_PL_SYSTEM: + break; default: BUG(); } @@ -534,7 +470,6 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) static int vmw_ttm_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) { - unsigned int i; int ret; /* TODO: maybe completely drop this ? */ @@ -542,22 +477,7 @@ static int vmw_ttm_populate(struct ttm_device *bdev, return 0; ret = ttm_pool_alloc(&bdev->pool, ttm, ctx); - if (ret) - return ret; - - for (i = 0; i < ttm->num_pages; ++i) { - ret = ttm_mem_global_alloc_page(&ttm_mem_glob, ttm->pages[i], - PAGE_SIZE, ctx); - if (ret) - goto error; - } - return 0; -error: - while (i--) - ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i], - PAGE_SIZE); - ttm_pool_free(&bdev->pool, ttm); return ret; } @@ -566,7 +486,6 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev, { struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt, dma_ttm); - unsigned int i; vmw_ttm_unbind(bdev, ttm); @@ -577,10 +496,6 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev, vmw_ttm_unmap_dma(vmw_tt); - for (i = 0; i < ttm->num_pages; ++i) - ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i], - PAGE_SIZE); - ttm_pool_free(&bdev->pool, ttm); } @@ -624,6 +539,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource * switch (mem->mem_type) { case TTM_PL_SYSTEM: + case VMW_PL_SYSTEM: case VMW_PL_GMR: case VMW_PL_MOB: return 0; @@ -670,6 +586,11 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo) (void) ttm_bo_wait(bo, false, false); } +static bool vmw_memtype_is_system(uint32_t mem_type) +{ + return mem_type == TTM_PL_SYSTEM || mem_type == VMW_PL_SYSTEM; +} + static int vmw_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, @@ -680,7 +601,7 @@ static int vmw_move(struct ttm_buffer_object *bo, struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type); int ret; - if (new_man->use_tt && new_mem->mem_type != TTM_PL_SYSTEM) { + if (new_man->use_tt && !vmw_memtype_is_system(new_mem->mem_type)) { ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem); if (ret) return ret; @@ -689,7 +610,7 @@ static int vmw_move(struct ttm_buffer_object *bo, vmw_move_notify(bo, bo->resource, new_mem); if (old_man->use_tt && new_man->use_tt) { - if (bo->resource->mem_type == TTM_PL_SYSTEM) { + if (vmw_memtype_is_system(bo->resource->mem_type)) { ttm_bo_move_null(bo, new_mem); return 0; } @@ -736,7 +657,7 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv, int ret; ret = vmw_bo_create_kernel(dev_priv, bo_size, - &vmw_sys_placement, + &vmw_pt_sys_placement, &bo); if (unlikely(ret != 0)) return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c index 0a4c340252ec..265f7c48d856 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c @@ -27,30 +27,44 @@ #include "vmwgfx_drv.h" -static struct ttm_buffer_object *vmw_bo_vm_lookup(struct ttm_device *bdev, - unsigned long offset, - unsigned long pages) 
+static int vmw_bo_vm_lookup(struct ttm_device *bdev, + struct drm_file *filp, + unsigned long offset, + unsigned long pages, + struct ttm_buffer_object **p_bo) { struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev); struct drm_device *drm = &dev_priv->drm; struct drm_vma_offset_node *node; - struct ttm_buffer_object *bo = NULL; + int ret; + + *p_bo = NULL; drm_vma_offset_lock_lookup(bdev->vma_manager); node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages); if (likely(node)) { - bo = container_of(node, struct ttm_buffer_object, + *p_bo = container_of(node, struct ttm_buffer_object, base.vma_node); - bo = ttm_bo_get_unless_zero(bo); + *p_bo = ttm_bo_get_unless_zero(*p_bo); } drm_vma_offset_unlock_lookup(bdev->vma_manager); - if (!bo) + if (!*p_bo) { drm_err(drm, "Could not find buffer object to map\n"); + return -EINVAL; + } + + if (!drm_vma_node_is_allowed(node, filp)) { + ret = -EACCES; + goto out_no_access; + } - return bo; + return 0; +out_no_access: + ttm_bo_put(*p_bo); + return ret; } int vmw_mmap(struct file *filp, struct vm_area_struct *vma) @@ -64,7 +78,6 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma) }; struct drm_file *file_priv = filp->private_data; struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev); - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct ttm_device *bdev = &dev_priv->bdev; struct ttm_buffer_object *bo; int ret; @@ -72,13 +85,9 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma) if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START)) return -EINVAL; - bo = vmw_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma)); - if (unlikely(!bo)) - return -EINVAL; - - ret = vmw_user_bo_verify_access(bo, tfile); + ret = vmw_bo_vm_lookup(bdev, file_priv, vma->vm_pgoff, vma_pages(vma), &bo); if (unlikely(ret != 0)) - goto out_unref; + return ret; ret = ttm_bo_mmap_obj(vma, bo); if (unlikely(ret != 0)) @@ -99,38 +108,3 @@ out_unref: return ret; } -/* struct vmw_validation_mem callback */ -static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size) -{ - static struct ttm_operation_ctx ctx = {.interruptible = false, - .no_wait_gpu = false}; - struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm); - - return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx); -} - -/* struct vmw_validation_mem callback */ -static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size) -{ - struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm); - - return ttm_mem_global_free(vmw_mem_glob(dev_priv), size); -} - -/** - * vmw_validation_mem_init_ttm - Interface the validation memory tracker - * to ttm. - * @dev_priv: Pointer to struct vmw_private. The reason we choose a vmw private - * rather than a struct vmw_validation_mem is to make sure assumption in the - * callbacks that struct vmw_private derives from struct vmw_validation_mem - * holds true. 
- * @gran: The recommended allocation granularity - */ -void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran) -{ - struct vmw_validation_mem *vvm = &dev_priv->vvm; - - vvm->reserve_mem = vmw_vmt_reserve; - vvm->unreserve_mem = vmw_vmt_unreserve; - vvm->gran = gran; -} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_va.c b/drivers/gpu/drm/vmwgfx/vmwgfx_va.c index ebc1d83c34b4..6ad744ae07f5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_va.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_va.c @@ -117,7 +117,7 @@ int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data; return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, - arg->stream_id, TTM_REF_USAGE); + arg->stream_id); } /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c index b09094b50c5d..f46891012be3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c @@ -29,6 +29,9 @@ #include "vmwgfx_validation.h" #include "vmwgfx_drv.h" + +#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE) + /** * struct vmw_validation_bo_node - Buffer object validation metadata. * @base: Metadata used for TTM reservation- and validation. @@ -43,7 +46,7 @@ */ struct vmw_validation_bo_node { struct ttm_validate_buffer base; - struct drm_hash_item hash; + struct vmwgfx_hash_item hash; unsigned int coherent_count; u32 as_mob : 1; u32 cpu_blit : 1; @@ -72,7 +75,7 @@ struct vmw_validation_bo_node { */ struct vmw_validation_res_node { struct list_head head; - struct drm_hash_item hash; + struct vmwgfx_hash_item hash; struct vmw_resource *res; struct vmw_buffer_object *new_backup; unsigned long new_backup_offset; @@ -113,13 +116,8 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx, struct page *page; if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) { - int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran); - - if (ret) - return NULL; - - ctx->vm_size_left += ctx->vm->gran; - ctx->total_mem += ctx->vm->gran; + ctx->vm_size_left += VMWGFX_VALIDATION_MEM_GRAN; + ctx->total_mem += VMWGFX_VALIDATION_MEM_GRAN; } page = alloc_page(GFP_KERNEL | __GFP_ZERO); @@ -159,7 +157,6 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx) ctx->mem_size_left = 0; if (ctx->vm && ctx->total_mem) { - ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem); ctx->total_mem = 0; ctx->vm_size_left = 0; } @@ -184,9 +181,9 @@ vmw_validation_find_bo_dup(struct vmw_validation_context *ctx, return NULL; if (ctx->ht) { - struct drm_hash_item *hash; + struct vmwgfx_hash_item *hash; - if (!drm_ht_find_item(ctx->ht, (unsigned long) vbo, &hash)) + if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) vbo, &hash)) bo_node = container_of(hash, typeof(*bo_node), hash); } else { struct vmw_validation_bo_node *entry; @@ -221,9 +218,9 @@ vmw_validation_find_res_dup(struct vmw_validation_context *ctx, return NULL; if (ctx->ht) { - struct drm_hash_item *hash; + struct vmwgfx_hash_item *hash; - if (!drm_ht_find_item(ctx->ht, (unsigned long) res, &hash)) + if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) res, &hash)) res_node = container_of(hash, typeof(*res_node), hash); } else { struct vmw_validation_res_node *entry; @@ -280,7 +277,7 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx, if (ctx->ht) { bo_node->hash.key = (unsigned long) vbo; - ret = drm_ht_insert_item(ctx->ht, &bo_node->hash); + ret = vmwgfx_ht_insert_item(ctx->ht, &bo_node->hash); if (ret) { DRM_ERROR("Failed to initialize a 
buffer " "validation entry.\n"); @@ -335,7 +332,7 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx, if (ctx->ht) { node->hash.key = (unsigned long) res; - ret = drm_ht_insert_item(ctx->ht, &node->hash); + ret = vmwgfx_ht_insert_item(ctx->ht, &node->hash); if (ret) { DRM_ERROR("Failed to initialize a resource validation " "entry.\n"); @@ -688,13 +685,13 @@ void vmw_validation_drop_ht(struct vmw_validation_context *ctx) return; list_for_each_entry(entry, &ctx->bo_list, base.head) - (void) drm_ht_remove_item(ctx->ht, &entry->hash); + (void) vmwgfx_ht_remove_item(ctx->ht, &entry->hash); list_for_each_entry(val, &ctx->resource_list, head) - (void) drm_ht_remove_item(ctx->ht, &val->hash); + (void) vmwgfx_ht_remove_item(ctx->ht, &val->hash); list_for_each_entry(val, &ctx->resource_ctx_list, head) - (void) drm_ht_remove_item(ctx->ht, &val->hash); + (void) vmwgfx_ht_remove_item(ctx->ht, &val->hash); ctx->ht = NULL; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h index 739906d1b3eb..f21df053882b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h @@ -31,29 +31,15 @@ #include <linux/list.h> #include <linux/ww_mutex.h> -#include <drm/drm_hashtab.h> #include <drm/ttm/ttm_execbuf_util.h> +#include "vmwgfx_hashtab.h" + #define VMW_RES_DIRTY_NONE 0 #define VMW_RES_DIRTY_SET BIT(0) #define VMW_RES_DIRTY_CLEAR BIT(1) /** - * struct vmw_validation_mem - Custom interface to provide memory reservations - * for the validation code. - * @reserve_mem: Callback to reserve memory - * @unreserve_mem: Callback to unreserve memory - * @gran: Reservation granularity. Contains a hint how much memory should - * be reserved in each call to @reserve_mem(). A slow implementation may want - * reservation to be done in large batches. - */ -struct vmw_validation_mem { - int (*reserve_mem)(struct vmw_validation_mem *m, size_t size); - void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size); - size_t gran; -}; - -/** * struct vmw_validation_context - Per command submission validation context * @ht: Hash table used to find resource- or buffer object duplicates * @resource_list: List head for resource validation metadata @@ -73,7 +59,7 @@ struct vmw_validation_mem { * @total_mem: Amount of reserved memory. */ struct vmw_validation_context { - struct drm_open_hash *ht; + struct vmwgfx_open_hash *ht; struct list_head resource_list; struct list_head resource_ctx_list; struct list_head bo_list; @@ -129,21 +115,6 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx) } /** - * vmw_validation_set_val_mem - Register a validation mem object for - * validation memory reservation - * @ctx: The validation context - * @vm: Pointer to a struct vmw_validation_mem - * - * Must be set before the first attempt to allocate validation memory. 
- */ -static inline void -vmw_validation_set_val_mem(struct vmw_validation_context *ctx, - struct vmw_validation_mem *vm) -{ - ctx->vm = vm; -} - -/** * vmw_validation_set_ht - Register a hash table for duplicate finding * @ctx: The validation context * @ht: Pointer to a hash table to use for duplicate finding @@ -151,7 +122,7 @@ vmw_validation_set_val_mem(struct vmw_validation_context *ctx, * available at validation context declaration time */ static inline void vmw_validation_set_ht(struct vmw_validation_context *ctx, - struct drm_open_hash *ht) + struct vmwgfx_open_hash *ht) { ctx->ht = ht; } @@ -190,22 +161,6 @@ vmw_validation_bo_fence(struct vmw_validation_context *ctx, } /** - * vmw_validation_context_init - Initialize a validation context - * @ctx: Pointer to the validation context to initialize - * - * This function initializes a validation context with @merge_dups set - * to false - */ -static inline void -vmw_validation_context_init(struct vmw_validation_context *ctx) -{ - memset(ctx, 0, sizeof(*ctx)); - INIT_LIST_HEAD(&ctx->resource_list); - INIT_LIST_HEAD(&ctx->resource_ctx_list); - INIT_LIST_HEAD(&ctx->bo_list); -} - -/** * vmw_validation_align - Align a validation memory allocation * @val: The size to be aligned * diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index 9f14d99c763c..e63088c2121d 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -469,19 +469,7 @@ static void xen_drm_drv_release(struct drm_device *dev) kfree(drm_info); } -static const struct file_operations xen_drm_dev_fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .unlocked_ioctl = drm_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = drm_compat_ioctl, -#endif - .poll = drm_poll, - .read = drm_read, - .llseek = no_llseek, - .mmap = xen_drm_front_gem_mmap, -}; +DEFINE_DRM_GEM_FOPS(xen_drm_dev_fops); static const struct drm_driver xen_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, @@ -489,7 +477,7 @@ static const struct drm_driver xen_drm_driver = { .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table, - .gem_prime_mmap = xen_drm_front_gem_prime_mmap, + .gem_prime_mmap = drm_gem_prime_mmap, .dumb_create = xen_drm_drv_dumb_create, .fops = &xen_drm_dev_fops, .name = "xendrm-du", @@ -773,6 +761,7 @@ static struct xenbus_driver xen_driver = { .probe = xen_drv_probe, .remove = xen_drv_remove, .otherend_changed = displback_changed, + .not_essential = true, }; static int __init xen_drv_init(void) diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c index b293c67230ef..dd358ba2bf8e 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.c +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c @@ -57,6 +57,47 @@ static void gem_free_pages_array(struct xen_gem_object *xen_obj) xen_obj->pages = NULL; } +static int xen_drm_front_gem_object_mmap(struct drm_gem_object *gem_obj, + struct vm_area_struct *vma) +{ + struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj); + int ret; + + vma->vm_ops = gem_obj->funcs->vm_ops; + + /* + * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the + * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map + * the whole buffer. 
+ */ + vma->vm_flags &= ~VM_PFNMAP; + vma->vm_flags |= VM_MIXEDMAP; + vma->vm_pgoff = 0; + + /* + * According to Xen on ARM ABI (xen/include/public/arch-arm.h): + * all memory which is shared with other entities in the system + * (including the hypervisor and other guests) must reside in memory + * which is mapped as Normal Inner Write-Back Outer Write-Back + * Inner-Shareable. + */ + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + + /* + * vm_operations_struct.fault handler will be called if CPU access + * to VM is here. For GPUs this isn't the case, because CPU doesn't + * touch the memory. Insert pages now, so both CPU and GPU are happy. + * + * FIXME: as we insert all the pages now then no .fault handler must + * be called, so don't provide one + */ + ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages); + if (ret < 0) + DRM_ERROR("Failed to map pages into vma: %d\n", ret); + + return ret; +} + static const struct vm_operations_struct xen_drm_drv_vm_ops = { .open = drm_gem_vm_open, .close = drm_gem_vm_close, @@ -67,6 +108,7 @@ static const struct drm_gem_object_funcs xen_drm_front_gem_object_funcs = { .get_sg_table = xen_drm_front_gem_get_sg_table, .vmap = xen_drm_front_gem_prime_vmap, .vunmap = xen_drm_front_gem_prime_vunmap, + .mmap = xen_drm_front_gem_object_mmap, .vm_ops = &xen_drm_drv_vm_ops, }; @@ -238,58 +280,6 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev, return &xen_obj->base; } -static int gem_mmap_obj(struct xen_gem_object *xen_obj, - struct vm_area_struct *vma) -{ - int ret; - - /* - * clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the - * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map - * the whole buffer. - */ - vma->vm_flags &= ~VM_PFNMAP; - vma->vm_flags |= VM_MIXEDMAP; - vma->vm_pgoff = 0; - /* - * According to Xen on ARM ABI (xen/include/public/arch-arm.h): - * all memory which is shared with other entities in the system - * (including the hypervisor and other guests) must reside in memory - * which is mapped as Normal Inner Write-Back Outer Write-Back - * Inner-Shareable. - */ - vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); - - /* - * vm_operations_struct.fault handler will be called if CPU access - * to VM is here. For GPUs this isn't the case, because CPU - * doesn't touch the memory. Insert pages now, so both CPU and GPU are - * happy. 
-	 * FIXME: as we insert all the pages now then no .fault handler must
-	 * be called, so don't provide one
-	 */
-	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
-	if (ret < 0)
-		DRM_ERROR("Failed to map pages into vma: %d\n", ret);
-
-	return ret;
-}
-
-int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
-	struct xen_gem_object *xen_obj;
-	struct drm_gem_object *gem_obj;
-	int ret;
-
-	ret = drm_gem_mmap(filp, vma);
-	if (ret < 0)
-		return ret;
-
-	gem_obj = vma->vm_private_data;
-	xen_obj = to_xen_gem_obj(gem_obj);
-	return gem_mmap_obj(xen_obj, vma);
-}
-
 int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj,
 				 struct dma_buf_map *map)
 {
 	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
@@ -313,17 +303,3 @@ void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
 {
 	vunmap(map->vaddr);
 }
-
-int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
-				 struct vm_area_struct *vma)
-{
-	struct xen_gem_object *xen_obj;
-	int ret;
-
-	ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
-	if (ret < 0)
-		return ret;
-
-	xen_obj = to_xen_gem_obj(gem_obj);
-	return gem_mmap_obj(xen_obj, vma);
-}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.h b/drivers/gpu/drm/xen/xen_drm_front_gem.h
index a4e67d0a149c..eaea470f7001 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.h
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.h
@@ -15,9 +15,7 @@ struct dma_buf_attachment;
 struct dma_buf_map;
 struct drm_device;
 struct drm_gem_object;
-struct file;
 struct sg_table;
-struct vm_area_struct;
 
 struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
 						size_t size);
@@ -33,15 +31,10 @@ struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *obj);
 
 void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj);
 
-int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-
 int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj,
 				 struct dma_buf_map *map);
 
 void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
 				    struct dma_buf_map *map);
 
-int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
-				 struct vm_area_struct *vma);
-
 #endif /* __XEN_DRM_FRONT_GEM_H */
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig
index c3d08269faa9..d8d38d86d5c6 100644
--- a/drivers/gpu/drm/xlnx/Kconfig
+++ b/drivers/gpu/drm/xlnx/Kconfig
@@ -7,7 +7,6 @@ config DRM_ZYNQMP_DPSUB
 	depends on XILINX_ZYNQMP_DPDMA
 	select DMA_ENGINE
 	select DRM_GEM_CMA_HELPER
-	select DRM_KMS_CMA_HELPER
 	select DRM_KMS_HELPER
 	select GENERIC_PHY
 	help
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig
index 6dab94adf25e..6815b4db17c1 100644
--- a/drivers/gpu/host1x/Kconfig
+++ b/drivers/gpu/host1x/Kconfig
@@ -2,6 +2,7 @@
 config TEGRA_HOST1X
 	tristate "NVIDIA Tegra host1x driver"
 	depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
+	select DMA_SHARED_BUFFER
 	select IOMMU_IOVA
 	help
 	  Driver for the NVIDIA Tegra host1x hardware.
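The xen_drm_front diffs above illustrate the modern GEM mmap scheme: the driver deletes its private file_operations and both hand-rolled mmap entry points, and instead implements drm_gem_object_funcs.mmap, so the fops generated by DEFINE_DRM_GEM_FOPS() and the generic drm_gem_prime_mmap() funnel into one per-object callback. A minimal sketch of that wiring, with hypothetical foo_* names that are illustrative rather than taken from the patch:

/*
 * Sketch only: one per-object mmap callback serves both the dumb-buffer
 * mmap path (drm_gem_mmap() via DEFINE_DRM_GEM_FOPS) and the PRIME path
 * (drm_gem_prime_mmap()).
 */
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

static int foo_gem_object_mmap(struct drm_gem_object *obj,
			       struct vm_area_struct *vma)
{
	/* Per-object mapping policy lives here: vm_flags, page prot, ... */
	vma->vm_pgoff = 0;	/* drm_gem_mmap() used vm_pgoff as a fake offset */
	return 0;
}

static const struct drm_gem_object_funcs foo_gem_funcs = {
	/* assigned to obj->funcs when the driver creates its GEM objects */
	.mmap = foo_gem_object_mmap,
};

/* Expands to a file_operations with drm_open/drm_ioctl/drm_gem_mmap/... */
DEFINE_DRM_GEM_FOPS(foo_fops);

static const struct drm_driver foo_drm_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.gem_prime_mmap = drm_gem_prime_mmap,
	.fops = &foo_fops,
};

With funcs->mmap in place, both entry points end up in the same callback, which is why the patch can delete xen_drm_front_gem_mmap() and xen_drm_front_gem_prime_mmap() outright.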
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index 218e3718fd68..bdee16a0bb8e 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c @@ -5,6 +5,7 @@ */ #include <linux/debugfs.h> +#include <linux/dma-mapping.h> #include <linux/host1x.h> #include <linux/of.h> #include <linux/seq_file.h> @@ -742,6 +743,7 @@ EXPORT_SYMBOL(host1x_driver_unregister); */ void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key) { + host1x_bo_cache_init(&client->cache); INIT_LIST_HEAD(&client->list); __mutex_init(&client->lock, "host1x client lock", key); client->usecount = 0; @@ -761,7 +763,6 @@ EXPORT_SYMBOL(host1x_client_exit); /** * __host1x_client_register() - register a host1x client * @client: host1x client - * @key: lock class key for the client-specific mutex * * Registers a host1x client with each host1x controller instance. Note that * each client will only match their parent host1x controller and will only be @@ -830,6 +831,8 @@ int host1x_client_unregister(struct host1x_client *client) mutex_unlock(&clients_lock); + host1x_bo_cache_destroy(&client->cache); + return 0; } EXPORT_SYMBOL(host1x_client_unregister); @@ -904,3 +907,78 @@ unlock: return err; } EXPORT_SYMBOL(host1x_client_resume); + +struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo, + enum dma_data_direction dir, + struct host1x_bo_cache *cache) +{ + struct host1x_bo_mapping *mapping; + + if (cache) { + mutex_lock(&cache->lock); + + list_for_each_entry(mapping, &cache->mappings, entry) { + if (mapping->bo == bo && mapping->direction == dir) { + kref_get(&mapping->ref); + goto unlock; + } + } + } + + mapping = bo->ops->pin(dev, bo, dir); + if (IS_ERR(mapping)) + goto unlock; + + spin_lock(&mapping->bo->lock); + list_add_tail(&mapping->list, &bo->mappings); + spin_unlock(&mapping->bo->lock); + + if (cache) { + INIT_LIST_HEAD(&mapping->entry); + mapping->cache = cache; + + list_add_tail(&mapping->entry, &cache->mappings); + + /* bump reference count to track the copy in the cache */ + kref_get(&mapping->ref); + } + +unlock: + if (cache) + mutex_unlock(&cache->lock); + + return mapping; +} +EXPORT_SYMBOL(host1x_bo_pin); + +static void __host1x_bo_unpin(struct kref *ref) +{ + struct host1x_bo_mapping *mapping = to_host1x_bo_mapping(ref); + + /* + * When the last reference of the mapping goes away, make sure to remove the mapping from + * the cache. 
+ */ + if (mapping->cache) + list_del(&mapping->entry); + + spin_lock(&mapping->bo->lock); + list_del(&mapping->list); + spin_unlock(&mapping->bo->lock); + + mapping->bo->ops->unpin(mapping); +} + +void host1x_bo_unpin(struct host1x_bo_mapping *mapping) +{ + struct host1x_bo_cache *cache = mapping->cache; + + if (cache) + mutex_lock(&cache->lock); + + kref_put(&mapping->ref, __host1x_bo_unpin); + + if (cache) + mutex_unlock(&cache->lock); +} +EXPORT_SYMBOL(host1x_bo_unpin); diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c index 4cd212bb570d..2a9a3a8d5931 100644 --- a/drivers/gpu/host1x/channel.c +++ b/drivers/gpu/host1x/channel.c @@ -75,6 +75,14 @@ struct host1x_channel *host1x_channel_get_index(struct host1x *host, return ch; } +void host1x_channel_stop(struct host1x_channel *channel) +{ + struct host1x *host = dev_get_drvdata(channel->dev->parent); + + host1x_hw_cdma_stop(host, &channel->cdma); +} +EXPORT_SYMBOL(host1x_channel_stop); + static void release_channel(struct kref *kref) { struct host1x_channel *channel = diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c index 8a14880c61bb..18d9c8d206e3 100644 --- a/drivers/gpu/host1x/debug.c +++ b/drivers/gpu/host1x/debug.c @@ -7,6 +7,7 @@ */ #include <linux/debugfs.h> +#include <linux/pm_runtime.h> #include <linux/seq_file.h> #include <linux/uaccess.h> @@ -52,6 +53,11 @@ static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo) { struct host1x *m = dev_get_drvdata(ch->dev->parent); struct output *o = data; + int err; + + err = pm_runtime_resume_and_get(m->dev); + if (err < 0) + return err; mutex_lock(&ch->cdma.lock); mutex_lock(&debug_lock); @@ -64,6 +70,8 @@ static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo) mutex_unlock(&debug_lock); mutex_unlock(&ch->cdma.lock); + pm_runtime_put(m->dev); + return 0; } @@ -71,9 +79,14 @@ static void show_syncpts(struct host1x *m, struct output *o) { struct list_head *pos; unsigned int i; + int err; host1x_debug_output(o, "---- syncpts ----\n"); + err = pm_runtime_resume_and_get(m->dev); + if (err < 0) + return; + for (i = 0; i < host1x_syncpt_nb_pts(m); i++) { u32 max = host1x_syncpt_read_max(m->syncpt + i); u32 min = host1x_syncpt_load(m->syncpt + i); @@ -101,6 +114,8 @@ static void show_syncpts(struct host1x *m, struct output *o) base_val); } + pm_runtime_put(m->dev); + host1x_debug_output(o, "\n"); } diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index fbb6447b8659..6994f8c0e02e 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -6,18 +6,26 @@ */ #include <linux/clk.h> +#include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/list.h> #include <linux/module.h> #include <linux/of_device.h> #include <linux/of.h> +#include <linux/pm_runtime.h> #include <linux/slab.h> +#include <soc/tegra/common.h> + #define CREATE_TRACE_POINTS #include <trace/events/host1x.h> #undef CREATE_TRACE_POINTS +#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) +#include <asm/dma-iommu.h> +#endif + #include "bus.h" #include "channel.h" #include "debug.h" @@ -132,6 +140,12 @@ static const struct host1x_sid_entry tegra186_sid_table[] = { .offset = 0x30, .limit = 0x34 }, + { + /* NVDEC */ + .base = 0x1b00, + .offset = 0x30, + .limit = 0x34 + }, }; static const struct host1x_info host1x06_info = { @@ -156,6 +170,18 @@ static const struct host1x_sid_entry tegra194_sid_table[] = { .offset = 0x30, .limit = 0x34 }, + { + /* NVDEC */ + .base = 0x1b00, + .offset = 0x30, + .limit = 0x34 
+ }, + { + /* NVDEC1 */ + .base = 0x1bc0, + .offset = 0x30, + .limit = 0x34 + }, }; static const struct host1x_info host1x07_info = { @@ -190,6 +216,9 @@ static void host1x_setup_sid_table(struct host1x *host) const struct host1x_info *info = host->info; unsigned int i; + if (!info->has_hypervisor) + return; + for (i = 0; i < info->num_sid_entries; i++) { const struct host1x_sid_entry *entry = &info->sid_table[i]; @@ -238,6 +267,17 @@ static struct iommu_domain *host1x_iommu_attach(struct host1x *host) struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev); int err; +#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) + if (host->dev->archdata.mapping) { + struct dma_iommu_mapping *mapping = + to_dma_iommu_mapping(host->dev); + arm_iommu_detach_device(host->dev); + arm_iommu_release_mapping(mapping); + + domain = iommu_get_domain_for_dev(host->dev); + } +#endif + /* * We may not always want to enable IOMMU support (for example if the * host1x firewall is already enabled and we don't support addressing @@ -347,6 +387,27 @@ static void host1x_iommu_exit(struct host1x *host) } } +static int host1x_get_resets(struct host1x *host) +{ + int err; + + host->resets[0].id = "mc"; + host->resets[1].id = "host1x"; + host->nresets = ARRAY_SIZE(host->resets); + + err = devm_reset_control_bulk_get_optional_exclusive_released( + host->dev, host->nresets, host->resets); + if (err) { + dev_err(host->dev, "failed to get reset: %d\n", err); + return err; + } + + if (WARN_ON(!host->resets[1].rstc)) + return -ENOENT; + + return 0; +} + static int host1x_probe(struct platform_device *pdev) { struct host1x *host; @@ -386,6 +447,7 @@ static int host1x_probe(struct platform_device *pdev) if (syncpt_irq < 0) return syncpt_irq; + host1x_bo_cache_init(&host->cache); mutex_init(&host->devices_lock); INIT_LIST_HEAD(&host->devices); INIT_LIST_HEAD(&host->list); @@ -423,12 +485,9 @@ static int host1x_probe(struct platform_device *pdev) return err; } - host->rst = devm_reset_control_get(&pdev->dev, "host1x"); - if (IS_ERR(host->rst)) { - err = PTR_ERR(host->rst); - dev_err(&pdev->dev, "failed to get reset: %d\n", err); + err = host1x_get_resets(host); + if (err) return err; - } err = host1x_iommu_init(host); if (err < 0) { @@ -443,22 +502,10 @@ static int host1x_probe(struct platform_device *pdev) goto iommu_exit; } - err = clk_prepare_enable(host->clk); - if (err < 0) { - dev_err(&pdev->dev, "failed to enable clock\n"); - goto free_channels; - } - - err = reset_control_deassert(host->rst); - if (err < 0) { - dev_err(&pdev->dev, "failed to deassert reset: %d\n", err); - goto unprepare_disable; - } - err = host1x_syncpt_init(host); if (err) { dev_err(&pdev->dev, "failed to initialize syncpts\n"); - goto reset_assert; + goto free_channels; } err = host1x_intr_init(host, syncpt_irq); @@ -467,10 +514,18 @@ static int host1x_probe(struct platform_device *pdev) goto deinit_syncpt; } - host1x_debug_init(host); + pm_runtime_enable(&pdev->dev); + + err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev); + if (err) + goto pm_disable; - if (host->info->has_hypervisor) - host1x_setup_sid_table(host); + /* the driver's code isn't ready yet for the dynamic RPM */ + err = pm_runtime_resume_and_get(&pdev->dev); + if (err) + goto pm_disable; + + host1x_debug_init(host); err = host1x_register(host); if (err < 0) @@ -486,13 +541,14 @@ unregister: host1x_unregister(host); deinit_debugfs: host1x_debug_deinit(host); + + pm_runtime_put_sync_suspend(&pdev->dev); +pm_disable: + pm_runtime_disable(&pdev->dev); + host1x_intr_deinit(host); 
deinit_syncpt: host1x_syncpt_deinit(host); -reset_assert: - reset_control_assert(host->rst); -unprepare_disable: - clk_disable_unprepare(host->clk); free_channels: host1x_channel_list_free(&host->channel_list); iommu_exit: @@ -507,19 +563,94 @@ static int host1x_remove(struct platform_device *pdev) host1x_unregister(host); host1x_debug_deinit(host); + + pm_runtime_force_suspend(&pdev->dev); + host1x_intr_deinit(host); host1x_syncpt_deinit(host); - reset_control_assert(host->rst); - clk_disable_unprepare(host->clk); host1x_iommu_exit(host); + host1x_bo_cache_destroy(&host->cache); return 0; } +static int __maybe_unused host1x_runtime_suspend(struct device *dev) +{ + struct host1x *host = dev_get_drvdata(dev); + int err; + + host1x_intr_stop(host); + host1x_syncpt_save(host); + + err = reset_control_bulk_assert(host->nresets, host->resets); + if (err) { + dev_err(dev, "failed to assert reset: %d\n", err); + goto resume_host1x; + } + + usleep_range(1000, 2000); + + clk_disable_unprepare(host->clk); + reset_control_bulk_release(host->nresets, host->resets); + + return 0; + +resume_host1x: + host1x_setup_sid_table(host); + host1x_syncpt_restore(host); + host1x_intr_start(host); + + return err; +} + +static int __maybe_unused host1x_runtime_resume(struct device *dev) +{ + struct host1x *host = dev_get_drvdata(dev); + int err; + + err = reset_control_bulk_acquire(host->nresets, host->resets); + if (err) { + dev_err(dev, "failed to acquire reset: %d\n", err); + return err; + } + + err = clk_prepare_enable(host->clk); + if (err) { + dev_err(dev, "failed to enable clock: %d\n", err); + goto release_reset; + } + + err = reset_control_bulk_deassert(host->nresets, host->resets); + if (err < 0) { + dev_err(dev, "failed to deassert reset: %d\n", err); + goto disable_clk; + } + + host1x_setup_sid_table(host); + host1x_syncpt_restore(host); + host1x_intr_start(host); + + return 0; + +disable_clk: + clk_disable_unprepare(host->clk); +release_reset: + reset_control_bulk_release(host->nresets, host->resets); + + return err; +} + +static const struct dev_pm_ops host1x_pm_ops = { + SET_RUNTIME_PM_OPS(host1x_runtime_suspend, host1x_runtime_resume, + NULL) + /* TODO: add system suspend-resume once driver will be ready for that */ +}; + static struct platform_driver tegra_host1x_driver = { .driver = { .name = "tegra-host1x", .of_match_table = host1x_of_match, + .pm = &host1x_pm_ops, }, .probe = host1x_probe, .remove = host1x_remove, diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h index fa6d4bc46e98..ca4b082f0cd4 100644 --- a/drivers/gpu/host1x/dev.h +++ b/drivers/gpu/host1x/dev.h @@ -118,7 +118,8 @@ struct host1x { struct host1x_syncpt_base *bases; struct device *dev; struct clk *clk; - struct reset_control *rst; + struct reset_control_bulk_data resets[2]; + unsigned int nresets; struct iommu_group *group; struct iommu_domain *domain; @@ -149,6 +150,8 @@ struct host1x { struct list_head list; struct device_dma_parameters dma_parms; + + struct host1x_bo_cache cache; }; void host1x_hypervisor_writel(struct host1x *host1x, u32 r, u32 v); diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c index 1999780a7203..6b40e9af1e88 100644 --- a/drivers/gpu/host1x/hw/channel_hw.c +++ b/drivers/gpu/host1x/hw/channel_hw.c @@ -159,6 +159,27 @@ static void host1x_channel_set_streamid(struct host1x_channel *channel) #endif } +static void host1x_enable_gather_filter(struct host1x_channel *ch) +{ +#if HOST1X_HW >= 6 + struct host1x *host = dev_get_drvdata(ch->dev->parent); + u32 val; + 
+ if (!host->hv_regs) + return; + + val = host1x_hypervisor_readl( + host, HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(ch->id / 32)); + val |= BIT(ch->id % 32); + host1x_hypervisor_writel( + host, val, HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(ch->id / 32)); +#elif HOST1X_HW >= 4 + host1x_ch_writel(ch, + HOST1X_CHANNEL_CHANNELCTRL_KERNEL_FILTER_GBUFFER(1), + HOST1X_CHANNEL_CHANNELCTRL); +#endif +} + static int channel_submit(struct host1x_job *job) { struct host1x_channel *ch = job->channel; @@ -190,6 +211,7 @@ static int channel_submit(struct host1x_job *job) } host1x_channel_set_streamid(ch); + host1x_enable_gather_filter(ch); /* begin a CDMA submit */ err = host1x_cdma_begin(&ch->cdma, job); @@ -249,27 +271,6 @@ error: return err; } -static void enable_gather_filter(struct host1x *host, - struct host1x_channel *ch) -{ -#if HOST1X_HW >= 6 - u32 val; - - if (!host->hv_regs) - return; - - val = host1x_hypervisor_readl( - host, HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(ch->id / 32)); - val |= BIT(ch->id % 32); - host1x_hypervisor_writel( - host, val, HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(ch->id / 32)); -#elif HOST1X_HW >= 4 - host1x_ch_writel(ch, - HOST1X_CHANNEL_CHANNELCTRL_KERNEL_FILTER_GBUFFER(1), - HOST1X_CHANNEL_CHANNELCTRL); -#endif -} - static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev, unsigned int index) { @@ -278,7 +279,6 @@ static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev, #else ch->regs = dev->regs + index * 0x100; #endif - enable_gather_filter(dev, ch); return 0; } diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c index 45b6be927ec4..965ba21818b1 100644 --- a/drivers/gpu/host1x/intr.c +++ b/drivers/gpu/host1x/intr.c @@ -297,14 +297,11 @@ int host1x_intr_init(struct host1x *host, unsigned int irq_sync) "host1x_sp_%02u", id); } - host1x_intr_start(host); - return 0; } void host1x_intr_deinit(struct host1x *host) { - host1x_intr_stop(host); } void host1x_intr_start(struct host1x *host) diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c index 0eef6df7c89e..5e8c183167b7 100644 --- a/drivers/gpu/host1x/job.c +++ b/drivers/gpu/host1x/job.c @@ -134,20 +134,20 @@ EXPORT_SYMBOL(host1x_job_add_wait); static unsigned int pin_job(struct host1x *host, struct host1x_job *job) { + unsigned long mask = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE; struct host1x_client *client = job->client; struct device *dev = client->dev; struct host1x_job_gather *g; - struct iommu_domain *domain; - struct sg_table *sgt; unsigned int i; int err; - domain = iommu_get_domain_for_dev(dev); job->num_unpins = 0; for (i = 0; i < job->num_relocs; i++) { struct host1x_reloc *reloc = &job->relocs[i]; - dma_addr_t phys_addr, *phys; + enum dma_data_direction direction; + struct host1x_bo_mapping *map; + struct host1x_bo *bo; reloc->target.bo = host1x_bo_get(reloc->target.bo); if (!reloc->target.bo) { @@ -155,64 +155,44 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) goto unpin; } - /* - * If the client device is not attached to an IOMMU, the - * physical address of the buffer object can be used. - * - * Similarly, when an IOMMU domain is shared between all - * host1x clients, the IOVA is already available, so no - * need to map the buffer object again. - * - * XXX Note that this isn't always safe to do because it - * relies on an assumption that no cache maintenance is - * needed on the buffer objects. 
- */ - if (!domain || client->group) - phys = &phys_addr; - else - phys = NULL; - - sgt = host1x_bo_pin(dev, reloc->target.bo, phys); - if (IS_ERR(sgt)) { - err = PTR_ERR(sgt); - goto unpin; - } + bo = reloc->target.bo; - if (sgt) { - unsigned long mask = HOST1X_RELOC_READ | - HOST1X_RELOC_WRITE; - enum dma_data_direction dir; - - switch (reloc->flags & mask) { - case HOST1X_RELOC_READ: - dir = DMA_TO_DEVICE; - break; + switch (reloc->flags & mask) { + case HOST1X_RELOC_READ: + direction = DMA_TO_DEVICE; + break; - case HOST1X_RELOC_WRITE: - dir = DMA_FROM_DEVICE; - break; + case HOST1X_RELOC_WRITE: + direction = DMA_FROM_DEVICE; + break; - case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE: - dir = DMA_BIDIRECTIONAL; - break; + case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE: + direction = DMA_BIDIRECTIONAL; + break; - default: - err = -EINVAL; - goto unpin; - } + default: + err = -EINVAL; + goto unpin; + } - err = dma_map_sgtable(dev, sgt, dir, 0); - if (err) - goto unpin; + map = host1x_bo_pin(dev, bo, direction, &client->cache); + if (IS_ERR(map)) { + err = PTR_ERR(map); + goto unpin; + } - job->unpins[job->num_unpins].dev = dev; - job->unpins[job->num_unpins].dir = dir; - phys_addr = sg_dma_address(sgt->sgl); + /* + * host1x clients are generally not able to do scatter-gather themselves, so fail + * if the buffer is discontiguous and we fail to map its SG table to a single + * contiguous chunk of I/O virtual memory. + */ + if (map->chunks > 1) { + err = -EINVAL; + goto unpin; } - job->addr_phys[job->num_unpins] = phys_addr; - job->unpins[job->num_unpins].bo = reloc->target.bo; - job->unpins[job->num_unpins].sgt = sgt; + job->addr_phys[job->num_unpins] = map->phys; + job->unpins[job->num_unpins].map = map; job->num_unpins++; } @@ -224,12 +204,11 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) return 0; for (i = 0; i < job->num_cmds; i++) { + struct host1x_bo_mapping *map; size_t gather_size = 0; struct scatterlist *sg; - dma_addr_t phys_addr; unsigned long shift; struct iova *alloc; - dma_addr_t *phys; unsigned int j; if (job->cmds[i].is_wait) @@ -243,25 +222,16 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) goto unpin; } - /** - * If the host1x is not attached to an IOMMU, there is no need - * to map the buffer object for the host1x, since the physical - * address can simply be used. 
- */ - if (!iommu_get_domain_for_dev(host->dev)) - phys = &phys_addr; - else - phys = NULL; - - sgt = host1x_bo_pin(host->dev, g->bo, phys); - if (IS_ERR(sgt)) { - err = PTR_ERR(sgt); - goto put; + map = host1x_bo_pin(host->dev, g->bo, DMA_TO_DEVICE, &host->cache); + if (IS_ERR(map)) { + err = PTR_ERR(map); + goto unpin; } if (host->domain) { - for_each_sgtable_sg(sgt, sg, j) + for_each_sgtable_sg(map->sgt, sg, j) gather_size += sg->length; + gather_size = iova_align(&host->iova, gather_size); shift = iova_shift(&host->iova); @@ -272,33 +242,23 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) goto put; } - err = iommu_map_sgtable(host->domain, - iova_dma_addr(&host->iova, alloc), - sgt, IOMMU_READ); + err = iommu_map_sgtable(host->domain, iova_dma_addr(&host->iova, alloc), + map->sgt, IOMMU_READ); if (err == 0) { __free_iova(&host->iova, alloc); err = -EINVAL; goto put; } - job->unpins[job->num_unpins].size = gather_size; - phys_addr = iova_dma_addr(&host->iova, alloc); - } else if (sgt) { - err = dma_map_sgtable(host->dev, sgt, DMA_TO_DEVICE, 0); - if (err) - goto put; - - job->unpins[job->num_unpins].dir = DMA_TO_DEVICE; - job->unpins[job->num_unpins].dev = host->dev; - phys_addr = sg_dma_address(sgt->sgl); + map->phys = iova_dma_addr(&host->iova, alloc); + map->size = gather_size; } - job->addr_phys[job->num_unpins] = phys_addr; - job->gather_addr_phys[i] = phys_addr; - - job->unpins[job->num_unpins].bo = g->bo; - job->unpins[job->num_unpins].sgt = sgt; + job->addr_phys[job->num_unpins] = map->phys; + job->unpins[job->num_unpins].map = map; job->num_unpins++; + + job->gather_addr_phys[i] = map->phys; } return 0; @@ -690,22 +650,16 @@ void host1x_job_unpin(struct host1x_job *job) unsigned int i; for (i = 0; i < job->num_unpins; i++) { - struct host1x_job_unpin_data *unpin = &job->unpins[i]; - struct device *dev = unpin->dev ?: host->dev; - struct sg_table *sgt = unpin->sgt; - - if (!job->enable_firewall && unpin->size && host->domain) { - iommu_unmap(host->domain, job->addr_phys[i], - unpin->size); - free_iova(&host->iova, - iova_pfn(&host->iova, job->addr_phys[i])); - } + struct host1x_bo_mapping *map = job->unpins[i].map; + struct host1x_bo *bo = map->bo; - if (unpin->dev && sgt) - dma_unmap_sgtable(unpin->dev, sgt, unpin->dir, 0); + if (!job->enable_firewall && map->size && host->domain) { + iommu_unmap(host->domain, job->addr_phys[i], map->size); + free_iova(&host->iova, iova_pfn(&host->iova, job->addr_phys[i])); + } - host1x_bo_unpin(dev, unpin->bo, sgt); - host1x_bo_put(unpin->bo); + host1x_bo_unpin(map); + host1x_bo_put(bo); } job->num_unpins = 0; diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h index b4428c5495c9..dad5a1946693 100644 --- a/drivers/gpu/host1x/job.h +++ b/drivers/gpu/host1x/job.h @@ -35,11 +35,7 @@ struct host1x_job_cmd { }; struct host1x_job_unpin_data { - struct host1x_bo *bo; - struct sg_table *sgt; - struct device *dev; - size_t size; - enum dma_data_direction dir; + struct host1x_bo_mapping *map; }; /* diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c index d198a10848c6..e08e331e46ae 100644 --- a/drivers/gpu/host1x/syncpt.c +++ b/drivers/gpu/host1x/syncpt.c @@ -143,6 +143,8 @@ void host1x_syncpt_restore(struct host1x *host) for (i = 0; i < host1x_syncpt_nb_bases(host); i++) host1x_hw_syncpt_restore_wait_base(host, sp_base + i); + host1x_hw_syncpt_enable_protection(host); + wmb(); } @@ -366,9 +368,6 @@ int host1x_syncpt_init(struct host1x *host) host->syncpt = syncpt; host->bases = bases; - 
host1x_syncpt_restore(host); - host1x_hw_syncpt_enable_protection(host); - /* Allocate sync point to use for clearing waits for expired fences */ host->nop_sp = host1x_syncpt_alloc(host, 0, "reserved-nop"); if (!host->nop_sp) diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 9f5435b55949..a7c78ac96270 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -207,14 +207,14 @@ config HID_CHERRY config HID_CHICONY tristate "Chicony devices" - depends on HID + depends on USB_HID default !EXPERT help Support for Chicony Tactical pad and special keys on Chicony keyboards. config HID_CORSAIR tristate "Corsair devices" - depends on HID && USB && LEDS_CLASS + depends on USB_HID && LEDS_CLASS help Support for Corsair devices that are not fully compliant with the HID standard. @@ -245,7 +245,7 @@ config HID_MACALLY config HID_PRODIKEYS tristate "Prodikeys PC-MIDI Keyboard support" - depends on HID && SND + depends on USB_HID && SND select SND_RAWMIDI help Support for Prodikeys PC-MIDI Keyboard device support. @@ -560,7 +560,7 @@ config HID_LENOVO config HID_LOGITECH tristate "Logitech devices" - depends on HID + depends on USB_HID depends on LEDS_CLASS default !EXPERT help @@ -951,7 +951,7 @@ config HID_SAITEK config HID_SAMSUNG tristate "Samsung InfraRed remote control or keyboards" - depends on HID + depends on USB_HID help Support for Samsung InfraRed remote control or keyboards. diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index 5d57214d8dee..08c9a9a60ae4 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -854,7 +854,7 @@ static int asus_input_mapping(struct hid_device *hdev, switch (usage->hid & HID_USAGE) { case 0x10: asus_map_key_clear(KEY_BRIGHTNESSDOWN); break; case 0x20: asus_map_key_clear(KEY_BRIGHTNESSUP); break; - case 0x35: asus_map_key_clear(KEY_SCREENLOCK); break; + case 0x35: asus_map_key_clear(KEY_DISPLAY_OFF); break; case 0x6c: asus_map_key_clear(KEY_SLEEP); break; case 0x7c: asus_map_key_clear(KEY_MICMUTE); break; case 0x82: asus_map_key_clear(KEY_CAMERA); break; @@ -1028,8 +1028,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id) if (drvdata->quirks & QUIRK_IS_MULTITOUCH) drvdata->tp = &asus_i2c_tp; - if ((drvdata->quirks & QUIRK_T100_KEYBOARD) && - hid_is_using_ll_driver(hdev, &usb_hid_driver)) { + if ((drvdata->quirks & QUIRK_T100_KEYBOARD) && hid_is_usb(hdev)) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); if (intf->altsetting->desc.bInterfaceNumber == T100_TPAD_INTF) { @@ -1057,8 +1056,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id) drvdata->tp = &asus_t100chi_tp; } - if ((drvdata->quirks & QUIRK_MEDION_E1239T) && - hid_is_using_ll_driver(hdev, &usb_hid_driver)) { + if ((drvdata->quirks & QUIRK_MEDION_E1239T) && hid_is_usb(hdev)) { struct usb_host_interface *alt = to_usb_interface(hdev->dev.parent)->altsetting; diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c index db6da21ade06..74ad8bf98bfd 100644 --- a/drivers/hid/hid-bigbenff.c +++ b/drivers/hid/hid-bigbenff.c @@ -191,7 +191,7 @@ static void bigben_worker(struct work_struct *work) struct bigben_device, worker); struct hid_field *report_field = bigben->report->field[0]; - if (bigben->removed) + if (bigben->removed || !report_field) return; if (bigben->work_led) { diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c index ca556d39da2a..f04d2aa23efe 100644 --- a/drivers/hid/hid-chicony.c +++ b/drivers/hid/hid-chicony.c @@ -114,6 +114,9 @@ static int 
ch_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; + if (!hid_is_usb(hdev)) + return -EINVAL; + hdev->quirks |= HID_QUIRK_INPUT_PER_APP; ret = hid_parse(hdev); if (ret) { diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c index 902a60e249ed..8c895c820b67 100644 --- a/drivers/hid/hid-corsair.c +++ b/drivers/hid/hid-corsair.c @@ -553,7 +553,12 @@ static int corsair_probe(struct hid_device *dev, const struct hid_device_id *id) int ret; unsigned long quirks = id->driver_data; struct corsair_drvdata *drvdata; - struct usb_interface *usbif = to_usb_interface(dev->dev.parent); + struct usb_interface *usbif; + + if (!hid_is_usb(dev)) + return -EINVAL; + + usbif = to_usb_interface(dev->dev.parent); drvdata = devm_kzalloc(&dev->dev, sizeof(struct corsair_drvdata), GFP_KERNEL); diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c index 021049805bb7..3091355d48df 100644 --- a/drivers/hid/hid-elan.c +++ b/drivers/hid/hid-elan.c @@ -50,7 +50,7 @@ struct elan_drvdata { static int is_not_elan_touchpad(struct hid_device *hdev) { - if (hdev->bus == BUS_USB) { + if (hid_is_usb(hdev)) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); return (intf->altsetting->desc.bInterfaceNumber != diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c index 383dfda8c12f..8e960d7b233b 100644 --- a/drivers/hid/hid-elo.c +++ b/drivers/hid/hid-elo.c @@ -230,6 +230,9 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id) int ret; struct usb_device *udev; + if (!hid_is_usb(hdev)) + return -EINVAL; + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; diff --git a/drivers/hid/hid-ft260.c b/drivers/hid/hid-ft260.c index 4ef1c3b8094e..79505c64dbfe 100644 --- a/drivers/hid/hid-ft260.c +++ b/drivers/hid/hid-ft260.c @@ -915,6 +915,9 @@ static int ft260_probe(struct hid_device *hdev, const struct hid_device_id *id) struct ft260_get_chip_version_report version; int ret; + if (!hid_is_usb(hdev)) + return -EINVAL; + dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; @@ -966,24 +969,23 @@ static int ft260_probe(struct hid_device *hdev, const struct hid_device_id *id) mutex_init(&dev->lock); init_completion(&dev->wait); + ret = ft260_xfer_status(dev); + if (ret) + ft260_i2c_reset(hdev); + + i2c_set_adapdata(&dev->adap, dev); ret = i2c_add_adapter(&dev->adap); if (ret) { hid_err(hdev, "failed to add i2c adapter\n"); goto err_hid_close; } - i2c_set_adapdata(&dev->adap, dev); - ret = sysfs_create_group(&hdev->dev.kobj, &ft260_attr_group); if (ret < 0) { hid_err(hdev, "failed to create sysfs attrs\n"); goto err_i2c_free; } - ret = ft260_xfer_status(dev); - if (ret) - ft260_i2c_reset(hdev); - return 0; err_i2c_free: diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c index 8123b871a3eb..0403beb3104b 100644 --- a/drivers/hid/hid-google-hammer.c +++ b/drivers/hid/hid-google-hammer.c @@ -586,6 +586,8 @@ static const struct hid_device_id hammer_devices[] = { { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_EEL) }, + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) }, diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c index 0a38e8e9bc78..403506b9697e 100644 --- a/drivers/hid/hid-holtek-kbd.c 
+++ b/drivers/hid/hid-holtek-kbd.c @@ -140,12 +140,17 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type, static int holtek_kbd_probe(struct hid_device *hdev, const struct hid_device_id *id) { - struct usb_interface *intf = to_usb_interface(hdev->dev.parent); - int ret = hid_parse(hdev); + struct usb_interface *intf; + int ret; + + if (!hid_is_usb(hdev)) + return -EINVAL; + ret = hid_parse(hdev); if (!ret) ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); + intf = to_usb_interface(hdev->dev.parent); if (!ret && intf->cur_altsetting->desc.bInterfaceNumber == 1) { struct hid_input *hidinput; list_for_each_entry(hidinput, &hdev->inputs, list) { diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c index 195b735b001d..b7172c48ef9f 100644 --- a/drivers/hid/hid-holtek-mouse.c +++ b/drivers/hid/hid-holtek-mouse.c @@ -62,6 +62,14 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, return rdesc; } +static int holtek_mouse_probe(struct hid_device *hdev, + const struct hid_device_id *id) +{ + if (!hid_is_usb(hdev)) + return -EINVAL; + return 0; +} + static const struct hid_device_id holtek_mouse_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) }, @@ -83,6 +91,7 @@ static struct hid_driver holtek_mouse_driver = { .name = "holtek_mouse", .id_table = holtek_mouse_devices, .report_fixup = holtek_mouse_report_fixup, + .probe = holtek_mouse_probe, }; module_hid_driver(holtek_mouse_driver); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 95037a3e2e6e..19da07777d62 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -397,7 +397,9 @@ #define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401 #define USB_DEVICE_ID_HP_X2 0x074d #define USB_DEVICE_ID_HP_X2_10_COVER 0x0755 +#define I2C_DEVICE_ID_HP_ENVY_X360_15 0x2d05 #define I2C_DEVICE_ID_HP_SPECTRE_X360_15 0x2817 +#define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544 #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706 #define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A @@ -500,6 +502,7 @@ #define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d #define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044 #define USB_DEVICE_ID_GOOGLE_DON 0x5050 +#define USB_DEVICE_ID_GOOGLE_EEL 0x5057 #define USB_VENDOR_ID_GOTOP 0x08f2 #define USB_DEVICE_ID_SUPER_Q2 0x007f @@ -885,6 +888,7 @@ #define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7 #define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9 #define USB_DEVICE_ID_MS_POWER_COVER 0x07da +#define USB_DEVICE_ID_MS_SURFACE3_COVER 0x07de #define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd #define USB_DEVICE_ID_MS_PIXART_MOUSE 0x00cb #define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS 0x02e0 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 2c72ce4147b1..03f994541981 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -160,6 +160,7 @@ static int hidinput_setkeycode(struct input_dev *dev, if (usage) { *old_keycode = usage->type == EV_KEY ? 
usage->code : KEY_RESERVED; + usage->type = EV_KEY; usage->code = ke->keycode; clear_bit(*old_keycode, dev->keybit); @@ -324,6 +325,10 @@ static const struct hid_device_id hid_battery_quirks[] = { HID_BATTERY_QUIRK_IGNORE }, { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN), HID_BATTERY_QUIRK_IGNORE }, + { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN), + HID_BATTERY_QUIRK_IGNORE }, + { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15), + HID_BATTERY_QUIRK_IGNORE }, { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15), HID_BATTERY_QUIRK_IGNORE }, { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN), @@ -650,10 +655,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel code += KEY_MACRO1; else code += BTN_TRIGGER_HAPPY - 0x1e; - } else { - goto ignore; + break; } - break; + fallthrough; default: switch (field->physical) { case HID_GD_MOUSE: diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c index d40af911df63..fb3f7258009c 100644 --- a/drivers/hid/hid-lg.c +++ b/drivers/hid/hid-lg.c @@ -749,12 +749,18 @@ static int lg_raw_event(struct hid_device *hdev, struct hid_report *report, static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) { - struct usb_interface *iface = to_usb_interface(hdev->dev.parent); - __u8 iface_num = iface->cur_altsetting->desc.bInterfaceNumber; + struct usb_interface *iface; + __u8 iface_num; unsigned int connect_mask = HID_CONNECT_DEFAULT; struct lg_drv_data *drv_data; int ret; + if (!hid_is_usb(hdev)) + return -EINVAL; + + iface = to_usb_interface(hdev->dev.parent); + iface_num = iface->cur_altsetting->desc.bInterfaceNumber; + /* G29 only work with the 1st interface */ if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) && (iface_num != 0)) { diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index a0017b010c34..7106b921b53c 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -1777,7 +1777,7 @@ static int logi_dj_probe(struct hid_device *hdev, case recvr_type_bluetooth: no_dj_interfaces = 2; break; case recvr_type_dinovo: no_dj_interfaces = 2; break; } - if (hid_is_using_ll_driver(hdev, &usb_hid_driver)) { + if (hid_is_usb(hdev)) { intf = to_usb_interface(hdev->dev.parent); if (intf && intf->altsetting->desc.bInterfaceNumber >= no_dj_interfaces) { diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 686788ebf3e1..d7687ce70614 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -256,8 +256,11 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda unsigned long now = jiffies; int step_x = msc->touches[id].scroll_x - x; int step_y = msc->touches[id].scroll_y - y; - int step_hr = ((64 - (int)scroll_speed) * msc->scroll_accel) / - SCROLL_HR_STEPS; + int step_hr = + max_t(int, + ((64 - (int)scroll_speed) * msc->scroll_accel) / + SCROLL_HR_STEPS, + 1); int step_x_hr = msc->touches[id].scroll_x_hr - x; int step_y_hr = msc->touches[id].scroll_y_hr - y; diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index e1afddb7b33d..082376a6cb3d 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -1888,6 +1888,11 @@ static const struct hid_device_id mt_devices[] = { MT_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, USB_DEVICE_ID_CVTOUCH_SCREEN) }, + /* eGalax devices (SAW) */ + { .driver_data = MT_CLS_EXPORT_ALL_INPUTS, + 
MT_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER) }, + /* eGalax devices (resistive) */ { .driver_data = MT_CLS_EGALAX, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c index a1e0f6849875..b6a9a0f3966e 100644 --- a/drivers/hid/hid-nintendo.c +++ b/drivers/hid/hid-nintendo.c @@ -189,6 +189,7 @@ struct joycon_rumble_amp_data { u16 amp; }; +#if IS_ENABLED(CONFIG_NINTENDO_FF) /* * These tables are from * https://github.com/dekuNukem/Nintendo_Switch_Reverse_Engineering/blob/master/rumble_data_table.md @@ -289,6 +290,10 @@ static const struct joycon_rumble_amp_data joycon_rumble_amplitudes[] = { { 0xc2, 0x8070, 940 }, { 0xc4, 0x0071, 960 }, { 0xc6, 0x8071, 981 }, { 0xc8, 0x0072, joycon_max_rumble_amp } }; +static const u16 JC_RUMBLE_DFLT_LOW_FREQ = 160; +static const u16 JC_RUMBLE_DFLT_HIGH_FREQ = 320; +#endif /* IS_ENABLED(CONFIG_NINTENDO_FF) */ +static const u16 JC_RUMBLE_PERIOD_MS = 50; /* States for controller state machine */ enum joycon_ctlr_state { @@ -397,9 +402,6 @@ struct joycon_input_report { #define JC_RUMBLE_DATA_SIZE 8 #define JC_RUMBLE_QUEUE_SIZE 8 -static const u16 JC_RUMBLE_DFLT_LOW_FREQ = 160; -static const u16 JC_RUMBLE_DFLT_HIGH_FREQ = 320; -static const u16 JC_RUMBLE_PERIOD_MS = 50; static const unsigned short JC_RUMBLE_ZERO_AMP_PKT_CNT = 5; static const char * const joycon_player_led_names[] = { @@ -1850,8 +1852,10 @@ static int joycon_leds_create(struct joycon_ctlr *ctlr) d_name, "green", joycon_player_led_names[i]); - if (!name) + if (!name) { + mutex_unlock(&joycon_input_num_mutex); return -ENOMEM; + } led = &ctlr->leds[i]; led->name = name; @@ -1864,6 +1868,7 @@ static int joycon_leds_create(struct joycon_ctlr *ctlr) ret = devm_led_classdev_register(&hdev->dev, led); if (ret) { hid_err(hdev, "Failed registering %s LED\n", led->name); + mutex_unlock(&joycon_input_num_mutex); return ret; } } diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c index 2666af02d5c1..e4e9471d0f1e 100644 --- a/drivers/hid/hid-prodikeys.c +++ b/drivers/hid/hid-prodikeys.c @@ -798,12 +798,18 @@ static int pk_raw_event(struct hid_device *hdev, struct hid_report *report, static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; - struct usb_interface *intf = to_usb_interface(hdev->dev.parent); - unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber; + struct usb_interface *intf; + unsigned short ifnum; unsigned long quirks = id->driver_data; struct pk_device *pk; struct pcmidi_snd *pm = NULL; + if (!hid_is_usb(hdev)) + return -EINVAL; + + intf = to_usb_interface(hdev->dev.parent); + ifnum = intf->cur_altsetting->desc.bInterfaceNumber; + pk = kzalloc(sizeof(*pk), GFP_KERNEL); if (pk == NULL) { hid_err(hdev, "can't alloc descriptor\n"); diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 06b7908c874c..ee7e504e7279 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -124,6 +124,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PIXART_MOUSE), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE3_COVER), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS 
}, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2), HID_QUIRK_NO_INIT_REPORTS }, diff --git a/drivers/hid/hid-roccat-arvo.c b/drivers/hid/hid-roccat-arvo.c index 4556d2a50f75..d94ee0539421 100644 --- a/drivers/hid/hid-roccat-arvo.c +++ b/drivers/hid/hid-roccat-arvo.c @@ -344,6 +344,9 @@ static int arvo_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-isku.c b/drivers/hid/hid-roccat-isku.c index ce5f22519956..e95d59cd8d07 100644 --- a/drivers/hid/hid-roccat-isku.c +++ b/drivers/hid/hid-roccat-isku.c @@ -324,6 +324,9 @@ static int isku_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c index ea17abc7ad52..76da04801ca9 100644 --- a/drivers/hid/hid-roccat-kone.c +++ b/drivers/hid/hid-roccat-kone.c @@ -749,6 +749,9 @@ static int kone_probe(struct hid_device *hdev, const struct hid_device_id *id) { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c index 0316edf8c5bb..1896c69ea512 100644 --- a/drivers/hid/hid-roccat-koneplus.c +++ b/drivers/hid/hid-roccat-koneplus.c @@ -431,6 +431,9 @@ static int koneplus_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-konepure.c b/drivers/hid/hid-roccat-konepure.c index 5248b3c7cf78..cf8eeb33a125 100644 --- a/drivers/hid/hid-roccat-konepure.c +++ b/drivers/hid/hid-roccat-konepure.c @@ -133,6 +133,9 @@ static int konepure_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c index 960012881570..6fb9b9563769 100644 --- a/drivers/hid/hid-roccat-kovaplus.c +++ b/drivers/hid/hid-roccat-kovaplus.c @@ -501,6 +501,9 @@ static int kovaplus_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-lua.c b/drivers/hid/hid-roccat-lua.c index 4a88a76d5c62..d5ddf0d68346 100644 --- a/drivers/hid/hid-roccat-lua.c +++ b/drivers/hid/hid-roccat-lua.c @@ -160,6 +160,9 @@ static int lua_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c index 989927defe8d..4fcc8e7d276f 100644 --- a/drivers/hid/hid-roccat-pyra.c +++ b/drivers/hid/hid-roccat-pyra.c @@ -449,6 +449,9 @@ static int pyra_probe(struct hid_device *hdev, const struct hid_device_id *id) { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-ryos.c b/drivers/hid/hid-roccat-ryos.c index 3956a6c9c521..5bf1971a2b14 100644 --- 
a/drivers/hid/hid-roccat-ryos.c +++ b/drivers/hid/hid-roccat-ryos.c @@ -141,6 +141,9 @@ static int ryos_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-savu.c b/drivers/hid/hid-roccat-savu.c index 818701f7a028..a784bb4ee651 100644 --- a/drivers/hid/hid-roccat-savu.c +++ b/drivers/hid/hid-roccat-savu.c @@ -113,6 +113,9 @@ static int savu_probe(struct hid_device *hdev, { int retval; + if (!hid_is_usb(hdev)) + return -EINVAL; + retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c index 2e1c31156eca..cf5992e97094 100644 --- a/drivers/hid/hid-samsung.c +++ b/drivers/hid/hid-samsung.c @@ -152,6 +152,9 @@ static int samsung_probe(struct hid_device *hdev, int ret; unsigned int cmask = HID_CONNECT_DEFAULT; + if (!hid_is_usb(hdev)) + return -EINVAL; + ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index d1b107d547f5..60ec2b29d54d 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c @@ -3000,7 +3000,6 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) sc->quirks = quirks; hid_set_drvdata(hdev, sc); sc->hdev = hdev; - usbdev = to_usb_device(sc->hdev->dev.parent->parent); ret = hid_parse(hdev); if (ret) { @@ -3038,14 +3037,23 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) */ if (!(hdev->claimed & HID_CLAIMED_INPUT)) { hid_err(hdev, "failed to claim input\n"); - hid_hw_stop(hdev); - return -ENODEV; + ret = -ENODEV; + goto err; } if (sc->quirks & (GHL_GUITAR_PS3WIIU | GHL_GUITAR_PS4)) { + if (!hid_is_usb(hdev)) { + ret = -EINVAL; + goto err; + } + + usbdev = to_usb_device(sc->hdev->dev.parent->parent); + sc->ghl_urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!sc->ghl_urb) - return -ENOMEM; + if (!sc->ghl_urb) { + ret = -ENOMEM; + goto err; + } if (sc->quirks & GHL_GUITAR_PS3WIIU) ret = ghl_init_urb(sc, usbdev, ghl_ps3wiiu_magic_data, @@ -3055,7 +3063,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) ARRAY_SIZE(ghl_ps4_magic_data)); if (ret) { hid_err(hdev, "error preparing URB\n"); - return ret; + goto err; } timer_setup(&sc->ghl_poke_timer, ghl_magic_poke, 0); @@ -3064,6 +3072,10 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) } return ret; + +err: + hid_hw_stop(hdev); + return ret; } static void sony_remove(struct hid_device *hdev) diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c index d44550aa8805..03b935ff02d5 100644 --- a/drivers/hid/hid-thrustmaster.c +++ b/drivers/hid/hid-thrustmaster.c @@ -205,7 +205,7 @@ static void thrustmaster_model_handler(struct urb *urb) struct tm_wheel *tm_wheel = hid_get_drvdata(hdev); uint16_t model = 0; int i, ret; - const struct tm_wheel_info *twi = 0; + const struct tm_wheel_info *twi = NULL; if (urb->status) { hid_err(hdev, "URB to get model id failed with error %d\n", urb->status); @@ -238,7 +238,7 @@ static void thrustmaster_model_handler(struct urb *urb) tm_wheel->usb_dev, usb_sndctrlpipe(tm_wheel->usb_dev, 0), (char *)tm_wheel->change_request, - 0, 0, // We do not expect any response from the wheel + NULL, 0, // We do not expect any response from the wheel thrustmaster_change_handler, hdev ); @@ -272,7 +272,10 @@ static void thrustmaster_remove(struct hid_device *hdev) static int 
thrustmaster_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret = 0; - struct tm_wheel *tm_wheel = 0; + struct tm_wheel *tm_wheel = NULL; + + if (!hid_is_usb(hdev)) + return -EINVAL; ret = hid_parse(hdev); if (ret) { diff --git a/drivers/hid/hid-u2fzero.c b/drivers/hid/hid-u2fzero.c index 31ea7fc69916..ad489caf53ad 100644 --- a/drivers/hid/hid-u2fzero.c +++ b/drivers/hid/hid-u2fzero.c @@ -311,7 +311,7 @@ static int u2fzero_probe(struct hid_device *hdev, unsigned int minor; int ret; - if (!hid_is_using_ll_driver(hdev, &usb_hid_driver)) + if (!hid_is_usb(hdev)) return -EINVAL; dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL); diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c index 6a9865dd703c..d8ab0139e5cd 100644 --- a/drivers/hid/hid-uclogic-core.c +++ b/drivers/hid/hid-uclogic-core.c @@ -164,6 +164,9 @@ static int uclogic_probe(struct hid_device *hdev, struct uclogic_drvdata *drvdata = NULL; bool params_initialized = false; + if (!hid_is_usb(hdev)) + return -EINVAL; + /* * libinput requires the pad interface to be on a different node * than the pen, so use QUIRK_MULTI_INPUT for all tablets. diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c index 3d67b748a3b9..adff1bd68d9f 100644 --- a/drivers/hid/hid-uclogic-params.c +++ b/drivers/hid/hid-uclogic-params.c @@ -843,8 +843,7 @@ int uclogic_params_init(struct uclogic_params *params, struct uclogic_params p = {0, }; /* Check arguments */ - if (params == NULL || hdev == NULL || - !hid_is_using_ll_driver(hdev, &usb_hid_driver)) { + if (params == NULL || hdev == NULL || !hid_is_usb(hdev)) { rc = -EINVAL; goto cleanup; } diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index 1c5039081db2..8e9d9450cb83 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c @@ -266,7 +266,8 @@ static void __maybe_unused ish_resume_handler(struct work_struct *work) if (ish_should_leave_d0i3(pdev) && !dev->suspend_flag && IPC_IS_ISH_ILUP(fwsts)) { - disable_irq_wake(pdev->irq); + if (device_may_wakeup(&pdev->dev)) + disable_irq_wake(pdev->irq); ish_set_host_ready(dev); @@ -337,7 +338,8 @@ static int __maybe_unused ish_suspend(struct device *device) */ pci_save_state(pdev); - enable_irq_wake(pdev->irq); + if (device_may_wakeup(&pdev->dev)) + enable_irq_wake(pdev->irq); } } else { /* diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c index 1b486f262747..0e1183e96147 100644 --- a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c +++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c @@ -76,9 +76,12 @@ enum ish_loader_commands { #define LOADER_XFER_MODE_ISHTP BIT(1) /* ISH Transport Loader client unique GUID */ -static const guid_t loader_ishtp_guid = - GUID_INIT(0xc804d06a, 0x55bd, 0x4ea7, - 0xad, 0xed, 0x1e, 0x31, 0x22, 0x8c, 0x76, 0xdc); +static const struct ishtp_device_id loader_ishtp_id_table[] = { + { .guid = GUID_INIT(0xc804d06a, 0x55bd, 0x4ea7, + 0xad, 0xed, 0x1e, 0x31, 0x22, 0x8c, 0x76, 0xdc) }, + { } +}; +MODULE_DEVICE_TABLE(ishtp, loader_ishtp_id_table); #define FILENAME_SIZE 256 @@ -880,7 +883,7 @@ static int loader_init(struct ishtp_cl *loader_ishtp_cl, int reset) fw_client = ishtp_fw_cl_get_client(ishtp_get_ishtp_device(loader_ishtp_cl), - &loader_ishtp_guid); + &loader_ishtp_id_table[0].guid); if (!fw_client) { dev_err(cl_data_to_dev(client_data), "ISH client uuid not found\n"); @@ -1057,7 +1060,7 @@ static int loader_ishtp_cl_reset(struct 
ishtp_cl_device *cl_device) static struct ishtp_cl_driver loader_ishtp_cl_driver = { .name = "ish-loader", - .guid = &loader_ishtp_guid, + .id = loader_ishtp_id_table, .probe = loader_ishtp_cl_probe, .remove = loader_ishtp_cl_remove, .reset = loader_ishtp_cl_reset, @@ -1083,4 +1086,3 @@ MODULE_DESCRIPTION("ISH ISH-TP Host firmware Loader Client Driver"); MODULE_AUTHOR("Rushikesh S Kadam <rushikesh.s.kadam@intel.com>"); MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("ishtp:*"); diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c index 91bf4d01e91a..4338c9b68a43 100644 --- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c +++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c @@ -12,9 +12,12 @@ #include "ishtp-hid.h" /* ISH Transport protocol (ISHTP in short) GUID */ -static const guid_t hid_ishtp_guid = - GUID_INIT(0x33AECD58, 0xB679, 0x4E54, - 0x9B, 0xD9, 0xA0, 0x4D, 0x34, 0xF0, 0xC2, 0x26); +static const struct ishtp_device_id hid_ishtp_id_table[] = { + { .guid = GUID_INIT(0x33AECD58, 0xB679, 0x4E54, + 0x9B, 0xD9, 0xA0, 0x4D, 0x34, 0xF0, 0xC2, 0x26), }, + { } +}; +MODULE_DEVICE_TABLE(ishtp, hid_ishtp_id_table); /* Rx ring buffer pool size */ #define HID_CL_RX_RING_SIZE 32 @@ -662,7 +665,7 @@ static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset) ishtp_set_tx_ring_size(hid_ishtp_cl, HID_CL_TX_RING_SIZE); ishtp_set_rx_ring_size(hid_ishtp_cl, HID_CL_RX_RING_SIZE); - fw_client = ishtp_fw_cl_get_client(dev, &hid_ishtp_guid); + fw_client = ishtp_fw_cl_get_client(dev, &hid_ishtp_id_table[0].guid); if (!fw_client) { dev_err(cl_data_to_dev(client_data), "ish client uuid not found\n"); @@ -945,7 +948,7 @@ static const struct dev_pm_ops hid_ishtp_pm_ops = { static struct ishtp_cl_driver hid_ishtp_cl_driver = { .name = "ish-hid", - .guid = &hid_ishtp_guid, + .id = hid_ishtp_id_table, .probe = hid_ishtp_cl_probe, .remove = hid_ishtp_cl_remove, .reset = hid_ishtp_cl_reset, @@ -981,4 +984,3 @@ MODULE_AUTHOR("Daniel Drubin <daniel.drubin@intel.com>"); MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("ishtp:*"); diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c index 334eac611774..f68aba8794fe 100644 --- a/drivers/hid/intel-ish-hid/ishtp/bus.c +++ b/drivers/hid/intel-ish-hid/ishtp/bus.c @@ -241,7 +241,7 @@ static int ishtp_cl_bus_match(struct device *dev, struct device_driver *drv) struct ishtp_cl_device *device = to_ishtp_cl_device(dev); struct ishtp_cl_driver *driver = to_ishtp_cl_driver(drv); - return guid_equal(driver->guid, + return guid_equal(&driver->id[0].guid, &device->fw_client->props.protocol_name); } @@ -350,7 +350,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a, { int len; - len = snprintf(buf, PAGE_SIZE, "ishtp:%s\n", dev_name(dev)); + len = snprintf(buf, PAGE_SIZE, ISHTP_MODULE_PREFIX "%s\n", dev_name(dev)); return (len >= PAGE_SIZE) ? 
(PAGE_SIZE - 1) : len; } static DEVICE_ATTR_RO(modalias); @@ -363,7 +363,7 @@ ATTRIBUTE_GROUPS(ishtp_cl_dev); static int ishtp_cl_uevent(struct device *dev, struct kobj_uevent_env *env) { - if (add_uevent_var(env, "MODALIAS=ishtp:%s", dev_name(dev))) + if (add_uevent_var(env, "MODALIAS=" ISHTP_MODULE_PREFIX "%s", dev_name(dev))) return -ENOMEM; return 0; } diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 2717d39600b4..066c567dbaa2 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -726,7 +726,7 @@ static void wacom_retrieve_hid_descriptor(struct hid_device *hdev, * Skip the query for this type and modify defaults based on * interface number. */ - if (features->type == WIRELESS) { + if (features->type == WIRELESS && intf) { if (intf->cur_altsetting->desc.bInterfaceNumber == 0) features->device_type = WACOM_DEVICETYPE_WL_MONITOR; else @@ -2214,7 +2214,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix) if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) { char *product_name = wacom->hdev->name; - if (hid_is_using_ll_driver(wacom->hdev, &usb_hid_driver)) { + if (hid_is_usb(wacom->hdev)) { struct usb_interface *intf = to_usb_interface(wacom->hdev->dev.parent); struct usb_device *dev = interface_to_usbdev(intf); product_name = dev->product; @@ -2451,6 +2451,9 @@ static void wacom_wireless_work(struct work_struct *work) wacom_destroy_battery(wacom); + if (!usbdev) + return; + /* Stylus interface */ hdev1 = usb_get_intfdata(usbdev->config->interface[1]); wacom1 = hid_get_drvdata(hdev1); @@ -2730,8 +2733,6 @@ static void wacom_mode_change_work(struct work_struct *work) static int wacom_probe(struct hid_device *hdev, const struct hid_device_id *id) { - struct usb_interface *intf = to_usb_interface(hdev->dev.parent); - struct usb_device *dev = interface_to_usbdev(intf); struct wacom *wacom; struct wacom_wac *wacom_wac; struct wacom_features *features; @@ -2766,8 +2767,14 @@ static int wacom_probe(struct hid_device *hdev, wacom_wac->hid_data.inputmode = -1; wacom_wac->mode_report = -1; - wacom->usbdev = dev; - wacom->intf = intf; + if (hid_is_usb(hdev)) { + struct usb_interface *intf = to_usb_interface(hdev->dev.parent); + struct usb_device *dev = interface_to_usbdev(intf); + + wacom->usbdev = dev; + wacom->intf = intf; + } + mutex_init(&wacom->lock); INIT_DELAYED_WORK(&wacom->init_work, wacom_init_work); INIT_WORK(&wacom->wireless_work, wacom_wireless_work); diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 33a6908995b1..2a4cc39962e7 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2603,6 +2603,9 @@ static void wacom_wac_finger_event(struct hid_device *hdev, return; switch (equivalent_usage) { + case HID_DG_CONFIDENCE: + wacom_wac->hid_data.confidence = value; + break; case HID_GD_X: wacom_wac->hid_data.x = value; break; @@ -2635,7 +2638,8 @@ static void wacom_wac_finger_event(struct hid_device *hdev, } if (usage->usage_index + 1 == field->report_count) { - if (equivalent_usage == wacom_wac->hid_data.last_slot_field) + if (equivalent_usage == wacom_wac->hid_data.last_slot_field && + wacom_wac->hid_data.confidence) wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input); } } @@ -2653,6 +2657,8 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev, wacom_wac->is_invalid_bt_frame = false; + hid_data->confidence = true; + for (i = 0; i < report->maxfield; i++) { struct hid_field *field = report->field[i]; int j; diff --git a/drivers/hid/wacom_wac.h 
b/drivers/hid/wacom_wac.h index 8b2d4e5b2303..466b62cc16dc 100644 --- a/drivers/hid/wacom_wac.h +++ b/drivers/hid/wacom_wac.h @@ -301,6 +301,7 @@ struct hid_data { bool barrelswitch; bool barrelswitch2; bool serialhi; + bool confidence; int x; int y; int pressure; diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index 7f11ea07d698..ca873a3b98db 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -480,7 +480,7 @@ module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR)); MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure"); static atomic_t trans_id = ATOMIC_INIT(0); -static int dm_ring_size = 20 * 1024; +static int dm_ring_size = VMBUS_RING_SIZE(16 * 1024); /* * Driver specific state. diff --git a/drivers/hwmon/corsair-psu.c b/drivers/hwmon/corsair-psu.c index 731d5117f9f1..14389fd7afb8 100644 --- a/drivers/hwmon/corsair-psu.c +++ b/drivers/hwmon/corsair-psu.c @@ -729,7 +729,7 @@ static int corsairpsu_probe(struct hid_device *hdev, const struct hid_device_id corsairpsu_check_cmd_support(priv); priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, "corsairpsu", priv, - &corsairpsu_chip_info, 0); + &corsairpsu_chip_info, NULL); if (IS_ERR(priv->hwmon_dev)) { ret = PTR_ERR(priv->hwmon_dev); diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index eaace478f508..5596c211f38d 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c @@ -627,10 +627,9 @@ static void __init i8k_init_procfs(struct device *dev) { struct dell_smm_data *data = dev_get_drvdata(dev); - /* Register the proc entry */ - proc_create_data("i8k", 0, NULL, &i8k_proc_ops, data); - - devm_add_action_or_reset(dev, i8k_exit_procfs, NULL); + /* Only register exit function if creation was successful */ + if (proc_create_data("i8k", 0, NULL, &i8k_proc_ops, data)) + devm_add_action_or_reset(dev, i8k_exit_procfs, NULL); } #else diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index 93dca471972e..57ce8633a725 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -1527,7 +1527,7 @@ static u16 nct6775_wmi_read_value(struct nct6775_data *data, u16 reg) nct6775_wmi_set_bank(data, reg); - err = nct6775_asuswmi_read(data->bank, reg, &tmp); + err = nct6775_asuswmi_read(data->bank, reg & 0xff, &tmp); if (err) return 0; diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c index 17518b4cab1b..f12b9a28a232 100644 --- a/drivers/hwmon/pwm-fan.c +++ b/drivers/hwmon/pwm-fan.c @@ -336,8 +336,6 @@ static int pwm_fan_probe(struct platform_device *pdev) return ret; } - ctx->pwm_value = MAX_PWM; - pwm_init_state(ctx->pwm, &ctx->pwm_state); /* diff --git a/drivers/hwmon/sht4x.c b/drivers/hwmon/sht4x.c index 09c2a0b06444..3415d7a0e0fc 100644 --- a/drivers/hwmon/sht4x.c +++ b/drivers/hwmon/sht4x.c @@ -23,7 +23,7 @@ /* * I2C command delays (in microseconds) */ -#define SHT4X_MEAS_DELAY 1000 +#define SHT4X_MEAS_DELAY_HPM 8200 /* see t_MEAS,h in datasheet */ #define SHT4X_DELAY_EXTRA 10000 /* @@ -90,7 +90,7 @@ static int sht4x_read_values(struct sht4x_data *data) if (ret < 0) goto unlock; - usleep_range(SHT4X_MEAS_DELAY, SHT4X_MEAS_DELAY + SHT4X_DELAY_EXTRA); + usleep_range(SHT4X_MEAS_DELAY_HPM, SHT4X_MEAS_DELAY_HPM + SHT4X_DELAY_EXTRA); ret = i2c_master_recv(client, raw_data, SHT4X_RESPONSE_LENGTH); if (ret != SHT4X_RESPONSE_LENGTH) { diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c index 72df563477b1..f8639a4457d2 100644 --- a/drivers/i2c/busses/i2c-cbus-gpio.c +++ 
b/drivers/i2c/busses/i2c-cbus-gpio.c @@ -195,8 +195,9 @@ static u32 cbus_i2c_func(struct i2c_adapter *adapter) } static const struct i2c_algorithm cbus_i2c_algo = { - .smbus_xfer = cbus_i2c_smbus_xfer, - .functionality = cbus_i2c_func, + .smbus_xfer = cbus_i2c_smbus_xfer, + .smbus_xfer_atomic = cbus_i2c_smbus_xfer, + .functionality = cbus_i2c_func, }; static int cbus_i2c_remove(struct platform_device *pdev) diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 05187457f88a..41446f9cc52d 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -191,6 +191,7 @@ #define SMBSLVSTS_HST_NTFY_STS BIT(0) /* Host Notify Command register bits */ +#define SMBSLVCMD_SMBALERT_DISABLE BIT(2) #define SMBSLVCMD_HST_NTFY_INTREN BIT(0) #define STATUS_ERROR_FLAGS (SMBHSTSTS_FAILED | SMBHSTSTS_BUS_ERR | \ @@ -259,6 +260,7 @@ struct i801_priv { struct i2c_adapter adapter; unsigned long smba; unsigned char original_hstcfg; + unsigned char original_hstcnt; unsigned char original_slvcmd; struct pci_dev *pci_dev; unsigned int features; @@ -641,12 +643,20 @@ static irqreturn_t i801_isr(int irq, void *dev_id) i801_isr_byte_done(priv); /* - * Clear irq sources and report transaction result. + * Clear remaining IRQ sources: Completion of last command, errors + * and the SMB_ALERT signal. SMB_ALERT status is set after signal + * assertion independently of the interrupt generation being blocked + * or not so clear it always when the status is set. + */ + status &= SMBHSTSTS_INTR | STATUS_ERROR_FLAGS | SMBHSTSTS_SMBALERT_STS; + if (status) + outb_p(status, SMBHSTSTS(priv)); + status &= ~SMBHSTSTS_SMBALERT_STS; /* SMB_ALERT not reported */ + /* + * Report transaction result. * ->status must be cleared before the next transaction is started. */ - status &= SMBHSTSTS_INTR | STATUS_ERROR_FLAGS; if (status) { - outb_p(status, SMBHSTSTS(priv)); priv->status = status; complete(&priv->done); } @@ -974,9 +984,13 @@ static void i801_enable_host_notify(struct i2c_adapter *adapter) if (!(priv->features & FEATURE_HOST_NOTIFY)) return; - if (!(SMBSLVCMD_HST_NTFY_INTREN & priv->original_slvcmd)) - outb_p(SMBSLVCMD_HST_NTFY_INTREN | priv->original_slvcmd, - SMBSLVCMD(priv)); + /* + * Enable host notify interrupt and block the generation of interrupt + * from the SMB_ALERT signal because the driver does not support + * SMBus Alert. 
+ */ + outb_p(SMBSLVCMD_HST_NTFY_INTREN | SMBSLVCMD_SMBALERT_DISABLE | + priv->original_slvcmd, SMBSLVCMD(priv)); /* clear Host Notify bit to allow a new notification */ outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv)); @@ -1805,7 +1819,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) outb_p(inb_p(SMBAUXCTL(priv)) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv)); - /* Remember original Host Notify setting */ + /* Remember original Interrupt and Host Notify settings */ + priv->original_hstcnt = inb_p(SMBHSTCNT(priv)) & ~SMBHSTCNT_KILL; if (priv->features & FEATURE_HOST_NOTIFY) priv->original_slvcmd = inb_p(SMBSLVCMD(priv)); @@ -1869,6 +1884,7 @@ static void i801_remove(struct pci_dev *dev) { struct i801_priv *priv = pci_get_drvdata(dev); + outb_p(priv->original_hstcnt, SMBHSTCNT(priv)); i801_disable_host_notify(priv); i801_del_mux(priv); i2c_del_adapter(&priv->adapter); @@ -1892,6 +1908,7 @@ static void i801_shutdown(struct pci_dev *dev) struct i801_priv *priv = pci_get_drvdata(dev); /* Restore config registers to avoid hard hang on some systems */ + outb_p(priv->original_hstcnt, SMBHSTCNT(priv)); i801_disable_host_notify(priv); pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg); } @@ -1901,6 +1918,7 @@ static int i801_suspend(struct device *dev) { struct i801_priv *priv = dev_get_drvdata(dev); + outb_p(priv->original_hstcnt, SMBHSTCNT(priv)); pci_write_config_byte(priv->pci_dev, SMBHSTCFG, priv->original_hstcfg); return 0; } diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index a6ea1eb1394e..53b8da6dbb23 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -636,7 +636,7 @@ static irqreturn_t mpc_i2c_isr(int irq, void *dev_id) status = readb(i2c->base + MPC_I2C_SR); if (status & CSR_MIF) { /* Wait up to 100us for transfer to properly complete */ - readb_poll_timeout(i2c->base + MPC_I2C_SR, status, !(status & CSR_MCF), 0, 100); + readb_poll_timeout_atomic(i2c->base + MPC_I2C_SR, status, status & CSR_MCF, 0, 100); writeb(0, i2c->base + MPC_I2C_SR); mpc_i2c_do_intr(i2c, status); return IRQ_HANDLED; diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index 819ab4ee517e..02ddb237f69a 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c @@ -423,8 +423,8 @@ static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd) if (!(ipd & REG_INT_MBRF)) return; - /* ack interrupt */ - i2c_writel(i2c, REG_INT_MBRF, REG_IPD); + /* ack interrupt (read also produces a spurious START flag, clear it too) */ + i2c_writel(i2c, REG_INT_MBRF | REG_INT_START, REG_IPD); /* Can only handle a maximum of 32 bytes at a time */ if (len > 32) diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index b9b19a2a2ffa..66145d2b9b55 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -1493,6 +1493,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data) { struct stm32f7_i2c_dev *i2c_dev = data; struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; + struct stm32_i2c_dma *dma = i2c_dev->dma; void __iomem *base = i2c_dev->base; u32 status, mask; int ret = IRQ_HANDLED; @@ -1518,6 +1519,10 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data) dev_dbg(i2c_dev->dev, "<%s>: Receive NACK (addr %x)\n", __func__, f7_msg->addr); writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR); + if (i2c_dev->use_dma) { + stm32f7_i2c_disable_dma_req(i2c_dev); + dmaengine_terminate_async(dma->chan_using); + } 
f7_msg->result = -ENXIO; } @@ -1533,7 +1538,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data) /* Clear STOP flag */ writel_relaxed(STM32F7_I2C_ICR_STOPCF, base + STM32F7_I2C_ICR); - if (i2c_dev->use_dma) { + if (i2c_dev->use_dma && !f7_msg->result) { ret = IRQ_WAKE_THREAD; } else { i2c_dev->master_mode = false; @@ -1546,7 +1551,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data) if (f7_msg->stop) { mask = STM32F7_I2C_CR2_STOP; stm32f7_i2c_set_bits(base + STM32F7_I2C_CR2, mask); - } else if (i2c_dev->use_dma) { + } else if (i2c_dev->use_dma && !f7_msg->result) { ret = IRQ_WAKE_THREAD; } else if (f7_msg->smbus) { stm32f7_i2c_smbus_rep_start(i2c_dev); @@ -1583,7 +1588,7 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data) if (!ret) { dev_dbg(i2c_dev->dev, "<%s>: Timed out\n", __func__); stm32f7_i2c_disable_dma_req(i2c_dev); - dmaengine_terminate_all(dma->chan_using); + dmaengine_terminate_async(dma->chan_using); f7_msg->result = -ETIMEDOUT; } @@ -1660,7 +1665,7 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data) /* Disable dma */ if (i2c_dev->use_dma) { stm32f7_i2c_disable_dma_req(i2c_dev); - dmaengine_terminate_all(dma->chan_using); + dmaengine_terminate_async(dma->chan_using); } i2c_dev->master_mode = false; @@ -1696,12 +1701,26 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap, time_left = wait_for_completion_timeout(&i2c_dev->complete, i2c_dev->adap.timeout); ret = f7_msg->result; + if (ret) { + if (i2c_dev->use_dma) + dmaengine_synchronize(dma->chan_using); + + /* + * It is possible that some unsent data have already been + * written into TXDR. To avoid sending old data in a + * further transfer, flush TXDR in case of any error + */ + writel_relaxed(STM32F7_I2C_ISR_TXE, + i2c_dev->base + STM32F7_I2C_ISR); + goto pm_free; + } if (!time_left) { dev_dbg(i2c_dev->dev, "Access to slave 0x%x timed out\n", i2c_dev->msg->addr); if (i2c_dev->use_dma) - dmaengine_terminate_all(dma->chan_using); + dmaengine_terminate_sync(dma->chan_using); + stm32f7_i2c_wait_free_bus(i2c_dev); ret = -ETIMEDOUT; } @@ -1744,13 +1763,25 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, timeout = wait_for_completion_timeout(&i2c_dev->complete, i2c_dev->adap.timeout); ret = f7_msg->result; - if (ret) + if (ret) { + if (i2c_dev->use_dma) + dmaengine_synchronize(dma->chan_using); + + /* + * It is possible that some unsent data have already been + * written into TXDR. 
To avoid sending old data in a + * further transfer, flush TXDR in case of any error + */ + writel_relaxed(STM32F7_I2C_ISR_TXE, + i2c_dev->base + STM32F7_I2C_ISR); goto pm_free; + } if (!timeout) { dev_dbg(dev, "Access to slave 0x%x timed out\n", f7_msg->addr); if (i2c_dev->use_dma) - dmaengine_terminate_all(dma->chan_using); + dmaengine_terminate_sync(dma->chan_using); + stm32f7_i2c_wait_free_bus(i2c_dev); ret = -ETIMEDOUT; goto pm_free; } diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c index 1ed4daa918a0..41eb0dcc3204 100644 --- a/drivers/i2c/busses/i2c-virtio.c +++ b/drivers/i2c/busses/i2c-virtio.c @@ -22,24 +22,24 @@ /** * struct virtio_i2c - virtio I2C data * @vdev: virtio device for this controller - * @completion: completion of virtio I2C message * @adap: I2C adapter for this controller * @vq: the virtio virtqueue for communication */ struct virtio_i2c { struct virtio_device *vdev; - struct completion completion; struct i2c_adapter adap; struct virtqueue *vq; }; /** * struct virtio_i2c_req - the virtio I2C request structure + * @completion: completion of virtio I2C message * @out_hdr: the OUT header of the virtio I2C message * @buf: the buffer into which data is read, or from which it's written * @in_hdr: the IN header of the virtio I2C message */ struct virtio_i2c_req { + struct completion completion; struct virtio_i2c_out_hdr out_hdr ____cacheline_aligned; uint8_t *buf ____cacheline_aligned; struct virtio_i2c_in_hdr in_hdr ____cacheline_aligned; @@ -47,9 +47,11 @@ struct virtio_i2c_req { static void virtio_i2c_msg_done(struct virtqueue *vq) { - struct virtio_i2c *vi = vq->vdev->priv; + struct virtio_i2c_req *req; + unsigned int len; - complete(&vi->completion); + while ((req = virtqueue_get_buf(vq, &len))) + complete(&req->completion); } static int virtio_i2c_prepare_reqs(struct virtqueue *vq, @@ -62,6 +64,8 @@ static int virtio_i2c_prepare_reqs(struct virtqueue *vq, for (i = 0; i < num; i++) { int outcnt = 0, incnt = 0; + init_completion(&reqs[i].completion); + /* * Only 7-bit mode supported for this moment. For the address * format, Please check the Virtio I2C Specification. @@ -104,24 +108,17 @@ static int virtio_i2c_prepare_reqs(struct virtqueue *vq, static int virtio_i2c_complete_reqs(struct virtqueue *vq, struct virtio_i2c_req *reqs, - struct i2c_msg *msgs, int num, - bool timedout) + struct i2c_msg *msgs, int num) { - struct virtio_i2c_req *req; - bool failed = timedout; - unsigned int len; + bool failed = false; int i, j = 0; for (i = 0; i < num; i++) { - /* Detach the ith request from the vq */ - req = virtqueue_get_buf(vq, &len); + struct virtio_i2c_req *req = &reqs[i]; - /* - * Condition req == &reqs[i] should always meet since we have - * total num requests in the vq. reqs[i] can never be NULL here. - */ - if (!failed && (WARN_ON(req != &reqs[i]) || - req->in_hdr.status != VIRTIO_I2C_MSG_OK)) + wait_for_completion(&req->completion); + + if (!failed && req->in_hdr.status != VIRTIO_I2C_MSG_OK) failed = true; i2c_put_dma_safe_msg_buf(reqs[i].buf, &msgs[i], !failed); @@ -130,7 +127,7 @@ static int virtio_i2c_complete_reqs(struct virtqueue *vq, j++; } - return timedout ? 
-ETIMEDOUT : j; + return j; } static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, @@ -139,7 +136,6 @@ static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, struct virtio_i2c *vi = i2c_get_adapdata(adap); struct virtqueue *vq = vi->vq; struct virtio_i2c_req *reqs; - unsigned long time_left; int count; reqs = kcalloc(num, sizeof(*reqs), GFP_KERNEL); @@ -158,15 +154,9 @@ static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, * remote here to clear the virtqueue, so we can try another set of * messages later on. */ - - reinit_completion(&vi->completion); virtqueue_kick(vq); - time_left = wait_for_completion_timeout(&vi->completion, adap->timeout); - if (!time_left) - dev_err(&adap->dev, "virtio i2c backend timeout.\n"); - - count = virtio_i2c_complete_reqs(vq, reqs, msgs, count, !time_left); + count = virtio_i2c_complete_reqs(vq, reqs, msgs, count); err_free: kfree(reqs); @@ -214,8 +204,6 @@ static int virtio_i2c_probe(struct virtio_device *vdev) vdev->priv = vi; vi->vdev = vdev; - init_completion(&vi->completion); - ret = virtio_i2c_setup_vqs(vi); if (ret) return ret; diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c index a51fdd3c9b5b..24c9387c2968 100644 --- a/drivers/iio/accel/kxcjk-1013.c +++ b/drivers/iio/accel/kxcjk-1013.c @@ -1595,8 +1595,7 @@ static int kxcjk1013_probe(struct i2c_client *client, return 0; err_buffer_cleanup: - if (data->dready_trig) - iio_triggered_buffer_cleanup(indio_dev); + iio_triggered_buffer_cleanup(indio_dev); err_trigger_unregister: if (data->dready_trig) iio_trigger_unregister(data->dready_trig); @@ -1618,8 +1617,8 @@ static int kxcjk1013_remove(struct i2c_client *client) pm_runtime_disable(&client->dev); pm_runtime_set_suspended(&client->dev); + iio_triggered_buffer_cleanup(indio_dev); if (data->dready_trig) { - iio_triggered_buffer_cleanup(indio_dev); iio_trigger_unregister(data->dready_trig); iio_trigger_unregister(data->motion_trig); } diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c index 2faf85ca996e..552eba5e8b4f 100644 --- a/drivers/iio/accel/kxsd9.c +++ b/drivers/iio/accel/kxsd9.c @@ -224,14 +224,14 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p) hw_values.chan, sizeof(hw_values.chan)); if (ret) { - dev_err(st->dev, - "error reading data\n"); - return ret; + dev_err(st->dev, "error reading data: %d\n", ret); + goto out; } iio_push_to_buffers_with_timestamp(indio_dev, &hw_values, iio_get_time_ns(indio_dev)); +out: iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index 715b8138fb71..09c7f10fefb6 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -1470,7 +1470,7 @@ static int mma8452_trigger_setup(struct iio_dev *indio_dev) if (ret) return ret; - indio_dev->trig = trig; + indio_dev->trig = iio_trigger_get(trig); return 0; } diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 8bf5b62a73f4..3363af15a43f 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -532,7 +532,7 @@ config IMX7D_ADC config IMX8QXP_ADC tristate "NXP IMX8QXP ADC driver" - depends on ARCH_MXC_ARM64 || COMPILE_TEST + depends on ARCH_MXC || COMPILE_TEST depends on HAS_IOMEM help Say yes here to build support for IMX8QXP ADC. 
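The virtio-i2c rework above moves the completion object from the adapter into each request, so the virtqueue "done" callback must drain every buffer the device has returned and wake the waiter attached to that specific request. A minimal sketch of that callback pattern follows; the names my_req and my_done are illustrative, not the driver's own:

#include <linux/completion.h>
#include <linux/virtio.h>

struct my_req {
	struct completion completion;	/* one completion per request */
	/* out/in headers and the data buffer would follow */
};

/*
 * Virtqueue callback: detach every finished buffer and complete the
 * request it belongs to. Because each request carries its own
 * completion, a buffer returned late or out of order can never wake
 * the wrong transfer.
 */
static void my_done(struct virtqueue *vq)
{
	struct my_req *req;
	unsigned int len;

	while ((req = virtqueue_get_buf(vq, &len)))
		complete(&req->completion);
}

This per-request scheme is what lets the xfer path above drop the adapter-wide timeout plumbing: virtio_i2c_complete_reqs() now simply waits on each request's completion in order.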
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c index 2c5c8a3672b2..aa42ba759fa1 100644 --- a/drivers/iio/adc/ad7768-1.c +++ b/drivers/iio/adc/ad7768-1.c @@ -480,8 +480,8 @@ static irqreturn_t ad7768_trigger_handler(int irq, void *p) iio_push_to_buffers_with_timestamp(indio_dev, &st->data.scan, iio_get_time_ns(indio_dev)); - iio_trigger_notify_done(indio_dev->trig); err_unlock: + iio_trigger_notify_done(indio_dev->trig); mutex_unlock(&st->lock); return IRQ_HANDLED; diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c index 4c922ef634f8..92a57cf10fba 100644 --- a/drivers/iio/adc/at91-sama5d2_adc.c +++ b/drivers/iio/adc/at91-sama5d2_adc.c @@ -1586,7 +1586,8 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev, *val = st->conversion_value; ret = at91_adc_adjust_val_osr(st, val); if (chan->scan_type.sign == 's') - *val = sign_extend32(*val, 11); + *val = sign_extend32(*val, + chan->scan_type.realbits - 1); st->conversion_done = false; } diff --git a/drivers/iio/adc/axp20x_adc.c b/drivers/iio/adc/axp20x_adc.c index 3e0c0233b431..df99f1365c39 100644 --- a/drivers/iio/adc/axp20x_adc.c +++ b/drivers/iio/adc/axp20x_adc.c @@ -251,19 +251,8 @@ static int axp22x_adc_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val) { struct axp20x_adc_iio *info = iio_priv(indio_dev); - int size; - /* - * N.B.: Unlike the Chinese datasheets tell, the charging current is - * stored on 12 bits, not 13 bits. Only discharging current is on 13 - * bits. - */ - if (chan->type == IIO_CURRENT && chan->channel == AXP22X_BATT_DISCHRG_I) - size = 13; - else - size = 12; - - *val = axp20x_read_variable_width(info->regmap, chan->address, size); + *val = axp20x_read_variable_width(info->regmap, chan->address, 12); if (*val < 0) return *val; @@ -386,9 +375,8 @@ static int axp22x_adc_scale(struct iio_chan_spec const *chan, int *val, return IIO_VAL_INT_PLUS_MICRO; case IIO_CURRENT: - *val = 0; - *val2 = 500000; - return IIO_VAL_INT_PLUS_MICRO; + *val = 1; + return IIO_VAL_INT; case IIO_TEMP: *val = 100; diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c index 16407664182c..97d162a3cba4 100644 --- a/drivers/iio/adc/dln2-adc.c +++ b/drivers/iio/adc/dln2-adc.c @@ -248,7 +248,6 @@ static int dln2_adc_set_chan_period(struct dln2_adc *dln2, static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel) { int ret, i; - struct iio_dev *indio_dev = platform_get_drvdata(dln2->pdev); u16 conflict; __le16 value; int olen = sizeof(value); @@ -257,13 +256,9 @@ static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel) .chan = channel, }; - ret = iio_device_claim_direct_mode(indio_dev); - if (ret < 0) - return ret; - ret = dln2_adc_set_chan_enabled(dln2, channel, true); if (ret < 0) - goto release_direct; + return ret; ret = dln2_adc_set_port_enabled(dln2, true, &conflict); if (ret < 0) { @@ -300,8 +295,6 @@ disable_port: dln2_adc_set_port_enabled(dln2, false, NULL); disable_chan: dln2_adc_set_chan_enabled(dln2, channel, false); -release_direct: - iio_device_release_direct_mode(indio_dev); return ret; } @@ -337,10 +330,16 @@ static int dln2_adc_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_RAW: + ret = iio_device_claim_direct_mode(indio_dev); + if (ret < 0) + return ret; + mutex_lock(&dln2->mutex); ret = dln2_adc_read(dln2, chan->channel); mutex_unlock(&dln2->mutex); + iio_device_release_direct_mode(indio_dev); + if (ret < 0) return ret; @@ -656,7 +655,11 @@ static int dln2_adc_probe(struct 
platform_device *pdev) return -ENOMEM; } iio_trigger_set_drvdata(dln2->trig, dln2); - devm_iio_trigger_register(dev, dln2->trig); + ret = devm_iio_trigger_register(dev, dln2->trig); + if (ret) { + dev_err(dev, "failed to register trigger: %d\n", ret); + return ret; + } iio_trigger_set_immutable(indio_dev, dln2->trig); ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL, diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index 6245434f8377..8cd258cb2682 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -1117,6 +1117,7 @@ static void stm32h7_adc_unprepare(struct iio_dev *indio_dev) { struct stm32_adc *adc = iio_priv(indio_dev); + stm32_adc_writel(adc, STM32H7_ADC_PCSEL, 0); stm32h7_adc_disable(indio_dev); stm32_adc_int_ch_disable(adc); stm32h7_adc_enter_pwr_down(adc); @@ -1986,7 +1987,7 @@ static int stm32_adc_populate_int_ch(struct iio_dev *indio_dev, const char *ch_n /* Get calibration data for vrefint channel */ ret = nvmem_cell_read_u16(&indio_dev->dev, "vrefint", &vrefint); if (ret && ret != -ENOENT) { - return dev_err_probe(&indio_dev->dev, ret, + return dev_err_probe(indio_dev->dev.parent, ret, "nvmem access error\n"); } if (ret == -ENOENT) diff --git a/drivers/iio/gyro/adxrs290.c b/drivers/iio/gyro/adxrs290.c index 3e0734ddafe3..600e9725da78 100644 --- a/drivers/iio/gyro/adxrs290.c +++ b/drivers/iio/gyro/adxrs290.c @@ -7,6 +7,7 @@ */ #include <linux/bitfield.h> +#include <linux/bitops.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/kernel.h> @@ -124,7 +125,7 @@ static int adxrs290_get_rate_data(struct iio_dev *indio_dev, const u8 cmd, int * goto err_unlock; } - *val = temp; + *val = sign_extend32(temp, 15); err_unlock: mutex_unlock(&st->lock); @@ -146,7 +147,7 @@ static int adxrs290_get_temp_data(struct iio_dev *indio_dev, int *val) } /* extract lower 12 bits temperature reading */ - *val = temp & 0x0FFF; + *val = sign_extend32(temp, 11); err_unlock: mutex_unlock(&st->lock); diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c index 04dd6a7969ea..4cfa0d439560 100644 --- a/drivers/iio/gyro/itg3200_buffer.c +++ b/drivers/iio/gyro/itg3200_buffer.c @@ -61,9 +61,9 @@ static irqreturn_t itg3200_trigger_handler(int irq, void *p) iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp); +error_ret: iio_trigger_notify_done(indio_dev->trig); -error_ret: return IRQ_HANDLED; } diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c index b23caa2f2aa1..93990ff1dfe3 100644 --- a/drivers/iio/industrialio-trigger.c +++ b/drivers/iio/industrialio-trigger.c @@ -556,7 +556,6 @@ struct iio_trigger *viio_trigger_alloc(struct device *parent, irq_modify_status(trig->subirq_base + i, IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE); } - get_device(&trig->dev); return trig; diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c index 7e51aaac0bf8..b2983b1a9ed1 100644 --- a/drivers/iio/light/ltr501.c +++ b/drivers/iio/light/ltr501.c @@ -1275,7 +1275,7 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p) ret = regmap_bulk_read(data->regmap, LTR501_ALS_DATA1, als_buf, sizeof(als_buf)); if (ret < 0) - return ret; + goto done; if (test_bit(0, indio_dev->active_scan_mask)) scan.channels[j++] = le16_to_cpu(als_buf[1]); if (test_bit(1, indio_dev->active_scan_mask)) diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c index 07e91846307c..fc63856ed54d 100644 --- a/drivers/iio/light/stk3310.c +++ b/drivers/iio/light/stk3310.c @@ -546,9 
+546,8 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private) mutex_lock(&data->lock); ret = regmap_field_read(data->reg_flag_nf, &dir); if (ret < 0) { - dev_err(&data->client->dev, "register read failed\n"); - mutex_unlock(&data->lock); - return ret; + dev_err(&data->client->dev, "register read failed: %d\n", ret); + goto out; } event = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1, IIO_EV_TYPE_THRESH, @@ -560,6 +559,7 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private) ret = regmap_field_write(data->reg_flag_psint, 0); if (ret < 0) dev_err(&data->client->dev, "failed to reset interrupts\n"); +out: mutex_unlock(&data->lock); return IRQ_HANDLED; diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c index 33083877cd19..4353b749ecef 100644 --- a/drivers/iio/trigger/stm32-timer-trigger.c +++ b/drivers/iio/trigger/stm32-timer-trigger.c @@ -912,6 +912,6 @@ static struct platform_driver stm32_timer_trigger_driver = { }; module_platform_driver(stm32_timer_trigger_driver); -MODULE_ALIAS("platform: stm32-timer-trigger"); +MODULE_ALIAS("platform:stm32-timer-trigger"); MODULE_DESCRIPTION("STMicroelectronics STM32 Timer Trigger driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index fedc0fa6ebf9..f5aacaf7fb8e 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -1906,7 +1906,8 @@ static int nldev_stat_set_mode_doit(struct sk_buff *msg, int ret; /* Currently only counter for QP is supported */ - if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP) + if (!tb[RDMA_NLDEV_ATTR_STAT_RES] || + nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP) return -EINVAL; mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]); diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 692d5ff657df..c18634bec212 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1232,6 +1232,9 @@ static struct ib_qp *create_qp(struct ib_device *dev, struct ib_pd *pd, INIT_LIST_HEAD(&qp->rdma_mrs); INIT_LIST_HEAD(&qp->sig_mrs); + qp->send_cq = attr->send_cq; + qp->recv_cq = attr->recv_cq; + rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP); WARN_ONCE(!udata && !caller, "Missing kernel QP owner"); rdma_restrack_set_name(&qp->res, udata ? NULL : caller); diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index ec37f4fd8e96..f1245c94ae26 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -8415,6 +8415,8 @@ static void receive_interrupt_common(struct hfi1_ctxtdata *rcd) */ static void __hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd) { + if (!rcd->rcvhdrq) + return; clear_recv_intr(rcd); if (check_packet_present(rcd)) force_recv_intr(rcd); diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c index 61f341c3005c..e2c634af40e9 100644 --- a/drivers/infiniband/hw/hfi1/driver.c +++ b/drivers/infiniband/hw/hfi1/driver.c @@ -1012,6 +1012,8 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread) struct hfi1_packet packet; int skip_pkt = 0; + if (!rcd->rcvhdrq) + return RCV_PKT_OK; /* Control context will always use the slow path interrupt handler */ needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 
0 : 1; diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index dbd1c31830b9..4436ed41547c 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c @@ -113,7 +113,6 @@ static int hfi1_create_kctxt(struct hfi1_devdata *dd, rcd->fast_handler = get_dma_rtail_setting(rcd) ? handle_receive_interrupt_dma_rtail : handle_receive_interrupt_nodma_rtail; - rcd->slow_handler = handle_receive_interrupt; hfi1_set_seq_cnt(rcd, 1); @@ -334,6 +333,8 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa, rcd->numa_id = numa; rcd->rcv_array_groups = dd->rcv_entries.ngroups; rcd->rhf_rcv_function_map = normal_rhf_rcv_functions; + rcd->slow_handler = handle_receive_interrupt; + rcd->do_interrupt = rcd->slow_handler; rcd->msix_intr = CCE_NUM_MSIX_VECTORS; mutex_init(&rcd->exp_mutex); @@ -874,18 +875,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit) if (ret) goto done; - /* allocate dummy tail memory for all receive contexts */ - dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev, - sizeof(u64), - &dd->rcvhdrtail_dummy_dma, - GFP_KERNEL); - - if (!dd->rcvhdrtail_dummy_kvaddr) { - dd_dev_err(dd, "cannot allocate dummy tail memory\n"); - ret = -ENOMEM; - goto done; - } - /* dd->rcd can be NULL if early initialization failed */ for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) { /* @@ -898,8 +887,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit) if (!rcd) continue; - rcd->do_interrupt = &handle_receive_interrupt; - lastfail = hfi1_create_rcvhdrq(dd, rcd); if (!lastfail) lastfail = hfi1_setup_eagerbufs(rcd); @@ -1120,7 +1107,7 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) rcd->egrbufs.rcvtids = NULL; for (e = 0; e < rcd->egrbufs.alloced; e++) { - if (rcd->egrbufs.buffers[e].dma) + if (rcd->egrbufs.buffers[e].addr) dma_free_coherent(&dd->pcidev->dev, rcd->egrbufs.buffers[e].len, rcd->egrbufs.buffers[e].addr, @@ -1201,6 +1188,11 @@ void hfi1_free_devdata(struct hfi1_devdata *dd) dd->tx_opstats = NULL; kfree(dd->comp_vect); dd->comp_vect = NULL; + if (dd->rcvhdrtail_dummy_kvaddr) + dma_free_coherent(&dd->pcidev->dev, sizeof(u64), + (void *)dd->rcvhdrtail_dummy_kvaddr, + dd->rcvhdrtail_dummy_dma); + dd->rcvhdrtail_dummy_kvaddr = NULL; sdma_clean(dd, dd->num_sdma); rvt_dealloc_device(&dd->verbs_dev.rdi); } @@ -1298,6 +1290,15 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, goto bail; } + /* allocate dummy tail memory for all receive contexts */ + dd->rcvhdrtail_dummy_kvaddr = + dma_alloc_coherent(&dd->pcidev->dev, sizeof(u64), + &dd->rcvhdrtail_dummy_dma, GFP_KERNEL); + if (!dd->rcvhdrtail_dummy_kvaddr) { + ret = -ENOMEM; + goto bail; + } + atomic_set(&dd->ipoib_rsm_usr_num, 0); return dd; @@ -1505,13 +1506,6 @@ static void cleanup_device_data(struct hfi1_devdata *dd) free_credit_return(dd); - if (dd->rcvhdrtail_dummy_kvaddr) { - dma_free_coherent(&dd->pcidev->dev, sizeof(u64), - (void *)dd->rcvhdrtail_dummy_kvaddr, - dd->rcvhdrtail_dummy_dma); - dd->rcvhdrtail_dummy_kvaddr = NULL; - } - /* * Free any resources still in use (usually just kernel contexts) * at unload; we do for ctxtcnt, because that's what we allocate. 
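The hfi1 changes above are a lifetime fix: the dummy receive-header tail buffer is allocated when the device data is created (not in hfi1_init()) and freed only when the device data itself is destroyed, so a receive interrupt arriving outside the init window never sees a missing buffer. A compressed sketch of the resulting allocate-early/free-late shape, using a hypothetical my_devdata structure and field names rather than the driver's own:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

struct my_devdata {
	struct pci_dev *pcidev;
	u64 *tail_dummy_kvaddr;		/* dummy tail, valid for the whole device lifetime */
	dma_addr_t tail_dummy_dma;
};

/* Called from device-data allocation, before any context can run. */
static int my_alloc_devdata(struct my_devdata *dd)
{
	dd->tail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
						   sizeof(u64),
						   &dd->tail_dummy_dma,
						   GFP_KERNEL);
	if (!dd->tail_dummy_kvaddr)
		return -ENOMEM;
	return 0;
}

/* Called only when the device data is torn down, after interrupts are gone. */
static void my_free_devdata(struct my_devdata *dd)
{
	if (dd->tail_dummy_kvaddr)
		dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
				  dd->tail_dummy_kvaddr,
				  dd->tail_dummy_dma);
	dd->tail_dummy_kvaddr = NULL;
}

Pairing the free with the same teardown path that destroys the device data mirrors the patch's move of dma_free_coherent() from cleanup_device_data() into hfi1_free_devdata().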
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index 2b6c24b7b586..f07d328689d3 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c @@ -838,8 +838,8 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd, if (current->nr_cpus_allowed != 1) goto out; - cpu_id = smp_processor_id(); rcu_read_lock(); + cpu_id = smp_processor_id(); rht_node = rhashtable_lookup(dd->sdma_rht, &cpu_id, sdma_rht_params); diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index ed9fa0d84e9e..dc9211f3a009 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -1628,8 +1628,7 @@ static int init_cntr_names(const char *names_in, const size_t names_len, n++; names_out = - kmalloc((n + num_extra_names) * sizeof(struct rdma_stat_desc) + - names_len, + kzalloc((n + num_extra_names) * sizeof(*q) + names_len, GFP_KERNEL); if (!names_out) { *num_cntrs = 0; @@ -1637,7 +1636,7 @@ static int init_cntr_names(const char *names_in, const size_t names_len, return -ENOMEM; } - p = names_out + (n + num_extra_names) * sizeof(struct rdma_stat_desc); + p = names_out + (n + num_extra_names) * sizeof(*q); memcpy(p, names_in, names_len); q = (struct rdma_stat_desc *)names_out; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 9bfbaddd1763..bbfa1332dedc 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -33,6 +33,7 @@ #include <linux/acpi.h> #include <linux/etherdevice.h> #include <linux/interrupt.h> +#include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/types.h> #include <net/addrconf.h> @@ -1050,9 +1051,14 @@ static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev, unsigned long instance_stage, unsigned long reset_stage) { +#define HW_RESET_TIMEOUT_US 1000000 +#define HW_RESET_SLEEP_US 1000 + struct hns_roce_v2_priv *priv = hr_dev->priv; struct hnae3_handle *handle = priv->handle; const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + unsigned long val; + int ret; /* When hardware reset is detected, we should stop sending mailbox&cmq& * doorbell to hardware. If now in .init_instance() function, we should @@ -1064,7 +1070,11 @@ static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev, * again. 
*/ hr_dev->dis_db = true; - if (!ops->get_hw_reset_stat(handle)) + + ret = read_poll_timeout(ops->ae_dev_reset_cnt, val, + val > hr_dev->reset_cnt, HW_RESET_SLEEP_US, + HW_RESET_TIMEOUT_US, false, handle); + if (!ret) hr_dev->is_reset = true; if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT || @@ -6387,10 +6397,8 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle) if (!hr_dev) return 0; - hr_dev->is_reset = true; hr_dev->active = false; hr_dev->dis_db = true; - hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN; return 0; diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c index 4108dcabece2..b4c657f5f2f9 100644 --- a/drivers/infiniband/hw/irdma/hw.c +++ b/drivers/infiniband/hw/irdma/hw.c @@ -60,6 +60,8 @@ static void irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq) { struct irdma_cq *cq = iwcq->back_cq; + if (!cq->user_mode) + cq->armed = false; if (cq->ibcq.comp_handler) cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); } @@ -146,6 +148,7 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp, qp->flush_code = FLUSH_PROT_ERR; break; case IRDMA_AE_AMP_BAD_QP: + case IRDMA_AE_WQE_UNEXPECTED_OPCODE: qp->flush_code = FLUSH_LOC_QP_OP_ERR; break; case IRDMA_AE_AMP_BAD_STAG_KEY: @@ -156,7 +159,6 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp, case IRDMA_AE_PRIV_OPERATION_DENIED: case IRDMA_AE_IB_INVALID_REQUEST: case IRDMA_AE_IB_REMOTE_ACCESS_ERROR: - case IRDMA_AE_IB_REMOTE_OP_ERROR: qp->flush_code = FLUSH_REM_ACCESS_ERR; qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; break; @@ -184,6 +186,9 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp, case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS: qp->flush_code = FLUSH_MW_BIND_ERR; break; + case IRDMA_AE_IB_REMOTE_OP_ERROR: + qp->flush_code = FLUSH_REM_OP_ERR; + break; default: qp->flush_code = FLUSH_FATAL_ERR; break; diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h index 91a497139ba3..cb218cab79ac 100644 --- a/drivers/infiniband/hw/irdma/main.h +++ b/drivers/infiniband/hw/irdma/main.h @@ -542,6 +542,7 @@ int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd, void (*callback_fcn)(struct irdma_cqp_request *cqp_request), void *cb_param); void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request); +bool irdma_cq_empty(struct irdma_cq *iwcq); int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event, void *ptr); int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event, diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c index aeeb1c310965..fed49da770f3 100644 --- a/drivers/infiniband/hw/irdma/pble.c +++ b/drivers/infiniband/hw/irdma/pble.c @@ -25,8 +25,7 @@ void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc) list_del(&chunk->list); if (chunk->type == PBLE_SD_PAGED) irdma_pble_free_paged_mem(chunk); - if (chunk->bitmapbuf) - kfree(chunk->bitmapmem.va); + bitmap_free(chunk->bitmapbuf); kfree(chunk->chunkmem.va); } } @@ -283,7 +282,6 @@ add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc) "PBLE: next_fpm_addr = %llx chunk_size[%llu] = 0x%llx\n", pble_rsrc->next_fpm_addr, chunk->size, chunk->size); pble_rsrc->unallocated_pble -= (u32)(chunk->size >> 3); - list_add(&chunk->list, &pble_rsrc->pinfo.clist); sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ? 
sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa; @@ -295,12 +293,12 @@ add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc) goto error; } + list_add(&chunk->list, &pble_rsrc->pinfo.clist); sd_entry->valid = true; return 0; error: - if (chunk->bitmapbuf) - kfree(chunk->bitmapmem.va); + bitmap_free(chunk->bitmapbuf); kfree(chunk->chunkmem.va); return ret_code; diff --git a/drivers/infiniband/hw/irdma/pble.h b/drivers/infiniband/hw/irdma/pble.h index e1b3b8118a2c..aa20827dcc9d 100644 --- a/drivers/infiniband/hw/irdma/pble.h +++ b/drivers/infiniband/hw/irdma/pble.h @@ -78,7 +78,6 @@ struct irdma_chunk { u32 pg_cnt; enum irdma_alloc_type type; struct irdma_sc_dev *dev; - struct irdma_virt_mem bitmapmem; struct irdma_virt_mem chunkmem; }; diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c index 8b42c43fc14f..398736d8c78a 100644 --- a/drivers/infiniband/hw/irdma/utils.c +++ b/drivers/infiniband/hw/irdma/utils.c @@ -2239,15 +2239,10 @@ enum irdma_status_code irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm, sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift; - pchunk->bitmapmem.size = sizeofbitmap >> 3; - pchunk->bitmapmem.va = kzalloc(pchunk->bitmapmem.size, GFP_KERNEL); - - if (!pchunk->bitmapmem.va) + pchunk->bitmapbuf = bitmap_zalloc(sizeofbitmap, GFP_KERNEL); + if (!pchunk->bitmapbuf) return IRDMA_ERR_NO_MEMORY; - pchunk->bitmapbuf = pchunk->bitmapmem.va; - bitmap_zero(pchunk->bitmapbuf, sizeofbitmap); - pchunk->sizeofbitmap = sizeofbitmap; /* each pble is 8 bytes hence shift by 3 */ pprm->total_pble_alloc += pchunk->size >> 3; @@ -2491,3 +2486,18 @@ void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event) ibevent.element.qp = &iwqp->ibqp; iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context); } + +bool irdma_cq_empty(struct irdma_cq *iwcq) +{ + struct irdma_cq_uk *ukcq; + u64 qword3; + __le64 *cqe; + u8 polarity; + + ukcq = &iwcq->sc_cq.cq_uk; + cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq); + get_64bit_val(cqe, 24, &qword3); + polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3); + + return polarity != ukcq->polarity; +} diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c index 0f66e809d418..8cd5f9261692 100644 --- a/drivers/infiniband/hw/irdma/verbs.c +++ b/drivers/infiniband/hw/irdma/verbs.c @@ -3584,18 +3584,31 @@ static int irdma_req_notify_cq(struct ib_cq *ibcq, struct irdma_cq *iwcq; struct irdma_cq_uk *ukcq; unsigned long flags; - enum irdma_cmpl_notify cq_notify = IRDMA_CQ_COMPL_EVENT; + enum irdma_cmpl_notify cq_notify; + bool promo_event = false; + int ret = 0; + cq_notify = notify_flags == IB_CQ_SOLICITED ? + IRDMA_CQ_COMPL_SOLICITED : IRDMA_CQ_COMPL_EVENT; iwcq = to_iwcq(ibcq); ukcq = &iwcq->sc_cq.cq_uk; - if (notify_flags == IB_CQ_SOLICITED) - cq_notify = IRDMA_CQ_COMPL_SOLICITED; spin_lock_irqsave(&iwcq->lock, flags); - irdma_uk_cq_request_notification(ukcq, cq_notify); + /* Only promote to arm the CQ for any event if the last arm event was solicited. 
*/ + if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED && notify_flags != IB_CQ_SOLICITED) + promo_event = true; + + if (!iwcq->armed || promo_event) { + iwcq->armed = true; + iwcq->last_notify = cq_notify; + irdma_uk_cq_request_notification(ukcq, cq_notify); + } + + if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && !irdma_cq_empty(iwcq)) + ret = 1; spin_unlock_irqrestore(&iwcq->lock, flags); - return 0; + return ret; } static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num, diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h index 5c244cd321a3..d0fdef8d09ea 100644 --- a/drivers/infiniband/hw/irdma/verbs.h +++ b/drivers/infiniband/hw/irdma/verbs.h @@ -110,6 +110,8 @@ struct irdma_cq { u16 cq_size; u16 cq_num; bool user_mode; + bool armed; + enum irdma_cmpl_notify last_notify; u32 polled_cmpls; u32 cq_mem_size; struct irdma_dma_mem kmem; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index ceca05982f61..0d2fa3338784 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -2215,6 +2215,11 @@ static const struct ib_device_ops mlx4_ib_hw_stats_ops = { .get_hw_stats = mlx4_ib_get_hw_stats, }; +static const struct ib_device_ops mlx4_ib_hw_stats_ops1 = { + .alloc_hw_device_stats = mlx4_ib_alloc_hw_device_stats, + .get_hw_stats = mlx4_ib_get_hw_stats, +}; + static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev) { struct mlx4_ib_diag_counters *diag = ibdev->diag_counters; @@ -2227,9 +2232,16 @@ static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev) return 0; for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) { - /* i == 1 means we are building port counters */ - if (i && !per_port) - continue; + /* + * i == 1 means we are building port counters, set a different + * stats ops without port stats callback. 
+ */ + if (i && !per_port) { + ib_set_device_ops(&ibdev->ib_dev, + &mlx4_ib_hw_stats_ops1); + + return 0; + } ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].descs, &diag[i].offset, diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index e636e954f6bf..4a7a56ed740b 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -664,7 +664,6 @@ struct mlx5_ib_mr { /* User MR data */ struct mlx5_cache_ent *cache_ent; - struct ib_umem *umem; /* This is zero'd when the MR is allocated */ union { @@ -676,7 +675,7 @@ struct mlx5_ib_mr { struct list_head list; }; - /* Used only by kernel MRs (umem == NULL) */ + /* Used only by kernel MRs */ struct { void *descs; void *descs_alloc; @@ -697,8 +696,9 @@ struct mlx5_ib_mr { int data_length; }; - /* Used only by User MRs (umem != NULL) */ + /* Used only by User MRs */ struct { + struct ib_umem *umem; unsigned int page_shift; /* Current access_flags */ int access_flags; diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 157d862fb864..63e2129f1142 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1904,19 +1904,18 @@ err: return ret; } -static void -mlx5_free_priv_descs(struct mlx5_ib_mr *mr) +static void mlx5_free_priv_descs(struct mlx5_ib_mr *mr) { - if (!mr->umem && mr->descs) { - struct ib_device *device = mr->ibmr.device; - int size = mr->max_descs * mr->desc_size; - struct mlx5_ib_dev *dev = to_mdev(device); + struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); + int size = mr->max_descs * mr->desc_size; - dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size, - DMA_TO_DEVICE); - kfree(mr->descs_alloc); - mr->descs = NULL; - } + if (!mr->descs) + return; + + dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size, + DMA_TO_DEVICE); + kfree(mr->descs_alloc); + mr->descs = NULL; } int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) @@ -1992,7 +1991,8 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) if (mr->cache_ent) { mlx5_mr_cache_free(dev, mr); } else { - mlx5_free_priv_descs(mr); + if (!udata) + mlx5_free_priv_descs(mr); kfree(mr); } return 0; @@ -2079,7 +2079,6 @@ static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd, if (err) goto err_free_in; - mr->umem = NULL; kfree(in); return mr; @@ -2206,7 +2205,6 @@ static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd, } mr->ibmr.device = pd->device; - mr->umem = NULL; switch (mr_type) { case IB_MR_TYPE_MEM_REG: diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index 975321812c87..54b8711321c1 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -359,6 +359,7 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd, err2: rxe_queue_cleanup(qp->sq.queue); + qp->sq.queue = NULL; err1: qp->pd = NULL; qp->rcq = NULL; diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c index f7e459fe68be..76e4352fe3f6 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c @@ -19,7 +19,7 @@ void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con) int cpu; cpu = raw_smp_processor_id(); - s = this_cpu_ptr(stats->pcpu_stats); + s = get_cpu_ptr(stats->pcpu_stats); if (con->cpu != cpu) { s->cpu_migr.to++; @@ -27,14 +27,16 @@ void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con) s = per_cpu_ptr(stats->pcpu_stats, con->cpu); 
atomic_inc(&s->cpu_migr.from); } + put_cpu_ptr(stats->pcpu_stats); } void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats) { struct rtrs_clt_stats_pcpu *s; - s = this_cpu_ptr(stats->pcpu_stats); + s = get_cpu_ptr(stats->pcpu_stats); s->rdma.failover_cnt++; + put_cpu_ptr(stats->pcpu_stats); } int rtrs_clt_stats_migration_from_cnt_to_str(struct rtrs_clt_stats *stats, char *buf) @@ -169,9 +171,10 @@ static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats, { struct rtrs_clt_stats_pcpu *s; - s = this_cpu_ptr(stats->pcpu_stats); + s = get_cpu_ptr(stats->pcpu_stats); s->rdma.dir[d].cnt++; s->rdma.dir[d].size_total += size; + put_cpu_ptr(stats->pcpu_stats); } void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir) diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c index 4ff5cd2a6d8d..3d17a0b3fe51 100644 --- a/drivers/input/misc/xen-kbdfront.c +++ b/drivers/input/misc/xen-kbdfront.c @@ -542,6 +542,7 @@ static struct xenbus_driver xenkbd_driver = { .remove = xenkbd_remove, .resume = xenkbd_resume, .otherend_changed = xenkbd_backend_changed, + .not_essential = true, }; static int __init xenkbd_init(void) diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c index 13cbeb997cc1..58da08cc3d01 100644 --- a/drivers/iommu/amd/iommu_v2.c +++ b/drivers/iommu/amd/iommu_v2.c @@ -929,10 +929,8 @@ static int __init amd_iommu_v2_init(void) { int ret; - pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@suse.de>\n"); - if (!amd_iommu_v2_supported()) { - pr_info("AMD IOMMUv2 functionality not available on this system\n"); + pr_info("AMD IOMMUv2 functionality not available on this system - This is not a bug.\n"); /* * Load anyway to provide the symbols to other modules * which may use AMD IOMMUv2 optionally. 
@@ -947,6 +945,8 @@ static int __init amd_iommu_v2_init(void) amd_iommu_register_ppr_notifier(&ppr_nb); + pr_info("AMD IOMMUv2 loaded and initialized\n"); + return 0; out: diff --git a/drivers/iommu/intel/cap_audit.c b/drivers/iommu/intel/cap_audit.c index b39d223926a4..71596fc62822 100644 --- a/drivers/iommu/intel/cap_audit.c +++ b/drivers/iommu/intel/cap_audit.c @@ -144,6 +144,7 @@ static int cap_audit_static(struct intel_iommu *iommu, enum cap_audit_type type) { struct dmar_drhd_unit *d; struct intel_iommu *i; + int rc = 0; rcu_read_lock(); if (list_empty(&dmar_drhd_units)) @@ -169,11 +170,11 @@ static int cap_audit_static(struct intel_iommu *iommu, enum cap_audit_type type) */ if (intel_cap_smts_sanity() && !intel_cap_flts_sanity() && !intel_cap_slts_sanity()) - return -EOPNOTSUPP; + rc = -EOPNOTSUPP; out: rcu_read_unlock(); - return 0; + return rc; } int intel_cap_audit(enum cap_audit_type type, struct intel_iommu *iommu) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 0bde0c8b4126..b6a8f3282411 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -1339,13 +1339,11 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level, pte = &pte[pfn_level_offset(pfn, level)]; do { - unsigned long level_pfn; + unsigned long level_pfn = pfn & level_mask(level); if (!dma_pte_present(pte)) goto next; - level_pfn = pfn & level_mask(level); - /* If range covers entire pagetable, free it */ if (start_pfn <= level_pfn && last_pfn >= level_pfn + level_size(level) - 1) { @@ -1366,7 +1364,7 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level, freelist); } next: - pfn += level_size(level); + pfn = level_pfn + level_size(level); } while (!first_pte_in_page(++pte) && pfn <= last_pfn); if (first_pte) diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 5cb260820eda..7f23ad61c094 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -200,8 +200,8 @@ static inline phys_addr_t rk_dte_pt_address(u32 dte) #define DTE_HI_MASK2 GENMASK(7, 4) #define DTE_HI_SHIFT1 24 /* shift bit 8 to bit 32 */ #define DTE_HI_SHIFT2 32 /* shift bit 4 to bit 36 */ -#define PAGE_DESC_HI_MASK1 GENMASK_ULL(39, 36) -#define PAGE_DESC_HI_MASK2 GENMASK_ULL(35, 32) +#define PAGE_DESC_HI_MASK1 GENMASK_ULL(35, 32) +#define PAGE_DESC_HI_MASK2 GENMASK_ULL(39, 36) static inline phys_addr_t rk_dte_pt_address_v2(u32 dte) { diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c index 3759dc36cc8f..2543ef65825b 100644 --- a/drivers/irqchip/irq-apple-aic.c +++ b/drivers/irqchip/irq-apple-aic.c @@ -707,7 +707,7 @@ static const struct irq_domain_ops aic_ipi_domain_ops = { .free = aic_ipi_free, }; -static int aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node) +static int __init aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node) { struct irq_domain *ipi_domain; int base_ipi; diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index 80906bfec845..5b8d571c041d 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c @@ -232,16 +232,12 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq, int hwirq, i; mutex_lock(&msi_used_lock); + hwirq = bitmap_find_free_region(msi_used, PCI_MSI_DOORBELL_NR, + order_base_2(nr_irqs)); + mutex_unlock(&msi_used_lock); - hwirq = bitmap_find_next_zero_area(msi_used, PCI_MSI_DOORBELL_NR, - 0, nr_irqs, 0); - if (hwirq 
>= PCI_MSI_DOORBELL_NR) { - mutex_unlock(&msi_used_lock); + if (hwirq < 0) return -ENOSPC; - } - - bitmap_set(msi_used, hwirq, nr_irqs); - mutex_unlock(&msi_used_lock); for (i = 0; i < nr_irqs; i++) { irq_domain_set_info(domain, virq + i, hwirq + i, @@ -250,7 +246,7 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq, NULL, NULL); } - return hwirq; + return 0; } static void armada_370_xp_msi_free(struct irq_domain *domain, @@ -259,7 +255,7 @@ static void armada_370_xp_msi_free(struct irq_domain *domain, struct irq_data *d = irq_domain_get_irq_data(domain, virq); mutex_lock(&msi_used_lock); - bitmap_clear(msi_used, d->hwirq, nr_irqs); + bitmap_release_region(msi_used, d->hwirq, order_base_2(nr_irqs)); mutex_unlock(&msi_used_lock); } diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c index f3c6855a4cef..18b77c3e6db4 100644 --- a/drivers/irqchip/irq-aspeed-scu-ic.c +++ b/drivers/irqchip/irq-aspeed-scu-ic.c @@ -76,8 +76,8 @@ static void aspeed_scu_ic_irq_handler(struct irq_desc *desc) generic_handle_domain_irq(scu_ic->irq_domain, bit - scu_ic->irq_shift); - regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, - BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT)); + regmap_write_bits(scu_ic->scu, scu_ic->reg, mask, + BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT)); } chained_irq_exit(chip, desc); diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c index d80e67a6aad2..bb6609cebdbc 100644 --- a/drivers/irqchip/irq-bcm7120-l2.c +++ b/drivers/irqchip/irq-bcm7120-l2.c @@ -238,6 +238,7 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn, } data->num_parent_irqs = platform_irq_count(pdev); + put_device(&pdev->dev); if (data->num_parent_irqs <= 0) { pr_err("invalid number of parent interrupts\n"); ret = -ENOMEM; diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index eb0882d15366..0cb584d9815b 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -742,7 +742,7 @@ static struct its_collection *its_build_invall_cmd(struct its_node *its, its_fixup_cmd(cmd); - return NULL; + return desc->its_invall_cmd.col; } static struct its_vpe *its_build_vinvall_cmd(struct its_node *its, diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index d02b05a067d9..ff89b36267dd 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -9,6 +9,7 @@ #define pr_fmt(fmt) "irq-mips-gic: " fmt +#include <linux/bitfield.h> #include <linux/bitmap.h> #include <linux/clocksource.h> #include <linux/cpuhotplug.h> @@ -735,8 +736,7 @@ static int __init gic_of_init(struct device_node *node, mips_gic_base = ioremap(gic_base, gic_len); gicconfig = read_gic_config(); - gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS; - gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS); + gic_shared_intrs = FIELD_GET(GIC_CONFIG_NUMINTERRUPTS, gicconfig); gic_shared_intrs = (gic_shared_intrs + 1) * 8; if (cpu_has_veic) { diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c index 63bac3f78863..ba4759b3e269 100644 --- a/drivers/irqchip/irq-nvic.c +++ b/drivers/irqchip/irq-nvic.c @@ -26,7 +26,7 @@ #define NVIC_ISER 0x000 #define NVIC_ICER 0x080 -#define NVIC_IPR 0x300 +#define NVIC_IPR 0x400 #define NVIC_MAX_BANKS 16 /* diff --git a/drivers/md/md.c b/drivers/md/md.c index 5111ed966947..41d6e2383517 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2189,6 +2189,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) if 
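
/*
 * Editor's note on the irq-armada-370-xp hunk above -- an annotation,
 * not part of the patch. Multi-MSI blocks must be aligned to a power
 * of two, which bitmap_find_free_region() guarantees and the old
 * bitmap_find_next_zero_area() call did not. A minimal sketch of the
 * alloc/free pairing; the names below are illustrative only.
 */
#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/log2.h>

static DECLARE_BITMAP(demo_msi_used, 64);

static int demo_msi_alloc(unsigned int nr_irqs)
{
	/* order_base_2(5) == 3: five vectors take an aligned block of 8 */
	int hwirq = bitmap_find_free_region(demo_msi_used, 64,
					    order_base_2(nr_irqs));

	return hwirq < 0 ? -ENOSPC : hwirq;
}

static void demo_msi_free(int hwirq, unsigned int nr_irqs)
{
	/* release with the same order that was used for the allocation */
	bitmap_release_region(demo_msi_used, hwirq, order_base_2(nr_irqs));
}
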
(!num_sectors || num_sectors > max_sectors) num_sectors = max_sectors; + rdev->sb_start = sb_start; } sb = page_address(rdev->sb_page); sb->data_size = cpu_to_le64(num_sectors); @@ -6270,7 +6271,8 @@ static void __md_stop(struct mddev *mddev) spin_lock(&mddev->lock); mddev->pers = NULL; spin_unlock(&mddev->lock); - pers->free(mddev, mddev->private); + if (mddev->private) + pers->free(mddev, mddev->private); mddev->private = NULL; if (pers->sync_request && mddev->to_remove == NULL) mddev->to_remove = &md_redundancy_group; diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c index 79fa36de8a04..cd9cb354dc2c 100644 --- a/drivers/media/cec/core/cec-adap.c +++ b/drivers/media/cec/core/cec-adap.c @@ -1199,6 +1199,7 @@ void cec_received_msg_ts(struct cec_adapter *adap, if (abort) dst->rx_status |= CEC_RX_STATUS_FEATURE_ABORT; msg->flags = dst->flags; + msg->sequence = dst->sequence; /* Remove it from the wait_queue */ list_del_init(&data->list); diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c index 1094575abf95..90acafd9a290 100644 --- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c +++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c @@ -241,6 +241,7 @@ static void *vb2_dma_sg_get_userptr(struct vb2_buffer *vb, struct device *dev, buf->offset = vaddr & ~PAGE_MASK; buf->size = size; buf->dma_sgt = &buf->sg_table; + buf->vb = vb; vec = vb2_create_framevec(vaddr, size); if (IS_ERR(vec)) goto userptr_fail_pfnvec; @@ -642,6 +643,7 @@ static void *vb2_dma_sg_attach_dmabuf(struct vb2_buffer *vb, struct device *dev, buf->dma_dir = vb->vb2_queue->dma_dir; buf->size = size; buf->db_attach = dba; + buf->vb = vb; return buf; } diff --git a/drivers/media/i2c/hi846.c b/drivers/media/i2c/hi846.c index 822ce3021fde..48909faeced4 100644 --- a/drivers/media/i2c/hi846.c +++ b/drivers/media/i2c/hi846.c @@ -7,9 +7,9 @@ #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/module.h> -#include <linux/of_graph.h> #include <linux/pm_runtime.h> #include <linux/pm.h> +#include <linux/property.h> #include <linux/regulator/consumer.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> @@ -2176,7 +2176,7 @@ static struct i2c_driver hi846_i2c_driver = { .driver = { .name = "hi846", .pm = &hi846_pm_ops, - .of_match_table = of_match_ptr(hi846_of_match), + .of_match_table = hi846_of_match, }, .probe_new = hi846_probe, .remove = hi846_remove, diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 8176769a89fa..0f3d6b5667b0 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -751,10 +751,6 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *p64, /* * x86 is the only compat architecture with different struct alignment * between 32-bit and 64-bit tasks. - * - * On all other architectures, v4l2_event32 and v4l2_event32_time32 are - * the same as v4l2_event and v4l2_event_time32, so we can use the native - * handlers, converting v4l2_event to v4l2_event_time32 if necessary. 
*/ struct v4l2_event32 { __u32 type; @@ -772,21 +768,6 @@ struct v4l2_event32 { __u32 reserved[8]; }; -#ifdef CONFIG_COMPAT_32BIT_TIME -struct v4l2_event32_time32 { - __u32 type; - union { - compat_s64 value64; - __u8 data[64]; - } u; - __u32 pending; - __u32 sequence; - struct old_timespec32 timestamp; - __u32 id; - __u32 reserved[8]; -}; -#endif - static int put_v4l2_event32(struct v4l2_event *p64, struct v4l2_event32 __user *p32) { @@ -802,7 +783,22 @@ static int put_v4l2_event32(struct v4l2_event *p64, return 0; } +#endif + #ifdef CONFIG_COMPAT_32BIT_TIME +struct v4l2_event32_time32 { + __u32 type; + union { + compat_s64 value64; + __u8 data[64]; + } u; + __u32 pending; + __u32 sequence; + struct old_timespec32 timestamp; + __u32 id; + __u32 reserved[8]; +}; + static int put_v4l2_event32_time32(struct v4l2_event *p64, struct v4l2_event32_time32 __user *p32) { @@ -818,7 +814,6 @@ static int put_v4l2_event32_time32(struct v4l2_event *p64, return 0; } #endif -#endif struct v4l2_edid32 { __u32 pad; @@ -880,9 +875,7 @@ static int put_v4l2_edid32(struct v4l2_edid *p64, #define VIDIOC_QUERYBUF32_TIME32 _IOWR('V', 9, struct v4l2_buffer32_time32) #define VIDIOC_QBUF32_TIME32 _IOWR('V', 15, struct v4l2_buffer32_time32) #define VIDIOC_DQBUF32_TIME32 _IOWR('V', 17, struct v4l2_buffer32_time32) -#ifdef CONFIG_X86_64 #define VIDIOC_DQEVENT32_TIME32 _IOR ('V', 89, struct v4l2_event32_time32) -#endif #define VIDIOC_PREPARE_BUF32_TIME32 _IOWR('V', 93, struct v4l2_buffer32_time32) #endif @@ -936,11 +929,11 @@ unsigned int v4l2_compat_translate_cmd(unsigned int cmd) #ifdef CONFIG_X86_64 case VIDIOC_DQEVENT32: return VIDIOC_DQEVENT; +#endif #ifdef CONFIG_COMPAT_32BIT_TIME case VIDIOC_DQEVENT32_TIME32: return VIDIOC_DQEVENT; #endif -#endif } return cmd; } @@ -1032,11 +1025,11 @@ int v4l2_compat_put_user(void __user *arg, void *parg, unsigned int cmd) #ifdef CONFIG_X86_64 case VIDIOC_DQEVENT32: return put_v4l2_event32(parg, arg); +#endif #ifdef CONFIG_COMPAT_32BIT_TIME case VIDIOC_DQEVENT32_TIME32: return put_v4l2_event32_time32(parg, arg); #endif -#endif } return 0; } diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index b883dcc0bbfa..e201e5976f34 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -241,7 +241,7 @@ static void mtk_smi_larb_config_port_gen2_general(struct device *dev) { struct mtk_smi_larb *larb = dev_get_drvdata(dev); u32 reg, flags_general = larb->larb_gen->flags_general; - const u8 *larbostd = larb->larb_gen->ostd[larb->larbid]; + const u8 *larbostd = larb->larb_gen->ostd ? 
larb->larb_gen->ostd[larb->larbid] : NULL; int i; if (BIT(larb->larbid) & larb->larb_gen->larb_direct_to_common_mask) diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index 8c72eb590f79..6ac509c1821c 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -1803,8 +1803,6 @@ static int rtsx_pci_runtime_suspend(struct device *device) mutex_lock(&pcr->pcr_mutex); rtsx_pci_power_off(pcr, HOST_ENTER_S3); - free_irq(pcr->irq, (void *)pcr); - mutex_unlock(&pcr->pcr_mutex); pcr->is_runtime_suspended = true; @@ -1825,8 +1823,6 @@ static int rtsx_pci_runtime_resume(struct device *device) mutex_lock(&pcr->pcr_mutex); rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00); - rtsx_pci_acquire_irq(pcr); - synchronize_irq(pcr->irq); if (pcr->ops->fetch_vendor_settings) pcr->ops->fetch_vendor_settings(pcr); diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index 632325474233..b38978a3b3ff 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c @@ -376,7 +376,6 @@ MODULE_DEVICE_TABLE(spi, at25_spi_ids); static int at25_probe(struct spi_device *spi) { struct at25_data *at25 = NULL; - struct spi_eeprom chip; int err; int sr; u8 id[FM25_ID_LEN]; @@ -389,15 +388,18 @@ static int at25_probe(struct spi_device *spi) if (match && !strcmp(match->compatible, "cypress,fm25")) is_fram = 1; + at25 = devm_kzalloc(&spi->dev, sizeof(struct at25_data), GFP_KERNEL); + if (!at25) + return -ENOMEM; + /* Chip description */ - if (!spi->dev.platform_data) { - if (!is_fram) { - err = at25_fw_to_chip(&spi->dev, &chip); - if (err) - return err; - } - } else - chip = *(struct spi_eeprom *)spi->dev.platform_data; + if (spi->dev.platform_data) { + memcpy(&at25->chip, spi->dev.platform_data, sizeof(at25->chip)); + } else if (!is_fram) { + err = at25_fw_to_chip(&spi->dev, &at25->chip); + if (err) + return err; + } /* Ping the chip ... the status register is pretty portable, * unlike probing manufacturer IDs. We do expect that system @@ -409,12 +411,7 @@ static int at25_probe(struct spi_device *spi) return -ENXIO; } - at25 = devm_kzalloc(&spi->dev, sizeof(struct at25_data), GFP_KERNEL); - if (!at25) - return -ENOMEM; - mutex_init(&at25->lock); - at25->chip = chip; at25->spi = spi; spi_set_drvdata(spi, at25); @@ -431,7 +428,7 @@ static int at25_probe(struct spi_device *spi) dev_err(&spi->dev, "Error: unsupported size (id %02x)\n", id[7]); return -ENODEV; } - chip.byte_len = int_pow(2, id[7] - 0x21 + 4) * 1024; + at25->chip.byte_len = int_pow(2, id[7] - 0x21 + 4) * 1024; if (at25->chip.byte_len > 64 * 1024) at25->chip.flags |= EE_ADDR3; @@ -464,7 +461,7 @@ static int at25_probe(struct spi_device *spi) at25->nvmem_config.type = is_fram ? 
NVMEM_TYPE_FRAM : NVMEM_TYPE_EEPROM; at25->nvmem_config.name = dev_name(&spi->dev); at25->nvmem_config.dev = &spi->dev; - at25->nvmem_config.read_only = chip.flags & EE_READONLY; + at25->nvmem_config.read_only = at25->chip.flags & EE_READONLY; at25->nvmem_config.root_only = true; at25->nvmem_config.owner = THIS_MODULE; at25->nvmem_config.compat = true; @@ -474,17 +471,18 @@ static int at25_probe(struct spi_device *spi) at25->nvmem_config.priv = at25; at25->nvmem_config.stride = 1; at25->nvmem_config.word_size = 1; - at25->nvmem_config.size = chip.byte_len; + at25->nvmem_config.size = at25->chip.byte_len; at25->nvmem = devm_nvmem_register(&spi->dev, &at25->nvmem_config); if (IS_ERR(at25->nvmem)) return PTR_ERR(at25->nvmem); dev_info(&spi->dev, "%d %s %s %s%s, pagesize %u\n", - (chip.byte_len < 1024) ? chip.byte_len : (chip.byte_len / 1024), - (chip.byte_len < 1024) ? "Byte" : "KByte", + (at25->chip.byte_len < 1024) ? + at25->chip.byte_len : (at25->chip.byte_len / 1024), + (at25->chip.byte_len < 1024) ? "Byte" : "KByte", at25->chip.name, is_fram ? "fram" : "eeprom", - (chip.flags & EE_READONLY) ? " (readonly)" : "", + (at25->chip.flags & EE_READONLY) ? " (readonly)" : "", at25->chip.page_size); return 0; } diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 39aca7753719..4ccbf43e6bfa 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -719,16 +719,18 @@ static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx) static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen) { u64 size = 0; - int i; + int oix; size = ALIGN(metalen, FASTRPC_ALIGN); - for (i = 0; i < ctx->nscalars; i++) { + for (oix = 0; oix < ctx->nbufs; oix++) { + int i = ctx->olaps[oix].raix; + if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) { - if (ctx->olaps[i].offset == 0) + if (ctx->olaps[oix].offset == 0) size = ALIGN(size, FASTRPC_ALIGN); - size += (ctx->olaps[i].mend - ctx->olaps[i].mstart); + size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart); } } diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index f4c8e1a61f53..b431cdd27353 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1514,6 +1514,12 @@ static int mmc_spi_remove(struct spi_device *spi) return 0; } +static const struct spi_device_id mmc_spi_dev_ids[] = { + { "mmc-spi-slot"}, + { }, +}; +MODULE_DEVICE_TABLE(spi, mmc_spi_dev_ids); + static const struct of_device_id mmc_spi_of_match_table[] = { { .compatible = "mmc-spi-slot", }, {}, @@ -1525,6 +1531,7 @@ static struct spi_driver mmc_spi_driver = { .name = "mmc_spi", .of_match_table = mmc_spi_of_match_table, }, + .id_table = mmc_spi_dev_ids, .probe = mmc_spi_probe, .remove = mmc_spi_remove, }; diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 943940b44e83..632775217d35 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -2291,8 +2291,10 @@ static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card sdr_set_field(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY1, i); ret = mmc_get_ext_csd(card, &ext_csd); - if (!ret) + if (!ret) { result_dly1 |= (1 << i); + kfree(ext_csd); + } } host->hs400_tuning = false; diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index a4407f391f66..f5b2684ad805 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -673,7 +673,7 @@ static int renesas_sdhi_execute_tuning(struct mmc_host *mmc, u32 opcode) /* Issue CMD19 twice for each tap */ for 
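
/*
 * Editor's note on the mmc_spi hunk above -- an annotation, not part of
 * the patch. Even for devices created from the device tree, the SPI core
 * reports uevents as "spi:<name>", where <name> is the compatible string
 * with the vendor prefix stripped, so module autoloading needs a
 * spi_device_id table next to the OF match table. A minimal sketch of
 * the pairing, with hypothetical names:
 */
static const struct spi_device_id demo_spi_ids[] = {
	{ "demo-chip" },			/* autoload alias: "spi:demo-chip" */
	{ }
};
MODULE_DEVICE_TABLE(spi, demo_spi_ids);

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "acme,demo-chip" },	/* binds at probe time */
	{ }
};
MODULE_DEVICE_TABLE(of, demo_of_match);
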
(i = 0; i < 2 * priv->tap_num; i++) { - int cmd_error; + int cmd_error = 0; /* Set sampling clock position */ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, i % priv->tap_num); diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index afaf33707d46..764ee1b761d9 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -310,7 +310,6 @@ static struct esdhc_soc_data usdhc_imx8qxp_data = { .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES - | ESDHC_FLAG_CQHCI | ESDHC_FLAG_STATE_LOST_IN_LPMODE | ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME, }; @@ -319,7 +318,6 @@ static struct esdhc_soc_data usdhc_imx8mm_data = { .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES - | ESDHC_FLAG_CQHCI | ESDHC_FLAG_STATE_LOST_IN_LPMODE, }; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 269c86569402..07c6da1f2f0f 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -771,7 +771,19 @@ static void sdhci_adma_table_pre(struct sdhci_host *host, len -= offset; } - BUG_ON(len > 65536); + /* + * The block layer forces a minimum segment size of PAGE_SIZE, + * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write + * multiple descriptors, noting that the ADMA table is sized + * for 4KiB chunks anyway, so it will be big enough. + */ + while (len > host->max_adma) { + int n = 32 * 1024; /* 32KiB*/ + + __sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID); + addr += n; + len -= n; + } /* tran, valid */ if (len) @@ -3968,6 +3980,7 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev, * descriptor for each segment, plus 1 for a nop end descriptor. */ host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1; + host->max_adma = 65536; host->max_timeout_count = 0xE; @@ -4633,10 +4646,12 @@ int sdhci_setup_host(struct sdhci_host *host) * be larger than 64 KiB though. */ if (host->flags & SDHCI_USE_ADMA) { - if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) + if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) { + host->max_adma = 65532; /* 32-bit alignment */ mmc->max_seg_size = 65535; - else + } else { mmc->max_seg_size = 65536; + } } else { mmc->max_seg_size = mmc->max_req_size; } diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index bb883553d3b4..d7929d725730 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -340,7 +340,8 @@ struct sdhci_adma2_64_desc { /* * Maximum segments assuming a 512KiB maximum requisition size and a minimum - * 4KiB page size. + * 4KiB page size. Note this also allows enough for multiple descriptors in + * case of PAGE_SIZE >= 64KiB. */ #define SDHCI_MAX_SEGS 128 @@ -543,6 +544,7 @@ struct sdhci_host { unsigned int blocks; /* remaining PIO blocks */ int sg_count; /* Mapped sg entries */ + int max_adma; /* Max. 
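
/*
 * Editor's note on the sdhci_adma_table_pre() change above -- an
 * annotation, not part of the patch. A worked example of the splitting,
 * assuming the quirk limit max_adma = 65532 and PAGE_SIZE = 64KiB:
 *
 *   len = 65536: 65536 > 65532, so emit one 32KiB descriptor, len = 32768
 *   32768 <= 65532, the loop exits, the tail descriptor covers 32768 bytes
 *
 * One oversized segment thus becomes two valid descriptors instead of
 * tripping the old BUG_ON(len > 65536).
 */
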
length in ADMA descriptor */ void *adma_table; /* ADMA descriptor table */ void *align_buffer; /* Bounce buffer */ diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c index 9802e265fca8..2b317ed6c103 100644 --- a/drivers/mtd/devices/mtd_dataflash.c +++ b/drivers/mtd/devices/mtd_dataflash.c @@ -96,6 +96,13 @@ struct dataflash { struct mtd_info mtd; }; +static const struct spi_device_id dataflash_dev_ids[] = { + { "at45" }, + { "dataflash" }, + { }, +}; +MODULE_DEVICE_TABLE(spi, dataflash_dev_ids); + #ifdef CONFIG_OF static const struct of_device_id dataflash_dt_ids[] = { { .compatible = "atmel,at45", }, @@ -927,6 +934,7 @@ static struct spi_driver dataflash_driver = { .name = "mtd_dataflash", .of_match_table = of_match_ptr(dataflash_dt_ids), }, + .id_table = dataflash_dev_ids, .probe = dataflash_probe, .remove = dataflash_remove, diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index 67b7cb67c030..0a45d3c6c15b 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -26,7 +26,7 @@ config MTD_NAND_DENALI_PCI config MTD_NAND_DENALI_DT tristate "Denali NAND controller as a DT device" select MTD_NAND_DENALI - depends on HAS_DMA && HAVE_CLK && OF + depends on HAS_DMA && HAVE_CLK && OF && HAS_IOMEM help Enable the driver for NAND flash on platforms using a Denali NAND controller as a DT device. diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index 658f0cbe7ce8..6b2bda815b88 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -15,6 +15,7 @@ #include <linux/clk.h> #include <linux/completion.h> +#include <linux/delay.h> #include <linux/dmaengine.h> #include <linux/dma-direction.h> #include <linux/dma-mapping.h> @@ -93,6 +94,14 @@ #define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ) +/* + * According to SPEAr300 Reference Manual (RM0082) + * TOUTDEL = 7ns (Output delay from the flip-flops to the board) + * TINDEL = 5ns (Input delay from the board to the flip-flop) + */ +#define TOUTDEL 7000 +#define TINDEL 5000 + struct fsmc_nand_timings { u8 tclr; u8 tar; @@ -277,7 +286,7 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host, { unsigned long hclk = clk_get_rate(host->clk); unsigned long hclkn = NSEC_PER_SEC / hclk; - u32 thiz, thold, twait, tset; + u32 thiz, thold, twait, tset, twait_min; if (sdrt->tRC_min < 30000) return -EOPNOTSUPP; @@ -309,13 +318,6 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host, else if (tims->thold > FSMC_THOLD_MASK) tims->thold = FSMC_THOLD_MASK; - twait = max(sdrt->tRP_min, sdrt->tWP_min); - tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1; - if (tims->twait == 0) - tims->twait = 1; - else if (tims->twait > FSMC_TWAIT_MASK) - tims->twait = FSMC_TWAIT_MASK; - tset = max(sdrt->tCS_min - sdrt->tWP_min, sdrt->tCEA_max - sdrt->tREA_max); tims->tset = DIV_ROUND_UP(tset / 1000, hclkn) - 1; @@ -324,6 +326,21 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host, else if (tims->tset > FSMC_TSET_MASK) tims->tset = FSMC_TSET_MASK; + /* + * According to SPEAr300 Reference Manual (RM0082) which gives more + * information related to FSMC timings than the SPEAr600 one (RM0305), + * twait >= tCEA - (tset * TCLK) + TOUTDEL + TINDEL + */ + twait_min = sdrt->tCEA_max - ((tims->tset + 1) * hclkn * 1000) + + TOUTDEL + TINDEL; + twait = max3(sdrt->tRP_min, sdrt->tWP_min, twait_min); + + tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1; + if (tims->twait == 0) + tims->twait = 1; + else if (tims->twait > FSMC_TWAIT_MASK) + tims->twait
= FSMC_TWAIT_MASK; + return 0; } @@ -664,6 +681,9 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op, instr->ctx.waitrdy.timeout_ms); break; } + + if (instr->delay_ns) + ndelay(instr->delay_ns); } return ret; diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 3d6c6e880520..a130320de412 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -926,7 +926,7 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip, struct nand_sdr_timings *spec_timings) { const struct nand_controller_ops *ops = chip->controller->ops; - int best_mode = 0, mode, ret; + int best_mode = 0, mode, ret = -EOPNOTSUPP; iface->type = NAND_SDR_IFACE; @@ -977,7 +977,7 @@ int nand_choose_best_nvddr_timings(struct nand_chip *chip, struct nand_nvddr_timings *spec_timings) { const struct nand_controller_ops *ops = chip->controller->ops; - int best_mode = 0, mode, ret; + int best_mode = 0, mode, ret = -EOPNOTSUPP; iface->type = NAND_NVDDR_IFACE; @@ -1837,7 +1837,7 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock) NAND_OP_CMD(NAND_CMD_ERASE1, 0), NAND_OP_ADDR(2, addrs, 0), NAND_OP_CMD(NAND_CMD_ERASE2, - NAND_COMMON_TIMING_MS(conf, tWB_max)), + NAND_COMMON_TIMING_NS(conf, tWB_max)), NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max), 0), }; diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 10506a4b66ef..6cccc3dc00bc 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -567,9 +567,7 @@ config XEN_NETDEV_BACKEND config VMXNET3 tristate "VMware VMXNET3 ethernet driver" depends on PCI && INET - depends on !(PAGE_SIZE_64KB || ARM64_64K_PAGES || \ - IA64_PAGE_SIZE_64KB || PARISC_PAGE_SIZE_64KB || \ - PPC_64K_PAGES) + depends on PAGE_SIZE_LESS_THAN_64KB help This driver supports VMware's vmxnet3 virtual ethernet NIC. 
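
/*
 * Editor's note on the fsmc_calc_timings() change above -- an annotation,
 * not part of the patch. A worked example of the new TWAIT bound, using
 * hypothetical ONFI-mode-0-style numbers (picoseconds) and hclk = 166 MHz,
 * i.e. hclkn = 6 ns:
 *
 *   tCEA_max = 100000, tRP_min = tWP_min = 50000, tims->tset = 1
 *   twait_min = 100000 - (1 + 1) * 6 * 1000 + 7000 + 5000 = 100000
 *   twait     = max3(50000, 50000, 100000) = 100000
 *   tims->twait = DIV_ROUND_UP(100, 6) - 1 = 16
 *
 * versus DIV_ROUND_UP(50, 6) - 1 = 8 before: the board-level delays can
 * double the wait programmed into the controller.
 */
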
To compile this driver as a module, choose M here: the diff --git a/drivers/net/amt.c b/drivers/net/amt.c index 47a04c330885..b732ee9a50ef 100644 --- a/drivers/net/amt.c +++ b/drivers/net/amt.c @@ -3286,7 +3286,7 @@ static void __exit amt_fini(void) { rtnl_link_unregister(&amt_link_ops); unregister_netdevice_notifier(&amt_notifier_block); - cancel_delayed_work(&source_gc_wq); + cancel_delayed_work_sync(&source_gc_wq); __amt_source_gc_work(); destroy_workqueue(amt_wq); } diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 2ec8e015c7b3..533e476988f2 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -1501,14 +1501,14 @@ void bond_alb_monitor(struct work_struct *work) struct slave *slave; if (!bond_has_slaves(bond)) { - bond_info->tx_rebalance_counter = 0; + atomic_set(&bond_info->tx_rebalance_counter, 0); bond_info->lp_counter = 0; goto re_arm; } rcu_read_lock(); - bond_info->tx_rebalance_counter++; + atomic_inc(&bond_info->tx_rebalance_counter); bond_info->lp_counter++; /* send learning packets */ @@ -1530,7 +1530,7 @@ void bond_alb_monitor(struct work_struct *work) } /* rebalance tx traffic */ - if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) { + if (atomic_read(&bond_info->tx_rebalance_counter) >= BOND_TLB_REBALANCE_TICKS) { bond_for_each_slave_rcu(bond, slave, iter) { tlb_clear_slave(bond, slave, 1); if (slave == rcu_access_pointer(bond->curr_active_slave)) { @@ -1540,7 +1540,7 @@ void bond_alb_monitor(struct work_struct *work) bond_info->unbalanced_load = 0; } } - bond_info->tx_rebalance_counter = 0; + atomic_set(&bond_info->tx_rebalance_counter, 0); } if (bond_info->rlb_enabled) { @@ -1610,7 +1610,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave) tlb_init_slave(slave); /* order a rebalance ASAP */ - bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS; + atomic_set(&bond->alb_info.tx_rebalance_counter, + BOND_TLB_REBALANCE_TICKS); if (bond->alb_info.rlb_enabled) bond->alb_info.rlb_rebalance = 1; @@ -1647,7 +1648,8 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char rlb_clear_slave(bond, slave); } else if (link == BOND_LINK_UP) { /* order a rebalance ASAP */ - bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS; + atomic_set(&bond_info->tx_rebalance_counter, + BOND_TLB_REBALANCE_TICKS); if (bond->alb_info.rlb_enabled) { bond->alb_info.rlb_rebalance = 1; /* If the updelay module parameter is smaller than the diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c index 74d9899fc904..eb74cdf26b88 100644 --- a/drivers/net/can/kvaser_pciefd.c +++ b/drivers/net/can/kvaser_pciefd.c @@ -248,6 +248,9 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); #define KVASER_PCIEFD_SPACK_EWLR BIT(23) #define KVASER_PCIEFD_SPACK_EPLR BIT(24) +/* Kvaser KCAN_EPACK second word */ +#define KVASER_PCIEFD_EPACK_DIR_TX BIT(0) + struct kvaser_pciefd; struct kvaser_pciefd_can { @@ -1285,7 +1288,10 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can, can->err_rep_cnt++; can->can.can_stats.bus_error++; - stats->rx_errors++; + if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX) + stats->tx_errors++; + else + stats->rx_errors++; can->bec.txerr = bec.txerr; can->bec.rxerr = bec.rxerr; diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 2470c47b2e31..c2a8421e7845 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -204,16 +204,16 @@ enum m_can_reg { /* 
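
/*
 * Editor's note on the bond_alb hunk above -- an annotation, not part of
 * the patch. tx_rebalance_counter is incremented from the periodic
 * monitor work but reset from link-change paths running concurrently,
 * so a plain int update is a data race; hence the switch to atomic_t.
 * A minimal sketch of the pattern, with a hypothetical struct:
 */
#include <linux/atomic.h>

struct demo_alb_info {
	atomic_t tx_rebalance_counter;
};

static void demo_monitor_tick(struct demo_alb_info *info)
{
	atomic_inc(&info->tx_rebalance_counter);	/* writer 1: periodic work */
}

static void demo_link_up(struct demo_alb_info *info, int ticks)
{
	atomic_set(&info->tx_rebalance_counter, ticks);	/* writer 2: notifier */
}
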
Interrupts for version 3.0.x */ #define IR_ERR_LEC_30X (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE) -#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_ELO | IR_BEU | \ - IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \ - IR_RF1L | IR_RF0L) +#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_BEU | IR_BEC | \ + IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \ + IR_RF0L) #define IR_ERR_ALL_30X (IR_ERR_STATE | IR_ERR_BUS_30X) /* Interrupts for version >= 3.1.x */ #define IR_ERR_LEC_31X (IR_PED | IR_PEA) -#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_ELO | IR_BEU | \ - IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \ - IR_RF1L | IR_RF0L) +#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \ + IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \ + IR_RF0L) #define IR_ERR_ALL_31X (IR_ERR_STATE | IR_ERR_BUS_31X) /* Interrupt Line Select (ILS) */ @@ -517,7 +517,7 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs) err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DATA, cf->data, DIV_ROUND_UP(cf->len, 4)); if (err) - goto out_fail; + goto out_free_skb; } /* acknowledge rx fifo 0 */ @@ -532,6 +532,8 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs) return 0; +out_free_skb: + kfree_skb(skb); out_fail: netdev_err(dev, "FIFO read returned %d\n", err); return err; @@ -810,8 +812,6 @@ static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus) { if (irqstatus & IR_WDI) netdev_err(dev, "Message RAM Watchdog event due to missing READY\n"); - if (irqstatus & IR_ELO) - netdev_err(dev, "Error Logging Overflow\n"); if (irqstatus & IR_BEU) netdev_err(dev, "Bit Error Uncorrected\n"); if (irqstatus & IR_BEC) @@ -1494,20 +1494,32 @@ static int m_can_dev_setup(struct m_can_classdev *cdev) case 30: /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */ can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); - cdev->can.bittiming_const = &m_can_bittiming_const_30X; - cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X; + cdev->can.bittiming_const = cdev->bit_timing ? + cdev->bit_timing : &m_can_bittiming_const_30X; + + cdev->can.data_bittiming_const = cdev->data_timing ? + cdev->data_timing : + &m_can_data_bittiming_const_30X; break; case 31: /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */ can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); - cdev->can.bittiming_const = &m_can_bittiming_const_31X; - cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X; + cdev->can.bittiming_const = cdev->bit_timing ? + cdev->bit_timing : &m_can_bittiming_const_31X; + + cdev->can.data_bittiming_const = cdev->data_timing ? + cdev->data_timing : + &m_can_data_bittiming_const_31X; break; case 32: case 33: /* Support both MCAN version v3.2.x and v3.3.0 */ - cdev->can.bittiming_const = &m_can_bittiming_const_31X; - cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X; + cdev->can.bittiming_const = cdev->bit_timing ? + cdev->bit_timing : &m_can_bittiming_const_31X; + + cdev->can.data_bittiming_const = cdev->data_timing ? + cdev->data_timing : + &m_can_data_bittiming_const_31X; cdev->can.ctrlmode_supported |= (m_can_niso_supported(cdev) ? 
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h index d18b515e6ccc..2c5d40997168 100644 --- a/drivers/net/can/m_can/m_can.h +++ b/drivers/net/can/m_can/m_can.h @@ -85,6 +85,9 @@ struct m_can_classdev { struct sk_buff *tx_skb; struct phy *transceiver; + const struct can_bittiming_const *bit_timing; + const struct can_bittiming_const *data_timing; + struct m_can_ops *ops; int version; diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c index 89cc3d41e952..b56a54d6c5a9 100644 --- a/drivers/net/can/m_can/m_can_pci.c +++ b/drivers/net/can/m_can/m_can_pci.c @@ -18,9 +18,14 @@ #define M_CAN_PCI_MMIO_BAR 0 -#define M_CAN_CLOCK_FREQ_EHL 100000000 #define CTL_CSR_INT_CTL_OFFSET 0x508 +struct m_can_pci_config { + const struct can_bittiming_const *bit_timing; + const struct can_bittiming_const *data_timing; + unsigned int clock_freq; +}; + struct m_can_pci_priv { struct m_can_classdev cdev; @@ -42,8 +47,13 @@ static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg) static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count) { struct m_can_pci_priv *priv = cdev_to_priv(cdev); + void __iomem *src = priv->base + offset; - ioread32_rep(priv->base + offset, val, val_count); + while (val_count--) { + *(unsigned int *)val = ioread32(src); + val += 4; + src += 4; + } return 0; } @@ -61,8 +71,13 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset, const void *val, size_t val_count) { struct m_can_pci_priv *priv = cdev_to_priv(cdev); + void __iomem *dst = priv->base + offset; - iowrite32_rep(priv->base + offset, val, val_count); + while (val_count--) { + iowrite32(*(unsigned int *)val, dst); + val += 4; + dst += 4; + } return 0; } @@ -74,9 +89,40 @@ static struct m_can_ops m_can_pci_ops = { .read_fifo = iomap_read_fifo, }; +static const struct can_bittiming_const m_can_bittiming_const_ehl = { + .name = KBUILD_MODNAME, + .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 64, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 128, + .sjw_max = 128, + .brp_min = 1, + .brp_max = 512, + .brp_inc = 1, +}; + +static const struct can_bittiming_const m_can_data_bittiming_const_ehl = { + .name = KBUILD_MODNAME, + .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 16, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 32, + .brp_inc = 1, +}; + +static const struct m_can_pci_config m_can_pci_ehl = { + .bit_timing = &m_can_bittiming_const_ehl, + .data_timing = &m_can_data_bittiming_const_ehl, + .clock_freq = 200000000, +}; + static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) { struct device *dev = &pci->dev; + const struct m_can_pci_config *cfg; struct m_can_classdev *mcan_class; struct m_can_pci_priv *priv; void __iomem *base; @@ -104,6 +150,8 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) if (!mcan_class) return -ENOMEM; + cfg = (const struct m_can_pci_config *)id->driver_data; + priv = cdev_to_priv(mcan_class); priv->base = base; @@ -115,7 +163,9 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) mcan_class->dev = &pci->dev; mcan_class->net->irq = pci_irq_vector(pci, 0); mcan_class->pm_clock_support = 1; - mcan_class->can.clock.freq = id->driver_data; + mcan_class->bit_timing = cfg->bit_timing; + mcan_class->data_timing = cfg->data_timing; + mcan_class->can.clock.freq = 
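
/*
 * Editor's note on the iomap_read_fifo()/iomap_write_fifo() change above
 * -- an annotation, not part of the patch. ioread32_rep() transfers
 * 'count' words from a single, fixed MMIO address; it is meant for FIFO
 * registers. The PCI glue's Message RAM is memory-like, so each word
 * sits at the next address and the copy has to advance the pointer
 * itself, as in this sketch:
 */
#include <linux/io.h>
#include <linux/types.h>

static void demo_copy_from_mram(void __iomem *src, u32 *dst, size_t words)
{
	while (words--) {
		*dst++ = ioread32(src);
		src += 4;	/* ioread32_rep() would never advance this */
	}
}
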
cfg->clock_freq; mcan_class->ops = &m_can_pci_ops; pci_set_drvdata(pci, mcan_class); @@ -168,8 +218,8 @@ static SIMPLE_DEV_PM_OPS(m_can_pci_pm_ops, m_can_pci_suspend, m_can_pci_resume); static const struct pci_device_id m_can_pci_id_table[] = { - { PCI_VDEVICE(INTEL, 0x4bc1), M_CAN_CLOCK_FREQ_EHL, }, - { PCI_VDEVICE(INTEL, 0x4bc2), M_CAN_CLOCK_FREQ_EHL, }, + { PCI_VDEVICE(INTEL, 0x4bc1), (kernel_ulong_t)&m_can_pci_ehl, }, + { PCI_VDEVICE(INTEL, 0x4bc2), (kernel_ulong_t)&m_can_pci_ehl, }, { } /* Terminating Entry */ }; MODULE_DEVICE_TABLE(pci, m_can_pci_id_table); diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c index 92a54a5fd4c5..964c8a09226a 100644 --- a/drivers/net/can/pch_can.c +++ b/drivers/net/can/pch_can.c @@ -692,11 +692,11 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota) cf->data[i + 1] = data_reg >> 8; } - netif_receive_skb(skb); rcv_pkts++; stats->rx_packets++; quota--; stats->rx_bytes += cf->len; + netif_receive_skb(skb); pch_fifo_thresh(priv, obj_num); obj_num++; diff --git a/drivers/net/can/sja1000/ems_pcmcia.c b/drivers/net/can/sja1000/ems_pcmcia.c index e21b169c14c0..4642b6d4aaf7 100644 --- a/drivers/net/can/sja1000/ems_pcmcia.c +++ b/drivers/net/can/sja1000/ems_pcmcia.c @@ -234,7 +234,12 @@ static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base) free_sja1000dev(dev); } - err = request_irq(dev->irq, &ems_pcmcia_interrupt, IRQF_SHARED, + if (!card->channels) { + err = -ENODEV; + goto failure_cleanup; + } + + err = request_irq(pdev->irq, &ems_pcmcia_interrupt, IRQF_SHARED, DRV_NAME, card); if (!err) return 0; diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c index 59ba7c7beec0..f7af1bf5ab46 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c @@ -28,10 +28,6 @@ #include "kvaser_usb.h" -/* Forward declaration */ -static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg; - -#define CAN_USB_CLOCK 8000000 #define MAX_USBCAN_NET_DEVICES 2 /* Command header size */ @@ -80,6 +76,12 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg; #define CMD_LEAF_LOG_MESSAGE 106 +/* Leaf frequency options */ +#define KVASER_USB_LEAF_SWOPTION_FREQ_MASK 0x60 +#define KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK 0 +#define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5) +#define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6) + /* error factors */ #define M16C_EF_ACKE BIT(0) #define M16C_EF_CRCE BIT(1) @@ -340,6 +342,50 @@ struct kvaser_usb_err_summary { }; }; +static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = { + .name = "kvaser_usb", + .tseg1_min = KVASER_USB_TSEG1_MIN, + .tseg1_max = KVASER_USB_TSEG1_MAX, + .tseg2_min = KVASER_USB_TSEG2_MIN, + .tseg2_max = KVASER_USB_TSEG2_MAX, + .sjw_max = KVASER_USB_SJW_MAX, + .brp_min = KVASER_USB_BRP_MIN, + .brp_max = KVASER_USB_BRP_MAX, + .brp_inc = KVASER_USB_BRP_INC, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_8mhz = { + .clock = { + .freq = 8000000, + }, + .timestamp_freq = 1, + .bittiming_const = &kvaser_usb_leaf_bittiming_const, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_16mhz = { + .clock = { + .freq = 16000000, + }, + .timestamp_freq = 1, + .bittiming_const = &kvaser_usb_leaf_bittiming_const, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_24mhz = { + .clock = { + .freq = 24000000, + }, + .timestamp_freq = 1, + .bittiming_const = 
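
/*
 * Editor's note on the pch_can hunk above -- an annotation, not part of
 * the patch. netif_receive_skb() hands the buffer to the network stack,
 * which may free it immediately, so every field of the embedded
 * can_frame must be read before the call. Sketch of the safe ordering:
 */
#include <linux/can.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void demo_rx_complete(struct net_device *ndev, struct sk_buff *skb)
{
	struct can_frame *cf = (struct can_frame *)skb->data;

	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += cf->len;	/* touch cf while the skb is ours */
	netif_receive_skb(skb);			/* skb and cf may be gone now */
}
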
&kvaser_usb_leaf_bittiming_const, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_32mhz = { + .clock = { + .freq = 32000000, + }, + .timestamp_freq = 1, + .bittiming_const = &kvaser_usb_leaf_bittiming_const, +}; + static void * kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv, const struct sk_buff *skb, int *frame_len, @@ -471,6 +517,27 @@ static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev, return rc; } +static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev, + const struct leaf_cmd_softinfo *softinfo) +{ + u32 sw_options = le32_to_cpu(softinfo->sw_options); + + dev->fw_version = le32_to_cpu(softinfo->fw_version); + dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx); + + switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) { + case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK: + dev->cfg = &kvaser_usb_leaf_dev_cfg_16mhz; + break; + case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK: + dev->cfg = &kvaser_usb_leaf_dev_cfg_24mhz; + break; + case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK: + dev->cfg = &kvaser_usb_leaf_dev_cfg_32mhz; + break; + } +} + static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev) { struct kvaser_cmd cmd; @@ -486,14 +553,13 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev) switch (dev->card_data.leaf.family) { case KVASER_LEAF: - dev->fw_version = le32_to_cpu(cmd.u.leaf.softinfo.fw_version); - dev->max_tx_urbs = - le16_to_cpu(cmd.u.leaf.softinfo.max_outstanding_tx); + kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo); break; case KVASER_USBCAN: dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version); dev->max_tx_urbs = le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx); + dev->cfg = &kvaser_usb_leaf_dev_cfg_8mhz; break; } @@ -1225,24 +1291,11 @@ static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev) { struct kvaser_usb_dev_card_data *card_data = &dev->card_data; - dev->cfg = &kvaser_usb_leaf_dev_cfg; card_data->ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; return 0; } -static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = { - .name = "kvaser_usb", - .tseg1_min = KVASER_USB_TSEG1_MIN, - .tseg1_max = KVASER_USB_TSEG1_MAX, - .tseg2_min = KVASER_USB_TSEG2_MIN, - .tseg2_max = KVASER_USB_TSEG2_MAX, - .sjw_max = KVASER_USB_SJW_MAX, - .brp_min = KVASER_USB_BRP_MIN, - .brp_max = KVASER_USB_BRP_MAX, - .brp_inc = KVASER_USB_BRP_INC, -}; - static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev) { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); @@ -1348,11 +1401,3 @@ const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = { .dev_read_bulk_callback = kvaser_usb_leaf_read_bulk_callback, .dev_frame_to_cmd = kvaser_usb_leaf_frame_to_cmd, }; - -static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg = { - .clock = { - .freq = CAN_USB_CLOCK, - }, - .timestamp_freq = 1, - .bittiming_const = &kvaser_usb_leaf_bittiming_const, -}; diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c index 01e37b75471e..2b88f03e5252 100644 --- a/drivers/net/dsa/b53/b53_spi.c +++ b/drivers/net/dsa/b53/b53_spi.c @@ -349,6 +349,19 @@ static const struct of_device_id b53_spi_of_match[] = { }; MODULE_DEVICE_TABLE(of, b53_spi_of_match); +static const struct spi_device_id b53_spi_ids[] = { + { .name = "bcm5325" }, + { .name = "bcm5365" }, + { .name = "bcm5395" }, + { .name = "bcm5397" }, + { .name = "bcm5398" }, + { .name = "bcm53115" }, + { .name = "bcm53125" }, + { .name = 
"bcm53128" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(spi, b53_spi_ids); + static struct spi_driver b53_spi_driver = { .driver = { .name = "b53-switch", @@ -357,6 +370,7 @@ static struct spi_driver b53_spi_driver = { .probe = b53_spi_probe, .remove = b53_spi_remove, .shutdown = b53_spi_shutdown, + .id_table = b53_spi_ids, }; module_spi_driver(b53_spi_driver); diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index 43fc3087aeb3..013e9c02be71 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -1002,57 +1002,32 @@ static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member) data &= ~PORT_VLAN_MEMBERSHIP; data |= (member & dev->port_mask); ksz_pwrite8(dev, port, P_MIRROR_CTRL, data); - dev->ports[port].member = member; } static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) { struct ksz_device *dev = ds->priv; - int forward = dev->member; struct ksz_port *p; - int member = -1; u8 data; - p = &dev->ports[port]; - ksz_pread8(dev, port, P_STP_CTRL, &data); data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE); switch (state) { case BR_STATE_DISABLED: data |= PORT_LEARN_DISABLE; - if (port < dev->phy_port_cnt) - member = 0; break; case BR_STATE_LISTENING: data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE); - if (port < dev->phy_port_cnt && - p->stp_state == BR_STATE_DISABLED) - member = dev->host_mask | p->vid_member; break; case BR_STATE_LEARNING: data |= PORT_RX_ENABLE; break; case BR_STATE_FORWARDING: data |= (PORT_TX_ENABLE | PORT_RX_ENABLE); - - /* This function is also used internally. */ - if (port == dev->cpu_port) - break; - - /* Port is a member of a bridge. */ - if (dev->br_member & BIT(port)) { - dev->member |= BIT(port); - member = dev->member; - } else { - member = dev->host_mask | p->vid_member; - } break; case BR_STATE_BLOCKING: data |= PORT_LEARN_DISABLE; - if (port < dev->phy_port_cnt && - p->stp_state == BR_STATE_DISABLED) - member = dev->host_mask | p->vid_member; break; default: dev_err(ds->dev, "invalid STP state: %d\n", state); @@ -1060,22 +1035,11 @@ static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) } ksz_pwrite8(dev, port, P_STP_CTRL, data); + + p = &dev->ports[port]; p->stp_state = state; - /* Port membership may share register with STP state. */ - if (member >= 0 && member != p->member) - ksz8_cfg_port_member(dev, port, (u8)member); - - /* Check if forwarding needs to be updated. */ - if (state != BR_STATE_FORWARDING) { - if (dev->br_member & BIT(port)) - dev->member &= ~BIT(port); - } - /* When topology has changed the function ksz_update_port_member - * should be called to modify port forwarding behavior. 
- */ - if (forward != dev->member) - ksz_update_port_member(dev, port); + ksz_update_port_member(dev, port); } static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port) @@ -1341,7 +1305,7 @@ static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port) static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port) { - struct ksz_port *p = &dev->ports[port]; + struct dsa_switch *ds = dev->ds; struct ksz8 *ksz8 = dev->priv; const u32 *masks; u8 member; @@ -1368,10 +1332,11 @@ static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port) if (!ksz_is_ksz88x3(dev)) ksz8795_cpu_interface_select(dev, port); - member = dev->port_mask; + member = dsa_user_ports(ds); } else { - member = dev->host_mask | p->vid_member; + member = BIT(dsa_upstream_port(ds, port)); } + ksz8_cfg_port_member(dev, port, member); } @@ -1392,20 +1357,13 @@ static void ksz8_config_cpu_port(struct dsa_switch *ds) ksz_cfg(dev, regs[S_TAIL_TAG_CTRL], masks[SW_TAIL_TAG_ENABLE], true); p = &dev->ports[dev->cpu_port]; - p->vid_member = dev->port_mask; p->on = 1; ksz8_port_setup(dev, dev->cpu_port, true); - dev->member = dev->host_mask; for (i = 0; i < dev->phy_port_cnt; i++) { p = &dev->ports[i]; - /* Initialize to non-zero so that ksz_cfg_port_member() will - * be called. - */ - p->vid_member = BIT(i); - p->member = dev->port_mask; ksz8_port_stp_state_set(ds, i, BR_STATE_DISABLED); /* Last port may be disabled. */ diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 854e25f43fa7..353b5f981740 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -391,7 +391,6 @@ static void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member) { ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member); - dev->ports[port].member = member; } static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port, @@ -400,8 +399,6 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port, struct ksz_device *dev = ds->priv; struct ksz_port *p = &dev->ports[port]; u8 data; - int member = -1; - int forward = dev->member; ksz_pread8(dev, port, P_STP_CTRL, &data); data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE); @@ -409,40 +406,18 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port, switch (state) { case BR_STATE_DISABLED: data |= PORT_LEARN_DISABLE; - if (port != dev->cpu_port) - member = 0; break; case BR_STATE_LISTENING: data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE); - if (port != dev->cpu_port && - p->stp_state == BR_STATE_DISABLED) - member = dev->host_mask | p->vid_member; break; case BR_STATE_LEARNING: data |= PORT_RX_ENABLE; break; case BR_STATE_FORWARDING: data |= (PORT_TX_ENABLE | PORT_RX_ENABLE); - - /* This function is also used internally. */ - if (port == dev->cpu_port) - break; - - member = dev->host_mask | p->vid_member; - mutex_lock(&dev->dev_mutex); - - /* Port is a member of a bridge. 
*/ - if (dev->br_member & (1 << port)) { - dev->member |= (1 << port); - member = dev->member; - } - mutex_unlock(&dev->dev_mutex); break; case BR_STATE_BLOCKING: data |= PORT_LEARN_DISABLE; - if (port != dev->cpu_port && - p->stp_state == BR_STATE_DISABLED) - member = dev->host_mask | p->vid_member; break; default: dev_err(ds->dev, "invalid STP state: %d\n", state); @@ -451,23 +426,8 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port, ksz_pwrite8(dev, port, P_STP_CTRL, data); p->stp_state = state; - mutex_lock(&dev->dev_mutex); - /* Port membership may share register with STP state. */ - if (member >= 0 && member != p->member) - ksz9477_cfg_port_member(dev, port, (u8)member); - - /* Check if forwarding needs to be updated. */ - if (state != BR_STATE_FORWARDING) { - if (dev->br_member & (1 << port)) - dev->member &= ~(1 << port); - } - /* When topology has changed the function ksz_update_port_member - * should be called to modify port forwarding behavior. - */ - if (forward != dev->member) - ksz_update_port_member(dev, port); - mutex_unlock(&dev->dev_mutex); + ksz_update_port_member(dev, port); } static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port) @@ -1168,10 +1128,10 @@ static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port) static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port) { - u8 data8; - u8 member; - u16 data16; struct ksz_port *p = &dev->ports[port]; + struct dsa_switch *ds = dev->ds; + u8 data8, member; + u16 data16; /* enable tag tail for host port */ if (cpu_port) @@ -1250,12 +1210,12 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port) ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, data8); p->phydev.duplex = 1; } - mutex_lock(&dev->dev_mutex); + if (cpu_port) - member = dev->port_mask; + member = dsa_user_ports(ds); else - member = dev->host_mask | p->vid_member; - mutex_unlock(&dev->dev_mutex); + member = BIT(dsa_upstream_port(ds, port)); + ksz9477_cfg_port_member(dev, port, member); /* clear pending interrupts */ @@ -1276,8 +1236,6 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds) const char *prev_mode; dev->cpu_port = i; - dev->host_mask = (1 << dev->cpu_port); - dev->port_mask |= dev->host_mask; p = &dev->ports[i]; /* Read from XMII register to determine host port @@ -1312,23 +1270,15 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds) /* enable cpu port */ ksz9477_port_setup(dev, i, true); - p->vid_member = dev->port_mask; p->on = 1; } } - dev->member = dev->host_mask; - for (i = 0; i < dev->port_cnt; i++) { if (i == dev->cpu_port) continue; p = &dev->ports[i]; - /* Initialize to non-zero so that ksz_cfg_port_member() will - * be called. 
- */ - p->vid_member = (1 << i); - p->member = dev->port_mask; ksz9477_port_stp_state_set(ds, i, BR_STATE_DISABLED); p->on = 1; if (i < dev->phy_port_cnt) diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 7c2968a639eb..8a04302018dc 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -22,21 +22,40 @@ void ksz_update_port_member(struct ksz_device *dev, int port) { - struct ksz_port *p; + struct ksz_port *p = &dev->ports[port]; + struct dsa_switch *ds = dev->ds; + u8 port_member = 0, cpu_port; + const struct dsa_port *dp; int i; - for (i = 0; i < dev->port_cnt; i++) { - if (i == port || i == dev->cpu_port) + if (!dsa_is_user_port(ds, port)) + return; + + dp = dsa_to_port(ds, port); + cpu_port = BIT(dsa_upstream_port(ds, port)); + + for (i = 0; i < ds->num_ports; i++) { + const struct dsa_port *other_dp = dsa_to_port(ds, i); + struct ksz_port *other_p = &dev->ports[i]; + u8 val = 0; + + if (!dsa_is_user_port(ds, i)) continue; - p = &dev->ports[i]; - if (!(dev->member & (1 << i))) + if (port == i) + continue; + if (!dp->bridge_dev || dp->bridge_dev != other_dp->bridge_dev) continue; - /* Port is a member of the bridge and is forwarding. */ - if (p->stp_state == BR_STATE_FORWARDING && - p->member != dev->member) - dev->dev_ops->cfg_port_member(dev, i, dev->member); + if (other_p->stp_state == BR_STATE_FORWARDING && + p->stp_state == BR_STATE_FORWARDING) { + val |= BIT(port); + port_member |= BIT(i); + } + + dev->dev_ops->cfg_port_member(dev, i, val | cpu_port); } + + dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port); } EXPORT_SYMBOL_GPL(ksz_update_port_member); @@ -175,12 +194,6 @@ EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats); int ksz_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br) { - struct ksz_device *dev = ds->priv; - - mutex_lock(&dev->dev_mutex); - dev->br_member |= (1 << port); - mutex_unlock(&dev->dev_mutex); - /* port_stp_state_set() will be called after to put the port in * appropriate state so there is no need to do anything. */ @@ -192,13 +205,6 @@ EXPORT_SYMBOL_GPL(ksz_port_bridge_join); void ksz_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br) { - struct ksz_device *dev = ds->priv; - - mutex_lock(&dev->dev_mutex); - dev->br_member &= ~(1 << port); - dev->member &= ~(1 << port); - mutex_unlock(&dev->dev_mutex); - /* port_stp_state_set() will be called after to put the port in * forwarding state so there is no need to do anything. 
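
/*
 * Editor's note on the ksz_update_port_member() rewrite above -- an
 * annotation, not part of the patch. The membership register of a user
 * port now describes exactly which ports it may forward to: always its
 * upstream (CPU) port, plus any other user port that shares its bridge
 * and is also in the forwarding STP state. A simplified sketch with
 * hypothetical types:
 */
#include <linux/bits.h>
#include <linux/types.h>

struct demo_port {
	const void *bridge;	/* NULL when the port is not bridged */
	bool forwarding;	/* STP state is BR_STATE_FORWARDING */
};

static u8 demo_member_mask(const struct demo_port *ports, int nports,
			   int port, u8 cpu_mask)
{
	u8 mask = cpu_mask;
	int i;

	if (!ports[port].bridge || !ports[port].forwarding)
		return cpu_mask;	/* standalone port: CPU only */

	for (i = 0; i < nports; i++)
		if (i != port && ports[i].bridge == ports[port].bridge &&
		    ports[i].forwarding)
			mask |= BIT(i);

	return mask;
}
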
*/ diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index 1597c63988b4..54b456bc8972 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -25,8 +25,6 @@ struct ksz_port_mib { }; struct ksz_port { - u16 member; - u16 vid_member; bool remove_tag; /* Remove Tag flag set, for ksz8795 only */ int stp_state; struct phy_device phydev; @@ -83,8 +81,6 @@ struct ksz_device { struct ksz_port *ports; struct delayed_work mib_read; unsigned long mib_read_interval; - u16 br_member; - u16 member; u16 mirror_rx; u16 mirror_tx; u32 features; /* chip specific features */ diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index f00cbf5753b9..14f87f6ac479 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -471,6 +471,12 @@ static int mv88e6xxx_port_ppu_updates(struct mv88e6xxx_chip *chip, int port) u16 reg; int err; + /* The 88e6250 family does not have the PHY detect bit. Instead, + * report whether the port is internal. + */ + if (chip->info->family == MV88E6XXX_FAMILY_6250) + return port < chip->info->num_internal_phys; + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, ®); if (err) { dev_err(chip->dev, @@ -692,44 +698,48 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port, { struct mv88e6xxx_chip *chip = ds->priv; struct mv88e6xxx_port *p; - int err; + int err = 0; p = &chip->ports[port]; - /* FIXME: is this the correct test? If we're in fixed mode on an - * internal port, why should we process this any different from - * PHY mode? On the other hand, the port may be automedia between - * an internal PHY and the serdes... - */ - if ((mode == MLO_AN_PHY) && mv88e6xxx_phy_is_internal(ds, port)) - return; - mv88e6xxx_reg_lock(chip); - /* In inband mode, the link may come up at any time while the link - * is not forced down. Force the link down while we reconfigure the - * interface mode. - */ - if (mode == MLO_AN_INBAND && p->interface != state->interface && - chip->info->ops->port_set_link) - chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN); - - err = mv88e6xxx_port_config_interface(chip, port, state->interface); - if (err && err != -EOPNOTSUPP) - goto err_unlock; - err = mv88e6xxx_serdes_pcs_config(chip, port, mode, state->interface, - state->advertising); - /* FIXME: we should restart negotiation if something changed - which - * is something we get if we convert to using phylinks PCS operations. - */ - if (err > 0) - err = 0; + if (mode != MLO_AN_PHY || !mv88e6xxx_phy_is_internal(ds, port)) { + /* In inband mode, the link may come up at any time while the + * link is not forced down. Force the link down while we + * reconfigure the interface mode. + */ + if (mode == MLO_AN_INBAND && + p->interface != state->interface && + chip->info->ops->port_set_link) + chip->info->ops->port_set_link(chip, port, + LINK_FORCED_DOWN); + + err = mv88e6xxx_port_config_interface(chip, port, + state->interface); + if (err && err != -EOPNOTSUPP) + goto err_unlock; + + err = mv88e6xxx_serdes_pcs_config(chip, port, mode, + state->interface, + state->advertising); + /* FIXME: we should restart negotiation if something changed - + * which is something we get if we convert to using phylinks + * PCS operations. + */ + if (err > 0) + err = 0; + } /* Undo the forced down state above after completing configuration - * irrespective of its state on entry, which allows the link to come up. 
+ * irrespective of its state on entry, which allows the link to come + * up in the in-band case where there is no separate SERDES. Also + * ensure that the link can come up if the PPU is in use and we are + * in PHY mode (we treat the PPU as an effective in-band mechanism.) */ - if (mode == MLO_AN_INBAND && p->interface != state->interface && - chip->info->ops->port_set_link) + if (chip->info->ops->port_set_link && + ((mode == MLO_AN_INBAND && p->interface != state->interface) || + (mode == MLO_AN_PHY && mv88e6xxx_port_ppu_updates(chip, port)))) chip->info->ops->port_set_link(chip, port, LINK_UNFORCED); p->interface = state->interface; @@ -752,11 +762,10 @@ static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port, ops = chip->info->ops; mv88e6xxx_reg_lock(chip); - /* Internal PHYs propagate their configuration directly to the MAC. - * External PHYs depend on whether the PPU is enabled for this port. + /* Force the link down if we know the port may not be automatically + * updated by the switch or if we are using fixed-link mode. */ - if (((!mv88e6xxx_phy_is_internal(ds, port) && - !mv88e6xxx_port_ppu_updates(chip, port)) || + if ((!mv88e6xxx_port_ppu_updates(chip, port) || mode == MLO_AN_FIXED) && ops->port_sync_link) err = ops->port_sync_link(chip, port, mode, false); mv88e6xxx_reg_unlock(chip); @@ -779,11 +788,11 @@ static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port, ops = chip->info->ops; mv88e6xxx_reg_lock(chip); - /* Internal PHYs propagate their configuration directly to the MAC. - * External PHYs depend on whether the PPU is enabled for this port. + /* Configure and force the link up if we know that the port may not + * be automatically updated by the switch or if we are using fixed-link + * mode. */ - if ((!mv88e6xxx_phy_is_internal(ds, port) && - !mv88e6xxx_port_ppu_updates(chip, port)) || + if (!mv88e6xxx_port_ppu_updates(chip, port) || mode == MLO_AN_FIXED) { /* FIXME: for an automedia port, should we force the link * down here - what if the link comes up due to "other" media diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index 6ea003678798..2b05ead515cd 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -50,11 +50,22 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip, } static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, - u16 status, u16 lpa, + u16 ctrl, u16 status, u16 lpa, struct phylink_link_state *state) { + state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK); + if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) { - state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK); + /* The Speed and Duplex Resolved register is 1 if AN is enabled + * and complete, or if AN is disabled. So with disabled AN we + * still get here on link up. But we want to set an_complete + * only if AN was enabled, thus we look at BMCR_ANENABLE. + * (According to 802.3-2008 section 22.2.4.2.10, we should be + * able to get this same value from BMSR_ANEGCAPABLE, but tests + * show that these Marvell PHYs don't conform to this part of + * the specification - BMSR_ANEGCAPABLE is simply always 1.) + */ + state->an_complete = !!(ctrl & BMCR_ANENABLE); state->duplex = status & MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
DUPLEX_FULL : DUPLEX_HALF; @@ -81,6 +92,18 @@ static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, dev_err(chip->dev, "invalid PHY speed\n"); return -EINVAL; } + } else if (state->link && + state->interface != PHY_INTERFACE_MODE_SGMII) { + /* If Speed and Duplex Resolved register is 0 and link is up, it + * means that AN was enabled, but link partner had it disabled + * and the PHY invoked the Auto-Negotiation Bypass feature and + * linked anyway. + */ + state->duplex = DUPLEX_FULL; + if (state->interface == PHY_INTERFACE_MODE_2500BASEX) + state->speed = SPEED_2500; + else + state->speed = SPEED_1000; } else { state->link = false; } @@ -168,9 +191,15 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, int lane, struct phylink_link_state *state) { - u16 lpa, status; + u16 lpa, status, ctrl; int err; + err = mv88e6352_serdes_read(chip, MII_BMCR, &ctrl); + if (err) { + dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err); + return err; + } + err = mv88e6352_serdes_read(chip, 0x11, &status); if (err) { dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err); @@ -183,7 +212,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, return err; } - return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state); + return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state); } int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port, @@ -801,7 +830,7 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, bool up) { u8 cmode = chip->ports[port].cmode; - int err = 0; + int err; switch (cmode) { case MV88E6XXX_PORT_STS_CMODE_SGMII: @@ -813,6 +842,9 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, case MV88E6XXX_PORT_STS_CMODE_RXAUI: err = mv88e6390_serdes_power_10g(chip, lane, up); break; + default: + err = -EINVAL; + break; } if (!err && up) @@ -883,10 +915,17 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip, int port, int lane, struct phylink_link_state *state) { - u16 lpa, status; + u16 lpa, status, ctrl; int err; err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6390_SGMII_BMCR, &ctrl); + if (err) { + dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err); + return err; + } + + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, MV88E6390_SGMII_PHY_STATUS, &status); if (err) { dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err); @@ -900,7 +939,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip, return err; } - return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state); + return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state); } static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip, @@ -1271,9 +1310,31 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p) } } -static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane) +static int mv88e6393x_serdes_power_lane(struct mv88e6xxx_chip *chip, int lane, + bool on) { - u16 reg, pcs; + u16 reg; + int err; + + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_SERDES_CTRL1, ®); + if (err) + return err; + + if (on) + reg &= ~(MV88E6393X_SERDES_CTRL1_TX_PDOWN | + MV88E6393X_SERDES_CTRL1_RX_PDOWN); + else + reg |= MV88E6393X_SERDES_CTRL1_TX_PDOWN | + 
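Editorial note: the bypass branch above applies to 1000base-x and 2500base-x only; an SGMII link without the resolved bit is still reported down. A standalone model of the fallback, with hypothetical enum and struct names:

    enum iface { IFACE_SGMII, IFACE_1000BASEX, IFACE_2500BASEX };

    struct link_state {
            int speed;          /* Mb/s */
            int full_duplex;
    };

    static void resolve_bypassed_link(enum iface mode, struct link_state *st)
    {
            /* The partner negotiated nothing but the PHY linked anyway
             * via the AN-bypass feature: assume full duplex and infer
             * the speed from the configured interface mode.
             */
            st->full_duplex = 1;
            st->speed = (mode == IFACE_2500BASEX) ? 2500 : 1000;
    }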
MV88E6393X_SERDES_CTRL1_RX_PDOWN; + + return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_SERDES_CTRL1, reg); +} + +static int mv88e6393x_serdes_erratum_4_6(struct mv88e6xxx_chip *chip, int lane) +{ + u16 reg; int err; /* mv88e6393x family errata 4.6: @@ -1284,26 +1345,45 @@ static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane) * It seems that after this workaround the SERDES is automatically * powered up (the bit is cleared), so power it down. */ - if (lane == MV88E6393X_PORT0_LANE || lane == MV88E6393X_PORT9_LANE || - lane == MV88E6393X_PORT10_LANE) { - err = mv88e6390_serdes_read(chip, lane, - MDIO_MMD_PHYXS, - MV88E6393X_SERDES_POC, ®); - if (err) - return err; + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_SERDES_POC, ®); + if (err) + return err; - reg &= ~MV88E6393X_SERDES_POC_PDOWN; - reg |= MV88E6393X_SERDES_POC_RESET; + reg &= ~MV88E6393X_SERDES_POC_PDOWN; + reg |= MV88E6393X_SERDES_POC_RESET; - err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, - MV88E6393X_SERDES_POC, reg); - if (err) - return err; + err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_SERDES_POC, reg); + if (err) + return err; - err = mv88e6390_serdes_power_sgmii(chip, lane, false); - if (err) - return err; - } + err = mv88e6390_serdes_power_sgmii(chip, lane, false); + if (err) + return err; + + return mv88e6393x_serdes_power_lane(chip, lane, false); +} + +int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip) +{ + int err; + + err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT0_LANE); + if (err) + return err; + + err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT9_LANE); + if (err) + return err; + + return mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT10_LANE); +} + +static int mv88e6393x_serdes_erratum_4_8(struct mv88e6xxx_chip *chip, int lane) +{ + u16 reg, pcs; + int err; /* mv88e6393x family errata 4.8: * When a SERDES port is operating in 1000BASE-X or SGMII mode link may @@ -1334,38 +1414,152 @@ static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane) MV88E6393X_ERRATA_4_8_REG, reg); } -int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip) +static int mv88e6393x_serdes_erratum_5_2(struct mv88e6xxx_chip *chip, int lane, + u8 cmode) +{ + static const struct { + u16 dev, reg, val, mask; + } fixes[] = { + { MDIO_MMD_VEND1, 0x8093, 0xcb5a, 0xffff }, + { MDIO_MMD_VEND1, 0x8171, 0x7088, 0xffff }, + { MDIO_MMD_VEND1, 0x80c9, 0x311a, 0xffff }, + { MDIO_MMD_VEND1, 0x80a2, 0x8000, 0xff7f }, + { MDIO_MMD_VEND1, 0x80a9, 0x0000, 0xfff0 }, + { MDIO_MMD_VEND1, 0x80a3, 0x0000, 0xf8ff }, + { MDIO_MMD_PHYXS, MV88E6393X_SERDES_POC, + MV88E6393X_SERDES_POC_RESET, MV88E6393X_SERDES_POC_RESET }, + }; + int err, i; + u16 reg; + + /* mv88e6393x family errata 5.2: + * For optimal signal integrity the following sequence should be applied + * to SERDES operating in 10G mode. These registers only apply to 10G + * operation and have no effect on other speeds. 
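Editorial note: the errata 5.2 fixes[] table introduced above drives a plain read-modify-write loop. The same shape as a self-contained sketch; reg_read() and reg_write() are assumed accessors standing in for the driver's MDIO helpers:

    #include <stdint.h>

    struct reg_fix {
            uint16_t dev, reg, val, mask;
    };

    extern int reg_read(uint16_t dev, uint16_t reg, uint16_t *val);
    extern int reg_write(uint16_t dev, uint16_t reg, uint16_t val);

    static int apply_fixes(const struct reg_fix *fixes, int n)
    {
            uint16_t reg;
            int err, i;

            for (i = 0; i < n; i++) {
                    err = reg_read(fixes[i].dev, fixes[i].reg, &reg);
                    if (err)
                            return err;
                    reg = (reg & ~fixes[i].mask) | fixes[i].val;
                    err = reg_write(fixes[i].dev, fixes[i].reg, reg);
                    if (err)
                            return err;
            }
            return 0;
    }

A table of {register, value, mask} triples keeps the errata sequence declarative, so adding or reordering fixes never touches the loop.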
+ */ + if (cmode != MV88E6393X_PORT_STS_CMODE_10GBASER) + return 0; + + for (i = 0; i < ARRAY_SIZE(fixes); ++i) { + err = mv88e6390_serdes_read(chip, lane, fixes[i].dev, + fixes[i].reg, ®); + if (err) + return err; + + reg &= ~fixes[i].mask; + reg |= fixes[i].val; + + err = mv88e6390_serdes_write(chip, lane, fixes[i].dev, + fixes[i].reg, reg); + if (err) + return err; + } + + return 0; +} + +static int mv88e6393x_serdes_fix_2500basex_an(struct mv88e6xxx_chip *chip, + int lane, u8 cmode, bool on) { + u16 reg; int err; - err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT0_LANE); + if (cmode != MV88E6XXX_PORT_STS_CMODE_2500BASEX) + return 0; + + /* Inband AN is broken on Amethyst in 2500base-x mode when set by + * standard mechanism (via cmode). + * We can get around this by configuring the PCS mode to 1000base-x + * and then writing value 0x58 to register 1e.8000. (This must be done + * while SerDes receiver and transmitter are disabled, which is, when + * this function is called.) + * It seem that when we do this configuration to 2500base-x mode (by + * changing PCS mode to 1000base-x and frequency to 3.125 GHz from + * 1.25 GHz) and then configure to sgmii or 1000base-x, the device + * thinks that it already has SerDes at 1.25 GHz and does not change + * the 1e.8000 register, leaving SerDes at 3.125 GHz. + * To avoid this, change PCS mode back to 2500base-x when disabling + * SerDes from 2500base-x mode. + */ + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_SERDES_POC, ®); + if (err) + return err; + + reg &= ~(MV88E6393X_SERDES_POC_PCS_MASK | MV88E6393X_SERDES_POC_AN); + if (on) + reg |= MV88E6393X_SERDES_POC_PCS_1000BASEX | + MV88E6393X_SERDES_POC_AN; + else + reg |= MV88E6393X_SERDES_POC_PCS_2500BASEX; + reg |= MV88E6393X_SERDES_POC_RESET; + + err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_SERDES_POC, reg); if (err) return err; - err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT9_LANE); + err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_VEND1, 0x8000, 0x58); if (err) return err; - return mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT10_LANE); + return 0; } int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, bool on) { u8 cmode = chip->ports[port].cmode; + int err; if (port != 0 && port != 9 && port != 10) return -EOPNOTSUPP; + if (on) { + err = mv88e6393x_serdes_erratum_4_8(chip, lane); + if (err) + return err; + + err = mv88e6393x_serdes_erratum_5_2(chip, lane, cmode); + if (err) + return err; + + err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode, + true); + if (err) + return err; + + err = mv88e6393x_serdes_power_lane(chip, lane, true); + if (err) + return err; + } + switch (cmode) { case MV88E6XXX_PORT_STS_CMODE_SGMII: case MV88E6XXX_PORT_STS_CMODE_1000BASEX: case MV88E6XXX_PORT_STS_CMODE_2500BASEX: - return mv88e6390_serdes_power_sgmii(chip, lane, on); + err = mv88e6390_serdes_power_sgmii(chip, lane, on); + break; case MV88E6393X_PORT_STS_CMODE_5GBASER: case MV88E6393X_PORT_STS_CMODE_10GBASER: - return mv88e6390_serdes_power_10g(chip, lane, on); + err = mv88e6390_serdes_power_10g(chip, lane, on); + break; + default: + err = -EINVAL; + break; } - return 0; + if (err) + return err; + + if (!on) { + err = mv88e6393x_serdes_power_lane(chip, lane, false); + if (err) + return err; + + err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode, + false); + } + + return err; } diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h index cbb3ba30caea..8dd8ed225b45 
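Editorial note: the 2500base-x workaround described above boils down to a fixed order of register writes. A sketch under stated assumptions: poc_write() and vend1_write() are hypothetical helpers standing in for the driver's POC and vendor-register accesses, and the 1e.8000 = 0x58 value comes straight from the patch text:

    #include <stdbool.h>

    extern int poc_write(unsigned pcs_mode, bool an, bool reset);
    extern int vend1_write(unsigned reg, unsigned val);

    #define PCS_1000BASEX 0
    #define PCS_2500BASEX 1

    static int fix_2500basex_an(bool on)
    {
            int err;

            /* PCS as 1000base-x with AN while enabling, plain 2500base-x
             * while disabling, with a reset strobe in both cases, so the
             * next mode change is really applied.
             */
            err = poc_write(on ? PCS_1000BASEX : PCS_2500BASEX, on, true);
            if (err)
                    return err;
            return vend1_write(0x8000, 0x58);   /* value from the patch */
    }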
100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.h +++ b/drivers/net/dsa/mv88e6xxx/serdes.h @@ -93,6 +93,10 @@ #define MV88E6393X_SERDES_POC_PCS_MASK 0x0007 #define MV88E6393X_SERDES_POC_RESET BIT(15) #define MV88E6393X_SERDES_POC_PDOWN BIT(5) +#define MV88E6393X_SERDES_POC_AN BIT(3) +#define MV88E6393X_SERDES_CTRL1 0xf003 +#define MV88E6393X_SERDES_CTRL1_TX_PDOWN BIT(9) +#define MV88E6393X_SERDES_CTRL1_RX_PDOWN BIT(8) #define MV88E6393X_ERRATA_4_8_REG 0xF074 #define MV88E6393X_ERRATA_4_8_BIT BIT(14) diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 327cc4654806..f1a05e7dc818 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -290,8 +290,11 @@ static int felix_setup_mmio_filtering(struct felix *felix) } } - if (cpu < 0) + if (cpu < 0) { + kfree(tagging_rule); + kfree(redirect_rule); return -EINVAL; + } tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE; *(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588); diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index a429c9750add..147ca39531a3 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -1256,8 +1256,12 @@ qca8k_setup(struct dsa_switch *ds) /* Set initial MTU for every port. * We have only have a general MTU setting. So track * every port and set the max across all port. + * Set per port MTU to 1500 as the MTU change function + * will add the overhead and if its set to 1518 then it + * will apply the overhead again and we will end up with + * MTU of 1536 instead of 1518 */ - priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN; + priv->port_mtu[i] = ETH_DATA_LEN; } /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */ @@ -1433,6 +1437,12 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val); + /* From original code is reported port instability as SGMII also + * require delay set. Apply advised values here or take them from DT. + */ + if (state->interface == PHY_INTERFACE_MODE_SGMII) + qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); + /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and * falling edge is set writing in the PORT0 PAD reg */ @@ -1455,12 +1465,6 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE, val); - /* From original code is reported port instability as SGMII also - * require delay set. Apply advised values here or take them from DT. 
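Editorial note: the qca8k MTU comment above is easiest to verify with the numbers written out. A tiny self-contained program reproducing the kernel constant values locally:

    #include <stdio.h>

    #define ETH_DATA_LEN    1500    /* payload MTU */
    #define ETH_HLEN        14      /* Ethernet header */
    #define ETH_FCS_LEN     4       /* frame checksum */

    int main(void)
    {
            int mtu = ETH_DATA_LEN;

            /* what the MTU-change path programs: 1500 + 14 + 4 = 1518 */
            printf("frame size = %d\n", mtu + ETH_HLEN + ETH_FCS_LEN);

            /* Seeding port_mtu with ETH_FRAME_LEN + ETH_FCS_LEN (1518)
             * instead would yield 1518 + 14 + 4 = 1536 once the change
             * path adds the overhead again, which is the bug being fixed.
             */
            return 0;
    }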
- */ - if (state->interface == PHY_INTERFACE_MODE_SGMII) - qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); - break; default: dev_err(ds->dev, "xMII mode %s not supported for port %d\n", diff --git a/drivers/net/dsa/rtl8365mb.c b/drivers/net/dsa/rtl8365mb.c index baaae97283c5..078ca4cd7160 100644 --- a/drivers/net/dsa/rtl8365mb.c +++ b/drivers/net/dsa/rtl8365mb.c @@ -107,6 +107,7 @@ #define RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC 2112 /* Family-specific data and limits */ +#define RTL8365MB_PHYADDRMAX 7 #define RTL8365MB_NUM_PHYREGS 32 #define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 1) #define RTL8365MB_MAX_NUM_PORTS (RTL8365MB_CPU_PORT_NUM_8365MB_VC + 1) @@ -176,7 +177,7 @@ #define RTL8365MB_INDIRECT_ACCESS_STATUS_REG 0x1F01 #define RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG 0x1F02 #define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK GENMASK(4, 0) -#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK GENMASK(6, 5) +#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK GENMASK(7, 5) #define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK GENMASK(11, 8) #define RTL8365MB_PHY_BASE 0x2000 #define RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG 0x1F03 @@ -679,6 +680,9 @@ static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum) u16 val; int ret; + if (phy > RTL8365MB_PHYADDRMAX) + return -EINVAL; + if (regnum > RTL8365MB_PHYREGMAX) return -EINVAL; @@ -704,6 +708,9 @@ static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum, u32 ocp_addr; int ret; + if (phy > RTL8365MB_PHYADDRMAX) + return -EINVAL; + if (regnum > RTL8365MB_PHYREGMAX) return -EINVAL; diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index d75d95a97dd9..993b2fb42961 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -1430,16 +1430,19 @@ static int altera_tse_probe(struct platform_device *pdev) priv->rxdescmem_busaddr = dma_res->start; } else { + ret = -ENODEV; goto err_free_netdev; } - if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) + if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) { dma_set_coherent_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)); - else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) + } else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) { dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32)); - else + } else { + ret = -EIO; goto err_free_netdev; + } /* MAC address space */ ret = request_and_map(pdev, "control_port", &control_port, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h index 23b2d390fcdd..ace691d7cd75 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h @@ -40,10 +40,12 @@ #define AQ_DEVICE_ID_AQC113DEV 0x00C0 #define AQ_DEVICE_ID_AQC113CS 0x94C0 +#define AQ_DEVICE_ID_AQC113CA 0x34C0 #define AQ_DEVICE_ID_AQC114CS 0x93C0 #define AQ_DEVICE_ID_AQC113 0x04C0 #define AQ_DEVICE_ID_AQC113C 0x14C0 #define AQ_DEVICE_ID_AQC115C 0x12C0 +#define AQ_DEVICE_ID_AQC116C 0x11C0 #define HW_ATL_NIC_NAME "Marvell (aQuantia) AQtion 10Gbit Network Adapter" @@ -53,20 +55,19 @@ #define AQ_NIC_RATE_10G BIT(0) #define AQ_NIC_RATE_5G BIT(1) -#define AQ_NIC_RATE_5GSR BIT(2) -#define AQ_NIC_RATE_2G5 BIT(3) -#define AQ_NIC_RATE_1G BIT(4) -#define AQ_NIC_RATE_100M BIT(5) -#define AQ_NIC_RATE_10M BIT(6) -#define AQ_NIC_RATE_1G_HALF BIT(7) -#define AQ_NIC_RATE_100M_HALF 
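Editorial note: the altera_tse hunk above does two things, gives every failure path a concrete error code and keeps the wide-then-narrow DMA-mask fallback. That pattern in miniature; this dma_set_mask() is a simplified stand-in returning 0 on success, and DMA_BIT_MASK matches the kernel's definition:

    #include <stdint.h>

    extern int dma_set_mask(uint64_t mask);     /* assumed: 0 on success */

    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    static int setup_dma_masks(int native_width)
    {
            if (dma_set_mask(DMA_BIT_MASK(native_width)) == 0)
                    return 0;       /* preferred width accepted */
            if (dma_set_mask(DMA_BIT_MASK(32)) == 0)
                    return 0;       /* fall back to 32-bit DMA */
            return -5;              /* -EIO: no usable mask, fail probe */
    }

Before the fix, the bare goto on these paths returned whatever was left in ret, which could be 0, so probe failure looked like success to the caller.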
BIT(8) -#define AQ_NIC_RATE_10M_HALF BIT(9) +#define AQ_NIC_RATE_2G5 BIT(2) +#define AQ_NIC_RATE_1G BIT(3) +#define AQ_NIC_RATE_100M BIT(4) +#define AQ_NIC_RATE_10M BIT(5) +#define AQ_NIC_RATE_1G_HALF BIT(6) +#define AQ_NIC_RATE_100M_HALF BIT(7) +#define AQ_NIC_RATE_10M_HALF BIT(8) -#define AQ_NIC_RATE_EEE_10G BIT(10) -#define AQ_NIC_RATE_EEE_5G BIT(11) -#define AQ_NIC_RATE_EEE_2G5 BIT(12) -#define AQ_NIC_RATE_EEE_1G BIT(13) -#define AQ_NIC_RATE_EEE_100M BIT(14) +#define AQ_NIC_RATE_EEE_10G BIT(9) +#define AQ_NIC_RATE_EEE_5G BIT(10) +#define AQ_NIC_RATE_EEE_2G5 BIT(11) +#define AQ_NIC_RATE_EEE_1G BIT(12) +#define AQ_NIC_RATE_EEE_100M BIT(13) #define AQ_NIC_RATE_EEE_MSK (AQ_NIC_RATE_EEE_10G |\ AQ_NIC_RATE_EEE_5G |\ AQ_NIC_RATE_EEE_2G5 |\ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index 062a300a566a..dbd284660135 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -80,6 +80,8 @@ struct aq_hw_link_status_s { }; struct aq_stats_s { + u64 brc; + u64 btc; u64 uprc; u64 mprc; u64 bprc; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 1acf544afeb4..33f1a1377588 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -316,18 +316,22 @@ int aq_nic_ndev_register(struct aq_nic_s *self) aq_macsec_init(self); #endif - mutex_lock(&self->fwreq_mutex); - err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr); - mutex_unlock(&self->fwreq_mutex); - if (err) - goto err_exit; + if (platform_get_ethdev_address(&self->pdev->dev, self->ndev) != 0) { + // If DT has none or an invalid one, ask device for MAC address + mutex_lock(&self->fwreq_mutex); + err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr); + mutex_unlock(&self->fwreq_mutex); - eth_hw_addr_set(self->ndev, addr); + if (err) + goto err_exit; - if (!is_valid_ether_addr(self->ndev->dev_addr) || - !aq_nic_is_valid_ether_addr(self->ndev->dev_addr)) { - netdev_warn(self->ndev, "MAC is invalid, will use random."); - eth_hw_addr_random(self->ndev); + if (is_valid_ether_addr(addr) && + aq_nic_is_valid_ether_addr(addr)) { + eth_hw_addr_set(self->ndev, addr); + } else { + netdev_warn(self->ndev, "MAC is invalid, will use random."); + eth_hw_addr_random(self->ndev); + } } #if defined(AQ_CFG_MAC_ADDR_PERMANENT) @@ -905,8 +909,14 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data) data[++i] = stats->mbtc; data[++i] = stats->bbrc; data[++i] = stats->bbtc; - data[++i] = stats->ubrc + stats->mbrc + stats->bbrc; - data[++i] = stats->ubtc + stats->mbtc + stats->bbtc; + if (stats->brc) + data[++i] = stats->brc; + else + data[++i] = stats->ubrc + stats->mbrc + stats->bbrc; + if (stats->btc) + data[++i] = stats->btc; + else + data[++i] = stats->ubtc + stats->mbtc + stats->bbtc; data[++i] = stats->dma_pkt_rc; data[++i] = stats->dma_pkt_tc; data[++i] = stats->dma_oct_rc; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index d4b1976ee69b..797a95142d1f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -49,6 +49,8 @@ static const struct pci_device_id aq_pci_tbl[] = { { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113), }, { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113C), }, { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC115C), }, + { PCI_VDEVICE(AQUANTIA, 
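Editorial note: the aq_nic_ndev_register() rework above establishes a clear precedence for the MAC address. The same decision chain as a sketch; dt_mac(), fw_mac(), mac_valid() and random_mac() are hypothetical stand-ins for platform_get_ethdev_address(), the firmware query and the validity/random helpers:

    extern int dt_mac(unsigned char *mac);      /* 0 if DT gave a valid MAC */
    extern int fw_mac(unsigned char *mac);      /* 0 if firmware read worked */
    extern int mac_valid(const unsigned char *mac);
    extern void random_mac(unsigned char *mac);

    static int pick_mac(unsigned char mac[6])
    {
            if (dt_mac(mac) == 0)
                    return 0;       /* device tree wins when present */
            if (fw_mac(mac) != 0)
                    return -1;      /* firmware query itself failed */
            if (!mac_valid(mac))
                    random_mac(mac);        /* invalid permanent MAC */
            return 0;
    }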
AQ_DEVICE_ID_AQC113CA), }, + { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC116C), }, {} }; @@ -85,7 +87,10 @@ static const struct aq_board_revision_s hw_atl_boards[] = { { AQ_DEVICE_ID_AQC113CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, }, { AQ_DEVICE_ID_AQC114CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, }, { AQ_DEVICE_ID_AQC113C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, }, - { AQ_DEVICE_ID_AQC115C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, }, + { AQ_DEVICE_ID_AQC115C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc115c, }, + { AQ_DEVICE_ID_AQC113CA, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, }, + { AQ_DEVICE_ID_AQC116C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc116c, }, + }; MODULE_DEVICE_TABLE(pci, aq_pci_tbl); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 24122ccda614..81b3756417ec 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -298,13 +298,14 @@ bool aq_ring_tx_clean(struct aq_ring_s *self) } } - if (unlikely(buff->is_eop)) { + if (unlikely(buff->is_eop && buff->skb)) { u64_stats_update_begin(&self->stats.tx.syncp); ++self->stats.tx.packets; self->stats.tx.bytes += buff->skb->len; u64_stats_update_end(&self->stats.tx.syncp); dev_kfree_skb_any(buff->skb); + buff->skb = NULL; } buff->pa = 0U; buff->eop_index = 0xffffU; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index d281322d7dd2..f4774cf051c9 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -362,9 +362,6 @@ unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u { unsigned int count; - WARN_ONCE(!aq_vec_is_valid_tc(self, tc), - "Invalid tc %u (#rx=%u, #tx=%u)\n", - tc, self->rx_rings, self->tx_rings); if (!aq_vec_is_valid_tc(self, tc)) return 0; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index fc0e66006644..7e88d7234b14 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -559,6 +559,11 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, goto err_exit; if (fw.len == 0xFFFFU) { + if (sw.len > sizeof(self->rpc)) { + printk(KERN_INFO "Invalid sw len: %x\n", sw.len); + err = -EINVAL; + goto err_exit; + } err = hw_atl_utils_fw_rpc_call(self, sw.len); if (err < 0) goto err_exit; @@ -567,6 +572,11 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, if (rpc) { if (fw.len) { + if (fw.len > sizeof(self->rpc)) { + printk(KERN_INFO "Invalid fw len: %x\n", fw.len); + err = -EINVAL; + goto err_exit; + } err = hw_atl_utils_fw_downld_dwords(self, self->rpc_addr, @@ -857,12 +867,20 @@ static int hw_atl_fw1x_deinit(struct aq_hw_s *self) int hw_atl_utils_update_stats(struct aq_hw_s *self) { struct aq_stats_s *cs = &self->curr_stats; + struct aq_stats_s curr_stats = *cs; struct hw_atl_utils_mbox mbox; + bool corrupted_stats = false; hw_atl_utils_mpi_read_stats(self, &mbox); -#define AQ_SDELTA(_N_) (self->curr_stats._N_ += \ - mbox.stats._N_ - self->last_stats._N_) +#define AQ_SDELTA(_N_) \ +do { \ + if (!corrupted_stats && \ + ((s64)(mbox.stats._N_ - self->last_stats._N_)) >= 0) \ + curr_stats._N_ += mbox.stats._N_ - self->last_stats._N_; \ + else \ + corrupted_stats = true; \ +} while (0) if (self->aq_link_status.mbps) { 
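Editorial note: the reworked AQ_SDELTA above stages all deltas in a local copy of the stats and commits the copy only if no counter went backwards. The per-counter guard, written as a plain function:

    #include <stdbool.h>
    #include <stdint.h>

    static bool accumulate_delta(uint64_t *total, uint64_t now, uint64_t last)
    {
            /* A negative difference means the firmware counter went
             * backwards (reset or corruption); reject the whole sample.
             */
            if ((int64_t)(now - last) < 0)
                    return false;
            *total += now - last;
            return true;
    }

Staging into a local struct and committing atomically at the end is what keeps one corrupted counter from skewing the rest of the set.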
AQ_SDELTA(uprc); @@ -882,6 +900,9 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self) AQ_SDELTA(bbrc); AQ_SDELTA(bbtc); AQ_SDELTA(dpc); + + if (!corrupted_stats) + *cs = curr_stats; } #undef AQ_SDELTA diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c index eac631c45c56..4d4cfbc91e19 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -132,9 +132,6 @@ static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed) if (speed & AQ_NIC_RATE_5G) rate |= FW2X_RATE_5G; - if (speed & AQ_NIC_RATE_5GSR) - rate |= FW2X_RATE_5G; - if (speed & AQ_NIC_RATE_2G5) rate |= FW2X_RATE_2G5; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c index c98708bb044c..5dfc751572ed 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c @@ -65,11 +65,25 @@ const struct aq_hw_caps_s hw_atl2_caps_aqc113 = { AQ_NIC_RATE_5G | AQ_NIC_RATE_2G5 | AQ_NIC_RATE_1G | - AQ_NIC_RATE_1G_HALF | AQ_NIC_RATE_100M | - AQ_NIC_RATE_100M_HALF | - AQ_NIC_RATE_10M | - AQ_NIC_RATE_10M_HALF, + AQ_NIC_RATE_10M, +}; + +const struct aq_hw_caps_s hw_atl2_caps_aqc115c = { + DEFAULT_BOARD_BASIC_CAPABILITIES, + .media_type = AQ_HW_MEDIA_TYPE_TP, + .link_speed_msk = AQ_NIC_RATE_2G5 | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M | + AQ_NIC_RATE_10M, +}; + +const struct aq_hw_caps_s hw_atl2_caps_aqc116c = { + DEFAULT_BOARD_BASIC_CAPABILITIES, + .media_type = AQ_HW_MEDIA_TYPE_TP, + .link_speed_msk = AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M | + AQ_NIC_RATE_10M, }; static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h index de8723f1c28a..346f0dc9912e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h @@ -9,6 +9,8 @@ #include "aq_common.h" extern const struct aq_hw_caps_s hw_atl2_caps_aqc113; +extern const struct aq_hw_caps_s hw_atl2_caps_aqc115c; +extern const struct aq_hw_caps_s hw_atl2_caps_aqc116c; extern const struct aq_hw_ops hw_atl2_ops; #endif /* HW_ATL2_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h index b66fa346581c..6bad64c77b87 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h @@ -239,7 +239,8 @@ struct version_s { u8 minor; u16 build; } phy; - u32 rsvd; + u32 drv_iface_ver:4; + u32 rsvd:28; }; struct link_status_s { @@ -424,7 +425,7 @@ struct cable_diag_status_s { u16 rsvd2; }; -struct statistics_s { +struct statistics_a0_s { struct { u32 link_up; u32 link_down; @@ -457,6 +458,33 @@ struct statistics_s { u32 reserve_fw_gap; }; +struct __packed statistics_b0_s { + u64 rx_good_octets; + u64 rx_pause_frames; + u64 rx_good_frames; + u64 rx_errors; + u64 rx_unicast_frames; + u64 rx_multicast_frames; + u64 rx_broadcast_frames; + + u64 tx_good_octets; + u64 tx_pause_frames; + u64 tx_good_frames; + u64 tx_errors; + u64 tx_unicast_frames; + u64 tx_multicast_frames; + u64 tx_broadcast_frames; + + u32 main_loop_cycles; +}; + +struct __packed statistics_s { + union __packed { + struct statistics_a0_s a0; + 
struct statistics_b0_s b0; + }; +}; + struct filter_caps_s { u8 l2_filters_base_index:6; u8 flexible_filter_mask:2; @@ -545,7 +573,7 @@ struct management_status_s { u32 rsvd5; }; -struct fw_interface_out { +struct __packed fw_interface_out { struct transaction_counter_s transaction_id; struct version_s version; struct link_status_s link_status; @@ -569,7 +597,6 @@ struct fw_interface_out { struct core_dump_s core_dump; u32 rsvd11; struct statistics_s stats; - u32 rsvd12; struct filter_caps_s filter_caps; struct device_caps_s device_caps; u32 rsvd13; @@ -592,6 +619,9 @@ struct fw_interface_out { #define AQ_HOST_MODE_LOW_POWER 3U #define AQ_HOST_MODE_SHUTDOWN 4U +#define AQ_A2_FW_INTERFACE_A0 0 +#define AQ_A2_FW_INTERFACE_B0 1 + int hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops); int hw_atl2_utils_soft_reset(struct aq_hw_s *self); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c index dd259c8f2f4f..58d426dda3ed 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c @@ -84,7 +84,7 @@ static int hw_atl2_shared_buffer_read_block(struct aq_hw_s *self, if (cnt > AQ_A2_FW_READ_TRY_MAX) return -ETIME; if (tid1.transaction_cnt_a != tid1.transaction_cnt_b) - udelay(1); + mdelay(1); } while (tid1.transaction_cnt_a != tid1.transaction_cnt_b); hw_atl2_mif_shared_buf_read(self, offset, (u32 *)data, dwords); @@ -154,7 +154,7 @@ static void a2_link_speed_mask2fw(u32 speed, { link_options->rate_10G = !!(speed & AQ_NIC_RATE_10G); link_options->rate_5G = !!(speed & AQ_NIC_RATE_5G); - link_options->rate_N5G = !!(speed & AQ_NIC_RATE_5GSR); + link_options->rate_N5G = link_options->rate_5G; link_options->rate_2P5G = !!(speed & AQ_NIC_RATE_2G5); link_options->rate_N2P5G = link_options->rate_2P5G; link_options->rate_1G = !!(speed & AQ_NIC_RATE_1G); @@ -192,8 +192,6 @@ static u32 a2_fw_lkp_to_mask(struct lkp_link_caps_s *lkp_link_caps) rate |= AQ_NIC_RATE_10G; if (lkp_link_caps->rate_5G) rate |= AQ_NIC_RATE_5G; - if (lkp_link_caps->rate_N5G) - rate |= AQ_NIC_RATE_5GSR; if (lkp_link_caps->rate_2P5G) rate |= AQ_NIC_RATE_2G5; if (lkp_link_caps->rate_1G) @@ -335,15 +333,22 @@ static int aq_a2_fw_get_mac_permanent(struct aq_hw_s *self, u8 *mac) return 0; } -static int aq_a2_fw_update_stats(struct aq_hw_s *self) +static void aq_a2_fill_a0_stats(struct aq_hw_s *self, + struct statistics_s *stats) { struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; - struct statistics_s stats; - - hw_atl2_shared_buffer_read_safe(self, stats, &stats); - -#define AQ_SDELTA(_N_, _F_) (self->curr_stats._N_ += \ - stats.msm._F_ - priv->last_stats.msm._F_) + struct aq_stats_s *cs = &self->curr_stats; + struct aq_stats_s curr_stats = *cs; + bool corrupted_stats = false; + +#define AQ_SDELTA(_N, _F) \ +do { \ + if (!corrupted_stats && \ + ((s64)(stats->a0.msm._F - priv->last_stats.a0.msm._F)) >= 0) \ + curr_stats._N += stats->a0.msm._F - priv->last_stats.a0.msm._F;\ + else \ + corrupted_stats = true; \ +} while (0) if (self->aq_link_status.mbps) { AQ_SDELTA(uprc, rx_unicast_frames); @@ -362,17 +367,76 @@ static int aq_a2_fw_update_stats(struct aq_hw_s *self) AQ_SDELTA(mbtc, tx_multicast_octets); AQ_SDELTA(bbrc, rx_broadcast_octets); AQ_SDELTA(bbtc, tx_broadcast_octets); + + if (!corrupted_stats) + *cs = curr_stats; } #undef AQ_SDELTA - self->curr_stats.dma_pkt_rc = - hw_atl_stats_rx_dma_good_pkt_counter_get(self); - 
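Editorial note: because A0 and B0 firmware lay the statistics block out differently, the driver now reads the interface version first and picks the matching view of the shared buffer, per the A0/B0 split introduced above. A minimal model of that dispatch, with both layouts abbreviated to one field:

    #include <stdint.h>

    #define IFACE_A0 0
    #define IFACE_B0 1

    struct stats_a0 { uint32_t rx_unicast_frames; /* ... */ };
    struct stats_b0 { uint64_t rx_unicast_frames; /* ... */ };

    union stats_buf {
            struct stats_a0 a0;
            struct stats_b0 b0;
    };

    static uint64_t rx_unicast(const union stats_buf *s, unsigned iface_ver)
    {
            return iface_ver == IFACE_A0 ? s->a0.rx_unicast_frames
                                         : s->b0.rx_unicast_frames;
    }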
self->curr_stats.dma_pkt_tc = - hw_atl_stats_tx_dma_good_pkt_counter_get(self); - self->curr_stats.dma_oct_rc = - hw_atl_stats_rx_dma_good_octet_counter_get(self); - self->curr_stats.dma_oct_tc = - hw_atl_stats_tx_dma_good_octet_counter_get(self); - self->curr_stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self); + +} + +static void aq_a2_fill_b0_stats(struct aq_hw_s *self, + struct statistics_s *stats) +{ + struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; + struct aq_stats_s *cs = &self->curr_stats; + struct aq_stats_s curr_stats = *cs; + bool corrupted_stats = false; + +#define AQ_SDELTA(_N, _F) \ +do { \ + if (!corrupted_stats && \ + ((s64)(stats->b0._F - priv->last_stats.b0._F)) >= 0) \ + curr_stats._N += stats->b0._F - priv->last_stats.b0._F; \ + else \ + corrupted_stats = true; \ +} while (0) + + if (self->aq_link_status.mbps) { + AQ_SDELTA(uprc, rx_unicast_frames); + AQ_SDELTA(mprc, rx_multicast_frames); + AQ_SDELTA(bprc, rx_broadcast_frames); + AQ_SDELTA(erpr, rx_errors); + AQ_SDELTA(brc, rx_good_octets); + + AQ_SDELTA(uptc, tx_unicast_frames); + AQ_SDELTA(mptc, tx_multicast_frames); + AQ_SDELTA(bptc, tx_broadcast_frames); + AQ_SDELTA(erpt, tx_errors); + AQ_SDELTA(btc, tx_good_octets); + + if (!corrupted_stats) + *cs = curr_stats; + } +#undef AQ_SDELTA +} + +static int aq_a2_fw_update_stats(struct aq_hw_s *self) +{ + struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; + struct aq_stats_s *cs = &self->curr_stats; + struct statistics_s stats; + struct version_s version; + int err; + + err = hw_atl2_shared_buffer_read_safe(self, version, &version); + if (err) + return err; + + err = hw_atl2_shared_buffer_read_safe(self, stats, &stats); + if (err) + return err; + + if (version.drv_iface_ver == AQ_A2_FW_INTERFACE_A0) + aq_a2_fill_a0_stats(self, &stats); + else + aq_a2_fill_b0_stats(self, &stats); + + cs->dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counter_get(self); + cs->dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counter_get(self); + cs->dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counter_get(self); + cs->dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counter_get(self); + cs->dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self); memcpy(&priv->last_stats, &stats, sizeof(stats)); @@ -499,9 +563,9 @@ u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self) hw_atl2_shared_buffer_read_safe(self, version, &version); /* A2 FW version is stored in reverse order */ - return version.mac.major << 24 | - version.mac.minor << 16 | - version.mac.build; + return version.bundle.major << 24 | + version.bundle.minor << 16 | + version.bundle.build; } int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self, diff --git a/drivers/net/ethernet/asix/ax88796c_main.h b/drivers/net/ethernet/asix/ax88796c_main.h index 80263c3cef75..4a83c991dcbe 100644 --- a/drivers/net/ethernet/asix/ax88796c_main.h +++ b/drivers/net/ethernet/asix/ax88796c_main.h @@ -127,9 +127,9 @@ struct ax88796c_device { #define AX_PRIV_FLAGS_MASK (AX_CAP_COMP) unsigned long flags; - #define EVENT_INTR BIT(0) - #define EVENT_TX BIT(1) - #define EVENT_SET_MULTI BIT(2) + #define EVENT_INTR 0 + #define EVENT_TX 1 + #define EVENT_SET_MULTI 2 }; diff --git a/drivers/net/ethernet/asix/ax88796c_spi.c b/drivers/net/ethernet/asix/ax88796c_spi.c index 94df4f96d2be..0710e716d682 100644 --- a/drivers/net/ethernet/asix/ax88796c_spi.c +++ b/drivers/net/ethernet/asix/ax88796c_spi.c @@ -34,7 +34,7 @@ int axspi_read_status(struct axspi_data *ax_spi, struct spi_status *status) /* OP */ ax_spi->cmd_buf[0] = AX_SPICMD_READ_STATUS; - ret = 
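Editorial note: the ax88796c change above, from BIT(n) to plain n, looks cosmetic but is a real fix: the atomic bit helpers take a bit number, not a mask. Modeled here with local stand-ins (these set_bit()/test_bit() are simplified, not the kernel's):

    #include <stdbool.h>

    static void set_bit(int nr, unsigned long *flags)
    {
            *flags |= 1UL << nr;
    }

    static bool test_bit(int nr, const unsigned long *flags)
    {
            return *flags & (1UL << nr);
    }

    #define EVENT_TX 1      /* bit number, as in the fixed header */

    static bool demo(unsigned long *flags)
    {
            set_bit(EVENT_TX, flags);   /* sets bit 1, i.e. mask 0x2 */
            /* With the old EVENT_TX == BIT(1) == 2, this would have set
             * bit 2 (mask 0x4) instead: a different flag entirely.
             */
            return test_bit(EVENT_TX, flags);
    }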
spi_write_then_read(ax_spi->spi, ax_spi->cmd_buf, 1, (u8 *)&status, 3); + ret = spi_write_then_read(ax_spi->spi, ax_spi->cmd_buf, 1, (u8 *)status, 3); if (ret) dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret); else diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c index 7cc5213c575a..b07cb9bc5f2d 100644 --- a/drivers/net/ethernet/broadcom/bcm4908_enet.c +++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c @@ -708,7 +708,9 @@ static int bcm4908_enet_probe(struct platform_device *pdev) enet->irq_tx = platform_get_irq_byname(pdev, "tx"); - dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); + err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); + if (err) + return err; err = bcm4908_enet_dma_alloc(enet); if (err) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h index 1835d2e451c0..fc7fce642666 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h @@ -635,11 +635,13 @@ static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, { int i, rc; struct bnx2x_ilt *ilt = BP_ILT(bp); - struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; + struct ilt_client_info *ilt_cli; if (!ilt || !ilt->lines) return -1; + ilt_cli = &ilt->clients[cli_num]; + if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM)) return 0; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index d0d5da9b78f8..4c9507d82fd0 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -2258,6 +2258,16 @@ static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db, } } +/* Must hold rtnl_lock */ +static inline bool bnxt_sriov_cfg(struct bnxt *bp) +{ +#if defined(CONFIG_BNXT_SRIOV) + return BNXT_PF(bp) && (bp->pf.active_vfs || bp->sriov_cfg); +#else + return false; +#endif +} + extern const u16 bnxt_lhint_arr[]; int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 5c464ea73576..951c4c569a9b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -360,7 +360,7 @@ bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack, NL_SET_ERR_MSG_MOD(extack, "Live patch already applied"); break; default: - netdev_err(bp->dev, "Unexpected live patch error: %hhd\n", err); + netdev_err(bp->dev, "Unexpected live patch error: %d\n", err); NL_SET_ERR_MSG_MOD(extack, "Failed to activate live patch"); break; } @@ -441,12 +441,13 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change, switch (action) { case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: { - if (BNXT_PF(bp) && bp->pf.active_vfs) { + rtnl_lock(); + if (bnxt_sriov_cfg(bp)) { NL_SET_ERR_MSG_MOD(extack, - "reload is unsupported when VFs are allocated"); + "reload is unsupported while VFs are allocated or being configured"); + rtnl_unlock(); return -EOPNOTSUPP; } - rtnl_lock(); if (bp->dev->reg_state == NETREG_UNREGISTERED) { rtnl_unlock(); return -ENODEV; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index e6a4a768b10b..1471b6130a2b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -1868,7 +1868,7 @@ static int 
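Editorial note: the bnx2x hunk above fixes a classic ordering bug: computing &ilt->clients[cli_num] before the !ilt check performs pointer arithmetic on a possibly NULL pointer, which is undefined behavior even though nothing is loaded yet. The corrected shape, with hypothetical types:

    struct client { int flags; };
    struct table  { struct client *clients; int nlines; };

    static int mem_op(struct table *t, int idx)
    {
            struct client *c;

            if (!t || !t->nlines)
                    return -1;

            c = &t->clients[idx];   /* only touch t after validating it */
            return c->flags;
    }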
bnxt_tc_setup_indr_block_cb(enum tc_setup_type type, struct flow_cls_offload *flower = type_data; struct bnxt *bp = priv->bp; - if (flower->common.chain_index) + if (!tc_cls_can_offload_and_chain0(bp->dev, type_data)) return -EOPNOTSUPP; switch (type) { diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 64479c464b4e..ae9cca768d74 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -3196,6 +3196,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, } if (adapter->registered_device_map == 0) { dev_err(&pdev->dev, "could not register any net devices\n"); + err = -EINVAL; goto err_disable_interrupts; } diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index 13121c4dcfe6..71730ef4cd57 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -4709,6 +4709,10 @@ type3_infoblock(struct net_device *dev, u_char count, u_char *p) lp->ibn = 3; lp->active = *p++; if (MOTO_SROM_BUG) lp->active = 0; + /* if (MOTO_SROM_BUG) statement indicates lp->active could + * be 8 (i.e. the size of array lp->phy) */ + if (WARN_ON(lp->active >= ARRAY_SIZE(lp->phy))) + return -EINVAL; lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1); lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1); lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2; @@ -5000,19 +5004,23 @@ mii_get_phy(struct net_device *dev) } if ((j == limit) && (i < DE4X5_MAX_MII)) { for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++); - lp->phy[k].addr = i; - lp->phy[k].id = id; - lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */ - lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */ - lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */ - lp->mii_cnt++; - lp->active++; - printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name); - j = de4x5_debug; - de4x5_debug |= DEBUG_MII; - de4x5_dbg_mii(dev, k); - de4x5_debug = j; - printk("\n"); + if (k < DE4X5_MAX_PHY) { + lp->phy[k].addr = i; + lp->phy[k].id = id; + lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */ + lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */ + lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */ + lp->mii_cnt++; + lp->active++; + printk("%s: Using generic MII device control. 
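Editorial note: the de4x5 hunks around this point guard two indices (lp->active and k) against the size of the lp->phy table before using them. The ARRAY_SIZE guard in miniature, over a file-scope table of the same size:

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static struct { int id; } phy[8];

    static int select_phy(unsigned int active)
    {
            if (active >= ARRAY_SIZE(phy))
                    return -22;     /* -EINVAL: SROM claims index 8+ */
            return phy[active].id;
    }

ARRAY_SIZE only works on a true array in scope; had phy been a function parameter it would have decayed to a pointer and the macro would silently compute nonsense.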
If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name); + j = de4x5_debug; + de4x5_debug |= DEBUG_MII; + de4x5_dbg_mii(dev, k); + de4x5_debug = j; + printk("\n"); + } else { + goto purgatory; + } } } purgatory: diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 714e961e7a77..8e643567abce 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -4550,10 +4550,12 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) fsl_mc_portal_free(priv->mc_io); - free_netdev(net_dev); + destroy_workqueue(priv->dpaa2_ptp_wq); dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name); + free_netdev(net_dev); + return 0; } diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 7b4961daa254..ed7301b69169 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -377,6 +377,9 @@ struct bufdesc_ex { #define FEC_ENET_WAKEUP ((uint)0x00020000) /* Wakeup request */ #define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2) #define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2) +#define FEC_ENET_RXF_GET(X) (((X) == 0) ? FEC_ENET_RXF_0 : \ + (((X) == 1) ? FEC_ENET_RXF_1 : \ + FEC_ENET_RXF_2)) #define FEC_ENET_TS_AVAIL ((uint)0x00010000) #define FEC_ENET_TS_TIMER ((uint)0x00008000) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index bc418b910999..1b1f7f2a6130 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1480,7 +1480,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) break; pkt_received++; - writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT); + writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT); /* Check for errors. */ status ^= BD_ENET_RX_LAST; diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c index 88ca49cbc1e2..d57508bc4307 100644 --- a/drivers/net/ethernet/google/gve/gve_utils.c +++ b/drivers/net/ethernet/google/gve/gve_utils.c @@ -68,6 +68,9 @@ struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi, set_protocol = ctx->curr_frag_cnt == ctx->expected_frag_cnt - 1; } else { skb = napi_alloc_skb(napi, len); + + if (unlikely(!skb)) + return NULL; set_protocol = true; } __skb_put(skb, len); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 23d9cbf262c3..740850b64aff 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -400,6 +400,10 @@ static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, return; if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { + /* DSAF_MAX_PORT_NUM is 6, but DSAF_GE_NUM is 8. 
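Editorial note: the dpaa2 remove() fix above is about lifetime ordering: the workqueue is destroyed and the final log line emitted before free_netdev(), so nothing dereferences the netdev after it is gone. Generic shape, with destroy_workqueue() declared as an assumed helper:

    #include <stdio.h>
    #include <stdlib.h>

    extern void destroy_workqueue(void *wq);    /* assumed stand-in */

    struct netdev { char name[16]; };

    static void remove_dev(struct netdev *nd, void *wq)
    {
            destroy_workqueue(wq);  /* quiesce deferred work first */
            printf("Removed interface %s\n", nd->name); /* last use of nd */
            free(nd);               /* nothing may touch nd below this */
    }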
+ We need check to prevent array overflow */ + if (port >= DSAF_MAX_PORT_NUM) + return; reg_val_1 = 0x1 << port; port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off; /* there is difference between V1 and V2 in register.*/ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 67364ab63a1f..081295bff765 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -1081,7 +1081,8 @@ static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring, u32 j = 0; sprintf(result[j++], "%u", index); - sprintf(result[j++], "%u", ring->page_pool->pages_state_hold_cnt); + sprintf(result[j++], "%u", + READ_ONCE(ring->page_pool->pages_state_hold_cnt)); sprintf(result[j++], "%u", atomic_read(&ring->page_pool->pages_state_release_cnt)); sprintf(result[j++], "%u", ring->page_pool->p.pool_size); @@ -1106,6 +1107,11 @@ hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len) return -EFAULT; } + if (!priv->ring[h->kinfo.num_tqps].page_pool) { + dev_err(&h->pdev->dev, "page pool is not initialized\n"); + return -EFAULT; + } + for (i = 0; i < ARRAY_SIZE(page_pool_info_items); i++) result[i] = &data_str[i][0]; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index c8442b86df94..c9b4568d7a8d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -987,6 +987,7 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags) struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); const struct hnae3_ae_ops *ops = h->ae_algo->ops; const struct hns3_reset_type_map *rst_type_map; + enum ethtool_reset_flags rst_flags; u32 i, size; if (ops->ae_dev_resetting && ops->ae_dev_resetting(h)) @@ -1006,6 +1007,7 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags) for (i = 0; i < size; i++) { if (rst_type_map[i].rst_flags == *flags) { rst_type = rst_type_map[i].rst_type; + rst_flags = rst_type_map[i].rst_flags; break; } } @@ -1021,6 +1023,8 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags) ops->reset_event(h->pdev, h); + *flags &= ~rst_flags; + return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 25c419d40066..41afaeea881b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -703,9 +703,9 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) roundup_size = ilog2(roundup_size); for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { - tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); + tc_valid[i] = 1; tc_size[i] = roundup_size; - tc_offset[i] = rss_size * i; + tc_offset[i] = (hdev->hw_tc_map & BIT(i)) ? 
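Editorial note: in the hns3 ethtool hunk above, *flags &= ~rst_flags follows ethtool's convention that bits left set on return are the resets the driver did not perform. Minimal shape of a conforming handler, generalized to a mask; the numeric error values are the usual kernel ones spelled out:

    static int do_reset(unsigned int *flags, unsigned int supported,
                        int (*trigger)(unsigned int which))
    {
            unsigned int which = *flags & supported;

            if (!which)
                    return -95;     /* -EOPNOTSUPP: nothing we can do */
            if (trigger(which))
                    return -1;
            *flags &= ~which;       /* bits we handled read back as 0 */
            return 0;
    }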
rss_size * i : 0; } hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c index a78c398bf5b2..01e7d3c0b68e 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c @@ -8,6 +8,7 @@ #include <linux/interrupt.h> #include <linux/etherdevice.h> #include <linux/netdevice.h> +#include <linux/module.h> #include "hinic_hw_dev.h" #include "hinic_dev.h" diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 3cca51735421..0bb3911dd014 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -628,17 +628,9 @@ static bool reuse_rx_pools(struct ibmvnic_adapter *adapter) old_buff_size = adapter->prev_rx_buf_sz; new_buff_size = adapter->cur_rx_buf_sz; - /* Require buff size to be exactly same for now */ - if (old_buff_size != new_buff_size) - return false; - - if (old_num_pools == new_num_pools && old_pool_size == new_pool_size) - return true; - - if (old_num_pools < adapter->min_rx_queues || - old_num_pools > adapter->max_rx_queues || - old_pool_size < adapter->min_rx_add_entries_per_subcrq || - old_pool_size > adapter->max_rx_add_entries_per_subcrq) + if (old_buff_size != new_buff_size || + old_num_pools != new_num_pools || + old_pool_size != new_pool_size) return false; return true; @@ -874,17 +866,9 @@ static bool reuse_tx_pools(struct ibmvnic_adapter *adapter) old_mtu = adapter->prev_mtu; new_mtu = adapter->req_mtu; - /* Require MTU to be exactly same to reuse pools for now */ - if (old_mtu != new_mtu) - return false; - - if (old_num_pools == new_num_pools && old_pool_size == new_pool_size) - return true; - - if (old_num_pools < adapter->min_tx_queues || - old_num_pools > adapter->max_tx_queues || - old_pool_size < adapter->min_tx_entries_per_subcrq || - old_pool_size > adapter->max_tx_entries_per_subcrq) + if (old_mtu != new_mtu || + old_num_pools != new_num_pools || + old_pool_size != new_pool_size) return false; return true; diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 5039a2536951..0bf3d47bb90d 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -3003,9 +3003,10 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake) struct net_device *netdev = pci_get_drvdata(pdev); struct nic *nic = netdev_priv(netdev); + netif_device_detach(netdev); + if (netif_running(netdev)) e100_down(nic); - netif_device_detach(netdev); if ((nic->flags & wol_magic) | e100_asf(nic)) { /* enable reverse auto-negotiation */ @@ -3022,7 +3023,7 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake) *enable_wake = false; } - pci_clear_master(pdev); + pci_disable_device(pdev); } static int __e100_power_off(struct pci_dev *pdev, bool wake) @@ -3042,8 +3043,6 @@ static int __maybe_unused e100_suspend(struct device *dev_d) __e100_shutdown(to_pci_dev(dev_d), &wake); - device_wakeup_disable(dev_d); - return 0; } @@ -3051,6 +3050,14 @@ static int __maybe_unused e100_resume(struct device *dev_d) { struct net_device *netdev = dev_get_drvdata(dev_d); struct nic *nic = netdev_priv(netdev); + int err; + + err = pci_enable_device(to_pci_dev(dev_d)); + if (err) { + netdev_err(netdev, "Resume cannot enable PCI device, aborting\n"); + return err; + } + pci_set_master(to_pci_dev(dev_d)); /* disable reverse auto-negotiation */ if (nic->phy == phy_82552_v) { @@ -3062,10 +3069,11 @@ 
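Editorial note: the e100 suspend/resume hunks around this point enforce one invariant: detach the net device before the hardware goes down and re-attach it only after the hardware is back up, with the PCI device disabled on suspend and re-enabled (plus bus mastering) on resume. Skeleton with stand-in declarations whose signatures are simplified relative to the real netif helpers:

    extern void netif_device_detach(void *nd);
    extern void netif_device_attach(void *nd);
    extern int netif_running(void *nd);
    extern void hw_down(void *nic);
    extern int hw_up(void *nic);    /* assumed: 0 on success */

    static void my_suspend(void *nd, void *nic)
    {
            netif_device_detach(nd);    /* stop the stack queuing tx first */
            if (netif_running(nd))
                    hw_down(nic);
    }

    static int my_resume(void *nd, void *nic)
    {
            if (netif_running(nd) && hw_up(nic) != 0)
                    return -1;      /* hardware up before attach */
            netif_device_attach(nd);    /* tx may flow again */
            return 0;
    }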
static int __maybe_unused e100_resume(struct device *dev_d) smartspeed & ~(E100_82552_REV_ANEG)); } - netif_device_attach(netdev); if (netif_running(netdev)) e100_up(nic); + netif_device_attach(netdev); + return 0; } diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 3d528fba754b..4d939af0a626 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -161,6 +161,7 @@ enum i40e_vsi_state_t { __I40E_VSI_OVERFLOW_PROMISC, __I40E_VSI_REINIT_REQUESTED, __I40E_VSI_DOWN_REQUESTED, + __I40E_VSI_RELEASING, /* This must be last as it determines the size of the BITMAP */ __I40E_VSI_STATE_SIZE__, }; @@ -1247,6 +1248,7 @@ void i40e_ptp_restore_hw_time(struct i40e_pf *pf); void i40e_ptp_init(struct i40e_pf *pf); void i40e_ptp_stop(struct i40e_pf *pf); int i40e_ptp_alloc_pins(struct i40e_pf *pf); +int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset); int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi); i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf); i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf); diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 291e61ac3e44..2c1b1da1220e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -553,6 +553,14 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid); return; } + if (vsi->type != I40E_VSI_MAIN && + vsi->type != I40E_VSI_FDIR && + vsi->type != I40E_VSI_VMDQ2) { + dev_info(&pf->pdev->dev, + "vsi %d type %d descriptor rings not available\n", + vsi_seid, vsi->type); + return; + } if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) { dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid); return; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index ba862131b9bd..e118cf9265c7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1790,6 +1790,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, bool is_add) { struct i40e_pf *pf = vsi->back; + u16 num_tc_qps = 0; u16 sections = 0; u8 netdev_tc = 0; u16 numtc = 1; @@ -1797,13 +1798,33 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, u8 offset; u16 qmap; int i; - u16 num_tc_qps = 0; sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; offset = 0; + /* zero out queue mapping, it will get updated on the end of the function */ + memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping)); + + if (vsi->type == I40E_VSI_MAIN) { + /* This code helps add more queue to the VSI if we have + * more cores than RSS can support, the higher cores will + * be served by ATR or other filters. 
Furthermore, the + * non-zero req_queue_pairs says that user requested a new + * queue count via ethtool's set_channels, so use this + * value for queues distribution across traffic classes + */ + if (vsi->req_queue_pairs > 0) + vsi->num_queue_pairs = vsi->req_queue_pairs; + else if (pf->flags & I40E_FLAG_MSIX_ENABLED) + vsi->num_queue_pairs = pf->num_lan_msix; + } /* Number of queues per enabled TC */ - num_tc_qps = vsi->alloc_queue_pairs; + if (vsi->type == I40E_VSI_MAIN || + (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0)) + num_tc_qps = vsi->num_queue_pairs; + else + num_tc_qps = vsi->alloc_queue_pairs; + if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { /* Find numtc from enabled TC bitmap */ for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { @@ -1881,15 +1902,11 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, } ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); } - - /* Set actual Tx/Rx queue pairs */ - vsi->num_queue_pairs = offset; - if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) { - if (vsi->req_queue_pairs > 0) - vsi->num_queue_pairs = vsi->req_queue_pairs; - else if (pf->flags & I40E_FLAG_MSIX_ENABLED) - vsi->num_queue_pairs = pf->num_lan_msix; - } + /* Do not change previously set num_queue_pairs for PFs and VFs*/ + if ((vsi->type == I40E_VSI_MAIN && numtc != 1) || + (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) || + (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV)) + vsi->num_queue_pairs = offset; /* Scheduler section valid can only be set for ADD VSI */ if (is_add) { @@ -2623,7 +2640,8 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v] && - (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) { + (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) && + !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) { int ret = i40e_sync_vsi_filters(pf->vsi[v]); if (ret) { @@ -5427,6 +5445,58 @@ static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi, } /** + * i40e_update_adq_vsi_queues - update queue mapping for ADq VSI + * @vsi: the VSI being reconfigured + * @vsi_offset: offset from main VF VSI + */ +int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset) +{ + struct i40e_vsi_context ctxt = {}; + struct i40e_pf *pf; + struct i40e_hw *hw; + int ret; + + if (!vsi) + return I40E_ERR_PARAM; + pf = vsi->back; + hw = &pf->hw; + + ctxt.seid = vsi->seid; + ctxt.pf_num = hw->pf_id; + ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; + ctxt.flags = I40E_AQ_VSI_TYPE_VF; + ctxt.info = vsi->info; + + i40e_vsi_setup_queue_map(vsi, &ctxt, vsi->tc_config.enabled_tc, + false); + if (vsi->reconfig_rss) { + vsi->rss_size = min_t(int, pf->alloc_rss_size, + vsi->num_queue_pairs); + ret = i40e_vsi_config_rss(vsi); + if (ret) { + dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n"); + return ret; + } + vsi->reconfig_rss = false; + } + + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret) { + dev_info(&pf->pdev->dev, "Update vsi config failed, err %s aq_err %s\n", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } + /* update the local VSI info with updated queue map */ + i40e_vsi_update_queue_map(vsi, &ctxt); + vsi->info.valid_sections = 0; + + return ret; +} + +/** * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map * @vsi: VSI to be configured * @enabled_tc: TC bitmap @@ 
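Editorial note: the i40e_sync_filters_subtask() change above adds a second condition to the periodic loop, so a VSI flagged as releasing does not have filters synced into it mid-teardown. The loop in miniature; the flag and state bit values here are hypothetical:

    #define FLAG_FILTER_CHANGED     0x1
    #define STATE_RELEASING         0x2

    struct vsi { unsigned int flags, state; };

    static void sync_filters(struct vsi *v, int n, void (*sync)(struct vsi *))
    {
            for (int i = 0; i < n; i++) {
                    if (!(v[i].flags & FLAG_FILTER_CHANGED))
                            continue;
                    if (v[i].state & STATE_RELEASING)
                            continue;   /* half-torn-down VSI: skip */
                    sync(&v[i]);
            }
    }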
-5717,24 +5787,6 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi) } /** - * i40e_is_any_channel - channel exist or not - * @vsi: ptr to VSI to which channels are associated with - * - * Returns true or false if channel(s) exist for associated VSI or not - **/ -static bool i40e_is_any_channel(struct i40e_vsi *vsi) -{ - struct i40e_channel *ch, *ch_tmp; - - list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { - if (ch->initialized) - return true; - } - - return false; -} - -/** * i40e_get_max_queues_for_channel * @vsi: ptr to VSI to which channels are associated with * @@ -6240,26 +6292,15 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi, /* By default we are in VEPA mode, if this is the first VF/VMDq * VSI to be added switch to VEB mode. */ - if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) || - (!i40e_is_any_channel(vsi))) { - if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) { - dev_dbg(&pf->pdev->dev, - "Failed to create channel. Override queues (%u) not power of 2\n", - vsi->tc_config.tc_info[0].qcount); - return -EINVAL; - } - if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { - pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; - if (vsi->type == I40E_VSI_MAIN) { - if (pf->flags & I40E_FLAG_TC_MQPRIO) - i40e_do_reset(pf, I40E_PF_RESET_FLAG, - true); - else - i40e_do_reset_safe(pf, - I40E_PF_RESET_FLAG); - } + if (vsi->type == I40E_VSI_MAIN) { + if (pf->flags & I40E_FLAG_TC_MQPRIO) + i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); + else + i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG); } /* now onwards for main VSI, number of queues will be value * of TC0's queue count @@ -7912,12 +7953,20 @@ config_tc: vsi->seid); need_reset = true; goto exit; - } else { - dev_info(&vsi->back->pdev->dev, - "Setup channel (id:%u) utilizing num_queues %d\n", - vsi->seid, vsi->tc_config.tc_info[0].qcount); + } else if (enabled_tc && + (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) { + netdev_info(netdev, + "Failed to create channel. 
Override queues (%u) not power of 2\n", + vsi->tc_config.tc_info[0].qcount); + ret = -EINVAL; + need_reset = true; + goto exit; } + dev_info(&vsi->back->pdev->dev, + "Setup channel (id:%u) utilizing num_queues %d\n", + vsi->seid, vsi->tc_config.tc_info[0].qcount); + if (pf->flags & I40E_FLAG_TC_MQPRIO) { if (vsi->mqprio_qopt.max_rate[0]) { u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; @@ -8482,9 +8531,8 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi, err = i40e_add_del_cloud_filter(vsi, filter, true); if (err) { - dev_err(&pf->pdev->dev, - "Failed to add cloud filter, err %s\n", - i40e_stat_str(&pf->hw, err)); + dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %d\n", + err); goto err; } @@ -13771,7 +13819,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi) dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); return -ENODEV; } - + set_bit(__I40E_VSI_RELEASING, vsi->state); uplink_seid = vsi->uplink_seid; if (vsi->type != I40E_VSI_SRIOV) { if (vsi->netdev_registered) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 472f56b360b8..2ea4deb8fc44 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -183,17 +183,18 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf) /***********************misc routines*****************************/ /** - * i40e_vc_disable_vf + * i40e_vc_reset_vf * @vf: pointer to the VF info - * - * Disable the VF through a SW reset. + * @notify_vf: notify vf about reset or not + * Reset VF handler. **/ -static inline void i40e_vc_disable_vf(struct i40e_vf *vf) +static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf) { struct i40e_pf *pf = vf->pf; int i; - i40e_vc_notify_vf_reset(vf); + if (notify_vf) + i40e_vc_notify_vf_reset(vf); /* We want to ensure that an actual reset occurs initiated after this * function was called. 
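
The qcount check moved above relies on is_power_of_2(). For reference, a freestanding sketch of the same test, assuming an unsigned count; this mirrors, but is not, the kernel's helper:

    /* A non-zero count is a power of two iff exactly one bit is set;
     * n & (n - 1) clears the lowest set bit, so the result must be 0.
     */
    static int is_pow2(unsigned int n)
    {
            return n != 0 && (n & (n - 1)) == 0;
    }
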
However, we do not want to wait forever, so @@ -211,9 +212,14 @@ static inline void i40e_vc_disable_vf(struct i40e_vf *vf) usleep_range(10000, 20000); } - dev_warn(&vf->pf->pdev->dev, - "Failed to initiate reset for VF %d after 200 milliseconds\n", - vf->vf_id); + if (notify_vf) + dev_warn(&vf->pf->pdev->dev, + "Failed to initiate reset for VF %d after 200 milliseconds\n", + vf->vf_id); + else + dev_dbg(&vf->pf->pdev->dev, + "Failed to initiate reset for VF %d after 200 milliseconds\n", + vf->vf_id); } /** @@ -674,14 +680,13 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, u16 vsi_queue_id, struct virtchnl_rxq_info *info) { + u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx]; struct i40e_hw *hw = &pf->hw; struct i40e_hmc_obj_rxq rx_ctx; - u16 pf_queue_id; int ret = 0; - pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); - /* clear the context structure first */ memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq)); @@ -719,6 +724,10 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, } rx_ctx.rxmax = info->max_pkt_size; + /* if port VLAN is configured increase the max packet size */ + if (vsi->info.pvid) + rx_ctx.rxmax += VLAN_HLEN; + /* enable 32bytes desc always */ rx_ctx.dsize = 1; @@ -1940,6 +1949,32 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, } /** + * i40e_sync_vf_state + * @vf: pointer to the VF info + * @state: VF state + * + * Called from a VF message to synchronize the service with a potential + * VF reset state + **/ +static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state) +{ + int i; + + /* When handling some messages, it needs VF state to be set. + * It is possible that this flag is cleared during VF reset, + * so there is a need to wait until the end of the reset to + * handle the request message correctly. 
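
The rx_ctx.rxmax adjustment above grows the Rx limit by one 802.1Q tag when a port VLAN is configured. A minimal sketch of that sizing, assuming rxmax excludes the FCS (the patch does not say either way):

    #define ETH_HLEN  14 /* dst MAC + src MAC + EtherType */
    #define VLAN_HLEN  4 /* one 802.1Q tag */

    /* Largest frame the queue must accept for a given MTU; a port VLAN
     * inserted by hardware adds one tag on top of the untagged maximum.
     */
    static unsigned int rx_max_frame(unsigned int mtu, int has_port_vlan)
    {
            unsigned int max = ETH_HLEN + mtu;

            if (has_port_vlan)
                    max += VLAN_HLEN; /* e.g. 1514 -> 1518 for MTU 1500 */
            return max;
    }
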
+ */ + for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) { + if (test_bit(state, &vf->vf_states)) + return true; + usleep_range(10000, 20000); + } + + return test_bit(state, &vf->vf_states); +} + +/** * i40e_vc_get_version_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -1999,7 +2034,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) size_t len = 0; int ret; - if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -2106,20 +2141,6 @@ err: } /** - * i40e_vc_reset_vf_msg - * @vf: pointer to the VF info - * - * called from the VF to reset itself, - * unlike other virtchnl messages, PF driver - * doesn't send the response back to the VF - **/ -static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) -{ - if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) - i40e_reset_vf(vf, false); -} - -/** * i40e_vc_config_promiscuous_mode_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -2136,7 +2157,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) bool allmulti = false; bool alluni = false; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err_out; } @@ -2217,13 +2238,14 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) struct virtchnl_vsi_queue_config_info *qci = (struct virtchnl_vsi_queue_config_info *)msg; struct virtchnl_queue_pair_info *qpi; - struct i40e_pf *pf = vf->pf; u16 vsi_id, vsi_queue_id = 0; - u16 num_qps_all = 0; + struct i40e_pf *pf = vf->pf; i40e_status aq_ret = 0; int i, j = 0, idx = 0; + struct i40e_vsi *vsi; + u16 num_qps_all = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2310,9 +2332,15 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs; } else { - for (i = 0; i < vf->num_tc; i++) - pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs = - vf->ch[i].num_qps; + for (i = 0; i < vf->num_tc; i++) { + vsi = pf->vsi[vf->ch[i].vsi_idx]; + vsi->num_queue_pairs = vf->ch[i].num_qps; + + if (i40e_update_adq_vsi_queues(vsi, i)) { + aq_ret = I40E_ERR_CONFIG; + goto error_param; + } + } } error_param: @@ -2366,7 +2394,7 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; int i; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2538,7 +2566,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) struct i40e_pf *pf = vf->pf; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2588,7 +2616,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) u8 cur_pairs = vf->num_queue_pairs; struct i40e_pf *pf = vf->pf; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) return -EINVAL; if (req_pairs > I40E_MAX_VF_QUEUES) { @@ -2607,8 +2635,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) } else { /* successful request */ vf->num_req_queues = req_pairs; - i40e_vc_notify_vf_reset(vf); - i40e_reset_vf(vf, false); + i40e_vc_reset_vf(vf, true); 
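
i40e_sync_vf_state() above is a bounded poll: up to I40E_VF_STATE_WAIT_COUNT sleeps of 10-20 ms, then one final sample. A user-space sketch of the same shape, with the predicate and interval as stand-ins rather than driver API:

    #include <stdbool.h>
    #include <unistd.h>

    /* Retry a predicate with a short sleep between attempts; sample it
     * once more after the last sleep so the caller sees the final state.
     */
    static bool poll_until(bool (*pred)(void *), void *arg,
                           int max_tries, useconds_t interval_us)
    {
            for (int i = 0; i < max_tries; i++) {
                    if (pred(arg))
                            return true;
                    usleep(interval_us);
            }
            return pred(arg);
    }
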
return 0; } @@ -2634,7 +2661,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) memset(&stats, 0, sizeof(struct i40e_eth_stats)); - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2751,7 +2778,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) i40e_status ret = 0; int i; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { ret = I40E_ERR_PARAM; goto error_param; @@ -2823,7 +2850,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) i40e_status ret = 0; int i; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { ret = I40E_ERR_PARAM; goto error_param; @@ -2967,7 +2994,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; int i; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { aq_ret = I40E_ERR_PARAM; goto error_param; @@ -3087,9 +3114,9 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg) struct i40e_vsi *vsi = NULL; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) || - (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) { + vrk->key_len != I40E_HKEY_ARRAY_SIZE) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3118,9 +3145,9 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; u16 i; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) || - (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) { + vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3153,7 +3180,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; int len = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3189,7 +3216,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg) struct i40e_hw *hw = &pf->hw; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3214,7 +3241,7 @@ static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; struct i40e_vsi *vsi; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3240,7 +3267,7 @@ static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; struct i40e_vsi *vsi; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3467,7 +3494,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; int i, ret; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3598,7 +3625,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) 
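
The key_len/lut_entries checks above reject RSS tables whose size does not match the device. Once validated, such a LUT is commonly filled round-robin so hash buckets spread evenly across queues; a sketch of that background, not taken from the patch itself:

    /* Fill an RSS lookup table so hash buckets map to queues evenly. */
    static void fill_rss_lut(unsigned char *lut, unsigned int lut_size,
                             unsigned int num_queues)
    {
            for (unsigned int i = 0; i < lut_size; i++)
                    lut[i] = i % num_queues;
    }
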
i40e_status aq_ret = 0; int i, ret; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err_out; } @@ -3707,7 +3734,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; u64 speed = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3796,15 +3823,9 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) /* set this flag only after making sure all inputs are sane */ vf->adq_enabled = true; - /* num_req_queues is set when user changes number of queues via ethtool - * and this causes issue for default VSI(which depends on this variable) - * when ADq is enabled, hence reset it. - */ - vf->num_req_queues = 0; /* reset the VF in order to allocate resources */ - i40e_vc_notify_vf_reset(vf); - i40e_reset_vf(vf, false); + i40e_vc_reset_vf(vf, true); return I40E_SUCCESS; @@ -3824,7 +3845,7 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg) struct i40e_pf *pf = vf->pf; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3844,8 +3865,7 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg) } /* reset the VF in order to allocate resources */ - i40e_vc_notify_vf_reset(vf); - i40e_reset_vf(vf, false); + i40e_vc_reset_vf(vf, true); return I40E_SUCCESS; @@ -3907,7 +3927,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, i40e_vc_notify_vf_link_state(vf); break; case VIRTCHNL_OP_RESET_VF: - i40e_vc_reset_vf_msg(vf); + i40e_vc_reset_vf(vf, false); ret = 0; break; case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: @@ -4161,7 +4181,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) /* Force the VF interface down so it has to bring up with new MAC * address */ - i40e_vc_disable_vf(vf); + i40e_vc_reset_vf(vf, true); dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n"); error_param: @@ -4170,34 +4190,6 @@ error_param: } /** - * i40e_vsi_has_vlans - True if VSI has configured VLANs - * @vsi: pointer to the vsi - * - * Check if a VSI has configured any VLANs. False if we have a port VLAN or if - * we have no configured VLANs. Do not call while holding the - * mac_filter_hash_lock. - */ -static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi) -{ - bool have_vlans; - - /* If we have a port VLAN, then the VSI cannot have any VLANs - * configured, as all MAC/VLAN filters will be assigned to the PVID. - */ - if (vsi->info.pvid) - return false; - - /* Since we don't have a PVID, we know that if the device is in VLAN - * mode it must be because of a VLAN filter configured on this VSI. 
- */ - spin_lock_bh(&vsi->mac_filter_hash_lock); - have_vlans = i40e_is_vsi_in_vlan(vsi); - spin_unlock_bh(&vsi->mac_filter_hash_lock); - - return have_vlans; -} - -/** * i40e_ndo_set_vf_port_vlan * @netdev: network interface device structure * @vf_id: VF identifier @@ -4253,19 +4245,9 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, /* duplicate request, so just return success */ goto error_pvid; - if (i40e_vsi_has_vlans(vsi)) { - dev_err(&pf->pdev->dev, - "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", - vf_id); - /* Administrator Error - knock the VF offline until he does - * the right thing by reconfiguring his network correctly - * and then reloading the VF driver. - */ - i40e_vc_disable_vf(vf); - /* During reset the VF got a new VSI, so refresh the pointer. */ - vsi = pf->vsi[vf->lan_vsi_idx]; - } - + i40e_vc_reset_vf(vf, true); + /* During reset the VF got a new VSI, so refresh a pointer. */ + vsi = pf->vsi[vf->lan_vsi_idx]; /* Locked once because multiple functions below iterate list */ spin_lock_bh(&vsi->mac_filter_hash_lock); @@ -4641,7 +4623,7 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) goto out; vf->trusted = setting; - i40e_vc_disable_vf(vf); + i40e_vc_reset_vf(vf, true); dev_info(&pf->pdev->dev, "VF %u is now %strusted\n", vf_id, setting ? "" : "un"); diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 091e32c1bb46..49575a640a84 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -18,6 +18,8 @@ #define I40E_MAX_VF_PROMISC_FLAGS 3 +#define I40E_VF_STATE_WAIT_COUNT 20 + /* Various queue ctrls */ enum i40e_queue_ctrl { I40E_QUEUE_CTRL_UNKNOWN = 0, diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index e6e7c1da47fb..3789269ce741 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -39,6 +39,7 @@ #include "iavf_txrx.h" #include "iavf_fdir.h" #include "iavf_adv_rss.h" +#include <linux/bitmap.h> #define DEFAULT_DEBUG_LEVEL_SHIFT 3 #define PFX "iavf: " @@ -304,6 +305,7 @@ struct iavf_adapter { #define IAVF_FLAG_AQ_DEL_FDIR_FILTER BIT(26) #define IAVF_FLAG_AQ_ADD_ADV_RSS_CFG BIT(27) #define IAVF_FLAG_AQ_DEL_ADV_RSS_CFG BIT(28) +#define IAVF_FLAG_AQ_REQUEST_STATS BIT(29) /* OS defined structs */ struct net_device *netdev; @@ -443,6 +445,7 @@ int iavf_up(struct iavf_adapter *adapter); void iavf_down(struct iavf_adapter *adapter); int iavf_process_config(struct iavf_adapter *adapter); void iavf_schedule_reset(struct iavf_adapter *adapter); +void iavf_schedule_request_stats(struct iavf_adapter *adapter); void iavf_reset(struct iavf_adapter *adapter); void iavf_set_ethtool_ops(struct net_device *netdev); void iavf_update_stats(struct iavf_adapter *adapter); @@ -500,4 +503,5 @@ void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter); void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter); struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, const u8 *macaddr); +int iavf_lock_timeout(struct mutex *lock, unsigned int msecs); #endif /* _IAVF_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 5a359a0a20ec..461f5237a2f8 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ 
b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -354,6 +354,9 @@ static void iavf_get_ethtool_stats(struct net_device *netdev, struct iavf_adapter *adapter = netdev_priv(netdev); unsigned int i; + /* Explicitly request stats refresh */ + iavf_schedule_request_stats(adapter); + iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats); rcu_read_lock(); @@ -612,23 +615,44 @@ static int iavf_set_ringparam(struct net_device *netdev, if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; - new_tx_count = clamp_t(u32, ring->tx_pending, - IAVF_MIN_TXD, - IAVF_MAX_TXD); - new_tx_count = ALIGN(new_tx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE); + if (ring->tx_pending > IAVF_MAX_TXD || + ring->tx_pending < IAVF_MIN_TXD || + ring->rx_pending > IAVF_MAX_RXD || + ring->rx_pending < IAVF_MIN_RXD) { + netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n", + ring->tx_pending, ring->rx_pending, IAVF_MIN_TXD, + IAVF_MAX_RXD, IAVF_REQ_DESCRIPTOR_MULTIPLE); + return -EINVAL; + } + + new_tx_count = ALIGN(ring->tx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE); + if (new_tx_count != ring->tx_pending) + netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n", + new_tx_count); - new_rx_count = clamp_t(u32, ring->rx_pending, - IAVF_MIN_RXD, - IAVF_MAX_RXD); - new_rx_count = ALIGN(new_rx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE); + new_rx_count = ALIGN(ring->rx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE); + if (new_rx_count != ring->rx_pending) + netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n", + new_rx_count); /* if nothing to do return success */ if ((new_tx_count == adapter->tx_desc_count) && - (new_rx_count == adapter->rx_desc_count)) + (new_rx_count == adapter->rx_desc_count)) { + netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n"); return 0; + } + + if (new_tx_count != adapter->tx_desc_count) { + netdev_dbg(netdev, "Changing Tx descriptor count from %d to %d\n", + adapter->tx_desc_count, new_tx_count); + adapter->tx_desc_count = new_tx_count; + } - adapter->tx_desc_count = new_tx_count; - adapter->rx_desc_count = new_rx_count; + if (new_rx_count != adapter->rx_desc_count) { + netdev_dbg(netdev, "Changing Rx descriptor count from %d to %d\n", + adapter->rx_desc_count, new_rx_count); + adapter->rx_desc_count = new_rx_count; + } if (netif_running(netdev)) { adapter->flags |= IAVF_FLAG_RESET_NEEDED; @@ -723,12 +747,31 @@ static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue, * * Change the ITR settings for a specific queue. 
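
The ALIGN() calls above round a validated descriptor count up to IAVF_REQ_DESCRIPTOR_MULTIPLE. The arithmetic, assuming the multiple is a power of two (which ALIGN() requires):

    /* Round x up to the next multiple of a power-of-two 'mult'. */
    static unsigned int align_up(unsigned int x, unsigned int mult)
    {
            return (x + mult - 1) & ~(mult - 1);
    }
    /* e.g. align_up(1000, 32) == 1024: a request of 1000 descriptors is
     * rounded up, matching the netdev_info() rounding message above. */
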
**/ -static void iavf_set_itr_per_queue(struct iavf_adapter *adapter, - struct ethtool_coalesce *ec, int queue) +static int iavf_set_itr_per_queue(struct iavf_adapter *adapter, + struct ethtool_coalesce *ec, int queue) { struct iavf_ring *rx_ring = &adapter->rx_rings[queue]; struct iavf_ring *tx_ring = &adapter->tx_rings[queue]; struct iavf_q_vector *q_vector; + u16 itr_setting; + + itr_setting = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; + + if (ec->rx_coalesce_usecs != itr_setting && + ec->use_adaptive_rx_coalesce) { + netif_info(adapter, drv, adapter->netdev, + "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n"); + return -EINVAL; + } + + itr_setting = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; + + if (ec->tx_coalesce_usecs != itr_setting && + ec->use_adaptive_tx_coalesce) { + netif_info(adapter, drv, adapter->netdev, + "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n"); + return -EINVAL; + } rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs); @@ -751,6 +794,7 @@ static void iavf_set_itr_per_queue(struct iavf_adapter *adapter, * the Tx and Rx ITR values based on the values we have entered * into the q_vector, no need to write the values now. */ + return 0; } /** @@ -792,9 +836,11 @@ static int __iavf_set_coalesce(struct net_device *netdev, */ if (queue < 0) { for (i = 0; i < adapter->num_active_queues; i++) - iavf_set_itr_per_queue(adapter, ec, i); + if (iavf_set_itr_per_queue(adapter, ec, i)) + return -EINVAL; } else if (queue < adapter->num_active_queues) { - iavf_set_itr_per_queue(adapter, ec, queue); + if (iavf_set_itr_per_queue(adapter, ec, queue)) + return -EINVAL; } else { netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n", adapter->num_active_queues - 1); @@ -1776,6 +1822,7 @@ static int iavf_set_channels(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); u32 num_req = ch->combined_count; + int i; if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && adapter->num_tc) { @@ -1786,7 +1833,7 @@ static int iavf_set_channels(struct net_device *netdev, /* All of these should have already been checked by ethtool before this * even gets to us, but just to be sure. 
*/ - if (num_req > adapter->vsi_res->num_queue_pairs) + if (num_req == 0 || num_req > adapter->vsi_res->num_queue_pairs) return -EINVAL; if (num_req == adapter->num_active_queues) @@ -1798,6 +1845,20 @@ static int iavf_set_channels(struct net_device *netdev, adapter->num_req_queues = num_req; adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; iavf_schedule_reset(adapter); + + /* wait for the reset is done */ + for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { + msleep(IAVF_RESET_WAIT_MS); + if (adapter->flags & IAVF_FLAG_RESET_PENDING) + continue; + break; + } + if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) { + adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; + adapter->num_active_queues = num_req; + return -EOPNOTSUPP; + } + return 0; } @@ -1844,14 +1905,13 @@ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (hfunc) *hfunc = ETH_RSS_HASH_TOP; - if (!indir) - return 0; - - memcpy(key, adapter->rss_key, adapter->rss_key_size); + if (key) + memcpy(key, adapter->rss_key, adapter->rss_key_size); - /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - for (i = 0; i < adapter->rss_lut_size; i++) - indir[i] = (u32)adapter->rss_lut[i]; + if (indir) + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < adapter->rss_lut_size; i++) + indir[i] = (u32)adapter->rss_lut[i]; return 0; } diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 847d67e32a54..cfdbf8c08d18 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -147,7 +147,7 @@ enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, * * Returns 0 on success, negative on failure **/ -static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs) +int iavf_lock_timeout(struct mutex *lock, unsigned int msecs) { unsigned int wait, delay = 10; @@ -175,6 +175,19 @@ void iavf_schedule_reset(struct iavf_adapter *adapter) } /** + * iavf_schedule_request_stats - Set the flags and schedule statistics request + * @adapter: board private structure + * + * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly + * request and refresh ethtool stats + **/ +void iavf_schedule_request_stats(struct iavf_adapter *adapter) +{ + adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS; + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); +} + +/** * iavf_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure * @txqueue: queue number that is timing out @@ -697,6 +710,21 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan) } /** + * iavf_restore_filters + * @adapter: board private structure + * + * Restore existing non MAC filters when VF netdev comes back up + **/ +static void iavf_restore_filters(struct iavf_adapter *adapter) +{ + u16 vid; + + /* re-add all VLAN filters */ + for_each_set_bit(vid, adapter->vsi.active_vlans, VLAN_N_VID) + iavf_add_vlan(adapter, vid); +} + +/** * iavf_vlan_rx_add_vid - Add a VLAN filter to a device * @netdev: network device struct * @proto: unused protocol data @@ -709,8 +737,11 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev, if (!VLAN_ALLOWED(adapter)) return -EIO; + if (iavf_add_vlan(adapter, vid) == NULL) return -ENOMEM; + + set_bit(vid, adapter->vsi.active_vlans); return 0; } @@ -725,11 +756,10 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); - if (VLAN_ALLOWED(adapter)) { - iavf_del_vlan(adapter, vid); - return 
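
iavf_restore_filters() above re-adds VLANs from the vsi.active_vlans bitmap instead of the filter list, so the IDs survive an IFF_DOWN/IFF_UP cycle. A self-contained sketch of that bookkeeping, with the bit helpers written out (the kernel uses set_bit()/for_each_set_bit()):

    #include <limits.h>

    #define VLAN_N_VID    4096
    #define BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)

    static unsigned long active_vlans[VLAN_N_VID / BITS_PER_WORD];

    static void vid_set(unsigned int vid)
    {
            active_vlans[vid / BITS_PER_WORD] |= 1UL << (vid % BITS_PER_WORD);
    }

    static int vid_test(unsigned int vid)
    {
            return (active_vlans[vid / BITS_PER_WORD] >> (vid % BITS_PER_WORD)) & 1;
    }

    /* Replay every recorded VLAN, as the restore path does on open. */
    static void restore_vlans(void (*add_vlan)(unsigned int))
    {
            for (unsigned int vid = 0; vid < VLAN_N_VID; vid++)
                    if (vid_test(vid))
                            add_vlan(vid);
    }
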
0; - } - return -EIO; + iavf_del_vlan(adapter, vid); + clear_bit(vid, adapter->vsi.active_vlans); + + return 0; } /** @@ -1639,8 +1669,7 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC); return 0; } - - if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) && + if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) || (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) { iavf_set_promiscuous(adapter, 0); return 0; @@ -1688,6 +1717,11 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) iavf_del_adv_rss_cfg(adapter); return 0; } + if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) { + iavf_request_stats(adapter); + return 0; + } + return -EAGAIN; } @@ -2123,8 +2157,8 @@ static void iavf_disable_vf(struct iavf_adapter *adapter) iavf_free_misc_irq(adapter); iavf_reset_interrupt_capability(adapter); - iavf_free_queues(adapter); iavf_free_q_vectors(adapter); + iavf_free_queues(adapter); memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); iavf_shutdown_adminq(&adapter->hw); adapter->netdev->flags &= ~IFF_UP; @@ -2152,7 +2186,6 @@ static void iavf_reset_task(struct work_struct *work) struct net_device *netdev = adapter->netdev; struct iavf_hw *hw = &adapter->hw; struct iavf_mac_filter *f, *ftmp; - struct iavf_vlan_filter *vlf; struct iavf_cloud_filter *cf; u32 reg_val; int i = 0, err; @@ -2215,6 +2248,7 @@ static void iavf_reset_task(struct work_struct *work) } pci_set_master(adapter->pdev); + pci_restore_msi_state(adapter->pdev); if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) { dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", @@ -2233,6 +2267,7 @@ continue_reset: (adapter->state == __IAVF_RESETTING)); if (running) { + netdev->flags &= ~IFF_UP; netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); adapter->link_up = false; @@ -2292,11 +2327,6 @@ continue_reset: list_for_each_entry(f, &adapter->mac_filter_list, list) { f->add = true; } - /* re-add all VLAN filters */ - list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { - vlf->add = true; - } - spin_unlock_bh(&adapter->mac_vlan_list_lock); /* check if TCs are running and re-add all cloud filters */ @@ -2310,7 +2340,6 @@ continue_reset: spin_unlock_bh(&adapter->cloud_filter_list_lock); adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; - adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; iavf_misc_irq_enable(adapter); @@ -2344,7 +2373,7 @@ continue_reset: * to __IAVF_RUNNING */ iavf_up_complete(adapter); - + netdev->flags |= IFF_UP; iavf_irq_enable(adapter, true); } else { iavf_change_state(adapter, __IAVF_DOWN); @@ -2357,8 +2386,10 @@ continue_reset: reset_err: mutex_unlock(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); - if (running) + if (running) { iavf_change_state(adapter, __IAVF_RUNNING); + netdev->flags |= IFF_UP; + } dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); iavf_close(netdev); } @@ -2410,7 +2441,7 @@ static void iavf_adminq_task(struct work_struct *work) /* check for error indications */ val = rd32(hw, hw->aq.arq.len); - if (val == 0xdeadbeef) /* indicates device in reset */ + if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */ goto freedom; oldval = val; if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) { @@ -3095,8 +3126,10 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter, return -ENOMEM; while (!mutex_trylock(&adapter->crit_lock)) { - if (--count == 0) - goto err; + if 
(--count == 0) { + kfree(filter); + return err; + } udelay(1); } @@ -3107,11 +3140,11 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter, /* start out with flow type and eth type IPv4 to begin with */ filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW; err = iavf_parse_cls_flower(adapter, cls_flower, filter); - if (err < 0) + if (err) goto err; err = iavf_handle_tclass(adapter, tc, filter); - if (err < 0) + if (err) goto err; /* add filter to the list */ @@ -3308,6 +3341,9 @@ static int iavf_open(struct net_device *netdev) spin_unlock_bh(&adapter->mac_vlan_list_lock); + /* Restore VLAN filters that were removed with IFF_DOWN */ + iavf_restore_filters(adapter); + iavf_configure(adapter); iavf_up_complete(adapter); @@ -3415,11 +3451,16 @@ static int iavf_set_features(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); - /* Don't allow changing VLAN_RX flag when adapter is not capable - * of VLAN offload + /* Don't allow enabling VLAN features when adapter is not capable + * of VLAN offload/filtering */ if (!VLAN_ALLOWED(adapter)) { - if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) + netdev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_FILTER); + if (features & (NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_FILTER)) return -EINVAL; } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { if (features & NETIF_F_HW_VLAN_CTAG_RX) @@ -3503,7 +3544,8 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); - if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) + if (adapter->vf_res && + !(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) features &= ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER); diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 8c3f0f77cb57..d60bf7c21200 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -607,7 +607,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter) if (f->add) count++; } - if (!count) { + if (!count || !VLAN_ALLOWED(adapter)) { adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); return; @@ -673,9 +673,19 @@ void iavf_del_vlans(struct iavf_adapter *adapter) spin_lock_bh(&adapter->mac_vlan_list_lock); - list_for_each_entry(f, &adapter->vlan_filter_list, list) { - if (f->remove) + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + /* since VLAN capabilities are not allowed, we dont want to send + * a VLAN delete request because it will most likely fail and + * create unnecessary errors/noise, so just free the VLAN + * filters marked for removal to enable bailing out before + * sending a virtchnl message + */ + if (f->remove && !VLAN_ALLOWED(adapter)) { + list_del(&f->list); + kfree(f); + } else if (f->remove) { count++; + } } if (!count) { adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; @@ -784,6 +794,8 @@ void iavf_request_stats(struct iavf_adapter *adapter) /* no error message, this isn't crucial */ return; } + + adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS; adapter->current_op = VIRTCHNL_OP_GET_STATS; vqs.vsi_id = adapter->vsi_res->vsi_id; /* queue maps are ignored for this message - only the vsi is used */ @@ -1722,8 +1734,37 @@ void iavf_virtchnl_completion(struct iavf_adapter 
*adapter, } spin_lock_bh(&adapter->mac_vlan_list_lock); iavf_add_filter(adapter, adapter->hw.mac.addr); + + if (VLAN_ALLOWED(adapter)) { + if (!list_empty(&adapter->vlan_filter_list)) { + struct iavf_vlan_filter *vlf; + + /* re-add all VLAN filters over virtchnl */ + list_for_each_entry(vlf, + &adapter->vlan_filter_list, + list) + vlf->add = true; + + adapter->aq_required |= + IAVF_FLAG_AQ_ADD_VLAN_FILTER; + } + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); iavf_process_config(adapter); + + /* unlock crit_lock before acquiring rtnl_lock as other + * processes holding rtnl_lock could be waiting for the same + * crit_lock + */ + mutex_unlock(&adapter->crit_lock); + rtnl_lock(); + netdev_update_features(adapter->netdev); + rtnl_unlock(); + if (iavf_lock_timeout(&adapter->crit_lock, 10000)) + dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", + __FUNCTION__); + } break; case VIRTCHNL_OP_ENABLE_QUEUES: diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c index 7fdeb411b6df..3eb01731e496 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c @@ -97,6 +97,9 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets) new_cfg->etscfg.maxtcs = pf->hw.func_caps.common_cap.maxtc; + if (!bwcfg) + new_cfg->etscfg.tcbwtable[0] = 100; + if (!bwrec) new_cfg->etsrec.tcbwtable[0] = 100; @@ -167,15 +170,18 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode) if (mode == pf->dcbx_cap) return ICE_DCB_NO_HW_CHG; - pf->dcbx_cap = mode; qos_cfg = &pf->hw.port_info->qos_cfg; - if (mode & DCB_CAP_DCBX_VER_CEE) { - if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP) - return ICE_DCB_NO_HW_CHG; + + /* DSCP configuration is not DCBx negotiated */ + if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP) + return ICE_DCB_NO_HW_CHG; + + pf->dcbx_cap = mode; + + if (mode & DCB_CAP_DCBX_VER_CEE) qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE; - } else { + else qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE; - } dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode); return ICE_DCB_HW_CHG_RST; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c index 38960bcc384c..b6e7f47c8c78 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c @@ -1268,7 +1268,7 @@ ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool is_tun = tun == ICE_FD_HW_SEG_TUN; int err; - if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num)) + if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num, TNL_ALL)) continue; err = ice_fdir_write_fltr(pf, input, add, is_tun); if (err) @@ -1652,7 +1652,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) } /* return error if not an update and no available filters */ - fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port) ? 2 : 1; + fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port, TNL_ALL) ? 2 : 1; if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) && ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) { dev_err(dev, "Failed to add filter. 
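
The comment above spells out the deadlock being avoided: another task may take rtnl_lock first and then wait on crit_lock, so holding crit_lock while acquiring rtnl_lock is an ABBA inversion. A pthread sketch of the same discipline; the lock names mirror the driver's, but nothing here is its API:

    #include <pthread.h>

    static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t crit = PTHREAD_MUTEX_INITIALIZER;

    /* Called with 'crit' held: drop it, take 'rtnl' for the update,
     * then re-acquire 'crit' so the caller's locking state is restored.
     */
    static void update_features(void (*update)(void))
    {
            pthread_mutex_unlock(&crit);
            pthread_mutex_lock(&rtnl);
            update();
            pthread_mutex_unlock(&rtnl);
            pthread_mutex_lock(&crit);
    }
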
The maximum number of flow director filters has been reached.\n"); diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c index cbd8424631e3..4dca009bdd50 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_fdir.c @@ -924,7 +924,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len); loc = pkt; } else { - if (!ice_get_open_tunnel_port(hw, &tnl_port)) + if (!ice_get_open_tunnel_port(hw, &tnl_port, TNL_ALL)) return ICE_ERR_DOES_NOT_EXIST; if (!ice_fdir_pkt[idx].tun_pkt) return ICE_ERR_PARAM; diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 23cfcceb1536..6ad1c2559724 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -1899,9 +1899,11 @@ static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) * ice_get_open_tunnel_port - retrieve an open tunnel port * @hw: pointer to the HW structure * @port: returns open port + * @type: type of tunnel, can be TNL_LAST if it doesn't matter */ bool -ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port) +ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port, + enum ice_tunnel_type type) { bool res = false; u16 i; @@ -1909,7 +1911,8 @@ ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port) mutex_lock(&hw->tnl_lock); for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) - if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port) { + if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port && + (type == TNL_LAST || type == hw->tnl.tbl[i].type)) { *port = hw->tnl.tbl[i].port; res = true; break; diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h index 344c2637facd..a2863f38fd1f 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h @@ -33,7 +33,8 @@ enum ice_status ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, unsigned long *bm, struct list_head *fv_list); bool -ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port); +ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port, + enum ice_tunnel_type type); int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, unsigned int idx, struct udp_tunnel_info *ti); int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 40562600a8cf..09a3297cd63c 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -89,8 +89,13 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) if (!vsi->rx_rings) goto err_rings; - /* XDP will have vsi->alloc_txq Tx queues as well, so double the size */ - vsi->txq_map = devm_kcalloc(dev, (2 * vsi->alloc_txq), + /* txq_map needs to have enough space to track both Tx (stack) rings + * and XDP rings; at this point vsi->num_xdp_txq might not be set, + * so use num_possible_cpus() as we want to always provide XDP ring + * per CPU, regardless of queue count settings from user that might + * have come from ethtool's set_channels() callback; + */ + vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()), sizeof(*vsi->txq_map), GFP_KERNEL); if (!vsi->txq_map) diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 
f099797f35e3..73c61cdb036f 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2609,7 +2609,18 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) ice_stat_str(status)); goto clear_xdp_rings; } - ice_vsi_assign_bpf_prog(vsi, prog); + + /* assign the prog only when it's not already present on VSI; + * this flow is a subject of both ethtool -L and ndo_bpf flows; + * VSI rebuild that happens under ethtool -L can expose us to + * the bpf_prog refcount issues as we would be swapping same + * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put + * on it as it would be treated as an 'old_prog'; for ndo_bpf + * this is not harmful as dev_xdp_install bumps the refcount + * before calling the op exposed by the driver; + */ + if (!ice_is_xdp_ena_vsi(vsi)) + ice_vsi_assign_bpf_prog(vsi, prog); return 0; clear_xdp_rings: @@ -2785,6 +2796,11 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, if (xdp_ring_err) NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); } else { + /* safe to call even when prog == vsi->xdp_prog as + * dev_xdp_install in net/core/dev.c incremented prog's + * refcount so corresponding bpf_prog_put won't cause + * underflow + */ ice_vsi_assign_bpf_prog(vsi, prog); } @@ -5865,6 +5881,9 @@ static int ice_up_complete(struct ice_vsi *vsi) netif_carrier_on(vsi->netdev); } + /* clear this now, and the first stats read will be used as baseline */ + vsi->stat_offsets_loaded = false; + ice_service_task_schedule(pf); return 0; @@ -5911,14 +5930,15 @@ ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats st /** * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters * @vsi: the VSI to be updated + * @vsi_stats: the stats struct to be updated * @rings: rings to work on * @count: number of rings */ static void -ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings, - u16 count) +ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, + struct rtnl_link_stats64 *vsi_stats, + struct ice_tx_ring **rings, u16 count) { - struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; u16 i; for (i = 0; i < count; i++) { @@ -5942,15 +5962,13 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings, */ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) { - struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; + struct rtnl_link_stats64 *vsi_stats; u64 pkts, bytes; int i; - /* reset netdev stats */ - vsi_stats->tx_packets = 0; - vsi_stats->tx_bytes = 0; - vsi_stats->rx_packets = 0; - vsi_stats->rx_bytes = 0; + vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC); + if (!vsi_stats) + return; /* reset non-netdev (extended) stats */ vsi->tx_restart = 0; @@ -5962,7 +5980,8 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) rcu_read_lock(); /* update Tx rings counters */ - ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq); + ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings, + vsi->num_txq); /* update Rx rings counters */ ice_for_each_rxq(vsi, i) { @@ -5977,10 +5996,17 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) /* update XDP Tx rings counters */ if (ice_is_xdp_ena_vsi(vsi)) - ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings, + ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings, vsi->num_xdp_txq); rcu_read_unlock(); + + vsi->net_stats.tx_packets = vsi_stats->tx_packets; + vsi->net_stats.tx_bytes = vsi_stats->tx_bytes; + vsi->net_stats.rx_packets = 
vsi_stats->rx_packets; + vsi->net_stats.rx_bytes = vsi_stats->rx_bytes; + + kfree(vsi_stats); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 793f4a9fc2cd..183d93033890 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -3796,10 +3796,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { * ice_find_recp - find a recipe * @hw: pointer to the hardware structure * @lkup_exts: extension sequence to match + * @tun_type: type of recipe tunnel * * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found. */ -static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts) +static u16 +ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, + enum ice_sw_tunnel_type tun_type) { bool refresh_required = true; struct ice_sw_recipe *recp; @@ -3860,8 +3863,9 @@ static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts) } /* If for "i"th recipe the found was never set to false * then it means we found our match + * Also tun type of recipe needs to be checked */ - if (found) + if (found && recp[i].tun_type == tun_type) return i; /* Return the recipe ID */ } } @@ -4651,11 +4655,12 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, } /* Look for a recipe which matches our requested fv / mask list */ - *rid = ice_find_recp(hw, lkup_exts); + *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type); if (*rid < ICE_MAX_NUM_RECIPES) /* Success if found a recipe that match the existing criteria */ goto err_unroll; + rm->tun_type = rinfo->tun_type; /* Recipe we need does not exist, add a recipe */ status = ice_add_sw_recipe(hw, rm, profiles); if (status) @@ -4958,11 +4963,13 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type, switch (tun_type) { case ICE_SW_TUN_VXLAN: + if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN)) + return ICE_ERR_CFG; + break; case ICE_SW_TUN_GENEVE: - if (!ice_get_open_tunnel_port(hw, &open_port)) + if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE)) return ICE_ERR_CFG; break; - default: /* Nothing needs to be done for this tunnel type */ return 0; @@ -5555,7 +5562,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, if (status) return status; - rid = ice_find_recp(hw, &lkup_exts); + rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type); /* If did not find a recipe that match the existing criteria */ if (rid == ICE_MAX_NUM_RECIPES) return ICE_ERR_PARAM; diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index e5d23feb6701..25cca5c4ae57 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -74,21 +74,13 @@ static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner) return inner ? 
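
ice_update_vsi_ring_stats() above now sums into a heap-allocated scratch struct and copies the totals out once at the end, so readers of net_stats never observe the zeroed intermediate values that resetting it in place used to expose. The shape of that change, with the ring layout stubbed:

    #include <stdlib.h>

    struct ring_stats { unsigned long long pkts, bytes; };
    struct net_stats  { unsigned long long tx_pkts, tx_bytes; };

    /* Accumulate privately, publish once; on allocation failure the
     * previously published totals are simply left in place.
     */
    static void update_tx_stats(struct net_stats *pub,
                                const struct ring_stats *rings, int n)
    {
            struct net_stats *tmp = calloc(1, sizeof(*tmp));

            if (!tmp)
                    return;
            for (int i = 0; i < n; i++) {
                    tmp->tx_pkts  += rings[i].pkts;
                    tmp->tx_bytes += rings[i].bytes;
            }
            *pub = *tmp;
            free(tmp);
    }
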
ICE_IPV6_IL : ICE_IPV6_OFOS; } -static enum ice_protocol_type -ice_proto_type_from_l4_port(bool inner, u16 ip_proto) +static enum ice_protocol_type ice_proto_type_from_l4_port(u16 ip_proto) { - if (inner) { - switch (ip_proto) { - case IPPROTO_UDP: - return ICE_UDP_ILOS; - } - } else { - switch (ip_proto) { - case IPPROTO_TCP: - return ICE_TCP_IL; - case IPPROTO_UDP: - return ICE_UDP_OF; - } + switch (ip_proto) { + case IPPROTO_TCP: + return ICE_TCP_IL; + case IPPROTO_UDP: + return ICE_UDP_ILOS; } return 0; @@ -191,8 +183,9 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr, i++; } - if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) { - list[i].type = ice_proto_type_from_l4_port(false, hdr->l3_key.ip_proto); + if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) && + hdr->l3_key.ip_proto == IPPROTO_UDP) { + list[i].type = ICE_UDP_OF; list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port; list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port; i++; @@ -317,7 +310,7 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags, ICE_TC_FLWR_FIELD_SRC_L4_PORT)) { struct ice_tc_l4_hdr *l4_key, *l4_mask; - list[i].type = ice_proto_type_from_l4_port(inner, headers->l3_key.ip_proto); + list[i].type = ice_proto_type_from_l4_port(headers->l3_key.ip_proto); l4_key = &headers->l4_key; l4_mask = &headers->l4_mask; @@ -802,7 +795,8 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule, headers->l3_mask.ttl = match.mask->ttl; } - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) { + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) && + fltr->tunnel_type != TNL_VXLAN && fltr->tunnel_type != TNL_GENEVE) { struct flow_match_ports match; flow_rule_match_enc_ports(rule, &match); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 217ff5e9a6f1..6427e7ec93de 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -1617,6 +1617,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) ice_vc_set_default_allowlist(vf); ice_vf_fdir_exit(vf); + ice_vf_fdir_init(vf); /* clean VF control VSI when resetting VFs since it should be * setup only when VF creates its first FDIR rule. */ @@ -1747,6 +1748,7 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) } ice_vf_fdir_exit(vf); + ice_vf_fdir_init(vf); /* clean VF control VSI when resetting VF since it should be setup * only when VF creates its first FDIR rule. 
*/ @@ -2021,6 +2023,10 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) if (ret) goto err_unroll_sriov; + /* rearm global interrupts */ + if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state)) + ice_irq_dynamic_ena(hw, NULL, NULL); + return 0; err_unroll_sriov: diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index ff55cb415b11..bb9a80847298 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -383,6 +383,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count) while (i--) { dma = xsk_buff_xdp_get_dma(*xdp); rx_desc->read.pkt_addr = cpu_to_le64(dma); + rx_desc->wb.status_error0 = 0; rx_desc++; xdp++; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 836be0d3b291..fd54d3ef890b 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -8026,7 +8026,7 @@ static int igb_poll(struct napi_struct *napi, int budget) if (likely(napi_complete_done(napi, work_done))) igb_ring_irq_enable(q_vector); - return min(work_done, budget - 1); + return work_done; } /** diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 6433c909c6b2..072391c494ce 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -25,6 +25,7 @@ #include <linux/io.h> #include <linux/dma-mapping.h> #include <linux/module.h> +#include <linux/property.h> #include <asm/checksum.h> @@ -239,6 +240,7 @@ ltq_etop_hw_init(struct net_device *dev) { struct ltq_etop_priv *priv = netdev_priv(dev); int i; + int err; ltq_pmu_enable(PMU_PPE); @@ -273,7 +275,13 @@ ltq_etop_hw_init(struct net_device *dev) if (IS_TX(i)) { ltq_dma_alloc_tx(&ch->dma); - request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv); + err = request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv); + if (err) { + netdev_err(dev, + "Unable to get Tx DMA IRQ %d\n", + irq); + return err; + } } else if (IS_RX(i)) { ltq_dma_alloc_rx(&ch->dma); for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM; @@ -281,7 +289,13 @@ ltq_etop_hw_init(struct net_device *dev) if (ltq_etop_alloc_skb(ch)) return -ENOMEM; ch->dma.desc = 0; - request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv); + err = request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv); + if (err) { + netdev_err(dev, + "Unable to get Rx DMA IRQ %d\n", + irq); + return err; + } } ch->dma.irq = irq; } @@ -726,7 +740,7 @@ static struct platform_driver ltq_mii_driver = { }, }; -int __init +static int __init init_ltq_etop(void) { int ret = platform_driver_probe(<q_mii_driver, ltq_etop_probe); diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index 62a97c46fba0..ef878973b859 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -429,12 +429,14 @@ static const struct of_device_id orion_mdio_match[] = { }; MODULE_DEVICE_TABLE(of, orion_mdio_match); +#ifdef CONFIG_ACPI static const struct acpi_device_id orion_mdio_acpi_match[] = { { "MRVL0100", BUS_TYPE_SMI }, { "MRVL0101", BUS_TYPE_XSMI }, { }, }; MODULE_DEVICE_TABLE(acpi, orion_mdio_acpi_match); +#endif static struct platform_driver orion_mdio_driver = { .probe = orion_mdio_probe, diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 2b18d89d9756..6da8a595026b 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ 
b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -2960,11 +2960,11 @@ static int mvpp2_rxq_init(struct mvpp2_port *port, mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); if (priv->percpu_pools) { - err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->id, 0); + err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq, 0); if (err < 0) goto err_free_dma; - err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->id, 0); + err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq, 0); if (err < 0) goto err_unregister_rxq_short; @@ -5017,11 +5017,13 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu) mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8); } + if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) { + netdev_err(dev, "Illegal MTU value %d (> %d) for XDP mode\n", + mtu, (int)MVPP2_MAX_RX_BUF_SIZE); + return -EINVAL; + } + if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) { - if (port->xdp_prog) { - netdev_err(dev, "Jumbo frames are not supported with XDP\n"); - return -EINVAL; - } if (priv->percpu_pools) { netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu); mvpp2_bm_switch_buffers(priv, false); @@ -5307,8 +5309,8 @@ static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf) bool running = netif_running(port->dev); bool reset = !prog != !port->xdp_prog; - if (port->dev->mtu > ETH_DATA_LEN) { - NL_SET_ERR_MSG_MOD(bpf->extack, "XDP is not supported with jumbo frames enabled"); + if (port->dev->mtu > MVPP2_MAX_RX_BUF_SIZE) { + NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP"); return -EOPNOTSUPP; } @@ -7456,7 +7458,7 @@ static int mvpp2_probe(struct platform_device *pdev) shared = num_present_cpus() - priv->nthreads; if (shared > 0) - bitmap_fill(&priv->lock_map, + bitmap_set(&priv->lock_map, 0, min_t(int, shared, MVPP2_MAX_THREADS)); for (i = 0; i < MVPP2_MAX_THREADS; i++) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index cb56e171ddd4..3ca6b942ebe2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -2341,7 +2341,7 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, goto free_regions; break; default: - return err; + goto free_regions; } mw->mbox_wq = alloc_workqueue(name, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index c7fd466a0efd..a09a507369ac 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -236,10 +236,11 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, u64 lmt_addr, val, tbl_base; int pf, vf, num_vfs, hw_vfs; void __iomem *lmt_map_base; - int index = 0, off = 0; - int bytes_not_copied; int buf_size = 10240; + size_t off = 0; + int index = 0; char *buf; + int ret; /* don't allow partial reads */ if (*ppos != 0) @@ -303,15 +304,17 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, } off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); - bytes_not_copied = copy_to_user(buffer, buf, off); + ret = min(off, count); + if (copy_to_user(buffer, buf, ret)) + ret = -EFAULT; kfree(buf); iounmap(lmt_map_base); - if (bytes_not_copied) - return -EFAULT; + if (ret < 0) + return ret; - *ppos = off; - return off; + *ppos = ret; + return ret; } RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL); diff --git 
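
The debugfs read fix above clamps the generated length to the caller's buffer with min(off, count) and converts a failed copy into -EFAULT instead of returning the raw copy_to_user() remainder. A user-space analogue of that contract:

    #include <string.h>

    /* Return the number of bytes delivered, never more than 'count';
     * a negative value stands in for -EFAULT on a bad destination.
     */
    static long read_buf(char *dst, unsigned long count,
                         const char *src, unsigned long len)
    {
            unsigned long n = len < count ? len : count; /* min(len, count) */

            if (!dst)
                    return -14; /* EFAULT */
            memcpy(dst, src, n);
            return (long)n;
    }
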
a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c index 0ef68fdd1f26..61c20907315f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c @@ -5,6 +5,8 @@ * */ +#include <linux/module.h> + #include "otx2_common.h" #include "otx2_ptp.h" diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c index 3ce6ccd0f539..b4599fe4ca8d 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c @@ -497,8 +497,8 @@ int prestera_bridge_port_join(struct net_device *br_dev, br_port = prestera_bridge_port_add(bridge, port->dev); if (IS_ERR(br_port)) { - err = PTR_ERR(br_port); - goto err_brport_create; + prestera_bridge_put(bridge); + return PTR_ERR(br_port); } err = switchdev_bridge_port_offload(br_port->dev, port->dev, NULL, @@ -519,8 +519,6 @@ err_port_join: switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL); err_switchdev_offload: prestera_bridge_port_put(br_port); -err_brport_create: - prestera_bridge_put(bridge); return err; } @@ -1124,7 +1122,7 @@ static int prestera_switchdev_blk_event(struct notifier_block *unused, prestera_port_obj_attr_set); break; default: - err = -EOPNOTSUPP; + return NOTIFY_DONE; } return notifier_from_errno(err); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 066d79e4ecfc..10238bedd694 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -670,7 +670,7 @@ void __init mlx4_en_init_ptys2ethtool_map(void) MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_T, SPEED_1000, ETHTOOL_LINK_MODE_1000baseT_Full_BIT); MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_CX_SGMII, SPEED_1000, - ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); + ETHTOOL_LINK_MODE_1000baseX_Full_BIT); MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_KX, SPEED_1000, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_T, SPEED_10000, @@ -682,9 +682,9 @@ void __init mlx4_en_init_ptys2ethtool_map(void) MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KR, SPEED_10000, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CR, SPEED_10000, - ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT); MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_SR, SPEED_10000, - ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT); MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_20GBASE_KR2, SPEED_20000, ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT, ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 3f6d5c384637..f1c10f2bda78 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2286,9 +2286,14 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, bool carry_xdp_prog) { struct bpf_prog *xdp_prog; - int i, t; + int i, t, ret; - mlx4_en_copy_priv(tmp, priv, prof); + ret = mlx4_en_copy_priv(tmp, priv, prof); + if (ret) { + en_warn(priv, "%s: mlx4_en_copy_priv() failed, return\n", + __func__); + return ret; + } if (mlx4_en_alloc_resources(tmp)) { en_warn(priv, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c 
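
The mlx4 table fixes above replace copy-pasted KX/KR entries with the ethtool modes that actually match each PHY type (1000baseX, 10000baseCR, 10000baseSR). A stripped-down version of that kind of mapping table; the enum values and strings are illustrative, not the driver's:

    /* Map a PHY type to the ethtool link mode it should advertise. */
    enum phy_type { PHY_1000BASE_CX_SGMII, PHY_10GBASE_CR, PHY_10GBASE_SR };

    static const char *ethtool_mode(enum phy_type t)
    {
            switch (t) {
            case PHY_1000BASE_CX_SGMII: return "1000baseX/Full";
            case PHY_10GBASE_CR:        return "10000baseCR/Full";
            case PHY_10GBASE_SR:        return "10000baseSR/Full";
            }
            return "unknown";
    }
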
index f71ec4d9d68e..a46284ca5172 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -339,6 +339,9 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_PAGE_FAULT_RESUME: case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS: case MLX5_CMD_OP_DEALLOC_SF: + case MLX5_CMD_OP_DESTROY_UCTX: + case MLX5_CMD_OP_DESTROY_UMEM: + case MLX5_CMD_OP_MODIFY_RQT: return MLX5_CMD_STAT_OK; case MLX5_CMD_OP_QUERY_HCA_CAP: @@ -444,7 +447,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_MODIFY_TIS: case MLX5_CMD_OP_QUERY_TIS: case MLX5_CMD_OP_CREATE_RQT: - case MLX5_CMD_OP_MODIFY_RQT: case MLX5_CMD_OP_QUERY_RQT: case MLX5_CMD_OP_CREATE_FLOW_TABLE: @@ -464,9 +466,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT: case MLX5_CMD_OP_QUERY_GENERAL_OBJECT: case MLX5_CMD_OP_CREATE_UCTX: - case MLX5_CMD_OP_DESTROY_UCTX: case MLX5_CMD_OP_CREATE_UMEM: - case MLX5_CMD_OP_DESTROY_UMEM: case MLX5_CMD_OP_ALLOC_MEMIC: case MLX5_CMD_OP_MODIFY_XRQ: case MLX5_CMD_OP_RELEASE_XRQ_ERROR: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index 02e77ffe5c3e..5371ad0a12eb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -164,13 +164,14 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) MLX5_SET(destroy_cq_in, in, cqn, cq->cqn); MLX5_SET(destroy_cq_in, in, uid, cq->uid); err = mlx5_cmd_exec_in(dev, destroy_cq, in); + if (err) + return err; synchronize_irq(cq->irqn); - mlx5_cq_put(cq); wait_for_completion(&cq->free); - return err; + return 0; } EXPORT_SYMBOL(mlx5_core_destroy_cq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 07c8d9811bc8..10d195042ab5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -507,6 +507,8 @@ void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) if (!mlx5_debugfs_root) return; - if (cq->dbg) + if (cq->dbg) { rem_res_tree(cq->dbg); + cq->dbg = NULL; + } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c index 142953847996..0015a81eb9a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c @@ -13,6 +13,9 @@ struct mlx5e_rx_res { unsigned int max_nch; u32 drop_rqn; + struct mlx5e_packet_merge_param pkt_merge_param; + struct rw_semaphore pkt_merge_param_sem; + struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS]; bool rss_active; u32 rss_rqns[MLX5E_INDIR_RQT_SIZE]; @@ -392,6 +395,7 @@ static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res) if (err) goto out; + /* Separated from the channels RQs, does not share pkt_merge state with them */ mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, mlx5e_rqt_get_rqtn(&res->ptp.rqt), inner_ft_support); @@ -447,6 +451,9 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev, res->max_nch = max_nch; res->drop_rqn = drop_rqn; + res->pkt_merge_param = *init_pkt_merge_param; + init_rwsem(&res->pkt_merge_param_sem); + err = mlx5e_rx_res_rss_init_def(res, init_pkt_merge_param, init_nch); if (err) goto err_out; @@ -513,7 +520,7 @@ u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res) return 
mlx5e_tir_get_tirn(&res->ptp.tir); } -u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix) +static u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix) { return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt); } @@ -656,6 +663,9 @@ int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res, if (!builder) return -ENOMEM; + down_write(&res->pkt_merge_param_sem); + res->pkt_merge_param = *pkt_merge_param; + mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param); final_err = 0; @@ -681,6 +691,7 @@ int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res, } } + up_write(&res->pkt_merge_param_sem); mlx5e_tir_builder_free(builder); return final_err; } @@ -689,3 +700,31 @@ struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res * { return mlx5e_rss_get_hash(res->rss[0]); } + +int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq, + struct mlx5e_tir *tir) +{ + bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT; + struct mlx5e_tir_builder *builder; + u32 rqtn; + int err; + + builder = mlx5e_tir_builder_alloc(false); + if (!builder) + return -ENOMEM; + + rqtn = mlx5e_rx_res_get_rqtn_direct(res, rxq); + + mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, rqtn, + inner_ft_support); + mlx5e_tir_builder_build_direct(builder); + mlx5e_tir_builder_build_tls(builder); + down_read(&res->pkt_merge_param_sem); + mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param); + err = mlx5e_tir_init(tir, builder, res->mdev, false); + up_read(&res->pkt_merge_param_sem); + + mlx5e_tir_builder_free(builder); + + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h index d09f7d174a51..b39b20a720e0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h @@ -37,9 +37,6 @@ u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt); u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res); -/* RQTN getters for modules that create their own TIRs */ -u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix); - /* Activate/deactivate API */ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs); void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res); @@ -69,4 +66,7 @@ struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx); /* Workaround for hairpin */ struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res); +/* Accel TIRs */ +int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq, + struct mlx5e_tir *tir); #endif /* __MLX5_EN_RX_RES_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index c1c6e74c79c4..2445e2ae3324 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -1356,9 +1356,13 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv, int mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv, struct mlx5_flow_attr *attr, + struct mlx5e_tc_mod_hdr_acts *mod_acts, const struct flow_action_entry *act, struct netlink_ext_ack *extack) { + bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR; + int err; + if (!priv) { 
NL_SET_ERR_MSG_MOD(extack, "offload of ct action isn't available"); @@ -1369,6 +1373,17 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv, attr->ct_attr.ct_action = act->ct.action; attr->ct_attr.nf_ft = act->ct.flow_table; + if (!clear_action) + goto out; + + err = mlx5_tc_ct_entry_set_registers(priv, mod_acts, 0, 0, 0, 0); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to set registers for ct clear"); + return err; + } + attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + +out: return 0; } @@ -1898,23 +1913,16 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5_tc_ct_priv *ct_priv, memcpy(pre_ct_attr, attr, attr_sz); - err = mlx5_tc_ct_entry_set_registers(ct_priv, mod_acts, 0, 0, 0, 0); - if (err) { - ct_dbg("Failed to set register for ct clear"); - goto err_set_registers; - } - mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type, mod_acts->num_actions, mod_acts->actions); if (IS_ERR(mod_hdr)) { err = PTR_ERR(mod_hdr); ct_dbg("Failed to add create ct clear mod hdr"); - goto err_set_registers; + goto err_mod_hdr; } pre_ct_attr->modify_hdr = mod_hdr; - pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; rule = mlx5_tc_rule_insert(priv, orig_spec, pre_ct_attr); if (IS_ERR(rule)) { @@ -1930,7 +1938,7 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5_tc_ct_priv *ct_priv, err_insert: mlx5_modify_header_dealloc(priv->mdev, mod_hdr); -err_set_registers: +err_mod_hdr: netdev_warn(priv->netdev, "Failed to offload ct clear flow, err %d\n", err); kfree(pre_ct_attr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h index 363329f4aac6..99662af1e41a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h @@ -110,6 +110,7 @@ int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec); int mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv, struct mlx5_flow_attr *attr, + struct mlx5e_tc_mod_hdr_acts *mod_acts, const struct flow_action_entry *act, struct netlink_ext_ack *extack); @@ -172,6 +173,7 @@ mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec) static inline int mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv, struct mlx5_flow_attr *attr, + struct mlx5e_tc_mod_hdr_acts *mod_acts, const struct flow_action_entry *act, struct netlink_ext_ack *extack) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h index 8f64f2c8895a..b689701ac7d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h @@ -102,6 +102,7 @@ struct mlx5e_tc_flow { refcount_t refcnt; struct rcu_head rcu_head; struct completion init_done; + struct completion del_hw_done; int tunnel_id; /* the mapped tunnel id of this flow */ struct mlx5_flow_attr *attr; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index 660cca73c36c..042b1abe1437 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -245,8 +245,14 @@ static void mlx5e_take_tmp_flow(struct mlx5e_tc_flow *flow, struct list_head *flow_list, int index) { - if (IS_ERR(mlx5e_flow_get(flow))) + if (IS_ERR(mlx5e_flow_get(flow))) { + /* Flow is being deleted concurrently. Wait for it to be + * unoffloaded from hardware, otherwise deleting encap will + * fail. 
+ */ + wait_for_completion(&flow->del_hw_done); return; + } wait_for_completion(&flow->init_done); flow->tmp_entry_index = index; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index fb5397324aa4..2db9573a3fe6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -191,7 +191,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb, eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2; eseg->swp_inner_l4_offset = (skb->csum_start + skb->head - skb->data) / 2; - if (skb->protocol == htons(ETH_P_IPV6)) + if (inner_ip_hdr(skb)->version == 6) eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6; break; default: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index 62abce008c7b..15711814d2d2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -55,6 +55,7 @@ struct mlx5e_ktls_offload_context_rx { DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS); /* resync */ + spinlock_t lock; /* protects resync fields */ struct mlx5e_ktls_rx_resync_ctx resync; struct list_head list; }; @@ -99,25 +100,6 @@ mlx5e_ktls_rx_resync_create_resp_list(void) return resp_list; } -static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 rqtn) -{ - struct mlx5e_tir_builder *builder; - int err; - - builder = mlx5e_tir_builder_alloc(false); - if (!builder) - return -ENOMEM; - - mlx5e_tir_builder_build_rqt(builder, mdev->mlx5e_res.hw_objs.td.tdn, rqtn, false); - mlx5e_tir_builder_build_direct(builder); - mlx5e_tir_builder_build_tls(builder); - err = mlx5e_tir_init(tir, builder, mdev, false); - - mlx5e_tir_builder_free(builder); - - return err; -} - static void accel_rule_handle_work(struct work_struct *work) { struct mlx5e_ktls_offload_context_rx *priv_rx; @@ -386,14 +368,18 @@ static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_r struct mlx5e_icosq *sq; bool trigger_poll; - memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq)); - sq = &c->async_icosq; ktls_resync = sq->ktls_resync; + trigger_poll = false; spin_lock_bh(&ktls_resync->lock); - list_add_tail(&priv_rx->list, &ktls_resync->list); - trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state); + spin_lock_bh(&priv_rx->lock); + memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq)); + if (list_empty(&priv_rx->list)) { + list_add_tail(&priv_rx->list, &ktls_resync->list); + trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state); + } + spin_unlock_bh(&priv_rx->lock); spin_unlock_bh(&ktls_resync->lock); if (!trigger_poll) @@ -604,7 +590,6 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, struct mlx5_core_dev *mdev; struct mlx5e_priv *priv; int rxq, err; - u32 rqtn; tls_ctx = tls_get_ctx(sk); priv = netdev_priv(netdev); @@ -617,6 +602,8 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, if (err) goto err_create_key; + INIT_LIST_HEAD(&priv_rx->list); + spin_lock_init(&priv_rx->lock); priv_rx->crypto_info = *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info; @@ -628,9 +615,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, priv_rx->sw_stats = &priv->tls->sw_stats; mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx); - 
rqtn = mlx5e_rx_res_get_rqtn_direct(priv->rx_res, rxq); - - err = mlx5e_ktls_create_tir(mdev, &priv_rx->tir, rqtn); + err = mlx5e_rx_res_tls_tir_create(priv->rx_res, rxq, &priv_rx->tir); if (err) goto err_create_tir; @@ -730,10 +715,14 @@ bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget) priv_rx = list_first_entry(&local_list, struct mlx5e_ktls_offload_context_rx, list); + spin_lock(&priv_rx->lock); cseg = post_static_params(sq, priv_rx); - if (IS_ERR(cseg)) + if (IS_ERR(cseg)) { + spin_unlock(&priv_rx->lock); break; - list_del(&priv_rx->list); + } + list_del_init(&priv_rx->list); + spin_unlock(&priv_rx->lock); db_cseg = cseg; } if (db_cseg) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index e58a9ec42553..48895d79796a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1080,6 +1080,10 @@ static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = { &MLX5E_STATS_GRP(pme), &MLX5E_STATS_GRP(channels), &MLX5E_STATS_GRP(per_port_buff_congest), +#ifdef CONFIG_MLX5_EN_IPSEC + &MLX5E_STATS_GRP(ipsec_sw), + &MLX5E_STATS_GRP(ipsec_hw), +#endif }; static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 96967b0a2441..793511d5ee4c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -543,13 +543,13 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, u16 klm_entries, u16 index) { struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; - u16 entries, pi, i, header_offset, err, wqe_bbs, new_entries; + u16 entries, pi, header_offset, err, wqe_bbs, new_entries; u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey; struct page *page = shampo->last_page; u64 addr = shampo->last_addr; struct mlx5e_dma_info *dma_info; struct mlx5e_umr_wqe *umr_wqe; - int headroom; + int headroom, i; headroom = rq->buff.headroom; new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1)); @@ -601,9 +601,7 @@ update_klm: err_unmap: while (--i >= 0) { - if (--index < 0) - index = shampo->hd_per_wq - 1; - dma_info = &shampo->info[index]; + dma_info = &shampo->info[--index]; if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) { dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE); mlx5e_page_release(rq, dma_info, true); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 835caa1c7b74..3d45f4ae80c0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1600,6 +1600,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, else mlx5e_tc_unoffload_fdb_rules(esw, flow, attr); } + complete_all(&flow->del_hw_done); if (mlx5_flow_has_geneve_opt(flow)) mlx5_geneve_tlv_option_del(priv->mdev->geneve); @@ -3607,7 +3608,9 @@ parse_tc_nic_actions(struct mlx5e_priv *priv, attr->dest_chain = act->chain_index; break; case FLOW_ACTION_CT: - err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack); + err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, + &parse_attr->mod_hdr_acts, + act, extack); if (err) return err; @@ -4276,7 +4279,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported"); return -EOPNOTSUPP; } - err = 
mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack); + err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, + &parse_attr->mod_hdr_acts, + act, extack); if (err) return err; @@ -4465,6 +4470,7 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size, INIT_LIST_HEAD(&flow->l3_to_l2_reformat); refcount_set(&flow->refcnt, 1); init_completion(&flow->init_done); + init_completion(&flow->del_hw_done); *__flow = flow; *__parse_attr = parse_attr; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index c6cc67cb4f6a..d377ddc70fc7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -130,7 +130,7 @@ static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw, /* If vports min rate divider is 0 but their group has bw_share configured, then * need to set bw_share for vports to minimal value. */ - if (!group_level && !max_guarantee && group->bw_share) + if (!group_level && !max_guarantee && group && group->bw_share) return 1; return 0; } @@ -423,7 +423,7 @@ static int esw_qos_vport_update_group(struct mlx5_eswitch *esw, return err; /* Recalculate bw share weights of old and new groups */ - if (vport->qos.bw_share) { + if (vport->qos.bw_share || new_group->bw_share) { esw_qos_normalize_vports_min_rate(esw, curr_group, extack); esw_qos_normalize_vports_min_rate(esw, new_group, extack); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index ec136b499204..51a8cecc4a7c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1305,12 +1305,17 @@ abort: */ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { + bool toggle_lag; int ret; if (!mlx5_esw_allowed(esw)) return 0; - mlx5_lag_disable_change(esw->dev); + toggle_lag = esw->mode == MLX5_ESWITCH_NONE; + + if (toggle_lag) + mlx5_lag_disable_change(esw->dev); + down_write(&esw->mode_lock); if (esw->mode == MLX5_ESWITCH_NONE) { ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs); @@ -1324,7 +1329,10 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) esw->esw_funcs.num_vfs = num_vfs; } up_write(&esw->mode_lock); - mlx5_lag_enable_change(esw->dev); + + if (toggle_lag) + mlx5_lag_enable_change(esw->dev); + return ret; } @@ -1572,6 +1580,11 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) esw->enabled_vports = 0; esw->mode = MLX5_ESWITCH_NONE; esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE; + if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) && + MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)) + esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC; + else + esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; dev->priv.eswitch = esw; BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head); @@ -1934,7 +1947,7 @@ free_out: return err; } -u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev) +u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev) { struct mlx5_eswitch *esw = dev->priv.eswitch; @@ -1948,7 +1961,7 @@ mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev) struct mlx5_eswitch *esw; esw = dev->priv.eswitch; - return mlx5_esw_allowed(esw) ? esw->offloads.encap : + return (mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS) ? 
esw->offloads.encap : DEVLINK_ESWITCH_ENCAP_MODE_NONE; } EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index f4eaa5893886..32bc08a39925 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -329,14 +329,25 @@ static bool esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr) { struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + bool result = false; int i; - for (i = esw_attr->split_count; i < esw_attr->out_count; i++) + /* Indirect table is supported only for flows with in_port uplink + * and the destination is a vport on the same eswitch as the uplink; + * return false in case at least one of the destinations doesn't meet + * these criteria. + */ + for (i = esw_attr->split_count; i < esw_attr->out_count; i++) { if (esw_attr->dests[i].rep && mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport, - esw_attr->dests[i].mdev)) - return true; - return false; + esw_attr->dests[i].mdev)) { + result = true; + } else { + result = false; + break; + } + } + return result; } static int @@ -2512,6 +2523,7 @@ static int esw_set_master_egress_rule(struct mlx5_core_dev *master, struct mlx5_eswitch *esw = master->priv.eswitch; struct mlx5_flow_table_attr ft_attr = { .max_fte = 1, .prio = 0, .level = 0, + .flags = MLX5_FLOW_TABLE_OTHER_VPORT, }; struct mlx5_flow_namespace *egress_ns; struct mlx5_flow_table *acl; @@ -3183,12 +3195,6 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) u64 mapping_id; int err; - if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) && - MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap)) - esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC; - else - esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; - mutex_init(&esw->offloads.termtbl_mutex); mlx5_rdma_enable_roce(esw->dev); @@ -3286,7 +3292,6 @@ void esw_offloads_disable(struct mlx5_eswitch *esw) esw_offloads_metadata_uninit(esw); mlx5_rdma_disable_roce(esw->dev); mutex_destroy(&esw->offloads.termtbl_mutex); - esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; } static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode) @@ -3630,7 +3635,7 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, *encap = esw->offloads.encap; unlock: up_write(&esw->mode_lock); - return 0; + return err; } static bool diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 31c99d53faf7..7e0e04cf26f8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -40,7 +40,7 @@ #define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000) /* Max number of counters to query in bulk read is 32K */ #define MLX5_SW_MAX_COUNTERS_BULK BIT(15) -#define MLX5_SF_NUM_COUNTERS_BULK 6 +#define MLX5_SF_NUM_COUNTERS_BULK 8 #define MLX5_FC_POOL_MAX_THRESHOLD BIT(18) #define MLX5_FC_POOL_USED_BUFF_RATIO 10 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 64f1abc4dc36..3ca998874c50 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -835,6 +835,9 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev) health->timer.expires = jiffies + msecs_to_jiffies(poll_interval_ms); add_timer(&health->timer); + + if (mlx5_core_is_pf(dev) && 
MLX5_CAP_MCAM_REG(dev, mrtc)) + queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0); } void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health) @@ -902,8 +905,6 @@ int mlx5_health_init(struct mlx5_core_dev *dev) INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work); INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work); INIT_DELAYED_WORK(&health->update_fw_log_ts_work, mlx5_health_log_ts_update); - if (mlx5_core_is_pf(dev)) - queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 48d2ea690d7a..4ddf6b330a44 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -615,6 +615,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, bool is_bonded, is_in_lag, mode_supported; int bond_status = 0; int num_slaves = 0; + int changed = 0; int idx; if (!netif_is_lag_master(upper)) @@ -653,27 +654,27 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, */ is_in_lag = num_slaves == MLX5_MAX_PORTS && bond_status == 0x3; - if (!mlx5_lag_is_ready(ldev) && is_in_lag) { - NL_SET_ERR_MSG_MOD(info->info.extack, - "Can't activate LAG offload, PF is configured with more than 64 VFs"); - return 0; - } - /* Lag mode must be activebackup or hash. */ mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP || tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH; - if (is_in_lag && !mode_supported) - NL_SET_ERR_MSG_MOD(info->info.extack, - "Can't activate LAG offload, TX type isn't supported"); - is_bonded = is_in_lag && mode_supported; if (tracker->is_bonded != is_bonded) { tracker->is_bonded = is_bonded; - return 1; + changed = 1; } - return 0; + if (!is_in_lag) + return changed; + + if (!mlx5_lag_is_ready(ldev)) + NL_SET_ERR_MSG_MOD(info->info.extack, + "Can't activate LAG offload, PF is configured with more than 64 VFs"); + else if (!mode_supported) + NL_SET_ERR_MSG_MOD(info->info.extack, + "Can't activate LAG offload, TX type isn't supported"); + + return changed; } static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev, @@ -716,9 +717,6 @@ static int mlx5_lag_netdev_event(struct notifier_block *this, ldev = container_of(this, struct mlx5_lag, nb); - if (!mlx5_lag_is_ready(ldev) && event == NETDEV_CHANGELOWERSTATE) - return NOTIFY_DONE; - tracker = ldev->tracker; switch (event) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c index ad63dd45c8fb..a6592f9c3c05 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c @@ -608,4 +608,5 @@ void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev) if (port_sel->tunnel) mlx5_destroy_ttc_table(port_sel->inner.ttc); mlx5_lag_destroy_definers(ldev); + memset(port_sel, 0, sizeof(*port_sel)); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c index 0dd96a6b140d..c1df0d3595d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c @@ -31,11 +31,11 @@ static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_type dev->timeouts->to[type] = val; } -static void tout_set_def_val(struct mlx5_core_dev *dev) +void mlx5_tout_set_def_val(struct mlx5_core_dev *dev) { int i; - for (i = 
MLX5_TO_FW_PRE_INIT_TIMEOUT_MS; i < MAX_TIMEOUT_TYPES; i++) + for (i = 0; i < MAX_TIMEOUT_TYPES; i++) tout_set(dev, tout_def_sw_val[i], i); } @@ -45,7 +45,6 @@ int mlx5_tout_init(struct mlx5_core_dev *dev) if (!dev->timeouts) return -ENOMEM; - tout_set_def_val(dev); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h index 31faa5c17aa9..1c42ead782fa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h @@ -34,6 +34,7 @@ int mlx5_tout_init(struct mlx5_core_dev *dev); void mlx5_tout_cleanup(struct mlx5_core_dev *dev); void mlx5_tout_query_iseg(struct mlx5_core_dev *dev); int mlx5_tout_query_dtor(struct mlx5_core_dev *dev); +void mlx5_tout_set_def_val(struct mlx5_core_dev *dev); u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type); #define mlx5_tout_ms(dev, type) _mlx5_tout_ms(dev, MLX5_TO_##type##_MS) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index a92a92a52346..7df9c7f8d9c8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -992,11 +992,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot) if (mlx5_core_is_pf(dev)) pcie_print_link_status(dev->pdev); - err = mlx5_tout_init(dev); - if (err) { - mlx5_core_err(dev, "Failed initializing timeouts, aborting\n"); - return err; - } + mlx5_tout_set_def_val(dev); /* wait for firmware to accept initialization segments configurations */ @@ -1005,13 +1001,13 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot) if (err) { mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n", mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT)); - goto err_tout_cleanup; + return err; } err = mlx5_cmd_init(dev); if (err) { mlx5_core_err(dev, "Failed initializing command interface, aborting\n"); - goto err_tout_cleanup; + return err; } mlx5_tout_query_iseg(dev); @@ -1075,18 +1071,16 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot) mlx5_set_driver_version(dev); - mlx5_start_health_poll(dev); - err = mlx5_query_hca_caps(dev); if (err) { mlx5_core_err(dev, "query hca failed\n"); - goto stop_health; + goto reclaim_boot_pages; } + mlx5_start_health_poll(dev); + return 0; -stop_health: - mlx5_stop_health_poll(dev, boot); reclaim_boot_pages: mlx5_reclaim_startup_pages(dev); err_disable_hca: @@ -1094,8 +1088,6 @@ err_disable_hca: err_cmd_cleanup: mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN); mlx5_cmd_cleanup(dev); -err_tout_cleanup: - mlx5_tout_cleanup(dev); return err; } @@ -1114,7 +1106,6 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot) mlx5_core_disable_hca(dev, 0); mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN); mlx5_cmd_cleanup(dev); - mlx5_tout_cleanup(dev); return 0; } @@ -1476,6 +1467,12 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) mlx5_debugfs_root); INIT_LIST_HEAD(&priv->traps); + err = mlx5_tout_init(dev); + if (err) { + mlx5_core_err(dev, "Failed initializing timeouts, aborting\n"); + goto err_timeout_init; + } + err = mlx5_health_init(dev); if (err) goto err_health_init; @@ -1501,6 +1498,8 @@ err_adev_init: err_pagealloc_init: mlx5_health_cleanup(dev); err_health_init: + mlx5_tout_cleanup(dev); +err_timeout_init: debugfs_remove(dev->priv.dbg_root); mutex_destroy(&priv->pgdir_mutex); mutex_destroy(&priv->alloc_mutex); @@ -1518,6 +1517,7 @@ void 
mlx5_mdev_uninit(struct mlx5_core_dev *dev) mlx5_adev_cleanup(dev); mlx5_pagealloc_cleanup(dev); mlx5_health_cleanup(dev); + mlx5_tout_cleanup(dev); debugfs_remove_recursive(dev->priv.dbg_root); mutex_destroy(&priv->pgdir_mutex); mutex_destroy(&priv->alloc_mutex); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c index 49089cbe897c..8cbd36c82b3b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c @@ -135,25 +135,14 @@ static void dr_domain_fill_uplink_caps(struct mlx5dr_domain *dmn, static int dr_domain_query_vport(struct mlx5dr_domain *dmn, u16 vport_number, + bool other_vport, struct mlx5dr_cmd_vport_cap *vport_caps) { - u16 cmd_vport = vport_number; - bool other_vport = true; int ret; - if (vport_number == MLX5_VPORT_UPLINK) { - dr_domain_fill_uplink_caps(dmn, vport_caps); - return 0; - } - - if (dmn->info.caps.is_ecpf && vport_number == MLX5_VPORT_ECPF) { - other_vport = false; - cmd_vport = 0; - } - ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev, other_vport, - cmd_vport, + vport_number, &vport_caps->icm_address_rx, &vport_caps->icm_address_tx); if (ret) @@ -161,7 +150,7 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn, ret = mlx5dr_cmd_query_gvmi(dmn->mdev, other_vport, - cmd_vport, + vport_number, &vport_caps->vport_gvmi); if (ret) return ret; @@ -176,9 +165,15 @@ static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn) { return dr_domain_query_vport(dmn, dmn->info.caps.is_ecpf ? MLX5_VPORT_ECPF : 0, + false, &dmn->info.caps.vports.esw_manager_caps); } +static void dr_domain_query_uplink(struct mlx5dr_domain *dmn) +{ + dr_domain_fill_uplink_caps(dmn, &dmn->info.caps.vports.uplink_caps); +} + static struct mlx5dr_cmd_vport_cap * dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport) { @@ -190,7 +185,7 @@ dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport) if (!vport_caps) return NULL; - ret = dr_domain_query_vport(dmn, vport, vport_caps); + ret = dr_domain_query_vport(dmn, vport, true, vport_caps); if (ret) { kvfree(vport_caps); return NULL; @@ -207,16 +202,26 @@ dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport) return vport_caps; } +static bool dr_domain_is_esw_mgr_vport(struct mlx5dr_domain *dmn, u16 vport) +{ + struct mlx5dr_cmd_caps *caps = &dmn->info.caps; + + return (caps->is_ecpf && vport == MLX5_VPORT_ECPF) || + (!caps->is_ecpf && vport == 0); +} + struct mlx5dr_cmd_vport_cap * mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport) { struct mlx5dr_cmd_caps *caps = &dmn->info.caps; struct mlx5dr_cmd_vport_cap *vport_caps; - if ((caps->is_ecpf && vport == MLX5_VPORT_ECPF) || - (!caps->is_ecpf && vport == 0)) + if (dr_domain_is_esw_mgr_vport(dmn, vport)) return &caps->vports.esw_manager_caps; + if (vport == MLX5_VPORT_UPLINK) + return &caps->vports.uplink_caps; + vport_load: vport_caps = xa_load(&caps->vports.vports_caps_xa, vport); if (vport_caps) @@ -241,17 +246,6 @@ static void dr_domain_clear_vports(struct mlx5dr_domain *dmn) } } -static int dr_domain_query_uplink(struct mlx5dr_domain *dmn) -{ - struct mlx5dr_cmd_vport_cap *vport_caps; - - vport_caps = mlx5dr_domain_get_vport_cap(dmn, MLX5_VPORT_UPLINK); - if (!vport_caps) - return -EINVAL; - - return 0; -} - static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev, struct mlx5dr_domain *dmn) { @@ -281,11 +275,7 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev, goto 
free_vports_caps_xa; } - ret = dr_domain_query_uplink(dmn); - if (ret) { - mlx5dr_err(dmn, "Failed to query uplink vport caps (err: %d)", ret); - goto free_vports_caps_xa; - } + dr_domain_query_uplink(dmn); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c index 75c775bee351..793365242e85 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c @@ -924,11 +924,12 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher, /* Check that all mask data was consumed */ for (i = 0; i < consumed_mask.match_sz; i++) { - if (consumed_mask.match_buf[i]) { - mlx5dr_dbg(dmn, "Match param mask contains unsupported parameters\n"); - ret = -EOPNOTSUPP; - goto free_consumed_mask; - } + if (!((u8 *)consumed_mask.match_buf)[i]) + continue; + + mlx5dr_dbg(dmn, "Match param mask contains unsupported parameters\n"); + ret = -EOPNOTSUPP; + goto free_consumed_mask; } ret = 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 3028b776da00..2333c2439c28 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -764,6 +764,7 @@ struct mlx5dr_roce_cap { struct mlx5dr_vports { struct mlx5dr_cmd_vport_cap esw_manager_caps; + struct mlx5dr_cmd_vport_cap uplink_caps; struct xarray vports_caps_xa; }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 5925db386b1b..03e5bad4e405 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2153,7 +2153,7 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, max_ports = mlxsw_core_max_ports(mlxsw_sp->core); local_port = mlxsw_reg_pude_local_port_get(pude_pl); - if (WARN_ON_ONCE(local_port >= max_ports)) + if (WARN_ON_ONCE(!local_port || local_port >= max_ports)) return; mlxsw_sp_port = mlxsw_sp->ports[local_port]; if (!mlxsw_sp_port) @@ -3290,10 +3290,10 @@ mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core) u8 max_rif_mac_profiles; if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES)) - return -EIO; - - max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core, - MAX_RIF_MAC_PROFILES); + max_rif_mac_profiles = 1; + else + max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core, + MAX_RIF_MAC_PROFILES); devlink_resource_size_params_init(&size_params, max_rif_mac_profiles, max_rif_mac_profiles, 1, DEVLINK_RESOURCE_UNIT_ENTRY); diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 4fc97823bc84..7d7647481f70 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -914,8 +914,7 @@ static int lan743x_phy_reset(struct lan743x_adapter *adapter) } static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter, - u8 duplex, u16 local_adv, - u16 remote_adv) + u16 local_adv, u16 remote_adv) { struct lan743x_phy *phy = &adapter->phy; u8 cap; @@ -943,7 +942,6 @@ static void lan743x_phy_link_status_change(struct net_device *netdev) phy_print_status(phydev); if (phydev->state == PHY_RUNNING) { - struct ethtool_link_ksettings ksettings; int remote_advertisement = 0; int local_advertisement = 0; @@ -980,18 +978,14 @@ static void 
lan743x_phy_link_status_change(struct net_device *netdev) } lan743x_csr_write(adapter, MAC_CR, data); - memset(&ksettings, 0, sizeof(ksettings)); - phy_ethtool_get_link_ksettings(netdev, &ksettings); local_advertisement = linkmode_adv_to_mii_adv_t(phydev->advertising); remote_advertisement = linkmode_adv_to_mii_adv_t(phydev->lp_advertising); - lan743x_phy_update_flowcontrol(adapter, - ksettings.base.duplex, - local_advertisement, + lan743x_phy_update_flowcontrol(adapter, local_advertisement, remote_advertisement); - lan743x_ptp_update_latency(adapter, ksettings.base.speed); + lan743x_ptp_update_latency(adapter, phydev->speed); } } diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c index 34b971ff8ef8..078d6a5a0768 100644 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.c +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -480,16 +480,16 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc, if (err) goto out; - err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size, - &hwc_wq->msg_buf); - if (err) - goto out; - hwc_wq->hwc = hwc; hwc_wq->gdma_wq = queue; hwc_wq->queue_depth = q_depth; hwc_wq->hwc_cq = hwc_cq; + err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size, + &hwc_wq->msg_buf); + if (err) + goto out; + *hwc_wq_ptr = hwc_wq; return 0; out: diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index e6c18b598d5c..1e4ad953cffb 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -1278,6 +1278,225 @@ int ocelot_fdb_dump(struct ocelot *ocelot, int port, } EXPORT_SYMBOL(ocelot_fdb_dump); +static void ocelot_populate_l2_ptp_trap_key(struct ocelot_vcap_filter *trap) +{ + trap->key_type = OCELOT_VCAP_KEY_ETYPE; + *(__be16 *)trap->key.etype.etype.value = htons(ETH_P_1588); + *(__be16 *)trap->key.etype.etype.mask = htons(0xffff); +} + +static void +ocelot_populate_ipv4_ptp_event_trap_key(struct ocelot_vcap_filter *trap) +{ + trap->key_type = OCELOT_VCAP_KEY_IPV4; + trap->key.ipv4.dport.value = PTP_EV_PORT; + trap->key.ipv4.dport.mask = 0xffff; +} + +static void +ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap) +{ + trap->key_type = OCELOT_VCAP_KEY_IPV6; + trap->key.ipv6.dport.value = PTP_EV_PORT; + trap->key.ipv6.dport.mask = 0xffff; +} + +static void +ocelot_populate_ipv4_ptp_general_trap_key(struct ocelot_vcap_filter *trap) +{ + trap->key_type = OCELOT_VCAP_KEY_IPV4; + trap->key.ipv4.dport.value = PTP_GEN_PORT; + trap->key.ipv4.dport.mask = 0xffff; +} + +static void +ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap) +{ + trap->key_type = OCELOT_VCAP_KEY_IPV6; + trap->key.ipv6.dport.value = PTP_GEN_PORT; + trap->key.ipv6.dport.mask = 0xffff; +} + +static int ocelot_trap_add(struct ocelot *ocelot, int port, + unsigned long cookie, + void (*populate)(struct ocelot_vcap_filter *f)) +{ + struct ocelot_vcap_block *block_vcap_is2; + struct ocelot_vcap_filter *trap; + bool new = false; + int err; + + block_vcap_is2 = &ocelot->block[VCAP_IS2]; + + trap = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, cookie, + false); + if (!trap) { + trap = kzalloc(sizeof(*trap), GFP_KERNEL); + if (!trap) + return -ENOMEM; + + populate(trap); + trap->prio = 1; + trap->id.cookie = cookie; + trap->id.tc_offload = false; + trap->block_id = VCAP_IS2; + trap->type = OCELOT_VCAP_FILTER_OFFLOAD; + trap->lookup = 0; + trap->action.cpu_copy_ena = true; + trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; + 
trap->action.port_mask = 0; + new = true; + } + + trap->ingress_port_mask |= BIT(port); + + if (new) + err = ocelot_vcap_filter_add(ocelot, trap, NULL); + else + err = ocelot_vcap_filter_replace(ocelot, trap); + if (err) { + trap->ingress_port_mask &= ~BIT(port); + if (!trap->ingress_port_mask) + kfree(trap); + return err; + } + + return 0; +} + +static int ocelot_trap_del(struct ocelot *ocelot, int port, + unsigned long cookie) +{ + struct ocelot_vcap_block *block_vcap_is2; + struct ocelot_vcap_filter *trap; + + block_vcap_is2 = &ocelot->block[VCAP_IS2]; + + trap = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, cookie, + false); + if (!trap) + return 0; + + trap->ingress_port_mask &= ~BIT(port); + if (!trap->ingress_port_mask) + return ocelot_vcap_filter_del(ocelot, trap); + + return ocelot_vcap_filter_replace(ocelot, trap); +} + +static int ocelot_l2_ptp_trap_add(struct ocelot *ocelot, int port) +{ + unsigned long l2_cookie = ocelot->num_phys_ports + 1; + + return ocelot_trap_add(ocelot, port, l2_cookie, + ocelot_populate_l2_ptp_trap_key); +} + +static int ocelot_l2_ptp_trap_del(struct ocelot *ocelot, int port) +{ + unsigned long l2_cookie = ocelot->num_phys_ports + 1; + + return ocelot_trap_del(ocelot, port, l2_cookie); +} + +static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port) +{ + unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2; + unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3; + int err; + + err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie, + ocelot_populate_ipv4_ptp_event_trap_key); + if (err) + return err; + + err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie, + ocelot_populate_ipv4_ptp_general_trap_key); + if (err) + ocelot_trap_del(ocelot, port, ipv4_ev_cookie); + + return err; +} + +static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port) +{ + unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2; + unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3; + int err; + + err = ocelot_trap_del(ocelot, port, ipv4_ev_cookie); + err |= ocelot_trap_del(ocelot, port, ipv4_gen_cookie); + return err; +} + +static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port) +{ + unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4; + unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5; + int err; + + err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie, + ocelot_populate_ipv6_ptp_event_trap_key); + if (err) + return err; + + err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie, + ocelot_populate_ipv6_ptp_general_trap_key); + if (err) + ocelot_trap_del(ocelot, port, ipv6_ev_cookie); + + return err; +} + +static int ocelot_ipv6_ptp_trap_del(struct ocelot *ocelot, int port) +{ + unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4; + unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5; + int err; + + err = ocelot_trap_del(ocelot, port, ipv6_ev_cookie); + err |= ocelot_trap_del(ocelot, port, ipv6_gen_cookie); + return err; +} + +static int ocelot_setup_ptp_traps(struct ocelot *ocelot, int port, + bool l2, bool l4) +{ + int err; + + if (l2) + err = ocelot_l2_ptp_trap_add(ocelot, port); + else + err = ocelot_l2_ptp_trap_del(ocelot, port); + if (err) + return err; + + if (l4) { + err = ocelot_ipv4_ptp_trap_add(ocelot, port); + if (err) + goto err_ipv4; + + err = ocelot_ipv6_ptp_trap_add(ocelot, port); + if (err) + goto err_ipv6; + } else { + err = ocelot_ipv4_ptp_trap_del(ocelot, port); + + err |= ocelot_ipv6_ptp_trap_del(ocelot, port); + } + if (err) + return err; + + return 0; + +err_ipv6: + 
ocelot_ipv4_ptp_trap_del(ocelot, port); +err_ipv4: + if (l2) + ocelot_l2_ptp_trap_del(ocelot, port); + return err; +} + int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr) { return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config, @@ -1288,7 +1507,9 @@ EXPORT_SYMBOL(ocelot_hwstamp_get); int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr) { struct ocelot_port *ocelot_port = ocelot->ports[port]; + bool l2 = false, l4 = false; struct hwtstamp_config cfg; + int err; if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; @@ -1320,28 +1541,42 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr) switch (cfg.rx_filter) { case HWTSTAMP_FILTER_NONE: break; - case HWTSTAMP_FILTER_ALL: - case HWTSTAMP_FILTER_SOME: - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - case HWTSTAMP_FILTER_NTP_ALL: case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + l4 = true; + break; case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + l2 = true; + break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + l2 = true; + l4 = true; break; default: mutex_unlock(&ocelot->ptp_lock); return -ERANGE; } + err = ocelot_setup_ptp_traps(ocelot, port, l2, l4); + if (err) { + mutex_unlock(&ocelot->ptp_lock); + return err; + } + + if (l2 && l4) + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + else if (l2) + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; + else if (l4) + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + else + cfg.rx_filter = HWTSTAMP_FILTER_NONE; + /* Commit back the result & save it */ memcpy(&ocelot->hwtstamp_config, &cfg, sizeof(cfg)); mutex_unlock(&ocelot->ptp_lock); @@ -1444,7 +1679,10 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port, SOF_TIMESTAMPING_RAW_HARDWARE; info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) | BIT(HWTSTAMP_TX_ONESTEP_SYNC); - info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT); return 0; } diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c index 99d7376a70a7..337cd08b1a54 100644 --- a/drivers/net/ethernet/mscc/ocelot_vcap.c +++ b/drivers/net/ethernet/mscc/ocelot_vcap.c @@ -1217,6 +1217,22 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot, } EXPORT_SYMBOL(ocelot_vcap_filter_del); +int ocelot_vcap_filter_replace(struct ocelot *ocelot, + struct ocelot_vcap_filter *filter) +{ + struct ocelot_vcap_block *block = &ocelot->block[filter->block_id]; + int index; + + index = ocelot_vcap_block_get_filter_index(block, filter); + if (index < 0) + return index; + + vcap_entry_set(ocelot, index, filter); + + return 0; +} +EXPORT_SYMBOL(ocelot_vcap_filter_replace); + int ocelot_vcap_filter_stats_update(struct ocelot *ocelot, struct ocelot_vcap_filter *filter) { diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c index ca4686094701..0a02d8bd0a3e 100644 --- a/drivers/net/ethernet/natsemi/xtsonic.c +++ b/drivers/net/ethernet/natsemi/xtsonic.c @@ -120,7 +120,7 @@ static const struct net_device_ops xtsonic_netdev_ops = { 
.ndo_set_mac_address = eth_mac_addr, }; -static int __init sonic_probe1(struct net_device *dev) +static int sonic_probe1(struct net_device *dev) { unsigned int silicon_revision; struct sonic_local *lp = netdev_priv(dev); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index df203738511b..0b1865e9f0b5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -565,7 +565,6 @@ struct nfp_net_dp { * @exn_name: Name for Exception interrupt * @shared_handler: Handler for shared interrupts * @shared_name: Name for shared interrupt - * @me_freq_mhz: ME clock_freq (MHz) * @reconfig_lock: Protects @reconfig_posted, @reconfig_timer_active, * @reconfig_sync_present and HW reconfiguration request * regs/machinery from async requests (sync must take @@ -650,8 +649,6 @@ struct nfp_net { irq_handler_t shared_handler; char shared_name[IFNAMSIZ + 8]; - u32 me_freq_mhz; - bool link_up; spinlock_t link_status_lock; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 1de076f55740..cf7882933993 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -1344,7 +1344,7 @@ static int nfp_net_set_coalesce(struct net_device *netdev, * ME timestamp ticks. There are 16 ME clock cycles for each timestamp * count. */ - factor = nn->me_freq_mhz / 16; + factor = nn->tlv_caps.me_freq_mhz / 16; /* Each pair of (usecs, max_frames) fields specifies that interrupts * should be coalesced until diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c index d7ac0307797f..34c0d2ddf9ef 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c @@ -803,8 +803,10 @@ int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size) return -ENOMEM; cache = kzalloc(sizeof(*cache), GFP_KERNEL); - if (!cache) + if (!cache) { + nfp_cpp_area_free(area); return -ENOMEM; + } cache->id = 0; cache->addr = 0; diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index cfeb7620ae20..07a00dd9cfe0 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -1209,7 +1209,7 @@ static void *nixge_get_nvmem_address(struct device *dev) cell = nvmem_cell_get(dev, "address"); if (IS_ERR(cell)) - return NULL; + return cell; mac = nvmem_cell_read(cell, &cell_size); nvmem_cell_put(cell); @@ -1282,7 +1282,7 @@ static int nixge_probe(struct platform_device *pdev) ndev->max_mtu = NIXGE_JUMBO_MTU; mac_addr = nixge_get_nvmem_address(&pdev->dev); - if (mac_addr && is_valid_ether_addr(mac_addr)) { + if (!IS_ERR(mac_addr) && is_valid_ether_addr(mac_addr)) { eth_hw_addr_set(ndev, mac_addr); kfree(mac_addr); } else { diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index a97f691839e0..6958adeca86d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -1045,7 +1045,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, if (!parities) continue; - for (j = 0, bit_idx = 0; bit_idx < 32; j++) { + for (j = 0, bit_idx = 0; bit_idx < 32 && j < 32; j++) { struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j]; if (qed_int_is_parity_flag(p_hwfn, p_bit) && @@ -1083,7 +1083,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, * to 
current group, making them responsible for the * previous assertion. */ - for (j = 0, bit_idx = 0; bit_idx < 32; j++) { + for (j = 0, bit_idx = 0; bit_idx < 32 && j < 32; j++) { long unsigned int bitmask; u8 bit, bit_len; @@ -1382,7 +1382,7 @@ static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn, memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS); for (i = 0; i < NUM_ATTN_REGS; i++) { /* j is array index, k is bit index */ - for (j = 0, k = 0; k < 32; j++) { + for (j = 0, k = 0; k < 32 && j < 32; j++) { struct aeu_invert_reg_bit *p_aeu; p_aeu = &aeu_descs[i].bits[j]; diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 065e9004598e..999abcfe3310 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -1643,6 +1643,13 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev) data_split = true; } } else { + if (unlikely(skb->len > ETH_TX_MAX_NON_LSO_PKT_LEN)) { + DP_ERR(edev, "Unexpected non LSO skb length = 0x%x\n", skb->len); + qede_free_failed_tx_pkt(txq, first_bd, 0, false); + qede_update_tx_producer(txq); + return NETDEV_TX_OK; + } + val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT); } diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 1e6d72adfe43..71523d747e93 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -3480,20 +3480,19 @@ static int ql_adapter_up(struct ql3_adapter *qdev) spin_lock_irqsave(&qdev->hw_lock, hw_flags); - err = ql_wait_for_drvr_lock(qdev); - if (err) { - err = ql_adapter_initialize(qdev); - if (err) { - netdev_err(ndev, "Unable to initialize adapter\n"); - goto err_init; - } - netdev_err(ndev, "Releasing driver lock\n"); - ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); - } else { + if (!ql_wait_for_drvr_lock(qdev)) { netdev_err(ndev, "Could not acquire driver lock\n"); + err = -ENODEV; goto err_lock; } + err = ql_adapter_initialize(qdev); + if (err) { + netdev_err(ndev, "Unable to initialize adapter\n"); + goto err_init; + } + ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); set_bit(QL_ADAPTER_UP, &qdev->flags); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index d51bac7ba5af..bd0607680329 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -1077,8 +1077,14 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter) sds_mbx_size = sizeof(struct qlcnic_sds_mbx); context_id = recv_ctx->context_id; num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS; - ahw->hw_ops->alloc_mbx_args(&cmd, adapter, - QLCNIC_CMD_ADD_RCV_RINGS); + err = ahw->hw_ops->alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_ADD_RCV_RINGS); + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to alloc mbx args %d\n", err); + return err; + } + cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16); /* set up status rings, mbx 2-81 */ diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index bbe21db20417..86c44bc5f73f 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -5217,8 +5217,8 @@ static int rtl_get_ether_clk(struct rtl8169_private *tp) static void rtl_init_mac_address(struct rtl8169_private *tp) { + u8 mac_addr[ETH_ALEN] __aligned(2) = {}; 
struct net_device *dev = tp->dev; - u8 mac_addr[ETH_ALEN]; int rc; rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr); @@ -5233,7 +5233,8 @@ static void rtl_init_mac_address(struct rtl8169_private *tp) if (is_valid_ether_addr(mac_addr)) goto done; - eth_hw_addr_random(dev); + eth_random_addr(mac_addr); + dev->addr_assign_type = NET_ADDR_RANDOM; dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n"); done: eth_hw_addr_set(dev, mac_addr); diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index cc2d907c4c4b..23a336c5096e 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -392,7 +392,7 @@ static int sis96x_get_mac_addr(struct pci_dev *pci_dev, /* get MAC address from EEPROM */ for (i = 0; i < 3; i++) addr[i] = read_eeprom(ioaddr, i + EEPROMMACAddr); - eth_hw_addr_set(net_dev, (u8 *)addr); + eth_hw_addr_set(net_dev, (u8 *)addr); rc = 1; break; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 85208128f135..b7c2579c963b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -485,8 +485,28 @@ static int socfpga_dwmac_resume(struct device *dev) } #endif /* CONFIG_PM_SLEEP */ -static SIMPLE_DEV_PM_OPS(socfpga_dwmac_pm_ops, stmmac_suspend, - socfpga_dwmac_resume); +static int __maybe_unused socfpga_dwmac_runtime_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + + stmmac_bus_clks_config(priv, false); + + return 0; +} + +static int __maybe_unused socfpga_dwmac_runtime_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + + return stmmac_bus_clks_config(priv, true); +} + +static const struct dev_pm_ops socfpga_dwmac_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(stmmac_suspend, socfpga_dwmac_resume) + SET_RUNTIME_PM_OPS(socfpga_dwmac_runtime_suspend, socfpga_dwmac_runtime_resume, NULL) +}; static const struct socfpga_dwmac_ops socfpga_gen5_ops = { .set_phy_mode = socfpga_gen5_set_phy_mode, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 43eead726886..5f129733aabd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -314,6 +314,7 @@ int stmmac_mdio_reset(struct mii_bus *mii); int stmmac_xpcs_setup(struct mii_bus *mii); void stmmac_set_ethtool_ops(struct net_device *netdev); +int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags); void stmmac_ptp_register(struct stmmac_priv *priv); void stmmac_ptp_unregister(struct stmmac_priv *priv); int stmmac_open(struct net_device *dev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index d3f350c25b9b..da8306f60730 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -50,6 +50,13 @@ #include "dwxgmac2.h" #include "hwif.h" +/* As long as the interface is active, we keep the timestamping counter enabled + * with fine resolution and binary rollover. This avoids non-monotonic behavior + * (clock jumps) when changing timestamping settings at runtime. 
+ */ +#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \ + PTP_TCR_TSCTRLSSR) + #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16) #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) @@ -511,6 +518,14 @@ bool stmmac_eee_init(struct stmmac_priv *priv) return true; } +static inline u32 stmmac_cdc_adjust(struct stmmac_priv *priv) +{ + /* Correct the clk domain crossing(CDC) error */ + if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) + return (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate; + return 0; +} + /* stmmac_get_tx_hwtstamp - get HW TX timestamps * @priv: driver private structure * @p : descriptor pointer @@ -524,7 +539,6 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, { struct skb_shared_hwtstamps shhwtstamp; bool found = false; - s64 adjust = 0; u64 ns = 0; if (!priv->hwts_tx_en) @@ -543,12 +557,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, } if (found) { - /* Correct the clk domain crossing(CDC) error */ - if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) { - adjust += -(2 * (NSEC_PER_SEC / - priv->plat->clk_ptp_rate)); - ns += adjust; - } + ns -= stmmac_cdc_adjust(priv); memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); shhwtstamp.hwtstamp = ns_to_ktime(ns); @@ -573,7 +582,6 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, { struct skb_shared_hwtstamps *shhwtstamp = NULL; struct dma_desc *desc = p; - u64 adjust = 0; u64 ns = 0; if (!priv->hwts_rx_en) @@ -586,11 +594,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) { stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); - /* Correct the clk domain crossing(CDC) error */ - if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) { - adjust += 2 * (NSEC_PER_SEC / priv->plat->clk_ptp_rate); - ns -= adjust; - } + ns -= stmmac_cdc_adjust(priv); netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); shhwtstamp = skb_hwtstamps(skb); @@ -616,8 +620,6 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) { struct stmmac_priv *priv = netdev_priv(dev); struct hwtstamp_config config; - struct timespec64 now; - u64 temp = 0; u32 ptp_v2 = 0; u32 tstamp_all = 0; u32 ptp_over_ipv4_udp = 0; @@ -626,11 +628,6 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) u32 snap_type_sel = 0; u32 ts_master_en = 0; u32 ts_event_en = 0; - u32 sec_inc = 0; - u32 value = 0; - bool xmac; - - xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { netdev_alert(priv->dev, "No support for HW time stamping\n"); @@ -792,42 +789,17 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 
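Editorial note on the stmmac_cdc_adjust() helper above: it folds the previously duplicated clock-domain-crossing correction into one expression, 2 * NSEC_PER_SEC / clk_ptp_rate, now subtracted on both the TX and RX timestamp paths. A user-space sketch of the arithmetic; the 250 MHz rate is an assumed example value, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Two PTP clock cycles of error are introduced when the timestamp
 * crosses from the PTP clock domain into the bus clock domain. */
static uint64_t cdc_adjust_ns(uint64_t clk_ptp_rate)
{
    return (2 * NSEC_PER_SEC) / clk_ptp_rate;
}

int main(void)
{
    uint64_t raw_ts = 123456789ULL;   /* hypothetical HW timestamp */
    uint64_t rate = 250000000ULL;     /* assumed 250 MHz PTP clock */

    printf("adjust = %llu ns, corrected ts = %llu ns\n",
           (unsigned long long)cdc_adjust_ns(rate),
           (unsigned long long)(raw_ts - cdc_adjust_ns(rate)));
    return 0;
}

At 250 MHz the correction works out to 8 ns, i.e. two clock periods, which matches the open-coded versions the patch removes.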
0 : 1); priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; - if (!priv->hwts_tx_en && !priv->hwts_rx_en) - stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0); - else { - value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR | - tstamp_all | ptp_v2 | ptp_over_ethernet | - ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en | - ts_master_en | snap_type_sel); - stmmac_config_hw_tstamping(priv, priv->ptpaddr, value); - - /* program Sub Second Increment reg */ - stmmac_config_sub_second_increment(priv, - priv->ptpaddr, priv->plat->clk_ptp_rate, - xmac, &sec_inc); - temp = div_u64(1000000000ULL, sec_inc); - - /* Store sub second increment and flags for later use */ - priv->sub_second_inc = sec_inc; - priv->systime_flags = value; - - /* calculate default added value: - * formula is : - * addend = (2^32)/freq_div_ratio; - * where, freq_div_ratio = 1e9ns/sec_inc - */ - temp = (u64)(temp << 32); - priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); - stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); - - /* initialize system time */ - ktime_get_real_ts64(&now); + priv->systime_flags = STMMAC_HWTS_ACTIVE; - /* lower 32 bits of tv_sec are safe until y2106 */ - stmmac_init_systime(priv, priv->ptpaddr, - (u32)now.tv_sec, now.tv_nsec); + if (priv->hwts_tx_en || priv->hwts_rx_en) { + priv->systime_flags |= tstamp_all | ptp_v2 | + ptp_over_ethernet | ptp_over_ipv6_udp | + ptp_over_ipv4_udp | ts_event_en | + ts_master_en | snap_type_sel; } + stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags); + memcpy(&priv->tstamp_config, &config, sizeof(config)); return copy_to_user(ifr->ifr_data, &config, @@ -856,6 +828,66 @@ static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) } /** + * stmmac_init_tstamp_counter - init hardware timestamping counter + * @priv: driver private structure + * @systime_flags: timestamping flags + * Description: + * Initialize hardware counter for packet timestamping. + * This is valid as long as the interface is open and not suspended. + * Will be rerun after resuming from suspend, case in which the timestamping + * flags updated by stmmac_hwtstamp_set() also need to be restored. 
+ */ +int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags) +{ + bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; + struct timespec64 now; + u32 sec_inc = 0; + u64 temp = 0; + int ret; + + if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) + return -EOPNOTSUPP; + + ret = clk_prepare_enable(priv->plat->clk_ptp_ref); + if (ret < 0) { + netdev_warn(priv->dev, + "failed to enable PTP reference clock: %pe\n", + ERR_PTR(ret)); + return ret; + } + + stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); + priv->systime_flags = systime_flags; + + /* program Sub Second Increment reg */ + stmmac_config_sub_second_increment(priv, priv->ptpaddr, + priv->plat->clk_ptp_rate, + xmac, &sec_inc); + temp = div_u64(1000000000ULL, sec_inc); + + /* Store sub second increment for later use */ + priv->sub_second_inc = sec_inc; + + /* calculate default added value: + * formula is : + * addend = (2^32)/freq_div_ratio; + * where, freq_div_ratio = 1e9ns/sec_inc + */ + temp = (u64)(temp << 32); + priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); + stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); + + /* initialize system time */ + ktime_get_real_ts64(&now); + + /* lower 32 bits of tv_sec are safe until y2106 */ + stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec); + + return 0; +} +EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter); + +/** * stmmac_init_ptp - init PTP * @priv: driver private structure * Description: this is to verify if the HW supports the PTPv1 or PTPv2. @@ -865,9 +897,11 @@ static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) static int stmmac_init_ptp(struct stmmac_priv *priv) { bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; + int ret; - if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) - return -EOPNOTSUPP; + ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE); + if (ret) + return ret; priv->adv_ts = 0; /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */ @@ -3275,10 +3309,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) stmmac_mmc_setup(priv); if (init_ptp) { - ret = clk_prepare_enable(priv->plat->clk_ptp_ref); - if (ret < 0) - netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret); - ret = stmmac_init_ptp(priv); if (ret == -EOPNOTSUPP) netdev_warn(priv->dev, "PTP not supported by HW\n"); @@ -3772,6 +3802,8 @@ int stmmac_release(struct net_device *dev) struct stmmac_priv *priv = netdev_priv(dev); u32 chan; + netif_tx_disable(dev); + if (device_may_wakeup(priv->device)) phylink_speed_down(priv->phylink, false); /* Stop and disconnect the PHY */ @@ -5164,12 +5196,13 @@ read_again: if (likely(!(status & rx_not_ls)) && (likely(priv->synopsys_id >= DWMAC_CORE_4_00) || unlikely(status != llc_snap))) { - if (buf2_len) + if (buf2_len) { buf2_len -= ETH_FCS_LEN; - else + len -= ETH_FCS_LEN; + } else if (buf1_len) { buf1_len -= ETH_FCS_LEN; - - len -= ETH_FCS_LEN; + len -= ETH_FCS_LEN; + } } if (!skb) { @@ -5507,8 +5540,6 @@ static int stmmac_set_features(struct net_device *netdev, netdev_features_t features) { struct stmmac_priv *priv = netdev_priv(netdev); - bool sph_en; - u32 chan; /* Keep the COE Type in case of csum is supporting */ if (features & NETIF_F_RXCSUM) @@ -5520,10 +5551,13 @@ static int stmmac_set_features(struct net_device *netdev, */ stmmac_rx_ipc(priv, priv->hw); - sph_en = (priv->hw->rx_csum > 0) && priv->sph; + if (priv->sph_cap) { + bool sph_en = (priv->hw->rx_csum > 0) && priv->sph; + 
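Editorial note on the addend computation in stmmac_init_tstamp_counter() above: the comment gives the formula addend = 2^32 / freq_div_ratio with freq_div_ratio = 1e9 / sec_inc, i.e. the ratio of the target counter frequency to the input PTP clock scaled into a 32-bit fixed-point accumulator. A standalone recomputation under assumed values (16 ns increment, 125 MHz clock; both illustrative only):

#include <stdint.h>
#include <stdio.h>

static uint32_t default_addend(uint32_t sec_inc_ns, uint64_t clk_ptp_rate)
{
    /* freq_div_ratio = 1e9 / sec_inc: the counter frequency implied
     * by adding sec_inc_ns nanoseconds per accumulator overflow. */
    uint64_t temp = 1000000000ULL / sec_inc_ns;

    /* addend = 2^32 * freq_div_ratio / clk_ptp_rate */
    return (uint32_t)((temp << 32) / clk_ptp_rate);
}

int main(void)
{
    /* Assumed: 16 ns sub-second increment, 125 MHz PTP clock.
     * 1e9/16 = 62.5 MHz target, so addend = 2^32 / 2 = 0x80000000. */
    printf("addend = 0x%08x\n", default_addend(16, 125000000ULL));
    return 0;
}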
u32 chan; - for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) - stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); + for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) + stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); + } return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 232ac98943cd..5d29f336315b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -816,7 +816,7 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev) if (ret) return ret; - clk_prepare_enable(priv->plat->clk_ptp_ref); + stmmac_init_tstamp_counter(priv, priv->systime_flags); } return 0; diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index bfdf89e54752..8a19a06b505d 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -306,7 +306,6 @@ static void sp_setup(struct net_device *dev) { /* Finish setting up the DEVICE info. */ dev->netdev_ops = &sp_netdev_ops; - dev->needs_free_netdev = true; dev->mtu = SIXP_MTU; dev->hard_header_len = AX25_MAX_HEADER_LEN; dev->header_ops = &ax25_header_ops; diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index e2b332b54f06..7da2bb8a443c 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -31,6 +31,8 @@ #define AX_MTU 236 +/* some arch define END as assembly function ending, just undef it */ +#undef END /* SLIP/KISS protocol characters. */ #define END 0300 /* indicates end of frame */ #define ESC 0333 /* indicates byte stuffing */ diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c index cff51731195a..d57472ea077f 100644 --- a/drivers/net/ipa/ipa_cmd.c +++ b/drivers/net/ipa/ipa_cmd.c @@ -661,22 +661,6 @@ void ipa_cmd_pipeline_clear_wait(struct ipa *ipa) wait_for_completion(&ipa->completion); } -void ipa_cmd_pipeline_clear(struct ipa *ipa) -{ - u32 count = ipa_cmd_pipeline_clear_count(); - struct gsi_trans *trans; - - trans = ipa_cmd_trans_alloc(ipa, count); - if (trans) { - ipa_cmd_pipeline_clear_add(trans); - gsi_trans_commit_wait(trans); - ipa_cmd_pipeline_clear_wait(ipa); - } else { - dev_err(&ipa->pdev->dev, - "error allocating %u entry tag transaction\n", count); - } -} - static struct ipa_cmd_info * ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count) { diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h index 69cd085d427d..05ed7e42e184 100644 --- a/drivers/net/ipa/ipa_cmd.h +++ b/drivers/net/ipa/ipa_cmd.h @@ -164,12 +164,6 @@ u32 ipa_cmd_pipeline_clear_count(void); void ipa_cmd_pipeline_clear_wait(struct ipa *ipa); /** - * ipa_cmd_pipeline_clear() - Clear the hardware pipeline - * @ipa: - IPA pointer - */ -void ipa_cmd_pipeline_clear(struct ipa *ipa); - -/** * ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint * @ipa: IPA pointer * @tre_count: Number of elements in the transaction diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 5528d97110d5..03a170993420 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -853,6 +853,7 @@ static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint, u32 offset; u32 val; + /* This should only be changed when HOL_BLOCK_EN is disabled */ offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id); val = hol_block_timer_val(ipa, microseconds); iowrite32(val, ipa->reg_virt + offset); @@ -868,6 
+869,9 @@ ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable) val = enable ? HOL_BLOCK_EN_FMASK : 0; offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id); iowrite32(val, endpoint->ipa->reg_virt + offset); + /* When enabling, the register must be written twice for IPA v4.5+ */ + if (enable && endpoint->ipa->version >= IPA_VERSION_4_5) + iowrite32(val, endpoint->ipa->reg_virt + offset); } void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa) @@ -880,6 +884,7 @@ void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa) if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM) continue; + ipa_endpoint_init_hol_block_enable(endpoint, false); ipa_endpoint_init_hol_block_timer(endpoint, 0); ipa_endpoint_init_hol_block_enable(endpoint, true); } @@ -1631,8 +1636,6 @@ void ipa_endpoint_suspend(struct ipa *ipa) if (ipa->modem_netdev) ipa_modem_suspend(ipa->modem_netdev); - ipa_cmd_pipeline_clear(ipa); - ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); } diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c index cdfa98a76e1f..a448ec198bee 100644 --- a/drivers/net/ipa/ipa_main.c +++ b/drivers/net/ipa/ipa_main.c @@ -28,6 +28,7 @@ #include "ipa_reg.h" #include "ipa_mem.h" #include "ipa_table.h" +#include "ipa_smp2p.h" #include "ipa_modem.h" #include "ipa_uc.h" #include "ipa_interrupt.h" @@ -801,6 +802,11 @@ static int ipa_remove(struct platform_device *pdev) struct device *dev = &pdev->dev; int ret; + /* Prevent the modem from triggering a call to ipa_setup(). This + * also ensures a modem-initiated setup that's underway completes. + */ + ipa_smp2p_irq_disable_setup(ipa); + ret = pm_runtime_get_sync(dev); if (WARN_ON(ret < 0)) goto out_power_put; diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c index ad116bcc0580..d0ab4d70c303 100644 --- a/drivers/net/ipa/ipa_modem.c +++ b/drivers/net/ipa/ipa_modem.c @@ -339,9 +339,6 @@ int ipa_modem_stop(struct ipa *ipa) if (state != IPA_MODEM_STATE_RUNNING) return -EBUSY; - /* Prevent the modem from triggering a call to ipa_setup() */ - ipa_smp2p_disable(ipa); - /* Clean up the netdev and endpoints if it was started */ if (netdev) { struct ipa_priv *priv = netdev_priv(netdev); @@ -369,6 +366,9 @@ static void ipa_modem_crashed(struct ipa *ipa) struct device *dev = &ipa->pdev->dev; int ret; + /* Prevent the modem from triggering a call to ipa_setup() */ + ipa_smp2p_irq_disable_setup(ipa); + ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "error %d getting power to handle crash\n", ret); diff --git a/drivers/net/ipa/ipa_resource.c b/drivers/net/ipa/ipa_resource.c index e3da95d69409..06cec7199382 100644 --- a/drivers/net/ipa/ipa_resource.c +++ b/drivers/net/ipa/ipa_resource.c @@ -52,7 +52,7 @@ static bool ipa_resource_limits_valid(struct ipa *ipa, return false; } - group_count = data->rsrc_group_src_count; + group_count = data->rsrc_group_dst_count; if (!group_count || group_count > IPA_RESOURCE_GROUP_MAX) return false; diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c index df7639c39d71..211233612039 100644 --- a/drivers/net/ipa/ipa_smp2p.c +++ b/drivers/net/ipa/ipa_smp2p.c @@ -53,7 +53,7 @@ * @setup_ready_irq: IPA interrupt triggered by modem to signal GSI ready * @power_on: Whether IPA power is on * @notified: Whether modem has been notified of power state - * @disabled: Whether setup ready interrupt handling is disabled + * @setup_disabled: Whether setup ready 
interrupt handler is disabled * @mutex: Mutex protecting ready-interrupt/shutdown interlock * @panic_notifier: Panic notifier structure */ @@ -67,7 +67,7 @@ struct ipa_smp2p { u32 setup_ready_irq; bool power_on; bool notified; - bool disabled; + bool setup_disabled; struct mutex mutex; struct notifier_block panic_notifier; }; @@ -155,11 +155,9 @@ static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id) struct device *dev; int ret; - mutex_lock(&smp2p->mutex); - - if (smp2p->disabled) - goto out_mutex_unlock; - smp2p->disabled = true; /* If any others arrive, ignore them */ + /* Ignore any (spurious) interrupts received after the first */ + if (smp2p->ipa->setup_complete) + return IRQ_HANDLED; /* Power needs to be active for setup */ dev = &smp2p->ipa->pdev->dev; @@ -176,8 +174,6 @@ static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id) out_power_put: pm_runtime_mark_last_busy(dev); (void)pm_runtime_put_autosuspend(dev); -out_mutex_unlock: - mutex_unlock(&smp2p->mutex); return IRQ_HANDLED; } @@ -313,7 +309,7 @@ void ipa_smp2p_exit(struct ipa *ipa) kfree(smp2p); } -void ipa_smp2p_disable(struct ipa *ipa) +void ipa_smp2p_irq_disable_setup(struct ipa *ipa) { struct ipa_smp2p *smp2p = ipa->smp2p; @@ -322,7 +318,10 @@ void ipa_smp2p_disable(struct ipa *ipa) mutex_lock(&smp2p->mutex); - smp2p->disabled = true; + if (!smp2p->setup_disabled) { + disable_irq(smp2p->setup_ready_irq); + smp2p->setup_disabled = true; + } mutex_unlock(&smp2p->mutex); } diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h index 99a956789638..59cee31a7383 100644 --- a/drivers/net/ipa/ipa_smp2p.h +++ b/drivers/net/ipa/ipa_smp2p.h @@ -27,13 +27,12 @@ int ipa_smp2p_init(struct ipa *ipa, bool modem_init); void ipa_smp2p_exit(struct ipa *ipa); /** - * ipa_smp2p_disable() - Prevent "ipa-setup-ready" interrupt handling + * ipa_smp2p_irq_disable_setup() - Disable the "setup ready" interrupt * @ipa: IPA pointer * - * Prevent handling of the "setup ready" interrupt from the modem. - * This is used before initiating shutdown of the driver. + * Disable the "ipa-setup-ready" interrupt from the modem. 
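Editorial note on the ipa_smp2p_irq_disable_setup() rework above: a "handled" flag is replaced by an actual disable_irq() call performed at most once under the mutex, so a shutdown path and a modem-crash path racing each other cannot double-disable the interrupt. A user-space analogue of the pattern, with a stubbed disable_irq() standing in for the kernel call:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static void disable_irq(int irq)              /* stub for illustration */
{
    printf("irq %d disabled\n", irq);
}

struct smp2p_state {
    pthread_mutex_t mutex;
    bool setup_disabled;
    int setup_ready_irq;
};

static void irq_disable_setup(struct smp2p_state *s)
{
    pthread_mutex_lock(&s->mutex);
    if (!s->setup_disabled) {         /* idempotent: only first call acts */
        disable_irq(s->setup_ready_irq);
        s->setup_disabled = true;
    }
    pthread_mutex_unlock(&s->mutex);
}

int main(void)
{
    struct smp2p_state s = { PTHREAD_MUTEX_INITIALIZER, false, 42 };

    irq_disable_setup(&s);   /* disables the IRQ */
    irq_disable_setup(&s);   /* no-op on the second call */
    return 0;
}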
*/ -void ipa_smp2p_disable(struct ipa *ipa); +void ipa_smp2p_irq_disable_setup(struct ipa *ipa); /** * ipa_smp2p_notify_reset() - Reset modem notification state diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c index cad820568f75..966c3b4ad59d 100644 --- a/drivers/net/mdio/mdio-aspeed.c +++ b/drivers/net/mdio/mdio-aspeed.c @@ -61,6 +61,13 @@ static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum) iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL); + rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl, + !(ctrl & ASPEED_MDIO_CTRL_FIRE), + ASPEED_MDIO_INTERVAL_US, + ASPEED_MDIO_TIMEOUT_US); + if (rc < 0) + return rc; + rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data, data & ASPEED_MDIO_DATA_IDLE, ASPEED_MDIO_INTERVAL_US, diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 3ad7397b8119..ea82ea5660e7 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -710,6 +710,7 @@ static void phylink_resolve(struct work_struct *w) struct phylink_link_state link_state; struct net_device *ndev = pl->netdev; bool mac_config = false; + bool retrigger = false; bool cur_link_state; mutex_lock(&pl->state_mutex); @@ -723,6 +724,7 @@ static void phylink_resolve(struct work_struct *w) link_state.link = false; } else if (pl->mac_link_dropped) { link_state.link = false; + retrigger = true; } else { switch (pl->cur_link_an_mode) { case MLO_AN_PHY: @@ -739,6 +741,19 @@ static void phylink_resolve(struct work_struct *w) case MLO_AN_INBAND: phylink_mac_pcs_get_state(pl, &link_state); + /* The PCS may have a latching link-fail indicator. + * If the link was up, bring the link down and + * re-trigger the resolve. Otherwise, re-read the + * PCS state to get the current status of the link. + */ + if (!link_state.link) { + if (cur_link_state) + retrigger = true; + else + phylink_mac_pcs_get_state(pl, + &link_state); + } + /* If we have a phy, the "up" state is the union of * both the PHY and the MAC */ @@ -747,6 +762,15 @@ static void phylink_resolve(struct work_struct *w) /* Only update if the PHY link is up */ if (pl->phydev && pl->phy_state.link) { + /* If the interface has changed, force a + * link down event if the link isn't already + * down, and re-resolve. + */ + if (link_state.interface != + pl->phy_state.interface) { + retrigger = true; + link_state.link = false; + } link_state.interface = pl->phy_state.interface; /* If we have a PHY, we need to update with @@ -789,7 +813,7 @@ static void phylink_resolve(struct work_struct *w) else phylink_link_up(pl, link_state); } - if (!link_state.link && pl->mac_link_dropped) { + if (!link_state.link && retrigger) { pl->mac_link_dropped = false; queue_work(system_power_efficient_wq, &pl->resolve); } @@ -1364,6 +1388,7 @@ EXPORT_SYMBOL_GPL(phylink_stop); * @mac_wol: true if the MAC needs to receive packets for Wake-on-Lan * * Handle a network device suspend event. There are several cases: + * * - If Wake-on-Lan is not active, we can bring down the link between * the MAC and PHY by calling phylink_stop(). * - If Wake-on-Lan is active, and being handled only by the PHY, we diff --git a/drivers/net/slip/slip.h b/drivers/net/slip/slip.h index c420e5948522..3d7f88b330c1 100644 --- a/drivers/net/slip/slip.h +++ b/drivers/net/slip/slip.h @@ -40,6 +40,8 @@ insmod -oslip_maxdev=nnn */ #define SL_MTU 296 /* 296; I am used to 600- FvK */ +/* some arch define END as assembly function ending, just undef it */ +#undef END /* SLIP protocol characters. 
*/ #define END 0300 /* indicates end of frame */ #define ESC 0333 /* indicates byte stuffing */ diff --git a/drivers/net/tun.c b/drivers/net/tun.c index fecc9a1d293a..1572878c3403 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1010,6 +1010,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); int txq = skb->queue_mapping; + struct netdev_queue *queue; struct tun_file *tfile; int len = skb->len; @@ -1054,6 +1055,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) if (ptr_ring_produce(&tfile->tx_ring, skb)) goto drop; + /* NETIF_F_LLTX requires to do our own update of trans_start */ + queue = netdev_get_tx_queue(dev, txq); + queue->trans_start = jiffies; + /* Notify and wake up reader process */ if (tfile->flags & TUN_FASYNC) kill_fasync(&tfile->fasync, SIGIO, POLL_IN); diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 24753a4da7e6..e303b522efb5 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -181,6 +181,8 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx) min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth32); max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)); + if (max == 0) + max = CDC_NCM_NTB_MAX_SIZE_TX; /* dwNtbOutMaxSize not set */ /* some devices set dwNtbOutMaxSize too low for the above default */ min = min(min, max); diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index f20376c1ef3f..8cd265fc1fd9 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -2228,7 +2228,7 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) if (dev->domain_data.phyirq > 0) phydev->irq = dev->domain_data.phyirq; else - phydev->irq = 0; + phydev->irq = PHY_POLL; netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq); /* set to AUTOMDIX */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 4a02f33f0643..f9877a3e83ac 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -9603,12 +9603,9 @@ static int rtl8152_probe(struct usb_interface *intf, netdev->hw_features &= ~NETIF_F_RXCSUM; } - if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO) { - switch (le16_to_cpu(udev->descriptor.idProduct)) { - case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2: - case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2: - tp->lenovo_macpassthru = 1; - } + if (udev->parent && + le16_to_cpu(udev->parent->descriptor.idVendor) == VENDOR_ID_LENOVO) { + tp->lenovo_macpassthru = 1; } if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial && diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 20fe4cd8f784..abe0149ed917 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1050,6 +1050,14 @@ static const struct net_device_ops smsc95xx_netdev_ops = { .ndo_set_features = smsc95xx_set_features, }; +static void smsc95xx_handle_link_change(struct net_device *net) +{ + struct usbnet *dev = netdev_priv(net); + + phy_print_status(net->phydev); + usbnet_defer_kevent(dev, EVENT_LINK_CHANGE); +} + static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) { struct smsc95xx_priv *pdata; @@ -1154,6 +1162,17 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->min_mtu = ETH_MIN_MTU; dev->net->max_mtu = ETH_DATA_LEN; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; + + ret = phy_connect_direct(dev->net, pdata->phydev, + 
&smsc95xx_handle_link_change, + PHY_INTERFACE_MODE_MII); + if (ret) { + netdev_err(dev->net, "can't attach PHY to %s\n", pdata->mdiobus->id); + goto unregister_mdio; + } + + phy_attached_info(dev->net->phydev); + return 0; unregister_mdio: @@ -1171,47 +1190,25 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) { struct smsc95xx_priv *pdata = dev->driver_priv; + phy_disconnect(dev->net->phydev); mdiobus_unregister(pdata->mdiobus); mdiobus_free(pdata->mdiobus); netif_dbg(dev, ifdown, dev->net, "free pdata\n"); kfree(pdata); } -static void smsc95xx_handle_link_change(struct net_device *net) -{ - struct usbnet *dev = netdev_priv(net); - - phy_print_status(net->phydev); - usbnet_defer_kevent(dev, EVENT_LINK_CHANGE); -} - static int smsc95xx_start_phy(struct usbnet *dev) { - struct smsc95xx_priv *pdata = dev->driver_priv; - struct net_device *net = dev->net; - int ret; + phy_start(dev->net->phydev); - ret = smsc95xx_reset(dev); - if (ret < 0) - return ret; - - ret = phy_connect_direct(net, pdata->phydev, - &smsc95xx_handle_link_change, - PHY_INTERFACE_MODE_MII); - if (ret) { - netdev_err(net, "can't attach PHY to %s\n", pdata->mdiobus->id); - return ret; - } - - phy_attached_info(net->phydev); - phy_start(net->phydev); return 0; } -static int smsc95xx_disconnect_phy(struct usbnet *dev) +static int smsc95xx_stop(struct usbnet *dev) { - phy_stop(dev->net->phydev); - phy_disconnect(dev->net->phydev); + if (dev->net->phydev) + phy_stop(dev->net->phydev); + return 0; } @@ -1966,7 +1963,7 @@ static const struct driver_info smsc95xx_info = { .unbind = smsc95xx_unbind, .link_reset = smsc95xx_link_reset, .reset = smsc95xx_start_phy, - .stop = smsc95xx_disconnect_phy, + .stop = smsc95xx_stop, .rx_fixup = smsc95xx_rx_fixup, .tx_fixup = smsc95xx_tx_fixup, .status = smsc95xx_status, diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 1771d6e5224f..55db6a336f7e 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -3423,7 +3423,6 @@ static struct virtio_driver virtio_net_driver = { .feature_table_size = ARRAY_SIZE(features), .feature_table_legacy = features_legacy, .feature_table_size_legacy = ARRAY_SIZE(features_legacy), - .suppress_used_validation = true, .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 14fae317bc70..fd407c0e2856 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -3261,7 +3261,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) #ifdef CONFIG_PCI_MSI if (adapter->intr.type == VMXNET3_IT_MSIX) { - int i, nvec; + int i, nvec, nvec_allocated; nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ? 
1 : adapter->num_tx_queues; @@ -3274,14 +3274,15 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) for (i = 0; i < nvec; i++) adapter->intr.msix_entries[i].entry = i; - nvec = vmxnet3_acquire_msix_vectors(adapter, nvec); - if (nvec < 0) + nvec_allocated = vmxnet3_acquire_msix_vectors(adapter, nvec); + if (nvec_allocated < 0) goto msix_err; /* If we cannot allocate one MSIx vector per queue * then limit the number of rx queues to 1 */ - if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) { + if (nvec_allocated == VMXNET3_LINUX_MIN_MSIX_VECT && + nvec != VMXNET3_LINUX_MIN_MSIX_VECT) { if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE || adapter->num_rx_queues != 1) { adapter->share_intr = VMXNET3_INTR_TXSHARE; @@ -3291,14 +3292,14 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) } } - adapter->intr.num_intrs = nvec; + adapter->intr.num_intrs = nvec_allocated; return; msix_err: /* If we cannot allocate MSIx vectors use only one rx queue */ dev_info(&adapter->pdev->dev, "Failed to enable MSI-X, error %d. " - "Limiting #rx queues to 1, try MSI.\n", nvec); + "Limiting #rx queues to 1, try MSI.\n", nvec_allocated); adapter->intr.type = VMXNET3_IT_MSI; } diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index ccf677015d5b..b2242a082431 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -497,6 +497,7 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, /* strip the ethernet header added for pass through VRF device */ __skb_pull(skb, skb_network_offset(skb)); + memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); ret = vrf_ip6_local_out(net, skb->sk, skb); if (unlikely(net_xmit_eval(ret))) dev->stats.tx_errors++; @@ -579,6 +580,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, RT_SCOPE_LINK); } + memset(IPCB(skb), 0, sizeof(*IPCB(skb))); ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb); if (unlikely(net_xmit_eval(ret))) vrf_dev->stats.tx_errors++; @@ -768,8 +770,6 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev, skb->dev = vrf_dev; - vrf_nf_set_untracked(skb); - err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb, NULL, vrf_dev, vrf_ip6_out_direct_finish); @@ -790,6 +790,8 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev, if (rt6_need_strict(&ipv6_hdr(skb)->daddr)) return skb; + vrf_nf_set_untracked(skb); + if (qdisc_tx_is_default(vrf_dev) || IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) return vrf_ip6_out_direct(vrf_dev, sk, skb); @@ -998,8 +1000,6 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev, skb->dev = vrf_dev; - vrf_nf_set_untracked(skb); - err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, skb, NULL, vrf_dev, vrf_ip_out_direct_finish); @@ -1021,6 +1021,8 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev, ipv4_is_lbcast(ip_hdr(skb)->daddr)) return skb; + vrf_nf_set_untracked(skb); + if (qdisc_tx_is_default(vrf_dev) || IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) return vrf_ip_out_direct(vrf_dev, sk, skb); diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c index b7197e80f226..9a4c8ff32d9d 100644 --- a/drivers/net/wireguard/allowedips.c +++ b/drivers/net/wireguard/allowedips.c @@ -163,7 +163,7 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key, return exact; } -static inline void connect_node(struct allowedips_node **parent, u8 bit, struct allowedips_node *node) +static inline void connect_node(struct allowedips_node __rcu **parent, u8 bit, struct allowedips_node *node) { 
node->parent_bit_packed = (unsigned long)parent | bit; rcu_assign_pointer(*parent, node); diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c index 551ddaaaf540..a46067c38bf5 100644 --- a/drivers/net/wireguard/device.c +++ b/drivers/net/wireguard/device.c @@ -98,6 +98,7 @@ static int wg_stop(struct net_device *dev) { struct wg_device *wg = netdev_priv(dev); struct wg_peer *peer; + struct sk_buff *skb; mutex_lock(&wg->device_update_lock); list_for_each_entry(peer, &wg->peer_list, peer_list) { @@ -108,7 +109,9 @@ static int wg_stop(struct net_device *dev) wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake); } mutex_unlock(&wg->device_update_lock); - skb_queue_purge(&wg->incoming_handshakes); + while ((skb = ptr_ring_consume(&wg->handshake_queue.ring)) != NULL) + kfree_skb(skb); + atomic_set(&wg->handshake_queue_len, 0); wg_socket_reinit(wg, NULL, NULL); return 0; } @@ -235,14 +238,13 @@ static void wg_destruct(struct net_device *dev) destroy_workqueue(wg->handshake_receive_wq); destroy_workqueue(wg->handshake_send_wq); destroy_workqueue(wg->packet_crypt_wq); - wg_packet_queue_free(&wg->decrypt_queue); - wg_packet_queue_free(&wg->encrypt_queue); + wg_packet_queue_free(&wg->handshake_queue, true); + wg_packet_queue_free(&wg->decrypt_queue, false); + wg_packet_queue_free(&wg->encrypt_queue, false); rcu_barrier(); /* Wait for all the peers to be actually freed. */ wg_ratelimiter_uninit(); memzero_explicit(&wg->static_identity, sizeof(wg->static_identity)); - skb_queue_purge(&wg->incoming_handshakes); free_percpu(dev->tstats); - free_percpu(wg->incoming_handshakes_worker); kvfree(wg->index_hashtable); kvfree(wg->peer_hashtable); mutex_unlock(&wg->device_update_lock); @@ -298,7 +300,6 @@ static int wg_newlink(struct net *src_net, struct net_device *dev, init_rwsem(&wg->static_identity.lock); mutex_init(&wg->socket_update_lock); mutex_init(&wg->device_update_lock); - skb_queue_head_init(&wg->incoming_handshakes); wg_allowedips_init(&wg->peer_allowedips); wg_cookie_checker_init(&wg->cookie_checker, wg); INIT_LIST_HEAD(&wg->peer_list); @@ -316,16 +317,10 @@ static int wg_newlink(struct net *src_net, struct net_device *dev, if (!dev->tstats) goto err_free_index_hashtable; - wg->incoming_handshakes_worker = - wg_packet_percpu_multicore_worker_alloc( - wg_packet_handshake_receive_worker, wg); - if (!wg->incoming_handshakes_worker) - goto err_free_tstats; - wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s", WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name); if (!wg->handshake_receive_wq) - goto err_free_incoming_handshakes; + goto err_free_tstats; wg->handshake_send_wq = alloc_workqueue("wg-kex-%s", WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name); @@ -347,10 +342,15 @@ static int wg_newlink(struct net *src_net, struct net_device *dev, if (ret < 0) goto err_free_encrypt_queue; - ret = wg_ratelimiter_init(); + ret = wg_packet_queue_init(&wg->handshake_queue, wg_packet_handshake_receive_worker, + MAX_QUEUED_INCOMING_HANDSHAKES); if (ret < 0) goto err_free_decrypt_queue; + ret = wg_ratelimiter_init(); + if (ret < 0) + goto err_free_handshake_queue; + ret = register_netdevice(dev); if (ret < 0) goto err_uninit_ratelimiter; @@ -367,18 +367,18 @@ static int wg_newlink(struct net *src_net, struct net_device *dev, err_uninit_ratelimiter: wg_ratelimiter_uninit(); +err_free_handshake_queue: + wg_packet_queue_free(&wg->handshake_queue, false); err_free_decrypt_queue: - wg_packet_queue_free(&wg->decrypt_queue); + wg_packet_queue_free(&wg->decrypt_queue, false); err_free_encrypt_queue: - 
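Editorial note on the wg_stop() change above: instead of skb_queue_purge(), the handshake ptr_ring is consumed entry by entry and the atomic queue-length counter that replaced the old sk_buff_head bookkeeping is reset. The same drain-and-reset shape in plain C, with malloc'd buffers standing in for skbs (single-threaded sketch; the real ring is lock-protected):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 8

struct ring {
    void *slots[RING_SIZE];
    unsigned head, tail;              /* single-threaded sketch */
    atomic_int len;
};

static void *ring_consume(struct ring *r)
{
    if (r->head == r->tail)
        return NULL;
    return r->slots[r->head++ % RING_SIZE];
}

static void drain_ring(struct ring *r)
{
    void *pkt;

    /* Free everything still queued, then reset the shared counter. */
    while ((pkt = ring_consume(r)) != NULL)
        free(pkt);
    atomic_store(&r->len, 0);
}

int main(void)
{
    struct ring r = { .head = 0, .tail = 0, .len = 0 };

    for (int i = 0; i < 3; i++) {
        r.slots[r.tail++ % RING_SIZE] = malloc(64);
        atomic_fetch_add(&r.len, 1);
    }
    drain_ring(&r);
    printf("queued after drain: %d\n", atomic_load(&r.len));
    return 0;
}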
wg_packet_queue_free(&wg->encrypt_queue); + wg_packet_queue_free(&wg->encrypt_queue, false); err_destroy_packet_crypt: destroy_workqueue(wg->packet_crypt_wq); err_destroy_handshake_send: destroy_workqueue(wg->handshake_send_wq); err_destroy_handshake_receive: destroy_workqueue(wg->handshake_receive_wq); -err_free_incoming_handshakes: - free_percpu(wg->incoming_handshakes_worker); err_free_tstats: free_percpu(dev->tstats); err_free_index_hashtable: @@ -398,6 +398,7 @@ static struct rtnl_link_ops link_ops __read_mostly = { static void wg_netns_pre_exit(struct net *net) { struct wg_device *wg; + struct wg_peer *peer; rtnl_lock(); list_for_each_entry(wg, &device_list, device_list) { @@ -407,6 +408,8 @@ static void wg_netns_pre_exit(struct net *net) mutex_lock(&wg->device_update_lock); rcu_assign_pointer(wg->creating_net, NULL); wg_socket_reinit(wg, NULL, NULL); + list_for_each_entry(peer, &wg->peer_list, peer_list) + wg_socket_clear_peer_endpoint_src(peer); mutex_unlock(&wg->device_update_lock); } } diff --git a/drivers/net/wireguard/device.h b/drivers/net/wireguard/device.h index 854bc3d97150..43c7cebbf50b 100644 --- a/drivers/net/wireguard/device.h +++ b/drivers/net/wireguard/device.h @@ -39,21 +39,18 @@ struct prev_queue { struct wg_device { struct net_device *dev; - struct crypt_queue encrypt_queue, decrypt_queue; + struct crypt_queue encrypt_queue, decrypt_queue, handshake_queue; struct sock __rcu *sock4, *sock6; struct net __rcu *creating_net; struct noise_static_identity static_identity; - struct workqueue_struct *handshake_receive_wq, *handshake_send_wq; - struct workqueue_struct *packet_crypt_wq; - struct sk_buff_head incoming_handshakes; - int incoming_handshake_cpu; - struct multicore_worker __percpu *incoming_handshakes_worker; + struct workqueue_struct *packet_crypt_wq,*handshake_receive_wq, *handshake_send_wq; struct cookie_checker cookie_checker; struct pubkey_hashtable *peer_hashtable; struct index_hashtable *index_hashtable; struct allowedips peer_allowedips; struct mutex device_update_lock, socket_update_lock; struct list_head device_list, peer_list; + atomic_t handshake_queue_len; unsigned int num_peers, device_update_gen; u32 fwmark; u16 incoming_port; diff --git a/drivers/net/wireguard/main.c b/drivers/net/wireguard/main.c index 75dbe77b0b4b..ee4da9ab8013 100644 --- a/drivers/net/wireguard/main.c +++ b/drivers/net/wireguard/main.c @@ -17,7 +17,7 @@ #include <linux/genetlink.h> #include <net/rtnetlink.h> -static int __init mod_init(void) +static int __init wg_mod_init(void) { int ret; @@ -60,7 +60,7 @@ err_allowedips: return ret; } -static void __exit mod_exit(void) +static void __exit wg_mod_exit(void) { wg_genetlink_uninit(); wg_device_uninit(); @@ -68,8 +68,8 @@ static void __exit mod_exit(void) wg_allowedips_slab_uninit(); } -module_init(mod_init); -module_exit(mod_exit); +module_init(wg_mod_init); +module_exit(wg_mod_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("WireGuard secure network tunnel"); MODULE_AUTHOR("Jason A. 
Donenfeld <Jason@zx2c4.com>"); diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c index 48e7b982a307..1de413b19e34 100644 --- a/drivers/net/wireguard/queueing.c +++ b/drivers/net/wireguard/queueing.c @@ -38,11 +38,11 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, return 0; } -void wg_packet_queue_free(struct crypt_queue *queue) +void wg_packet_queue_free(struct crypt_queue *queue, bool purge) { free_percpu(queue->worker); - WARN_ON(!__ptr_ring_empty(&queue->ring)); - ptr_ring_cleanup(&queue->ring, NULL); + WARN_ON(!purge && !__ptr_ring_empty(&queue->ring)); + ptr_ring_cleanup(&queue->ring, purge ? (void(*)(void*))kfree_skb : NULL); } #define NEXT(skb) ((skb)->prev) diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h index 4ef2944a68bc..e2388107f7fd 100644 --- a/drivers/net/wireguard/queueing.h +++ b/drivers/net/wireguard/queueing.h @@ -23,7 +23,7 @@ struct sk_buff; /* queueing.c APIs: */ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, unsigned int len); -void wg_packet_queue_free(struct crypt_queue *queue); +void wg_packet_queue_free(struct crypt_queue *queue, bool purge); struct multicore_worker __percpu * wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr); diff --git a/drivers/net/wireguard/ratelimiter.c b/drivers/net/wireguard/ratelimiter.c index 3fedd1d21f5e..dd55e5c26f46 100644 --- a/drivers/net/wireguard/ratelimiter.c +++ b/drivers/net/wireguard/ratelimiter.c @@ -176,12 +176,12 @@ int wg_ratelimiter_init(void) (1U << 14) / sizeof(struct hlist_head))); max_entries = table_size * 8; - table_v4 = kvzalloc(table_size * sizeof(*table_v4), GFP_KERNEL); + table_v4 = kvcalloc(table_size, sizeof(*table_v4), GFP_KERNEL); if (unlikely(!table_v4)) goto err_kmemcache; #if IS_ENABLED(CONFIG_IPV6) - table_v6 = kvzalloc(table_size * sizeof(*table_v6), GFP_KERNEL); + table_v6 = kvcalloc(table_size, sizeof(*table_v6), GFP_KERNEL); if (unlikely(!table_v6)) { kvfree(table_v4); goto err_kmemcache; diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c index 7dc84bcca261..7b8df406c773 100644 --- a/drivers/net/wireguard/receive.c +++ b/drivers/net/wireguard/receive.c @@ -116,8 +116,8 @@ static void wg_receive_handshake_packet(struct wg_device *wg, return; } - under_load = skb_queue_len(&wg->incoming_handshakes) >= - MAX_QUEUED_INCOMING_HANDSHAKES / 8; + under_load = atomic_read(&wg->handshake_queue_len) >= + MAX_QUEUED_INCOMING_HANDSHAKES / 8; if (under_load) { last_under_load = ktime_get_coarse_boottime_ns(); } else if (last_under_load) { @@ -212,13 +212,14 @@ static void wg_receive_handshake_packet(struct wg_device *wg, void wg_packet_handshake_receive_worker(struct work_struct *work) { - struct wg_device *wg = container_of(work, struct multicore_worker, - work)->ptr; + struct crypt_queue *queue = container_of(work, struct multicore_worker, work)->ptr; + struct wg_device *wg = container_of(queue, struct wg_device, handshake_queue); struct sk_buff *skb; - while ((skb = skb_dequeue(&wg->incoming_handshakes)) != NULL) { + while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) { wg_receive_handshake_packet(wg, skb); dev_kfree_skb(skb); + atomic_dec(&wg->handshake_queue_len); cond_resched(); } } @@ -553,22 +554,28 @@ void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb) case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION): case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE): case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): { - int cpu; - - if 
(skb_queue_len(&wg->incoming_handshakes) > - MAX_QUEUED_INCOMING_HANDSHAKES || - unlikely(!rng_is_initialized())) { + int cpu, ret = -EBUSY; + + if (unlikely(!rng_is_initialized())) + goto drop; + if (atomic_read(&wg->handshake_queue_len) > MAX_QUEUED_INCOMING_HANDSHAKES / 2) { + if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) { + ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb); + spin_unlock_bh(&wg->handshake_queue.ring.producer_lock); + } + } else + ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb); + if (ret) { + drop: net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n", wg->dev->name, skb); goto err; } - skb_queue_tail(&wg->incoming_handshakes, skb); - /* Queues up a call to packet_process_queued_handshake_ - * packets(skb): - */ - cpu = wg_cpumask_next_online(&wg->incoming_handshake_cpu); + atomic_inc(&wg->handshake_queue_len); + cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu); + /* Queues up a call to packet_process_queued_handshake_packets(skb): */ queue_work_on(cpu, wg->handshake_receive_wq, - &per_cpu_ptr(wg->incoming_handshakes_worker, cpu)->work); + &per_cpu_ptr(wg->handshake_queue.worker, cpu)->work); break; } case cpu_to_le32(MESSAGE_DATA): diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c index 8c496b747108..6f07b949cb81 100644 --- a/drivers/net/wireguard/socket.c +++ b/drivers/net/wireguard/socket.c @@ -308,7 +308,7 @@ void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer) { write_lock_bh(&peer->endpoint_lock); memset(&peer->endpoint.src6, 0, sizeof(peer->endpoint.src6)); - dst_cache_reset(&peer->endpoint_cache); + dst_cache_reset_now(&peer->endpoint_cache); write_unlock_bh(&peer->endpoint_lock); } diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c index 26c7ae242db6..49c0b1ad40a0 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.c +++ b/drivers/net/wireless/ath/ath11k/mhi.c @@ -533,7 +533,11 @@ static int ath11k_mhi_set_state(struct ath11k_pci *ab_pci, ret = mhi_pm_suspend(ab_pci->mhi_ctrl); break; case ATH11K_MHI_RESUME: - ret = mhi_pm_resume(ab_pci->mhi_ctrl); + /* Do force MHI resume as some devices like QCA6390, WCN6855 + * are not in M3 state but they are functional. So just ignore + * the MHI state while resuming. + */ + ret = mhi_pm_resume_force(ab_pci->mhi_ctrl); break; case ATH11K_MHI_TRIGGER_RDDM: ret = mhi_force_rddm_mode(ab_pci->mhi_ctrl); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c index c875bf35533c..009dd4be597b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c @@ -86,6 +86,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans, if (len < tlv_len) { IWL_ERR(trans, "invalid TLV len: %zd/%u\n", len, tlv_len); + kfree(reduce_power_data); reduce_power_data = ERR_PTR(-EINVAL); goto out; } @@ -105,6 +106,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans, IWL_DEBUG_FW(trans, "Couldn't allocate (more) reduce_power_data\n"); + kfree(reduce_power_data); reduce_power_data = ERR_PTR(-ENOMEM); goto out; } @@ -134,6 +136,10 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans, done: if (!size) { IWL_DEBUG_FW(trans, "Empty REDUCE_POWER, skipping.\n"); + /* Better safe than sorry, but 'reduce_power_data' should + * always be NULL if !size. 
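Editorial note on the handshake enqueue path above: once the queue passes half capacity the producer stops blocking and uses spin_trylock_bh(), dropping the packet if the lock is contended, which bounds time spent in softirq context under a handshake flood. A pthread sketch of that admission policy; the capacity constant is invented, and the unlocked length check deliberately mirrors the racy atomic_read() in the original (worst case a borderline packet takes the slow path):

#include <pthread.h>
#include <stdio.h>

#define MAX_QUEUED 4096

static pthread_mutex_t producer_lock = PTHREAD_MUTEX_INITIALIZER;
static int queue_len;                 /* protected by producer_lock */

/* Returns 0 on enqueue, -1 when the packet should be dropped. */
static int enqueue_handshake(void *pkt)
{
    int ret = -1;

    (void)pkt;
    if (queue_len > MAX_QUEUED / 2) {
        /* Under load: only enqueue if the lock is free right now. */
        if (pthread_mutex_trylock(&producer_lock) == 0) {
            if (queue_len < MAX_QUEUED) {
                queue_len++;
                ret = 0;
            }
            pthread_mutex_unlock(&producer_lock);
        }
    } else {
        pthread_mutex_lock(&producer_lock);
        queue_len++;
        ret = 0;
        pthread_mutex_unlock(&producer_lock);
    }
    return ret;
}

int main(void)
{
    printf("enqueue: %d\n", enqueue_handshake(NULL));
    return 0;
}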
+ */ + kfree(reduce_power_data); reduce_power_data = ERR_PTR(-ENOENT); goto out; } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 36196e07b1a0..5cec467b995b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1313,23 +1313,31 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op) const struct iwl_op_mode_ops *ops = op->ops; struct dentry *dbgfs_dir = NULL; struct iwl_op_mode *op_mode = NULL; + int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY; + + for (retry = 0; retry <= max_retry; retry++) { #ifdef CONFIG_IWLWIFI_DEBUGFS - drv->dbgfs_op_mode = debugfs_create_dir(op->name, - drv->dbgfs_drv); - dbgfs_dir = drv->dbgfs_op_mode; + drv->dbgfs_op_mode = debugfs_create_dir(op->name, + drv->dbgfs_drv); + dbgfs_dir = drv->dbgfs_op_mode; #endif - op_mode = ops->start(drv->trans, drv->trans->cfg, &drv->fw, dbgfs_dir); + op_mode = ops->start(drv->trans, drv->trans->cfg, + &drv->fw, dbgfs_dir); + + if (op_mode) + return op_mode; + + IWL_ERR(drv, "retry init count %d\n", retry); #ifdef CONFIG_IWLWIFI_DEBUGFS - if (!op_mode) { debugfs_remove_recursive(drv->dbgfs_op_mode); drv->dbgfs_op_mode = NULL; - } #endif + } - return op_mode; + return NULL; } static void _iwl_op_mode_stop(struct iwl_drv *drv) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h index 2e2d60a58692..0fd009e6d685 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h @@ -89,4 +89,7 @@ void iwl_drv_stop(struct iwl_drv *drv); #define IWL_EXPORT_SYMBOL(sym) #endif +/* max retry for init flow */ +#define IWL_MAX_INIT_RETRY 2 + #endif /* __iwl_drv_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 9fb9c7dad314..897e3b91ddb2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -16,6 +16,7 @@ #include <net/ieee80211_radiotap.h> #include <net/tcp.h> +#include "iwl-drv.h" #include "iwl-op-mode.h" #include "iwl-io.h" #include "mvm.h" @@ -1117,9 +1118,30 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; + int retry, max_retry = 0; mutex_lock(&mvm->mutex); - ret = __iwl_mvm_mac_start(mvm); + + /* we are starting the mac not in error flow, and restart is enabled */ + if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) && + iwlwifi_mod_params.fw_restart) { + max_retry = IWL_MAX_INIT_RETRY; + /* + * This will prevent mac80211 recovery flows to trigger during + * init failures + */ + set_bit(IWL_MVM_STATUS_STARTING, &mvm->status); + } + + for (retry = 0; retry <= max_retry; retry++) { + ret = __iwl_mvm_mac_start(mvm); + if (!ret) + break; + + IWL_ERR(mvm, "mac start retry %d\n", retry); + } + clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status); + mutex_unlock(&mvm->mutex); return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 2b1dcd60e00f..a72d85086fe3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1123,6 +1123,8 @@ struct iwl_mvm { * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA * @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it) + * 
@IWL_MVM_STATUS_STARTING: starting mac, + * used to disable restart flow while in STARTING state */ enum iwl_mvm_status { IWL_MVM_STATUS_HW_RFKILL, @@ -1134,6 +1136,7 @@ enum iwl_mvm_status { IWL_MVM_STATUS_FIRMWARE_RUNNING, IWL_MVM_STATUS_NEED_FLUSH_P2P, IWL_MVM_STATUS_IN_D3, + IWL_MVM_STATUS_STARTING, }; /* Keep track of completed init configuration */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 232ad531d612..cd08e289cd9a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -686,6 +686,7 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm) int ret; rtnl_lock(); + wiphy_lock(mvm->hw->wiphy); mutex_lock(&mvm->mutex); ret = iwl_run_init_mvm_ucode(mvm); @@ -701,6 +702,7 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm) iwl_mvm_stop_device(mvm); mutex_unlock(&mvm->mutex); + wiphy_unlock(mvm->hw->wiphy); rtnl_unlock(); if (ret < 0) @@ -1600,6 +1602,9 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) */ if (!mvm->fw_restart && fw_error) { iwl_fw_error_collect(&mvm->fwrt, false); + } else if (test_bit(IWL_MVM_STATUS_STARTING, + &mvm->status)) { + IWL_ERR(mvm, "Starting mac, retry will be triggered anyway\n"); } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_reprobe *reprobe; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index c574f041f096..5ce07f28e7c3 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1339,9 +1339,13 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device, u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb, u8 rf_id, u8 no_160, u8 cores) { + int num_devices = ARRAY_SIZE(iwl_dev_info_table); int i; - for (i = ARRAY_SIZE(iwl_dev_info_table) - 1; i >= 0; i--) { + if (!num_devices) + return NULL; + + for (i = num_devices - 1; i >= 0; i--) { const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i]; if (dev_info->device != (u16)IWL_CFG_ANY && @@ -1442,8 +1446,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ if (iwl_trans->trans_cfg->rf_id && iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000 && - !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans)) + !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans)) { + ret = -EINVAL; goto out_free_trans; + } dev_info = iwl_pci_find_dev_info(pdev->device, pdev->subsystem_device, CSR_HW_REV_TYPE(iwl_trans->hw_rev), diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c index 5ee52cd70a4b..d1806f198aed 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c @@ -143,8 +143,6 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, if (!wcid) wcid = &dev->mt76.global_wcid; - pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); - if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta) { struct mt7615_phy *phy = &dev->phy; @@ -164,6 +162,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, if (id < 0) return id; + pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta, pid, key, false); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c index bd2939ebcbf4..5a6d7829c6e0 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c @@ -43,19 +43,11 @@ EXPORT_SYMBOL_GPL(mt7663_usb_sdio_reg_map); static void mt7663_usb_sdio_write_txwi(struct mt7615_dev *dev, struct mt76_wcid *wcid, enum mt76_txq_id qid, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, int pid, struct sk_buff *skb) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_key_conf *key = info->control.hw_key; - __le32 *txwi; - int pid; - - if (!wcid) - wcid = &dev->mt76.global_wcid; - - pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb); + __le32 *txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE); - txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE); memset(txwi, 0, MT_USB_TXD_SIZE); mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, false); skb_push(skb, MT_USB_TXD_SIZE); @@ -194,10 +186,14 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); struct sk_buff *skb = tx_info->skb; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_key_conf *key = info->control.hw_key; struct mt7615_sta *msta; - int pad; + int pad, err, pktid; msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL; + if (!wcid) + wcid = &dev->mt76.global_wcid; + if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta && !msta->rate_probe) { /* request to configure sampling rate */ @@ -207,7 +203,8 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, spin_unlock_bh(&dev->mt76.lock); } - mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, skb); + pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb); + mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb); if (mt76_is_usb(mdev)) { u32 len = skb->len; @@ -217,7 +214,12 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, pad = round_up(skb->len, 4) - skb->len; } - return mt76_skb_adjust_pad(skb, pad); + err = mt76_skb_adjust_pad(skb, pad); + if (err) + /* Release pktid in case of error. */ + idr_remove(&wcid->pktid, pktid); + + return err; } EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_prepare_skb); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c index efd70ddc2fd1..2c6c03809b20 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c @@ -72,6 +72,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data, bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU; enum mt76_qsel qsel; u32 flags; + int err; mt76_insert_hdr_pad(tx_info->skb); @@ -106,7 +107,12 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data, ewma_pktlen_add(&msta->pktlen, tx_info->skb->len); } - return mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags); + err = mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags); + if (err && wcid) + /* Release pktid in case of error. 
*/ + idr_remove(&wcid->pktid, pid); + + return err; } EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index 5fcf35f2d9fb..809dc18e5083 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -1151,8 +1151,14 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, } } - pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); + t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); + t->skb = tx_info->skb; + + id = mt76_token_consume(mdev, &t); + if (id < 0) + return id; + pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid, key, false); @@ -1178,13 +1184,6 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, txp->bss_idx = mvif->idx; } - t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); - t->skb = tx_info->skb; - - id = mt76_token_consume(mdev, &t); - if (id < 0) - return id; - txp->token = cpu_to_le16(id); if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) txp->rept_wds_wcid = cpu_to_le16(wcid->idx); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 899957b9d0f1..852d5d97c70b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -176,7 +176,7 @@ mt7915_get_phy_mode(struct ieee80211_vif *vif, struct ieee80211_sta *sta) if (ht_cap->ht_supported) mode |= PHY_MODE_GN; - if (he_cap->has_he) + if (he_cap && he_cap->has_he) mode |= PHY_MODE_AX_24G; } else if (band == NL80211_BAND_5GHZ) { mode |= PHY_MODE_A; @@ -187,7 +187,7 @@ mt7915_get_phy_mode(struct ieee80211_vif *vif, struct ieee80211_sta *sta) if (vht_cap->vht_supported) mode |= PHY_MODE_AC; - if (he_cap->has_he) + if (he_cap && he_cap->has_he) mode |= PHY_MODE_AX_5G; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c index 137f86a6dbf8..bdec508b6b9f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c @@ -142,15 +142,11 @@ out: static void mt7921s_write_txwi(struct mt7921_dev *dev, struct mt76_wcid *wcid, enum mt76_txq_id qid, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, int pid, struct sk_buff *skb) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_key_conf *key = info->control.hw_key; - __le32 *txwi; - int pid; + __le32 *txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE); - pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb); - txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE); memset(txwi, 0, MT_SDIO_TXD_SIZE); mt7921_mac_write_txwi(dev, txwi, skb, wcid, key, pid, false); skb_push(skb, MT_SDIO_TXD_SIZE); @@ -163,8 +159,9 @@ int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, { struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); + struct ieee80211_key_conf *key = info->control.hw_key; struct sk_buff *skb = tx_info->skb; - int pad; + int err, pad, pktid; if (unlikely(tx_info->skb->len <= ETH_HLEN)) return -EINVAL; @@ -181,12 +178,18 @@ int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, } } - mt7921s_write_txwi(dev, wcid, qid, sta, skb); + pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb); + mt7921s_write_txwi(dev, wcid, qid, sta, key, pktid, 
skb); mt7921_skb_add_sdio_hdr(skb, MT7921_SDIO_DATA); pad = round_up(skb->len, 4) - skb->len; - return mt76_skb_adjust_pad(skb, pad); + err = mt76_skb_adjust_pad(skb, pad); + if (err) + /* Release pktid in case of error. */ + idr_remove(&wcid->pktid, pktid); + + return err; } void mt7921s_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e) diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index 11719ef034d8..6b8c9dc80542 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -173,7 +173,7 @@ mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid, if (!(cb->flags & MT_TX_CB_DMA_DONE)) continue; - if (!time_is_after_jiffies(cb->jiffies + + if (time_is_after_jiffies(cb->jiffies + MT_TX_STATUS_SKB_TIMEOUT)) continue; } diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c index e4473a551241..74c3d8cb3100 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c @@ -25,6 +25,9 @@ static bool rt2x00usb_check_usb_error(struct rt2x00_dev *rt2x00dev, int status) if (status == -ENODEV || status == -ENOENT) return true; + if (!test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) + return false; + if (status == -EPROTO || status == -ETIMEDOUT) rt2x00dev->num_proto_errs++; else diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index 212aaf577d3c..65ef3dc9d061 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -91,7 +91,6 @@ static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, info->section_num = GET_FW_HDR_SEC_NUM(fw); info->hdr_len = RTW89_FW_HDR_SIZE + info->section_num * RTW89_FW_SECTION_HDR_SIZE; - SET_FW_HDR_PART_SIZE(fw, FWDL_SECTION_PER_PKT_LEN); bin = fw + info->hdr_len; @@ -275,6 +274,7 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 l } skb_put_data(skb, fw, len); + SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN); rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C, H2C_CAT_MAC, H2C_CL_MAC_FWDL, H2C_FUNC_MAC_FWHDR_DL, len); diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h index 7ee0d9323310..36e8d0da6c1e 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.h +++ b/drivers/net/wireless/realtek/rtw89/fw.h @@ -282,8 +282,10 @@ struct rtw89_h2creg_sch_tx_en { le32_get_bits(*((__le32 *)(fwhdr) + 6), GENMASK(15, 8)) #define GET_FW_HDR_CMD_VERSERION(fwhdr) \ le32_get_bits(*((__le32 *)(fwhdr) + 7), GENMASK(31, 24)) -#define SET_FW_HDR_PART_SIZE(fwhdr, val) \ - le32p_replace_bits((__le32 *)(fwhdr) + 7, val, GENMASK(15, 0)) +static inline void SET_FW_HDR_PART_SIZE(void *fwhdr, u32 val) +{ + le32p_replace_bits((__le32 *)fwhdr + 7, val, GENMASK(15, 0)); +} #define SET_CTRL_INFO_MACID(table, val) \ le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0)) diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c index cff3b43ca4d7..12c03dacb5dd 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem.c +++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c @@ -181,9 +181,9 @@ void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer) bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem) { struct ipc_mem_channel *channel; + bool hpda_ctrl_pending = false; struct sk_buff_head *ul_list; bool hpda_pending = false; - bool forced_hpdu = false; struct 
ipc_pipe *pipe; int i; @@ -200,15 +200,19 @@ bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem) ul_list = &channel->ul_list; /* Fill the transfer descriptor with the uplink buffer info. */ - hpda_pending |= ipc_protocol_ul_td_send(ipc_imem->ipc_protocol, + if (!ipc_imem_check_wwan_ips(channel)) { + hpda_ctrl_pending |= + ipc_protocol_ul_td_send(ipc_imem->ipc_protocol, pipe, ul_list); - - /* forced HP update needed for non data channels */ - if (hpda_pending && !ipc_imem_check_wwan_ips(channel)) - forced_hpdu = true; + } else { + hpda_pending |= + ipc_protocol_ul_td_send(ipc_imem->ipc_protocol, + pipe, ul_list); + } } - if (forced_hpdu) { + /* forced HP update needed for non data channels */ + if (hpda_ctrl_pending) { hpda_pending = false; ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol, IPC_HP_UL_WRITE_TD); @@ -527,6 +531,9 @@ static void ipc_imem_run_state_worker(struct work_struct *instance) return; } + if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag)) + ipc_devlink_deinit(ipc_imem->ipc_devlink); + if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg)) ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem); @@ -1167,7 +1174,7 @@ void ipc_imem_cleanup(struct iosm_imem *ipc_imem) ipc_port_deinit(ipc_imem->ipc_port); } - if (ipc_imem->ipc_devlink) + if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag)) ipc_devlink_deinit(ipc_imem->ipc_devlink); ipc_imem_device_ipc_uninit(ipc_imem); @@ -1263,7 +1270,6 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id, ipc_imem->pci_device_id = device_id; - ipc_imem->ev_cdev_write_pending = false; ipc_imem->cp_version = 0; ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP; @@ -1331,6 +1337,8 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id, if (ipc_flash_link_establish(ipc_imem)) goto devlink_channel_fail; + + set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag); } return ipc_imem; devlink_channel_fail: diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.h b/drivers/net/wwan/iosm/iosm_ipc_imem.h index 6be6708b4eec..6b8a837faef2 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem.h +++ b/drivers/net/wwan/iosm/iosm_ipc_imem.h @@ -101,6 +101,7 @@ struct ipc_chnl_cfg; #define IOSM_CHIP_INFO_SIZE_MAX 100 #define FULLY_FUNCTIONAL 0 +#define IOSM_DEVLINK_INIT 1 /* List of the supported UL/DL pipes. */ enum ipc_mem_pipes { @@ -335,8 +336,6 @@ enum ipc_phase { * process the irq actions. * @flag: Flag to monitor the state of driver * @td_update_timer_suspended: if true then td update timer suspend - * @ev_cdev_write_pending: 0 means inform the IPC tasklet to pass - * the accumulated uplink buffers to CP. 
* @ev_mux_net_transmit_pending:0 means inform the IPC tasklet to pass * @reset_det_n: Reset detect flag * @pcie_wake_n: Pcie wake flag @@ -374,7 +373,6 @@ struct iosm_imem { u8 ev_irq_pending[IPC_IRQ_VECTORS]; unsigned long flag; u8 td_update_timer_suspended:1, - ev_cdev_write_pending:1, ev_mux_net_transmit_pending:1, reset_det_n:1, pcie_wake_n:1; diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c index 825e8e5ffb2a..831cdae28e8a 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c +++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c @@ -41,7 +41,6 @@ void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id, static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg, void *msg, size_t size) { - ipc_imem->ev_cdev_write_pending = false; ipc_imem_ul_send(ipc_imem); return 0; @@ -50,11 +49,6 @@ static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg, /* Through tasklet to do sio write. */ static int ipc_imem_call_cdev_write(struct iosm_imem *ipc_imem) { - if (ipc_imem->ev_cdev_write_pending) - return -1; - - ipc_imem->ev_cdev_write_pending = true; - return ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_cdev_write, 0, NULL, 0, false); } @@ -450,6 +444,7 @@ void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink) /* Release the pipe resources */ ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe); ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe); + ipc_imem->nr_of_channels--; } void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink, diff --git a/drivers/nfc/virtual_ncidev.c b/drivers/nfc/virtual_ncidev.c index 221fa3bb8705..f577449e4935 100644 --- a/drivers/nfc/virtual_ncidev.c +++ b/drivers/nfc/virtual_ncidev.c @@ -202,7 +202,7 @@ static int __init virtual_ncidev_init(void) miscdev.minor = MISC_DYNAMIC_MINOR; miscdev.name = "virtual_nci"; miscdev.fops = &virtual_ncidev_fops; - miscdev.mode = S_IALLUGO; + miscdev.mode = 0600; return misc_register(&miscdev); } diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 4b5de8f5435a..1af8a4513708 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -666,6 +666,7 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl, struct request *rq) { if (ctrl->state != NVME_CTRL_DELETING_NOIO && + ctrl->state != NVME_CTRL_DELETING && ctrl->state != NVME_CTRL_DEAD && !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) && !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH)) @@ -895,10 +896,19 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); cmnd->write_zeroes.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); - if (nvme_ns_has_pi(ns)) + + if (nvme_ns_has_pi(ns)) { cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT); - else - cmnd->write_zeroes.control = 0; + + switch (ns->pi_type) { + case NVME_NS_DPS_PI_TYPE1: + case NVME_NS_DPS_PI_TYPE2: + cmnd->write_zeroes.reftag = + cpu_to_le32(t10_pi_ref_tag(req)); + break; + } + } + return BLK_STS_OK; } @@ -1740,9 +1750,20 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) */ if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT))) return -EINVAL; - if (ctrl->max_integrity_segments) - ns->features |= - (NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); + + ns->features |= NVME_NS_EXT_LBAS; + + /* + * The current fabrics transport drivers support namespace + * metadata formats only if nvme_ns_has_pi() returns true. 
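The Write Zeroes hunk above seeds cmnd->write_zeroes.reftag for Type 1/2 protection information. A minimal standalone sketch of that arithmetic, assuming the usual Type 1 convention that the initial reference tag is the low 32 bits of the starting device LBA (the helper below is a stand-in, not the kernel's t10_pi_ref_tag()):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for nvme_sect_to_lba() plus t10_pi_ref_tag(): convert the
 * request's 512-byte start sector to a device LBA, then truncate to
 * the 32-bit reference tag Type 1/2 PI expects for the first block
 * of the command. */
static uint32_t ref_tag_for(uint64_t start_sector, unsigned int lba_shift)
{
	uint64_t lba = start_sector >> (lba_shift - 9);

	return (uint32_t)lba;
}

int main(void)
{
	/* a 4 KiB-block namespace (lba_shift = 12), start sector 1048576 */
	printf("reftag = 0x%08x\n", ref_tag_for(1048576, 12));
	return 0;
}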
+ * Suppress support for all other formats so the namespace will + * have a 0 capacity and not be usable through the block stack. + * + * Note, this check will need to be modified if any drivers + * gain the ability to use other metadata formats. + */ + if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns)) + ns->features |= NVME_NS_METADATA_SUPPORTED; } else { /* * For PCIe controllers, we can't easily remap the separate @@ -2469,6 +2490,20 @@ static const struct nvme_core_quirk_entry core_quirks[] = { .vid = 0x14a4, .fr = "22301111", .quirks = NVME_QUIRK_SIMPLE_SUSPEND, + }, + { + /* + * This Kioxia CD6-V Series / HPE PE8030 device times out and + * aborts I/O during any load, but more easily reproducible + * with discards (fstrim). + * + * The device is left in a state where it is also not possible + * to use "nvme set-feature" to disable APST, but booting with + * nvme_core.default_ps_max_latency=0 works. + */ + .vid = 0x1e0f, + .mn = "KCD6XVUL6T40", + .quirks = NVME_QUIRK_NO_APST, } }; @@ -2673,8 +2708,9 @@ static bool nvme_validate_cntlid(struct nvme_subsystem *subsys, if (tmp->cntlid == ctrl->cntlid) { dev_err(ctrl->device, - "Duplicate cntlid %u with %s, rejecting\n", - ctrl->cntlid, dev_name(tmp->device)); + "Duplicate cntlid %u with %s, subsys %s, rejecting\n", + ctrl->cntlid, dev_name(tmp->device), + subsys->subnqn); return false; } diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index c5a2b71c5268..282d54117e0a 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -698,6 +698,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, if (token >= 0) pr_warn("I/O fail on reconnect controller after %d sec\n", token); + else + token = -1; + opts->fast_io_fail_tmo = token; break; case NVMF_OPT_HOSTNQN: diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 7f2071f2460c..13e5d503ed07 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -866,7 +866,7 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) } if (ana_log_size > ctrl->ana_log_size) { nvme_mpath_stop(ctrl); - kfree(ctrl->ana_log_buf); + nvme_mpath_uninit(ctrl); ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL); if (!ctrl->ana_log_buf) return -ENOMEM; @@ -886,4 +886,5 @@ void nvme_mpath_uninit(struct nvme_ctrl *ctrl) { kfree(ctrl->ana_log_buf); ctrl->ana_log_buf = NULL; + ctrl->ana_log_size = 0; } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index b334af8aa264..9b095ee01364 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -709,7 +709,7 @@ static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq, return true; if (ctrl->ops->flags & NVME_F_FABRICS && ctrl->state == NVME_CTRL_DELETING) - return true; + return queue_live; return __nvme_check_ready(ctrl, rq, queue_live); } int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 33bc83d8d992..4ceb28675fdf 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -572,7 +572,7 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue, return ret; } -static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req, +static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req, struct nvme_tcp_r2t_pdu *pdu) { struct nvme_tcp_data_pdu *data = req->pdu; @@ -581,32 +581,11 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req, u8 hdgst = 
nvme_tcp_hdgst_len(queue); u8 ddgst = nvme_tcp_ddgst_len(queue); + req->state = NVME_TCP_SEND_H2C_PDU; + req->offset = 0; req->pdu_len = le32_to_cpu(pdu->r2t_length); req->pdu_sent = 0; - if (unlikely(!req->pdu_len)) { - dev_err(queue->ctrl->ctrl.device, - "req %d r2t len is %u, probably a bug...\n", - rq->tag, req->pdu_len); - return -EPROTO; - } - - if (unlikely(req->data_sent + req->pdu_len > req->data_len)) { - dev_err(queue->ctrl->ctrl.device, - "req %d r2t len %u exceeded data len %u (%zu sent)\n", - rq->tag, req->pdu_len, req->data_len, - req->data_sent); - return -EPROTO; - } - - if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) { - dev_err(queue->ctrl->ctrl.device, - "req %d unexpected r2t offset %u (expected %zu)\n", - rq->tag, le32_to_cpu(pdu->r2t_offset), - req->data_sent); - return -EPROTO; - } - memset(data, 0, sizeof(*data)); data->hdr.type = nvme_tcp_h2c_data; data->hdr.flags = NVME_TCP_F_DATA_LAST; @@ -622,7 +601,6 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req, data->command_id = nvme_cid(rq); data->data_offset = pdu->r2t_offset; data->data_length = cpu_to_le32(req->pdu_len); - return 0; } static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, @@ -630,7 +608,7 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, { struct nvme_tcp_request *req; struct request *rq; - int ret; + u32 r2t_length = le32_to_cpu(pdu->r2t_length); rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); if (!rq) { @@ -641,13 +619,28 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, } req = blk_mq_rq_to_pdu(rq); - ret = nvme_tcp_setup_h2c_data_pdu(req, pdu); - if (unlikely(ret)) - return ret; + if (unlikely(!r2t_length)) { + dev_err(queue->ctrl->ctrl.device, + "req %d r2t len is %u, probably a bug...\n", + rq->tag, r2t_length); + return -EPROTO; + } - req->state = NVME_TCP_SEND_H2C_PDU; - req->offset = 0; + if (unlikely(req->data_sent + r2t_length > req->data_len)) { + dev_err(queue->ctrl->ctrl.device, + "req %d r2t len %u exceeded data len %u (%zu sent)\n", + rq->tag, r2t_length, req->data_len, req->data_sent); + return -EPROTO; + } + if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) { + dev_err(queue->ctrl->ctrl.device, + "req %d unexpected r2t offset %u (expected %zu)\n", + rq->tag, le32_to_cpu(pdu->r2t_offset), req->data_sent); + return -EPROTO; + } + + nvme_tcp_setup_h2c_data_pdu(req, pdu); nvme_tcp_queue_request(req, false, true); return 0; @@ -1232,6 +1225,7 @@ static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl) static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid) { + struct page *page; struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); struct nvme_tcp_queue *queue = &ctrl->queues[qid]; @@ -1241,6 +1235,11 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid) if (queue->hdr_digest || queue->data_digest) nvme_tcp_free_crypto(queue); + if (queue->pf_cache.va) { + page = virt_to_head_page(queue->pf_cache.va); + __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias); + queue->pf_cache.va = NULL; + } sock_release(queue->sock); kfree(queue->pdu); mutex_destroy(&queue->send_mutex); diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c index bfc259e0d7b8..9f81beb4df4e 100644 --- a/drivers/nvme/host/zns.c +++ b/drivers/nvme/host/zns.c @@ -166,7 +166,10 @@ static int nvme_zone_parse_entry(struct nvme_ns *ns, zone.len = ns->zsze; zone.capacity = nvme_lba_to_sect(ns, le64_to_cpu(entry->zcap)); zone.start = nvme_lba_to_sect(ns, le64_to_cpu(entry->zslba)); - zone.wp = 
nvme_lba_to_sect(ns, le64_to_cpu(entry->wp)); + if (zone.cond == BLK_ZONE_COND_FULL) + zone.wp = zone.start + zone.len; + else + zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp)); return cb(&zone, idx, data); } diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c index 6aa30f30b572..6be6e59d273b 100644 --- a/drivers/nvme/target/io-cmd-file.c +++ b/drivers/nvme/target/io-cmd-file.c @@ -8,6 +8,7 @@ #include <linux/uio.h> #include <linux/falloc.h> #include <linux/file.h> +#include <linux/fs.h> #include "nvmet.h" #define NVMET_MAX_MPOOL_BVEC 16 @@ -266,7 +267,8 @@ static void nvmet_file_execute_rw(struct nvmet_req *req) if (req->ns->buffered_io) { if (likely(!req->f.mpool_alloc) && - nvmet_file_execute_io(req, IOCB_NOWAIT)) + (req->ns->file->f_mode & FMODE_NOWAIT) && + nvmet_file_execute_io(req, IOCB_NOWAIT)) return; nvmet_file_submit_buffered_io(req); } else diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 84c387e4bf43..7c1c43ce466b 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -166,6 +166,8 @@ static struct workqueue_struct *nvmet_tcp_wq; static const struct nvmet_fabrics_ops nvmet_tcp_ops; static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c); static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd); +static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd); +static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd); static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue, struct nvmet_tcp_cmd *cmd) @@ -297,6 +299,16 @@ static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu) return 0; } +static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd) +{ + WARN_ON(unlikely(cmd->nr_mapped > 0)); + + kfree(cmd->iov); + sgl_free(cmd->req.sg); + cmd->iov = NULL; + cmd->req.sg = NULL; +} + static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd) { struct scatterlist *sg; @@ -306,6 +318,8 @@ static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd) for (i = 0; i < cmd->nr_mapped; i++) kunmap(sg_page(&sg[i])); + + cmd->nr_mapped = 0; } static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd) @@ -387,7 +401,7 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd) return 0; err: - sgl_free(cmd->req.sg); + nvmet_tcp_free_cmd_buffers(cmd); return NVME_SC_INTERNAL; } @@ -632,10 +646,8 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch) } } - if (queue->nvme_sq.sqhd_disabled) { - kfree(cmd->iov); - sgl_free(cmd->req.sg); - } + if (queue->nvme_sq.sqhd_disabled) + nvmet_tcp_free_cmd_buffers(cmd); return 1; @@ -664,8 +676,7 @@ static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd, if (left) return -EAGAIN; - kfree(cmd->iov); - sgl_free(cmd->req.sg); + nvmet_tcp_free_cmd_buffers(cmd); cmd->queue->snd_cmd = NULL; nvmet_tcp_put_cmd(cmd); return 1; @@ -700,10 +711,11 @@ static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch) static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch) { struct nvmet_tcp_queue *queue = cmd->queue; + int left = NVME_TCP_DIGEST_LENGTH - cmd->offset; struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; struct kvec iov = { .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset, - .iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset + .iov_len = left }; int ret; @@ -717,6 +729,10 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch) return ret; cmd->offset += ret; + left -= ret; + + if (left) + return -EAGAIN; if 
(queue->nvme_sq.sqhd_disabled) { cmd->queue->snd_cmd = NULL; @@ -906,7 +922,14 @@ static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue, size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length); int ret; - if (!nvme_is_write(cmd->req.cmd) || + /* + * This command has not been processed yet, hence we are trying to + * figure out if there is still pending data left to receive. If + * we don't, we can simply prepare for the next pdu and bail out, + * otherwise we will need to prepare a buffer and receive the + * stale data before continuing forward. + */ + if (!nvme_is_write(cmd->req.cmd) || !data_len || data_len > cmd->req.port->inline_data_size) { nvmet_prepare_receive_pdu(queue); return; @@ -1406,8 +1429,7 @@ static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd) { nvmet_req_uninit(&cmd->req); nvmet_tcp_unmap_pdu_iovec(cmd); - kfree(cmd->iov); - sgl_free(cmd->req.sg); + nvmet_tcp_free_cmd_buffers(cmd); } static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue) @@ -1417,7 +1439,10 @@ static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue) for (i = 0; i < queue->nr_cmds; i++, cmd++) { if (nvmet_tcp_need_data_in(cmd)) - nvmet_tcp_finish_cmd(cmd); + nvmet_req_uninit(&cmd->req); + + nvmet_tcp_unmap_pdu_iovec(cmd); + nvmet_tcp_free_cmd_buffers(cmd); } if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) { @@ -1437,7 +1462,9 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w) mutex_unlock(&nvmet_tcp_queue_mutex); nvmet_tcp_restore_socket_callbacks(queue); - flush_work(&queue->io_work); + cancel_work_sync(&queue->io_work); + /* stop accepting incoming data */ + queue->rcv_state = NVMET_TCP_RECV_ERR; nvmet_tcp_uninit_data_in_cmds(queue); nvmet_sq_destroy(&queue->nvme_sq); diff --git a/drivers/of/irq.c b/drivers/of/irq.c index b10f015b2e37..2b07677a386b 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -76,6 +76,26 @@ struct device_node *of_irq_find_parent(struct device_node *child) } EXPORT_SYMBOL_GPL(of_irq_find_parent); +/* + * These interrupt controllers abuse interrupt-map for unspeakable + * reasons and rely on the core code to *ignore* it (the drivers do + * their own parsing of the property). + * + * If you think of adding to the list for something *new*, think + * again. There is a high chance that you will be sent back to the + * drawing board. + */ +static const char * const of_irq_imap_abusers[] = { + "CBEA,platform-spider-pic", + "sti,platform-spider-pic", + "realtek,rtl-intc", + "fsl,ls1021a-extirq", + "fsl,ls1043a-extirq", + "fsl,ls1088a-extirq", + "renesas,rza1-irqc", + NULL, +}; + /** * of_irq_parse_raw - Low level interrupt tree parsing * @addr: address specifier (start of "reg" property of the device) in be32 format @@ -159,12 +179,15 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) /* * Now check if cursor is an interrupt-controller and * if it is then we are done, unless there is an - * interrupt-map which takes precedence. + * interrupt-map which takes precedence except on one + * of these broken platforms that want to parse + * interrupt-map themselves for $reason. 
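The of_irq_imap_abusers[] check above short-circuits interrupt-map parsing for a fixed set of compatibles. A plain-C sketch of the same deny-list test (simple string matching stands in for of_device_compatible_match(), which walks the node's compatible property):

#include <stdio.h>
#include <string.h>

/* Compatible strings copied from the hunk above. */
static const char * const imap_abusers[] = {
	"CBEA,platform-spider-pic",
	"sti,platform-spider-pic",
	"realtek,rtl-intc",
	"fsl,ls1021a-extirq",
	"fsl,ls1043a-extirq",
	"fsl,ls1088a-extirq",
	"renesas,rza1-irqc",
	NULL,
};

static int is_imap_abuser(const char *compatible)
{
	const char * const *p;

	for (p = imap_abusers; *p; p++)
		if (!strcmp(compatible, *p))
			return 1;
	return 0;
}

int main(void)
{
	printf("%d %d\n", is_imap_abuser("realtek,rtl-intc"),
	       is_imap_abuser("arm,gic-v3"));
	return 0;
}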
*/ bool intc = of_property_read_bool(ipar, "interrupt-controller"); imap = of_get_property(ipar, "interrupt-map", &imaplen); - if (imap == NULL && intc) { + if (intc && + (!imap || of_device_compatible_match(ipar, of_irq_imap_abusers))) { pr_debug(" -> got it !\n"); return 0; } diff --git a/drivers/of/platform.c b/drivers/of/platform.c index b3faf89744aa..793350028906 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -540,6 +540,10 @@ static int __init of_platform_default_populate_init(void) of_node_put(node); } + node = of_get_compatible_child(of_chosen, "simple-framebuffer"); + of_platform_device_create(node, NULL, NULL); + of_node_put(node); + /* Populate everything else. */ of_platform_default_populate(NULL, NULL, NULL); diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c index c24dab383654..722dacdd5a17 100644 --- a/drivers/pci/controller/dwc/pci-exynos.c +++ b/drivers/pci/controller/dwc/pci-exynos.c @@ -19,6 +19,7 @@ #include <linux/platform_device.h> #include <linux/phy/phy.h> #include <linux/regulator/consumer.h> +#include <linux/module.h> #include "pcie-designware.h" diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c index 7b17da2f9b3f..cfe66bf04c1d 100644 --- a/drivers/pci/controller/dwc/pcie-qcom-ep.c +++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c @@ -18,6 +18,7 @@ #include <linux/pm_domain.h> #include <linux/regmap.h> #include <linux/reset.h> +#include <linux/module.h> #include "pcie-designware.h" diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index c5300d49807a..c3b725afa11f 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -32,7 +32,6 @@ #define PCIE_CORE_DEV_ID_REG 0x0 #define PCIE_CORE_CMD_STATUS_REG 0x4 #define PCIE_CORE_DEV_REV_REG 0x8 -#define PCIE_CORE_EXP_ROM_BAR_REG 0x30 #define PCIE_CORE_PCIEXP_CAP 0xc0 #define PCIE_CORE_ERR_CAPCTL_REG 0x118 #define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5) @@ -774,10 +773,6 @@ advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge, *value = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG); return PCI_BRIDGE_EMUL_HANDLED; - case PCI_ROM_ADDRESS1: - *value = advk_readl(pcie, PCIE_CORE_EXP_ROM_BAR_REG); - return PCI_BRIDGE_EMUL_HANDLED; - case PCI_INTERRUPT_LINE: { /* * From the whole 32bit register we support reading from HW only @@ -810,10 +805,6 @@ advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge, advk_writel(pcie, new, PCIE_CORE_CMD_STATUS_REG); break; - case PCI_ROM_ADDRESS1: - advk_writel(pcie, new, PCIE_CORE_EXP_ROM_BAR_REG); - break; - case PCI_INTERRUPT_LINE: if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) { u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG); diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c index 1bf4d75b61be..b090924b41fe 100644 --- a/drivers/pci/controller/pcie-apple.c +++ b/drivers/pci/controller/pcie-apple.c @@ -516,7 +516,7 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie, int ret, i; reset = gpiod_get_from_of_node(np, "reset-gpios", 0, - GPIOD_OUT_LOW, "#PERST"); + GPIOD_OUT_LOW, "PERST#"); if (IS_ERR(reset)) return PTR_ERR(reset); @@ -539,12 +539,22 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie, rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK); + /* Assert PERST# before setting up the clock */ + gpiod_set_value(reset, 1); + ret = apple_pcie_setup_refclk(pcie, port); if (ret < 0) return ret; + /* The minimal Tperst-clk value is 
100us (PCIe CEM r5.0, 2.9.2) */ + usleep_range(100, 200); + + /* Deassert PERST# */ rmw_set(PORT_PERST_OFF, port->base + PORT_PERST); - gpiod_set_value(reset, 1); + gpiod_set_value(reset, 0); + + /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */ + msleep(100); ret = readl_relaxed_poll_timeout(port->base + PORT_STATUS, stat, stat & PORT_STATUS_READY, 100, 250000); diff --git a/drivers/phy/hisilicon/phy-hi3670-pcie.c b/drivers/phy/hisilicon/phy-hi3670-pcie.c index c64c6679b1b9..0ac9634b398d 100644 --- a/drivers/phy/hisilicon/phy-hi3670-pcie.c +++ b/drivers/phy/hisilicon/phy-hi3670-pcie.c @@ -757,8 +757,8 @@ static int hi3670_pcie_phy_get_resources(struct hi3670_pcie_phy *phy, return PTR_ERR(phy->sysctrl); phy->pmctrl = syscon_regmap_lookup_by_compatible("hisilicon,hi3670-pmctrl"); - if (IS_ERR(phy->sysctrl)) - return PTR_ERR(phy->sysctrl); + if (IS_ERR(phy->pmctrl)) + return PTR_ERR(phy->pmctrl); /* clocks */ phy->phy_ref_clk = devm_clk_get(dev, "phy_ref"); diff --git a/drivers/phy/marvell/phy-mvebu-cp110-utmi.c b/drivers/phy/marvell/phy-mvebu-cp110-utmi.c index 08d178a4dc13..aa27c7994610 100644 --- a/drivers/phy/marvell/phy-mvebu-cp110-utmi.c +++ b/drivers/phy/marvell/phy-mvebu-cp110-utmi.c @@ -82,9 +82,9 @@ * struct mvebu_cp110_utmi - PHY driver data * * @regs: PHY registers - * @syscom: Regmap with system controller registers + * @syscon: Regmap with system controller registers * @dev: device driver handle - * @caps: PHY capabilities + * @ops: phy ops */ struct mvebu_cp110_utmi { void __iomem *regs; diff --git a/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c index bfff0c8c9130..fec1da470d26 100644 --- a/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c +++ b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c @@ -127,12 +127,13 @@ struct phy_drvdata { }; /** - * Write register and read back masked value to confirm it is written + * usb_phy_write_readback() - Write register and read back masked value to + * confirm it is written * - * @base - QCOM DWC3 PHY base virtual address. - * @offset - register offset. - * @mask - register bitmask specifying what should be updated - * @val - value to write. + * @phy_dwc3: QCOM DWC3 phy context + * @offset: register offset. + * @mask: register bitmask specifying what should be updated + * @val: value to write. */ static inline void usb_phy_write_readback(struct usb_phy *phy_dwc3, u32 offset, @@ -171,11 +172,11 @@ static int wait_for_latch(void __iomem *addr) } /** - * Write SSPHY register + * usb_ss_write_phycreg() - Write SSPHY register * - * @base - QCOM DWC3 PHY base virtual address. - * @addr - SSPHY address to write. - * @val - value to write. + * @phy_dwc3: QCOM DWC3 phy context + * @addr: SSPHY address to write. + * @val: value to write. */ static int usb_ss_write_phycreg(struct usb_phy *phy_dwc3, u32 addr, u32 val) @@ -209,10 +210,11 @@ err_wait: } /** - * Read SSPHY register. + * usb_ss_read_phycreg() - Read SSPHY register. * - * @base - QCOM DWC3 PHY base virtual address. - * @addr - SSPHY address to read. + * @phy_dwc3: QCOM DWC3 phy context + * @addr: SSPHY address to read. + * @val: pointer in which read is store. 
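usb_phy_write_readback(), documented earlier in this hunk, names a common MMIO idiom: read-modify-write a masked field, then read back to confirm the hardware latched it. A self-contained model of the idiom (an ordinary variable stands in for the device register):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;	/* stands in for an MMIO register */

static int write_readback(uint32_t *reg, uint32_t mask, uint32_t val)
{
	uint32_t tmp = *reg;

	tmp &= ~mask;		/* clear the field */
	tmp |= val & mask;	/* install the new value */
	*reg = tmp;

	/* confirm the masked field reads back as written */
	return ((*reg & mask) == (val & mask)) ? 0 : -1;
}

int main(void)
{
	fake_reg = 0xffff0000u;
	printf("rc=%d reg=0x%08x\n",
	       write_readback(&fake_reg, 0xffu, 0x5au), fake_reg);
	return 0;
}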
*/ static int usb_ss_read_phycreg(struct usb_phy *phy_dwc3, u32 addr, u32 *val) diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c index 456a59d8c7d0..c96639d5f581 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp.c @@ -2973,6 +2973,9 @@ struct qmp_phy_combo_cfg { * @qmp: QMP phy to which this lane belongs * @lane_rst: lane's reset controller * @mode: current PHY mode + * @dp_aux_cfg: Display port aux config + * @dp_opts: Display port optional config + * @dp_clks: Display port clocks */ struct qmp_phy { struct phy *phy; diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hsic.c b/drivers/phy/qualcomm/phy-qcom-usb-hsic.c index 04d18d52f700..716a77748ed8 100644 --- a/drivers/phy/qualcomm/phy-qcom-usb-hsic.c +++ b/drivers/phy/qualcomm/phy-qcom-usb-hsic.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * Copyright (C) 2016 Linaro Ltd */ #include <linux/module.h> diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c index 7df6a63ad37b..e4f4a9be5132 100644 --- a/drivers/phy/st/phy-stm32-usbphyc.c +++ b/drivers/phy/st/phy-stm32-usbphyc.c @@ -478,7 +478,7 @@ static void stm32_usbphyc_phy_tuning(struct stm32_usbphyc *usbphyc, if (!of_property_read_bool(np, "st,no-lsfs-fb-cap")) usbphyc_phy->tune |= LFSCAPEN; - if (of_property_read_bool(np, "st,slow-hs-slew-rate")) + if (of_property_read_bool(np, "st,decrease-hs-slew-rate")) usbphyc_phy->tune |= HSDRVSLEW; ret = of_property_read_u32(np, "st,tune-hs-dc-level", &val); diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c index 2ff56ce77b30..c1211c4f863c 100644 --- a/drivers/phy/ti/phy-am654-serdes.c +++ b/drivers/phy/ti/phy-am654-serdes.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/** +/* * PCIe SERDES driver for AM654x SoC * * Copyright (C) 2018 - 2019 Texas Instruments Incorporated - http://www.ti.com/ diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c index 126f5b8735cc..b3384c31637a 100644 --- a/drivers/phy/ti/phy-j721e-wiz.c +++ b/drivers/phy/ti/phy-j721e-wiz.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/** +/* * Wrapper driver for SERDES used in J721E * * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c index ebceb1520ce8..3a505fe5715a 100644 --- a/drivers/phy/ti/phy-omap-usb2.c +++ b/drivers/phy/ti/phy-omap-usb2.c @@ -89,9 +89,9 @@ static inline void omap_usb_writel(void __iomem *addr, unsigned int offset, } /** - * omap_usb2_set_comparator - links the comparator present in the system with - * this phy - * @comparator - the companion phy(comparator) for this phy + * omap_usb2_set_comparator() - links the comparator present in the system with this phy + * + * @comparator: the companion phy(comparator) for this phy * * The phy companion driver should call this API passing the phy_companion * filled with set_vbus and start_srp to be used by usb phy. 
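Several hunks above (usb-hsic, am654-serdes, j721e-wiz, plus tusb1210 just below) demote comment openers from the kernel-doc form (slash-star-star) to plain ones (slash-star): a kernel-doc opener on a copyright header makes scripts/kernel-doc try to parse it and warn. For contrast, a minimal well-formed kernel-doc block looks like this (example_fn is invented for illustration):

#include <stdio.h>

/**
 * example_fn() - add one to a value
 * @arg: the value to increment
 *
 * Return: @arg plus one.
 */
static int example_fn(int arg)
{
	return arg + 1;
}

int main(void)
{
	printf("%d\n", example_fn(41));
	return 0;
}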
diff --git a/drivers/phy/ti/phy-tusb1210.c b/drivers/phy/ti/phy-tusb1210.c index a63213f5972a..15c1c79e5c29 100644 --- a/drivers/phy/ti/phy-tusb1210.c +++ b/drivers/phy/ti/phy-tusb1210.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * tusb1210.c - TUSB1210 USB ULPI PHY driver * * Copyright (C) 2015 Intel Corporation diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index bae9d429b813..ecab9064a845 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -598,14 +598,14 @@ static struct irq_chip amd_gpio_irqchip = { #define PIN_IRQ_PENDING (BIT(INTERRUPT_STS_OFF) | BIT(WAKE_STS_OFF)) -static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id) +static bool do_amd_gpio_irq_handler(int irq, void *dev_id) { struct amd_gpio *gpio_dev = dev_id; struct gpio_chip *gc = &gpio_dev->gc; - irqreturn_t ret = IRQ_NONE; unsigned int i, irqnr; unsigned long flags; u32 __iomem *regs; + bool ret = false; u32 regval; u64 status, mask; @@ -627,6 +627,14 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id) /* Each status bit covers four pins */ for (i = 0; i < 4; i++) { regval = readl(regs + i); + /* caused wake on resume context for shared IRQ */ + if (irq < 0 && (regval & BIT(WAKE_STS_OFF))) { + dev_dbg(&gpio_dev->pdev->dev, + "Waking due to GPIO %d: 0x%x", + irqnr + i, regval); + return true; + } + if (!(regval & PIN_IRQ_PENDING) || !(regval & BIT(INTERRUPT_MASK_OFF))) continue; @@ -650,9 +658,12 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id) } writel(regval, regs + i); raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); - ret = IRQ_HANDLED; + ret = true; } } + /* did not cause wake on resume context for shared IRQ */ + if (irq < 0) + return false; /* Signal EOI to the GPIO unit */ raw_spin_lock_irqsave(&gpio_dev->lock, flags); @@ -664,6 +675,16 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id) return ret; } +static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id) +{ + return IRQ_RETVAL(do_amd_gpio_irq_handler(irq, dev_id)); +} + +static bool __maybe_unused amd_gpio_check_wake(void *dev_id) +{ + return do_amd_gpio_irq_handler(-1, dev_id); +} + static int amd_get_groups_count(struct pinctrl_dev *pctldev) { struct amd_gpio *gpio_dev = pinctrl_dev_get_drvdata(pctldev); @@ -1033,6 +1054,7 @@ static int amd_gpio_probe(struct platform_device *pdev) goto out2; platform_set_drvdata(pdev, gpio_dev); + acpi_register_wakeup_handler(gpio_dev->irq, amd_gpio_check_wake, gpio_dev); dev_dbg(&pdev->dev, "amd gpio driver loaded\n"); return ret; @@ -1050,6 +1072,7 @@ static int amd_gpio_remove(struct platform_device *pdev) gpio_dev = platform_get_drvdata(pdev); gpiochip_remove(&gpio_dev->gc); + acpi_unregister_wakeup_handler(amd_gpio_check_wake, gpio_dev); return 0; } diff --git a/drivers/pinctrl/pinctrl-apple-gpio.c b/drivers/pinctrl/pinctrl-apple-gpio.c index 0cc346bfc4c3..a7861079a650 100644 --- a/drivers/pinctrl/pinctrl-apple-gpio.c +++ b/drivers/pinctrl/pinctrl-apple-gpio.c @@ -258,7 +258,7 @@ static void apple_gpio_irq_ack(struct irq_data *data) pctl->base + REG_IRQ(irqgrp, data->hwirq)); } -static int apple_gpio_irq_type(unsigned int type) +static unsigned int apple_gpio_irq_type(unsigned int type) { switch (type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_EDGE_RISING: @@ -272,7 +272,7 @@ static int apple_gpio_irq_type(unsigned int type) case IRQ_TYPE_LEVEL_LOW: return REG_GPIOx_IN_IRQ_LO; default: - return -EINVAL; + return REG_GPIOx_IN_IRQ_OFF; } } @@ -288,7 +288,7 @@ static void 
apple_gpio_irq_unmask(struct irq_data *data) { struct apple_gpio_pinctrl *pctl = gpiochip_get_data(irq_data_get_irq_chip_data(data)); - int irqtype = apple_gpio_irq_type(irqd_get_trigger_type(data)); + unsigned int irqtype = apple_gpio_irq_type(irqd_get_trigger_type(data)); apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE, FIELD_PREP(REG_GPIOx_MODE, irqtype)); @@ -313,10 +313,10 @@ static int apple_gpio_irq_set_type(struct irq_data *data, { struct apple_gpio_pinctrl *pctl = gpiochip_get_data(irq_data_get_irq_chip_data(data)); - int irqtype = apple_gpio_irq_type(type); + unsigned int irqtype = apple_gpio_irq_type(type); - if (irqtype < 0) - return irqtype; + if (irqtype == REG_GPIOx_IN_IRQ_OFF) + return -EINVAL; apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE, FIELD_PREP(REG_GPIOx_MODE, irqtype)); diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig index b9191f1abb1c..3e0c00766f59 100644 --- a/drivers/pinctrl/qcom/Kconfig +++ b/drivers/pinctrl/qcom/Kconfig @@ -197,6 +197,7 @@ config PINCTRL_QCOM_SPMI_PMIC select PINMUX select PINCONF select GENERIC_PINCONF + select GPIOLIB select GPIOLIB_IRQCHIP select IRQ_DOMAIN_HIERARCHY help @@ -211,6 +212,7 @@ config PINCTRL_QCOM_SSBI_PMIC select PINMUX select PINCONF select GENERIC_PINCONF + select GPIOLIB select GPIOLIB_IRQCHIP select IRQ_DOMAIN_HIERARCHY help diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c index c51793f6546f..fdfd7b8f3a76 100644 --- a/drivers/pinctrl/qcom/pinctrl-sdm845.c +++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c @@ -1310,6 +1310,7 @@ static const struct msm_pinctrl_soc_data sdm845_pinctrl = { .ngpios = 151, .wakeirq_map = sdm845_pdc_map, .nwakeirq_map = ARRAY_SIZE(sdm845_pdc_map), + .wakeirq_dual_edge_errata = true, }; static const struct msm_pinctrl_soc_data sdm845_acpi_pinctrl = { diff --git a/drivers/pinctrl/qcom/pinctrl-sm8350.c b/drivers/pinctrl/qcom/pinctrl-sm8350.c index 4d8f8636c2b3..1c042d39380c 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm8350.c +++ b/drivers/pinctrl/qcom/pinctrl-sm8350.c @@ -1597,10 +1597,10 @@ static const struct msm_pingroup sm8350_groups[] = { [200] = PINGROUP(200, qdss_gpio, _, _, _, _, _, _, _, _), [201] = PINGROUP(201, _, _, _, _, _, _, _, _, _), [202] = PINGROUP(202, _, _, _, _, _, _, _, _, _), - [203] = UFS_RESET(ufs_reset, 0x1d8000), - [204] = SDC_PINGROUP(sdc2_clk, 0x1cf000, 14, 6), - [205] = SDC_PINGROUP(sdc2_cmd, 0x1cf000, 11, 3), - [206] = SDC_PINGROUP(sdc2_data, 0x1cf000, 9, 0), + [203] = UFS_RESET(ufs_reset, 0xd8000), + [204] = SDC_PINGROUP(sdc2_clk, 0xcf000, 14, 6), + [205] = SDC_PINGROUP(sdc2_cmd, 0xcf000, 11, 3), + [206] = SDC_PINGROUP(sdc2_data, 0xcf000, 9, 0), }; static const struct msm_gpio_wakeirq_map sm8350_pdc_map[] = { diff --git a/drivers/pinctrl/ralink/pinctrl-mt7620.c b/drivers/pinctrl/ralink/pinctrl-mt7620.c index 425d55a2ee19..6853b5b8b0fe 100644 --- a/drivers/pinctrl/ralink/pinctrl-mt7620.c +++ b/drivers/pinctrl/ralink/pinctrl-mt7620.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only +#include <asm/mach-ralink/ralink_regs.h> #include <asm/mach-ralink/mt7620.h> #include <linux/module.h> #include <linux/platform_device.h> diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c index 8d734bfc33d2..50bd26a30ac0 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra.c +++ b/drivers/pinctrl/tegra/pinctrl-tegra.c @@ -275,7 +275,7 @@ static int tegra_pinctrl_set_mux(struct pinctrl_dev *pctldev, return 0; } -static struct tegra_pingroup *tegra_pinctrl_get_group(struct 
pinctrl_dev *pctldev, +static const struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctldev, unsigned int offset) { struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); @@ -289,7 +289,7 @@ static struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctlde continue; for (j = 0; j < num_pins; j++) { if (offset == pins[j]) - return (struct tegra_pingroup *)&pmx->soc->groups[group]; + return &pmx->soc->groups[group]; } } diff --git a/drivers/pinctrl/tegra/pinctrl-tegra194.c b/drivers/pinctrl/tegra/pinctrl-tegra194.c index b4fef9185d88..5c1dfcb46749 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra194.c +++ b/drivers/pinctrl/tegra/pinctrl-tegra194.c @@ -1387,7 +1387,6 @@ static struct tegra_function tegra194_functions[] = { .schmitt_bit = schmitt_b, \ .drvtype_bit = 13, \ .lpdr_bit = e_lpdr, \ - .drv_reg = -1, \ #define drive_touch_clk_pcc4 DRV_PINGROUP_ENTRY_Y(0x2004, 12, 5, 20, 5, -1, -1, -1, -1, 1) #define drive_uart3_rx_pcc6 DRV_PINGROUP_ENTRY_Y(0x200c, 12, 5, 20, 5, -1, -1, -1, -1, 1) diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c index 9d1e7e03628e..4020b8354bae 100644 --- a/drivers/platform/chrome/cros_ec_ishtp.c +++ b/drivers/platform/chrome/cros_ec_ishtp.c @@ -41,9 +41,12 @@ enum cros_ec_ish_channel { #define ISHTP_SEND_TIMEOUT (3 * HZ) /* ISH Transport CrOS EC ISH client unique GUID */ -static const guid_t cros_ish_guid = - GUID_INIT(0x7b7154d0, 0x56f4, 0x4bdc, - 0xb0, 0xd8, 0x9e, 0x7c, 0xda, 0xe0, 0xd6, 0xa0); +static const struct ishtp_device_id cros_ec_ishtp_id_table[] = { + { .guid = GUID_INIT(0x7b7154d0, 0x56f4, 0x4bdc, + 0xb0, 0xd8, 0x9e, 0x7c, 0xda, 0xe0, 0xd6, 0xa0), }, + { } +}; +MODULE_DEVICE_TABLE(ishtp, cros_ec_ishtp_id_table); struct header { u8 channel; @@ -389,7 +392,7 @@ static int cros_ish_init(struct ishtp_cl *cros_ish_cl) ishtp_set_tx_ring_size(cros_ish_cl, CROS_ISH_CL_TX_RING_SIZE); ishtp_set_rx_ring_size(cros_ish_cl, CROS_ISH_CL_RX_RING_SIZE); - fw_client = ishtp_fw_cl_get_client(dev, &cros_ish_guid); + fw_client = ishtp_fw_cl_get_client(dev, &cros_ec_ishtp_id_table[0].guid); if (!fw_client) { dev_err(cl_data_to_dev(client_data), "ish client uuid not found\n"); @@ -765,7 +768,7 @@ static SIMPLE_DEV_PM_OPS(cros_ec_ishtp_pm_ops, cros_ec_ishtp_suspend, static struct ishtp_cl_driver cros_ec_ishtp_driver = { .name = "cros_ec_ishtp", - .guid = &cros_ish_guid, + .id = cros_ec_ishtp_id_table, .probe = cros_ec_ishtp_probe, .remove = cros_ec_ishtp_remove, .reset = cros_ec_ishtp_reset, @@ -791,4 +794,3 @@ MODULE_DESCRIPTION("ChromeOS EC ISHTP Client Driver"); MODULE_AUTHOR("Rushikesh S Kadam <rushikesh.s.kadam@intel.com>"); MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("ishtp:*"); diff --git a/drivers/platform/mellanox/mlxreg-lc.c b/drivers/platform/mellanox/mlxreg-lc.c index 0b7f58feb701..c897a2f15840 100644 --- a/drivers/platform/mellanox/mlxreg-lc.c +++ b/drivers/platform/mellanox/mlxreg-lc.c @@ -413,7 +413,7 @@ mlxreg_lc_create_static_devices(struct mlxreg_lc *mlxreg_lc, struct mlxreg_hotpl int size) { struct mlxreg_hotplug_device *dev = devs; - int i; + int i, ret; /* Create static I2C device feeding by auxiliary or main power. 
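In the pinctrl-apple-gpio hunks above, apple_gpio_irq_type() now returns unsigned int, so it can no longer report -EINVAL in-band; instead it returns the REG_GPIOx_IN_IRQ_OFF sentinel and the caller translates that into an error. A toy model of the pattern (enum values are illustrative, not the driver's register encoding):

#include <stdio.h>

enum { IRQ_OFF, IRQ_HI, IRQ_LO };	/* IRQ_OFF doubles as the sentinel */

static unsigned int irq_type(unsigned int trigger)
{
	switch (trigger) {
	case 1: return IRQ_HI;
	case 2: return IRQ_LO;
	default: return IRQ_OFF;	/* unsupported: sentinel, not -EINVAL */
	}
}

int main(void)
{
	if (irq_type(7) == IRQ_OFF)
		puts("caller maps the sentinel to -EINVAL");
	return 0;
}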
*/ for (i = 0; i < size; i++, dev++) { @@ -423,6 +423,7 @@ mlxreg_lc_create_static_devices(struct mlxreg_lc *mlxreg_lc, struct mlxreg_hotpl dev->brdinfo->type, dev->nr, dev->brdinfo->addr); dev->adapter = NULL; + ret = PTR_ERR(dev->client); goto fail_create_static_devices; } } @@ -435,7 +436,7 @@ fail_create_static_devices: i2c_unregister_device(dev->client); dev->client = NULL; } - return IS_ERR(dev->client); + return ret; } static void diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index d4c079f4afc6..97e87628eb35 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -185,7 +185,7 @@ config ACER_WMI config AMD_PMC tristate "AMD SoC PMC driver" - depends on ACPI && PCI + depends on ACPI && PCI && RTC_CLASS help The driver provides support for AMD Power Management Controller primarily responsible for S2Idle transactions that are driven from @@ -517,7 +517,9 @@ config THINKPAD_ACPI depends on ACPI_VIDEO || ACPI_VIDEO = n depends on BACKLIGHT_CLASS_DEVICE depends on I2C + depends on DRM select ACPI_PLATFORM_PROFILE + select DRM_PRIVACY_SCREEN select HWMON select NVRAM select NEW_LEDS diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c index b7e50ed050a8..841c44cd64c2 100644 --- a/drivers/platform/x86/amd-pmc.c +++ b/drivers/platform/x86/amd-pmc.c @@ -76,7 +76,7 @@ #define AMD_CPU_ID_CZN AMD_CPU_ID_RN #define AMD_CPU_ID_YC 0x14B5 -#define PMC_MSG_DELAY_MIN_US 100 +#define PMC_MSG_DELAY_MIN_US 50 #define RESPONSE_REGISTER_LOOP_MAX 20000 #define SOC_SUBSYSTEM_IP_MAX 12 diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig index 2fffa57e596e..fe224a54f24c 100644 --- a/drivers/platform/x86/dell/Kconfig +++ b/drivers/platform/x86/dell/Kconfig @@ -187,7 +187,7 @@ config DELL_WMI_AIO config DELL_WMI_DESCRIPTOR tristate - default m + default n depends on ACPI_WMI config DELL_WMI_LED diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c index b183967ecfb7..435a91fe2568 100644 --- a/drivers/platform/x86/hp_accel.c +++ b/drivers/platform/x86/hp_accel.c @@ -331,9 +331,11 @@ static int lis3lv02d_probe(struct platform_device *device) INIT_WORK(&hpled_led.work, delayed_set_status_worker); ret = led_classdev_register(NULL, &hpled_led.led_classdev); if (ret) { + i8042_remove_filter(hp_accel_i8042_filter); lis3lv02d_joystick_disable(&lis3_dev); lis3lv02d_poweroff(&lis3_dev); flush_work(&hpled_led.work); + lis3lv02d_remove_fs(&lis3_dev); return ret; } diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c index 08598942a6d7..13f8cf70b9ae 100644 --- a/drivers/platform/x86/intel/hid.c +++ b/drivers/platform/x86/intel/hid.c @@ -99,6 +99,13 @@ static const struct dmi_system_id button_array_table[] = { DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Tablet Gen 2"), }, }, + { + .ident = "Microsoft Surface Go 3", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"), + }, + }, { } }; diff --git a/drivers/platform/x86/intel/ishtp_eclite.c b/drivers/platform/x86/intel/ishtp_eclite.c index 12fc98a48657..93ac8b2dbf38 100644 --- a/drivers/platform/x86/intel/ishtp_eclite.c +++ b/drivers/platform/x86/intel/ishtp_eclite.c @@ -93,9 +93,12 @@ struct ishtp_opregion_dev { }; /* eclite ishtp client UUID: 6a19cc4b-d760-4de3-b14d-f25ebd0fbcd9 */ -static const guid_t ecl_ishtp_guid = - GUID_INIT(0x6a19cc4b, 0xd760, 0x4de3, - 0xb1, 0x4d, 0xf2, 0x5e, 0xbd, 0xf, 0xbc, 0xd9); +static const struct ishtp_device_id 
ecl_ishtp_id_table[] = { + { .guid = GUID_INIT(0x6a19cc4b, 0xd760, 0x4de3, + 0xb1, 0x4d, 0xf2, 0x5e, 0xbd, 0xf, 0xbc, 0xd9), }, + { } +}; +MODULE_DEVICE_TABLE(ishtp, ecl_ishtp_id_table); /* ACPI DSM UUID: 91d936a7-1f01-49c6-a6b4-72f00ad8d8a5 */ static const guid_t ecl_acpi_guid = @@ -462,7 +465,7 @@ static int ecl_ishtp_cl_init(struct ishtp_cl *ecl_ishtp_cl) ishtp_set_tx_ring_size(ecl_ishtp_cl, ECL_CL_TX_RING_SIZE); ishtp_set_rx_ring_size(ecl_ishtp_cl, ECL_CL_RX_RING_SIZE); - fw_client = ishtp_fw_cl_get_client(dev, &ecl_ishtp_guid); + fw_client = ishtp_fw_cl_get_client(dev, &ecl_ishtp_id_table[0].guid); if (!fw_client) { dev_err(cl_data_to_dev(opr_dev), "fw client not found\n"); return -ENOENT; @@ -674,7 +677,7 @@ static const struct dev_pm_ops ecl_ishtp_pm_ops = { static struct ishtp_cl_driver ecl_ishtp_cl_driver = { .name = "ishtp-eclite", - .guid = &ecl_ishtp_guid, + .id = ecl_ishtp_id_table, .probe = ecl_ishtp_cl_probe, .remove = ecl_ishtp_cl_remove, .reset = ecl_ishtp_cl_reset, @@ -698,4 +701,3 @@ MODULE_DESCRIPTION("ISH ISHTP eclite client opregion driver"); MODULE_AUTHOR("K Naduvalath, Sumesh <sumesh.k.naduvalath@intel.com>"); MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("ishtp:*"); diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c index ae9293024c77..a91847a551a7 100644 --- a/drivers/platform/x86/lg-laptop.c +++ b/drivers/platform/x86/lg-laptop.c @@ -657,6 +657,18 @@ static int acpi_add(struct acpi_device *device) if (product && strlen(product) > 4) switch (product[4]) { case '5': + if (strlen(product) > 5) + switch (product[5]) { + case 'N': + year = 2021; + break; + case '0': + year = 2016; + break; + default: + year = 2022; + } + break; case '6': year = 2016; break; diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index 7ee010aa740a..c1d9ed9b7b67 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c @@ -152,7 +152,7 @@ struct sabi_config { static const struct sabi_config sabi_configs[] = { { - /* I don't know if it is really 2, but it it is + /* I don't know if it is really 2, but it is * less than 3 anyway */ .sabi_version = 2, diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c index 9472aae72df2..c4d9c45350f7 100644 --- a/drivers/platform/x86/think-lmi.c +++ b/drivers/platform/x86/think-lmi.c @@ -888,8 +888,10 @@ static int tlmi_analyze(void) break; if (!item) break; - if (!*item) + if (!*item) { + kfree(item); continue; + } /* It is not allowed to have '/' for file name. Convert it into '\'. 
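The think-lmi hunk above plugs a per-iteration leak: every early continue, and the later allocation-failure path, must kfree() the string fetched for that pass of the loop. The same shape in standalone C (strdup stands in for the per-iteration WMI string allocation):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *get_item(int i)
{
	return (i % 2) ? strdup("Setting") : strdup("");
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++) {
		char *item = get_item(i);

		if (!item)
			break;
		if (!*item) {
			free(item);	/* the fix: release before skipping */
			continue;
		}
		printf("keep: %s\n", item);
		free(item);
	}
	return 0;
}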
*/ strreplace(item, '/', '\\'); @@ -902,6 +904,7 @@ static int tlmi_analyze(void) setting = kzalloc(sizeof(*setting), GFP_KERNEL); if (!setting) { ret = -ENOMEM; + kfree(item); goto fail_clear_attr; } setting->index = i; @@ -916,7 +919,6 @@ static int tlmi_analyze(void) } kobject_init(&setting->kobj, &tlmi_attr_setting_ktype); tlmi_priv.setting[i] = setting; - tlmi_priv.settings_count++; kfree(item); } @@ -983,7 +985,12 @@ static void tlmi_remove(struct wmi_device *wdev) static int tlmi_probe(struct wmi_device *wdev, const void *context) { - tlmi_analyze(); + int ret; + + ret = tlmi_analyze(); + if (ret) + return ret; + return tlmi_sysfs_init(); } diff --git a/drivers/platform/x86/think-lmi.h b/drivers/platform/x86/think-lmi.h index f8e26823075f..2ce5086a5af2 100644 --- a/drivers/platform/x86/think-lmi.h +++ b/drivers/platform/x86/think-lmi.h @@ -55,7 +55,6 @@ struct tlmi_attr_setting { struct think_lmi { struct wmi_device *wmi_device; - int settings_count; bool can_set_bios_settings; bool can_get_bios_selections; bool can_set_bios_password; diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 9c632df734bb..341655d711ce 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -73,6 +73,7 @@ #include <linux/uaccess.h> #include <acpi/battery.h> #include <acpi/video.h> +#include <drm/drm_privacy_screen_driver.h> #include "dual_accel_detect.h" /* ThinkPad CMOS commands */ @@ -157,6 +158,7 @@ enum tpacpi_hkey_event_t { TP_HKEY_EV_VOL_UP = 0x1015, /* Volume up or unmute */ TP_HKEY_EV_VOL_DOWN = 0x1016, /* Volume down or unmute */ TP_HKEY_EV_VOL_MUTE = 0x1017, /* Mixer output mute */ + TP_HKEY_EV_PRIVACYGUARD_TOGGLE = 0x130f, /* Toggle priv.guard on/off */ /* Reasons for waking up from S3/S4 */ TP_HKEY_EV_WKUP_S3_UNDOCK = 0x2304, /* undock requested, S3 */ @@ -1105,15 +1107,6 @@ static int tpacpi_rfk_update_swstate(const struct tpacpi_rfk *tp_rfk) return status; } -/* Query FW and update rfkill sw state for all rfkill switches */ -static void tpacpi_rfk_update_swstate_all(void) -{ - unsigned int i; - - for (i = 0; i < TPACPI_RFK_SW_MAX; i++) - tpacpi_rfk_update_swstate(tpacpi_rfkill_switches[i]); -} - /* * Sync the HW-blocking state of all rfkill switches, * do notice it causes the rfkill core to schedule uevents @@ -3024,6 +3017,8 @@ static struct attribute *hotkey_attributes[] = { &dev_attr_hotkey_all_mask.attr, &dev_attr_hotkey_adaptive_all_mask.attr, &dev_attr_hotkey_recommended_mask.attr, + &dev_attr_hotkey_tablet_mode.attr, + &dev_attr_hotkey_radio_sw.attr, #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL &dev_attr_hotkey_source_mask.attr, &dev_attr_hotkey_poll_freq.attr, @@ -3074,9 +3069,6 @@ static void tpacpi_send_radiosw_update(void) if (wlsw == TPACPI_RFK_RADIO_OFF) tpacpi_rfk_update_hwblock_state(true); - /* Sync sw blocking state */ - tpacpi_rfk_update_swstate_all(); - /* Sync hw blocking state last if it is hw-unblocked */ if (wlsw == TPACPI_RFK_RADIO_ON) tpacpi_rfk_update_hwblock_state(false); @@ -3798,6 +3790,30 @@ static bool adaptive_keyboard_hotkey_notify_hotkey(unsigned int scancode) } } +static bool hotkey_notify_extended_hotkey(const u32 hkey) +{ + unsigned int scancode; + + switch (hkey) { + case TP_HKEY_EV_PRIVACYGUARD_TOGGLE: + tpacpi_driver_event(hkey); + return true; + } + + /* Extended keycodes start at 0x300 and our offset into the map + * TP_ACPI_HOTKEYSCAN_EXTENDED_START. The calculated scancode + * will be positive, but might not be in the correct range. 
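The comment above describes rebasing extended hotkey events (0x1300 and up) into the driver's keymap; the lines that follow compute it as (hkey & 0xfff) - (0x300 - TP_ACPI_HOTKEYSCAN_EXTENDED_START). A standalone check of that arithmetic with invented constants (the real offsets live in thinkpad_acpi.c):

#include <stdio.h>

#define EXTENDED_START	32	/* hypothetical keymap offset */
#define MAP_LEN		64	/* hypothetical keymap length */

int main(void)
{
	unsigned int hkey = 0x1311;	/* sample extended event */
	unsigned int scancode = (hkey & 0xfff) - (0x300 - EXTENDED_START);

	if (scancode >= EXTENDED_START && scancode < MAP_LEN)
		printf("send keymap entry %u\n", scancode);	/* 49 here */
	else
		puts("out of range: ignore");
	return 0;
}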
+ */ + scancode = (hkey & 0xfff) - (0x300 - TP_ACPI_HOTKEYSCAN_EXTENDED_START); + if (scancode >= TP_ACPI_HOTKEYSCAN_EXTENDED_START && + scancode < TPACPI_HOTKEY_MAP_LEN) { + tpacpi_input_send_key(scancode); + return true; + } + + return false; +} + static bool hotkey_notify_hotkey(const u32 hkey, bool *send_acpi_ev, bool *ignore_acpi_ev) @@ -3832,17 +3848,7 @@ static bool hotkey_notify_hotkey(const u32 hkey, return adaptive_keyboard_hotkey_notify_hotkey(scancode); case 3: - /* Extended keycodes start at 0x300 and our offset into the map - * TP_ACPI_HOTKEYSCAN_EXTENDED_START. The calculated scancode - * will be positive, but might not be in the correct range. - */ - scancode -= (0x300 - TP_ACPI_HOTKEYSCAN_EXTENDED_START); - if (scancode >= TP_ACPI_HOTKEYSCAN_EXTENDED_START && - scancode < TPACPI_HOTKEY_MAP_LEN) { - tpacpi_input_send_key(scancode); - return true; - } - break; + return hotkey_notify_extended_hotkey(hkey); } return false; @@ -5738,11 +5744,11 @@ static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = { "tpacpi::standby", "tpacpi::dock_status1", "tpacpi::dock_status2", - "tpacpi::unknown_led2", + "tpacpi::lid_logo_dot", "tpacpi::unknown_led3", "tpacpi::thinkvantage", }; -#define TPACPI_SAFE_LEDS 0x1081U +#define TPACPI_SAFE_LEDS 0x1481U static inline bool tpacpi_is_led_restricted(const unsigned int led) { @@ -8766,6 +8772,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = { TPACPI_Q_LNV3('N', '2', 'E', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (1st gen) */ TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (2nd gen) */ TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (3nd gen) */ + TPACPI_Q_LNV3('N', '4', '0', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (4nd gen) */ TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL), /* P15 (1st gen) / P15v (1st gen) */ TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL), /* X1 Carbon (9th gen) */ }; @@ -9724,69 +9731,85 @@ static struct ibm_struct battery_driver_data = { * LCD Shadow subdriver, for the Lenovo PrivacyGuard feature */ -static int lcdshadow_state; +static struct drm_privacy_screen *lcdshadow_dev; +static acpi_handle lcdshadow_get_handle; +static acpi_handle lcdshadow_set_handle; -static int lcdshadow_on_off(bool state) +static int lcdshadow_set_sw_state(struct drm_privacy_screen *priv, + enum drm_privacy_screen_status state) { - acpi_handle set_shadow_handle; int output; - if (ACPI_FAILURE(acpi_get_handle(hkey_handle, "SSSS", &set_shadow_handle))) { - pr_warn("Thinkpad ACPI has no %s interface.\n", "SSSS"); + if (WARN_ON(!mutex_is_locked(&priv->lock))) return -EIO; - } - if (!acpi_evalf(set_shadow_handle, &output, NULL, "dd", (int)state)) + if (!acpi_evalf(lcdshadow_set_handle, &output, NULL, "dd", (int)state)) return -EIO; - lcdshadow_state = state; + priv->hw_state = priv->sw_state = state; return 0; } -static int lcdshadow_set(bool on) +static void lcdshadow_get_hw_state(struct drm_privacy_screen *priv) { - if (lcdshadow_state < 0) - return lcdshadow_state; - if (lcdshadow_state == on) - return 0; - return lcdshadow_on_off(on); + int output; + + if (!acpi_evalf(lcdshadow_get_handle, &output, NULL, "dd", 0)) + return; + + priv->hw_state = priv->sw_state = output & 0x1; } +static const struct drm_privacy_screen_ops lcdshadow_ops = { + .set_sw_state = lcdshadow_set_sw_state, + .get_hw_state = lcdshadow_get_hw_state, +}; + static int tpacpi_lcdshadow_init(struct ibm_init_struct *iibm) { - acpi_handle get_shadow_handle; + acpi_status status1, status2; int output; - if 
(ACPI_FAILURE(acpi_get_handle(hkey_handle, "GSSS", &get_shadow_handle))) { - lcdshadow_state = -ENODEV; + status1 = acpi_get_handle(hkey_handle, "GSSS", &lcdshadow_get_handle); + status2 = acpi_get_handle(hkey_handle, "SSSS", &lcdshadow_set_handle); + if (ACPI_FAILURE(status1) || ACPI_FAILURE(status2)) return 0; - } - if (!acpi_evalf(get_shadow_handle, &output, NULL, "dd", 0)) { - lcdshadow_state = -EIO; + if (!acpi_evalf(lcdshadow_get_handle, &output, NULL, "dd", 0)) return -EIO; - } - if (!(output & 0x10000)) { - lcdshadow_state = -ENODEV; + + if (!(output & 0x10000)) return 0; - } - lcdshadow_state = output & 0x1; + + lcdshadow_dev = drm_privacy_screen_register(&tpacpi_pdev->dev, + &lcdshadow_ops); + if (IS_ERR(lcdshadow_dev)) + return PTR_ERR(lcdshadow_dev); return 0; } +static void lcdshadow_exit(void) +{ + drm_privacy_screen_unregister(lcdshadow_dev); +} + static void lcdshadow_resume(void) { - if (lcdshadow_state >= 0) - lcdshadow_on_off(lcdshadow_state); + if (!lcdshadow_dev) + return; + + mutex_lock(&lcdshadow_dev->lock); + lcdshadow_set_sw_state(lcdshadow_dev, lcdshadow_dev->sw_state); + mutex_unlock(&lcdshadow_dev->lock); } static int lcdshadow_read(struct seq_file *m) { - if (lcdshadow_state < 0) { + if (!lcdshadow_dev) { seq_puts(m, "status:\t\tnot supported\n"); } else { - seq_printf(m, "status:\t\t%d\n", lcdshadow_state); + seq_printf(m, "status:\t\t%d\n", lcdshadow_dev->hw_state); seq_puts(m, "commands:\t0, 1\n"); } @@ -9798,7 +9821,7 @@ static int lcdshadow_write(char *buf) char *cmd; int res, state = -EINVAL; - if (lcdshadow_state < 0) + if (!lcdshadow_dev) return -ENODEV; while ((cmd = strsep(&buf, ","))) { @@ -9810,11 +9833,18 @@ static int lcdshadow_write(char *buf) if (state >= 2 || state < 0) return -EINVAL; - return lcdshadow_set(state); + mutex_lock(&lcdshadow_dev->lock); + res = lcdshadow_set_sw_state(lcdshadow_dev, state); + mutex_unlock(&lcdshadow_dev->lock); + + drm_privacy_screen_call_notifier_chain(lcdshadow_dev); + + return res; } static struct ibm_struct lcdshadow_driver_data = { .name = "lcdshadow", + .exit = lcdshadow_exit, .resume = lcdshadow_resume, .read = lcdshadow_read, .write = lcdshadow_write, @@ -10624,6 +10654,20 @@ static void tpacpi_driver_event(const unsigned int hkey_event) if (!atomic_add_unless(&dytc_ignore_event, -1, 0)) dytc_profile_refresh(); } + + if (lcdshadow_dev && hkey_event == TP_HKEY_EV_PRIVACYGUARD_TOGGLE) { + enum drm_privacy_screen_status old_hw_state; + bool changed; + + mutex_lock(&lcdshadow_dev->lock); + old_hw_state = lcdshadow_dev->hw_state; + lcdshadow_get_hw_state(lcdshadow_dev); + changed = lcdshadow_dev->hw_state != old_hw_state; + mutex_unlock(&lcdshadow_dev->lock); + + if (changed) + drm_privacy_screen_call_notifier_chain(lcdshadow_dev); + } } static void hotkey_driver_event(const unsigned int scancode) diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index fa8812039b82..17dd54d4b783 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -905,6 +905,16 @@ static const struct ts_dmi_data trekstor_primetab_t13b_data = { .properties = trekstor_primetab_t13b_props, }; +static const struct property_entry trekstor_surftab_duo_w1_props[] = { + PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"), + { } +}; + +static const struct ts_dmi_data trekstor_surftab_duo_w1_data = { + .acpi_name = "GDIX1001:00", + .properties = trekstor_surftab_duo_w1_props, +}; + static const struct property_entry trekstor_surftab_twin_10_1_props[] = { 
PROPERTY_ENTRY_U32("touchscreen-min-x", 20), PROPERTY_ENTRY_U32("touchscreen-min-y", 0), @@ -1503,6 +1513,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = { }, }, { + /* TrekStor SurfTab duo W1 10.1 ST10432-10b */ + .driver_data = (void *)&trekstor_surftab_duo_w1_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"), + DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab duo W1 10.1 (VT4)"), + }, + }, + { /* TrekStor SurfTab twin 10.1 ST10432-8 */ .driver_data = (void *)&trekstor_surftab_twin_10_1_data, .matches = { diff --git a/drivers/powercap/dtpm.c b/drivers/powercap/dtpm.c index b9fac786246a..2a5c1829aab7 100644 --- a/drivers/powercap/dtpm.c +++ b/drivers/powercap/dtpm.c @@ -463,17 +463,12 @@ int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent) static int __init init_dtpm(void) { - struct dtpm_descr *dtpm_descr; - pct = powercap_register_control_type(NULL, "dtpm", NULL); if (IS_ERR(pct)) { pr_err("Failed to register control type\n"); return PTR_ERR(pct); } - for_each_dtpm_table(dtpm_descr) - dtpm_descr->init(); - return 0; } late_initcall(init_dtpm); diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c index 44faa3a74db6..b740866b228d 100644 --- a/drivers/powercap/dtpm_cpu.c +++ b/drivers/powercap/dtpm_cpu.c @@ -166,16 +166,13 @@ static struct dtpm_ops dtpm_ops = { static int cpuhp_dtpm_cpu_offline(unsigned int cpu) { - struct em_perf_domain *pd; struct dtpm_cpu *dtpm_cpu; - pd = em_cpu_get(cpu); - if (!pd) - return -EINVAL; - dtpm_cpu = per_cpu(dtpm_per_cpu, cpu); + if (dtpm_cpu) + dtpm_update_power(&dtpm_cpu->dtpm); - return dtpm_update_power(&dtpm_cpu->dtpm); + return 0; } static int cpuhp_dtpm_cpu_online(unsigned int cpu) diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c index 6bc5791a7ec5..08e429a06922 100644 --- a/drivers/ptp/ptp_clockmatrix.c +++ b/drivers/ptp/ptp_clockmatrix.c @@ -1699,12 +1699,9 @@ static int initialize_dco_operating_mode(struct idtcm_channel *channel) /* PTP Hardware Clock interface */ -/** +/* * Maximum absolute value for write phase offset in picoseconds * - * @channel: channel - * @delta_ns: delta in nanoseconds - * * Destination signed register is 32-bit register in resolution of 50ps * * 0x7fffffff * 50 = 2147483647 * 50 = 107374182350 diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index 34f943c8c9fd..0f1b5a7d2a89 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -1304,10 +1304,11 @@ ptp_ocp_register_ext(struct ptp_ocp *bp, struct ocp_resource *r) if (!ext) return -ENOMEM; - err = -EINVAL; ext->mem = ptp_ocp_get_mem(bp, r); - if (!ext->mem) + if (IS_ERR(ext->mem)) { + err = PTR_ERR(ext->mem); goto out; + } ext->bp = bp; ext->info = r->extra; @@ -1371,8 +1372,8 @@ ptp_ocp_register_mem(struct ptp_ocp *bp, struct ocp_resource *r) void __iomem *mem; mem = ptp_ocp_get_mem(bp, r); - if (!mem) - return -EINVAL; + if (IS_ERR(mem)) + return PTR_ERR(mem); bp_assign_entry(bp, r, mem); diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index fb04a439462c..93772ab8d7e3 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -152,6 +152,32 @@ of_pwm_xlate_with_flags(struct pwm_chip *pc, const struct of_phandle_args *args) } EXPORT_SYMBOL_GPL(of_pwm_xlate_with_flags); +struct pwm_device * +of_pwm_single_xlate(struct pwm_chip *pc, const struct of_phandle_args *args) +{ + struct pwm_device *pwm; + + if (pc->of_pwm_n_cells < 1) + return ERR_PTR(-EINVAL); + + /* validate that one cell is specified, optionally with flags */ + if (args->args_count != 1 && args->args_count != 
2) + return ERR_PTR(-EINVAL); + + pwm = pwm_request_from_chip(pc, 0, NULL); + if (IS_ERR(pwm)) + return pwm; + + pwm->args.period = args->args[0]; + pwm->args.polarity = PWM_POLARITY_NORMAL; + + if (args->args_count == 2 && args->args[1] & PWM_POLARITY_INVERTED) + pwm->args.polarity = PWM_POLARITY_INVERSED; + + return pwm; +} +EXPORT_SYMBOL_GPL(of_pwm_single_xlate); + static void of_pwmchip_add(struct pwm_chip *chip) { if (!chip->dev || !chip->dev->of_node) diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c index a9efdcf839ae..238ec88c130b 100644 --- a/drivers/pwm/pwm-pxa.c +++ b/drivers/pwm/pwm-pxa.c @@ -148,20 +148,6 @@ static const struct platform_device_id *pxa_pwm_get_id_dt(struct device *dev) return id ? id->data : NULL; } -static struct pwm_device * -pxa_pwm_of_xlate(struct pwm_chip *pc, const struct of_phandle_args *args) -{ - struct pwm_device *pwm; - - pwm = pwm_request_from_chip(pc, 0, NULL); - if (IS_ERR(pwm)) - return pwm; - - pwm->args.period = args->args[0]; - - return pwm; -} - static int pwm_probe(struct platform_device *pdev) { const struct platform_device_id *id = platform_get_device_id(pdev); @@ -187,7 +173,7 @@ static int pwm_probe(struct platform_device *pdev) pc->chip.npwm = (id->driver_data & HAS_SECONDARY_PWM) ? 2 : 1; if (IS_ENABLED(CONFIG_OF)) { - pc->chip.of_xlate = pxa_pwm_of_xlate; + pc->chip.of_xlate = of_pwm_single_xlate; pc->chip.of_pwm_n_cells = 1; } diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 2c40fe15da55..6043c832d09e 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -731,7 +731,7 @@ static ssize_t dasd_ff_show(struct device *dev, struct device_attribute *attr, ff_flag = (devmap->features & DASD_FEATURE_FAILFAST) != 0; else ff_flag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_FAILFAST) != 0; - return snprintf(buf, PAGE_SIZE, ff_flag ? "1\n" : "0\n"); + return sysfs_emit(buf, ff_flag ? "1\n" : "0\n"); } static ssize_t dasd_ff_store(struct device *dev, struct device_attribute *attr, @@ -773,7 +773,7 @@ dasd_ro_show(struct device *dev, struct device_attribute *attr, char *buf) spin_unlock(&dasd_devmap_lock); out: - return snprintf(buf, PAGE_SIZE, ro_flag ? "1\n" : "0\n"); + return sysfs_emit(buf, ro_flag ? "1\n" : "0\n"); } static ssize_t @@ -834,7 +834,7 @@ dasd_erplog_show(struct device *dev, struct device_attribute *attr, char *buf) erplog = (devmap->features & DASD_FEATURE_ERPLOG) != 0; else erplog = (DASD_FEATURE_DEFAULT & DASD_FEATURE_ERPLOG) != 0; - return snprintf(buf, PAGE_SIZE, erplog ? "1\n" : "0\n"); + return sysfs_emit(buf, erplog ? 
"1\n" : "0\n"); } static ssize_t @@ -1033,13 +1033,13 @@ dasd_discipline_show(struct device *dev, struct device_attribute *attr, dasd_put_device(device); goto out; } else { - len = snprintf(buf, PAGE_SIZE, "%s\n", - device->discipline->name); + len = sysfs_emit(buf, "%s\n", + device->discipline->name); dasd_put_device(device); return len; } out: - len = snprintf(buf, PAGE_SIZE, "none\n"); + len = sysfs_emit(buf, "none\n"); return len; } @@ -1056,30 +1056,30 @@ dasd_device_status_show(struct device *dev, struct device_attribute *attr, if (!IS_ERR(device)) { switch (device->state) { case DASD_STATE_NEW: - len = snprintf(buf, PAGE_SIZE, "new\n"); + len = sysfs_emit(buf, "new\n"); break; case DASD_STATE_KNOWN: - len = snprintf(buf, PAGE_SIZE, "detected\n"); + len = sysfs_emit(buf, "detected\n"); break; case DASD_STATE_BASIC: - len = snprintf(buf, PAGE_SIZE, "basic\n"); + len = sysfs_emit(buf, "basic\n"); break; case DASD_STATE_UNFMT: - len = snprintf(buf, PAGE_SIZE, "unformatted\n"); + len = sysfs_emit(buf, "unformatted\n"); break; case DASD_STATE_READY: - len = snprintf(buf, PAGE_SIZE, "ready\n"); + len = sysfs_emit(buf, "ready\n"); break; case DASD_STATE_ONLINE: - len = snprintf(buf, PAGE_SIZE, "online\n"); + len = sysfs_emit(buf, "online\n"); break; default: - len = snprintf(buf, PAGE_SIZE, "no stat\n"); + len = sysfs_emit(buf, "no stat\n"); break; } dasd_put_device(device); } else - len = snprintf(buf, PAGE_SIZE, "unknown\n"); + len = sysfs_emit(buf, "unknown\n"); return len; } @@ -1120,7 +1120,7 @@ static ssize_t dasd_vendor_show(struct device *dev, device = dasd_device_from_cdev(to_ccwdev(dev)); vendor = ""; if (IS_ERR(device)) - return snprintf(buf, PAGE_SIZE, "%s\n", vendor); + return sysfs_emit(buf, "%s\n", vendor); if (device->discipline && device->discipline->get_uid && !device->discipline->get_uid(device, &uid)) @@ -1128,7 +1128,7 @@ static ssize_t dasd_vendor_show(struct device *dev, dasd_put_device(device); - return snprintf(buf, PAGE_SIZE, "%s\n", vendor); + return sysfs_emit(buf, "%s\n", vendor); } static DEVICE_ATTR(vendor, 0444, dasd_vendor_show, NULL); @@ -1148,7 +1148,7 @@ dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf) device = dasd_device_from_cdev(to_ccwdev(dev)); uid_string[0] = 0; if (IS_ERR(device)) - return snprintf(buf, PAGE_SIZE, "%s\n", uid_string); + return sysfs_emit(buf, "%s\n", uid_string); if (device->discipline && device->discipline->get_uid && !device->discipline->get_uid(device, &uid)) { @@ -1183,7 +1183,7 @@ dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf) } dasd_put_device(device); - return snprintf(buf, PAGE_SIZE, "%s\n", uid_string); + return sysfs_emit(buf, "%s\n", uid_string); } static DEVICE_ATTR(uid, 0444, dasd_uid_show, NULL); @@ -1201,7 +1201,7 @@ dasd_eer_show(struct device *dev, struct device_attribute *attr, char *buf) eer_flag = dasd_eer_enabled(devmap->device); else eer_flag = 0; - return snprintf(buf, PAGE_SIZE, eer_flag ? "1\n" : "0\n"); + return sysfs_emit(buf, eer_flag ? 
"1\n" : "0\n"); } static ssize_t @@ -1243,7 +1243,7 @@ dasd_expires_show(struct device *dev, struct device_attribute *attr, char *buf) device = dasd_device_from_cdev(to_ccwdev(dev)); if (IS_ERR(device)) return -ENODEV; - len = snprintf(buf, PAGE_SIZE, "%lu\n", device->default_expires); + len = sysfs_emit(buf, "%lu\n", device->default_expires); dasd_put_device(device); return len; } @@ -1283,7 +1283,7 @@ dasd_retries_show(struct device *dev, struct device_attribute *attr, char *buf) device = dasd_device_from_cdev(to_ccwdev(dev)); if (IS_ERR(device)) return -ENODEV; - len = snprintf(buf, PAGE_SIZE, "%lu\n", device->default_retries); + len = sysfs_emit(buf, "%lu\n", device->default_retries); dasd_put_device(device); return len; } @@ -1324,7 +1324,7 @@ dasd_timeout_show(struct device *dev, struct device_attribute *attr, device = dasd_device_from_cdev(to_ccwdev(dev)); if (IS_ERR(device)) return -ENODEV; - len = snprintf(buf, PAGE_SIZE, "%lu\n", device->blk_timeout); + len = sysfs_emit(buf, "%lu\n", device->blk_timeout); dasd_put_device(device); return len; } @@ -1398,11 +1398,11 @@ static ssize_t dasd_hpf_show(struct device *dev, struct device_attribute *attr, return -ENODEV; if (!device->discipline || !device->discipline->hpf_enabled) { dasd_put_device(device); - return snprintf(buf, PAGE_SIZE, "%d\n", dasd_nofcx); + return sysfs_emit(buf, "%d\n", dasd_nofcx); } hpf = device->discipline->hpf_enabled(device); dasd_put_device(device); - return snprintf(buf, PAGE_SIZE, "%d\n", hpf); + return sysfs_emit(buf, "%d\n", hpf); } static DEVICE_ATTR(hpf, 0444, dasd_hpf_show, NULL); @@ -1416,13 +1416,13 @@ static ssize_t dasd_reservation_policy_show(struct device *dev, devmap = dasd_find_busid(dev_name(dev)); if (IS_ERR(devmap)) { - rc = snprintf(buf, PAGE_SIZE, "ignore\n"); + rc = sysfs_emit(buf, "ignore\n"); } else { spin_lock(&dasd_devmap_lock); if (devmap->features & DASD_FEATURE_FAILONSLCK) - rc = snprintf(buf, PAGE_SIZE, "fail\n"); + rc = sysfs_emit(buf, "fail\n"); else - rc = snprintf(buf, PAGE_SIZE, "ignore\n"); + rc = sysfs_emit(buf, "ignore\n"); spin_unlock(&dasd_devmap_lock); } return rc; @@ -1457,14 +1457,14 @@ static ssize_t dasd_reservation_state_show(struct device *dev, device = dasd_device_from_cdev(to_ccwdev(dev)); if (IS_ERR(device)) - return snprintf(buf, PAGE_SIZE, "none\n"); + return sysfs_emit(buf, "none\n"); if (test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) - rc = snprintf(buf, PAGE_SIZE, "reserved\n"); + rc = sysfs_emit(buf, "reserved\n"); else if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) - rc = snprintf(buf, PAGE_SIZE, "lost\n"); + rc = sysfs_emit(buf, "lost\n"); else - rc = snprintf(buf, PAGE_SIZE, "none\n"); + rc = sysfs_emit(buf, "none\n"); dasd_put_device(device); return rc; } @@ -1531,7 +1531,7 @@ dasd_path_threshold_show(struct device *dev, device = dasd_device_from_cdev(to_ccwdev(dev)); if (IS_ERR(device)) return -ENODEV; - len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_thrhld); + len = sysfs_emit(buf, "%lu\n", device->path_thrhld); dasd_put_device(device); return len; } @@ -1578,7 +1578,7 @@ dasd_path_autodisable_show(struct device *dev, else flag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_PATH_AUTODISABLE) != 0; - return snprintf(buf, PAGE_SIZE, flag ? "1\n" : "0\n"); + return sysfs_emit(buf, flag ? 
"1\n" : "0\n"); } static ssize_t @@ -1616,7 +1616,7 @@ dasd_path_interval_show(struct device *dev, device = dasd_device_from_cdev(to_ccwdev(dev)); if (IS_ERR(device)) return -ENODEV; - len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_interval); + len = sysfs_emit(buf, "%lu\n", device->path_interval); dasd_put_device(device); return len; } @@ -1662,9 +1662,9 @@ dasd_device_fcs_show(struct device *dev, struct device_attribute *attr, return -ENODEV; fc_sec = dasd_path_get_fcs_device(device); if (fc_sec == -EINVAL) - rc = snprintf(buf, PAGE_SIZE, "Inconsistent\n"); + rc = sysfs_emit(buf, "Inconsistent\n"); else - rc = snprintf(buf, PAGE_SIZE, "%s\n", dasd_path_get_fcs_str(fc_sec)); + rc = sysfs_emit(buf, "%s\n", dasd_path_get_fcs_str(fc_sec)); dasd_put_device(device); return rc; @@ -1677,7 +1677,7 @@ dasd_path_fcs_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) struct dasd_path *path = to_dasd_path(kobj); unsigned int fc_sec = path->fc_security; - return snprintf(buf, PAGE_SIZE, "%s\n", dasd_path_get_fcs_str(fc_sec)); + return sysfs_emit(buf, "%s\n", dasd_path_get_fcs_str(fc_sec)); } static struct kobj_attribute path_fcs_attribute = @@ -1698,7 +1698,7 @@ static ssize_t dasd_##_name##_show(struct device *dev, \ val = _func(device); \ dasd_put_device(device); \ \ - return snprintf(buf, PAGE_SIZE, "%d\n", val); \ + return sysfs_emit(buf, "%d\n", val); \ } \ static DEVICE_ATTR(_name, 0444, dasd_##_name##_show, NULL); \ diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index 646ec796bb83..dfde0d941c3c 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c @@ -1047,24 +1047,24 @@ raw3270_probe (struct ccw_device *cdev) static ssize_t raw3270_model_show(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%i\n", - ((struct raw3270 *) dev_get_drvdata(dev))->model); + return sysfs_emit(buf, "%i\n", + ((struct raw3270 *)dev_get_drvdata(dev))->model); } static DEVICE_ATTR(model, 0444, raw3270_model_show, NULL); static ssize_t raw3270_rows_show(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%i\n", - ((struct raw3270 *) dev_get_drvdata(dev))->rows); + return sysfs_emit(buf, "%i\n", + ((struct raw3270 *)dev_get_drvdata(dev))->rows); } static DEVICE_ATTR(rows, 0444, raw3270_rows_show, NULL); static ssize_t raw3270_columns_show(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%i\n", - ((struct raw3270 *) dev_get_drvdata(dev))->cols); + return sysfs_emit(buf, "%i\n", + ((struct raw3270 *)dev_get_drvdata(dev))->cols); } static DEVICE_ATTR(columns, 0444, raw3270_columns_show, NULL); diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 1097e76982a5..5440f285f349 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -285,7 +285,7 @@ static ssize_t chp_configure_show(struct device *dev, if (status < 0) return status; - return snprintf(buf, PAGE_SIZE, "%d\n", status); + return sysfs_emit(buf, "%d\n", status); } static int cfg_wait_idle(void); diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index b940e0268f96..e83453bea2ae 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -5095,14 +5095,9 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* NPort Recovery mode or node is just allocated */ if (!lpfc_nlp_not_used(ndlp)) { /* A LOGO is completing and the node is in NPR state. 
- * If this a fabric node that cleared its transport - * registration, release the rpi. + * Just unregister the RPI because the node is still + * required. */ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - if (phba->sli_rev == LPFC_SLI_REV4) - ndlp->nlp_flag |= NLP_RELEASE_RPI; - spin_unlock_irq(&ndlp->lock); lpfc_unreg_rpi(vport, ndlp); } else { /* Indicate the node has already released, should diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 27eb652b564f..81dab9b82f79 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -639,8 +639,8 @@ static void _base_sync_drv_fw_timestamp(struct MPT3SAS_ADAPTER *ioc) mpi_request->IOCParameter = MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP; current_time = ktime_get_real(); TimeStamp = ktime_to_ms(current_time); - mpi_request->Reserved7 = cpu_to_le32(TimeStamp & 0xFFFFFFFF); - mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp >> 32); + mpi_request->Reserved7 = cpu_to_le32(TimeStamp >> 32); + mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp & 0xFFFFFFFF); init_completion(&ioc->scsih_cmds.done); ioc->put_smid_default(ioc, smid); dinitprintk(ioc, ioc_info(ioc, diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index db6a759de1e9..a0af986633d2 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -142,6 +142,8 @@ #define MPT_MAX_CALLBACKS 32 +#define MPT_MAX_HBA_NUM_PHYS 32 + #define INTERNAL_CMDS_COUNT 10 /* reserved cmds */ /* reserved for issuing internally framed scsi io cmds */ #define INTERNAL_SCSIIO_CMDS_COUNT 3 @@ -798,6 +800,7 @@ struct _sas_phy { * @enclosure_handle: handle for this a member of an enclosure * @device_info: bitwise defining capabilities of this sas_host/expander * @responding: used in _scsih_expander_device_mark_responding + * @nr_phys_allocated: number of phys for which memory has been allocated * @phy: a list of phys that make up this sas_host/expander * @sas_port_list: list of ports attached to this sas_host/expander * @port: hba port entry containing node's port number info @@ -813,6 +816,7 @@ struct _sas_node { u16 enclosure_handle; u64 enclosure_logical_id; u8 responding; + u8 nr_phys_allocated; struct hba_port *port; struct _sas_phy *phy; struct list_head sas_port_list; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index cee7170beae8..00792767c620 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -3869,7 +3869,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, shost_for_each_device(sdev, ioc->shost) { sas_device_priv_data = sdev->hostdata; - if (!sas_device_priv_data) + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) continue; if (sas_device_priv_data->sas_target->sas_address != sas_address) @@ -6406,11 +6406,26 @@ _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc) int i, j, count = 0, lcount = 0; int ret; u64 sas_addr; + u8 num_phys; drsprintk(ioc, ioc_info(ioc, "updating ports for sas_host(0x%016llx)\n", (unsigned long long)ioc->sas_hba.sas_address)); + mpt3sas_config_get_number_hba_phys(ioc, &num_phys); + if (!num_phys) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + + if (num_phys > ioc->sas_hba.nr_phys_allocated) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return; + } + ioc->sas_hba.num_phys = num_phys; + port_table = kcalloc(ioc->sas_hba.num_phys, 
sizeof(struct hba_port), GFP_KERNEL); if (!port_table) @@ -6611,6 +6626,30 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc) ioc->sas_hba.phy[i].hba_vphy = 1; } + /* + * Add new HBA phys to STL if these new phys got added as part + * of HBA Firmware upgrade/downgrade operation. + */ + if (!ioc->sas_hba.phy[i].phy) { + if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, + &phy_pg0, i))) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + continue; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + continue; + } + ioc->sas_hba.phy[i].phy_id = i; + mpt3sas_transport_add_host_phy(ioc, + &ioc->sas_hba.phy[i], phy_pg0, + ioc->sas_hba.parent_dev); + continue; + } ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i]. AttachedDevHandle); @@ -6622,6 +6661,19 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc) attached_handle, i, link_rate, ioc->sas_hba.phy[i].port); } + /* + * Clear the phy details if this phy got disabled as part of + * HBA Firmware upgrade/downgrade operation. + */ + for (i = ioc->sas_hba.num_phys; + i < ioc->sas_hba.nr_phys_allocated; i++) { + if (ioc->sas_hba.phy[i].phy && + ioc->sas_hba.phy[i].phy->negotiated_linkrate >= + SAS_LINK_RATE_1_5_GBPS) + mpt3sas_transport_update_links(ioc, + ioc->sas_hba.sas_address, 0, i, + MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED, NULL); + } out: kfree(sas_iounit_pg0); } @@ -6654,7 +6706,10 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) __FILE__, __LINE__, __func__); return; } - ioc->sas_hba.phy = kcalloc(num_phys, + + ioc->sas_hba.nr_phys_allocated = max_t(u8, + MPT_MAX_HBA_NUM_PHYS, num_phys); + ioc->sas_hba.phy = kcalloc(ioc->sas_hba.nr_phys_allocated, sizeof(struct _sas_phy), GFP_KERNEL); if (!ioc->sas_hba.phy) { ioc_err(ioc, "failure at %s:%d/%s()!\n", diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index bed8cc125544..fbfeb0b046dd 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -282,12 +282,12 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha, if (rc) { pm8001_dbg(pm8001_ha, FAIL, "pm8001_setup_irq failed [ret: %d]\n", rc); - goto err_out_shost; + goto err_out; } /* Request Interrupt */ rc = pm8001_request_irq(pm8001_ha); if (rc) - goto err_out_shost; + goto err_out; count = pm8001_ha->max_q_num; /* Queues are chosen based on the number of cores/msix availability */ @@ -423,8 +423,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha, pm8001_tag_init(pm8001_ha); return 0; -err_out_shost: - scsi_remove_host(pm8001_ha->shost); err_out_nodev: for (i = 0; i < pm8001_ha->max_memcnt; i++) { if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) { diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index 84a4204a2cb4..5916ed7662d5 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c @@ -732,7 +732,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi, { struct qedi_work_map *work, *work_tmp; u32 proto_itt = cqe->itid; - itt_t protoitt = 0; int found = 0; struct qedi_cmd *qedi_cmd = NULL; u32 iscsi_cid; @@ -812,16 +811,12 @@ unlock: return; check_cleanup_reqs: - if (qedi_conn->cmd_cleanup_req > 0) { - QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, + if (atomic_inc_return(&qedi_conn->cmd_cleanup_cmpl) == + qedi_conn->cmd_cleanup_req) { + QEDI_INFO(&qedi->dbg_ctx, 
QEDI_LOG_SCSI_TM, "Freeing tid=0x%x for cid=0x%x\n", cqe->itid, qedi_conn->iscsi_conn_id); - qedi_conn->cmd_cleanup_cmpl++; wake_up(&qedi_conn->wait_queue); - } else { - QEDI_ERR(&qedi->dbg_ctx, - "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x\n", - protoitt, cqe->itid, qedi_conn->iscsi_conn_id); } } @@ -1163,7 +1158,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn, } qedi_conn->cmd_cleanup_req = 0; - qedi_conn->cmd_cleanup_cmpl = 0; + atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0); QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "active_cmd_count=%d, cid=0x%x, in_recovery=%d, lun_reset=%d\n", @@ -1215,16 +1210,15 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn, qedi_conn->iscsi_conn_id); rval = wait_event_interruptible_timeout(qedi_conn->wait_queue, - ((qedi_conn->cmd_cleanup_req == - qedi_conn->cmd_cleanup_cmpl) || - test_bit(QEDI_IN_RECOVERY, - &qedi->flags)), - 5 * HZ); + (qedi_conn->cmd_cleanup_req == + atomic_read(&qedi_conn->cmd_cleanup_cmpl)) || + test_bit(QEDI_IN_RECOVERY, &qedi->flags), + 5 * HZ); if (rval) { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "i/o cmd_cleanup_req=%d, equal to cmd_cleanup_cmpl=%d, cid=0x%x\n", qedi_conn->cmd_cleanup_req, - qedi_conn->cmd_cleanup_cmpl, + atomic_read(&qedi_conn->cmd_cleanup_cmpl), qedi_conn->iscsi_conn_id); return 0; @@ -1233,7 +1227,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn, QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "i/o cmd_cleanup_req=%d, not equal to cmd_cleanup_cmpl=%d, cid=0x%x\n", qedi_conn->cmd_cleanup_req, - qedi_conn->cmd_cleanup_cmpl, + atomic_read(&qedi_conn->cmd_cleanup_cmpl), qedi_conn->iscsi_conn_id); iscsi_host_for_each_session(qedi->shost, @@ -1242,11 +1236,10 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn, /* Enable IOs for all other sessions except current.*/ if (!wait_event_interruptible_timeout(qedi_conn->wait_queue, - (qedi_conn->cmd_cleanup_req == - qedi_conn->cmd_cleanup_cmpl) || - test_bit(QEDI_IN_RECOVERY, - &qedi->flags), - 5 * HZ)) { + (qedi_conn->cmd_cleanup_req == + atomic_read(&qedi_conn->cmd_cleanup_cmpl)) || + test_bit(QEDI_IN_RECOVERY, &qedi->flags), + 5 * HZ)) { iscsi_host_for_each_session(qedi->shost, qedi_mark_device_available); return -1; @@ -1266,7 +1259,7 @@ void qedi_clearsq(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn, qedi_ep = qedi_conn->ep; qedi_conn->cmd_cleanup_req = 0; - qedi_conn->cmd_cleanup_cmpl = 0; + atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0); if (!qedi_ep) { QEDI_WARN(&qedi->dbg_ctx, diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 88aa7d8b11c9..282ecb4e39bb 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -412,7 +412,7 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session, qedi_conn->iscsi_conn_id = qedi_ep->iscsi_cid; qedi_conn->fw_cid = qedi_ep->fw_cid; qedi_conn->cmd_cleanup_req = 0; - qedi_conn->cmd_cleanup_cmpl = 0; + atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0); if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn)) { rc = -EINVAL; diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h index a282860da0aa..9b9f2e44fdde 100644 --- a/drivers/scsi/qedi/qedi_iscsi.h +++ b/drivers/scsi/qedi/qedi_iscsi.h @@ -155,7 +155,7 @@ struct qedi_conn { spinlock_t list_lock; /* internal conn lock */ u32 active_cmd_count; u32 cmd_cleanup_req; - u32 cmd_cleanup_cmpl; + atomic_t cmd_cleanup_cmpl; u32 iscsi_conn_id; int itt; diff --git 
a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 25549a8a2d72..7cf1f78cbaee 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -2491,6 +2491,9 @@ ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...) struct va_format vaf; char pbuf[64]; + if (!ql_mask_match(level) && !trace_ql_dbg_log_enabled()) + return; + va_start(va, fmt); vaf.fmt = fmt; diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c index 2e37b189cb75..53d2b8562027 100644 --- a/drivers/scsi/qla2xxx/qla_edif.c +++ b/drivers/scsi/qla2xxx/qla_edif.c @@ -865,7 +865,7 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job) "APP request entry - portid=%06x.\n", tdid.b24); /* Ran out of space */ - if (pcnt > app_req.num_ports) + if (pcnt >= app_req.num_ports) break; if (tdid.b24 != 0 && tdid.b24 != fcport->d_id.b24) diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 73a353153d33..10d2655ef676 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -1695,10 +1695,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10; if (IS_FWI2_CAPABLE(vha->hw)) mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16; - if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) { - mcp->in_mb |= MBX_15; - mcp->out_mb |= MBX_7|MBX_21|MBX_22|MBX_23; - } + if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) + mcp->in_mb |= MBX_15|MBX_21|MBX_22|MBX_23; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 1d0278da9041..2104973a35cd 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -1189,7 +1189,7 @@ static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr, __func__, off_dst, scsi_bufflen(scp), act_len, scsi_get_resid(scp)); n = scsi_bufflen(scp) - (off_dst + act_len); - scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n)); + scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n)); return 0; } @@ -1562,7 +1562,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) unsigned char pq_pdt; unsigned char *arr; unsigned char *cmd = scp->cmnd; - int alloc_len, n, ret; + u32 alloc_len, n; + int ret; bool have_wlun, is_disk, is_zbc, is_disk_zbc; alloc_len = get_unaligned_be16(cmd + 3); @@ -1585,7 +1586,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) kfree(arr); return check_condition_result; } else if (0x1 & cmd[1]) { /* EVPD bit set */ - int lu_id_num, port_group_id, target_dev_id, len; + int lu_id_num, port_group_id, target_dev_id; + u32 len; char lu_id_str[6]; int host_no = devip->sdbg_host->shost->host_no; @@ -1676,9 +1678,9 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) kfree(arr); return check_condition_result; } - len = min(get_unaligned_be16(arr + 2) + 4, alloc_len); + len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len); ret = fill_from_dev_buffer(scp, arr, - min(len, SDEBUG_MAX_INQ_ARR_SZ)); + min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ)); kfree(arr); return ret; } @@ -1714,7 +1716,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) } put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */ ret = fill_from_dev_buffer(scp, arr, - min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ)); + min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ)); kfree(arr); return ret; } @@ -1729,8 +1731,8 @@ static int resp_requests(struct 
scsi_cmnd *scp, unsigned char *cmd = scp->cmnd; unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */ bool dsense = !!(cmd[1] & 1); - int alloc_len = cmd[4]; - int len = 18; + u32 alloc_len = cmd[4]; + u32 len = 18; int stopped_state = atomic_read(&devip->stopped); memset(arr, 0, sizeof(arr)); @@ -1774,7 +1776,7 @@ static int resp_requests(struct scsi_cmnd *scp, arr[7] = 0xa; } } - return fill_from_dev_buffer(scp, arr, min_t(int, len, alloc_len)); + return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len)); } static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) @@ -2312,7 +2314,8 @@ static int resp_mode_sense(struct scsi_cmnd *scp, { int pcontrol, pcode, subpcode, bd_len; unsigned char dev_spec; - int alloc_len, offset, len, target_dev_id; + u32 alloc_len, offset, len; + int target_dev_id; int target = scp->device->id; unsigned char *ap; unsigned char arr[SDEBUG_MAX_MSENSE_SZ]; @@ -2468,7 +2471,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp, arr[0] = offset - 1; else put_unaligned_be16((offset - 2), arr + 0); - return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset)); + return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset)); } #define SDEBUG_MAX_MSELECT_SZ 512 @@ -2499,11 +2502,11 @@ static int resp_mode_select(struct scsi_cmnd *scp, __func__, param_len, res); md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2); bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6); - if (md_len > 2) { + off = bd_len + (mselect6 ? 4 : 8); + if (md_len > 2 || off >= res) { mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1); return check_condition_result; } - off = bd_len + (mselect6 ? 4 : 8); mpage = arr[off] & 0x3f; ps = !!(arr[off] & 0x80); if (ps) { @@ -2583,7 +2586,8 @@ static int resp_ie_l_pg(unsigned char *arr) static int resp_log_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) { - int ppc, sp, pcode, subpcode, alloc_len, len, n; + int ppc, sp, pcode, subpcode; + u32 alloc_len, len, n; unsigned char arr[SDEBUG_MAX_LSENSE_SZ]; unsigned char *cmd = scp->cmnd; @@ -2653,9 +2657,9 @@ static int resp_log_sense(struct scsi_cmnd *scp, mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); return check_condition_result; } - len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len); + len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len); return fill_from_dev_buffer(scp, arr, - min_t(int, len, SDEBUG_MAX_INQ_ARR_SZ)); + min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ)); } static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip) @@ -4338,7 +4342,7 @@ static int resp_report_zones(struct scsi_cmnd *scp, rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD), max_zones); - arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC); + arr = kzalloc(alloc_len, GFP_ATOMIC); if (!arr) { mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, INSUFF_RES_ASCQ); @@ -4430,7 +4434,7 @@ static int resp_report_zones(struct scsi_cmnd *scp, put_unaligned_be64(sdebug_capacity - 1, arr + 8); rep_len = (unsigned long)desc - (unsigned long)arr; - ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len)); + ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len)); fini: read_unlock(macc_lckp); @@ -4653,6 +4657,7 @@ static void zbc_rwp_zone(struct sdebug_dev_info *devip, struct sdeb_zone_state *zsp) { enum sdebug_z_cond zc; + struct sdeb_store_info *sip = devip2sip(devip, false); if (zbc_zone_is_conv(zsp)) return; @@ -4664,6 +4669,10 @@ static void zbc_rwp_zone(struct sdebug_dev_info *devip, if 
(zsp->z_cond == ZC4_CLOSED) devip->nr_closed--; + if (zsp->z_wp > zsp->z_start) + memset(sip->storep + zsp->z_start * sdebug_sector_size, 0, + (zsp->z_wp - zsp->z_start) * sdebug_sector_size); + zsp->z_non_seq_resource = false; zsp->z_wp = zsp->z_start; zsp->z_cond = ZC1_EMPTY; diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 55addd78fde4..d4edce930a4a 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -792,6 +792,7 @@ store_state_field(struct device *dev, struct device_attribute *attr, int i, ret; struct scsi_device *sdev = to_scsi_device(dev); enum scsi_device_state state = 0; + bool rescan_dev = false; for (i = 0; i < ARRAY_SIZE(sdev_states); i++) { const int len = strlen(sdev_states[i].name); @@ -810,20 +811,27 @@ store_state_field(struct device *dev, struct device_attribute *attr, } mutex_lock(&sdev->state_mutex); - ret = scsi_device_set_state(sdev, state); - /* - * If the device state changes to SDEV_RUNNING, we need to - * run the queue to avoid I/O hang, and rescan the device - * to revalidate it. Running the queue first is necessary - * because another thread may be waiting inside - * blk_mq_freeze_queue_wait() and because that call may be - * waiting for pending I/O to finish. - */ - if (ret == 0 && state == SDEV_RUNNING) { + if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) { + ret = 0; + } else { + ret = scsi_device_set_state(sdev, state); + if (ret == 0 && state == SDEV_RUNNING) + rescan_dev = true; + } + mutex_unlock(&sdev->state_mutex); + + if (rescan_dev) { + /* + * If the device state changes to SDEV_RUNNING, we need to + * run the queue to avoid I/O hang, and rescan the device + * to revalidate it. Running the queue first is necessary + * because another thread may be waiting inside + * blk_mq_freeze_queue_wait() and because that call may be + * waiting for pending I/O to finish. + */ blk_mq_run_hw_queues(sdev->request_queue, true); scsi_rescan_device(dev); } - mutex_unlock(&sdev->state_mutex); return ret == 0 ? 
count : -EINVAL; } diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 78343d3f9385..554b6f784223 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -1899,12 +1899,12 @@ static void session_recovery_timedout(struct work_struct *work) } spin_unlock_irqrestore(&session->lock, flags); - if (session->transport->session_recovery_timedout) - session->transport->session_recovery_timedout(session); - ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n"); scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE); ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n"); + + if (session->transport->session_recovery_timedout) + session->transport->session_recovery_timedout(session); } static void __iscsi_unblock_session(struct work_struct *work) diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c index fc5b214347b3..5393b5c9dd9c 100644 --- a/drivers/scsi/ufs/ufs-mediatek.c +++ b/drivers/scsi/ufs/ufs-mediatek.c @@ -1189,6 +1189,7 @@ static int ufs_mtk_probe(struct platform_device *pdev) } link = device_link_add(dev, &reset_pdev->dev, DL_FLAG_AUTOPROBE_CONSUMER); + put_device(&reset_pdev->dev); if (!link) { dev_notice(dev, "add reset device_link fail\n"); goto skip_reset; diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c index 51424557810d..f725248ba57f 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ b/drivers/scsi/ufs/ufshcd-pci.c @@ -421,6 +421,13 @@ static int ufs_intel_lkf_init(struct ufs_hba *hba) return err; } +static int ufs_intel_adl_init(struct ufs_hba *hba) +{ + hba->nop_out_timeout = 200; + hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8; + return ufs_intel_common_init(hba); +} + static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = { .name = "intel-pci", .init = ufs_intel_common_init, @@ -449,6 +456,15 @@ static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = { .device_reset = ufs_intel_device_reset, }; +static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = { + .name = "intel-pci", + .init = ufs_intel_adl_init, + .exit = ufs_intel_common_exit, + .link_startup_notify = ufs_intel_link_startup_notify, + .resume = ufs_intel_resume, + .device_reset = ufs_intel_device_reset, +}; + #ifdef CONFIG_PM_SLEEP static int ufshcd_pci_restore(struct device *dev) { @@ -563,6 +579,8 @@ static const struct pci_device_id ufshcd_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops }, { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops }, { PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops }, + { PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops }, + { PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops }, { } /* terminate list */ }; diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index afd38142b1c0..13c09dbd99b9 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -6453,9 +6453,8 @@ static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba) irqreturn_t ret = IRQ_NONE; int tag; - pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); - spin_lock_irqsave(hba->host->host_lock, flags); + pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); issued = hba->outstanding_tasks & ~pending; for_each_set_bit(tag, &issued, hba->nutmrs) { struct request *req = hba->tmf_rqs[tag]; @@ -6616,11 +6615,6 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, err = wait_for_completion_io_timeout(&wait, 
msecs_to_jiffies(TM_CMD_TIMEOUT)); if (!err) { - /* - * Make sure that ufshcd_compl_tm() does not trigger a - * use-after-free. - */ - req->end_io_data = NULL; ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR); dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", __func__, tm_function); @@ -7116,6 +7110,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) goto release; } + lrbp->cmd = NULL; err = SUCCESS; release: diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c index 2e31e1413826..ded5ba9b1466 100644 --- a/drivers/scsi/ufs/ufshpb.c +++ b/drivers/scsi/ufs/ufshpb.c @@ -331,7 +331,7 @@ ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, cdb[0] = UFSHPB_READ; if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ) - ppn_tmp = swab64(ppn); + ppn_tmp = (__force __be64)swab64((__force u64)ppn); /* ppn value is stored as big-endian in the host memory */ memcpy(&cdb[6], &ppn_tmp, sizeof(__be64)); diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 19f7d7b90625..28e1d98ae102 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -977,7 +977,6 @@ static unsigned int features[] = { static struct virtio_driver virtio_scsi_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), - .suppress_used_validation = true, .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, diff --git a/drivers/soc/tegra/common.c b/drivers/soc/tegra/common.c index cd33e99249c3..35c882da55fc 100644 --- a/drivers/soc/tegra/common.c +++ b/drivers/soc/tegra/common.c @@ -10,6 +10,7 @@ #include <linux/export.h> #include <linux/of.h> #include <linux/pm_opp.h> +#include <linux/pm_runtime.h> #include <soc/tegra/common.h> #include <soc/tegra/fuse.h> @@ -43,6 +44,7 @@ static int tegra_core_dev_init_opp_state(struct device *dev) { unsigned long rate; struct clk *clk; + bool rpm_enabled; int err; clk = devm_clk_get(dev, NULL); @@ -57,8 +59,31 @@ static int tegra_core_dev_init_opp_state(struct device *dev) return -EINVAL; } + /* + * Runtime PM of the device must be enabled in order to set up + * GENPD's performance properly because GENPD core checks whether + * device is suspended and this check doesn't work while RPM is + * disabled. This makes sure the OPP vote below gets cached in + * GENPD for the device; the cached vote is then applied the next + * time the device gets runtime resumed. 
+ */ + rpm_enabled = pm_runtime_enabled(dev); + if (!rpm_enabled) + pm_runtime_enable(dev); + + /* should never happen in practice */ + if (!pm_runtime_enabled(dev)) { + dev_WARN(dev, "failed to enable runtime PM\n"); + pm_runtime_disable(dev); + return -EINVAL; + } + /* first dummy rate-setting initializes voltage vote */ err = dev_pm_opp_set_rate(dev, rate); + + if (!rpm_enabled) + pm_runtime_disable(dev); + if (err) { dev_err(dev, "failed to initialize OPP clock: %d\n", err); return err; diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 8b3d268ac63c..b808c94641fa 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -37,6 +37,7 @@ #define CQSPI_NEEDS_WR_DELAY BIT(0) #define CQSPI_DISABLE_DAC_MODE BIT(1) #define CQSPI_SUPPORT_EXTERNAL_DMA BIT(2) +#define CQSPI_NO_SUPPORT_WR_COMPLETION BIT(3) /* Capabilities */ #define CQSPI_SUPPORTS_OCTAL BIT(0) @@ -86,6 +87,7 @@ struct cqspi_st { struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT]; bool use_dma_read; u32 pd_dev_id; + bool wr_completion; }; struct cqspi_driver_platdata { @@ -996,9 +998,11 @@ static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata, * polling on the controller's side. spinand and spi-nor will take * care of polling the status register. */ - reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL); - reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL; - writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL); + if (cqspi->wr_completion) { + reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL); + reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL; + writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL); + } reg = readl(reg_base + CQSPI_REG_SIZE); reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; @@ -1736,6 +1740,10 @@ static int cqspi_probe(struct platform_device *pdev) cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk); master->max_speed_hz = cqspi->master_ref_clk_hz; + + /* write completion is supported by default */ + cqspi->wr_completion = true; + ddata = of_device_get_match_data(dev); if (ddata) { if (ddata->quirks & CQSPI_NEEDS_WR_DELAY) @@ -1747,6 +1755,8 @@ static int cqspi_probe(struct platform_device *pdev) cqspi->use_direct_mode = true; if (ddata->quirks & CQSPI_SUPPORT_EXTERNAL_DMA) cqspi->use_dma_read = true; + if (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION) + cqspi->wr_completion = false; if (of_device_is_compatible(pdev->dev.of_node, "xlnx,versal-ospi-1.0")) @@ -1859,6 +1869,10 @@ static const struct cqspi_driver_platdata intel_lgm_qspi = { .quirks = CQSPI_DISABLE_DAC_MODE, }; +static const struct cqspi_driver_platdata socfpga_qspi = { + .quirks = CQSPI_NO_SUPPORT_WR_COMPLETION, +}; + static const struct cqspi_driver_platdata versal_ospi = { .hwcaps_mask = CQSPI_SUPPORTS_OCTAL, .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA, @@ -1887,6 +1901,10 @@ static const struct of_device_id cqspi_dt_ids[] = { .compatible = "xlnx,versal-ospi-1.0", .data = (void *)&versal_ospi, }, + { + .compatible = "intel,socfpga-qspi", + .data = (void *)&socfpga_qspi, + }, { /* end of table */ } }; diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index 5d98611dd999..c72e501c270f 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -912,7 +912,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev) ret = devm_spi_register_controller(&pdev->dev, controller); if (ret < 0) { - dev_err(&pdev->dev, "spi_register_controller error.\n"); + dev_err_probe(&pdev->dev, ret, "spi_register_controller error: %i\n", ret); goto out_pm_get; } 
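Both spi hunks here (spi-fsl-lpspi above, spi-geni-qcom below) center on dev_err_probe(): it returns the error code it is handed, prints the message for real errors, and for -EPROBE_DEFER stays silent while recording the deferral reason for later inspection. That is why it must be passed PTR_ERR(ptr) and never the 0/1 result of IS_ERR(ptr) — exactly the misuse the geni-qcom hunk corrects. A minimal sketch of the idiom follows; the function and channel names are illustrative and do not come from either driver:

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

/* Sketch only: probe-time resource acquisition with dev_err_probe().
 * dev_err_probe() returns the error it is given; for -EPROBE_DEFER it
 * suppresses the message and records the deferral reason instead. */
static int example_get_tx_chan(struct device *dev, struct dma_chan **out)
{
	struct dma_chan *chan = dma_request_chan(dev, "tx");

	if (IS_ERR(chan))
		return dev_err_probe(dev, PTR_ERR(chan),
				     "Failed to get tx DMA channel\n");

	*out = chan;
	return 0;
}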
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c index 27a446faf143..e2affaee4e76 100644 --- a/drivers/spi/spi-geni-qcom.c +++ b/drivers/spi/spi-geni-qcom.c @@ -491,22 +491,26 @@ static int spi_geni_grab_gpi_chan(struct spi_geni_master *mas) int ret; mas->tx = dma_request_chan(mas->dev, "tx"); - ret = dev_err_probe(mas->dev, IS_ERR(mas->tx), "Failed to get tx DMA ch\n"); - if (ret < 0) + if (IS_ERR(mas->tx)) { + ret = dev_err_probe(mas->dev, PTR_ERR(mas->tx), + "Failed to get tx DMA ch\n"); goto err_tx; + } mas->rx = dma_request_chan(mas->dev, "rx"); - ret = dev_err_probe(mas->dev, IS_ERR(mas->rx), "Failed to get rx DMA ch\n"); - if (ret < 0) + if (IS_ERR(mas->rx)) { + ret = dev_err_probe(mas->dev, PTR_ERR(mas->rx), + "Failed to get rx DMA ch\n"); goto err_rx; + } return 0; err_rx: + mas->rx = NULL; dma_release_channel(mas->tx); - mas->tx = NULL; err_tx: - mas->rx = NULL; + mas->tx = NULL; return ret; } diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index b23e675953e1..fdd530b150a7 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -3099,12 +3099,6 @@ void spi_unregister_controller(struct spi_controller *ctlr) device_del(&ctlr->dev); - /* Release the last reference on the controller if its driver - * has not yet been converted to devm_spi_alloc_master/slave(). - */ - if (!ctlr->devm_allocated) - put_device(&ctlr->dev); - /* free bus id */ mutex_lock(&board_lock); if (found == ctlr) @@ -3113,6 +3107,12 @@ void spi_unregister_controller(struct spi_controller *ctlr) if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) mutex_unlock(&ctlr->add_lock); + + /* Release the last reference on the controller if its driver + * has not yet been converted to devm_spi_alloc_master/slave(). + */ + if (!ctlr->devm_allocated) + put_device(&ctlr->dev); } EXPORT_SYMBOL_GPL(spi_unregister_controller); diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 59af251e7576..7fec86946131 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -66,8 +66,6 @@ source "drivers/staging/gdm724x/Kconfig" source "drivers/staging/fwserial/Kconfig" -source "drivers/staging/netlogic/Kconfig" - source "drivers/staging/gs_fpgaboot/Kconfig" source "drivers/staging/unisys/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 76f413470bc8..e66e19c45425 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -10,7 +10,6 @@ obj-$(CONFIG_RTL8723BS) += rtl8723bs/ obj-$(CONFIG_R8712U) += rtl8712/ obj-$(CONFIG_R8188EU) += r8188eu/ obj-$(CONFIG_RTS5208) += rts5208/ -obj-$(CONFIG_NETLOGIC_XLR_NET) += netlogic/ obj-$(CONFIG_OCTEON_ETHERNET) += octeon/ obj-$(CONFIG_OCTEON_USB) += octeon-usb/ obj-$(CONFIG_VT6655) += vt6655/ diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c index cf263a58a148..6fd549a424d5 100644 --- a/drivers/staging/fbtft/fb_ssd1351.c +++ b/drivers/staging/fbtft/fb_ssd1351.c @@ -187,7 +187,6 @@ static struct fbtft_display display = { }, }; -#ifdef CONFIG_FB_BACKLIGHT static int update_onboard_backlight(struct backlight_device *bd) { struct fbtft_par *par = bl_get_data(bd); @@ -231,9 +230,6 @@ static void register_onboard_backlight(struct fbtft_par *par) if (!par->fbtftops.unregister_backlight) par->fbtftops.unregister_backlight = fbtft_unregister_backlight; } -#else -static void register_onboard_backlight(struct fbtft_par *par) { }; -#endif FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1351", &display); diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c index 
ecb5f75f6dd5..f2684d2d6851 100644 --- a/drivers/staging/fbtft/fbtft-core.c +++ b/drivers/staging/fbtft/fbtft-core.c @@ -128,7 +128,6 @@ static int fbtft_request_gpios(struct fbtft_par *par) return 0; } -#ifdef CONFIG_FB_BACKLIGHT static int fbtft_backlight_update_status(struct backlight_device *bd) { struct fbtft_par *par = bl_get_data(bd); @@ -161,6 +160,7 @@ void fbtft_unregister_backlight(struct fbtft_par *par) par->info->bl_dev = NULL; } } +EXPORT_SYMBOL(fbtft_unregister_backlight); static const struct backlight_ops fbtft_bl_ops = { .get_brightness = fbtft_backlight_get_brightness, @@ -198,12 +198,7 @@ void fbtft_register_backlight(struct fbtft_par *par) if (!par->fbtftops.unregister_backlight) par->fbtftops.unregister_backlight = fbtft_unregister_backlight; } -#else -void fbtft_register_backlight(struct fbtft_par *par) { }; -void fbtft_unregister_backlight(struct fbtft_par *par) { }; -#endif EXPORT_SYMBOL(fbtft_register_backlight); -EXPORT_SYMBOL(fbtft_unregister_backlight); static void fbtft_set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye) @@ -853,13 +848,11 @@ int fbtft_register_framebuffer(struct fb_info *fb_info) fb_info->fix.smem_len >> 10, text1, HZ / fb_info->fbdefio->delay, text2); -#ifdef CONFIG_FB_BACKLIGHT /* Turn on backlight if available */ if (fb_info->bl_dev) { fb_info->bl_dev->props.power = FB_BLANK_UNBLANK; fb_info->bl_dev->ops->update_status(fb_info->bl_dev); } -#endif return 0; diff --git a/drivers/staging/greybus/audio_helper.c b/drivers/staging/greybus/audio_helper.c index 1ed4772d2771..843760675876 100644 --- a/drivers/staging/greybus/audio_helper.c +++ b/drivers/staging/greybus/audio_helper.c @@ -192,7 +192,11 @@ int gbaudio_remove_component_controls(struct snd_soc_component *component, unsigned int num_controls) { struct snd_card *card = component->card->snd_card; + int err; - return gbaudio_remove_controls(card, component->dev, controls, - num_controls, component->name_prefix); + down_write(&card->controls_rwsem); + err = gbaudio_remove_controls(card, component->dev, controls, + num_controls, component->name_prefix); + up_write(&card->controls_rwsem); + return err; } diff --git a/drivers/staging/netlogic/Kconfig b/drivers/staging/netlogic/Kconfig deleted file mode 100644 index e1712606ee3c..000000000000 --- a/drivers/staging/netlogic/Kconfig +++ /dev/null @@ -1,9 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -config NETLOGIC_XLR_NET - tristate "Netlogic XLR/XLS network device" - depends on CPU_XLR - depends on NETDEVICES - select PHYLIB - help - This driver support Netlogic XLR/XLS on chip gigabit - Ethernet. 
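The greybus audio_helper change above wraps gbaudio_remove_controls() in down_write()/up_write() on the card's controls_rwsem, apparently because the ALSA control-removal path it ends up in expects the caller to hold that semaphore in write mode at this point in the kernel's history. For reference, a generic sketch of the write-side rwsem pattern; everything except controls_rwsem itself is an illustrative name, not taken from the patch:

#include <linux/rwsem.h>

/* Sketch only: guarding shared-state mutation with a read-write
 * semaphore, as the greybus hunk above does with the sound card's
 * controls_rwsem. All names below are illustrative. */
static DECLARE_RWSEM(example_sem);
static int example_items;

static void example_remove_all(void)
{
	down_write(&example_sem);	/* exclusive: blocks readers and writers */
	example_items = 0;
	up_write(&example_sem);
}

static int example_count(void)
{
	int n;

	down_read(&example_sem);	/* shared with other readers */
	n = example_items;
	up_read(&example_sem);
	return n;
}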
diff --git a/drivers/staging/netlogic/Makefile b/drivers/staging/netlogic/Makefile deleted file mode 100644 index 7e2902af26a3..000000000000 --- a/drivers/staging/netlogic/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_NETLOGIC_XLR_NET) += xlr_net.o platform_net.o diff --git a/drivers/staging/netlogic/TODO b/drivers/staging/netlogic/TODO deleted file mode 100644 index 20e22ecb9903..000000000000 --- a/drivers/staging/netlogic/TODO +++ /dev/null @@ -1,11 +0,0 @@ -* Implementing 64bit stat counter in software -* All memory allocation should be changed to DMA allocations -* Changing comments into linux standard format - -Please send patches -To: -Ganesan Ramalingam <ganesanr@broadcom.com> -Greg Kroah-Hartman <gregkh@linuxfoundation.org> -Cc: -Jayachandran Chandrashekaran Nair <jchandra@broadcom.com> - diff --git a/drivers/staging/netlogic/platform_net.c b/drivers/staging/netlogic/platform_net.c deleted file mode 100644 index 8be9d0b0c22c..000000000000 --- a/drivers/staging/netlogic/platform_net.c +++ /dev/null @@ -1,219 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -/* - * Copyright (c) 2003-2012 Broadcom Corporation - * All Rights Reserved - */ - -#include <linux/device.h> -#include <linux/platform_device.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/io.h> -#include <linux/delay.h> -#include <linux/ioport.h> -#include <linux/resource.h> -#include <linux/phy.h> - -#include <asm/netlogic/haldefs.h> -#include <asm/netlogic/common.h> -#include <asm/netlogic/xlr/fmn.h> -#include <asm/netlogic/xlr/xlr.h> -#include <asm/netlogic/psb-bootinfo.h> -#include <asm/netlogic/xlr/pic.h> -#include <asm/netlogic/xlr/iomap.h> - -#include "platform_net.h" - -/* Linux Net */ -#define MAX_NUM_GMAC 8 -#define MAX_NUM_XLS_GMAC 8 -#define MAX_NUM_XLR_GMAC 4 - -static u32 xlr_gmac_offsets[] = { - NETLOGIC_IO_GMAC_0_OFFSET, NETLOGIC_IO_GMAC_1_OFFSET, - NETLOGIC_IO_GMAC_2_OFFSET, NETLOGIC_IO_GMAC_3_OFFSET, - NETLOGIC_IO_GMAC_4_OFFSET, NETLOGIC_IO_GMAC_5_OFFSET, - NETLOGIC_IO_GMAC_6_OFFSET, NETLOGIC_IO_GMAC_7_OFFSET -}; - -static u32 xlr_gmac_irqs[] = { PIC_GMAC_0_IRQ, PIC_GMAC_1_IRQ, - PIC_GMAC_2_IRQ, PIC_GMAC_3_IRQ, - PIC_GMAC_4_IRQ, PIC_GMAC_5_IRQ, - PIC_GMAC_6_IRQ, PIC_GMAC_7_IRQ -}; - -static struct resource xlr_net0_res[8]; -static struct resource xlr_net1_res[8]; -static u32 __iomem *gmac4_addr; -static u32 __iomem *gpio_addr; - -static void xlr_resource_init(struct resource *res, int offset, int irq) -{ - res->name = "gmac"; - - res->start = CPHYSADDR(nlm_mmio_base(offset)); - res->end = res->start + 0xfff; - res->flags = IORESOURCE_MEM; - - res++; - res->name = "gmac"; - res->start = irq; - res->end = irq; - res->flags = IORESOURCE_IRQ; -} - -static struct platform_device *gmac_controller2_init(void *gmac0_addr) -{ - int mac; - static struct xlr_net_data ndata1 = { - .phy_interface = PHY_INTERFACE_MODE_SGMII, - .rfr_station = FMN_STNID_GMAC1_FR_0, - .bucket_size = xlr_board_fmn_config.bucket_size, - .gmac_fmn_info = &xlr_board_fmn_config.gmac[1], - }; - - static struct platform_device xlr_net_dev1 = { - .name = "xlr-net", - .id = 1, - .dev.platform_data = &ndata1, - }; - - gmac4_addr = - ioremap(CPHYSADDR(nlm_mmio_base(NETLOGIC_IO_GMAC_4_OFFSET)), - 0xfff); - ndata1.serdes_addr = gmac4_addr; - ndata1.pcs_addr = gmac4_addr; - ndata1.mii_addr = gmac0_addr; - ndata1.gpio_addr = gpio_addr; - ndata1.cpu_mask = nlm_current_node()->coremask; - - xlr_net_dev1.resource = xlr_net1_res; - - for (mac = 0; mac < 4; mac++) { - 
ndata1.tx_stnid[mac] = FMN_STNID_GMAC1_TX0 + mac; - ndata1.phy_addr[mac] = mac + 4 + 0x10; - - xlr_resource_init(&xlr_net1_res[mac * 2], - xlr_gmac_offsets[mac + 4], - xlr_gmac_irqs[mac + 4]); - } - xlr_net_dev1.num_resources = 8; - - return &xlr_net_dev1; -} - -static void xls_gmac_init(void) -{ - int mac; - struct platform_device *xlr_net_dev1; - void __iomem *gmac0_addr = - ioremap(CPHYSADDR(nlm_mmio_base(NETLOGIC_IO_GMAC_0_OFFSET)), - 0xfff); - - static struct xlr_net_data ndata0 = { - .rfr_station = FMN_STNID_GMACRFR_0, - .bucket_size = xlr_board_fmn_config.bucket_size, - .gmac_fmn_info = &xlr_board_fmn_config.gmac[0], - }; - - static struct platform_device xlr_net_dev0 = { - .name = "xlr-net", - .id = 0, - }; - xlr_net_dev0.dev.platform_data = &ndata0; - ndata0.serdes_addr = gmac0_addr; - ndata0.pcs_addr = gmac0_addr; - ndata0.mii_addr = gmac0_addr; - - /* Passing GPIO base for serdes init. Only needed on sgmii ports */ - gpio_addr = - ioremap(CPHYSADDR(nlm_mmio_base(NETLOGIC_IO_GPIO_OFFSET)), - 0xfff); - ndata0.gpio_addr = gpio_addr; - ndata0.cpu_mask = nlm_current_node()->coremask; - - xlr_net_dev0.resource = xlr_net0_res; - - switch (nlm_prom_info.board_major_version) { - case 12: - /* first block RGMII or XAUI, use RGMII */ - ndata0.phy_interface = PHY_INTERFACE_MODE_RGMII; - ndata0.tx_stnid[0] = FMN_STNID_GMAC0_TX0; - ndata0.phy_addr[0] = 0; - - xlr_net_dev0.num_resources = 2; - - xlr_resource_init(&xlr_net0_res[0], xlr_gmac_offsets[0], - xlr_gmac_irqs[0]); - platform_device_register(&xlr_net_dev0); - - /* second block is XAUI, not supported yet */ - break; - default: - /* default XLS config, all ports SGMII */ - ndata0.phy_interface = PHY_INTERFACE_MODE_SGMII; - for (mac = 0; mac < 4; mac++) { - ndata0.tx_stnid[mac] = FMN_STNID_GMAC0_TX0 + mac; - ndata0.phy_addr[mac] = mac + 0x10; - - xlr_resource_init(&xlr_net0_res[mac * 2], - xlr_gmac_offsets[mac], - xlr_gmac_irqs[mac]); - } - xlr_net_dev0.num_resources = 8; - platform_device_register(&xlr_net_dev0); - - xlr_net_dev1 = gmac_controller2_init(gmac0_addr); - platform_device_register(xlr_net_dev1); - } -} - -static void xlr_gmac_init(void) -{ - int mac; - - /* assume all GMACs for now */ - static struct xlr_net_data ndata0 = { - .phy_interface = PHY_INTERFACE_MODE_RGMII, - .serdes_addr = NULL, - .pcs_addr = NULL, - .rfr_station = FMN_STNID_GMACRFR_0, - .bucket_size = xlr_board_fmn_config.bucket_size, - .gmac_fmn_info = &xlr_board_fmn_config.gmac[0], - .gpio_addr = NULL, - }; - - static struct platform_device xlr_net_dev0 = { - .name = "xlr-net", - .id = 0, - .dev.platform_data = &ndata0, - }; - ndata0.mii_addr = - ioremap(CPHYSADDR(nlm_mmio_base(NETLOGIC_IO_GMAC_0_OFFSET)), - 0xfff); - - ndata0.cpu_mask = nlm_current_node()->coremask; - - for (mac = 0; mac < MAX_NUM_XLR_GMAC; mac++) { - ndata0.tx_stnid[mac] = FMN_STNID_GMAC0_TX0 + mac; - ndata0.phy_addr[mac] = mac; - xlr_resource_init(&xlr_net0_res[mac * 2], xlr_gmac_offsets[mac], - xlr_gmac_irqs[mac]); - } - xlr_net_dev0.num_resources = 8; - xlr_net_dev0.resource = xlr_net0_res; - - platform_device_register(&xlr_net_dev0); -} - -static int __init xlr_net_init(void) -{ - if (nlm_chip_is_xls()) - xls_gmac_init(); - else - xlr_gmac_init(); - - return 0; -} - -arch_initcall(xlr_net_init); diff --git a/drivers/staging/netlogic/platform_net.h b/drivers/staging/netlogic/platform_net.h deleted file mode 100644 index c8d4c13424c6..000000000000 --- a/drivers/staging/netlogic/platform_net.h +++ /dev/null @@ -1,21 +0,0 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */ -/* - * 
Copyright (c) 2003-2012 Broadcom Corporation - * All Rights Reserved - */ - -#define PORTS_PER_CONTROLLER 4 - -struct xlr_net_data { - int cpu_mask; - u32 __iomem *mii_addr; - u32 __iomem *serdes_addr; - u32 __iomem *pcs_addr; - u32 __iomem *gpio_addr; - int phy_interface; - int rfr_station; - int tx_stnid[PORTS_PER_CONTROLLER]; - int *bucket_size; - int phy_addr[PORTS_PER_CONTROLLER]; - struct xlr_fmn_info *gmac_fmn_info; -}; diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c deleted file mode 100644 index 69ea61faf8fa..000000000000 --- a/drivers/staging/netlogic/xlr_net.c +++ /dev/null @@ -1,1080 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -/* - * Copyright (c) 2003-2012 Broadcom Corporation - * All Rights Reserved - */ - -#include <linux/phy.h> -#include <linux/delay.h> -#include <linux/netdevice.h> -#include <linux/smp.h> -#include <linux/ethtool.h> -#include <linux/module.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/jiffies.h> -#include <linux/interrupt.h> -#include <linux/platform_device.h> - -#include <asm/mipsregs.h> -/* - * fmn.h - For FMN credit configuration and registering fmn_handler. - * FMN is communication mechanism that allows processing agents within - * XLR/XLS to communicate each other. - */ -#include <asm/netlogic/xlr/fmn.h> - -#include "platform_net.h" -#include "xlr_net.h" - -/* - * The readl/writel implementation byteswaps on XLR/XLS, so - * we need to use __raw_ IO to read the NAE registers - * because they are in the big-endian MMIO area on the SoC. - */ -static inline void xlr_nae_wreg(u32 __iomem *base, unsigned int reg, u32 val) -{ - __raw_writel(val, base + reg); -} - -static inline u32 xlr_nae_rdreg(u32 __iomem *base, unsigned int reg) -{ - return __raw_readl(base + reg); -} - -static inline void xlr_reg_update(u32 *base_addr, u32 off, u32 val, u32 mask) -{ - u32 tmp; - - tmp = xlr_nae_rdreg(base_addr, off); - xlr_nae_wreg(base_addr, off, (tmp & ~mask) | (val & mask)); -} - -#define MAC_SKB_BACK_PTR_SIZE SMP_CACHE_BYTES - -static int send_to_rfr_fifo(struct xlr_net_priv *priv, void *addr) -{ - struct nlm_fmn_msg msg; - int ret = 0, num_try = 0, stnid; - unsigned long paddr, mflags; - - paddr = virt_to_bus(addr); - msg.msg0 = (u64)paddr & 0xffffffffe0ULL; - msg.msg1 = 0; - msg.msg2 = 0; - msg.msg3 = 0; - stnid = priv->nd->rfr_station; - do { - mflags = nlm_cop2_enable_irqsave(); - ret = nlm_fmn_send(1, 0, stnid, &msg); - nlm_cop2_disable_irqrestore(mflags); - if (ret == 0) - return 0; - } while (++num_try < 10000); - - netdev_err(priv->ndev, "Send to RFR failed in RX path\n"); - return ret; -} - -static inline unsigned char *xlr_alloc_skb(void) -{ - struct sk_buff *skb; - int buf_len = sizeof(struct sk_buff *); - unsigned char *skb_data; - - /* skb->data is cache aligned */ - skb = alloc_skb(XLR_RX_BUF_SIZE, GFP_ATOMIC); - if (!skb) - return NULL; - skb_data = skb->data; - skb_reserve(skb, MAC_SKB_BACK_PTR_SIZE); - memcpy(skb_data, &skb, buf_len); - - return skb->data; -} - -static void xlr_net_fmn_handler(int bkt, int src_stnid, int size, int code, - struct nlm_fmn_msg *msg, void *arg) -{ - struct sk_buff *skb; - void *skb_data = NULL; - struct net_device *ndev; - struct xlr_net_priv *priv; - u32 port, length; - unsigned char *addr; - struct xlr_adapter *adapter = arg; - - length = (msg->msg0 >> 40) & 0x3fff; - if (length == 0) { - addr = bus_to_virt(msg->msg0 & 0xffffffffffULL); - addr = addr - MAC_SKB_BACK_PTR_SIZE; - skb = (struct sk_buff *)(*(unsigned long *)addr); - 
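/*
 * The branch above recovers the owning sk_buff from a bare bus address by
 * walking back over MAC_SKB_BACK_PTR_SIZE bytes of headroom: xlr_alloc_skb()
 * stored the skb pointer at the head of the buffer before reserving that
 * headroom. A minimal userspace model of the back-pointer scheme — purely
 * illustrative, with malloc standing in for alloc_skb and a plain pointer
 * standing in for the bus address; none of these names are from the driver:
 */
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define HEADROOM 64	/* stands in for MAC_SKB_BACK_PTR_SIZE */

struct fake_skb { unsigned char *data; };

/* Allocate a buffer and record the owner just before the payload area. */
static unsigned char *alloc_with_backptr(struct fake_skb *owner)
{
	unsigned char *buf = malloc(HEADROOM + 1600);

	if (!buf)
		return NULL;
	memcpy(buf, &owner, sizeof(owner));	/* back-pointer at buf[0] */
	owner->data = buf + HEADROOM;		/* payload starts after headroom */
	return owner->data;
}

/*
 * Given only the payload address (all the hardware's free-back message
 * carries), step back over the headroom and read the owner pointer out.
 */
static struct fake_skb *recover_owner(unsigned char *payload)
{
	struct fake_skb *owner;

	memcpy(&owner, payload - HEADROOM, sizeof(owner));
	return owner;
}

int main(void)
{
	struct fake_skb skb;
	unsigned char *data = alloc_with_backptr(&skb);

	if (!data)
		return 1;
	assert(recover_owner(data) == &skb);
	free(data - HEADROOM);
	return 0;
}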
dev_kfree_skb_any((struct sk_buff *)addr); - } else { - addr = (unsigned char *) - bus_to_virt(msg->msg0 & 0xffffffffe0ULL); - length = length - BYTE_OFFSET - MAC_CRC_LEN; - port = ((int)msg->msg0) & 0x0f; - addr = addr - MAC_SKB_BACK_PTR_SIZE; - skb = (struct sk_buff *)(*(unsigned long *)addr); - skb->dev = adapter->netdev[port]; - if (!skb->dev) - return; - ndev = skb->dev; - priv = netdev_priv(ndev); - - /* 16 byte IP header align */ - skb_reserve(skb, BYTE_OFFSET); - skb_put(skb, length); - skb->protocol = eth_type_trans(skb, skb->dev); - netif_rx(skb); - /* Fill rx ring */ - skb_data = xlr_alloc_skb(); - if (skb_data) - send_to_rfr_fifo(priv, skb_data); - } -} - -static struct phy_device *xlr_get_phydev(struct xlr_net_priv *priv) -{ - return mdiobus_get_phy(priv->mii_bus, priv->phy_addr); -} - -/* - * Ethtool operation - */ -static int xlr_get_link_ksettings(struct net_device *ndev, - struct ethtool_link_ksettings *ecmd) -{ - struct xlr_net_priv *priv = netdev_priv(ndev); - struct phy_device *phydev = xlr_get_phydev(priv); - - if (!phydev) - return -ENODEV; - - phy_ethtool_ksettings_get(phydev, ecmd); - - return 0; -} - -static int xlr_set_link_ksettings(struct net_device *ndev, - const struct ethtool_link_ksettings *ecmd) -{ - struct xlr_net_priv *priv = netdev_priv(ndev); - struct phy_device *phydev = xlr_get_phydev(priv); - - if (!phydev) - return -ENODEV; - return phy_ethtool_ksettings_set(phydev, ecmd); -} - -static const struct ethtool_ops xlr_ethtool_ops = { - .get_link_ksettings = xlr_get_link_ksettings, - .set_link_ksettings = xlr_set_link_ksettings, -}; - -/* - * Net operations - */ -static int xlr_net_fill_rx_ring(struct net_device *ndev) -{ - void *skb_data; - struct xlr_net_priv *priv = netdev_priv(ndev); - int i; - - for (i = 0; i < MAX_FRIN_SPILL / 4; i++) { - skb_data = xlr_alloc_skb(); - if (!skb_data) - return -ENOMEM; - send_to_rfr_fifo(priv, skb_data); - } - netdev_info(ndev, "Rx ring setup done\n"); - return 0; -} - -static int xlr_net_open(struct net_device *ndev) -{ - u32 err; - struct xlr_net_priv *priv = netdev_priv(ndev); - struct phy_device *phydev = xlr_get_phydev(priv); - - /* schedule a link state check */ - phy_start(phydev); - - err = phy_start_aneg(phydev); - if (err) { - pr_err("Autoneg failed\n"); - return err; - } - /* Setup the speed from PHY to internal reg*/ - xlr_set_gmac_speed(priv); - - netif_tx_start_all_queues(ndev); - - return 0; -} - -static int xlr_net_stop(struct net_device *ndev) -{ - struct xlr_net_priv *priv = netdev_priv(ndev); - struct phy_device *phydev = xlr_get_phydev(priv); - - phy_stop(phydev); - netif_tx_stop_all_queues(ndev); - return 0; -} - -static void xlr_make_tx_desc(struct nlm_fmn_msg *msg, unsigned long addr, - struct sk_buff *skb) -{ - unsigned long physkb = virt_to_phys(skb); - int cpu_core = nlm_core_id(); - int fr_stn_id = cpu_core * 8 + XLR_FB_STN; /* FB to 6th bucket */ - - msg->msg0 = (((u64)1 << 63) | /* End of packet descriptor */ - ((u64)127 << 54) | /* No Free back */ - (u64)skb->len << 40 | /* Length of data */ - ((u64)addr)); - msg->msg1 = (((u64)1 << 63) | - ((u64)fr_stn_id << 54) | /* Free back id */ - (u64)0 << 40 | /* Set len to 0 */ - ((u64)physkb & 0xffffffff)); /* 32bit address */ - msg->msg2 = 0; - msg->msg3 = 0; -} - -static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb, - struct net_device *ndev) -{ - struct nlm_fmn_msg msg; - struct xlr_net_priv *priv = netdev_priv(ndev); - int ret; - u32 flags; - - xlr_make_tx_desc(&msg, virt_to_phys(skb->data), skb); - flags = nlm_cop2_enable_irqsave(); - 
ret = nlm_fmn_send(2, 0, priv->tx_stnid, &msg); - nlm_cop2_disable_irqrestore(flags); - if (ret) - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; -} - -static void xlr_hw_set_mac_addr(struct net_device *ndev) -{ - struct xlr_net_priv *priv = netdev_priv(ndev); - - /* set mac station address */ - xlr_nae_wreg(priv->base_addr, R_MAC_ADDR0, - ((ndev->dev_addr[5] << 24) | (ndev->dev_addr[4] << 16) | - (ndev->dev_addr[3] << 8) | (ndev->dev_addr[2]))); - xlr_nae_wreg(priv->base_addr, R_MAC_ADDR0 + 1, - ((ndev->dev_addr[1] << 24) | (ndev->dev_addr[0] << 16))); - - xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK2, 0xffffffff); - xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK2 + 1, 0xffffffff); - xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK3, 0xffffffff); - xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK3 + 1, 0xffffffff); - - xlr_nae_wreg(priv->base_addr, R_MAC_FILTER_CONFIG, - (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) | - (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) | - (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID)); - - if (priv->nd->phy_interface == PHY_INTERFACE_MODE_RGMII || - priv->nd->phy_interface == PHY_INTERFACE_MODE_SGMII) - xlr_reg_update(priv->base_addr, R_IPG_IFG, MAC_B2B_IPG, 0x7f); -} - -static int xlr_net_set_mac_addr(struct net_device *ndev, void *data) -{ - int err; - - err = eth_mac_addr(ndev, data); - if (err) - return err; - xlr_hw_set_mac_addr(ndev); - return 0; -} - -static void xlr_set_rx_mode(struct net_device *ndev) -{ - struct xlr_net_priv *priv = netdev_priv(ndev); - u32 regval; - - regval = xlr_nae_rdreg(priv->base_addr, R_MAC_FILTER_CONFIG); - - if (ndev->flags & IFF_PROMISC) { - regval |= (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) | - (1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) | - (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) | - (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN); - } else { - regval &= ~((1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) | - (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN)); - } - - xlr_nae_wreg(priv->base_addr, R_MAC_FILTER_CONFIG, regval); -} - -static void xlr_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats) -{ - struct xlr_net_priv *priv = netdev_priv(ndev); - - stats->rx_packets = xlr_nae_rdreg(priv->base_addr, RX_PACKET_COUNTER); - stats->tx_packets = xlr_nae_rdreg(priv->base_addr, TX_PACKET_COUNTER); - stats->rx_bytes = xlr_nae_rdreg(priv->base_addr, RX_BYTE_COUNTER); - stats->tx_bytes = xlr_nae_rdreg(priv->base_addr, TX_BYTE_COUNTER); - stats->tx_errors = xlr_nae_rdreg(priv->base_addr, TX_FCS_ERROR_COUNTER); - stats->rx_dropped = xlr_nae_rdreg(priv->base_addr, - RX_DROP_PACKET_COUNTER); - stats->tx_dropped = xlr_nae_rdreg(priv->base_addr, - TX_DROP_FRAME_COUNTER); - - stats->multicast = xlr_nae_rdreg(priv->base_addr, - RX_MULTICAST_PACKET_COUNTER); - stats->collisions = xlr_nae_rdreg(priv->base_addr, - TX_TOTAL_COLLISION_COUNTER); - - stats->rx_length_errors = xlr_nae_rdreg(priv->base_addr, - RX_FRAME_LENGTH_ERROR_COUNTER); - stats->rx_over_errors = xlr_nae_rdreg(priv->base_addr, - RX_DROP_PACKET_COUNTER); - stats->rx_crc_errors = xlr_nae_rdreg(priv->base_addr, - RX_FCS_ERROR_COUNTER); - stats->rx_frame_errors = xlr_nae_rdreg(priv->base_addr, - RX_ALIGNMENT_ERROR_COUNTER); - - stats->rx_fifo_errors = xlr_nae_rdreg(priv->base_addr, - RX_DROP_PACKET_COUNTER); - stats->rx_missed_errors = xlr_nae_rdreg(priv->base_addr, - RX_CARRIER_SENSE_ERROR_COUNTER); - - stats->rx_errors = (stats->rx_over_errors + stats->rx_crc_errors + - stats->rx_frame_errors + stats->rx_fifo_errors + - stats->rx_missed_errors); - - stats->tx_aborted_errors = 
xlr_nae_rdreg(priv->base_addr, - TX_EXCESSIVE_COLLISION_PACKET_COUNTER); - stats->tx_carrier_errors = xlr_nae_rdreg(priv->base_addr, - TX_DROP_FRAME_COUNTER); - stats->tx_fifo_errors = xlr_nae_rdreg(priv->base_addr, - TX_DROP_FRAME_COUNTER); -} - -static const struct net_device_ops xlr_netdev_ops = { - .ndo_open = xlr_net_open, - .ndo_stop = xlr_net_stop, - .ndo_start_xmit = xlr_net_start_xmit, - .ndo_select_queue = dev_pick_tx_cpu_id, - .ndo_set_mac_address = xlr_net_set_mac_addr, - .ndo_set_rx_mode = xlr_set_rx_mode, - .ndo_get_stats64 = xlr_stats, -}; - -/* - * Gmac init - */ -static void *xlr_config_spill(struct xlr_net_priv *priv, int reg_start_0, - int reg_start_1, int reg_size, int size) -{ - void *spill; - u32 *base; - unsigned long phys_addr; - u32 spill_size; - - base = priv->base_addr; - spill_size = size; - spill = kmalloc(spill_size + SMP_CACHE_BYTES, GFP_KERNEL); - if (!spill) - return ZERO_SIZE_PTR; - - spill = PTR_ALIGN(spill, SMP_CACHE_BYTES); - phys_addr = virt_to_phys(spill); - dev_dbg(&priv->ndev->dev, "Allocated spill %d bytes at %lx\n", - size, phys_addr); - xlr_nae_wreg(base, reg_start_0, (phys_addr >> 5) & 0xffffffff); - xlr_nae_wreg(base, reg_start_1, ((u64)phys_addr >> 37) & 0x07); - xlr_nae_wreg(base, reg_size, spill_size); - - return spill; -} - -/* - * Configure the 6 FIFO's that are used by the network accelarator to - * communicate with the rest of the XLx device. 4 of the FIFO's are for - * packets from NA --> cpu (called Class FIFO's) and 2 are for feeding - * the NA with free descriptors. - */ -static void xlr_config_fifo_spill_area(struct xlr_net_priv *priv) -{ - priv->frin_spill = xlr_config_spill(priv, - R_REG_FRIN_SPILL_MEM_START_0, - R_REG_FRIN_SPILL_MEM_START_1, - R_REG_FRIN_SPILL_MEM_SIZE, - MAX_FRIN_SPILL * sizeof(u64)); - priv->frout_spill = xlr_config_spill(priv, - R_FROUT_SPILL_MEM_START_0, - R_FROUT_SPILL_MEM_START_1, - R_FROUT_SPILL_MEM_SIZE, - MAX_FROUT_SPILL * sizeof(u64)); - priv->class_0_spill = xlr_config_spill(priv, - R_CLASS0_SPILL_MEM_START_0, - R_CLASS0_SPILL_MEM_START_1, - R_CLASS0_SPILL_MEM_SIZE, - MAX_CLASS_0_SPILL * sizeof(u64)); - priv->class_1_spill = xlr_config_spill(priv, - R_CLASS1_SPILL_MEM_START_0, - R_CLASS1_SPILL_MEM_START_1, - R_CLASS1_SPILL_MEM_SIZE, - MAX_CLASS_1_SPILL * sizeof(u64)); - priv->class_2_spill = xlr_config_spill(priv, - R_CLASS2_SPILL_MEM_START_0, - R_CLASS2_SPILL_MEM_START_1, - R_CLASS2_SPILL_MEM_SIZE, - MAX_CLASS_2_SPILL * sizeof(u64)); - priv->class_3_spill = xlr_config_spill(priv, - R_CLASS3_SPILL_MEM_START_0, - R_CLASS3_SPILL_MEM_START_1, - R_CLASS3_SPILL_MEM_SIZE, - MAX_CLASS_3_SPILL * sizeof(u64)); -} - -/* - * Configure PDE to Round-Robin distribution of packets to the - * available cpu - */ -static void xlr_config_pde(struct xlr_net_priv *priv) -{ - int i = 0; - u64 bkt_map = 0; - - /* Each core has 8 buckets(station) */ - for (i = 0; i < hweight32(priv->nd->cpu_mask); i++) - bkt_map |= (0xff << (i * 8)); - - xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_0, (bkt_map & 0xffffffff)); - xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_0 + 1, - ((bkt_map >> 32) & 0xffffffff)); - - xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_1, (bkt_map & 0xffffffff)); - xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_1 + 1, - ((bkt_map >> 32) & 0xffffffff)); - - xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_2, (bkt_map & 0xffffffff)); - xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_2 + 1, - ((bkt_map >> 32) & 0xffffffff)); - - xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_3, (bkt_map & 0xffffffff)); - xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_3 + 
1, - ((bkt_map >> 32) & 0xffffffff)); -} - -/* - * Setup the Message ring credits, bucket size and other - * common configuration - */ -static int xlr_config_common(struct xlr_net_priv *priv) -{ - struct xlr_fmn_info *gmac = priv->nd->gmac_fmn_info; - int start_stn_id = gmac->start_stn_id; - int end_stn_id = gmac->end_stn_id; - int *bucket_size = priv->nd->bucket_size; - int i, j, err; - - /* Setting non-core MsgBktSize(0x321 - 0x325) */ - for (i = start_stn_id; i <= end_stn_id; i++) { - xlr_nae_wreg(priv->base_addr, - R_GMAC_RFR0_BUCKET_SIZE + i - start_stn_id, - bucket_size[i]); - } - - /* - * Setting non-core Credit counter register - * Distributing Gmac's credit to CPU's - */ - for (i = 0; i < 8; i++) { - for (j = 0; j < 8; j++) - xlr_nae_wreg(priv->base_addr, - (R_CC_CPU0_0 + (i * 8)) + j, - gmac->credit_config[(i * 8) + j]); - } - - xlr_nae_wreg(priv->base_addr, R_MSG_TX_THRESHOLD, 3); - xlr_nae_wreg(priv->base_addr, R_DMACR0, 0xffffffff); - xlr_nae_wreg(priv->base_addr, R_DMACR1, 0xffffffff); - xlr_nae_wreg(priv->base_addr, R_DMACR2, 0xffffffff); - xlr_nae_wreg(priv->base_addr, R_DMACR3, 0xffffffff); - xlr_nae_wreg(priv->base_addr, R_FREEQCARVE, 0); - - err = xlr_net_fill_rx_ring(priv->ndev); - if (err) - return err; - nlm_register_fmn_handler(start_stn_id, end_stn_id, xlr_net_fmn_handler, - priv->adapter); - return 0; -} - -static void xlr_config_translate_table(struct xlr_net_priv *priv) -{ - u32 cpu_mask; - u32 val; - int bkts[32]; /* one bucket is assumed for each cpu */ - int b1, b2, c1, c2, i, j, k; - int use_bkt; - - use_bkt = 0; - cpu_mask = priv->nd->cpu_mask; - - pr_info("Using %s-based distribution\n", - (use_bkt) ? "bucket" : "class"); - j = 0; - for (i = 0; i < 32; i++) { - if ((1 << i) & cpu_mask) { - /* for each cpu, mark the 4+threadid bucket */ - bkts[j] = ((i / 4) * 8) + (i % 4); - j++; - } - } - - /*configure the 128 * 9 Translation table to send to available buckets*/ - k = 0; - c1 = 3; - c2 = 0; - for (i = 0; i < 64; i++) { - /* - * On use_bkt set the b0, b1 are used, else - * the 4 classes are used, here implemented - * a logic to distribute the packets to the - * buckets equally or based on the class - */ - c1 = (c1 + 1) & 3; - c2 = (c1 + 1) & 3; - b1 = bkts[k]; - k = (k + 1) % j; - b2 = bkts[k]; - k = (k + 1) % j; - - val = ((c1 << 23) | (b1 << 17) | (use_bkt << 16) | - (c2 << 7) | (b2 << 1) | (use_bkt << 0)); - dev_dbg(&priv->ndev->dev, "Table[%d] b1=%d b2=%d c1=%d c2=%d\n", - i, b1, b2, c1, c2); - xlr_nae_wreg(priv->base_addr, R_TRANSLATETABLE + i, val); - c1 = c2; - } -} - -static void xlr_config_parser(struct xlr_net_priv *priv) -{ - u32 val; - - /* Mark it as ETHERNET type */ - xlr_nae_wreg(priv->base_addr, R_L2TYPE_0, 0x01); - - /* Use 7bit CRChash for flow classification with 127 as CRC polynomial*/ - xlr_nae_wreg(priv->base_addr, R_PARSERCONFIGREG, - ((0x7f << 8) | (1 << 1))); - - /* configure the parser : L2 Type is configured in the bootloader */ - /* extract IP: src, dest protocol */ - xlr_nae_wreg(priv->base_addr, R_L3CTABLE, - (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) | - (0x0800 << 0)); - xlr_nae_wreg(priv->base_addr, R_L3CTABLE + 1, - (9 << 25) | (1 << 21) | (12 << 14) | (4 << 10) | - (16 << 4) | 4); - - /* Configure to extract SRC port and Dest port for TCP and UDP pkts */ - xlr_nae_wreg(priv->base_addr, R_L4CTABLE, 6); - xlr_nae_wreg(priv->base_addr, R_L4CTABLE + 2, 17); - val = ((0 << 21) | (2 << 17) | (2 << 11) | (2 << 7)); - xlr_nae_wreg(priv->base_addr, R_L4CTABLE + 1, val); - xlr_nae_wreg(priv->base_addr, R_L4CTABLE + 3, val); - - 
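/*
 * Each R_TRANSLATETABLE word written in xlr_config_translate_table() above
 * carries two (class, bucket) pairs plus a per-pair use-bucket flag. A
 * standalone sketch of that packing, using the shift positions from the
 * deleted loop (the function and parameter names are descriptive, not from
 * the driver):
 */
#include <assert.h>
#include <stdint.h>

static uint32_t pack_translate_entry(unsigned int c1, unsigned int b1,
				     unsigned int c2, unsigned int b2,
				     unsigned int use_bkt)
{
	return (c1 << 23) | (b1 << 17) | (use_bkt << 16) |
	       (c2 << 7)  | (b2 << 1)  | (use_bkt << 0);
}

int main(void)
{
	/* class 3 -> bucket 9, class 0 -> bucket 12, class-based mode */
	uint32_t v = pack_translate_entry(3, 9, 0, 12, 0);

	assert(((v >> 23) & 0x3)  == 3);	/* first class (2 bits) */
	assert(((v >> 17) & 0x3f) == 9);	/* first bucket (6 bits) */
	assert(((v >> 7)  & 0x3)  == 0);	/* second class */
	assert(((v >> 1)  & 0x3f) == 12);	/* second bucket */
	return 0;
}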
xlr_config_translate_table(priv); -} - -static int xlr_phy_write(u32 *base_addr, int phy_addr, int regnum, u16 val) -{ - unsigned long timeout, stoptime, checktime; - int timedout; - - /* 100ms timeout*/ - timeout = msecs_to_jiffies(100); - stoptime = jiffies + timeout; - timedout = 0; - - xlr_nae_wreg(base_addr, R_MII_MGMT_ADDRESS, (phy_addr << 8) | regnum); - - /* Write the data which starts the write cycle */ - xlr_nae_wreg(base_addr, R_MII_MGMT_WRITE_DATA, (u32)val); - - /* poll for the read cycle to complete */ - while (!timedout) { - checktime = jiffies; - if (xlr_nae_rdreg(base_addr, R_MII_MGMT_INDICATORS) == 0) - break; - timedout = time_after(checktime, stoptime); - } - if (timedout) { - pr_info("Phy device write err: device busy"); - return -EBUSY; - } - - return 0; -} - -static int xlr_phy_read(u32 *base_addr, int phy_addr, int regnum) -{ - unsigned long timeout, stoptime, checktime; - int timedout; - - /* 100ms timeout*/ - timeout = msecs_to_jiffies(100); - stoptime = jiffies + timeout; - timedout = 0; - - /* setup the phy reg to be used */ - xlr_nae_wreg(base_addr, R_MII_MGMT_ADDRESS, - (phy_addr << 8) | (regnum << 0)); - - /* Issue the read command */ - xlr_nae_wreg(base_addr, R_MII_MGMT_COMMAND, - (1 << O_MII_MGMT_COMMAND__rstat)); - - /* poll for the read cycle to complete */ - while (!timedout) { - checktime = jiffies; - if (xlr_nae_rdreg(base_addr, R_MII_MGMT_INDICATORS) == 0) - break; - timedout = time_after(checktime, stoptime); - } - if (timedout) { - pr_info("Phy device read err: device busy"); - return -EBUSY; - } - - /* clear the read cycle */ - xlr_nae_wreg(base_addr, R_MII_MGMT_COMMAND, 0); - - /* Read the data */ - return xlr_nae_rdreg(base_addr, R_MII_MGMT_STATUS); -} - -static int xlr_mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 val) -{ - struct xlr_net_priv *priv = bus->priv; - int ret; - - ret = xlr_phy_write(priv->mii_addr, phy_addr, regnum, val); - dev_dbg(&priv->ndev->dev, "mii_write phy %d : %d <- %x [%x]\n", - phy_addr, regnum, val, ret); - return ret; -} - -static int xlr_mii_read(struct mii_bus *bus, int phy_addr, int regnum) -{ - struct xlr_net_priv *priv = bus->priv; - int ret; - - ret = xlr_phy_read(priv->mii_addr, phy_addr, regnum); - dev_dbg(&priv->ndev->dev, "mii_read phy %d : %d [%x]\n", - phy_addr, regnum, ret); - return ret; -} - -/* - * XLR ports are RGMII. 
XLS ports are SGMII mostly except the port0, - * which can be configured either SGMII or RGMII, considered SGMII - * by default, if board setup to RGMII the port_type need to set - * accordingly.Serdes and PCS layer need to configured for SGMII - */ -static void xlr_sgmii_init(struct xlr_net_priv *priv) -{ - int phy; - - xlr_phy_write(priv->serdes_addr, 26, 0, 0x6DB0); - xlr_phy_write(priv->serdes_addr, 26, 1, 0xFFFF); - xlr_phy_write(priv->serdes_addr, 26, 2, 0xB6D0); - xlr_phy_write(priv->serdes_addr, 26, 3, 0x00FF); - xlr_phy_write(priv->serdes_addr, 26, 4, 0x0000); - xlr_phy_write(priv->serdes_addr, 26, 5, 0x0000); - xlr_phy_write(priv->serdes_addr, 26, 6, 0x0005); - xlr_phy_write(priv->serdes_addr, 26, 7, 0x0001); - xlr_phy_write(priv->serdes_addr, 26, 8, 0x0000); - xlr_phy_write(priv->serdes_addr, 26, 9, 0x0000); - xlr_phy_write(priv->serdes_addr, 26, 10, 0x0000); - - /* program GPIO values for serdes init parameters */ - xlr_nae_wreg(priv->gpio_addr, 0x20, 0x7e6802); - xlr_nae_wreg(priv->gpio_addr, 0x10, 0x7104); - - xlr_nae_wreg(priv->gpio_addr, 0x22, 0x7e6802); - xlr_nae_wreg(priv->gpio_addr, 0x21, 0x7104); - - /* enable autoneg - more magic */ - phy = priv->phy_addr % 4 + 27; - xlr_phy_write(priv->pcs_addr, phy, 0, 0x1000); - xlr_phy_write(priv->pcs_addr, phy, 0, 0x0200); -} - -void xlr_set_gmac_speed(struct xlr_net_priv *priv) -{ - struct phy_device *phydev = xlr_get_phydev(priv); - int speed; - - if (phydev->interface == PHY_INTERFACE_MODE_SGMII) - xlr_sgmii_init(priv); - - if (phydev->speed != priv->phy_speed) { - speed = phydev->speed; - if (speed == SPEED_1000) { - /* Set interface to Byte mode */ - xlr_nae_wreg(priv->base_addr, R_MAC_CONFIG_2, 0x7217); - priv->phy_speed = speed; - } else if (speed == SPEED_100 || speed == SPEED_10) { - /* Set interface to Nibble mode */ - xlr_nae_wreg(priv->base_addr, R_MAC_CONFIG_2, 0x7117); - priv->phy_speed = speed; - } - /* Set SGMII speed in Interface control reg */ - if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { - if (speed == SPEED_10) - xlr_nae_wreg(priv->base_addr, - R_INTERFACE_CONTROL, - SGMII_SPEED_10); - if (speed == SPEED_100) - xlr_nae_wreg(priv->base_addr, - R_INTERFACE_CONTROL, - SGMII_SPEED_100); - if (speed == SPEED_1000) - xlr_nae_wreg(priv->base_addr, - R_INTERFACE_CONTROL, - SGMII_SPEED_1000); - } - if (speed == SPEED_10) - xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x2); - if (speed == SPEED_100) - xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x1); - if (speed == SPEED_1000) - xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x0); - } - pr_info("gmac%d : %dMbps\n", priv->port_id, priv->phy_speed); -} - -static void xlr_gmac_link_adjust(struct net_device *ndev) -{ - struct xlr_net_priv *priv = netdev_priv(ndev); - struct phy_device *phydev = xlr_get_phydev(priv); - u32 intreg; - - intreg = xlr_nae_rdreg(priv->base_addr, R_INTREG); - if (phydev->link) { - if (phydev->speed != priv->phy_speed) { - xlr_set_gmac_speed(priv); - pr_info("gmac%d : Link up\n", priv->port_id); - } - } else { - xlr_set_gmac_speed(priv); - pr_info("gmac%d : Link down\n", priv->port_id); - } -} - -static int xlr_mii_probe(struct xlr_net_priv *priv) -{ - struct phy_device *phydev = xlr_get_phydev(priv); - - if (!phydev) { - pr_err("no PHY found on phy_addr %d\n", priv->phy_addr); - return -ENODEV; - } - - /* Attach MAC to PHY */ - phydev = phy_connect(priv->ndev, phydev_name(phydev), - xlr_gmac_link_adjust, priv->nd->phy_interface); - - if (IS_ERR(phydev)) { - pr_err("could not attach PHY\n"); - return PTR_ERR(phydev); - } - phydev->supported 
&= (ADVERTISED_10baseT_Full - | ADVERTISED_10baseT_Half - | ADVERTISED_100baseT_Full - | ADVERTISED_100baseT_Half - | ADVERTISED_1000baseT_Full - | ADVERTISED_Autoneg - | ADVERTISED_MII); - - phydev->advertising = phydev->supported; - phy_attached_info(phydev); - return 0; -} - -static int xlr_setup_mdio(struct xlr_net_priv *priv, - struct platform_device *pdev) -{ - int err; - - priv->mii_bus = mdiobus_alloc(); - if (!priv->mii_bus) { - pr_err("mdiobus alloc failed\n"); - return -ENOMEM; - } - - priv->mii_bus->priv = priv; - priv->mii_bus->name = "xlr-mdio"; - snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d", - priv->mii_bus->name, priv->port_id); - priv->mii_bus->read = xlr_mii_read; - priv->mii_bus->write = xlr_mii_write; - priv->mii_bus->parent = &pdev->dev; - - /* Scan only the enabled address */ - priv->mii_bus->phy_mask = ~(1 << priv->phy_addr); - - /* setting clock divisor to 54 */ - xlr_nae_wreg(priv->base_addr, R_MII_MGMT_CONFIG, 0x7); - - err = mdiobus_register(priv->mii_bus); - if (err) { - mdiobus_free(priv->mii_bus); - pr_err("mdio bus registration failed\n"); - return err; - } - - pr_info("Registered mdio bus id : %s\n", priv->mii_bus->id); - err = xlr_mii_probe(priv); - if (err) { - mdiobus_free(priv->mii_bus); - return err; - } - return 0; -} - -static void xlr_port_enable(struct xlr_net_priv *priv) -{ - u32 prid = (read_c0_prid() & 0xf000); - - /* Setup MAC_CONFIG reg if (xls & rgmii) */ - if ((prid == 0x8000 || prid == 0x4000 || prid == 0xc000) && - priv->nd->phy_interface == PHY_INTERFACE_MODE_RGMII) - xlr_reg_update(priv->base_addr, R_RX_CONTROL, - (1 << O_RX_CONTROL__RGMII), - (1 << O_RX_CONTROL__RGMII)); - - /* Rx Tx enable */ - xlr_reg_update(priv->base_addr, R_MAC_CONFIG_1, - ((1 << O_MAC_CONFIG_1__rxen) | - (1 << O_MAC_CONFIG_1__txen) | - (1 << O_MAC_CONFIG_1__rxfc) | - (1 << O_MAC_CONFIG_1__txfc)), - ((1 << O_MAC_CONFIG_1__rxen) | - (1 << O_MAC_CONFIG_1__txen) | - (1 << O_MAC_CONFIG_1__rxfc) | - (1 << O_MAC_CONFIG_1__txfc))); - - /* Setup tx control reg */ - xlr_reg_update(priv->base_addr, R_TX_CONTROL, - ((1 << O_TX_CONTROL__TXENABLE) | - (512 << O_TX_CONTROL__TXTHRESHOLD)), 0x3fff); - - /* Setup rx control reg */ - xlr_reg_update(priv->base_addr, R_RX_CONTROL, - 1 << O_RX_CONTROL__RXENABLE, - 1 << O_RX_CONTROL__RXENABLE); -} - -static void xlr_port_disable(struct xlr_net_priv *priv) -{ - /* Setup MAC_CONFIG reg */ - /* Rx Tx disable*/ - xlr_reg_update(priv->base_addr, R_MAC_CONFIG_1, - ((1 << O_MAC_CONFIG_1__rxen) | - (1 << O_MAC_CONFIG_1__txen) | - (1 << O_MAC_CONFIG_1__rxfc) | - (1 << O_MAC_CONFIG_1__txfc)), 0x0); - - /* Setup tx control reg */ - xlr_reg_update(priv->base_addr, R_TX_CONTROL, - ((1 << O_TX_CONTROL__TXENABLE) | - (512 << O_TX_CONTROL__TXTHRESHOLD)), 0); - - /* Setup rx control reg */ - xlr_reg_update(priv->base_addr, R_RX_CONTROL, - 1 << O_RX_CONTROL__RXENABLE, 0); -} - -/* - * Initialization of gmac - */ -static int xlr_gmac_init(struct xlr_net_priv *priv, - struct platform_device *pdev) -{ - int ret; - - pr_info("Initializing the gmac%d\n", priv->port_id); - - xlr_port_disable(priv); - - xlr_nae_wreg(priv->base_addr, R_DESC_PACK_CTRL, - (1 << O_DESC_PACK_CTRL__MAXENTRY) | - (BYTE_OFFSET << O_DESC_PACK_CTRL__BYTEOFFSET) | - (1600 << O_DESC_PACK_CTRL__REGULARSIZE)); - - ret = xlr_setup_mdio(priv, pdev); - if (ret) - return ret; - xlr_port_enable(priv); - - /* Enable Full-duplex/1000Mbps/CRC */ - xlr_nae_wreg(priv->base_addr, R_MAC_CONFIG_2, 0x7217); - /* speed 2.5Mhz */ - xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x02); - /* Setup 
Interrupt mask reg */ - xlr_nae_wreg(priv->base_addr, R_INTMASK, (1 << O_INTMASK__TXILLEGAL) | - (1 << O_INTMASK__MDINT) | (1 << O_INTMASK__TXFETCHERROR) | - (1 << O_INTMASK__P2PSPILLECC) | (1 << O_INTMASK__TAGFULL) | - (1 << O_INTMASK__UNDERRUN) | (1 << O_INTMASK__ABORT)); - - /* Clear all stats */ - xlr_reg_update(priv->base_addr, R_STATCTRL, 0, 1 << O_STATCTRL__CLRCNT); - xlr_reg_update(priv->base_addr, R_STATCTRL, 1 << 2, 1 << 2); - return 0; -} - -static int xlr_net_probe(struct platform_device *pdev) -{ - struct xlr_net_priv *priv = NULL; - struct net_device *ndev; - struct resource *res; - struct xlr_adapter *adapter; - int err, port; - - pr_info("XLR/XLS Ethernet Driver controller %d\n", pdev->id); - /* - * Allocate our adapter data structure and attach it to the device. - */ - adapter = devm_kzalloc(&pdev->dev, sizeof(*adapter), GFP_KERNEL); - if (!adapter) - return -ENOMEM; - - /* - * XLR and XLS have 1 and 2 NAE controller respectively - * Each controller has 4 gmac ports, mapping each controller - * under one parent device, 4 gmac ports under one device. - */ - for (port = 0; port < pdev->num_resources / 2; port++) { - ndev = alloc_etherdev_mq(sizeof(struct xlr_net_priv), 32); - if (!ndev) { - dev_err(&pdev->dev, - "Allocation of Ethernet device failed\n"); - return -ENOMEM; - } - - priv = netdev_priv(ndev); - priv->pdev = pdev; - priv->ndev = ndev; - priv->port_id = (pdev->id * 4) + port; - priv->nd = (struct xlr_net_data *)pdev->dev.platform_data; - priv->base_addr = devm_platform_ioremap_resource(pdev, port); - if (IS_ERR(priv->base_addr)) { - err = PTR_ERR(priv->base_addr); - goto err_gmac; - } - priv->adapter = adapter; - adapter->netdev[port] = ndev; - - res = platform_get_resource(pdev, IORESOURCE_IRQ, port); - if (!res) { - dev_err(&pdev->dev, "No irq resource for MAC %d\n", - priv->port_id); - err = -ENODEV; - goto err_gmac; - } - - ndev->irq = res->start; - - priv->phy_addr = priv->nd->phy_addr[port]; - priv->tx_stnid = priv->nd->tx_stnid[port]; - priv->mii_addr = priv->nd->mii_addr; - priv->serdes_addr = priv->nd->serdes_addr; - priv->pcs_addr = priv->nd->pcs_addr; - priv->gpio_addr = priv->nd->gpio_addr; - - ndev->netdev_ops = &xlr_netdev_ops; - ndev->watchdog_timeo = HZ; - - /* Setup Mac address and Rx mode */ - eth_hw_addr_random(ndev); - xlr_hw_set_mac_addr(ndev); - xlr_set_rx_mode(ndev); - - priv->num_rx_desc += MAX_NUM_DESC_SPILL; - ndev->ethtool_ops = &xlr_ethtool_ops; - SET_NETDEV_DEV(ndev, &pdev->dev); - - xlr_config_fifo_spill_area(priv); - /* Configure PDE to Round-Robin pkt distribution */ - xlr_config_pde(priv); - xlr_config_parser(priv); - - /* Call init with respect to port */ - if (strcmp(res->name, "gmac") == 0) { - err = xlr_gmac_init(priv, pdev); - if (err) { - dev_err(&pdev->dev, "gmac%d init failed\n", - priv->port_id); - goto err_gmac; - } - } - - if (priv->port_id == 0 || priv->port_id == 4) { - err = xlr_config_common(priv); - if (err) - goto err_netdev; - } - - err = register_netdev(ndev); - if (err) { - dev_err(&pdev->dev, - "Registering netdev failed for gmac%d\n", - priv->port_id); - goto err_netdev; - } - platform_set_drvdata(pdev, priv); - } - - return 0; - -err_netdev: - mdiobus_free(priv->mii_bus); -err_gmac: - free_netdev(ndev); - return err; -} - -static int xlr_net_remove(struct platform_device *pdev) -{ - struct xlr_net_priv *priv = platform_get_drvdata(pdev); - - unregister_netdev(priv->ndev); - mdiobus_unregister(priv->mii_bus); - mdiobus_free(priv->mii_bus); - free_netdev(priv->ndev); - return 0; -} - -static struct 
platform_driver xlr_net_driver = { - .probe = xlr_net_probe, - .remove = xlr_net_remove, - .driver = { - .name = "xlr-net", - }, -}; - -module_platform_driver(xlr_net_driver); - -MODULE_AUTHOR("Ganesan Ramalingam <ganesanr@broadcom.com>"); -MODULE_DESCRIPTION("Ethernet driver for Netlogic XLR/XLS"); -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_ALIAS("platform:xlr-net"); diff --git a/drivers/staging/netlogic/xlr_net.h b/drivers/staging/netlogic/xlr_net.h deleted file mode 100644 index 8365b744f9b3..000000000000 --- a/drivers/staging/netlogic/xlr_net.h +++ /dev/null @@ -1,1079 +0,0 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */ -/* - * Copyright (c) 2003-2012 Broadcom Corporation - * All Rights Reserved - */ - -/* #define MAC_SPLIT_MODE */ - -#define MAC_SPACING 0x400 -#define XGMAC_SPACING 0x400 - -/* PE-MCXMAC register and bit field definitions */ -#define R_MAC_CONFIG_1 0x00 -#define O_MAC_CONFIG_1__srst 31 -#define O_MAC_CONFIG_1__simr 30 -#define O_MAC_CONFIG_1__hrrmc 18 -#define W_MAC_CONFIG_1__hrtmc 2 -#define O_MAC_CONFIG_1__hrrfn 16 -#define W_MAC_CONFIG_1__hrtfn 2 -#define O_MAC_CONFIG_1__intlb 8 -#define O_MAC_CONFIG_1__rxfc 5 -#define O_MAC_CONFIG_1__txfc 4 -#define O_MAC_CONFIG_1__srxen 3 -#define O_MAC_CONFIG_1__rxen 2 -#define O_MAC_CONFIG_1__stxen 1 -#define O_MAC_CONFIG_1__txen 0 -#define R_MAC_CONFIG_2 0x01 -#define O_MAC_CONFIG_2__prlen 12 -#define W_MAC_CONFIG_2__prlen 4 -#define O_MAC_CONFIG_2__speed 8 -#define W_MAC_CONFIG_2__speed 2 -#define O_MAC_CONFIG_2__hugen 5 -#define O_MAC_CONFIG_2__flchk 4 -#define O_MAC_CONFIG_2__crce 1 -#define O_MAC_CONFIG_2__fulld 0 -#define R_IPG_IFG 0x02 -#define O_IPG_IFG__ipgr1 24 -#define W_IPG_IFG__ipgr1 7 -#define O_IPG_IFG__ipgr2 16 -#define W_IPG_IFG__ipgr2 7 -#define O_IPG_IFG__mifg 8 -#define W_IPG_IFG__mifg 8 -#define O_IPG_IFG__ipgt 0 -#define W_IPG_IFG__ipgt 7 -#define R_HALF_DUPLEX 0x03 -#define O_HALF_DUPLEX__abebt 24 -#define W_HALF_DUPLEX__abebt 4 -#define O_HALF_DUPLEX__abebe 19 -#define O_HALF_DUPLEX__bpnb 18 -#define O_HALF_DUPLEX__nobo 17 -#define O_HALF_DUPLEX__edxsdfr 16 -#define O_HALF_DUPLEX__retry 12 -#define W_HALF_DUPLEX__retry 4 -#define O_HALF_DUPLEX__lcol 0 -#define W_HALF_DUPLEX__lcol 10 -#define R_MAXIMUM_FRAME_LENGTH 0x04 -#define O_MAXIMUM_FRAME_LENGTH__maxf 0 -#define W_MAXIMUM_FRAME_LENGTH__maxf 16 -#define R_TEST 0x07 -#define O_TEST__mbof 3 -#define O_TEST__rthdf 2 -#define O_TEST__tpause 1 -#define O_TEST__sstct 0 -#define R_MII_MGMT_CONFIG 0x08 -#define O_MII_MGMT_CONFIG__scinc 5 -#define O_MII_MGMT_CONFIG__spre 4 -#define O_MII_MGMT_CONFIG__clks 3 -#define W_MII_MGMT_CONFIG__clks 3 -#define R_MII_MGMT_COMMAND 0x09 -#define O_MII_MGMT_COMMAND__scan 1 -#define O_MII_MGMT_COMMAND__rstat 0 -#define R_MII_MGMT_ADDRESS 0x0A -#define O_MII_MGMT_ADDRESS__fiad 8 -#define W_MII_MGMT_ADDRESS__fiad 5 -#define O_MII_MGMT_ADDRESS__fgad 5 -#define W_MII_MGMT_ADDRESS__fgad 0 -#define R_MII_MGMT_WRITE_DATA 0x0B -#define O_MII_MGMT_WRITE_DATA__ctld 0 -#define W_MII_MGMT_WRITE_DATA__ctld 16 -#define R_MII_MGMT_STATUS 0x0C -#define R_MII_MGMT_INDICATORS 0x0D -#define O_MII_MGMT_INDICATORS__nvalid 2 -#define O_MII_MGMT_INDICATORS__scan 1 -#define O_MII_MGMT_INDICATORS__busy 0 -#define R_INTERFACE_CONTROL 0x0E -#define O_INTERFACE_CONTROL__hrstint 31 -#define O_INTERFACE_CONTROL__tbimode 27 -#define O_INTERFACE_CONTROL__ghdmode 26 -#define O_INTERFACE_CONTROL__lhdmode 25 -#define O_INTERFACE_CONTROL__phymod 24 -#define O_INTERFACE_CONTROL__hrrmi 23 -#define O_INTERFACE_CONTROL__rspd 16 -#define 
O_INTERFACE_CONTROL__hr100 15 -#define O_INTERFACE_CONTROL__frcq 10 -#define O_INTERFACE_CONTROL__nocfr 9 -#define O_INTERFACE_CONTROL__dlfct 8 -#define O_INTERFACE_CONTROL__enjab 0 -#define R_INTERFACE_STATUS 0x0F -#define O_INTERFACE_STATUS__xsdfr 9 -#define O_INTERFACE_STATUS__ssrr 8 -#define W_INTERFACE_STATUS__ssrr 5 -#define O_INTERFACE_STATUS__miilf 3 -#define O_INTERFACE_STATUS__locar 2 -#define O_INTERFACE_STATUS__sqerr 1 -#define O_INTERFACE_STATUS__jabber 0 -#define R_STATION_ADDRESS_LS 0x10 -#define R_STATION_ADDRESS_MS 0x11 - -/* A-XGMAC register and bit field definitions */ -#define R_XGMAC_CONFIG_0 0x00 -#define O_XGMAC_CONFIG_0__hstmacrst 31 -#define O_XGMAC_CONFIG_0__hstrstrctl 23 -#define O_XGMAC_CONFIG_0__hstrstrfn 22 -#define O_XGMAC_CONFIG_0__hstrsttctl 18 -#define O_XGMAC_CONFIG_0__hstrsttfn 17 -#define O_XGMAC_CONFIG_0__hstrstmiim 16 -#define O_XGMAC_CONFIG_0__hstloopback 8 -#define R_XGMAC_CONFIG_1 0x01 -#define O_XGMAC_CONFIG_1__hsttctlen 31 -#define O_XGMAC_CONFIG_1__hsttfen 30 -#define O_XGMAC_CONFIG_1__hstrctlen 29 -#define O_XGMAC_CONFIG_1__hstrfen 28 -#define O_XGMAC_CONFIG_1__tfen 26 -#define O_XGMAC_CONFIG_1__rfen 24 -#define O_XGMAC_CONFIG_1__hstrctlshrtp 12 -#define O_XGMAC_CONFIG_1__hstdlyfcstx 10 -#define W_XGMAC_CONFIG_1__hstdlyfcstx 2 -#define O_XGMAC_CONFIG_1__hstdlyfcsrx 8 -#define W_XGMAC_CONFIG_1__hstdlyfcsrx 2 -#define O_XGMAC_CONFIG_1__hstppen 7 -#define O_XGMAC_CONFIG_1__hstbytswp 6 -#define O_XGMAC_CONFIG_1__hstdrplt64 5 -#define O_XGMAC_CONFIG_1__hstprmscrx 4 -#define O_XGMAC_CONFIG_1__hstlenchk 3 -#define O_XGMAC_CONFIG_1__hstgenfcs 2 -#define O_XGMAC_CONFIG_1__hstpadmode 0 -#define W_XGMAC_CONFIG_1__hstpadmode 2 -#define R_XGMAC_CONFIG_2 0x02 -#define O_XGMAC_CONFIG_2__hsttctlfrcp 31 -#define O_XGMAC_CONFIG_2__hstmlnkflth 27 -#define O_XGMAC_CONFIG_2__hstalnkflth 26 -#define O_XGMAC_CONFIG_2__rflnkflt 24 -#define W_XGMAC_CONFIG_2__rflnkflt 2 -#define O_XGMAC_CONFIG_2__hstipgextmod 16 -#define W_XGMAC_CONFIG_2__hstipgextmod 5 -#define O_XGMAC_CONFIG_2__hstrctlfrcp 15 -#define O_XGMAC_CONFIG_2__hstipgexten 5 -#define O_XGMAC_CONFIG_2__hstmipgext 0 -#define W_XGMAC_CONFIG_2__hstmipgext 5 -#define R_XGMAC_CONFIG_3 0x03 -#define O_XGMAC_CONFIG_3__hstfltrfrm 31 -#define W_XGMAC_CONFIG_3__hstfltrfrm 16 -#define O_XGMAC_CONFIG_3__hstfltrfrmdc 15 -#define W_XGMAC_CONFIG_3__hstfltrfrmdc 16 -#define R_XGMAC_STATION_ADDRESS_LS 0x04 -#define O_XGMAC_STATION_ADDRESS_LS__hstmacadr0 0 -#define W_XGMAC_STATION_ADDRESS_LS__hstmacadr0 32 -#define R_XGMAC_STATION_ADDRESS_MS 0x05 -#define R_XGMAC_MAX_FRAME_LEN 0x08 -#define O_XGMAC_MAX_FRAME_LEN__hstmxfrmwctx 16 -#define W_XGMAC_MAX_FRAME_LEN__hstmxfrmwctx 14 -#define O_XGMAC_MAX_FRAME_LEN__hstmxfrmbcrx 0 -#define W_XGMAC_MAX_FRAME_LEN__hstmxfrmbcrx 16 -#define R_XGMAC_REV_LEVEL 0x0B -#define O_XGMAC_REV_LEVEL__revlvl 0 -#define W_XGMAC_REV_LEVEL__revlvl 15 -#define R_XGMAC_MIIM_COMMAND 0x10 -#define O_XGMAC_MIIM_COMMAND__hstldcmd 3 -#define O_XGMAC_MIIM_COMMAND__hstmiimcmd 0 -#define W_XGMAC_MIIM_COMMAND__hstmiimcmd 3 -#define R_XGMAC_MIIM_FILED 0x11 -#define O_XGMAC_MIIM_FILED__hststfield 30 -#define W_XGMAC_MIIM_FILED__hststfield 2 -#define O_XGMAC_MIIM_FILED__hstopfield 28 -#define W_XGMAC_MIIM_FILED__hstopfield 2 -#define O_XGMAC_MIIM_FILED__hstphyadx 23 -#define W_XGMAC_MIIM_FILED__hstphyadx 5 -#define O_XGMAC_MIIM_FILED__hstregadx 18 -#define W_XGMAC_MIIM_FILED__hstregadx 5 -#define O_XGMAC_MIIM_FILED__hsttafield 16 -#define W_XGMAC_MIIM_FILED__hsttafield 2 -#define O_XGMAC_MIIM_FILED__miimrddat 0 
-#define W_XGMAC_MIIM_FILED__miimrddat 16 -#define R_XGMAC_MIIM_CONFIG 0x12 -#define O_XGMAC_MIIM_CONFIG__hstnopram 7 -#define O_XGMAC_MIIM_CONFIG__hstclkdiv 0 -#define W_XGMAC_MIIM_CONFIG__hstclkdiv 7 -#define R_XGMAC_MIIM_LINK_FAIL_VECTOR 0x13 -#define O_XGMAC_MIIM_LINK_FAIL_VECTOR__miimlfvec 0 -#define W_XGMAC_MIIM_LINK_FAIL_VECTOR__miimlfvec 32 -#define R_XGMAC_MIIM_INDICATOR 0x14 -#define O_XGMAC_MIIM_INDICATOR__miimphylf 4 -#define O_XGMAC_MIIM_INDICATOR__miimmoncplt 3 -#define O_XGMAC_MIIM_INDICATOR__miimmonvld 2 -#define O_XGMAC_MIIM_INDICATOR__miimmon 1 -#define O_XGMAC_MIIM_INDICATOR__miimbusy 0 - -/* GMAC stats registers */ -#define R_RBYT 0x27 -#define R_RPKT 0x28 -#define R_RFCS 0x29 -#define R_RMCA 0x2A -#define R_RBCA 0x2B -#define R_RXCF 0x2C -#define R_RXPF 0x2D -#define R_RXUO 0x2E -#define R_RALN 0x2F -#define R_RFLR 0x30 -#define R_RCDE 0x31 -#define R_RCSE 0x32 -#define R_RUND 0x33 -#define R_ROVR 0x34 -#define R_TBYT 0x38 -#define R_TPKT 0x39 -#define R_TMCA 0x3A -#define R_TBCA 0x3B -#define R_TXPF 0x3C -#define R_TDFR 0x3D -#define R_TEDF 0x3E -#define R_TSCL 0x3F -#define R_TMCL 0x40 -#define R_TLCL 0x41 -#define R_TXCL 0x42 -#define R_TNCL 0x43 -#define R_TJBR 0x46 -#define R_TFCS 0x47 -#define R_TXCF 0x48 -#define R_TOVR 0x49 -#define R_TUND 0x4A -#define R_TFRG 0x4B - -/* Glue logic register and bit field definitions */ -#define R_MAC_ADDR0 0x50 -#define R_MAC_ADDR1 0x52 -#define R_MAC_ADDR2 0x54 -#define R_MAC_ADDR3 0x56 -#define R_MAC_ADDR_MASK2 0x58 -#define R_MAC_ADDR_MASK3 0x5A -#define R_MAC_FILTER_CONFIG 0x5C -#define O_MAC_FILTER_CONFIG__BROADCAST_EN 10 -#define O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN 9 -#define O_MAC_FILTER_CONFIG__ALL_MCAST_EN 8 -#define O_MAC_FILTER_CONFIG__ALL_UCAST_EN 7 -#define O_MAC_FILTER_CONFIG__HASH_MCAST_EN 6 -#define O_MAC_FILTER_CONFIG__HASH_UCAST_EN 5 -#define O_MAC_FILTER_CONFIG__ADDR_MATCH_DISC 4 -#define O_MAC_FILTER_CONFIG__MAC_ADDR3_VALID 3 -#define O_MAC_FILTER_CONFIG__MAC_ADDR2_VALID 2 -#define O_MAC_FILTER_CONFIG__MAC_ADDR1_VALID 1 -#define O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID 0 -#define R_HASH_TABLE_VECTOR 0x30 -#define R_TX_CONTROL 0x0A0 -#define O_TX_CONTROL__TX15HALT 31 -#define O_TX_CONTROL__TX14HALT 30 -#define O_TX_CONTROL__TX13HALT 29 -#define O_TX_CONTROL__TX12HALT 28 -#define O_TX_CONTROL__TX11HALT 27 -#define O_TX_CONTROL__TX10HALT 26 -#define O_TX_CONTROL__TX9HALT 25 -#define O_TX_CONTROL__TX8HALT 24 -#define O_TX_CONTROL__TX7HALT 23 -#define O_TX_CONTROL__TX6HALT 22 -#define O_TX_CONTROL__TX5HALT 21 -#define O_TX_CONTROL__TX4HALT 20 -#define O_TX_CONTROL__TX3HALT 19 -#define O_TX_CONTROL__TX2HALT 18 -#define O_TX_CONTROL__TX1HALT 17 -#define O_TX_CONTROL__TX0HALT 16 -#define O_TX_CONTROL__TXIDLE 15 -#define O_TX_CONTROL__TXENABLE 14 -#define O_TX_CONTROL__TXTHRESHOLD 0 -#define W_TX_CONTROL__TXTHRESHOLD 14 -#define R_RX_CONTROL 0x0A1 -#define O_RX_CONTROL__RGMII 10 -#define O_RX_CONTROL__SOFTRESET 2 -#define O_RX_CONTROL__RXHALT 1 -#define O_RX_CONTROL__RXENABLE 0 -#define R_DESC_PACK_CTRL 0x0A2 -#define O_DESC_PACK_CTRL__BYTEOFFSET 17 -#define W_DESC_PACK_CTRL__BYTEOFFSET 3 -#define O_DESC_PACK_CTRL__PREPADENABLE 16 -#define O_DESC_PACK_CTRL__MAXENTRY 14 -#define W_DESC_PACK_CTRL__MAXENTRY 2 -#define O_DESC_PACK_CTRL__REGULARSIZE 0 -#define W_DESC_PACK_CTRL__REGULARSIZE 14 -#define R_STATCTRL 0x0A3 -#define O_STATCTRL__OVERFLOWEN 4 -#define O_STATCTRL__GIG 3 -#define O_STATCTRL__STEN 2 -#define O_STATCTRL__CLRCNT 1 -#define O_STATCTRL__AUTOZ 0 -#define R_L2ALLOCCTRL 0x0A4 -#define 
O_L2ALLOCCTRL__TXL2ALLOCATE 9 -#define W_L2ALLOCCTRL__TXL2ALLOCATE 9 -#define O_L2ALLOCCTRL__RXL2ALLOCATE 0 -#define W_L2ALLOCCTRL__RXL2ALLOCATE 9 -#define R_INTMASK 0x0A5 -#define O_INTMASK__SPI4TXERROR 28 -#define O_INTMASK__SPI4RXERROR 27 -#define O_INTMASK__RGMIIHALFDUPCOLLISION 27 -#define O_INTMASK__ABORT 26 -#define O_INTMASK__UNDERRUN 25 -#define O_INTMASK__DISCARDPACKET 24 -#define O_INTMASK__ASYNCFIFOFULL 23 -#define O_INTMASK__TAGFULL 22 -#define O_INTMASK__CLASS3FULL 21 -#define O_INTMASK__C3EARLYFULL 20 -#define O_INTMASK__CLASS2FULL 19 -#define O_INTMASK__C2EARLYFULL 18 -#define O_INTMASK__CLASS1FULL 17 -#define O_INTMASK__C1EARLYFULL 16 -#define O_INTMASK__CLASS0FULL 15 -#define O_INTMASK__C0EARLYFULL 14 -#define O_INTMASK__RXDATAFULL 13 -#define O_INTMASK__RXEARLYFULL 12 -#define O_INTMASK__RFREEEMPTY 9 -#define O_INTMASK__RFEARLYEMPTY 8 -#define O_INTMASK__P2PSPILLECC 7 -#define O_INTMASK__FREEDESCFULL 5 -#define O_INTMASK__FREEEARLYFULL 4 -#define O_INTMASK__TXFETCHERROR 3 -#define O_INTMASK__STATCARRY 2 -#define O_INTMASK__MDINT 1 -#define O_INTMASK__TXILLEGAL 0 -#define R_INTREG 0x0A6 -#define O_INTREG__SPI4TXERROR 28 -#define O_INTREG__SPI4RXERROR 27 -#define O_INTREG__RGMIIHALFDUPCOLLISION 27 -#define O_INTREG__ABORT 26 -#define O_INTREG__UNDERRUN 25 -#define O_INTREG__DISCARDPACKET 24 -#define O_INTREG__ASYNCFIFOFULL 23 -#define O_INTREG__TAGFULL 22 -#define O_INTREG__CLASS3FULL 21 -#define O_INTREG__C3EARLYFULL 20 -#define O_INTREG__CLASS2FULL 19 -#define O_INTREG__C2EARLYFULL 18 -#define O_INTREG__CLASS1FULL 17 -#define O_INTREG__C1EARLYFULL 16 -#define O_INTREG__CLASS0FULL 15 -#define O_INTREG__C0EARLYFULL 14 -#define O_INTREG__RXDATAFULL 13 -#define O_INTREG__RXEARLYFULL 12 -#define O_INTREG__RFREEEMPTY 9 -#define O_INTREG__RFEARLYEMPTY 8 -#define O_INTREG__P2PSPILLECC 7 -#define O_INTREG__FREEDESCFULL 5 -#define O_INTREG__FREEEARLYFULL 4 -#define O_INTREG__TXFETCHERROR 3 -#define O_INTREG__STATCARRY 2 -#define O_INTREG__MDINT 1 -#define O_INTREG__TXILLEGAL 0 -#define R_TXRETRY 0x0A7 -#define O_TXRETRY__COLLISIONRETRY 6 -#define O_TXRETRY__BUSERRORRETRY 5 -#define O_TXRETRY__UNDERRUNRETRY 4 -#define O_TXRETRY__RETRIES 0 -#define W_TXRETRY__RETRIES 4 -#define R_CORECONTROL 0x0A8 -#define O_CORECONTROL__ERRORTHREAD 4 -#define W_CORECONTROL__ERRORTHREAD 7 -#define O_CORECONTROL__SHUTDOWN 2 -#define O_CORECONTROL__SPEED 0 -#define W_CORECONTROL__SPEED 2 -#define R_BYTEOFFSET0 0x0A9 -#define R_BYTEOFFSET1 0x0AA -#define R_L2TYPE_0 0x0F0 -#define O_L2TYPE__EXTRAHDRPROTOSIZE 26 -#define W_L2TYPE__EXTRAHDRPROTOSIZE 5 -#define O_L2TYPE__EXTRAHDRPROTOOFFSET 20 -#define W_L2TYPE__EXTRAHDRPROTOOFFSET 6 -#define O_L2TYPE__EXTRAHEADERSIZE 14 -#define W_L2TYPE__EXTRAHEADERSIZE 6 -#define O_L2TYPE__PROTOOFFSET 8 -#define W_L2TYPE__PROTOOFFSET 6 -#define O_L2TYPE__L2HDROFFSET 2 -#define W_L2TYPE__L2HDROFFSET 6 -#define O_L2TYPE__L2PROTO 0 -#define W_L2TYPE__L2PROTO 2 -#define R_L2TYPE_1 0xF0 -#define R_L2TYPE_2 0xF0 -#define R_L2TYPE_3 0xF0 -#define R_PARSERCONFIGREG 0x100 -#define O_PARSERCONFIGREG__CRCHASHPOLY 8 -#define W_PARSERCONFIGREG__CRCHASHPOLY 7 -#define O_PARSERCONFIGREG__PREPADOFFSET 4 -#define W_PARSERCONFIGREG__PREPADOFFSET 4 -#define O_PARSERCONFIGREG__USECAM 2 -#define O_PARSERCONFIGREG__USEHASH 1 -#define O_PARSERCONFIGREG__USEPROTO 0 -#define R_L3CTABLE 0x140 -#define O_L3CTABLE__OFFSET0 25 -#define W_L3CTABLE__OFFSET0 7 -#define O_L3CTABLE__LEN0 21 -#define W_L3CTABLE__LEN0 4 -#define O_L3CTABLE__OFFSET1 14 -#define W_L3CTABLE__OFFSET1 7 -#define 
O_L3CTABLE__LEN1 10 -#define W_L3CTABLE__LEN1 4 -#define O_L3CTABLE__OFFSET2 4 -#define W_L3CTABLE__OFFSET2 6 -#define O_L3CTABLE__LEN2 0 -#define W_L3CTABLE__LEN2 4 -#define O_L3CTABLE__L3HDROFFSET 26 -#define W_L3CTABLE__L3HDROFFSET 6 -#define O_L3CTABLE__L4PROTOOFFSET 20 -#define W_L3CTABLE__L4PROTOOFFSET 6 -#define O_L3CTABLE__IPCHKSUMCOMPUTE 19 -#define O_L3CTABLE__L4CLASSIFY 18 -#define O_L3CTABLE__L2PROTO 16 -#define W_L3CTABLE__L2PROTO 2 -#define O_L3CTABLE__L3PROTOKEY 0 -#define W_L3CTABLE__L3PROTOKEY 16 -#define R_L4CTABLE 0x160 -#define O_L4CTABLE__OFFSET0 21 -#define W_L4CTABLE__OFFSET0 6 -#define O_L4CTABLE__LEN0 17 -#define W_L4CTABLE__LEN0 4 -#define O_L4CTABLE__OFFSET1 11 -#define W_L4CTABLE__OFFSET1 6 -#define O_L4CTABLE__LEN1 7 -#define W_L4CTABLE__LEN1 4 -#define O_L4CTABLE__TCPCHKSUMENABLE 0 -#define R_CAM4X128TABLE 0x172 -#define O_CAM4X128TABLE__CLASSID 7 -#define W_CAM4X128TABLE__CLASSID 2 -#define O_CAM4X128TABLE__BUCKETID 1 -#define W_CAM4X128TABLE__BUCKETID 6 -#define O_CAM4X128TABLE__USEBUCKET 0 -#define R_CAM4X128KEY 0x180 -#define R_TRANSLATETABLE 0x1A0 -#define R_DMACR0 0x200 -#define O_DMACR0__DATA0WRMAXCR 27 -#define W_DMACR0__DATA0WRMAXCR 3 -#define O_DMACR0__DATA0RDMAXCR 24 -#define W_DMACR0__DATA0RDMAXCR 3 -#define O_DMACR0__DATA1WRMAXCR 21 -#define W_DMACR0__DATA1WRMAXCR 3 -#define O_DMACR0__DATA1RDMAXCR 18 -#define W_DMACR0__DATA1RDMAXCR 3 -#define O_DMACR0__DATA2WRMAXCR 15 -#define W_DMACR0__DATA2WRMAXCR 3 -#define O_DMACR0__DATA2RDMAXCR 12 -#define W_DMACR0__DATA2RDMAXCR 3 -#define O_DMACR0__DATA3WRMAXCR 9 -#define W_DMACR0__DATA3WRMAXCR 3 -#define O_DMACR0__DATA3RDMAXCR 6 -#define W_DMACR0__DATA3RDMAXCR 3 -#define O_DMACR0__DATA4WRMAXCR 3 -#define W_DMACR0__DATA4WRMAXCR 3 -#define O_DMACR0__DATA4RDMAXCR 0 -#define W_DMACR0__DATA4RDMAXCR 3 -#define R_DMACR1 0x201 -#define O_DMACR1__DATA5WRMAXCR 27 -#define W_DMACR1__DATA5WRMAXCR 3 -#define O_DMACR1__DATA5RDMAXCR 24 -#define W_DMACR1__DATA5RDMAXCR 3 -#define O_DMACR1__DATA6WRMAXCR 21 -#define W_DMACR1__DATA6WRMAXCR 3 -#define O_DMACR1__DATA6RDMAXCR 18 -#define W_DMACR1__DATA6RDMAXCR 3 -#define O_DMACR1__DATA7WRMAXCR 15 -#define W_DMACR1__DATA7WRMAXCR 3 -#define O_DMACR1__DATA7RDMAXCR 12 -#define W_DMACR1__DATA7RDMAXCR 3 -#define O_DMACR1__DATA8WRMAXCR 9 -#define W_DMACR1__DATA8WRMAXCR 3 -#define O_DMACR1__DATA8RDMAXCR 6 -#define W_DMACR1__DATA8RDMAXCR 3 -#define O_DMACR1__DATA9WRMAXCR 3 -#define W_DMACR1__DATA9WRMAXCR 3 -#define O_DMACR1__DATA9RDMAXCR 0 -#define W_DMACR1__DATA9RDMAXCR 3 -#define R_DMACR2 0x202 -#define O_DMACR2__DATA10WRMAXCR 27 -#define W_DMACR2__DATA10WRMAXCR 3 -#define O_DMACR2__DATA10RDMAXCR 24 -#define W_DMACR2__DATA10RDMAXCR 3 -#define O_DMACR2__DATA11WRMAXCR 21 -#define W_DMACR2__DATA11WRMAXCR 3 -#define O_DMACR2__DATA11RDMAXCR 18 -#define W_DMACR2__DATA11RDMAXCR 3 -#define O_DMACR2__DATA12WRMAXCR 15 -#define W_DMACR2__DATA12WRMAXCR 3 -#define O_DMACR2__DATA12RDMAXCR 12 -#define W_DMACR2__DATA12RDMAXCR 3 -#define O_DMACR2__DATA13WRMAXCR 9 -#define W_DMACR2__DATA13WRMAXCR 3 -#define O_DMACR2__DATA13RDMAXCR 6 -#define W_DMACR2__DATA13RDMAXCR 3 -#define O_DMACR2__DATA14WRMAXCR 3 -#define W_DMACR2__DATA14WRMAXCR 3 -#define O_DMACR2__DATA14RDMAXCR 0 -#define W_DMACR2__DATA14RDMAXCR 3 -#define R_DMACR3 0x203 -#define O_DMACR3__DATA15WRMAXCR 27 -#define W_DMACR3__DATA15WRMAXCR 3 -#define O_DMACR3__DATA15RDMAXCR 24 -#define W_DMACR3__DATA15RDMAXCR 3 -#define O_DMACR3__SPCLASSWRMAXCR 21 -#define W_DMACR3__SPCLASSWRMAXCR 3 -#define O_DMACR3__SPCLASSRDMAXCR 18 -#define 
W_DMACR3__SPCLASSRDMAXCR 3 -#define O_DMACR3__JUMFRINWRMAXCR 15 -#define W_DMACR3__JUMFRINWRMAXCR 3 -#define O_DMACR3__JUMFRINRDMAXCR 12 -#define W_DMACR3__JUMFRINRDMAXCR 3 -#define O_DMACR3__REGFRINWRMAXCR 9 -#define W_DMACR3__REGFRINWRMAXCR 3 -#define O_DMACR3__REGFRINRDMAXCR 6 -#define W_DMACR3__REGFRINRDMAXCR 3 -#define O_DMACR3__FROUTWRMAXCR 3 -#define W_DMACR3__FROUTWRMAXCR 3 -#define O_DMACR3__FROUTRDMAXCR 0 -#define W_DMACR3__FROUTRDMAXCR 3 -#define R_REG_FRIN_SPILL_MEM_START_0 0x204 -#define O_REG_FRIN_SPILL_MEM_START_0__REGFRINSPILLMEMSTART0 0 -#define W_REG_FRIN_SPILL_MEM_START_0__REGFRINSPILLMEMSTART0 32 -#define R_REG_FRIN_SPILL_MEM_START_1 0x205 -#define O_REG_FRIN_SPILL_MEM_START_1__REGFRINSPILLMEMSTART1 0 -#define W_REG_FRIN_SPILL_MEM_START_1__REGFRINSPILLMEMSTART1 3 -#define R_REG_FRIN_SPILL_MEM_SIZE 0x206 -#define O_REG_FRIN_SPILL_MEM_SIZE__REGFRINSPILLMEMSIZE 0 -#define W_REG_FRIN_SPILL_MEM_SIZE__REGFRINSPILLMEMSIZE 32 -#define R_FROUT_SPILL_MEM_START_0 0x207 -#define O_FROUT_SPILL_MEM_START_0__FROUTSPILLMEMSTART0 0 -#define W_FROUT_SPILL_MEM_START_0__FROUTSPILLMEMSTART0 32 -#define R_FROUT_SPILL_MEM_START_1 0x208 -#define O_FROUT_SPILL_MEM_START_1__FROUTSPILLMEMSTART1 0 -#define W_FROUT_SPILL_MEM_START_1__FROUTSPILLMEMSTART1 3 -#define R_FROUT_SPILL_MEM_SIZE 0x209 -#define O_FROUT_SPILL_MEM_SIZE__FROUTSPILLMEMSIZE 0 -#define W_FROUT_SPILL_MEM_SIZE__FROUTSPILLMEMSIZE 32 -#define R_CLASS0_SPILL_MEM_START_0 0x20A -#define O_CLASS0_SPILL_MEM_START_0__CLASS0SPILLMEMSTART0 0 -#define W_CLASS0_SPILL_MEM_START_0__CLASS0SPILLMEMSTART0 32 -#define R_CLASS0_SPILL_MEM_START_1 0x20B -#define O_CLASS0_SPILL_MEM_START_1__CLASS0SPILLMEMSTART1 0 -#define W_CLASS0_SPILL_MEM_START_1__CLASS0SPILLMEMSTART1 3 -#define R_CLASS0_SPILL_MEM_SIZE 0x20C -#define O_CLASS0_SPILL_MEM_SIZE__CLASS0SPILLMEMSIZE 0 -#define W_CLASS0_SPILL_MEM_SIZE__CLASS0SPILLMEMSIZE 32 -#define R_JUMFRIN_SPILL_MEM_START_0 0x20D -#define O_JUMFRIN_SPILL_MEM_START_0__JUMFRINSPILLMEMSTART0 0 -#define W_JUMFRIN_SPILL_MEM_START_0__JUMFRINSPILLMEMSTART0 32 -#define R_JUMFRIN_SPILL_MEM_START_1 0x20E -#define O_JUMFRIN_SPILL_MEM_START_1__JUMFRINSPILLMEMSTART1 0 -#define W_JUMFRIN_SPILL_MEM_START_1__JUMFRINSPILLMEMSTART1 3 -#define R_JUMFRIN_SPILL_MEM_SIZE 0x20F -#define O_JUMFRIN_SPILL_MEM_SIZE__JUMFRINSPILLMEMSIZE 0 -#define W_JUMFRIN_SPILL_MEM_SIZE__JUMFRINSPILLMEMSIZE 32 -#define R_CLASS1_SPILL_MEM_START_0 0x210 -#define O_CLASS1_SPILL_MEM_START_0__CLASS1SPILLMEMSTART0 0 -#define W_CLASS1_SPILL_MEM_START_0__CLASS1SPILLMEMSTART0 32 -#define R_CLASS1_SPILL_MEM_START_1 0x211 -#define O_CLASS1_SPILL_MEM_START_1__CLASS1SPILLMEMSTART1 0 -#define W_CLASS1_SPILL_MEM_START_1__CLASS1SPILLMEMSTART1 3 -#define R_CLASS1_SPILL_MEM_SIZE 0x212 -#define O_CLASS1_SPILL_MEM_SIZE__CLASS1SPILLMEMSIZE 0 -#define W_CLASS1_SPILL_MEM_SIZE__CLASS1SPILLMEMSIZE 32 -#define R_CLASS2_SPILL_MEM_START_0 0x213 -#define O_CLASS2_SPILL_MEM_START_0__CLASS2SPILLMEMSTART0 0 -#define W_CLASS2_SPILL_MEM_START_0__CLASS2SPILLMEMSTART0 32 -#define R_CLASS2_SPILL_MEM_START_1 0x214 -#define O_CLASS2_SPILL_MEM_START_1__CLASS2SPILLMEMSTART1 0 -#define W_CLASS2_SPILL_MEM_START_1__CLASS2SPILLMEMSTART1 3 -#define R_CLASS2_SPILL_MEM_SIZE 0x215 -#define O_CLASS2_SPILL_MEM_SIZE__CLASS2SPILLMEMSIZE 0 -#define W_CLASS2_SPILL_MEM_SIZE__CLASS2SPILLMEMSIZE 32 -#define R_CLASS3_SPILL_MEM_START_0 0x216 -#define O_CLASS3_SPILL_MEM_START_0__CLASS3SPILLMEMSTART0 0 -#define W_CLASS3_SPILL_MEM_START_0__CLASS3SPILLMEMSTART0 32 -#define R_CLASS3_SPILL_MEM_START_1 0x217 -#define 
O_CLASS3_SPILL_MEM_START_1__CLASS3SPILLMEMSTART1 0 -#define W_CLASS3_SPILL_MEM_START_1__CLASS3SPILLMEMSTART1 3 -#define R_CLASS3_SPILL_MEM_SIZE 0x218 -#define O_CLASS3_SPILL_MEM_SIZE__CLASS3SPILLMEMSIZE 0 -#define W_CLASS3_SPILL_MEM_SIZE__CLASS3SPILLMEMSIZE 32 -#define R_REG_FRIN1_SPILL_MEM_START_0 0x219 -#define R_REG_FRIN1_SPILL_MEM_START_1 0x21a -#define R_REG_FRIN1_SPILL_MEM_SIZE 0x21b -#define R_SPIHNGY0 0x219 -#define O_SPIHNGY0__EG_HNGY_THRESH_0 24 -#define W_SPIHNGY0__EG_HNGY_THRESH_0 7 -#define O_SPIHNGY0__EG_HNGY_THRESH_1 16 -#define W_SPIHNGY0__EG_HNGY_THRESH_1 7 -#define O_SPIHNGY0__EG_HNGY_THRESH_2 8 -#define W_SPIHNGY0__EG_HNGY_THRESH_2 7 -#define O_SPIHNGY0__EG_HNGY_THRESH_3 0 -#define W_SPIHNGY0__EG_HNGY_THRESH_3 7 -#define R_SPIHNGY1 0x21A -#define O_SPIHNGY1__EG_HNGY_THRESH_4 24 -#define W_SPIHNGY1__EG_HNGY_THRESH_4 7 -#define O_SPIHNGY1__EG_HNGY_THRESH_5 16 -#define W_SPIHNGY1__EG_HNGY_THRESH_5 7 -#define O_SPIHNGY1__EG_HNGY_THRESH_6 8 -#define W_SPIHNGY1__EG_HNGY_THRESH_6 7 -#define O_SPIHNGY1__EG_HNGY_THRESH_7 0 -#define W_SPIHNGY1__EG_HNGY_THRESH_7 7 -#define R_SPIHNGY2 0x21B -#define O_SPIHNGY2__EG_HNGY_THRESH_8 24 -#define W_SPIHNGY2__EG_HNGY_THRESH_8 7 -#define O_SPIHNGY2__EG_HNGY_THRESH_9 16 -#define W_SPIHNGY2__EG_HNGY_THRESH_9 7 -#define O_SPIHNGY2__EG_HNGY_THRESH_10 8 -#define W_SPIHNGY2__EG_HNGY_THRESH_10 7 -#define O_SPIHNGY2__EG_HNGY_THRESH_11 0 -#define W_SPIHNGY2__EG_HNGY_THRESH_11 7 -#define R_SPIHNGY3 0x21C -#define O_SPIHNGY3__EG_HNGY_THRESH_12 24 -#define W_SPIHNGY3__EG_HNGY_THRESH_12 7 -#define O_SPIHNGY3__EG_HNGY_THRESH_13 16 -#define W_SPIHNGY3__EG_HNGY_THRESH_13 7 -#define O_SPIHNGY3__EG_HNGY_THRESH_14 8 -#define W_SPIHNGY3__EG_HNGY_THRESH_14 7 -#define O_SPIHNGY3__EG_HNGY_THRESH_15 0 -#define W_SPIHNGY3__EG_HNGY_THRESH_15 7 -#define R_SPISTRV0 0x21D -#define O_SPISTRV0__EG_STRV_THRESH_0 24 -#define W_SPISTRV0__EG_STRV_THRESH_0 7 -#define O_SPISTRV0__EG_STRV_THRESH_1 16 -#define W_SPISTRV0__EG_STRV_THRESH_1 7 -#define O_SPISTRV0__EG_STRV_THRESH_2 8 -#define W_SPISTRV0__EG_STRV_THRESH_2 7 -#define O_SPISTRV0__EG_STRV_THRESH_3 0 -#define W_SPISTRV0__EG_STRV_THRESH_3 7 -#define R_SPISTRV1 0x21E -#define O_SPISTRV1__EG_STRV_THRESH_4 24 -#define W_SPISTRV1__EG_STRV_THRESH_4 7 -#define O_SPISTRV1__EG_STRV_THRESH_5 16 -#define W_SPISTRV1__EG_STRV_THRESH_5 7 -#define O_SPISTRV1__EG_STRV_THRESH_6 8 -#define W_SPISTRV1__EG_STRV_THRESH_6 7 -#define O_SPISTRV1__EG_STRV_THRESH_7 0 -#define W_SPISTRV1__EG_STRV_THRESH_7 7 -#define R_SPISTRV2 0x21F -#define O_SPISTRV2__EG_STRV_THRESH_8 24 -#define W_SPISTRV2__EG_STRV_THRESH_8 7 -#define O_SPISTRV2__EG_STRV_THRESH_9 16 -#define W_SPISTRV2__EG_STRV_THRESH_9 7 -#define O_SPISTRV2__EG_STRV_THRESH_10 8 -#define W_SPISTRV2__EG_STRV_THRESH_10 7 -#define O_SPISTRV2__EG_STRV_THRESH_11 0 -#define W_SPISTRV2__EG_STRV_THRESH_11 7 -#define R_SPISTRV3 0x220 -#define O_SPISTRV3__EG_STRV_THRESH_12 24 -#define W_SPISTRV3__EG_STRV_THRESH_12 7 -#define O_SPISTRV3__EG_STRV_THRESH_13 16 -#define W_SPISTRV3__EG_STRV_THRESH_13 7 -#define O_SPISTRV3__EG_STRV_THRESH_14 8 -#define W_SPISTRV3__EG_STRV_THRESH_14 7 -#define O_SPISTRV3__EG_STRV_THRESH_15 0 -#define W_SPISTRV3__EG_STRV_THRESH_15 7 -#define R_TXDATAFIFO0 0x221 -#define O_TXDATAFIFO0__TX0DATAFIFOSTART 24 -#define W_TXDATAFIFO0__TX0DATAFIFOSTART 7 -#define O_TXDATAFIFO0__TX0DATAFIFOSIZE 16 -#define W_TXDATAFIFO0__TX0DATAFIFOSIZE 7 -#define O_TXDATAFIFO0__TX1DATAFIFOSTART 8 -#define W_TXDATAFIFO0__TX1DATAFIFOSTART 7 -#define O_TXDATAFIFO0__TX1DATAFIFOSIZE 0 -#define 
W_TXDATAFIFO0__TX1DATAFIFOSIZE 7 -#define R_TXDATAFIFO1 0x222 -#define O_TXDATAFIFO1__TX2DATAFIFOSTART 24 -#define W_TXDATAFIFO1__TX2DATAFIFOSTART 7 -#define O_TXDATAFIFO1__TX2DATAFIFOSIZE 16 -#define W_TXDATAFIFO1__TX2DATAFIFOSIZE 7 -#define O_TXDATAFIFO1__TX3DATAFIFOSTART 8 -#define W_TXDATAFIFO1__TX3DATAFIFOSTART 7 -#define O_TXDATAFIFO1__TX3DATAFIFOSIZE 0 -#define W_TXDATAFIFO1__TX3DATAFIFOSIZE 7 -#define R_TXDATAFIFO2 0x223 -#define O_TXDATAFIFO2__TX4DATAFIFOSTART 24 -#define W_TXDATAFIFO2__TX4DATAFIFOSTART 7 -#define O_TXDATAFIFO2__TX4DATAFIFOSIZE 16 -#define W_TXDATAFIFO2__TX4DATAFIFOSIZE 7 -#define O_TXDATAFIFO2__TX5DATAFIFOSTART 8 -#define W_TXDATAFIFO2__TX5DATAFIFOSTART 7 -#define O_TXDATAFIFO2__TX5DATAFIFOSIZE 0 -#define W_TXDATAFIFO2__TX5DATAFIFOSIZE 7 -#define R_TXDATAFIFO3 0x224 -#define O_TXDATAFIFO3__TX6DATAFIFOSTART 24 -#define W_TXDATAFIFO3__TX6DATAFIFOSTART 7 -#define O_TXDATAFIFO3__TX6DATAFIFOSIZE 16 -#define W_TXDATAFIFO3__TX6DATAFIFOSIZE 7 -#define O_TXDATAFIFO3__TX7DATAFIFOSTART 8 -#define W_TXDATAFIFO3__TX7DATAFIFOSTART 7 -#define O_TXDATAFIFO3__TX7DATAFIFOSIZE 0 -#define W_TXDATAFIFO3__TX7DATAFIFOSIZE 7 -#define R_TXDATAFIFO4 0x225 -#define O_TXDATAFIFO4__TX8DATAFIFOSTART 24 -#define W_TXDATAFIFO4__TX8DATAFIFOSTART 7 -#define O_TXDATAFIFO4__TX8DATAFIFOSIZE 16 -#define W_TXDATAFIFO4__TX8DATAFIFOSIZE 7 -#define O_TXDATAFIFO4__TX9DATAFIFOSTART 8 -#define W_TXDATAFIFO4__TX9DATAFIFOSTART 7 -#define O_TXDATAFIFO4__TX9DATAFIFOSIZE 0 -#define W_TXDATAFIFO4__TX9DATAFIFOSIZE 7 -#define R_TXDATAFIFO5 0x226 -#define O_TXDATAFIFO5__TX10DATAFIFOSTART 24 -#define W_TXDATAFIFO5__TX10DATAFIFOSTART 7 -#define O_TXDATAFIFO5__TX10DATAFIFOSIZE 16 -#define W_TXDATAFIFO5__TX10DATAFIFOSIZE 7 -#define O_TXDATAFIFO5__TX11DATAFIFOSTART 8 -#define W_TXDATAFIFO5__TX11DATAFIFOSTART 7 -#define O_TXDATAFIFO5__TX11DATAFIFOSIZE 0 -#define W_TXDATAFIFO5__TX11DATAFIFOSIZE 7 -#define R_TXDATAFIFO6 0x227 -#define O_TXDATAFIFO6__TX12DATAFIFOSTART 24 -#define W_TXDATAFIFO6__TX12DATAFIFOSTART 7 -#define O_TXDATAFIFO6__TX12DATAFIFOSIZE 16 -#define W_TXDATAFIFO6__TX12DATAFIFOSIZE 7 -#define O_TXDATAFIFO6__TX13DATAFIFOSTART 8 -#define W_TXDATAFIFO6__TX13DATAFIFOSTART 7 -#define O_TXDATAFIFO6__TX13DATAFIFOSIZE 0 -#define W_TXDATAFIFO6__TX13DATAFIFOSIZE 7 -#define R_TXDATAFIFO7 0x228 -#define O_TXDATAFIFO7__TX14DATAFIFOSTART 24 -#define W_TXDATAFIFO7__TX14DATAFIFOSTART 7 -#define O_TXDATAFIFO7__TX14DATAFIFOSIZE 16 -#define W_TXDATAFIFO7__TX14DATAFIFOSIZE 7 -#define O_TXDATAFIFO7__TX15DATAFIFOSTART 8 -#define W_TXDATAFIFO7__TX15DATAFIFOSTART 7 -#define O_TXDATAFIFO7__TX15DATAFIFOSIZE 0 -#define W_TXDATAFIFO7__TX15DATAFIFOSIZE 7 -#define R_RXDATAFIFO0 0x229 -#define O_RXDATAFIFO0__RX0DATAFIFOSTART 24 -#define W_RXDATAFIFO0__RX0DATAFIFOSTART 7 -#define O_RXDATAFIFO0__RX0DATAFIFOSIZE 16 -#define W_RXDATAFIFO0__RX0DATAFIFOSIZE 7 -#define O_RXDATAFIFO0__RX1DATAFIFOSTART 8 -#define W_RXDATAFIFO0__RX1DATAFIFOSTART 7 -#define O_RXDATAFIFO0__RX1DATAFIFOSIZE 0 -#define W_RXDATAFIFO0__RX1DATAFIFOSIZE 7 -#define R_RXDATAFIFO1 0x22A -#define O_RXDATAFIFO1__RX2DATAFIFOSTART 24 -#define W_RXDATAFIFO1__RX2DATAFIFOSTART 7 -#define O_RXDATAFIFO1__RX2DATAFIFOSIZE 16 -#define W_RXDATAFIFO1__RX2DATAFIFOSIZE 7 -#define O_RXDATAFIFO1__RX3DATAFIFOSTART 8 -#define W_RXDATAFIFO1__RX3DATAFIFOSTART 7 -#define O_RXDATAFIFO1__RX3DATAFIFOSIZE 0 -#define W_RXDATAFIFO1__RX3DATAFIFOSIZE 7 -#define R_RXDATAFIFO2 0x22B -#define O_RXDATAFIFO2__RX4DATAFIFOSTART 24 -#define W_RXDATAFIFO2__RX4DATAFIFOSTART 7 -#define O_RXDATAFIFO2__RX4DATAFIFOSIZE 16 
-#define W_RXDATAFIFO2__RX4DATAFIFOSIZE 7 -#define O_RXDATAFIFO2__RX5DATAFIFOSTART 8 -#define W_RXDATAFIFO2__RX5DATAFIFOSTART 7 -#define O_RXDATAFIFO2__RX5DATAFIFOSIZE 0 -#define W_RXDATAFIFO2__RX5DATAFIFOSIZE 7 -#define R_RXDATAFIFO3 0x22C -#define O_RXDATAFIFO3__RX6DATAFIFOSTART 24 -#define W_RXDATAFIFO3__RX6DATAFIFOSTART 7 -#define O_RXDATAFIFO3__RX6DATAFIFOSIZE 16 -#define W_RXDATAFIFO3__RX6DATAFIFOSIZE 7 -#define O_RXDATAFIFO3__RX7DATAFIFOSTART 8 -#define W_RXDATAFIFO3__RX7DATAFIFOSTART 7 -#define O_RXDATAFIFO3__RX7DATAFIFOSIZE 0 -#define W_RXDATAFIFO3__RX7DATAFIFOSIZE 7 -#define R_RXDATAFIFO4 0x22D -#define O_RXDATAFIFO4__RX8DATAFIFOSTART 24 -#define W_RXDATAFIFO4__RX8DATAFIFOSTART 7 -#define O_RXDATAFIFO4__RX8DATAFIFOSIZE 16 -#define W_RXDATAFIFO4__RX8DATAFIFOSIZE 7 -#define O_RXDATAFIFO4__RX9DATAFIFOSTART 8 -#define W_RXDATAFIFO4__RX9DATAFIFOSTART 7 -#define O_RXDATAFIFO4__RX9DATAFIFOSIZE 0 -#define W_RXDATAFIFO4__RX9DATAFIFOSIZE 7 -#define R_RXDATAFIFO5 0x22E -#define O_RXDATAFIFO5__RX10DATAFIFOSTART 24 -#define W_RXDATAFIFO5__RX10DATAFIFOSTART 7 -#define O_RXDATAFIFO5__RX10DATAFIFOSIZE 16 -#define W_RXDATAFIFO5__RX10DATAFIFOSIZE 7 -#define O_RXDATAFIFO5__RX11DATAFIFOSTART 8 -#define W_RXDATAFIFO5__RX11DATAFIFOSTART 7 -#define O_RXDATAFIFO5__RX11DATAFIFOSIZE 0 -#define W_RXDATAFIFO5__RX11DATAFIFOSIZE 7 -#define R_RXDATAFIFO6 0x22F -#define O_RXDATAFIFO6__RX12DATAFIFOSTART 24 -#define W_RXDATAFIFO6__RX12DATAFIFOSTART 7 -#define O_RXDATAFIFO6__RX12DATAFIFOSIZE 16 -#define W_RXDATAFIFO6__RX12DATAFIFOSIZE 7 -#define O_RXDATAFIFO6__RX13DATAFIFOSTART 8 -#define W_RXDATAFIFO6__RX13DATAFIFOSTART 7 -#define O_RXDATAFIFO6__RX13DATAFIFOSIZE 0 -#define W_RXDATAFIFO6__RX13DATAFIFOSIZE 7 -#define R_RXDATAFIFO7 0x230 -#define O_RXDATAFIFO7__RX14DATAFIFOSTART 24 -#define W_RXDATAFIFO7__RX14DATAFIFOSTART 7 -#define O_RXDATAFIFO7__RX14DATAFIFOSIZE 16 -#define W_RXDATAFIFO7__RX14DATAFIFOSIZE 7 -#define O_RXDATAFIFO7__RX15DATAFIFOSTART 8 -#define W_RXDATAFIFO7__RX15DATAFIFOSTART 7 -#define O_RXDATAFIFO7__RX15DATAFIFOSIZE 0 -#define W_RXDATAFIFO7__RX15DATAFIFOSIZE 7 -#define R_XGMACPADCALIBRATION 0x231 -#define R_FREEQCARVE 0x233 -#define R_SPI4STATICDELAY0 0x240 -#define O_SPI4STATICDELAY0__DATALINE7 28 -#define W_SPI4STATICDELAY0__DATALINE7 4 -#define O_SPI4STATICDELAY0__DATALINE6 24 -#define W_SPI4STATICDELAY0__DATALINE6 4 -#define O_SPI4STATICDELAY0__DATALINE5 20 -#define W_SPI4STATICDELAY0__DATALINE5 4 -#define O_SPI4STATICDELAY0__DATALINE4 16 -#define W_SPI4STATICDELAY0__DATALINE4 4 -#define O_SPI4STATICDELAY0__DATALINE3 12 -#define W_SPI4STATICDELAY0__DATALINE3 4 -#define O_SPI4STATICDELAY0__DATALINE2 8 -#define W_SPI4STATICDELAY0__DATALINE2 4 -#define O_SPI4STATICDELAY0__DATALINE1 4 -#define W_SPI4STATICDELAY0__DATALINE1 4 -#define O_SPI4STATICDELAY0__DATALINE0 0 -#define W_SPI4STATICDELAY0__DATALINE0 4 -#define R_SPI4STATICDELAY1 0x241 -#define O_SPI4STATICDELAY1__DATALINE15 28 -#define W_SPI4STATICDELAY1__DATALINE15 4 -#define O_SPI4STATICDELAY1__DATALINE14 24 -#define W_SPI4STATICDELAY1__DATALINE14 4 -#define O_SPI4STATICDELAY1__DATALINE13 20 -#define W_SPI4STATICDELAY1__DATALINE13 4 -#define O_SPI4STATICDELAY1__DATALINE12 16 -#define W_SPI4STATICDELAY1__DATALINE12 4 -#define O_SPI4STATICDELAY1__DATALINE11 12 -#define W_SPI4STATICDELAY1__DATALINE11 4 -#define O_SPI4STATICDELAY1__DATALINE10 8 -#define W_SPI4STATICDELAY1__DATALINE10 4 -#define O_SPI4STATICDELAY1__DATALINE9 4 -#define W_SPI4STATICDELAY1__DATALINE9 4 -#define O_SPI4STATICDELAY1__DATALINE8 0 -#define 
W_SPI4STATICDELAY1__DATALINE8 4 -#define R_SPI4STATICDELAY2 0x242 -#define O_SPI4STATICDELAY0__TXSTAT1 8 -#define W_SPI4STATICDELAY0__TXSTAT1 4 -#define O_SPI4STATICDELAY0__TXSTAT0 4 -#define W_SPI4STATICDELAY0__TXSTAT0 4 -#define O_SPI4STATICDELAY0__RXCONTROL 0 -#define W_SPI4STATICDELAY0__RXCONTROL 4 -#define R_SPI4CONTROL 0x243 -#define O_SPI4CONTROL__STATICDELAY 2 -#define O_SPI4CONTROL__LVDS_LVTTL 1 -#define O_SPI4CONTROL__SPI4ENABLE 0 -#define R_CLASSWATERMARKS 0x244 -#define O_CLASSWATERMARKS__CLASS0WATERMARK 24 -#define W_CLASSWATERMARKS__CLASS0WATERMARK 5 -#define O_CLASSWATERMARKS__CLASS1WATERMARK 16 -#define W_CLASSWATERMARKS__CLASS1WATERMARK 5 -#define O_CLASSWATERMARKS__CLASS3WATERMARK 0 -#define W_CLASSWATERMARKS__CLASS3WATERMARK 5 -#define R_RXWATERMARKS1 0x245 -#define O_RXWATERMARKS__RX0DATAWATERMARK 24 -#define W_RXWATERMARKS__RX0DATAWATERMARK 7 -#define O_RXWATERMARKS__RX1DATAWATERMARK 16 -#define W_RXWATERMARKS__RX1DATAWATERMARK 7 -#define O_RXWATERMARKS__RX3DATAWATERMARK 0 -#define W_RXWATERMARKS__RX3DATAWATERMARK 7 -#define R_RXWATERMARKS2 0x246 -#define O_RXWATERMARKS__RX4DATAWATERMARK 24 -#define W_RXWATERMARKS__RX4DATAWATERMARK 7 -#define O_RXWATERMARKS__RX5DATAWATERMARK 16 -#define W_RXWATERMARKS__RX5DATAWATERMARK 7 -#define O_RXWATERMARKS__RX6DATAWATERMARK 8 -#define W_RXWATERMARKS__RX6DATAWATERMARK 7 -#define O_RXWATERMARKS__RX7DATAWATERMARK 0 -#define W_RXWATERMARKS__RX7DATAWATERMARK 7 -#define R_RXWATERMARKS3 0x247 -#define O_RXWATERMARKS__RX8DATAWATERMARK 24 -#define W_RXWATERMARKS__RX8DATAWATERMARK 7 -#define O_RXWATERMARKS__RX9DATAWATERMARK 16 -#define W_RXWATERMARKS__RX9DATAWATERMARK 7 -#define O_RXWATERMARKS__RX10DATAWATERMARK 8 -#define W_RXWATERMARKS__RX10DATAWATERMARK 7 -#define O_RXWATERMARKS__RX11DATAWATERMARK 0 -#define W_RXWATERMARKS__RX11DATAWATERMARK 7 -#define R_RXWATERMARKS4 0x248 -#define O_RXWATERMARKS__RX12DATAWATERMARK 24 -#define W_RXWATERMARKS__RX12DATAWATERMARK 7 -#define O_RXWATERMARKS__RX13DATAWATERMARK 16 -#define W_RXWATERMARKS__RX13DATAWATERMARK 7 -#define O_RXWATERMARKS__RX14DATAWATERMARK 8 -#define W_RXWATERMARKS__RX14DATAWATERMARK 7 -#define O_RXWATERMARKS__RX15DATAWATERMARK 0 -#define W_RXWATERMARKS__RX15DATAWATERMARK 7 -#define R_FREEWATERMARKS 0x249 -#define O_FREEWATERMARKS__FREEOUTWATERMARK 16 -#define W_FREEWATERMARKS__FREEOUTWATERMARK 16 -#define O_FREEWATERMARKS__JUMFRWATERMARK 8 -#define W_FREEWATERMARKS__JUMFRWATERMARK 7 -#define O_FREEWATERMARKS__REGFRWATERMARK 0 -#define W_FREEWATERMARKS__REGFRWATERMARK 7 -#define R_EGRESSFIFOCARVINGSLOTS 0x24a - -#define CTRL_RES0 0 -#define CTRL_RES1 1 -#define CTRL_REG_FREE 2 -#define CTRL_JUMBO_FREE 3 -#define CTRL_CONT 4 -#define CTRL_EOP 5 -#define CTRL_START 6 -#define CTRL_SNGL 7 - -#define CTRL_B0_NOT_EOP 0 -#define CTRL_B0_EOP 1 - -#define R_ROUND_ROBIN_TABLE 0 -#define R_PDE_CLASS_0 0x300 -#define R_PDE_CLASS_1 0x302 -#define R_PDE_CLASS_2 0x304 -#define R_PDE_CLASS_3 0x306 - -#define R_MSG_TX_THRESHOLD 0x308 - -#define R_GMAC_JFR0_BUCKET_SIZE 0x320 -#define R_GMAC_RFR0_BUCKET_SIZE 0x321 -#define R_GMAC_TX0_BUCKET_SIZE 0x322 -#define R_GMAC_TX1_BUCKET_SIZE 0x323 -#define R_GMAC_TX2_BUCKET_SIZE 0x324 -#define R_GMAC_TX3_BUCKET_SIZE 0x325 -#define R_GMAC_JFR1_BUCKET_SIZE 0x326 -#define R_GMAC_RFR1_BUCKET_SIZE 0x327 - -#define R_XGS_TX0_BUCKET_SIZE 0x320 -#define R_XGS_TX1_BUCKET_SIZE 0x321 -#define R_XGS_TX2_BUCKET_SIZE 0x322 -#define R_XGS_TX3_BUCKET_SIZE 0x323 -#define R_XGS_TX4_BUCKET_SIZE 0x324 -#define R_XGS_TX5_BUCKET_SIZE 0x325 -#define R_XGS_TX6_BUCKET_SIZE 0x326 
-#define R_XGS_TX7_BUCKET_SIZE 0x327 -#define R_XGS_TX8_BUCKET_SIZE 0x328 -#define R_XGS_TX9_BUCKET_SIZE 0x329 -#define R_XGS_TX10_BUCKET_SIZE 0x32A -#define R_XGS_TX11_BUCKET_SIZE 0x32B -#define R_XGS_TX12_BUCKET_SIZE 0x32C -#define R_XGS_TX13_BUCKET_SIZE 0x32D -#define R_XGS_TX14_BUCKET_SIZE 0x32E -#define R_XGS_TX15_BUCKET_SIZE 0x32F -#define R_XGS_JFR_BUCKET_SIZE 0x330 -#define R_XGS_RFR_BUCKET_SIZE 0x331 - -#define R_CC_CPU0_0 0x380 -#define R_CC_CPU1_0 0x388 -#define R_CC_CPU2_0 0x390 -#define R_CC_CPU3_0 0x398 -#define R_CC_CPU4_0 0x3a0 -#define R_CC_CPU5_0 0x3a8 -#define R_CC_CPU6_0 0x3b0 -#define R_CC_CPU7_0 0x3b8 - -#define XLR_GMAC_BLK_SZ (XLR_IO_GMAC_1_OFFSET - \ - XLR_IO_GMAC_0_OFFSET) - -/* Constants used for configuring the devices */ - -#define XLR_FB_STN 6 /* Bucket used for Tx freeback */ - -#define MAC_B2B_IPG 88 - -#define XLR_NET_PREPAD_LEN 32 - -/* frame sizes need to be cacheline aligned */ -#define MAX_FRAME_SIZE (1536 + XLR_NET_PREPAD_LEN) -#define MAX_FRAME_SIZE_JUMBO 9216 - -#define MAC_SKB_BACK_PTR_SIZE SMP_CACHE_BYTES -#define MAC_PREPAD 0 -#define BYTE_OFFSET 2 -#define XLR_RX_BUF_SIZE (MAX_FRAME_SIZE + BYTE_OFFSET + \ - MAC_PREPAD + MAC_SKB_BACK_PTR_SIZE + SMP_CACHE_BYTES) -#define MAC_CRC_LEN 4 -#define MAX_NUM_MSGRNG_STN_CC 128 -#define MAX_MSG_SND_ATTEMPTS 100 /* 13 stns x 4 entry msg/stn + - * headroom - */ - -#define MAC_FRIN_TO_BE_SENT_THRESHOLD 16 - -#define MAX_NUM_DESC_SPILL 1024 -#define MAX_FRIN_SPILL (MAX_NUM_DESC_SPILL << 2) -#define MAX_FROUT_SPILL (MAX_NUM_DESC_SPILL << 2) -#define MAX_CLASS_0_SPILL (MAX_NUM_DESC_SPILL << 2) -#define MAX_CLASS_1_SPILL (MAX_NUM_DESC_SPILL << 2) -#define MAX_CLASS_2_SPILL (MAX_NUM_DESC_SPILL << 2) -#define MAX_CLASS_3_SPILL (MAX_NUM_DESC_SPILL << 2) - -enum { - SGMII_SPEED_10 = 0x00000000, - SGMII_SPEED_100 = 0x02000000, - SGMII_SPEED_1000 = 0x04000000, -}; - -enum tsv_rsv_reg { - TX_RX_64_BYTE_FRAME = 0x20, - TX_RX_64_127_BYTE_FRAME, - TX_RX_128_255_BYTE_FRAME, - TX_RX_256_511_BYTE_FRAME, - TX_RX_512_1023_BYTE_FRAME, - TX_RX_1024_1518_BYTE_FRAME, - TX_RX_1519_1522_VLAN_BYTE_FRAME, - - RX_BYTE_COUNTER = 0x27, - RX_PACKET_COUNTER, - RX_FCS_ERROR_COUNTER, - RX_MULTICAST_PACKET_COUNTER, - RX_BROADCAST_PACKET_COUNTER, - RX_CONTROL_FRAME_PACKET_COUNTER, - RX_PAUSE_FRAME_PACKET_COUNTER, - RX_UNKNOWN_OP_CODE_COUNTER, - RX_ALIGNMENT_ERROR_COUNTER, - RX_FRAME_LENGTH_ERROR_COUNTER, - RX_CODE_ERROR_COUNTER, - RX_CARRIER_SENSE_ERROR_COUNTER, - RX_UNDERSIZE_PACKET_COUNTER, - RX_OVERSIZE_PACKET_COUNTER, - RX_FRAGMENTS_COUNTER, - RX_JABBER_COUNTER, - RX_DROP_PACKET_COUNTER, - - TX_BYTE_COUNTER = 0x38, - TX_PACKET_COUNTER, - TX_MULTICAST_PACKET_COUNTER, - TX_BROADCAST_PACKET_COUNTER, - TX_PAUSE_CONTROL_FRAME_COUNTER, - TX_DEFERRAL_PACKET_COUNTER, - TX_EXCESSIVE_DEFERRAL_PACKET_COUNTER, - TX_SINGLE_COLLISION_PACKET_COUNTER, - TX_MULTI_COLLISION_PACKET_COUNTER, - TX_LATE_COLLISION_PACKET_COUNTER, - TX_EXCESSIVE_COLLISION_PACKET_COUNTER, - TX_TOTAL_COLLISION_COUNTER, - TX_PAUSE_FRAME_HONERED_COUNTER, - TX_DROP_FRAME_COUNTER, - TX_JABBER_FRAME_COUNTER, - TX_FCS_ERROR_COUNTER, - TX_CONTROL_FRAME_COUNTER, - TX_OVERSIZE_FRAME_COUNTER, - TX_UNDERSIZE_FRAME_COUNTER, - TX_FRAGMENT_FRAME_COUNTER, - - CARRY_REG_1 = 0x4c, - CARRY_REG_2 = 0x4d, -}; - -struct xlr_adapter { - struct net_device *netdev[4]; -}; - -struct xlr_net_priv { - u32 __iomem *base_addr; - struct net_device *ndev; - struct xlr_adapter *adapter; - struct mii_bus *mii_bus; - int num_rx_desc; - int phy_addr; /* PHY addr on MDIO bus */ - int pcs_id; /* PCS id on MDIO bus */ - 
int port_id; /* Port(gmac/xgmac) number, i.e 0-7 */ - int tx_stnid; - u32 __iomem *mii_addr; - u32 __iomem *serdes_addr; - u32 __iomem *pcs_addr; - u32 __iomem *gpio_addr; - int phy_speed; - int port_type; - struct timer_list queue_timer; - int wakeup_q; - struct platform_device *pdev; - struct xlr_net_data *nd; - - u64 *frin_spill; - u64 *frout_spill; - u64 *class_0_spill; - u64 *class_1_spill; - u64 *class_2_spill; - u64 *class_3_spill; -}; - -void xlr_set_gmac_speed(struct xlr_net_priv *priv); diff --git a/drivers/staging/r8188eu/core/rtw_mlme_ext.c b/drivers/staging/r8188eu/core/rtw_mlme_ext.c index 55c3d4a6faeb..b4820ad2cee7 100644 --- a/drivers/staging/r8188eu/core/rtw_mlme_ext.c +++ b/drivers/staging/r8188eu/core/rtw_mlme_ext.c @@ -107,6 +107,7 @@ static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = { {0x01}, /* 0x10, RT_CHANNEL_DOMAIN_JAPAN */ {0x02}, /* 0x11, RT_CHANNEL_DOMAIN_FCC_NO_DFS */ {0x01}, /* 0x12, RT_CHANNEL_DOMAIN_JAPAN_NO_DFS */ + {0x00}, /* 0x13 */ {0x02}, /* 0x14, RT_CHANNEL_DOMAIN_TAIWAN_NO_DFS */ {0x00}, /* 0x15, RT_CHANNEL_DOMAIN_ETSI_NO_DFS */ {0x00}, /* 0x16, RT_CHANNEL_DOMAIN_KOREA_NO_DFS */ @@ -118,6 +119,7 @@ static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = { {0x00}, /* 0x1C, */ {0x00}, /* 0x1D, */ {0x00}, /* 0x1E, */ + {0x00}, /* 0x1F, */ /* 0x20 ~ 0x7F , New Define ===== */ {0x00}, /* 0x20, RT_CHANNEL_DOMAIN_WORLD_NULL */ {0x01}, /* 0x21, RT_CHANNEL_DOMAIN_ETSI1_NULL */ @@ -6845,12 +6847,12 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; - pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); + pcmd_obj = kzalloc(sizeof(*pcmd_obj), GFP_ATOMIC); if (!pcmd_obj) return; cmdsz = (sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header)); - pevtcmd = kzalloc(cmdsz, GFP_KERNEL); + pevtcmd = kzalloc(cmdsz, GFP_ATOMIC); if (!pevtcmd) { kfree(pcmd_obj); return; diff --git a/drivers/staging/r8188eu/os_dep/ioctl_linux.c b/drivers/staging/r8188eu/os_dep/ioctl_linux.c index 52d42e576443..9404355726d0 100644 --- a/drivers/staging/r8188eu/os_dep/ioctl_linux.c +++ b/drivers/staging/r8188eu/os_dep/ioctl_linux.c @@ -1980,6 +1980,7 @@ static int rtw_wx_read32(struct net_device *dev, u32 data32; u32 bytes; u8 *ptmp; + int ret; padapter = (struct adapter *)rtw_netdev_priv(dev); p = &wrqu->data; @@ -2007,12 +2008,17 @@ static int rtw_wx_read32(struct net_device *dev, break; default: DBG_88E(KERN_INFO "%s: usage> read [bytes],[address(hex)]\n", __func__); - return -EINVAL; + ret = -EINVAL; + goto err_free_ptmp; } DBG_88E(KERN_INFO "%s: addr = 0x%08X data =%s\n", __func__, addr, extra); kfree(ptmp); return 0; + +err_free_ptmp: + kfree(ptmp); + return ret; } static int rtw_wx_write32(struct net_device *dev, diff --git a/drivers/staging/r8188eu/os_dep/mlme_linux.c b/drivers/staging/r8188eu/os_dep/mlme_linux.c index a9b6ffdbf31a..f7ce724ebf87 100644 --- a/drivers/staging/r8188eu/os_dep/mlme_linux.c +++ b/drivers/staging/r8188eu/os_dep/mlme_linux.c @@ -112,7 +112,7 @@ void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie) buff = NULL; if (authmode == _WPA_IE_ID_) { - buff = kzalloc(IW_CUSTOM_MAX, GFP_KERNEL); + buff = kzalloc(IW_CUSTOM_MAX, GFP_ATOMIC); if (!buff) return; p = buff; diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c index d2e9df60e9ba..b9ce71848023 100644 --- 
a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c @@ -2549,13 +2549,14 @@ static void _rtl92e_pci_disconnect(struct pci_dev *pdev) free_irq(dev->irq, dev); priv->irq = 0; } - free_rtllib(dev); if (dev->mem_start != 0) { iounmap((void __iomem *)dev->mem_start); release_mem_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1)); } + + free_rtllib(dev); } pci_disable_device(pdev); diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 0b65de9f2df1..95a88f6224cd 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -520,7 +520,7 @@ static ssize_t target_fabric_port_alua_tg_pt_gp_show(struct config_item *item, { struct se_lun *lun = item_to_lun(item); - if (!lun || !lun->lun_se_dev) + if (!lun->lun_se_dev) return -ENODEV; return core_alua_show_tg_pt_gp_info(lun, page); @@ -531,7 +531,7 @@ static ssize_t target_fabric_port_alua_tg_pt_gp_store(struct config_item *item, { struct se_lun *lun = item_to_lun(item); - if (!lun || !lun->lun_se_dev) + if (!lun->lun_se_dev) return -ENODEV; return core_alua_store_tg_pt_gp_info(lun, page, count); @@ -542,7 +542,7 @@ static ssize_t target_fabric_port_alua_tg_pt_offline_show( { struct se_lun *lun = item_to_lun(item); - if (!lun || !lun->lun_se_dev) + if (!lun->lun_se_dev) return -ENODEV; return core_alua_show_offline_bit(lun, page); @@ -553,7 +553,7 @@ static ssize_t target_fabric_port_alua_tg_pt_offline_store( { struct se_lun *lun = item_to_lun(item); - if (!lun || !lun->lun_se_dev) + if (!lun->lun_se_dev) return -ENODEV; return core_alua_store_offline_bit(lun, page, count); @@ -564,7 +564,7 @@ static ssize_t target_fabric_port_alua_tg_pt_status_show( { struct se_lun *lun = item_to_lun(item); - if (!lun || !lun->lun_se_dev) + if (!lun->lun_se_dev) return -ENODEV; return core_alua_show_secondary_status(lun, page); @@ -575,7 +575,7 @@ static ssize_t target_fabric_port_alua_tg_pt_status_store( { struct se_lun *lun = item_to_lun(item); - if (!lun || !lun->lun_se_dev) + if (!lun->lun_se_dev) return -ENODEV; return core_alua_store_secondary_status(lun, page, count); @@ -586,7 +586,7 @@ static ssize_t target_fabric_port_alua_tg_pt_write_md_show( { struct se_lun *lun = item_to_lun(item); - if (!lun || !lun->lun_se_dev) + if (!lun->lun_se_dev) return -ENODEV; return core_alua_show_secondary_write_metadata(lun, page); @@ -597,7 +597,7 @@ static ssize_t target_fabric_port_alua_tg_pt_write_md_store( { struct se_lun *lun = item_to_lun(item); - if (!lun || !lun->lun_se_dev) + if (!lun->lun_se_dev) return -ENODEV; return core_alua_store_secondary_write_metadata(lun, page, count); diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 22703a0dbd07..4c76498d3fb0 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -40,11 +40,11 @@ static void spc_fill_alua_data(struct se_lun *lun, unsigned char *buf) * * See spc4r17 section 6.4.2 Table 135 */ - spin_lock(&lun->lun_tg_pt_gp_lock); - tg_pt_gp = lun->lun_tg_pt_gp; + rcu_read_lock(); + tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp); if (tg_pt_gp) buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type; - spin_unlock(&lun->lun_tg_pt_gp_lock); + rcu_read_unlock(); } static u16 @@ -325,14 +325,14 @@ check_t10_vend_desc: * Get the PROTOCOL IDENTIFIER as defined by spc4r17 * section 7.5.1 Table 362 */ - spin_lock(&lun->lun_tg_pt_gp_lock); - tg_pt_gp = lun->lun_tg_pt_gp; + rcu_read_lock(); + tg_pt_gp = 
rcu_dereference(lun->lun_tg_pt_gp); if (!tg_pt_gp) { - spin_unlock(&lun->lun_tg_pt_gp_lock); + rcu_read_unlock(); goto check_lu_gp; } tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id; - spin_unlock(&lun->lun_tg_pt_gp_lock); + rcu_read_unlock(); buf[off] = tpg->proto_id << 4; buf[off++] |= 0x1; /* CODE SET == Binary */ diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c index 45424824e0f9..d8c8683863aa 100644 --- a/drivers/tee/optee/ffa_abi.c +++ b/drivers/tee/optee/ffa_abi.c @@ -810,10 +810,9 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev) return -EINVAL; optee = kzalloc(sizeof(*optee), GFP_KERNEL); - if (!optee) { - rc = -ENOMEM; - goto err; - } + if (!optee) + return -ENOMEM; + optee->pool = optee_ffa_config_dyn_shm(); if (IS_ERR(optee->pool)) { rc = PTR_ERR(optee->pool); diff --git a/drivers/thermal/intel/int340x_thermal/Kconfig b/drivers/thermal/intel/int340x_thermal/Kconfig index 45c31f3d6054..5d046de96a5d 100644 --- a/drivers/thermal/intel/int340x_thermal/Kconfig +++ b/drivers/thermal/intel/int340x_thermal/Kconfig @@ -5,12 +5,12 @@ config INT340X_THERMAL tristate "ACPI INT340X thermal drivers" - depends on X86 && ACPI && PCI + depends on X86_64 && ACPI && PCI select THERMAL_GOV_USER_SPACE select ACPI_THERMAL_REL select ACPI_FAN select INTEL_SOC_DTS_IOSF_CORE - select PROC_THERMAL_MMIO_RAPL if X86_64 && POWERCAP + select PROC_THERMAL_MMIO_RAPL if POWERCAP help Newer laptops and tablets that use ACPI may have thermal sensors and other devices with thermal control capabilities outside the core diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c index b25b54d4bac1..e693ec8234fb 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c @@ -29,7 +29,7 @@ static const char * const fivr_strings[] = { }; static const struct mmio_reg tgl_fivr_mmio_regs[] = { - { 0, 0x5A18, 3, 0x7, 12}, /* vco_ref_code_lo */ + { 0, 0x5A18, 3, 0x7, 11}, /* vco_ref_code_lo */ { 0, 0x5A18, 8, 0xFF, 16}, /* vco_ref_code_hi */ { 0, 0x5A08, 8, 0xFF, 0}, /* spread_spectrum_pct */ { 0, 0x5A08, 1, 0x1, 8}, /* spread_spectrum_clk_enable */ diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 648829ab79ff..82654dc8382b 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -421,6 +421,8 @@ static void thermal_zone_device_init(struct thermal_zone_device *tz) { struct thermal_instance *pos; tz->temperature = THERMAL_TEMP_INVALID; + tz->prev_low_trip = -INT_MAX; + tz->prev_high_trip = INT_MAX; list_for_each_entry(pos, &tz->thermal_instances, tz_node) pos->initialized = false; } diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index f0bf01ea069a..71e0dd2c0ce5 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c @@ -522,6 +522,7 @@ static struct xenbus_driver xencons_driver = { .remove = xencons_remove, .resume = xencons_resume, .otherend_changed = xencons_backend_changed, + .not_essential = true, }; #endif /* CONFIG_HVC_XEN_FRONTEND */ diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c index 7f656fac503f..5163d60756b7 100644 --- a/drivers/tty/serial/8250/8250_bcm7271.c +++ b/drivers/tty/serial/8250/8250_bcm7271.c @@ -237,6 +237,7 @@ struct brcmuart_priv { u32 rx_err; u32 rx_timeout; u32 rx_abort; + u32 saved_mctrl; }; static struct dentry *brcmuart_debugfs_root; @@ -1133,16 +1134,27 @@ static int 
brcmuart_remove(struct platform_device *pdev) static int __maybe_unused brcmuart_suspend(struct device *dev) { struct brcmuart_priv *priv = dev_get_drvdata(dev); + struct uart_8250_port *up = serial8250_get_port(priv->line); + struct uart_port *port = &up->port; serial8250_suspend_port(priv->line); clk_disable_unprepare(priv->baud_mux_clk); + /* + * This will prevent resume from enabling RTS before the + * baud rate has been restored. + */ + priv->saved_mctrl = port->mctrl; + port->mctrl = 0; + return 0; } static int __maybe_unused brcmuart_resume(struct device *dev) { struct brcmuart_priv *priv = dev_get_drvdata(dev); + struct uart_8250_port *up = serial8250_get_port(priv->line); + struct uart_port *port = &up->port; int ret; ret = clk_prepare_enable(priv->baud_mux_clk); @@ -1165,6 +1177,7 @@ static int __maybe_unused brcmuart_resume(struct device *dev) start_rx_dma(serial8250_get_port(priv->line)); } serial8250_resume_port(priv->line); + port->mctrl = priv->saved_mctrl; return 0; } diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 5d43de143f33..60f8fffdfd77 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -1324,29 +1324,33 @@ pericom_do_set_divisor(struct uart_port *port, unsigned int baud, { int scr; int lcr; - int actual_baud; - int tolerance; - for (scr = 5 ; scr <= 15 ; scr++) { - actual_baud = 921600 * 16 / scr; - tolerance = actual_baud / 50; + for (scr = 16; scr > 4; scr--) { + unsigned int maxrate = port->uartclk / scr; + unsigned int divisor = max(maxrate / baud, 1U); + int delta = maxrate / divisor - baud; - if ((baud < actual_baud + tolerance) && - (baud > actual_baud - tolerance)) { + if (baud > maxrate + baud / 50) + continue; + if (delta > baud / 50) + divisor++; + + if (divisor > 0xffff) + continue; + + /* Update delta due to possible divisor change */ + delta = maxrate / divisor - baud; + if (abs(delta) < baud / 50) { lcr = serial_port_in(port, UART_LCR); serial_port_out(port, UART_LCR, lcr | 0x80); - - serial_port_out(port, UART_DLL, 1); - serial_port_out(port, UART_DLM, 0); + serial_port_out(port, UART_DLL, divisor & 0xff); + serial_port_out(port, UART_DLM, divisor >> 8 & 0xff); serial_port_out(port, 2, 16 - scr); serial_port_out(port, UART_LCR, lcr); return; - } else if (baud > actual_baud) { - break; } } - serial8250_do_set_divisor(port, baud, quot, quot_frac); } static int pci_pericom_setup(struct serial_private *priv, const struct pciserial_board *board, @@ -2291,7 +2295,7 @@ static struct pci_serial_quirk pci_serial_quirks[] = { .setup = pci_pericom_setup_four_at_eight, }, { - .vendor = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S, + .vendor = PCI_VENDOR_ID_ACCESIO, .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, @@ -2299,6 +2303,13 @@ static struct pci_serial_quirk pci_serial_quirks[] = { }, { .vendor = PCI_VENDOR_ID_ACCESIO, + .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_pericom_setup_four_at_eight, + }, + { + .vendor = PCI_VENDOR_ID_ACCESIO, .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 5775cbff8f6e..46e2079ad1aa 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -2024,13 +2024,6 @@ void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl) struct uart_8250_port *up = 
up_to_u8250p(port); unsigned char mcr; - if (port->rs485.flags & SER_RS485_ENABLED) { - if (serial8250_in_MCR(up) & UART_MCR_RTS) - mctrl |= TIOCM_RTS; - else - mctrl &= ~TIOCM_RTS; - } - mcr = serial8250_TIOCM_to_MCR(mctrl); mcr = (mcr & up->mcr_mask) | up->mcr_force | up->mcr; diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 6ff94cfcd9db..fc543ac97c13 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -1533,7 +1533,7 @@ config SERIAL_LITEUART tristate "LiteUART serial port support" depends on HAS_IOMEM depends on OF || COMPILE_TEST - depends on LITEX + depends on LITEX || COMPILE_TEST select SERIAL_CORE help This driver is for the FPGA-based LiteUART serial controller from LiteX diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index d361cd84ff8c..52518a606c06 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -2947,6 +2947,7 @@ MODULE_DEVICE_TABLE(of, sbsa_uart_of_match); static const struct acpi_device_id __maybe_unused sbsa_uart_acpi_match[] = { { "ARMH0011", 0 }, + { "ARMHB000", 0 }, {}, }; MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match); diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index b1e7190ae483..ac5112def40d 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -2625,6 +2625,7 @@ OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup); OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup); OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1028a-lpuart", ls1028a_early_console_setup); OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup); +OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8qxp-lpuart", lpuart32_imx_early_console_setup); EARLYCON_DECLARE(lpuart, lpuart_early_console_setup); EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup); diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c index dbc0559a9157..2941659e5274 100644 --- a/drivers/tty/serial/liteuart.c +++ b/drivers/tty/serial/liteuart.c @@ -270,8 +270,10 @@ static int liteuart_probe(struct platform_device *pdev) /* get membase */ port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); - if (IS_ERR(port->membase)) - return PTR_ERR(port->membase); + if (IS_ERR(port->membase)) { + ret = PTR_ERR(port->membase); + goto err_erase_id; + } /* values not from device tree */ port->dev = &pdev->dev; @@ -285,7 +287,18 @@ static int liteuart_probe(struct platform_device *pdev) port->line = dev_id; spin_lock_init(&port->lock); - return uart_add_one_port(&liteuart_driver, &uart->port); + platform_set_drvdata(pdev, port); + + ret = uart_add_one_port(&liteuart_driver, &uart->port); + if (ret) + goto err_erase_id; + + return 0; + +err_erase_id: + xa_erase(&liteuart_array, uart->id); + + return ret; } static int liteuart_remove(struct platform_device *pdev) @@ -293,6 +306,7 @@ static int liteuart_remove(struct platform_device *pdev) struct uart_port *port = platform_get_drvdata(pdev); struct liteuart_port *uart = to_liteuart_port(port); + uart_remove_one_port(&liteuart_driver, port); xa_erase(&liteuart_array, uart->id); return 0; diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c index fcef7a961430..489d19274f9a 100644 --- a/drivers/tty/serial/msm_serial.c +++ b/drivers/tty/serial/msm_serial.c @@ -598,6 +598,9 @@ static void msm_start_rx_dma(struct msm_port *msm_port) u32 val; int ret; + if (IS_ENABLED(CONFIG_CONSOLE_POLL)) + 
return; + if (!dma->chan) return; diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c index 45e2e4109acd..b6223fab0687 100644 --- a/drivers/tty/serial/serial-tegra.c +++ b/drivers/tty/serial/serial-tegra.c @@ -1506,7 +1506,7 @@ static struct tegra_uart_chip_data tegra20_uart_chip_data = { .fifo_mode_enable_status = false, .uart_max_port = 5, .max_dma_burst_bytes = 4, - .error_tolerance_low_range = 0, + .error_tolerance_low_range = -4, .error_tolerance_high_range = 4, }; @@ -1517,7 +1517,7 @@ static struct tegra_uart_chip_data tegra30_uart_chip_data = { .fifo_mode_enable_status = false, .uart_max_port = 5, .max_dma_burst_bytes = 4, - .error_tolerance_low_range = 0, + .error_tolerance_low_range = -4, .error_tolerance_high_range = 4, }; diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 1e738f265eea..61e3dd0222af 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -1075,6 +1075,11 @@ uart_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) goto out; if (!tty_io_error(tty)) { + if (uport->rs485.flags & SER_RS485_ENABLED) { + set &= ~TIOCM_RTS; + clear &= ~TIOCM_RTS; + } + uart_update_mctrl(uport, set, clear); ret = 0; } @@ -1549,6 +1554,7 @@ static void uart_tty_port_shutdown(struct tty_port *port) { struct uart_state *state = container_of(port, struct uart_state, port); struct uart_port *uport = uart_port_check(state); + char *buf; /* * At this point, we stop accepting input. To do this, we @@ -1570,8 +1576,18 @@ static void uart_tty_port_shutdown(struct tty_port *port) */ tty_port_set_suspended(port, 0); - uart_change_pm(state, UART_PM_STATE_OFF); + /* + * Free the transmit buffer. + */ + spin_lock_irq(&uport->lock); + buf = state->xmit.buf; + state->xmit.buf = NULL; + spin_unlock_irq(&uport->lock); + if (buf) + free_page((unsigned long)buf); + + uart_change_pm(state, UART_PM_STATE_OFF); } static void uart_wait_until_sent(struct tty_struct *tty, int timeout) diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c index 1f3b4a142212..f9af7ebe003d 100644 --- a/drivers/usb/cdns3/cdns3-gadget.c +++ b/drivers/usb/cdns3/cdns3-gadget.c @@ -337,19 +337,6 @@ static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep) cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs); } -static void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req) -{ - struct cdns3_endpoint *priv_ep = priv_req->priv_ep; - int current_trb = priv_req->start_trb; - - while (current_trb != priv_req->end_trb) { - cdns3_ep_inc_deq(priv_ep); - current_trb = priv_ep->dequeue; - } - - cdns3_ep_inc_deq(priv_ep); -} - /** * cdns3_allow_enable_l1 - enable/disable permits to transition to L1. * @priv_dev: Extended gadget object @@ -1517,10 +1504,11 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev, trb = priv_ep->trb_pool + priv_ep->dequeue; - /* Request was dequeued and TRB was changed to TRB_LINK. 
*/ - if (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) { + /* The TRB was changed to a link TRB, and the request was handled in ep_dequeue */ + while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) { trace_cdns3_complete_trb(priv_ep, trb); - cdns3_move_deq_to_next_trb(priv_req); + cdns3_ep_inc_deq(priv_ep); + trb = priv_ep->trb_pool + priv_ep->dequeue; } if (!request->stream_id) { diff --git a/drivers/usb/cdns3/cdnsp-mem.c b/drivers/usb/cdns3/cdnsp-mem.c index ad9aee3f1e39..97866bfb2da9 100644 --- a/drivers/usb/cdns3/cdnsp-mem.c +++ b/drivers/usb/cdns3/cdnsp-mem.c @@ -987,6 +987,9 @@ int cdnsp_endpoint_init(struct cdnsp_device *pdev, /* Set up the endpoint ring. */ pep->ring = cdnsp_ring_alloc(pdev, 2, ring_type, max_packet, mem_flags); + if (!pep->ring) + return -ENOMEM; + pep->skip = false; /* Fill the endpoint context */ diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c index 84dadfa726aa..9643b905e2d8 100644 --- a/drivers/usb/cdns3/host.c +++ b/drivers/usb/cdns3/host.c @@ -10,6 +10,7 @@ */ #include <linux/platform_device.h> +#include <linux/slab.h> #include "core.h" #include "drd.h" #include "host-export.h" diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index f1d100671ee6..097142ffb184 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -420,15 +420,15 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) data->phy = devm_usb_get_phy_by_phandle(dev, "fsl,usbphy", 0); if (IS_ERR(data->phy)) { ret = PTR_ERR(data->phy); - if (ret == -ENODEV) { - data->phy = devm_usb_get_phy_by_phandle(dev, "phys", 0); - if (IS_ERR(data->phy)) { - ret = PTR_ERR(data->phy); - if (ret == -ENODEV) - data->phy = NULL; - else - goto err_clk; - } + if (ret != -ENODEV) + goto err_clk; + data->phy = devm_usb_get_phy_by_phandle(dev, "phys", 0); + if (IS_ERR(data->phy)) { + ret = PTR_ERR(data->phy); + if (ret == -ENODEV) + data->phy = NULL; + else + goto err_clk; } } diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 16b1fd9dc60c..48bc8a4814ac 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -406,7 +406,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, * the USB-2 spec requires such endpoints to have wMaxPacketSize = 0 * (see the end of section 5.6.3), so don't warn about them. 
*/ - maxp = usb_endpoint_maxp(&endpoint->desc); + maxp = le16_to_cpu(endpoint->desc.wMaxPacketSize); if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) { dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n", cfgno, inum, asnum, d->bEndpointAddress); @@ -422,9 +422,9 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, maxpacket_maxes = full_speed_maxpacket_maxes; break; case USB_SPEED_HIGH: - /* Bits 12..11 are allowed only for HS periodic endpoints */ + /* Multiple-transactions bits are allowed only for HS periodic endpoints */ if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) { - i = maxp & (BIT(12) | BIT(11)); + i = maxp & USB_EP_MAXP_MULT_MASK; maxp &= ~i; } fallthrough; diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 86658a81d284..00070a8a6507 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -4700,8 +4700,6 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, if (oldspeed == USB_SPEED_LOW) delay = HUB_LONG_RESET_TIME; - mutex_lock(hcd->address0_mutex); - /* Reset the device; full speed may morph to high speed */ /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */ retval = hub_port_reset(hub, port1, udev, delay, false); @@ -5016,7 +5014,6 @@ fail: hub_port_disable(hub, port1, 0); update_devnum(udev, devnum); /* for disconnect processing */ } - mutex_unlock(hcd->address0_mutex); return retval; } @@ -5191,6 +5188,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, struct usb_port *port_dev = hub->ports[port1 - 1]; struct usb_device *udev = port_dev->child; static int unreliable_port = -1; + bool retry_locked; /* Disconnect any existing devices under this port */ if (udev) { @@ -5246,8 +5244,11 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, unit_load = 100; status = 0; - for (i = 0; i < PORT_INIT_TRIES; i++) { + for (i = 0; i < PORT_INIT_TRIES; i++) { + usb_lock_port(port_dev); + mutex_lock(hcd->address0_mutex); + retry_locked = true; /* reallocate for each attempt, since references * to the previous one can escape in various ways */ @@ -5255,6 +5256,8 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, if (!udev) { dev_err(&port_dev->dev, "couldn't allocate usb_device\n"); + mutex_unlock(hcd->address0_mutex); + usb_unlock_port(port_dev); goto done; } @@ -5276,12 +5279,14 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, } /* reset (non-USB 3.0 devices) and get descriptor */ - usb_lock_port(port_dev); status = hub_port_init(hub, udev, port1, i); - usb_unlock_port(port_dev); if (status < 0) goto loop; + mutex_unlock(hcd->address0_mutex); + usb_unlock_port(port_dev); + retry_locked = false; + if (udev->quirks & USB_QUIRK_DELAY_INIT) msleep(2000); @@ -5374,6 +5379,10 @@ loop: usb_ep0_reinit(udev); release_devnum(udev); hub_free_dev(udev); + if (retry_locked) { + mutex_unlock(hcd->address0_mutex); + usb_unlock_port(port_dev); + } usb_put_dev(udev); if ((status == -ENOTCONN) || (status == -ENOTSUPP)) break; @@ -5915,6 +5924,8 @@ static int usb_reset_and_verify_device(struct usb_device *udev) bos = udev->bos; udev->bos = NULL; + mutex_lock(hcd->address0_mutex); + for (i = 0; i < PORT_INIT_TRIES; ++i) { /* ep0 maxpacket size may change; let the HCD know about it. 
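The usb_parse_endpoint() hunk above stops using usb_endpoint_maxp() and reads the raw wMaxPacketSize instead, because the validation needs the whole field: bits 10:0 carry the packet size, and bits 12:11 (USB_EP_MAXP_MULT_MASK) carry the additional-transactions count that is only legal on high-speed periodic endpoints and is stripped before the size is range-checked. As a minimal sketch of that layout, assuming only the standard ch9.h definitions (the helper name below is illustrative, not a kernel function):

#include <linux/types.h>
#include <linux/usb/ch9.h>

/* Illustrative only: bytes a high-speed periodic endpoint may move per
 * microframe, decoded from the raw descriptor field. */
static unsigned int hs_bytes_per_uframe(const struct usb_endpoint_descriptor *d)
{
	u16 raw = le16_to_cpu(d->wMaxPacketSize);
	unsigned int maxp = raw & 0x7ff;                         /* bits 10:0  */
	unsigned int mult = (raw & USB_EP_MAXP_MULT_MASK) >> 11; /* bits 12:11 */

	return maxp * (mult + 1);        /* 1-3 transactions per microframe */
}

With that layout in mind, the hunk's "maxp &= ~i" simply clears the mult bits so the remaining size can be compared against maxpacket_maxes.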
@@ -5924,6 +5935,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev) if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV) break; } + mutex_unlock(hcd->address0_mutex); if (ret < 0) goto re_enumerate; diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 8239fe7129dd..019351c0b52c 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -434,6 +434,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x1532, 0x0116), .driver_info = USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, + /* Lenovo Powered USB-C Travel Hub (4X90S92381, RTL8153 GigE) */ + { USB_DEVICE(0x17ef, 0x721e), .driver_info = USB_QUIRK_NO_LPM }, + /* Lenovo ThinkCenter A630Z TI024Gen3 usb-audio */ { USB_DEVICE(0x17ef, 0xa012), .driver_info = USB_QUIRK_DISCONNECT_SUSPEND }, diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 4ab4a1d5062b..ab8d7dad9f56 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -1198,6 +1198,8 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg, } ctrl |= DXEPCTL_CNAK; } else { + hs_req->req.frame_number = hs_ep->target_frame; + hs_req->req.actual = 0; dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA); return; } @@ -2857,9 +2859,12 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep) do { hs_req = get_ep_head(hs_ep); - if (hs_req) + if (hs_req) { + hs_req->req.frame_number = hs_ep->target_frame; + hs_req->req.actual = 0; dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA); + } dwc2_gadget_incr_frame_num(hs_ep); /* Update current frame number value. */ hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg); @@ -2912,8 +2917,11 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep) while (dwc2_gadget_target_frame_elapsed(ep)) { hs_req = get_ep_head(ep); - if (hs_req) + if (hs_req) { + hs_req->req.frame_number = ep->target_frame; + hs_req->req.actual = 0; dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA); + } dwc2_gadget_incr_frame_num(ep); /* Update current frame number value. */ @@ -3002,8 +3010,11 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep) while (dwc2_gadget_target_frame_elapsed(hs_ep)) { hs_req = get_ep_head(hs_ep); - if (hs_req) + if (hs_req) { + hs_req->req.frame_number = hs_ep->target_frame; + hs_req->req.actual = 0; dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA); + } dwc2_gadget_incr_frame_num(hs_ep); /* Update current frame number value. 
*/ diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c index 89a788326c56..24beff610cf2 100644 --- a/drivers/usb/dwc2/hcd_queue.c +++ b/drivers/usb/dwc2/hcd_queue.c @@ -59,7 +59,7 @@ #define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5)) /* If we get a NAK, wait this long before retrying */ -#define DWC2_RETRY_WAIT_DELAY (1 * 1E6L) +#define DWC2_RETRY_WAIT_DELAY (1 * NSEC_PER_MSEC) /** * dwc2_periodic_channel_available() - Checks that a channel is available for a diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 643239d7d370..f4c09951b517 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -1594,9 +1594,11 @@ static int dwc3_probe(struct platform_device *pdev) dwc3_get_properties(dwc); - ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64)); - if (ret) - return ret; + if (!dwc->sysdev_is_parent) { + ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64)); + if (ret) + return ret; + } dwc->reset = devm_reset_control_array_get_optional_shared(dev); if (IS_ERR(dwc->reset)) diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 620c8d3914d7..5c491d0a19d7 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -143,7 +143,7 @@ #define DWC3_GHWPARAMS8 0xc600 #define DWC3_GUCTL3 0xc60c #define DWC3_GFLADJ 0xc630 -#define DWC3_GHWPARAMS9 0xc680 +#define DWC3_GHWPARAMS9 0xc6e0 /* Device Registers */ #define DWC3_DCFG 0xc700 diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index 9abbd01028c5..3cb01cdd02c2 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -649,7 +649,6 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev) struct dwc3_qcom *qcom = platform_get_drvdata(pdev); struct device_node *np = pdev->dev.of_node, *dwc3_np; struct device *dev = &pdev->dev; - struct property *prop; int ret; dwc3_np = of_get_compatible_child(np, "snps,dwc3"); @@ -658,20 +657,6 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev) return -ENODEV; } - prop = devm_kzalloc(dev, sizeof(*prop), GFP_KERNEL); - if (!prop) { - ret = -ENOMEM; - dev_err(dev, "unable to allocate memory for property\n"); - goto node_put; - } - - prop->name = "tx-fifo-resize"; - ret = of_add_property(dwc3_np, prop); - if (ret) { - dev_err(dev, "unable to add property\n"); - goto node_put; - } - ret = of_platform_populate(np, NULL, NULL, dev); if (ret) { dev_err(dev, "failed to register dwc3 core - %d\n", ret); diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 23de2a5a40d6..7e3db00e9759 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -310,13 +310,24 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd, if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) { int link_state; + /* + * Initiate remote wakeup if the link state is in U3 when + * operating in SS/SSP or L1/L2 when operating in HS/FS. If the + * link state is in U1/U2, no remote wakeup is needed. The Start + * Transfer command will initiate the link recovery. 
+ */ link_state = dwc3_gadget_get_link_state(dwc); - if (link_state == DWC3_LINK_STATE_U1 || - link_state == DWC3_LINK_STATE_U2 || - link_state == DWC3_LINK_STATE_U3) { + switch (link_state) { + case DWC3_LINK_STATE_U2: + if (dwc->gadget->speed >= USB_SPEED_SUPER) + break; + + fallthrough; + case DWC3_LINK_STATE_U3: ret = __dwc3_gadget_wakeup(dwc); dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n", ret); + break; } } @@ -3252,6 +3263,9 @@ static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep, struct dwc3 *dwc = dep->dwc; bool no_started_trb = true; + if (!dep->endpoint.desc) + return no_started_trb; + dwc3_gadget_ep_cleanup_completed_requests(dep, event, status); if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) @@ -3299,6 +3313,9 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep, { int status = 0; + if (!dep->endpoint.desc) + return; + if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) dwc3_gadget_endpoint_frame_from_event(dep, event); @@ -3352,6 +3369,14 @@ static void dwc3_gadget_endpoint_command_complete(struct dwc3_ep *dep, if (cmd != DWC3_DEPCMD_ENDTRANSFER) return; + /* + * The END_TRANSFER command will cause the controller to generate a + * NoStream Event, and it's not due to the host DP NoStream rejection. + * Ignore the next NoStream event. + */ + if (dep->stream_capable) + dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM; + dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING; dep->flags &= ~DWC3_EP_TRANSFER_STARTED; dwc3_gadget_ep_cleanup_cancelled_requests(dep); @@ -3574,14 +3599,6 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, WARN_ON_ONCE(ret); dep->resource_index = 0; - /* - * The END_TRANSFER command will cause the controller to generate a - * NoStream Event, and it's not due to the host DP NoStream rejection. - * Ignore the next NoStream event. - */ - if (dep->stream_capable) - dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM; - if (!interrupt) dep->flags &= ~DWC3_EP_TRANSFER_STARTED; else diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 504c1cbc255d..284eea9f6e4d 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1679,6 +1679,18 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) struct usb_function *f = NULL; u8 endp; + if (w_length > USB_COMP_EP0_BUFSIZ) { + if (ctrl->bRequestType == USB_DIR_OUT) { + goto done; + } else { + /* Cast away the const, we are going to overwrite on purpose. */ + __le16 *temp = (__le16 *)&ctrl->wLength; + + *temp = cpu_to_le16(USB_COMP_EP0_BUFSIZ); + w_length = USB_COMP_EP0_BUFSIZ; + } + } + /* partial re-init of the response message; the function or the * gadget might need to intercept e.g. a control-OUT completion * when we delegate to it. 
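The composite_setup() guard above is one instance of a pattern this series applies to several ep0 handlers (the dbgp and gadgetfs hunks below get the same treatment): when the host's wLength exceeds the gadget's ep0 buffer, a control-OUT request must be rejected outright, since the incoming data stage cannot be shortened, while a control-IN request may legally be answered with fewer bytes than requested, so wLength is clamped to the buffer size. A standalone sketch of the idea, with a hypothetical helper name and buffer-size parameter (the real hunks cast away const only because composite_setup() receives a const request):

#include <linux/errno.h>
#include <linux/usb/ch9.h>

/* Hypothetical helper, not kernel API: clamp an ep0 transfer to the
 * driver's scratch buffer; returns the usable length or -EOVERFLOW. */
static int ep0_clamp_wlength(struct usb_ctrlrequest *ctrl, u16 bufsiz)
{
	u16 w_length = le16_to_cpu(ctrl->wLength);

	if (w_length <= bufsiz)
		return w_length;
	if (ctrl->bRequestType == USB_DIR_OUT)	/* host-to-device data stage */
		return -EOVERFLOW;
	ctrl->wLength = cpu_to_le16(bufsiz);	/* device-to-host: short reply */
	return bufsiz;
}

Pairing the clamp with the kmalloc()-to-kzalloc() conversions in the same hunks also keeps a truncated reply from exposing stale buffer contents.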
@@ -2209,7 +2221,7 @@ int composite_dev_prepare(struct usb_composite_driver *composite, if (!cdev->req) return -ENOMEM; - cdev->req->buf = kmalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL); + cdev->req->buf = kzalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL); if (!cdev->req->buf) goto fail; diff --git a/drivers/usb/gadget/legacy/dbgp.c b/drivers/usb/gadget/legacy/dbgp.c index e1d566c9918a..355bc7dab9d5 100644 --- a/drivers/usb/gadget/legacy/dbgp.c +++ b/drivers/usb/gadget/legacy/dbgp.c @@ -137,7 +137,7 @@ static int dbgp_enable_ep_req(struct usb_ep *ep) goto fail_1; } - req->buf = kmalloc(DBGP_REQ_LEN, GFP_KERNEL); + req->buf = kzalloc(DBGP_REQ_LEN, GFP_KERNEL); if (!req->buf) { err = -ENOMEM; stp = 2; @@ -345,6 +345,19 @@ static int dbgp_setup(struct usb_gadget *gadget, void *data = NULL; u16 len = 0; + if (length > DBGP_REQ_LEN) { + if (ctrl->bRequestType == USB_DIR_OUT) { + return err; + } else { + /* Cast away the const, we are going to overwrite on purpose. */ + __le16 *temp = (__le16 *)&ctrl->wLength; + + *temp = cpu_to_le16(DBGP_REQ_LEN); + length = DBGP_REQ_LEN; + } + } + + if (request == USB_REQ_GET_DESCRIPTOR) { switch (value>>8) { case USB_DT_DEVICE: diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index 78be94750232..63150e3889ef 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -110,6 +110,8 @@ enum ep0_state { /* enough for the whole queue: most events invalidate others */ #define N_EVENT 5 +#define RBUF_SIZE 256 + struct dev_data { spinlock_t lock; refcount_t count; @@ -144,7 +146,7 @@ struct dev_data { struct dentry *dentry; /* except this scratch i/o buffer for ep0 */ - u8 rbuf [256]; + u8 rbuf[RBUF_SIZE]; }; static inline void get_dev (struct dev_data *data) @@ -1331,6 +1333,18 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); + if (w_length > RBUF_SIZE) { + if (ctrl->bRequestType == USB_DIR_OUT) { + return value; + } else { + /* Cast away the const, we are going to overwrite on purpose. */ + __le16 *temp = (__le16 *)&ctrl->wLength; + + *temp = cpu_to_le16(RBUF_SIZE); + w_length = RBUF_SIZE; + } + } + spin_lock (&dev->lock); dev->setup_abort = 0; if (dev->state == STATE_DEV_UNCONNECTED) { diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c index f5ca670776a3..857159dd5ae0 100644 --- a/drivers/usb/gadget/udc/udc-xilinx.c +++ b/drivers/usb/gadget/udc/udc-xilinx.c @@ -2136,7 +2136,7 @@ static int xudc_probe(struct platform_device *pdev) ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget); if (ret) - goto fail; + goto err_disable_unprepare_clk; udc->dev = &udc->gadget.dev; @@ -2155,6 +2155,9 @@ static int xudc_probe(struct platform_device *pdev) udc->dma_enabled ? "with DMA" : "without DMA"); return 0; + +err_disable_unprepare_clk: + clk_disable_unprepare(udc->clk); fail: dev_err(&pdev->dev, "probe failed, %d\n", ret); return ret; diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index af946c42b6f0..df3522dab31b 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -717,6 +717,7 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci, continue; retval = xhci_disable_slot(xhci, i); + xhci_free_virt_device(xhci, i); if (retval) xhci_err(xhci, "Failed to disable slot %d, %d. 
Enter test mode anyway\n", i, retval); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 311597bba80e..d0b6806275e0 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -366,7 +366,9 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci, /* Must be called with xhci->lock held, releases and acquires lock back */ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags) { - u32 temp_32; + struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg; + union xhci_trb *new_deq = xhci->cmd_ring->dequeue; + u64 crcr; int ret; xhci_dbg(xhci, "Abort command ring\n"); @@ -375,13 +377,18 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags) /* * The control bits like command stop, abort are located in lower - * dword of the command ring control register. Limit the write - * to the lower dword to avoid corrupting the command ring pointer - * in case if the command ring is stopped by the time upper dword - * is written. + * dword of the command ring control register. + * Some controllers require all 64 bits to be written to abort the ring. + * Make sure the upper dword is valid, pointing to the next command, + * avoiding corrupting the command ring pointer in case the command ring + * is stopped by the time the upper dword is written. */ - temp_32 = readl(&xhci->op_regs->cmd_ring); - writel(temp_32 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring); + next_trb(xhci, NULL, &new_seg, &new_deq); + if (trb_is_link(new_deq)) + next_trb(xhci, NULL, &new_seg, &new_deq); + + crcr = xhci_trb_virt_to_dma(new_seg, new_deq); + xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring); /* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the * completion of the Command Abort operation. If CRR is not negated in 5 @@ -1518,7 +1525,6 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) if (xhci->quirks & XHCI_EP_LIMIT_QUIRK) /* Delete default control endpoint resources */ xhci_free_device_endpoint_resources(xhci, virt_dev, true); - xhci_free_virt_device(xhci, slot_id); } static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id, diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index 1bf494b649bd..c8af2cd2216d 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -1400,6 +1400,7 @@ static void tegra_xusb_deinit_usb_phy(struct tegra_xusb *tegra) static int tegra_xusb_probe(struct platform_device *pdev) { + struct of_phandle_args args; struct tegra_xusb *tegra; struct device_node *np; struct resource *regs; @@ -1454,10 +1455,17 @@ static int tegra_xusb_probe(struct platform_device *pdev) goto put_padctl; } - tegra->padctl_irq = of_irq_get(np, 0); - if (tegra->padctl_irq <= 0) { - err = (tegra->padctl_irq == 0) ? -ENODEV : tegra->padctl_irq; - goto put_padctl; + /* Older device-trees don't have padctrl interrupt */ + err = of_irq_parse_one(np, 0, &args); + if (!err) { + tegra->padctl_irq = of_irq_get(np, 0); + if (tegra->padctl_irq <= 0) { + err = (tegra->padctl_irq == 0) ? 
-ENODEV : tegra->padctl_irq; + goto put_padctl; + } + } else { + dev_dbg(&pdev->dev, + "%pOF is missing an interrupt, disabling PM support\n", np); } tegra->host_clk = devm_clk_get(&pdev->dev, "xusb_host"); @@ -1696,11 +1704,15 @@ static int tegra_xusb_probe(struct platform_device *pdev) goto remove_usb3; } - err = devm_request_threaded_irq(&pdev->dev, tegra->padctl_irq, NULL, tegra_xusb_padctl_irq, - IRQF_ONESHOT, dev_name(&pdev->dev), tegra); - if (err < 0) { - dev_err(&pdev->dev, "failed to request padctl IRQ: %d\n", err); - goto remove_usb3; + if (tegra->padctl_irq) { + err = devm_request_threaded_irq(&pdev->dev, tegra->padctl_irq, + NULL, tegra_xusb_padctl_irq, + IRQF_ONESHOT, dev_name(&pdev->dev), + tegra); + if (err < 0) { + dev_err(&pdev->dev, "failed to request padctl IRQ: %d\n", err); + goto remove_usb3; + } } err = tegra_xusb_enable_firmware_messages(tegra); @@ -1718,13 +1730,16 @@ static int tegra_xusb_probe(struct platform_device *pdev) /* Enable wake for both USB 2.0 and USB 3.0 roothubs */ device_init_wakeup(&tegra->hcd->self.root_hub->dev, true); device_init_wakeup(&xhci->shared_hcd->self.root_hub->dev, true); - device_init_wakeup(tegra->dev, true); pm_runtime_use_autosuspend(tegra->dev); pm_runtime_set_autosuspend_delay(tegra->dev, 2000); pm_runtime_mark_last_busy(tegra->dev); pm_runtime_set_active(tegra->dev); - pm_runtime_enable(tegra->dev); + + if (tegra->padctl_irq) { + device_init_wakeup(tegra->dev, true); + pm_runtime_enable(tegra->dev); + } return 0; @@ -1772,7 +1787,9 @@ static int tegra_xusb_remove(struct platform_device *pdev) dma_free_coherent(&pdev->dev, tegra->fw.size, tegra->fw.virt, tegra->fw.phys); - pm_runtime_disable(&pdev->dev); + if (tegra->padctl_irq) + pm_runtime_disable(&pdev->dev); + pm_runtime_put(&pdev->dev); tegra_xusb_powergate_partitions(tegra); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 902f410874e8..f5b1bcc875de 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -3934,7 +3934,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) struct xhci_slot_ctx *slot_ctx; int i, ret; -#ifndef CONFIG_USB_DEFAULT_PERSIST /* * We called pm_runtime_get_noresume when the device was attached. 
* Decrement the counter here to allow controller to runtime suspend @@ -3942,7 +3941,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) */ if (xhci->quirks & XHCI_RESET_ON_RESUME) pm_runtime_put_noidle(hcd->self.controller); -#endif ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); /* If the host is halted due to driver unload, we still need to free the @@ -3961,9 +3959,8 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); } virt_dev->udev = NULL; - ret = xhci_disable_slot(xhci, udev->slot_id); - if (ret) - xhci_free_virt_device(xhci, udev->slot_id); + xhci_disable_slot(xhci, udev->slot_id); + xhci_free_virt_device(xhci, udev->slot_id); } int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) @@ -3973,7 +3970,7 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) u32 state; int ret = 0; - command = xhci_alloc_command(xhci, false, GFP_KERNEL); + command = xhci_alloc_command(xhci, true, GFP_KERNEL); if (!command) return -ENOMEM; @@ -3998,6 +3995,15 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) } xhci_ring_cmd_db(xhci); spin_unlock_irqrestore(&xhci->lock, flags); + + wait_for_completion(command->completion); + + if (command->status != COMP_SUCCESS) + xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n", + slot_id, command->status); + + xhci_free_command(xhci, command); + return ret; } @@ -4094,23 +4100,20 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) xhci_debugfs_create_slot(xhci, slot_id); -#ifndef CONFIG_USB_DEFAULT_PERSIST /* * If resetting upon resume, we can't put the controller into runtime * suspend if there is a device attached. */ if (xhci->quirks & XHCI_RESET_ON_RESUME) pm_runtime_get_noresume(hcd->self.controller); -#endif /* Is this a LS or FS device under a HS hub? */ /* Hub or peripheral? 
*/ return 1; disable_slot: - ret = xhci_disable_slot(xhci, udev->slot_id); - if (ret) - xhci_free_virt_device(xhci, udev->slot_id); + xhci_disable_slot(xhci, udev->slot_id); + xhci_free_virt_device(xhci, udev->slot_id); return 0; } @@ -4240,6 +4243,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, mutex_unlock(&xhci->mutex); ret = xhci_disable_slot(xhci, udev->slot_id); + xhci_free_virt_device(xhci, udev->slot_id); if (!ret) xhci_alloc_dev(hcd, udev); kfree(command->completion); diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index a484ff5e4ebf..546fce4617a8 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1267,6 +1267,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(2) }, { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */ .driver_info = NCTRL(0) | ZLP }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */ + .driver_info = NCTRL(0) | ZLP }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), .driver_info = RSVD(1) }, @@ -2094,6 +2096,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */ + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */ + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */ + .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */ diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index f45ca7ddf78e..a70fd86f735c 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -432,6 +432,7 @@ static int pl2303_detect_type(struct usb_serial *serial) case 0x200: switch (bcdDevice) { case 0x100: + case 0x105: case 0x305: case 0x405: /* diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c index 7a2a17866a82..72f9001b0792 100644 --- a/drivers/usb/typec/tcpm/fusb302.c +++ b/drivers/usb/typec/tcpm/fusb302.c @@ -669,25 +669,27 @@ static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc) ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK, FUSB_REG_MASK_BC_LVL | FUSB_REG_MASK_COMP_CHNG, - FUSB_REG_MASK_COMP_CHNG); + FUSB_REG_MASK_BC_LVL); if (ret < 0) { fusb302_log(chip, "cannot set SRC interrupt, ret=%d", ret); goto done; } chip->intr_comp_chng = true; + chip->intr_bc_lvl = false; break; case TYPEC_CC_RD: ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK, FUSB_REG_MASK_BC_LVL | FUSB_REG_MASK_COMP_CHNG, - FUSB_REG_MASK_BC_LVL); + FUSB_REG_MASK_COMP_CHNG); if (ret < 0) { fusb302_log(chip, "cannot set SRC interrupt, ret=%d", ret); goto done; } chip->intr_bc_lvl = true; + chip->intr_comp_chng = false; break; default: break; diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 7f2f3ff1b391..6010b9901126 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -4110,11 +4110,7 @@ static void run_state_machine(struct 
tcpm_port *port) tcpm_try_src(port) ? SRC_TRY : SNK_ATTACHED, 0); - else - /* Wait for VBUS, but not forever */ - tcpm_set_state(port, PORT_RESET, PD_T_PS_SOURCE_ON); break; - case SRC_TRY: port->try_src_count++; tcpm_set_cc(port, tcpm_rp_cc(port)); diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c index fb8ef12bbe9c..6d27a5b5e3ca 100644 --- a/drivers/usb/typec/tipd/core.c +++ b/drivers/usb/typec/tipd/core.c @@ -653,7 +653,7 @@ static int cd321x_switch_power_state(struct tps6598x *tps, u8 target_state) if (state == target_state) return 0; - ret = tps6598x_exec_cmd(tps, "SPSS", sizeof(u8), &target_state, 0, NULL); + ret = tps6598x_exec_cmd(tps, "SSPS", sizeof(u8), &target_state, 0, NULL); if (ret) return ret; @@ -707,6 +707,7 @@ static int tps6598x_probe(struct i2c_client *client) u32 conf; u32 vid; int ret; + u64 mask1; tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL); if (!tps) @@ -730,11 +731,6 @@ static int tps6598x_probe(struct i2c_client *client) if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) tps->i2c_protocol = true; - /* Make sure the controller has application firmware running */ - ret = tps6598x_check_mode(tps); - if (ret) - return ret; - if (np && of_device_is_compatible(np, "apple,cd321x")) { /* Switch CD321X chips to the correct system power state */ ret = cd321x_switch_power_state(tps, TPS_SYSTEM_POWER_STATE_S0); @@ -742,24 +738,27 @@ static int tps6598x_probe(struct i2c_client *client) return ret; /* CD321X chips have all interrupts masked initially */ - ret = tps6598x_write64(tps, TPS_REG_INT_MASK1, - APPLE_CD_REG_INT_POWER_STATUS_UPDATE | - APPLE_CD_REG_INT_DATA_STATUS_UPDATE | - APPLE_CD_REG_INT_PLUG_EVENT); - if (ret) - return ret; + mask1 = APPLE_CD_REG_INT_POWER_STATUS_UPDATE | + APPLE_CD_REG_INT_DATA_STATUS_UPDATE | + APPLE_CD_REG_INT_PLUG_EVENT; irq_handler = cd321x_interrupt; } else { /* Enable power status, data status and plug event interrupts */ - ret = tps6598x_write64(tps, TPS_REG_INT_MASK1, - TPS_REG_INT_POWER_STATUS_UPDATE | - TPS_REG_INT_DATA_STATUS_UPDATE | - TPS_REG_INT_PLUG_EVENT); - if (ret) - return ret; + mask1 = TPS_REG_INT_POWER_STATUS_UPDATE | + TPS_REG_INT_DATA_STATUS_UPDATE | + TPS_REG_INT_PLUG_EVENT; } + /* Make sure the controller has application firmware running */ + ret = tps6598x_check_mode(tps); + if (ret) + return ret; + + ret = tps6598x_write64(tps, TPS_REG_INT_MASK1, mask1); + if (ret) + return ret; + ret = tps6598x_read32(tps, TPS_REG_STATUS, &status); if (ret < 0) return ret; diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index 5f484fff8dbe..41b0cd17fcba 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -591,8 +591,11 @@ static void vdpasim_free(struct vdpa_device *vdpa) vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov); } - put_iova_domain(&vdpasim->iova); - iova_cache_put(); + if (vdpa_get_dma_dev(vdpa)) { + put_iova_domain(&vdpasim->iova); + iova_cache_put(); + } + kvfree(vdpasim->buffer); if (vdpasim->iommu) vhost_iotlb_free(vdpasim->iommu); diff --git a/drivers/vfio/pci/vfio_pci_igd.c b/drivers/vfio/pci/vfio_pci_igd.c index 56cd551e0e04..362f91ec8845 100644 --- a/drivers/vfio/pci/vfio_pci_igd.c +++ b/drivers/vfio/pci/vfio_pci_igd.c @@ -98,7 +98,8 @@ static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev, version = cpu_to_le16(0x0201); if (igd_opregion_shift_copy(buf, &off, - &version + (pos - OPREGION_VERSION), + (u8 *)&version + + (pos - OPREGION_VERSION), &pos, &remaining, bytes)) return -EFAULT; } @@ -121,7 
+122,7 @@ static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev, OPREGION_SIZE : 0); if (igd_opregion_shift_copy(buf, &off, - &rvda + (pos - OPREGION_RVDA), + (u8 *)&rvda + (pos - OPREGION_RVDA), &pos, &remaining, bytes)) return -EFAULT; } diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 82fb75464f92..735d1d344af9 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -232,7 +232,7 @@ static inline bool vfio_iommu_driver_allowed(struct vfio_container *container, } #endif /* CONFIG_VFIO_NOIOMMU */ -/** +/* * IOMMU driver registration */ int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops) @@ -285,7 +285,7 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb, unsigned long action, void *data); static void vfio_group_get(struct vfio_group *group); -/** +/* * Container objects - containers are created when /dev/vfio/vfio is * opened, but their lifecycle extends until the last user is done, so * it's freed via kref. Must support container/group/device being @@ -309,7 +309,7 @@ static void vfio_container_put(struct vfio_container *container) kref_put(&container->kref, vfio_container_release); } -/** +/* * Group objects - create, release, get, put, search */ static struct vfio_group * @@ -488,7 +488,7 @@ static struct vfio_group *vfio_group_get_from_dev(struct device *dev) return group; } -/** +/* * Device objects - create, release, get, put, search */ /* Device reference always implies a group reference */ @@ -595,7 +595,7 @@ static int vfio_dev_viable(struct device *dev, void *data) return ret; } -/** +/* * Async device support */ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev) @@ -689,7 +689,7 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb, return NOTIFY_OK; } -/** +/* * VFIO driver API */ void vfio_init_group_dev(struct vfio_device *device, struct device *dev, @@ -831,7 +831,7 @@ int vfio_register_emulated_iommu_dev(struct vfio_device *device) } EXPORT_SYMBOL_GPL(vfio_register_emulated_iommu_dev); -/** +/* * Get a reference to the vfio_device for a device. Even if the * caller thinks they own the device, they could be racing with a * release call path, so we can't trust drvdata for the shortcut. @@ -965,7 +965,7 @@ void vfio_unregister_group_dev(struct vfio_device *device) } EXPORT_SYMBOL_GPL(vfio_unregister_group_dev); -/** +/* * VFIO base fd, /dev/vfio/vfio */ static long vfio_ioctl_check_extension(struct vfio_container *container, @@ -1183,7 +1183,7 @@ static const struct file_operations vfio_fops = { .compat_ioctl = compat_ptr_ioctl, }; -/** +/* * VFIO Group fd, /dev/vfio/$GROUP */ static void __vfio_group_unset_container(struct vfio_group *group) @@ -1536,7 +1536,7 @@ static const struct file_operations vfio_group_fops = { .release = vfio_group_fops_release, }; -/** +/* * VFIO Device fd */ static int vfio_device_fops_release(struct inode *inode, struct file *filep) @@ -1611,7 +1611,7 @@ static const struct file_operations vfio_device_fops = { .mmap = vfio_device_fops_mmap, }; -/** +/* * External user API, exported by symbols to be linked dynamically. * * The protocol includes: @@ -1659,7 +1659,7 @@ struct vfio_group *vfio_group_get_external_user(struct file *filep) } EXPORT_SYMBOL_GPL(vfio_group_get_external_user); -/** +/* * External user API, exported by symbols to be linked dynamically. 
* The external user passes in a device pointer * to verify that: @@ -1725,7 +1725,7 @@ long vfio_external_check_extension(struct vfio_group *group, unsigned long arg) } EXPORT_SYMBOL_GPL(vfio_external_check_extension); -/** +/* * Sub-module support */ /* @@ -2272,7 +2272,7 @@ struct iommu_domain *vfio_group_iommu_domain(struct vfio_group *group) } EXPORT_SYMBOL_GPL(vfio_group_iommu_domain); -/** +/* * Module/class support */ static char *vfio_devnode(struct device *dev, umode_t *mode) diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 01c59ce7e250..29cced1cd277 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -1014,12 +1014,12 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep) mutex_lock(&d->mutex); filep->private_data = NULL; + vhost_vdpa_clean_irq(v); vhost_vdpa_reset(v); vhost_dev_stop(&v->vdev); vhost_vdpa_iotlb_free(v); vhost_vdpa_free_domain(v); vhost_vdpa_config_put(v); - vhost_vdpa_clean_irq(v); vhost_dev_cleanup(&v->vdev); kfree(v->vdev.vqs); mutex_unlock(&d->mutex); diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 938aefbc75ec..d6ca1c7ad513 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -511,8 +511,6 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) vhost_disable_notify(&vsock->dev, vq); do { - u32 len; - if (!vhost_vsock_more_replies(vsock)) { /* Stop tx until the device processes already * pending replies. Leave tx virtqueue @@ -540,7 +538,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) continue; } - len = pkt->len; + total_len += sizeof(pkt->hdr) + pkt->len; /* Deliver to monitoring devices all received packets */ virtio_transport_deliver_tap_pkt(pkt); @@ -553,9 +551,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) else virtio_transport_free_pkt(pkt); - len += sizeof(pkt->hdr); - vhost_add_used(vq, head, len); - total_len += len; + vhost_add_used(vq, head, 0); added = true; } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len))); diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c index 1b451165311c..40496e9e9b43 100644 --- a/drivers/video/console/sticon.c +++ b/drivers/video/console/sticon.c @@ -332,13 +332,13 @@ static u8 sticon_build_attr(struct vc_data *conp, u8 color, bool blink, bool underline, bool reverse, bool italic) { - u8 attr = ((color & 0x70) >> 1) | ((color & 7)); + u8 fg = color & 7; + u8 bg = (color & 0x70) >> 4; - if (reverse) { - color = ((color >> 3) & 0x7) | ((color & 0x7) << 3); - } - - return attr; + if (reverse) + return (fg << 3) | bg; + else + return (bg << 3) | fg; } static void sticon_invert_region(struct vc_data *conp, u16 *p, int count) diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index ef9c57ce0906..576612f18d59 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -97,30 +97,9 @@ static int vga_video_font_height; static int vga_scan_lines __read_mostly; static unsigned int vga_rolled_over; /* last vc_origin offset before wrap */ -static bool vgacon_text_mode_force; static bool vga_hardscroll_enabled; static bool vga_hardscroll_user_enable = true; -bool vgacon_text_force(void) -{ - return vgacon_text_mode_force; -} -EXPORT_SYMBOL(vgacon_text_force); - -static int __init text_mode(char *str) -{ - vgacon_text_mode_force = true; - - pr_warn("You have booted with nomodeset. 
This means your GPU drivers are DISABLED\n"); - pr_warn("Any video related functionality will be severely degraded, and you may not even be able to suspend the system properly\n"); - pr_warn("Unless you actually understand what nomodeset does, you should reboot without enabling it\n"); - - return 1; -} - -/* force text mode - used by kernel modesetting */ -__setup("nomodeset", text_mode); - static int __init no_scroll(char *str) { /* @@ -366,11 +345,17 @@ static void vgacon_init(struct vc_data *c, int init) struct uni_pagedir *p; /* - * We cannot be loaded as a module, therefore init is always 1, - * but vgacon_init can be called more than once, and init will - * not be 1. + * We cannot be loaded as a module, therefore init will be 1 + * if we are the default console, however if we are a fallback + * console, for example if fbcon has failed registration, then + * init will be 0, so we need to make sure our boot parameters + * have been copied to the console structure for vgacon_resize + * ultimately called by vc_resize. Any subsequent calls to + * vgacon_init will have init set to 0 too. */ c->vc_can_do_color = vga_can_do_color; + c->vc_scan_lines = vga_scan_lines; + c->vc_font.height = c->vc_cell_height = vga_video_font_height; /* set dimensions manually if init != 0 since vc_resize() will fail */ if (init) { @@ -379,8 +364,6 @@ static void vgacon_init(struct vc_data *c, int init) } else vc_resize(c, vga_video_num_columns, vga_video_num_lines); - c->vc_scan_lines = vga_scan_lines; - c->vc_font.height = c->vc_cell_height = vga_video_font_height; c->vc_complement_mask = 0x7700; if (vga_512_chars) c->vc_hi_font_mask = 0x0800; diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c index 65dae05fff8e..26892940c213 100644 --- a/drivers/video/fbdev/core/fbsysfs.c +++ b/drivers/video/fbdev/core/fbsysfs.c @@ -230,7 +230,7 @@ static ssize_t show_bpp(struct device *device, struct device_attribute *attr, char *buf) { struct fb_info *fb_info = dev_get_drvdata(device); - return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->var.bits_per_pixel); + return sysfs_emit(buf, "%d\n", fb_info->var.bits_per_pixel); } static ssize_t store_rotate(struct device *device, @@ -257,7 +257,7 @@ static ssize_t show_rotate(struct device *device, { struct fb_info *fb_info = dev_get_drvdata(device); - return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->var.rotate); + return sysfs_emit(buf, "%d\n", fb_info->var.rotate); } static ssize_t store_virtual(struct device *device, @@ -285,7 +285,7 @@ static ssize_t show_virtual(struct device *device, struct device_attribute *attr, char *buf) { struct fb_info *fb_info = dev_get_drvdata(device); - return snprintf(buf, PAGE_SIZE, "%d,%d\n", fb_info->var.xres_virtual, + return sysfs_emit(buf, "%d,%d\n", fb_info->var.xres_virtual, fb_info->var.yres_virtual); } @@ -293,7 +293,7 @@ static ssize_t show_stride(struct device *device, struct device_attribute *attr, char *buf) { struct fb_info *fb_info = dev_get_drvdata(device); - return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->fix.line_length); + return sysfs_emit(buf, "%d\n", fb_info->fix.line_length); } static ssize_t store_blank(struct device *device, @@ -381,7 +381,7 @@ static ssize_t show_pan(struct device *device, struct device_attribute *attr, char *buf) { struct fb_info *fb_info = dev_get_drvdata(device); - return snprintf(buf, PAGE_SIZE, "%d,%d\n", fb_info->var.xoffset, + return sysfs_emit(buf, "%d,%d\n", fb_info->var.xoffset, fb_info->var.yoffset); } @@ -390,7 +390,7 @@ static ssize_t show_name(struct
device *device, { struct fb_info *fb_info = dev_get_drvdata(device); - return snprintf(buf, PAGE_SIZE, "%s\n", fb_info->fix.id); + return sysfs_emit(buf, "%s\n", fb_info->fix.id); } static ssize_t store_fbstate(struct device *device, @@ -418,7 +418,7 @@ static ssize_t show_fbstate(struct device *device, struct device_attribute *attr, char *buf) { struct fb_info *fb_info = dev_get_drvdata(device); - return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->state); + return sysfs_emit(buf, "%d\n", fb_info->state); } #if IS_ENABLED(CONFIG_FB_BACKLIGHT) diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c index edca3703b964..ea42ba6445b2 100644 --- a/drivers/video/fbdev/efifb.c +++ b/drivers/video/fbdev/efifb.c @@ -351,6 +351,17 @@ static int efifb_probe(struct platform_device *dev) char *option = NULL; efi_memory_desc_t md; + /* + * Generic drivers must not be registered if a framebuffer exists. + * If a native driver was probed, the display hardware was already + * taken and attempting to use the system framebuffer is dangerous. + */ + if (num_registered_fb > 0) { + dev_err(&dev->dev, + "efifb: a framebuffer is already registered\n"); + return -EINVAL; + } + if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled) return -ENODEV; diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c index 3d090d2d9ed9..b495c09e6102 100644 --- a/drivers/video/fbdev/omap/omapfb_main.c +++ b/drivers/video/fbdev/omap/omapfb_main.c @@ -1555,6 +1555,7 @@ static void omapfb_free_resources(struct omapfb_device *fbdev, int state) case 1: dev_set_drvdata(fbdev->dev, NULL); kfree(fbdev); + break; case 0: /* nothing to free */ break; diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c index 6dbe265b312d..8f355d1caf86 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c @@ -19,14 +19,14 @@ static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf) { - return snprintf(buf, PAGE_SIZE, "%s\n", + return sysfs_emit(buf, "%s\n", dssdev->name ? dssdev->name : ""); } static ssize_t display_enabled_show(struct omap_dss_device *dssdev, char *buf) { - return snprintf(buf, PAGE_SIZE, "%d\n", + return sysfs_emit(buf, "%d\n", omapdss_device_is_enabled(dssdev)); } @@ -59,7 +59,7 @@ static ssize_t display_enabled_store(struct omap_dss_device *dssdev, static ssize_t display_tear_show(struct omap_dss_device *dssdev, char *buf) { - return snprintf(buf, PAGE_SIZE, "%d\n", + return sysfs_emit(buf, "%d\n", dssdev->driver->get_te ? 
dssdev->driver->get_te(dssdev) : 0); } @@ -93,7 +93,7 @@ static ssize_t display_timings_show(struct omap_dss_device *dssdev, char *buf) dssdev->driver->get_timings(dssdev, &t); - return snprintf(buf, PAGE_SIZE, "%u,%u/%u/%u/%u,%u/%u/%u/%u\n", + return sysfs_emit(buf, "%u,%u/%u/%u/%u,%u/%u/%u/%u\n", t.pixelclock, t.x_res, t.hfp, t.hbp, t.hsw, t.y_res, t.vfp, t.vbp, t.vsw); @@ -143,7 +143,7 @@ static ssize_t display_rotate_show(struct omap_dss_device *dssdev, char *buf) if (!dssdev->driver->get_rotate) return -ENOENT; rotate = dssdev->driver->get_rotate(dssdev); - return snprintf(buf, PAGE_SIZE, "%u\n", rotate); + return sysfs_emit(buf, "%u\n", rotate); } static ssize_t display_rotate_store(struct omap_dss_device *dssdev, @@ -171,7 +171,7 @@ static ssize_t display_mirror_show(struct omap_dss_device *dssdev, char *buf) if (!dssdev->driver->get_mirror) return -ENOENT; mirror = dssdev->driver->get_mirror(dssdev); - return snprintf(buf, PAGE_SIZE, "%u\n", mirror); + return sysfs_emit(buf, "%u\n", mirror); } static ssize_t display_mirror_store(struct omap_dss_device *dssdev, @@ -203,7 +203,7 @@ static ssize_t display_wss_show(struct omap_dss_device *dssdev, char *buf) wss = dssdev->driver->get_wss(dssdev); - return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss); + return sysfs_emit(buf, "0x%05x\n", wss); } static ssize_t display_wss_store(struct omap_dss_device *dssdev, diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c index b52cc1af0959..3ffb1fe4a38a 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c @@ -22,14 +22,14 @@ static ssize_t manager_name_show(struct omap_overlay_manager *mgr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%s\n", mgr->name); + return sysfs_emit(buf, "%s\n", mgr->name); } static ssize_t manager_display_show(struct omap_overlay_manager *mgr, char *buf) { struct omap_dss_device *dssdev = mgr->get_device(mgr); - return snprintf(buf, PAGE_SIZE, "%s\n", dssdev ? + return sysfs_emit(buf, "%s\n", dssdev ? 
dssdev->name : "<none>"); } @@ -120,7 +120,7 @@ static ssize_t manager_default_color_show(struct omap_overlay_manager *mgr, mgr->get_manager_info(mgr, &info); - return snprintf(buf, PAGE_SIZE, "%#x\n", info.default_color); + return sysfs_emit(buf, "%#x\n", info.default_color); } static ssize_t manager_default_color_store(struct omap_overlay_manager *mgr, @@ -165,7 +165,7 @@ static ssize_t manager_trans_key_type_show(struct omap_overlay_manager *mgr, key_type = info.trans_key_type; BUG_ON(key_type >= ARRAY_SIZE(trans_key_type_str)); - return snprintf(buf, PAGE_SIZE, "%s\n", trans_key_type_str[key_type]); + return sysfs_emit(buf, "%s\n", trans_key_type_str[key_type]); } static ssize_t manager_trans_key_type_store(struct omap_overlay_manager *mgr, @@ -200,7 +200,7 @@ static ssize_t manager_trans_key_value_show(struct omap_overlay_manager *mgr, mgr->get_manager_info(mgr, &info); - return snprintf(buf, PAGE_SIZE, "%#x\n", info.trans_key); + return sysfs_emit(buf, "%#x\n", info.trans_key); } static ssize_t manager_trans_key_value_store(struct omap_overlay_manager *mgr, @@ -236,7 +236,7 @@ static ssize_t manager_trans_key_enabled_show(struct omap_overlay_manager *mgr, mgr->get_manager_info(mgr, &info); - return snprintf(buf, PAGE_SIZE, "%d\n", info.trans_enabled); + return sysfs_emit(buf, "%d\n", info.trans_enabled); } static ssize_t manager_trans_key_enabled_store(struct omap_overlay_manager *mgr, @@ -275,7 +275,7 @@ static ssize_t manager_alpha_blending_enabled_show( mgr->get_manager_info(mgr, &info); - return snprintf(buf, PAGE_SIZE, "%d\n", + return sysfs_emit(buf, "%d\n", info.partial_alpha_enabled); } @@ -316,7 +316,7 @@ static ssize_t manager_cpr_enable_show(struct omap_overlay_manager *mgr, mgr->get_manager_info(mgr, &info); - return snprintf(buf, PAGE_SIZE, "%d\n", info.cpr_enable); + return sysfs_emit(buf, "%d\n", info.cpr_enable); } static ssize_t manager_cpr_enable_store(struct omap_overlay_manager *mgr, @@ -358,7 +358,7 @@ static ssize_t manager_cpr_coef_show(struct omap_overlay_manager *mgr, mgr->get_manager_info(mgr, &info); - return snprintf(buf, PAGE_SIZE, + return sysfs_emit(buf, "%d %d %d %d %d %d %d %d %d\n", info.cpr_coefs.rr, info.cpr_coefs.rg, diff --git a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c index 36acf366213a..421dcb7564ad 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c @@ -22,12 +22,12 @@ static ssize_t overlay_name_show(struct omap_overlay *ovl, char *buf) { - return snprintf(buf, PAGE_SIZE, "%s\n", ovl->name); + return sysfs_emit(buf, "%s\n", ovl->name); } static ssize_t overlay_manager_show(struct omap_overlay *ovl, char *buf) { - return snprintf(buf, PAGE_SIZE, "%s\n", + return sysfs_emit(buf, "%s\n", ovl->manager ? 
ovl->manager->name : "<none>"); } @@ -108,7 +108,7 @@ static ssize_t overlay_input_size_show(struct omap_overlay *ovl, char *buf) ovl->get_overlay_info(ovl, &info); - return snprintf(buf, PAGE_SIZE, "%d,%d\n", + return sysfs_emit(buf, "%d,%d\n", info.width, info.height); } @@ -118,7 +118,7 @@ static ssize_t overlay_screen_width_show(struct omap_overlay *ovl, char *buf) ovl->get_overlay_info(ovl, &info); - return snprintf(buf, PAGE_SIZE, "%d\n", info.screen_width); + return sysfs_emit(buf, "%d\n", info.screen_width); } static ssize_t overlay_position_show(struct omap_overlay *ovl, char *buf) @@ -127,7 +127,7 @@ static ssize_t overlay_position_show(struct omap_overlay *ovl, char *buf) ovl->get_overlay_info(ovl, &info); - return snprintf(buf, PAGE_SIZE, "%d,%d\n", + return sysfs_emit(buf, "%d,%d\n", info.pos_x, info.pos_y); } @@ -166,7 +166,7 @@ static ssize_t overlay_output_size_show(struct omap_overlay *ovl, char *buf) ovl->get_overlay_info(ovl, &info); - return snprintf(buf, PAGE_SIZE, "%d,%d\n", + return sysfs_emit(buf, "%d,%d\n", info.out_width, info.out_height); } @@ -201,7 +201,7 @@ static ssize_t overlay_output_size_store(struct omap_overlay *ovl, static ssize_t overlay_enabled_show(struct omap_overlay *ovl, char *buf) { - return snprintf(buf, PAGE_SIZE, "%d\n", ovl->is_enabled(ovl)); + return sysfs_emit(buf, "%d\n", ovl->is_enabled(ovl)); } static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf, @@ -231,7 +231,7 @@ static ssize_t overlay_global_alpha_show(struct omap_overlay *ovl, char *buf) ovl->get_overlay_info(ovl, &info); - return snprintf(buf, PAGE_SIZE, "%d\n", + return sysfs_emit(buf, "%d\n", info.global_alpha); } @@ -273,7 +273,7 @@ static ssize_t overlay_pre_mult_alpha_show(struct omap_overlay *ovl, ovl->get_overlay_info(ovl, &info); - return snprintf(buf, PAGE_SIZE, "%d\n", + return sysfs_emit(buf, "%d\n", info.pre_mult_alpha); } @@ -314,7 +314,7 @@ static ssize_t overlay_zorder_show(struct omap_overlay *ovl, char *buf) ovl->get_overlay_info(ovl, &info); - return snprintf(buf, PAGE_SIZE, "%d\n", info.zorder); + return sysfs_emit(buf, "%d\n", info.zorder); } static ssize_t overlay_zorder_store(struct omap_overlay *ovl, diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c index 2d39dbfa742e..06dc41aa0354 100644 --- a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c +++ b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c @@ -29,7 +29,7 @@ static ssize_t show_rotate_type(struct device *dev, struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); - return snprintf(buf, PAGE_SIZE, "%d\n", ofbi->rotation_type); + return sysfs_emit(buf, "%d\n", ofbi->rotation_type); } static ssize_t store_rotate_type(struct device *dev, @@ -83,7 +83,7 @@ static ssize_t show_mirror(struct device *dev, struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); - return snprintf(buf, PAGE_SIZE, "%d\n", ofbi->mirror); + return sysfs_emit(buf, "%d\n", ofbi->mirror); } static ssize_t store_mirror(struct device *dev, @@ -415,7 +415,7 @@ static ssize_t show_size(struct device *dev, struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); - return snprintf(buf, PAGE_SIZE, "%lu\n", ofbi->region->size); + return sysfs_emit(buf, "%lu\n", ofbi->region->size); } static ssize_t store_size(struct device *dev, struct device_attribute *attr, @@ -492,7 +492,7 @@ static ssize_t show_phys(struct device *dev, struct fb_info *fbi = dev_get_drvdata(dev); struct 
omapfb_info *ofbi = FB2OFB(fbi); - return snprintf(buf, PAGE_SIZE, "%0x\n", ofbi->region->paddr); + return sysfs_emit(buf, "%0x\n", ofbi->region->paddr); } static ssize_t show_virt(struct device *dev, @@ -501,7 +501,7 @@ static ssize_t show_virt(struct device *dev, struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); - return snprintf(buf, PAGE_SIZE, "%p\n", ofbi->region->vaddr); + return sysfs_emit(buf, "%p\n", ofbi->region->vaddr); } static ssize_t show_upd_mode(struct device *dev, @@ -516,7 +516,7 @@ static ssize_t show_upd_mode(struct device *dev, if (r) return r; - return snprintf(buf, PAGE_SIZE, "%u\n", (unsigned)mode); + return sysfs_emit(buf, "%u\n", (unsigned int)mode); } static ssize_t store_upd_mode(struct device *dev, struct device_attribute *attr, diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c index 62f0ded70681..57541887188b 100644 --- a/drivers/video/fbdev/simplefb.c +++ b/drivers/video/fbdev/simplefb.c @@ -407,6 +407,17 @@ static int simplefb_probe(struct platform_device *pdev) struct simplefb_par *par; struct resource *mem; + /* + * Generic drivers must not be registered if a framebuffer exists. + * If a native driver was probed, the display hardware was already + * taken and attempting to use the system framebuffer is dangerous. + */ + if (num_registered_fb > 0) { + dev_err(&pdev->dev, + "simplefb: a framebuffer is already registered\n"); + return -EINVAL; + } + if (fb_get_options("simplefb", NULL)) return -ENODEV; @@ -530,26 +541,7 @@ static struct platform_driver simplefb_driver = { .remove = simplefb_remove, }; -static int __init simplefb_init(void) -{ - int ret; - struct device_node *np; - - ret = platform_driver_register(&simplefb_driver); - if (ret) - return ret; - - if (IS_ENABLED(CONFIG_OF_ADDRESS) && of_chosen) { - for_each_child_of_node(of_chosen, np) { - if (of_device_is_compatible(np, "simple-framebuffer")) - of_platform_device_create(np, NULL, NULL); - } - } - - return 0; -} - -fs_initcall(simplefb_init); +module_platform_driver(simplefb_driver); MODULE_AUTHOR("Stephen Warren <swarren@wwwdotorg.org>"); MODULE_DESCRIPTION("Simple framebuffer driver"); diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c index 5ec51445bee8..6826f986da43 100644 --- a/drivers/video/fbdev/xen-fbfront.c +++ b/drivers/video/fbdev/xen-fbfront.c @@ -695,6 +695,7 @@ static struct xenbus_driver xenfb_driver = { .remove = xenfb_remove, .resume = xenfb_resume, .otherend_changed = xenfb_backend_changed, + .not_essential = true, }; static int __init xenfb_init(void) diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 00f64f2f8b72..6d2614e34470 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -14,9 +14,6 @@ #include <linux/spinlock.h> #include <xen/xen.h> -static bool force_used_validation = false; -module_param(force_used_validation, bool, 0444); - #ifdef DEBUG /* For development, we want to crash whenever the ring is screwed. */ #define BAD_RING(_vq, fmt, args...) \ @@ -185,9 +182,6 @@ struct vring_virtqueue { } packed; }; - /* Per-descriptor in buffer length */ - u32 *buflen; - /* How to notify other side. FIXME: commonalize hcalls! 
*/ bool (*notify)(struct virtqueue *vq); @@ -496,7 +490,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, unsigned int i, n, avail, descs_used, prev, err_idx; int head; bool indirect; - u32 buflen = 0; START_USE(vq); @@ -578,7 +571,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE, indirect); - buflen += sg->length; } } /* Last one doesn't continue. */ @@ -618,10 +610,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, else vq->split.desc_state[head].indir_desc = ctx; - /* Store in buffer length if necessary */ - if (vq->buflen) - vq->buflen[head] = buflen; - /* Put entry in available array (but don't update avail->idx until they * do sync). */ avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1); @@ -796,11 +784,6 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq, BAD_RING(vq, "id %u is not a head!\n", i); return NULL; } - if (vq->buflen && unlikely(*len > vq->buflen[i])) { - BAD_RING(vq, "used len %d is larger than in buflen %u\n", - *len, vq->buflen[i]); - return NULL; - } /* detach_buf_split clears data, so grab it now. */ ret = vq->split.desc_state[i].data; @@ -1079,7 +1062,6 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, unsigned int i, n, err_idx; u16 head, id; dma_addr_t addr; - u32 buflen = 0; head = vq->packed.next_avail_idx; desc = alloc_indirect_packed(total_sg, gfp); @@ -1109,8 +1091,6 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, desc[i].addr = cpu_to_le64(addr); desc[i].len = cpu_to_le32(sg->length); i++; - if (n >= out_sgs) - buflen += sg->length; } } @@ -1164,10 +1144,6 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, vq->packed.desc_state[id].indir_desc = desc; vq->packed.desc_state[id].last = id; - /* Store in buffer length if necessary */ - if (vq->buflen) - vq->buflen[id] = buflen; - vq->num_added += 1; pr_debug("Added buffer head %i to %p\n", head, vq); @@ -1203,7 +1179,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq, __le16 head_flags, flags; u16 head, id, prev, curr, avail_used_flags; int err; - u32 buflen = 0; START_USE(vq); @@ -1283,8 +1258,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq, 1 << VRING_PACKED_DESC_F_AVAIL | 1 << VRING_PACKED_DESC_F_USED; } - if (n >= out_sgs) - buflen += sg->length; } } @@ -1304,10 +1277,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq, vq->packed.desc_state[id].indir_desc = ctx; vq->packed.desc_state[id].last = prev; - /* Store in buffer length if necessary */ - if (vq->buflen) - vq->buflen[id] = buflen; - /* * A driver MUST NOT make the first descriptor in the list * available before all subsequent descriptors comprising @@ -1494,11 +1463,6 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq, BAD_RING(vq, "id %u is not a head!\n", id); return NULL; } - if (vq->buflen && unlikely(*len > vq->buflen[id])) { - BAD_RING(vq, "used len %d is larger than in buflen %u\n", - *len, vq->buflen[id]); - return NULL; - } /* detach_buf_packed clears data, so grab it now. 
*/ ret = vq->packed.desc_state[id].data; @@ -1704,7 +1668,6 @@ static struct virtqueue *vring_create_virtqueue_packed( struct vring_virtqueue *vq; struct vring_packed_desc *ring; struct vring_packed_desc_event *driver, *device; - struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver); dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr; size_t ring_size_in_bytes, event_size_in_bytes; @@ -1794,15 +1757,6 @@ static struct virtqueue *vring_create_virtqueue_packed( if (!vq->packed.desc_extra) goto err_desc_extra; - if (!drv->suppress_used_validation || force_used_validation) { - vq->buflen = kmalloc_array(num, sizeof(*vq->buflen), - GFP_KERNEL); - if (!vq->buflen) - goto err_buflen; - } else { - vq->buflen = NULL; - } - /* No callback? Tell other side not to bother us. */ if (!callback) { vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; @@ -1815,8 +1769,6 @@ static struct virtqueue *vring_create_virtqueue_packed( spin_unlock(&vdev->vqs_list_lock); return &vq->vq; -err_buflen: - kfree(vq->packed.desc_extra); err_desc_extra: kfree(vq->packed.desc_state); err_desc_state: @@ -2224,7 +2176,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index, void (*callback)(struct virtqueue *), const char *name) { - struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver); struct vring_virtqueue *vq; if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) @@ -2284,15 +2235,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index, if (!vq->split.desc_extra) goto err_extra; - if (!drv->suppress_used_validation || force_used_validation) { - vq->buflen = kmalloc_array(vring.num, sizeof(*vq->buflen), - GFP_KERNEL); - if (!vq->buflen) - goto err_buflen; - } else { - vq->buflen = NULL; - } - /* Put everything in free lists. */ vq->free_head = 0; memset(vq->split.desc_state, 0, vring.num * @@ -2303,8 +2245,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index, spin_unlock(&vdev->vqs_list_lock); return &vq->vq; -err_buflen: - kfree(vq->split.desc_extra); err_extra: kfree(vq->split.desc_state); err_state: diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index a1b11c62da9e..33e941e40082 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -259,9 +259,15 @@ config XEN_SCSI_BACKEND if guests need generic access to SCSI devices. config XEN_PRIVCMD - tristate + tristate "Xen hypercall passthrough driver" depends on XEN default m + help + The hypercall passthrough driver allows privileged user programs to + perform Xen hypercalls. This driver is normally required for systems + running as Dom0 to perform privileged operations, but in some + disaggregated Xen setups this driver might be needed for other + domains, too. 
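As a concrete illustration of the help text above: user space performs a hypercall by filling a struct privcmd_hypercall and passing it to the privcmd ioctl. The following minimal sketch queries the hypervisor version; it assumes the uapi layout from include/uapi/xen/privcmd.h, the usual /dev/xen/privcmd device node, and hypercall/subcommand numbers taken from Xen's public headers, with error handling mostly elided.

/* Illustrative sketch only, not part of the patch: a raw hypercall via privcmd. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <xen/privcmd.h>	/* struct privcmd_hypercall, IOCTL_PRIVCMD_HYPERCALL */

#define HYPERVISOR_xen_version	17	/* __HYPERVISOR_xen_version in Xen's public xen.h */
#define XENVER_version		0

int main(void)
{
	struct privcmd_hypercall call = {
		.op  = HYPERVISOR_xen_version,
		.arg = { XENVER_version },	/* remaining args unused */
	};
	int fd = open("/dev/xen/privcmd", O_RDWR | O_CLOEXEC);
	long ver;

	if (fd < 0)
		return 1;
	/* The ioctl return value is the hypercall's own return value. */
	ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
	if (ver >= 0)
		printf("Xen %ld.%ld\n", ver >> 16, ver & 0xffff);
	return 0;
}

The driver itself does little more than forward the arguments to the hypervisor, which is why the help text frames it as passthrough; whether a given hypercall succeeds is decided by Xen's own privilege checks, not by the driver.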
config XEN_ACPI_PROCESSOR tristate "Xen ACPI processor" diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c index 7984645b5956..3c9ae156b597 100644 --- a/drivers/xen/pvcalls-front.c +++ b/drivers/xen/pvcalls-front.c @@ -1275,6 +1275,7 @@ static struct xenbus_driver pvcalls_front_driver = { .probe = pvcalls_front_probe, .remove = pvcalls_front_remove, .otherend_changed = pvcalls_front_changed, + .not_essential = true, }; static int __init pvcalls_frontend_init(void) diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index bd003ca8acbe..fe360c33ce71 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -909,7 +909,7 @@ static struct notifier_block xenbus_resume_nb = { static int __init xenbus_init(void) { - int err = 0; + int err; uint64_t v = 0; xen_store_domain_type = XS_UNKNOWN; @@ -949,6 +949,29 @@ static int __init xenbus_init(void) err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v); if (err) goto out_error; + /* + * Uninitialized hvm_params are zero and return no error. + * Although it is theoretically possible to have + * HVM_PARAM_STORE_PFN set to zero on purpose, in reality it is + * not zero when valid. If zero, it means that Xenstore hasn't + * been properly initialized. Instead of attempting to map a + * wrong guest physical address, return an error. + * + * Also recognize all bits set as an invalid value. + */ + if (!v || !~v) { + err = -ENOENT; + goto out_error; + } + /* Avoid truncation on 32-bit. */ +#if BITS_PER_LONG == 32 + if (v > ULONG_MAX) { + pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n", + __func__, v); + err = -EINVAL; + goto out_error; + } +#endif xen_store_gfn = (unsigned long)v; xen_store_interface = xen_remap(xen_store_gfn << XEN_PAGE_SHIFT, @@ -983,8 +1006,10 @@ static int __init xenbus_init(void) */ proc_create_mount_point("xen"); #endif + return 0; out_error: + xen_store_domain_type = XS_UNKNOWN; return err; } diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c index 480944606a3c..07b010a68fcf 100644 --- a/drivers/xen/xenbus/xenbus_probe_frontend.c +++ b/drivers/xen/xenbus/xenbus_probe_frontend.c @@ -211,19 +211,11 @@ static int is_device_connecting(struct device *dev, void *data, bool ignore_none if (drv && (dev->driver != drv)) return 0; - if (ignore_nonessential) { - /* With older QEMU, for PVonHVM guests the guest config files - * could contain: vfb = [ 'vnc=1, vnclisten=0.0.0.0'] - * which is nonsensical as there is no PV FB (there can be - * a PVKB) running as HVM guest. */ + xendrv = to_xenbus_driver(dev->driver); - if ((strncmp(xendev->nodename, "device/vkbd", 11) == 0)) - return 0; + if (ignore_nonessential && xendrv->not_essential) + return 0; - if ((strncmp(xendev->nodename, "device/vfb", 10) == 0)) - return 0; - } - xendrv = to_xenbus_driver(dev->driver); return (xendev->state < XenbusStateConnected || (xendev->state == XenbusStateConnected && xendrv->is_ready && !xendrv->is_ready(xendev))); @@ -181,8 +181,9 @@ struct poll_iocb { struct file *file; struct wait_queue_head *head; __poll_t events; - bool done; bool cancelled; + bool work_scheduled; + bool work_need_resched; struct wait_queue_entry wait; struct work_struct work; }; @@ -1619,6 +1620,51 @@ static void aio_poll_put_work(struct work_struct *work) iocb_put(iocb); } +/* + * Safely lock the waitqueue which the request is on, synchronizing with the + * case where the ->poll() provider decides to free its waitqueue early.
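+ * On success the caller must eventually call poll_iocb_unlock_wq(), which drops head->lock and then leaves the RCU read-side critical section.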
+ * + * Returns true on success, meaning that req->head->lock was locked, req->wait + * is on req->head, and an RCU read lock was taken. Returns false if the + * request was already removed from its waitqueue (which might no longer exist). + */ +static bool poll_iocb_lock_wq(struct poll_iocb *req) +{ + wait_queue_head_t *head; + + /* + * While we hold the waitqueue lock and the waitqueue is nonempty, + * wake_up_pollfree() will wait for us. However, taking the waitqueue + * lock in the first place can race with the waitqueue being freed. + * + * We solve this as eventpoll does: by taking advantage of the fact that + * all users of wake_up_pollfree() will RCU-delay the actual free. If + * we enter rcu_read_lock() and see that the pointer to the queue is + * non-NULL, we can then lock it without the memory being freed out from + * under us, then check whether the request is still on the queue. + * + * Keep holding rcu_read_lock() as long as we hold the queue lock, in + * case the caller deletes the entry from the queue, leaving it empty. + * In that case, only RCU prevents the queue memory from being freed. + */ + rcu_read_lock(); + head = smp_load_acquire(&req->head); + if (head) { + spin_lock(&head->lock); + if (!list_empty(&req->wait.entry)) + return true; + spin_unlock(&head->lock); + } + rcu_read_unlock(); + return false; +} + +static void poll_iocb_unlock_wq(struct poll_iocb *req) +{ + spin_unlock(&req->head->lock); + rcu_read_unlock(); +} + static void aio_poll_complete_work(struct work_struct *work) { struct poll_iocb *req = container_of(work, struct poll_iocb, work); @@ -1638,14 +1684,27 @@ static void aio_poll_complete_work(struct work_struct *work) * avoid further branches in the fast path. */ spin_lock_irq(&ctx->ctx_lock); - if (!mask && !READ_ONCE(req->cancelled)) { - add_wait_queue(req->head, &req->wait); - spin_unlock_irq(&ctx->ctx_lock); - return; - } + if (poll_iocb_lock_wq(req)) { + if (!mask && !READ_ONCE(req->cancelled)) { + /* + * The request isn't actually ready to be completed yet. + * Reschedule completion if another wakeup came in. 
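+ * A wakeup that arrives while the work is already running may carry events that this invocation's vfs_poll() did not observe, so in that case the work must run once more instead of going idle.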
+ */ + if (req->work_need_resched) { + schedule_work(&req->work); + req->work_need_resched = false; + } else { + req->work_scheduled = false; + } + poll_iocb_unlock_wq(req); + spin_unlock_irq(&ctx->ctx_lock); + return; + } + list_del_init(&req->wait.entry); + poll_iocb_unlock_wq(req); + } /* else, POLLFREE has freed the waitqueue, so we must complete */ list_del_init(&iocb->ki_list); iocb->ki_res.res = mangle_poll(mask); - req->done = true; spin_unlock_irq(&ctx->ctx_lock); iocb_put(iocb); @@ -1657,13 +1716,14 @@ static int aio_poll_cancel(struct kiocb *iocb) struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw); struct poll_iocb *req = &aiocb->poll; - spin_lock(&req->head->lock); - WRITE_ONCE(req->cancelled, true); - if (!list_empty(&req->wait.entry)) { - list_del_init(&req->wait.entry); - schedule_work(&aiocb->poll.work); - } - spin_unlock(&req->head->lock); + if (poll_iocb_lock_wq(req)) { + WRITE_ONCE(req->cancelled, true); + if (!req->work_scheduled) { + schedule_work(&aiocb->poll.work); + req->work_scheduled = true; + } + poll_iocb_unlock_wq(req); + } /* else, the request was force-cancelled by POLLFREE already */ return 0; } @@ -1680,21 +1740,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, if (mask && !(mask & req->events)) return 0; - list_del_init(&req->wait.entry); - - if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) { + /* + * Complete the request inline if possible. This requires that three + * conditions be met: + * 1. An event mask must have been passed. If a plain wakeup was done + * instead, then mask == 0 and we have to call vfs_poll() to get + * the events, so inline completion isn't possible. + * 2. The completion work must not have already been scheduled. + * 3. ctx_lock must not be busy. We have to use trylock because we + * already hold the waitqueue lock, so this inverts the normal + * locking order. Use irqsave/irqrestore because not all + * filesystems (e.g. fuse) call this function with IRQs disabled, + * yet IRQs have to be disabled before ctx_lock is obtained. + */ + if (mask && !req->work_scheduled && + spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) { struct kioctx *ctx = iocb->ki_ctx; - /* - * Try to complete the iocb inline if we can. Use - * irqsave/irqrestore because not all filesystems (e.g. fuse) - * call this function with IRQs disabled and because IRQs - * have to be disabled before ctx_lock is obtained. - */ + list_del_init(&req->wait.entry); list_del(&iocb->ki_list); iocb->ki_res.res = mangle_poll(mask); - req->done = true; - if (iocb->ki_eventfd && eventfd_signal_allowed()) { + if (iocb->ki_eventfd && !eventfd_signal_allowed()) { iocb = NULL; INIT_WORK(&req->work, aio_poll_put_work); schedule_work(&req->work); @@ -1703,7 +1769,43 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, if (iocb) iocb_put(iocb); } else { - schedule_work(&req->work); + /* + * Schedule the completion work if needed. If it was already + * scheduled, record that another wakeup came in. + * + * Don't remove the request from the waitqueue here, as it might + * not actually be complete yet (we won't know until vfs_poll() + * is called), and we must not miss any wakeups. POLLFREE is an + * exception to this; see below. 
+ */ + if (req->work_scheduled) { + req->work_need_resched = true; + } else { + schedule_work(&req->work); + req->work_scheduled = true; + } + + /* + * If the waitqueue is being freed early but we can't complete + * the request inline, we have to tear down the request as best + * we can. That means immediately removing the request from its + * waitqueue and preventing all further accesses to the + * waitqueue via the request. We also need to schedule the + * completion work (done above). Also mark the request as + * cancelled, to potentially skip an unneeded call to ->poll(). + */ + if (mask & POLLFREE) { + WRITE_ONCE(req->cancelled, true); + list_del_init(&req->wait.entry); + + /* + * Careful: this *must* be the last step, since as soon + * as req->head is NULL'ed out, the request can be + * completed and freed, since aio_poll_complete_work() + * will no longer need to take the waitqueue lock. + */ + smp_store_release(&req->head, NULL); + } } return 1; } @@ -1711,6 +1813,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, struct aio_poll_table { struct poll_table_struct pt; struct aio_kiocb *iocb; + bool queued; int error; }; @@ -1721,11 +1824,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head, struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt); /* multiple wait queues per file are not supported */ - if (unlikely(pt->iocb->poll.head)) { + if (unlikely(pt->queued)) { pt->error = -EINVAL; return; } + pt->queued = true; pt->error = 0; pt->iocb->poll.head = head; add_wait_queue(head, &pt->iocb->poll.wait); @@ -1750,12 +1854,14 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP; req->head = NULL; - req->done = false; req->cancelled = false; + req->work_scheduled = false; + req->work_need_resched = false; apt.pt._qproc = aio_poll_queue_proc; apt.pt._key = req->events; apt.iocb = aiocb; + apt.queued = false; apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */ /* initialized the list so that we can do list_empty checks */ @@ -1764,23 +1870,35 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) mask = vfs_poll(req->file, &apt.pt) & req->events; spin_lock_irq(&ctx->ctx_lock); - if (likely(req->head)) { - spin_lock(&req->head->lock); - if (unlikely(list_empty(&req->wait.entry))) { - if (apt.error) + if (likely(apt.queued)) { + bool on_queue = poll_iocb_lock_wq(req); + + if (!on_queue || req->work_scheduled) { + /* + * aio_poll_wake() already either scheduled the async + * completion work, or completed the request inline. + */ + if (apt.error) /* unsupported case: multiple queues */ cancel = true; apt.error = 0; mask = 0; } if (mask || apt.error) { + /* Steal to complete synchronously. */ list_del_init(&req->wait.entry); } else if (cancel) { + /* Cancel if possible (may be too late though). */ WRITE_ONCE(req->cancelled, true); - } else if (!req->done) { /* actually waiting for an event */ + } else if (on_queue) { + /* + * Actually waiting for an event, so add the request to + * active_reqs so that it can be cancelled if needed. 
+ */ list_add_tail(&aiocb->ki_list, &ctx->active_reqs); aiocb->ki_cancel = aio_poll_cancel; } - spin_unlock(&req->head->lock); + if (on_queue) + poll_iocb_unlock_wq(req); } if (mask) { /* no async, we'd stolen it */ aiocb->ki_res.res = mangle_poll(mask); diff --git a/fs/attr.c b/fs/attr.c index 473d21b3a86d..66899b6e9bd8 100644 --- a/fs/attr.c +++ b/fs/attr.c @@ -35,7 +35,7 @@ static bool chown_ok(struct user_namespace *mnt_userns, kuid_t uid) { kuid_t kuid = i_uid_into_mnt(mnt_userns, inode); - if (uid_eq(current_fsuid(), kuid) && uid_eq(uid, kuid)) + if (uid_eq(current_fsuid(), kuid) && uid_eq(uid, inode->i_uid)) return true; if (capable_wrt_inode_uidgid(mnt_userns, inode, CAP_CHOWN)) return true; @@ -62,7 +62,7 @@ static bool chgrp_ok(struct user_namespace *mnt_userns, { kgid_t kgid = i_gid_into_mnt(mnt_userns, inode); if (uid_eq(current_fsuid(), i_uid_into_mnt(mnt_userns, inode)) && - (in_group_p(gid) || gid_eq(gid, kgid))) + (in_group_p(gid) || gid_eq(gid, inode->i_gid))) return true; if (capable_wrt_inode_uidgid(mnt_userns, inode, CAP_CHOWN)) return true; diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 309516e6a968..43c89952b7d2 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -234,6 +234,13 @@ static void run_ordered_work(struct __btrfs_workqueue *wq, ordered_list); if (!test_bit(WORK_DONE_BIT, &work->flags)) break; + /* + * Orders all subsequent loads after reading WORK_DONE_BIT; + * paired with the smp_mb__before_atomic in btrfs_work_helper, + * this guarantees that the ordered function will see all + * updates from the ordinary work function. + */ + smp_rmb(); /* * we are going to call the ordered done function, but @@ -317,6 +324,13 @@ static void btrfs_work_helper(struct work_struct *normal_work) thresh_exec_hook(wq); work->func(work); if (need_order) { + /* + * Ensures all memory accesses done in the work function are + * ordered before setting the WORK_DONE_BIT, ensuring the thread + * which is going to execute the ordered work sees them. + * Pairs with the smp_rmb in run_ordered_work. + */ + smp_mb__before_atomic(); set_bit(WORK_DONE_BIT, &work->flags); run_ordered_work(wq, work); } else { diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c index 2059d1504149..40c4d6ba3fb9 100644 --- a/fs/btrfs/delalloc-space.c +++ b/fs/btrfs/delalloc-space.c @@ -143,10 +143,13 @@ int btrfs_check_data_free_space(struct btrfs_inode *inode, /* Use new btrfs_qgroup_reserve_data to reserve precious data space.
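 * If the qgroup reservation fails, the data space reserved above is released and the changeset behind *reserved is freed and NULLed so that callers cannot double-free it.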
*/ ret = btrfs_qgroup_reserve_data(inode, reserved, start, len); - if (ret < 0) + if (ret < 0) { btrfs_free_reserved_data_space_noquota(fs_info, len); - else + extent_changeset_free(*reserved); + *reserved = NULL; + } else { ret = 0; + } return ret; } @@ -452,8 +455,11 @@ int btrfs_delalloc_reserve_space(struct btrfs_inode *inode, if (ret < 0) return ret; ret = btrfs_delalloc_reserve_metadata(inode, len); - if (ret < 0) + if (ret < 0) { btrfs_free_reserved_data_space(inode, *reserved, start, len); + extent_changeset_free(*reserved); + *reserved = NULL; + } return ret; } diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 59c3be8c1f4c..514ead6e93b6 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3978,11 +3978,23 @@ static void btrfs_end_empty_barrier(struct bio *bio) */ static void write_dev_flush(struct btrfs_device *device) { - struct request_queue *q = bdev_get_queue(device->bdev); struct bio *bio = device->flush_bio; +#ifndef CONFIG_BTRFS_FS_CHECK_INTEGRITY + /* + * When a disk has write caching disabled, we skip submission of a bio + * with flush and sync requests before writing the superblock, since + * it's not needed. However when the integrity checker is enabled, this + * results in reports that there are metadata blocks referred to by a + * superblock that were not properly flushed. So, for simplicity, don't + * skip the bio submission whenever the integrity checker is enabled, + * since it is a debug tool and not meant for use in non-debug builds. + */ + struct request_queue *q = bdev_get_queue(device->bdev); if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) return; +#endif bio_reset(bio); bio->bi_end_io = btrfs_end_empty_barrier; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 3fd736a02c1e..fc4895e6a62c 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6051,6 +6051,9 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range) int dev_ret = 0; int ret = 0; + if (range->start == U64_MAX) + return -EINVAL; + /* * Check range overflow if range->len is set. * The default range->len is U64_MAX. diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 4e03a6d3aa32..3258b6f01e85 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4314,6 +4314,20 @@ static void set_btree_ioerr(struct page *page, struct extent_buffer *eb) return; /* + * A read may stumble upon this buffer later; make sure that it sees + * the error. + */ + clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); + + /* + * We need to set the mapping with the I/O error as well because a write + * error will flip the file system readonly, and then syncfs() will + * return 0 even though we are readonly, if we don't modify the err seq + * for the superblock. + */ + mapping_set_error(page->mapping, -EIO); + + /* * If we error out, we should add back the dirty_metadata_bytes * to make it consistent. */ diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index fb8cc9642ac4..2b84846ed934 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -3187,10 +3187,8 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg) return -EPERM; vol_args = memdup_user(arg, sizeof(*vol_args)); - if (IS_ERR(vol_args)) { - ret = PTR_ERR(vol_args); - goto out; - } + if (IS_ERR(vol_args)) + return PTR_ERR(vol_args); if (vol_args->flags & ~BTRFS_DEVICE_REMOVE_ARGS_MASK) { ret = -EOPNOTSUPP; @@ -3985,6 +3983,10 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg) bool need_unlock; /* for mut. excl.
ops lock */ int ret; + if (!arg) + btrfs_warn(fs_info, + "IOC_BALANCE ioctl (v1) is deprecated and will be removed in kernel 5.18"); + if (!capable(CAP_SYS_ADMIN)) return -EPERM; diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index 65cb0766e62d..0fb90cbe7669 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -125,6 +125,7 @@ static inline size_t read_compress_length(const char *buf) static int copy_compressed_data_to_page(char *compressed_data, size_t compressed_size, struct page **out_pages, + unsigned long max_nr_page, u32 *cur_out, const u32 sectorsize) { @@ -133,6 +134,9 @@ static int copy_compressed_data_to_page(char *compressed_data, struct page *cur_page; char *kaddr; + if ((*cur_out / PAGE_SIZE) >= max_nr_page) + return -E2BIG; + /* * We never allow a segment header crossing sector boundary, previous * run should ensure we have enough space left inside the sector. @@ -161,6 +165,10 @@ static int copy_compressed_data_to_page(char *compressed_data, orig_out + compressed_size - *cur_out); kunmap(cur_page); + + if ((*cur_out / PAGE_SIZE) >= max_nr_page) + return -E2BIG; + cur_page = out_pages[*cur_out / PAGE_SIZE]; /* Allocate a new page */ if (!cur_page) { @@ -203,6 +211,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping, const u32 sectorsize = btrfs_sb(mapping->host->i_sb)->sectorsize; struct page *page_in = NULL; char *sizes_ptr; + const unsigned long max_nr_page = *out_pages; int ret = 0; /* Points to the file offset of input data */ u64 cur_in = start; @@ -210,6 +219,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping, u32 cur_out = 0; u32 len = *total_out; + ASSERT(max_nr_page > 0); *out_pages = 0; *total_out = 0; *total_in = 0; @@ -248,7 +258,8 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping, } ret = copy_compressed_data_to_page(workspace->cbuf, out_len, - pages, &cur_out, sectorsize); + pages, max_nr_page, + &cur_out, sectorsize); if (ret < 0) goto out; @@ -279,6 +290,8 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping, *total_out = cur_out; *total_in = cur_in - start; out: + if (page_in) + put_page(page_in); *out_pages = DIV_ROUND_UP(cur_out, PAGE_SIZE); return ret; } diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 12ceb14a1141..d20166336557 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -334,7 +334,8 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id, key.offset = ref_id; again: ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1); - BUG_ON(ret < 0); + if (ret < 0) + goto out; if (ret == 0) { leaf = path->nodes[0]; ref = btrfs_item_ptr(leaf, path->slots[0], diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index cf82ea6f54fb..8f6ceea33969 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -73,8 +73,8 @@ struct scrub_page { u64 physical_for_dev_replace; atomic_t refs; u8 mirror_num; - int have_csum:1; - int io_error:1; + unsigned int have_csum:1; + unsigned int io_error:1; u8 csum[BTRFS_CSUM_SIZE]; struct scrub_recover *recover; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 8ab33caf016f..3e6f14e13918 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2908,6 +2908,8 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans, path->nodes[*level]->len); if (ret) return ret; + btrfs_redirty_list_add(trans->transaction, + next); } else { if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) clear_extent_buffer_dirty(next); @@ -2988,6 +2990,7 @@ static int 
walk_log_tree(struct btrfs_trans_handle *trans, next->start, next->len); if (ret) goto out; + btrfs_redirty_list_add(trans->transaction, next); } else { if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) clear_extent_buffer_dirty(next); @@ -3438,8 +3441,6 @@ static void free_log_tree(struct btrfs_trans_handle *trans, EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT); extent_io_tree_release(&log->log_csum_range); - if (trans && log->node) - btrfs_redirty_list_add(trans->transaction, log->node); btrfs_put_root(log); } diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 61ac57bcbf1a..0997e3cd74e9 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -7559,6 +7559,19 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) fs_info->fs_devices->total_rw_bytes = 0; /* + * Lockdep complains about possible circular locking dependency between + * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores + * used for freeze protection of a fs (struct super_block.s_writers), + * which we take when starting a transaction, and extent buffers of the + * chunk tree if we call read_one_dev() while holding a lock on an + * extent buffer of the chunk tree. Since we are mounting the filesystem + * and at this point there can't be any concurrent task modifying the + * chunk tree, to keep it simple, just skip locking on the chunk tree. + */ + ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags)); + path->skip_locking = 1; + + /* + * Read all device items, and then all the chunk items. All + * device items are found before any chunk item (their object id + * is smaller than the lowest possible object id for a chunk @@ -7583,10 +7596,6 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) goto error; break; } - /* - * The nodes on level 1 are not locked but we don't need to do - * that during mount time as nothing else can access the tree - */ node = path->nodes[1]; if (node) { if (last_ra_node != node->start) { @@ -7614,7 +7623,6 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) * requirement for chunk allocation, see the comment on * top of btrfs_chunk_alloc() for details.
*/ - ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags)); chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); ret = read_one_chunk(&found_key, leaf, chunk); if (ret) diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c index 67d932d70798..678a29469511 100644 --- a/fs/btrfs/zoned.c +++ b/fs/btrfs/zoned.c @@ -1860,6 +1860,7 @@ int btrfs_zone_finish(struct btrfs_block_group *block_group) block_group->alloc_offset = block_group->zone_capacity; block_group->free_space_ctl->free_space = 0; btrfs_clear_treelog_bg(block_group); + btrfs_clear_data_reloc_bg(block_group); spin_unlock(&block_group->lock); ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH, @@ -1942,6 +1943,7 @@ void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 len ASSERT(block_group->alloc_offset == block_group->zone_capacity); ASSERT(block_group->free_space_ctl->free_space == 0); btrfs_clear_treelog_bg(block_group); + btrfs_clear_data_reloc_bg(block_group); spin_unlock(&block_group->lock); map = block_group->physical_map; diff --git a/fs/cifs/cifs_swn.c b/fs/cifs/cifs_swn.c index 12bde7bfda86..23a1ed2fb769 100644 --- a/fs/cifs/cifs_swn.c +++ b/fs/cifs/cifs_swn.c @@ -393,26 +393,14 @@ static void cifs_put_swn_reg(struct cifs_swn_reg *swnreg) static int cifs_swn_resource_state_changed(struct cifs_swn_reg *swnreg, const char *name, int state) { - int i; - switch (state) { case CIFS_SWN_RESOURCE_STATE_UNAVAILABLE: cifs_dbg(FYI, "%s: resource name '%s' become unavailable\n", __func__, name); - for (i = 0; i < swnreg->tcon->ses->chan_count; i++) { - spin_lock(&GlobalMid_Lock); - if (swnreg->tcon->ses->chans[i].server->tcpStatus != CifsExiting) - swnreg->tcon->ses->chans[i].server->tcpStatus = CifsNeedReconnect; - spin_unlock(&GlobalMid_Lock); - } + cifs_ses_mark_for_reconnect(swnreg->tcon->ses); break; case CIFS_SWN_RESOURCE_STATE_AVAILABLE: cifs_dbg(FYI, "%s: resource name '%s' become available\n", __func__, name); - for (i = 0; i < swnreg->tcon->ses->chan_count; i++) { - spin_lock(&GlobalMid_Lock); - if (swnreg->tcon->ses->chans[i].server->tcpStatus != CifsExiting) - swnreg->tcon->ses->chans[i].server->tcpStatus = CifsNeedReconnect; - spin_unlock(&GlobalMid_Lock); - } + cifs_ses_mark_for_reconnect(swnreg->tcon->ses); break; case CIFS_SWN_RESOURCE_STATE_UNKNOWN: cifs_dbg(FYI, "%s: resource name '%s' changed to unknown state\n", __func__, name); diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index b50da1901ebd..9e5d9e192ef0 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -152,5 +152,5 @@ extern struct dentry *cifs_smb3_do_mount(struct file_system_type *fs_type, extern const struct export_operations cifs_export_ops; #endif /* CONFIG_CIFS_NFSD_EXPORT */ -#define CIFS_VERSION "2.33" +#define CIFS_VERSION "2.34" #endif /* _CIFSFS_H */ diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index f3073a62ce57..4f5a3e857df4 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -599,6 +599,7 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses) bool is_server_using_iface(struct TCP_Server_Info *server, struct cifs_server_iface *iface); bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface); +void cifs_ses_mark_for_reconnect(struct cifs_ses *ses); void extract_unc_hostname(const char *unc, const char **h, size_t *len); int copy_path_name(char *dst, const char *src); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 82577a7a5bb1..18448dbd762a 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1271,10 +1271,8 @@ static int 
match_server(struct TCP_Server_Info *server, struct smb3_fs_context * { struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr; - if (ctx->nosharesock) { - server->nosharesock = true; + if (ctx->nosharesock) return 0; - } /* this server does not share socket */ if (server->nosharesock) @@ -1438,6 +1436,9 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx, goto out_err; } + if (ctx->nosharesock) + tcp_ses->nosharesock = true; + tcp_ses->ops = ctx->ops; tcp_ses->vals = ctx->vals; cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns)); @@ -1452,8 +1453,10 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx, tcp_ses->max_in_flight = 0; tcp_ses->credits = 1; if (primary_server) { + spin_lock(&cifs_tcp_ses_lock); ++primary_server->srv_count; tcp_ses->primary_server = primary_server; + spin_unlock(&cifs_tcp_ses_lock); } init_waitqueue_head(&tcp_ses->response_q); init_waitqueue_head(&tcp_ses->request_q); @@ -1559,6 +1562,10 @@ smbd_connected: /* fscache server cookies are based on primary channel only */ if (!CIFS_SERVER_IS_CHAN(tcp_ses)) cifs_fscache_get_client_cookie(tcp_ses); +#ifdef CONFIG_CIFS_FSCACHE + else + tcp_ses->fscache = tcp_ses->primary_server->fscache; +#endif /* CONFIG_CIFS_FSCACHE */ /* queue echo request delayed work */ queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval); @@ -3043,12 +3050,6 @@ static int mount_get_conns(struct mount_ctx *mnt_ctx) cifs_dbg(VFS, "read only mount of RW share\n"); /* no need to log a RW mount of a typical RW share */ } - /* - * The cookie is initialized from volume info returned above. - * Inside cifs_fscache_get_super_cookie it checks - * that we do not get super cookie twice. - */ - cifs_fscache_get_super_cookie(tcon); } /* @@ -3423,6 +3424,7 @@ static int connect_dfs_root(struct mount_ctx *mnt_ctx, struct dfs_cache_tgt_list */ mount_put_conns(mnt_ctx); mount_get_dfs_conns(mnt_ctx); + set_root_ses(mnt_ctx); full_path = build_unc_path_to_root(ctx, cifs_sb, true); if (IS_ERR(full_path)) @@ -4111,18 +4113,6 @@ cifs_prune_tlinks(struct work_struct *work) } #ifdef CONFIG_CIFS_DFS_UPCALL -static void mark_tcon_tcp_ses_for_reconnect(struct cifs_tcon *tcon) -{ - int i; - - for (i = 0; i < tcon->ses->chan_count; i++) { - spin_lock(&GlobalMid_Lock); - if (tcon->ses->chans[i].server->tcpStatus != CifsExiting) - tcon->ses->chans[i].server->tcpStatus = CifsNeedReconnect; - spin_unlock(&GlobalMid_Lock); - } -} - /* Update dfs referral path of superblock */ static int update_server_fullpath(struct TCP_Server_Info *server, struct cifs_sb_info *cifs_sb, const char *target) @@ -4299,7 +4289,7 @@ static int tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tco */ if (rc && server->current_fullpath != server->origin_fullpath) { server->current_fullpath = server->origin_fullpath; - mark_tcon_tcp_ses_for_reconnect(tcon); + cifs_ses_mark_for_reconnect(tcon->ses); } dfs_cache_free_tgts(tl); diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c index 5c1259d2eeac..e9b0fa2a9614 100644 --- a/fs/cifs/dfs_cache.c +++ b/fs/cifs/dfs_cache.c @@ -1355,12 +1355,7 @@ static void mark_for_reconnect_if_needed(struct cifs_tcon *tcon, struct dfs_cach } cifs_dbg(FYI, "%s: no cached or matched targets. 
mark dfs share for reconnect.\n", __func__); - for (i = 0; i < tcon->ses->chan_count; i++) { - spin_lock(&GlobalMid_Lock); - if (tcon->ses->chans[i].server->tcpStatus != CifsExiting) - tcon->ses->chans[i].server->tcpStatus = CifsNeedReconnect; - spin_unlock(&GlobalMid_Lock); - } + cifs_ses_mark_for_reconnect(tcon->ses); } /* Refresh dfs referral of tcon and mark it for reconnect if needed */ diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c index 7e409a38a2d7..003c5f1f4dfb 100644 --- a/fs/cifs/fscache.c +++ b/fs/cifs/fscache.c @@ -16,14 +16,7 @@ * Key layout of CIFS server cache index object */ struct cifs_server_key { - struct { - uint16_t family; /* address family */ - __be16 port; /* IP port */ - } hdr; - union { - struct in_addr ipv4_addr; - struct in6_addr ipv6_addr; - }; + __u64 conn_id; } __packed; /* @@ -31,42 +24,23 @@ struct cifs_server_key { */ void cifs_fscache_get_client_cookie(struct TCP_Server_Info *server) { - const struct sockaddr *sa = (struct sockaddr *) &server->dstaddr; - const struct sockaddr_in *addr = (struct sockaddr_in *) sa; - const struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *) sa; struct cifs_server_key key; - uint16_t key_len = sizeof(key.hdr); - - memset(&key, 0, sizeof(key)); /* - * Should not be a problem as sin_family/sin6_family overlays - * sa_family field + * Check if cookie was already initialized so don't reinitialize it. + * In the future, as we integrate with newer fscache features, + * we may want to instead add a check if cookie has changed */ - key.hdr.family = sa->sa_family; - switch (sa->sa_family) { - case AF_INET: - key.hdr.port = addr->sin_port; - key.ipv4_addr = addr->sin_addr; - key_len += sizeof(key.ipv4_addr); - break; - - case AF_INET6: - key.hdr.port = addr6->sin6_port; - key.ipv6_addr = addr6->sin6_addr; - key_len += sizeof(key.ipv6_addr); - break; - - default: - cifs_dbg(VFS, "Unknown network family '%d'\n", sa->sa_family); - server->fscache = NULL; + if (server->fscache) return; - } + + memset(&key, 0, sizeof(key)); + key.conn_id = server->conn_id; server->fscache = fscache_acquire_cookie(cifs_fscache_netfs.primary_index, &cifs_fscache_server_index_def, - &key, key_len, + &key, sizeof(key), NULL, 0, server, 0, true); cifs_dbg(FYI, "%s: (0x%p/0x%p)\n", @@ -92,7 +66,7 @@ void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon) * In the future, as we integrate with newer fscache features, * we may want to instead add a check if cookie has changed */ - if (tcon->fscache == NULL) + if (tcon->fscache) return; sharename = extract_sharename(tcon->treeName); diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 82848412ad85..96d083db1737 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1376,6 +1376,13 @@ iget_no_retry: inode = ERR_PTR(rc); } + /* + * The cookie is initialized from volume info returned above. + * Inside cifs_fscache_get_super_cookie it checks + * that we do not get super cookie twice. 
+ */ + cifs_fscache_get_super_cookie(tcon); + out: kfree(path); free_xid(xid); diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 2c10b186ed6e..035dc3e245dc 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -95,9 +95,9 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses) } if (!(ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) { - cifs_dbg(VFS, "server %s does not support multichannel\n", ses->server->hostname); ses->chan_max = 1; spin_unlock(&ses->chan_lock); + cifs_dbg(VFS, "server %s does not support multichannel\n", ses->server->hostname); return 0; } spin_unlock(&ses->chan_lock); @@ -222,6 +222,7 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses, /* Auth */ ctx.domainauto = ses->domainAuto; ctx.domainname = ses->domainName; + ctx.server_hostname = ses->server->hostname; ctx.username = ses->user_name; ctx.password = ses->password; ctx.sectype = ses->sectype; @@ -318,6 +319,19 @@ out: return rc; } +/* Mark all session channels for reconnect */ +void cifs_ses_mark_for_reconnect(struct cifs_ses *ses) +{ + int i; + + for (i = 0; i < ses->chan_count; i++) { + spin_lock(&GlobalMid_Lock); + if (ses->chans[i].server->tcpStatus != CifsExiting) + ses->chans[i].server->tcpStatus = CifsNeedReconnect; + spin_unlock(&GlobalMid_Lock); + } +} + static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB) { __u32 capabilities = 0; @@ -576,8 +590,8 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, { unsigned int tioffset; /* challenge message target info area */ unsigned int tilen; /* challenge message target info area length */ - CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr; + __u32 server_flags; if (blob_len < sizeof(CHALLENGE_MESSAGE)) { cifs_dbg(VFS, "challenge blob len %d too small\n", blob_len); @@ -595,12 +609,37 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, return -EINVAL; } + server_flags = le32_to_cpu(pblob->NegotiateFlags); + cifs_dbg(FYI, "%s: negotiate=0x%08x challenge=0x%08x\n", __func__, + ses->ntlmssp->client_flags, server_flags); + + if ((ses->ntlmssp->client_flags & (NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN)) && + (!(server_flags & NTLMSSP_NEGOTIATE_56) && !(server_flags & NTLMSSP_NEGOTIATE_128))) { + cifs_dbg(VFS, "%s: requested signing/encryption but server did not return either 56-bit or 128-bit session key size\n", + __func__); + return -EINVAL; + } + if (!(server_flags & NTLMSSP_NEGOTIATE_NTLM) && !(server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC)) { + cifs_dbg(VFS, "%s: server does not seem to support either NTLMv1 or NTLMv2\n", __func__); + return -EINVAL; + } + if (ses->server->sign && !(server_flags & NTLMSSP_NEGOTIATE_SIGN)) { + cifs_dbg(VFS, "%s: forced packet signing but server does not seem to support it\n", + __func__); + return -EOPNOTSUPP; + } + if ((ses->ntlmssp->client_flags & NTLMSSP_NEGOTIATE_KEY_XCH) && + !(server_flags & NTLMSSP_NEGOTIATE_KEY_XCH)) + pr_warn_once("%s: authentication has been weakened as server does not support key exchange\n", + __func__); + + ses->ntlmssp->server_flags = server_flags; + memcpy(ses->ntlmssp->cryptkey, pblob->Challenge, CIFS_CRYPTO_KEY_SIZE); - /* BB we could decode pblob->NegotiateFlags; some may be useful */ /* In particular we can examine sign flags */ /* BB spec says that if AvId field of MsvAvTimestamp is populated then we must set the MIC field of the AUTHENTICATE_MESSAGE */ - ses->ntlmssp->server_flags = le32_to_cpu(pblob->NegotiateFlags); + tioffset = 
le32_to_cpu(pblob->TargetInfoArray.BufferOffset); tilen = le16_to_cpu(pblob->TargetInfoArray.Length); if (tioffset > blob_len || tioffset + tilen > blob_len) { @@ -707,13 +746,13 @@ int build_ntlmssp_negotiate_blob(unsigned char **pbuffer, flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC | - NTLMSSP_NEGOTIATE_SEAL; - if (server->sign) - flags |= NTLMSSP_NEGOTIATE_SIGN; + NTLMSSP_NEGOTIATE_ALWAYS_SIGN | NTLMSSP_NEGOTIATE_SEAL | + NTLMSSP_NEGOTIATE_SIGN; if (!server->session_estab || ses->ntlmssp->sesskey_per_smbsess) flags |= NTLMSSP_NEGOTIATE_KEY_XCH; tmp = *pbuffer + sizeof(NEGOTIATE_MESSAGE); + ses->ntlmssp->client_flags = flags; sec_blob->NegotiateFlags = cpu_to_le32(flags); /* these fields should be null in negotiate phase MS-NLMP 3.1.5.1.1 */ @@ -765,15 +804,8 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer, memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8); sec_blob->MessageType = NtLmAuthenticate; - flags = NTLMSSP_NEGOTIATE_56 | - NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO | - NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | - NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC | - NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED; - if (ses->server->sign) - flags |= NTLMSSP_NEGOTIATE_SIGN; - if (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess) - flags |= NTLMSSP_NEGOTIATE_KEY_XCH; + flags = ses->ntlmssp->server_flags | NTLMSSP_REQUEST_TARGET | + NTLMSSP_NEGOTIATE_TARGET_INFO | NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED; tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE); sec_blob->NegotiateFlags = cpu_to_le32(flags); @@ -820,9 +852,9 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer, *pbuffer, &tmp, nls_cp); - if (((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) || - (ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC)) - && !calc_seckey(ses)) { + if ((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) && + (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess) && + !calc_seckey(ses)) { memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE); sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer); sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE); diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 2f5f2c4c6183..8b3670388cda 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -142,7 +142,7 @@ static int smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon, struct TCP_Server_Info *server) { - int rc; + int rc = 0; struct nls_table *nls_codepage; struct cifs_ses *ses; int retries; diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c index 84da2c280012..ec9a1d780dc1 100644 --- a/fs/erofs/utils.c +++ b/fs/erofs/utils.c @@ -150,7 +150,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi, * however in order to avoid some race conditions, add a * DBG_BUGON to observe this in advance. */ - DBG_BUGON(xa_erase(&sbi->managed_pslots, grp->index) != grp); + DBG_BUGON(__xa_erase(&sbi->managed_pslots, grp->index) != grp); /* last refcount should be connected with its managed pslot. 
*/ erofs_workgroup_unfreeze(grp, 0); @@ -165,15 +165,19 @@ static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi, unsigned int freed = 0; unsigned long index; + xa_lock(&sbi->managed_pslots); xa_for_each(&sbi->managed_pslots, index, grp) { /* try to shrink each valid workgroup */ if (!erofs_try_to_release_workgroup(sbi, grp)) continue; + xa_unlock(&sbi->managed_pslots); ++freed; if (!--nr_shrink) - break; + return freed; + xa_lock(&sbi->managed_pslots); } + xa_unlock(&sbi->managed_pslots); return freed; } diff --git a/fs/file.c b/fs/file.c index 8627dacfc424..ad4a8bf3cf10 100644 --- a/fs/file.c +++ b/fs/file.c @@ -858,6 +858,10 @@ loop: file = NULL; else if (!get_file_rcu_many(file, refs)) goto loop; + else if (files_lookup_fd_raw(files, fd) != file) { + fput_many(file, refs); + goto loop; + } } rcu_read_unlock(); diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 79f7eda49e06..cd54a529460d 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -847,17 +847,17 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) replace_page_cache_page(oldpage, newpage); + get_page(newpage); + + if (!(buf->flags & PIPE_BUF_FLAG_LRU)) + lru_cache_add(newpage); + /* * Release while we have extra ref on stolen page. Otherwise * anon_pipe_buf_release() might think the page can be reused. */ pipe_buf_release(cs->pipe, buf); - get_page(newpage); - - if (!(buf->flags & PIPE_BUF_FLAG_LRU)) - lru_cache_add(newpage); - err = 0; spin_lock(&cs->req->waitq.lock); if (test_bit(FR_ABORTED, &cs->req->flags)) diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 7235d539e969..d67108489148 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -940,7 +940,7 @@ do_alloc: else if (height == ip->i_height) ret = gfs2_hole_size(inode, lblock, len, mp, iomap); else - iomap->length = size - pos; + iomap->length = size - iomap->offset; } else if (flags & IOMAP_WRITE) { u64 alloc_size; diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index adafaaf7d24d..3e718cfc19a7 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -773,8 +773,8 @@ static inline bool should_fault_in_pages(ssize_t ret, struct iov_iter *i, size_t *prev_count, size_t *window_size) { - char __user *p = i->iov[0].iov_base + i->iov_offset; size_t count = iov_iter_count(i); + char __user *p; int pages = 1; if (likely(!count)) @@ -787,14 +787,14 @@ static inline bool should_fault_in_pages(ssize_t ret, struct iov_iter *i, if (*prev_count != count || !*window_size) { int pages, nr_dirtied; - pages = min_t(int, BIO_MAX_VECS, - DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE)); + pages = min_t(int, BIO_MAX_VECS, DIV_ROUND_UP(count, PAGE_SIZE)); nr_dirtied = max(current->nr_dirtied_pause - current->nr_dirtied, 1); pages = min(pages, nr_dirtied); } *prev_count = count; + p = i->iov[0].iov_base + i->iov_offset; *window_size = (size_t)PAGE_SIZE * pages - offset_in_page(p); return true; } @@ -1013,6 +1013,7 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb, struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_holder *statfs_gh = NULL; size_t prev_count = 0, window_size = 0; + size_t orig_count = iov_iter_count(from); size_t read = 0; ssize_t ret; @@ -1057,6 +1058,7 @@ retry_under_glock: if (inode == sdp->sd_rindex) gfs2_glock_dq_uninit(statfs_gh); + from->count = orig_count - read; if (should_fault_in_pages(ret, from, &prev_count, &window_size)) { size_t leftover; @@ -1064,6 +1066,7 @@ retry_under_glock: leftover = fault_in_iov_iter_readable(from, window_size); gfs2_holder_disallow_demote(gh); if (leftover != window_size) { + from->count = 
min(from->count, window_size - leftover); if (!gfs2_holder_queued(gh)) { if (read) goto out_uninit; diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 19f38aee1b61..44a7a4288956 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -411,14 +411,14 @@ static void do_error(struct gfs2_glock *gl, const int ret) static void demote_incompat_holders(struct gfs2_glock *gl, struct gfs2_holder *new_gh) { - struct gfs2_holder *gh; + struct gfs2_holder *gh, *tmp; /* * Demote incompatible holders before we make ourselves eligible. * (This holder may or may not allow auto-demoting, but we don't want * to demote the new holder before it's even granted.) */ - list_for_each_entry(gh, &gl->gl_holders, gh_list) { + list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { /* * Since holders are at the front of the list, we stop when we * find the first non-holder. @@ -496,7 +496,7 @@ again: * Since we unlock the lockref lock, we set a flag to indicate * instantiate is in progress. */ - if (test_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags)) { + if (test_and_set_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags)) { wait_on_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG, TASK_UNINTERRUPTIBLE); /* @@ -509,14 +509,10 @@ again: goto again; } - set_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags); - ret = glops->go_instantiate(gh); if (!ret) clear_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags); - clear_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags); - smp_mb__after_atomic(); - wake_up_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG); + clear_and_wake_up_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags); return ret; } @@ -1861,7 +1857,6 @@ void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs) void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) { - struct gfs2_holder mock_gh = { .gh_gl = gl, .gh_state = state, }; unsigned long delay = 0; unsigned long holdtime; unsigned long now = jiffies; @@ -1894,8 +1889,13 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) * keep the glock until the last strong holder is done with it. */ if (!find_first_strong_holder(gl)) { - if (state == LM_ST_UNLOCKED) - mock_gh.gh_state = LM_ST_EXCLUSIVE; + struct gfs2_holder mock_gh = { + .gh_gl = gl, + .gh_state = (state == LM_ST_UNLOCKED) ? 
+ LM_ST_EXCLUSIVE : state, + .gh_iflags = BIT(HIF_HOLDER) + }; + demote_incompat_holders(gl, &mock_gh); } handle_callback(gl, state, delay, true); diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 6424b903e885..89905f4f29bb 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -40,37 +40,6 @@ static const struct inode_operations gfs2_file_iops; static const struct inode_operations gfs2_dir_iops; static const struct inode_operations gfs2_symlink_iops; -static int iget_test(struct inode *inode, void *opaque) -{ - u64 no_addr = *(u64 *)opaque; - - return GFS2_I(inode)->i_no_addr == no_addr; -} - -static int iget_set(struct inode *inode, void *opaque) -{ - u64 no_addr = *(u64 *)opaque; - - GFS2_I(inode)->i_no_addr = no_addr; - inode->i_ino = no_addr; - return 0; -} - -static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr) -{ - struct inode *inode; - -repeat: - inode = iget5_locked(sb, no_addr, iget_test, iget_set, &no_addr); - if (!inode) - return inode; - if (is_bad_inode(inode)) { - iput(inode); - goto repeat; - } - return inode; -} - /** * gfs2_set_iop - Sets inode operations * @inode: The inode with correct i_mode filled in @@ -104,6 +73,22 @@ static void gfs2_set_iop(struct inode *inode) } } +static int iget_test(struct inode *inode, void *opaque) +{ + u64 no_addr = *(u64 *)opaque; + + return GFS2_I(inode)->i_no_addr == no_addr; +} + +static int iget_set(struct inode *inode, void *opaque) +{ + u64 no_addr = *(u64 *)opaque; + + GFS2_I(inode)->i_no_addr = no_addr; + inode->i_ino = no_addr; + return 0; +} + /** * gfs2_inode_lookup - Lookup an inode * @sb: The super block @@ -132,12 +117,11 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type, { struct inode *inode; struct gfs2_inode *ip; - struct gfs2_glock *io_gl = NULL; struct gfs2_holder i_gh; int error; gfs2_holder_mark_uninitialized(&i_gh); - inode = gfs2_iget(sb, no_addr); + inode = iget5_locked(sb, no_addr, iget_test, iget_set, &no_addr); if (!inode) return ERR_PTR(-ENOMEM); @@ -145,22 +129,16 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type, if (inode->i_state & I_NEW) { struct gfs2_sbd *sdp = GFS2_SB(inode); + struct gfs2_glock *io_gl; error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl); if (unlikely(error)) goto fail; - flush_delayed_work(&ip->i_gl->gl_work); - - error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl); - if (unlikely(error)) - goto fail; - if (blktype != GFS2_BLKST_UNLINKED) - gfs2_cancel_delete_work(io_gl); if (type == DT_UNKNOWN || blktype != GFS2_BLKST_FREE) { /* * The GL_SKIP flag indicates to skip reading the inode - * block. We read the inode with gfs2_inode_refresh + * block. We read the inode when instantiating it * after possibly checking the block type. 
*/ error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, @@ -181,24 +159,31 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type, } } - glock_set_object(ip->i_gl, ip); set_bit(GLF_INSTANTIATE_NEEDED, &ip->i_gl->gl_flags); - error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh); + + error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl); if (unlikely(error)) goto fail; - glock_set_object(ip->i_iopen_gh.gh_gl, ip); + if (blktype != GFS2_BLKST_UNLINKED) + gfs2_cancel_delete_work(io_gl); + error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh); gfs2_glock_put(io_gl); - io_gl = NULL; + if (unlikely(error)) + goto fail; /* Lowest possible timestamp; will be overwritten in gfs2_dinode_in. */ inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1); inode->i_atime.tv_nsec = 0; + glock_set_object(ip->i_gl, ip); + if (type == DT_UNKNOWN) { /* Inode glock must be locked already */ error = gfs2_instantiate(&i_gh); - if (error) + if (error) { + glock_clear_object(ip->i_gl, ip); goto fail; + } } else { ip->i_no_formal_ino = no_formal_ino; inode->i_mode = DT2IF(type); @@ -206,31 +191,23 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type, if (gfs2_holder_initialized(&i_gh)) gfs2_glock_dq_uninit(&i_gh); + glock_set_object(ip->i_iopen_gh.gh_gl, ip); gfs2_set_iop(inode); + unlock_new_inode(inode); } if (no_formal_ino && ip->i_no_formal_ino && no_formal_ino != ip->i_no_formal_ino) { - error = -ESTALE; - if (inode->i_state & I_NEW) - goto fail; iput(inode); - return ERR_PTR(error); + return ERR_PTR(-ESTALE); } - if (inode->i_state & I_NEW) - unlock_new_inode(inode); - return inode; fail: - if (gfs2_holder_initialized(&ip->i_iopen_gh)) { - glock_clear_object(ip->i_iopen_gh.gh_gl, ip); + if (gfs2_holder_initialized(&ip->i_iopen_gh)) gfs2_glock_dq_uninit(&ip->i_iopen_gh); - } - if (io_gl) - gfs2_glock_put(io_gl); if (gfs2_holder_initialized(&i_gh)) gfs2_glock_dq_uninit(&i_gh); iget_failed(inode); @@ -730,18 +707,19 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl); if (error) goto fail_free_inode; - flush_delayed_work(&ip->i_gl->gl_work); error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl); if (error) goto fail_free_inode; gfs2_cancel_delete_work(io_gl); + error = insert_inode_locked4(inode, ip->i_no_addr, iget_test, &ip->i_no_addr); + BUG_ON(error); + error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1); if (error) goto fail_gunlock2; - glock_set_object(ip->i_gl, ip); error = gfs2_trans_begin(sdp, blocks, 0); if (error) goto fail_gunlock2; @@ -757,9 +735,9 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, if (error) goto fail_gunlock2; + glock_set_object(ip->i_gl, ip); glock_set_object(io_gl, ip); gfs2_set_iop(inode); - insert_inode_hash(inode); free_vfs_inode = 0; /* After this point, the inode is no longer considered free. 
Any failures need to undo @@ -801,17 +779,17 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, gfs2_glock_dq_uninit(ghs + 1); gfs2_glock_put(io_gl); gfs2_qa_put(dip); + unlock_new_inode(inode); return error; fail_gunlock3: + glock_clear_object(ip->i_gl, ip); glock_clear_object(io_gl, ip); gfs2_glock_dq_uninit(&ip->i_iopen_gh); fail_gunlock2: - glock_clear_object(io_gl, ip); gfs2_glock_put(io_gl); fail_free_inode: if (ip->i_gl) { - glock_clear_object(ip->i_gl, ip); if (free_vfs_inode) /* else evict will do the put for us */ gfs2_glock_put(ip->i_gl); } @@ -829,7 +807,10 @@ fail_gunlock: mark_inode_dirty(inode); set_bit(free_vfs_inode ? GIF_FREE_VFS_INODE : GIF_ALLOC_FAILED, &GFS2_I(inode)->i_flags); - iput(inode); + if (inode->i_state & I_NEW) + iget_failed(inode); + else + iput(inode); } if (gfs2_holder_initialized(ghs + 1)) gfs2_glock_dq_uninit(ghs + 1); diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index 5b121371508a..0f93e8beca4d 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -1402,13 +1402,6 @@ out: gfs2_ordered_del_inode(ip); clear_inode(inode); gfs2_dir_hash_inval(ip); - if (ip->i_gl) { - glock_clear_object(ip->i_gl, ip); - wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE); - gfs2_glock_add_to_lru(ip->i_gl); - gfs2_glock_put_eventually(ip->i_gl); - ip->i_gl = NULL; - } if (gfs2_holder_initialized(&ip->i_iopen_gh)) { struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl; @@ -1421,6 +1414,13 @@ out: gfs2_holder_uninit(&ip->i_iopen_gh); gfs2_glock_put_eventually(gl); } + if (ip->i_gl) { + glock_clear_object(ip->i_gl, ip); + wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE); + gfs2_glock_add_to_lru(ip->i_gl); + gfs2_glock_put_eventually(ip->i_gl); + ip->i_gl = NULL; + } } static struct inode *gfs2_alloc_inode(struct super_block *sb) diff --git a/fs/inode.c b/fs/inode.c index 3eba0940ffcf..6b80a51129d5 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -180,8 +180,6 @@ int inode_init_always(struct super_block *sb, struct inode *inode) mapping->a_ops = &empty_aops; mapping->host = inode; mapping->flags = 0; - if (sb->s_type->fs_flags & FS_THP_SUPPORT) - __set_bit(AS_THP_SUPPORT, &mapping->flags); mapping->wb_err = 0; atomic_set(&mapping->i_mmap_writable, 0); #ifdef CONFIG_READ_ONLY_THP_FOR_FS diff --git a/fs/io-wq.c b/fs/io-wq.c index 88202de519f6..8d2bb818a3bb 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -142,6 +142,7 @@ static bool io_acct_cancel_pending_work(struct io_wqe *wqe, struct io_wqe_acct *acct, struct io_cb_cancel_data *match); static void create_worker_cb(struct callback_head *cb); +static void io_wq_cancel_tw_create(struct io_wq *wq); static bool io_worker_get(struct io_worker *worker) { @@ -357,12 +358,22 @@ static bool io_queue_worker_create(struct io_worker *worker, test_and_set_bit_lock(0, &worker->create_state)) goto fail_release; + atomic_inc(&wq->worker_refs); init_task_work(&worker->create_work, func); worker->create_index = acct->index; if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) { - clear_bit_unlock(0, &worker->create_state); + /* + * EXIT may have been set after checking it above, check after + * adding the task_work and remove any creation item if it is + * now set. wq exit does that too, but we can have added this + * work item after we canceled in io_wq_exit_workers(). 
+ */ + if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) + io_wq_cancel_tw_create(wq); + io_worker_ref_put(wq); return true; } + io_worker_ref_put(wq); clear_bit_unlock(0, &worker->create_state); fail_release: io_worker_release(worker); @@ -714,6 +725,13 @@ static bool io_wq_work_match_all(struct io_wq_work *work, void *data) static inline bool io_should_retry_thread(long err) { + /* + * Prevent perpetual task_work retry, if the task (or its group) is + * exiting. + */ + if (fatal_signal_pending(current)) + return false; + switch (err) { case -EAGAIN: case -ERESTARTSYS: @@ -1191,13 +1209,9 @@ void io_wq_exit_start(struct io_wq *wq) set_bit(IO_WQ_BIT_EXIT, &wq->state); } -static void io_wq_exit_workers(struct io_wq *wq) +static void io_wq_cancel_tw_create(struct io_wq *wq) { struct callback_head *cb; - int node; - - if (!wq->task) - return; while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) { struct io_worker *worker; @@ -1205,6 +1219,16 @@ static void io_wq_exit_workers(struct io_wq *wq) worker = container_of(cb, struct io_worker, create_work); io_worker_cancel_cb(worker); } +} + +static void io_wq_exit_workers(struct io_wq *wq) +{ + int node; + + if (!wq->task) + return; + + io_wq_cancel_tw_create(wq); rcu_read_lock(); for_each_node(node) { diff --git a/fs/io_uring.c b/fs/io_uring.c index b07196b4511c..d5ab0e9a3f29 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1278,6 +1278,7 @@ static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl) static bool io_match_task(struct io_kiocb *head, struct task_struct *task, bool cancel_all) + __must_hold(&req->ctx->timeout_lock) { struct io_kiocb *req; @@ -1293,6 +1294,44 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task, return false; } +static bool io_match_linked(struct io_kiocb *head) +{ + struct io_kiocb *req; + + io_for_each_link(req, head) { + if (req->flags & REQ_F_INFLIGHT) + return true; + } + return false; +} + +/* + * As io_match_task() but protected against racing with linked timeouts. + * User must not hold timeout_lock. 
+ */ +static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task, + bool cancel_all) +{ + bool matched; + + if (task && head->task != task) + return false; + if (cancel_all) + return true; + + if (head->flags & REQ_F_LINK_TIMEOUT) { + struct io_ring_ctx *ctx = head->ctx; + + /* protect against races with linked timeouts */ + spin_lock_irq(&ctx->timeout_lock); + matched = io_match_linked(head); + spin_unlock_irq(&ctx->timeout_lock); + } else { + matched = io_match_linked(head); + } + return matched; +} + static inline bool req_has_async_data(struct io_kiocb *req) { return req->flags & REQ_F_ASYNC_DATA; @@ -1502,10 +1541,10 @@ static void io_prep_async_link(struct io_kiocb *req) if (req->flags & REQ_F_LINK_TIMEOUT) { struct io_ring_ctx *ctx = req->ctx; - spin_lock(&ctx->completion_lock); + spin_lock_irq(&ctx->timeout_lock); io_for_each_link(cur, req) io_prep_async_work(cur); - spin_unlock(&ctx->completion_lock); + spin_unlock_irq(&ctx->timeout_lock); } else { io_for_each_link(cur, req) io_prep_async_work(cur); @@ -4327,6 +4366,7 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf, kfree(nxt); if (++i == nbufs) return i; + cond_resched(); } i++; kfree(buf); @@ -5704,7 +5744,7 @@ static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, list = &ctx->cancel_hash[i]; hlist_for_each_entry_safe(req, tmp, list, hash_node) { - if (io_match_task(req, tsk, cancel_all)) + if (io_match_task_safe(req, tsk, cancel_all)) posted += io_poll_remove_one(req); } } @@ -6156,6 +6196,9 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr))) return -EFAULT; + if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0) + return -EINVAL; + data->mode = io_translate_timeout_mode(flags); hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode); @@ -6880,10 +6923,11 @@ static inline struct file *io_file_get(struct io_ring_ctx *ctx, static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked) { struct io_kiocb *prev = req->timeout.prev; - int ret; + int ret = -ENOENT; if (prev) { - ret = io_try_cancel_userdata(req, prev->user_data); + if (!(req->task->flags & PF_EXITING)) + ret = io_try_cancel_userdata(req, prev->user_data); io_req_complete_post(req, ret ?: -ETIME, 0); io_put_req(prev); } else { @@ -9255,10 +9299,8 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx) struct io_buffer *buf; unsigned long index; - xa_for_each(&ctx->io_buffers, index, buf) { + xa_for_each(&ctx->io_buffers, index, buf) __io_remove_buffers(ctx, buf, index, -1U); - cond_resched(); - } } static void io_req_caches_free(struct io_ring_ctx *ctx) @@ -9562,19 +9604,8 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data) { struct io_kiocb *req = container_of(work, struct io_kiocb, work); struct io_task_cancel *cancel = data; - bool ret; - - if (!cancel->all && (req->flags & REQ_F_LINK_TIMEOUT)) { - struct io_ring_ctx *ctx = req->ctx; - /* protect against races with linked timeouts */ - spin_lock(&ctx->completion_lock); - ret = io_match_task(req, cancel->task, cancel->all); - spin_unlock(&ctx->completion_lock); - } else { - ret = io_match_task(req, cancel->task, cancel->all); - } - return ret; + return io_match_task_safe(req, cancel->task, cancel->all); } static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx, @@ -9586,7 +9617,7 @@ static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx, spin_lock(&ctx->completion_lock); list_for_each_entry_reverse(de, 
&ctx->defer_list, list) { - if (io_match_task(de->req, task, cancel_all)) { + if (io_match_task_safe(de->req, task, cancel_all)) { list_cut_position(&list, &ctx->defer_list, &de->list); break; } @@ -9764,7 +9795,7 @@ static __cold void io_uring_clean_tctx(struct io_uring_task *tctx) } if (wq) { /* - * Must be after io_uring_del_task_file() (removes nodes under + * Must be after io_uring_del_tctx_node() (removes nodes under * uring_lock) to avoid race with io_uring_try_cancel_iowq(). */ io_wq_put_and_exit(wq); @@ -9793,7 +9824,7 @@ static __cold void io_uring_drop_tctx_refs(struct task_struct *task) /* * Find any io_uring ctx that this task has registered or done IO on, and cancel - * requests. @sqd should be not-null IIF it's an SQPOLL thread cancellation. + * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation. */ static __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd) @@ -9835,8 +9866,10 @@ static __cold void io_uring_cancel_generic(bool cancel_all, cancel_all); } - prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE); + prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE); + io_run_task_work(); io_uring_drop_tctx_refs(current); + /* * If we've seen completions, retry without waiting. This * avoids a race where a completion comes in before we did diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 1753c26c8e76..71a36ae120ee 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -205,7 +205,16 @@ struct iomap_readpage_ctx { struct readahead_control *rac; }; -static loff_t iomap_read_inline_data(const struct iomap_iter *iter, +/** + * iomap_read_inline_data - copy inline data into the page cache + * @iter: iteration structure + * @page: page to copy to + * + * Copy the inline data in @iter into @page and zero out the rest of the page. + * Only a single IOMAP_INLINE extent is allowed at the end of each file. + * Returns zero for success to complete the read, or the usual negative errno. 
+ */ +static int iomap_read_inline_data(const struct iomap_iter *iter, struct page *page) { const struct iomap *iomap = iomap_iter_srcmap(iter); @@ -214,7 +223,7 @@ static loff_t iomap_read_inline_data(const struct iomap_iter *iter, void *addr; if (PageUptodate(page)) - return PAGE_SIZE - poff; + return 0; if (WARN_ON_ONCE(size > PAGE_SIZE - poff)) return -EIO; @@ -231,7 +240,7 @@ static loff_t iomap_read_inline_data(const struct iomap_iter *iter, memset(addr + size, 0, PAGE_SIZE - poff - size); kunmap_local(addr); iomap_set_range_uptodate(page, poff, PAGE_SIZE - poff); - return PAGE_SIZE - poff; + return 0; } static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter, @@ -257,7 +266,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter, sector_t sector; if (iomap->type == IOMAP_INLINE) - return min(iomap_read_inline_data(iter, page), length); + return iomap_read_inline_data(iter, page); /* zero post-eof blocks as the page may be mapped */ iop = iomap_page_create(iter->inode, page); @@ -370,6 +379,8 @@ static loff_t iomap_readahead_iter(const struct iomap_iter *iter, ctx->cur_page_in_bio = false; } ret = iomap_readpage_iter(iter, ctx, done); + if (ret <= 0) + return ret; } return done; @@ -580,15 +591,10 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos, static int iomap_write_begin_inline(const struct iomap_iter *iter, struct page *page) { - int ret; - /* needs more work for the tailpacking case; disable for now */ if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0)) return -EIO; - ret = iomap_read_inline_data(iter, page); - if (ret < 0) - return ret; - return 0; + return iomap_read_inline_data(iter, page); } static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos, diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c index 121f8e8c70ac..49c9da37315c 100644 --- a/fs/ksmbd/smb2pdu.c +++ b/fs/ksmbd/smb2pdu.c @@ -1697,8 +1697,10 @@ int smb2_sess_setup(struct ksmbd_work *work) negblob_off = le16_to_cpu(req->SecurityBufferOffset); negblob_len = le16_to_cpu(req->SecurityBufferLength); if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer) || - negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) - return -EINVAL; + negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) { + rc = -EINVAL; + goto out_err; + } negblob = (struct negotiate_message *)((char *)&req->hdr.ProtocolId + negblob_off); @@ -4457,6 +4459,12 @@ static void get_file_stream_info(struct ksmbd_work *work, &stat); file_info = (struct smb2_file_stream_info *)rsp->Buffer; + buf_free_len = + smb2_calc_max_out_buf_len(work, 8, + le32_to_cpu(req->OutputBufferLength)); + if (buf_free_len < 0) + goto out; + xattr_list_len = ksmbd_vfs_listxattr(path->dentry, &xattr_list); if (xattr_list_len < 0) { goto out; @@ -4465,12 +4473,6 @@ static void get_file_stream_info(struct ksmbd_work *work, goto out; } - buf_free_len = - smb2_calc_max_out_buf_len(work, 8, - le32_to_cpu(req->OutputBufferLength)); - if (buf_free_len < 0) - goto out; - while (idx < xattr_list_len) { stream_name = xattr_list + idx; streamlen = strlen(stream_name); @@ -4496,8 +4498,10 @@ static void get_file_stream_info(struct ksmbd_work *work, ":%s", &stream_name[XATTR_NAME_STREAM_LEN]); next = sizeof(struct smb2_file_stream_info) + streamlen * 2; - if (next > buf_free_len) + if (next > buf_free_len) { + kfree(stream_buf); break; + } file_info = (struct smb2_file_stream_info *)&rsp->Buffer[nbytes]; streamlen = smbConvertToUTF16((__le16 *)file_info->StreamName, @@ -4514,6 
+4518,7 @@ static void get_file_stream_info(struct ksmbd_work *work, file_info->NextEntryOffset = cpu_to_le32(next); } +out: if (!S_ISDIR(stat.mode) && buf_free_len >= sizeof(struct smb2_file_stream_info) + 7 * 2) { file_info = (struct smb2_file_stream_info *) @@ -4522,14 +4527,13 @@ static void get_file_stream_info(struct ksmbd_work *work, "::$DATA", 7, conn->local_nls, 0); streamlen *= 2; file_info->StreamNameLength = cpu_to_le32(streamlen); - file_info->StreamSize = 0; - file_info->StreamAllocationSize = 0; + file_info->StreamSize = cpu_to_le64(stat.size); + file_info->StreamAllocationSize = cpu_to_le64(stat.blocks << 9); nbytes += sizeof(struct smb2_file_stream_info) + streamlen; } /* last entry offset should be 0 */ file_info->NextEntryOffset = 0; -out: kvfree(xattr_list); rsp->OutputBufferLength = cpu_to_le32(nbytes); @@ -5068,7 +5072,7 @@ static int smb2_get_info_sec(struct ksmbd_work *work, if (addition_info & ~(OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO | PROTECTED_DACL_SECINFO | UNPROTECTED_DACL_SECINFO)) { - pr_err("Unsupported addition info: 0x%x)\n", + ksmbd_debug(SMB, "Unsupported addition info: 0x%x)\n", addition_info); pntsd->revision = cpu_to_le16(1); diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c index 9320a42dfaf9..75c76cbb27cc 100644 --- a/fs/netfs/read_helper.c +++ b/fs/netfs/read_helper.c @@ -354,16 +354,11 @@ static void netfs_rreq_write_to_cache_work(struct work_struct *work) netfs_rreq_do_write_to_cache(rreq); } -static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq, - bool was_async) +static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq) { - if (was_async) { - rreq->work.func = netfs_rreq_write_to_cache_work; - if (!queue_work(system_unbound_wq, &rreq->work)) - BUG(); - } else { - netfs_rreq_do_write_to_cache(rreq); - } + rreq->work.func = netfs_rreq_write_to_cache_work; + if (!queue_work(system_unbound_wq, &rreq->work)) + BUG(); } /* @@ -558,7 +553,7 @@ again: wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS); if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags)) - return netfs_rreq_write_to_cache(rreq, was_async); + return netfs_rreq_write_to_cache(rreq); netfs_rreq_completed(rreq, was_async); } @@ -960,7 +955,7 @@ int netfs_readpage(struct file *file, rreq = netfs_alloc_read_request(ops, netfs_priv, file); if (!rreq) { if (netfs_priv) - ops->cleanup(netfs_priv, folio_file_mapping(folio)); + ops->cleanup(folio_file_mapping(folio), netfs_priv); folio_unlock(folio); return -ENOMEM; } @@ -1008,8 +1003,8 @@ out: } EXPORT_SYMBOL(netfs_readpage); -/** - * netfs_skip_folio_read - prep a folio for writing without reading first +/* + * Prepare a folio for writing without reading first * @folio: The folio being prepared * @pos: starting position for the write * @len: length of write @@ -1191,7 +1186,7 @@ have_folio: goto error; have_folio_no_wait: if (netfs_priv) - ops->cleanup(netfs_priv, mapping); + ops->cleanup(mapping, netfs_priv); *_folio = folio; _leave(" = 0"); return 0; @@ -1202,7 +1197,7 @@ error: folio_unlock(folio); folio_put(folio); if (netfs_priv) - ops->cleanup(netfs_priv, mapping); + ops->cleanup(mapping, netfs_priv); _leave(" = %d", ret); return ret; } diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index dd53704c3f40..fda530d5e764 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -219,6 +219,7 @@ void nfs_set_cache_invalid(struct inode *inode, unsigned long flags) NFS_INO_DATA_INVAL_DEFER); else if (nfsi->cache_validity & NFS_INO_INVALID_DATA) nfsi->cache_validity &= ~NFS_INO_DATA_INVAL_DEFER; + 
trace_nfs_set_cache_invalid(inode, 0); } EXPORT_SYMBOL_GPL(nfs_set_cache_invalid); diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 08355b66e7cb..8b21ff1be717 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -289,7 +289,9 @@ static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len) loff_t newsize = pos + len; loff_t end = newsize - 1; - truncate_pagecache_range(inode, pos, end); + WARN_ON_ONCE(invalidate_inode_pages2_range(inode->i_mapping, + pos >> PAGE_SHIFT, end >> PAGE_SHIFT)); + spin_lock(&inode->i_lock); if (newsize > i_size_read(inode)) i_size_write(inode, newsize); diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c index c8bad735e4c1..271e5f92ed01 100644 --- a/fs/nfs/nfs42xdr.c +++ b/fs/nfs/nfs42xdr.c @@ -1434,8 +1434,7 @@ static int nfs4_xdr_dec_clone(struct rpc_rqst *rqstp, status = decode_clone(xdr); if (status) goto out; - status = decode_getfattr(xdr, res->dst_fattr, res->server); - + decode_getfattr(xdr, res->dst_fattr, res->server); out: res->rpc_status = status; return status; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index ecc4594299d6..f63dfa01001c 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1998,6 +1998,10 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status) dprintk("%s: exit with error %d for server %s\n", __func__, -EPROTONOSUPPORT, clp->cl_hostname); return -EPROTONOSUPPORT; + case -ENOSPC: + if (clp->cl_cons_state == NFS_CS_SESSION_INITING) + nfs_mark_client_ready(clp, -EIO); + return -EIO; case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery * in nfs4_exchange_id */ default: diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index 21dac847f1e4..b3aee261801e 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -162,6 +162,7 @@ DEFINE_NFS_INODE_EVENT_DONE(nfs_writeback_inode_exit); DEFINE_NFS_INODE_EVENT(nfs_fsync_enter); DEFINE_NFS_INODE_EVENT_DONE(nfs_fsync_exit); DEFINE_NFS_INODE_EVENT(nfs_access_enter); +DEFINE_NFS_INODE_EVENT_DONE(nfs_set_cache_invalid); TRACE_EVENT(nfs_access_exit, TP_PROTO( diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index 6fedc49726bf..c634483d85d2 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -2156,6 +2156,7 @@ static struct notifier_block nfsd4_cld_block = { int register_cld_notifier(void) { + WARN_ON(!nfsd_net_id); return rpc_pipefs_notifier_register(&nfsd4_cld_block); } diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index bfad94c70b84..1956d377d1a6 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1207,6 +1207,11 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp) return 0; } +static bool delegation_hashed(struct nfs4_delegation *dp) +{ + return !(list_empty(&dp->dl_perfile)); +} + static bool unhash_delegation_locked(struct nfs4_delegation *dp) { @@ -1214,7 +1219,7 @@ unhash_delegation_locked(struct nfs4_delegation *dp) lockdep_assert_held(&state_lock); - if (list_empty(&dp->dl_perfile)) + if (!delegation_hashed(dp)) return false; dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID; @@ -4598,7 +4603,7 @@ static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb) * queued for a lease break. Don't queue it again. 
*/ spin_lock(&state_lock); - if (dp->dl_time == 0) { + if (delegation_hashed(dp) && dp->dl_time == 0) { dp->dl_time = ktime_get_boottime_seconds(); list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru); } diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index b2a1d969a172..5a93a5db4fb0 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -288,11 +288,8 @@ nfsd4_decode_bitmap4(struct nfsd4_compoundargs *argp, u32 *bmval, u32 bmlen) p = xdr_inline_decode(argp->xdr, count << 2); if (!p) return nfserr_bad_xdr; - i = 0; - while (i < count) - bmval[i++] = be32_to_cpup(p++); - while (i < bmlen) - bmval[i++] = 0; + for (i = 0; i < bmlen; i++) + bmval[i] = (i < count) ? be32_to_cpup(p++) : 0; return nfs_ok; } diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index af8531c3854a..51a49e0cfe37 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -1521,12 +1521,9 @@ static int __init init_nfsd(void) int retval; printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n"); - retval = register_cld_notifier(); - if (retval) - return retval; retval = nfsd4_init_slabs(); if (retval) - goto out_unregister_notifier; + return retval; retval = nfsd4_init_pnfs(); if (retval) goto out_free_slabs; @@ -1545,9 +1542,14 @@ static int __init init_nfsd(void) goto out_free_exports; retval = register_pernet_subsys(&nfsd_net_ops); if (retval < 0) + goto out_free_filesystem; + retval = register_cld_notifier(); + if (retval) goto out_free_all; return 0; out_free_all: + unregister_pernet_subsys(&nfsd_net_ops); +out_free_filesystem: unregister_filesystem(&nfsd_fs_type); out_free_exports: remove_proc_entry("fs/nfs/exports", NULL); @@ -1561,13 +1563,12 @@ out_free_pnfs: nfsd4_exit_pnfs(); out_free_slabs: nfsd4_free_slabs(); -out_unregister_notifier: - unregister_cld_notifier(); return retval; } static void __exit exit_nfsd(void) { + unregister_cld_notifier(); unregister_pernet_subsys(&nfsd_net_ops); nfsd_drc_slab_free(); remove_proc_entry("fs/nfs/exports", NULL); @@ -1577,7 +1578,6 @@ static void __exit exit_nfsd(void) nfsd4_free_slabs(); nfsd4_exit_pnfs(); unregister_filesystem(&nfsd_fs_type); - unregister_cld_notifier(); } MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>"); diff --git a/fs/ntfs/Kconfig b/fs/ntfs/Kconfig index 1667a7e590d8..f93e69a61283 100644 --- a/fs/ntfs/Kconfig +++ b/fs/ntfs/Kconfig @@ -52,6 +52,7 @@ config NTFS_DEBUG config NTFS_RW bool "NTFS write support" depends on NTFS_FS + depends on PAGE_SIZE_LESS_THAN_64KB help This enables the partial, but safe, write support in the NTFS driver. 
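The nfsd4_decode_bitmap4() hunk above folds a copy loop plus a zero-fill loop into a single pass over the destination bitmap: each slot either takes the next big-endian word off the wire or becomes zero once the wire words run out. Here is a minimal userspace sketch of the same decode pattern, assuming illustrative names (be32_read(), decode_bitmap(), and the test values) rather than the kernel's XDR helpers:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Read one big-endian 32-bit word, as be32_to_cpup() does in the kernel. */
static uint32_t be32_read(const unsigned char *b)
{
	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8) | (uint32_t)b[3];
}

/* Copy "count" wire words into a "bmlen"-word bitmap, zero-filling the rest. */
static void decode_bitmap(uint32_t *bmval, size_t bmlen,
			  const unsigned char *wire, size_t count)
{
	for (size_t i = 0; i < bmlen; i++)
		bmval[i] = (i < count) ? be32_read(wire + 4 * i) : 0;
}

int main(void)
{
	/* Two big-endian words on the wire; the caller expects three. */
	const unsigned char wire[] = { 0, 0, 0, 1, 0, 0, 2, 0 };
	uint32_t bmval[3];

	decode_bitmap(bmval, 3, wire, 2);
	printf("%u %u %u\n", bmval[0], bmval[1], bmval[2]); /* 1 512 0 */
	return 0;
}

The single-pass form keeps the bounds explicit: the wire buffer is only read while i < count, so a short bitmap from the client can never over-read, and words the client did not send decode as zero.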
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 30a3b66f475a..509f85148fee 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -154,9 +154,13 @@ ssize_t read_from_oldmem(char *buf, size_t count, nr_bytes = count; /* If pfn is not ram, return zeros for sparse dump files */ - if (!pfn_is_ram(pfn)) - memset(buf, 0, nr_bytes); - else { + if (!pfn_is_ram(pfn)) { + tmp = 0; + if (!userbuf) + memset(buf, 0, nr_bytes); + else if (clear_user(buf, nr_bytes)) + tmp = -EFAULT; + } else { if (encrypted) tmp = copy_oldmem_page_encrypted(pfn, buf, nr_bytes, @@ -165,12 +169,12 @@ ssize_t read_from_oldmem(char *buf, size_t count, else tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf); - - if (tmp < 0) { - up_read(&vmcore_cb_rwsem); - return tmp; - } } + if (tmp < 0) { + up_read(&vmcore_cb_rwsem); + return tmp; + } + *ppos += nr_bytes; count -= nr_bytes; buf += nr_bytes; diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig index 328da35da390..8adabde685f1 100644 --- a/fs/pstore/Kconfig +++ b/fs/pstore/Kconfig @@ -173,7 +173,6 @@ config PSTORE_BLK tristate "Log panic/oops to a block device" depends on PSTORE depends on BLOCK - depends on BROKEN select PSTORE_ZONE default n help diff --git a/fs/pstore/blk.c b/fs/pstore/blk.c index 5d1fbaffd66a..4ae0cfcd15f2 100644 --- a/fs/pstore/blk.c +++ b/fs/pstore/blk.c @@ -309,7 +309,7 @@ static int __init __best_effort_init(void) if (ret) kfree(best_effort_dev); else - pr_info("attached %s (%zu) (no dedicated panic_write!)\n", + pr_info("attached %s (%lu) (no dedicated panic_write!)\n", blkdev, best_effort_dev->zone.total_size); return ret; diff --git a/fs/signalfd.c b/fs/signalfd.c index 040e1cf90528..65ce0e72e7b9 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c @@ -35,17 +35,7 @@ void signalfd_cleanup(struct sighand_struct *sighand) { - wait_queue_head_t *wqh = &sighand->signalfd_wqh; - /* - * The lockless check can race with remove_wait_queue() in progress, - * but in this case its caller should run under rcu_read_lock() and - * sighand_cachep is SLAB_TYPESAFE_BY_RCU, we can safely return. - */ - if (likely(!waitqueue_active(wqh))) - return; - - /* wait_queue_entry_t->func(POLLFREE) should do remove_wait_queue() */ - wake_up_poll(wqh, EPOLLHUP | POLLFREE); + wake_up_pollfree(&sighand->signalfd_wqh); } struct signalfd_ctx { diff --git a/fs/smbfs_common/cifs_arc4.c b/fs/smbfs_common/cifs_arc4.c index 85ba15a60b13..043e4cb839fa 100644 --- a/fs/smbfs_common/cifs_arc4.c +++ b/fs/smbfs_common/cifs_arc4.c @@ -72,16 +72,3 @@ void cifs_arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int l ctx->y = y; } EXPORT_SYMBOL_GPL(cifs_arc4_crypt); - -static int __init -init_smbfs_common(void) -{ - return 0; -} -static void __init -exit_smbfs_common(void) -{ -} - -module_init(init_smbfs_common) -module_exit(exit_smbfs_common) diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c index 925a621b432e..3616839c5c4b 100644 --- a/fs/tracefs/inode.c +++ b/fs/tracefs/inode.c @@ -161,6 +161,77 @@ struct tracefs_fs_info { struct tracefs_mount_opts mount_opts; }; +static void change_gid(struct dentry *dentry, kgid_t gid) +{ + if (!dentry->d_inode) + return; + dentry->d_inode->i_gid = gid; +} + +/* + * Taken from d_walk, but without the need for handling renames. + * Nothing can be renamed while walking the list, as tracefs + * does not support renames. This is only called when mounting + * or remounting the file system, to set all the files to + * the given gid.
+ */ +static void set_gid(struct dentry *parent, kgid_t gid) +{ + struct dentry *this_parent; + struct list_head *next; + + this_parent = parent; + spin_lock(&this_parent->d_lock); + + change_gid(this_parent, gid); +repeat: + next = this_parent->d_subdirs.next; +resume: + while (next != &this_parent->d_subdirs) { + struct list_head *tmp = next; + struct dentry *dentry = list_entry(tmp, struct dentry, d_child); + next = tmp->next; + + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); + + change_gid(dentry, gid); + + if (!list_empty(&dentry->d_subdirs)) { + spin_unlock(&this_parent->d_lock); + spin_release(&dentry->d_lock.dep_map, _RET_IP_); + this_parent = dentry; + spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); + goto repeat; + } + spin_unlock(&dentry->d_lock); + } + /* + * All done at this level ... ascend and resume the search. + */ + rcu_read_lock(); +ascend: + if (this_parent != parent) { + struct dentry *child = this_parent; + this_parent = child->d_parent; + + spin_unlock(&child->d_lock); + spin_lock(&this_parent->d_lock); + + /* go into the first sibling still alive */ + do { + next = child->d_child.next; + if (next == &this_parent->d_subdirs) + goto ascend; + child = list_entry(next, struct dentry, d_child); + } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)); + rcu_read_unlock(); + goto resume; + } + rcu_read_unlock(); + spin_unlock(&this_parent->d_lock); + return; +} + static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts) { substring_t args[MAX_OPT_ARGS]; @@ -193,6 +264,7 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts) if (!gid_valid(gid)) return -EINVAL; opts->gid = gid; + set_gid(tracefs_mount->mnt_root, gid); break; case Opt_mode: if (match_octal(&args[0], &option)) @@ -414,6 +486,8 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode, inode->i_mode = mode; inode->i_fop = fops ? fops : &tracefs_file_operations; inode->i_private = data; + inode->i_uid = d_inode(dentry->d_parent)->i_uid; + inode->i_gid = d_inode(dentry->d_parent)->i_gid; d_instantiate(dentry, inode); fsnotify_create(dentry->d_parent->d_inode, dentry); return end_creating(dentry); @@ -436,6 +510,8 @@ static struct dentry *__create_dir(const char *name, struct dentry *parent, inode->i_mode = S_IFDIR | S_IRWXU | S_IRUSR| S_IRGRP | S_IXUSR | S_IXGRP; inode->i_op = ops; inode->i_fop = &simple_dir_operations; + inode->i_uid = d_inode(dentry->d_parent)->i_uid; + inode->i_gid = d_inode(dentry->d_parent)->i_gid; /* directory inodes start off with i_nlink == 2 (for "." 
entry) */ inc_nlink(inode); diff --git a/fs/udf/dir.c b/fs/udf/dir.c index 70abdfad2df1..42e3e551fa4c 100644 --- a/fs/udf/dir.c +++ b/fs/udf/dir.c @@ -31,6 +31,7 @@ #include <linux/mm.h> #include <linux/slab.h> #include <linux/bio.h> +#include <linux/iversion.h> #include "udf_i.h" #include "udf_sb.h" @@ -43,7 +44,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx) struct fileIdentDesc *fi = NULL; struct fileIdentDesc cfi; udf_pblk_t block, iblock; - loff_t nf_pos; + loff_t nf_pos, emit_pos = 0; int flen; unsigned char *fname = NULL, *copy_name = NULL; unsigned char *nameptr; @@ -57,6 +58,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx) int i, num, ret = 0; struct extent_position epos = { NULL, 0, {0, 0} }; struct super_block *sb = dir->i_sb; + bool pos_valid = false; if (ctx->pos == 0) { if (!dir_emit_dot(file, ctx)) @@ -67,6 +69,21 @@ static int udf_readdir(struct file *file, struct dir_context *ctx) if (nf_pos >= size) goto out; + /* + * Something changed since last readdir (either lseek was called or dir + * changed)? We need to verify the position correctly points at the + * beginning of some dir entry so that the directory parsing code does + * not get confused. Since UDF does not have any reliable way of + * identifying beginning of dir entry (names are under user control), + * we need to scan the directory from the beginning. + */ + if (!inode_eq_iversion(dir, file->f_version)) { + emit_pos = nf_pos; + nf_pos = 0; + } else { + pos_valid = true; + } + fname = kmalloc(UDF_NAME_LEN, GFP_NOFS); if (!fname) { ret = -ENOMEM; @@ -122,13 +139,21 @@ static int udf_readdir(struct file *file, struct dir_context *ctx) while (nf_pos < size) { struct kernel_lb_addr tloc; + loff_t cur_pos = nf_pos; - ctx->pos = (nf_pos >> 2) + 1; + /* Update file position only if we got past the current one */ + if (nf_pos >= emit_pos) { + ctx->pos = (nf_pos >> 2) + 1; + pos_valid = true; + } fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc, &elen, &offset); if (!fi) goto out; + /* Still not at offset where user asked us to read from? 
*/ + if (cur_pos < emit_pos) + continue; liu = le16_to_cpu(cfi.lengthOfImpUse); lfi = cfi.lengthFileIdent; @@ -186,8 +211,11 @@ static int udf_readdir(struct file *file, struct dir_context *ctx) } /* end while */ ctx->pos = (nf_pos >> 2) + 1; + pos_valid = true; out: + if (pos_valid) + file->f_version = inode_query_iversion(dir); if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); diff --git a/fs/udf/namei.c b/fs/udf/namei.c index caeef08efed2..0ed4861b038f 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -30,6 +30,7 @@ #include <linux/sched.h> #include <linux/crc-itu-t.h> #include <linux/exportfs.h> +#include <linux/iversion.h> static inline int udf_match(int len1, const unsigned char *name1, int len2, const unsigned char *name2) @@ -134,6 +135,8 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi, mark_buffer_dirty_inode(fibh->ebh, inode); mark_buffer_dirty_inode(fibh->sbh, inode); } + inode_inc_iversion(inode); + return 0; } diff --git a/fs/udf/super.c b/fs/udf/super.c index 34247fba6df9..f26b5e0b84b6 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -57,6 +57,7 @@ #include <linux/crc-itu-t.h> #include <linux/log2.h> #include <asm/byteorder.h> +#include <linux/iversion.h> #include "udf_sb.h" #include "udf_i.h" @@ -149,6 +150,7 @@ static struct inode *udf_alloc_inode(struct super_block *sb) init_rwsem(&ei->i_data_sem); ei->cached_extent.lstart = -1; spin_lock_init(&ei->i_extent_cache_lock); + inode_set_iversion(&ei->vfs_inode, 1); return &ei->vfs_inode; } diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c index fbc9d816882c..23523b802539 100644 --- a/fs/xfs/libxfs/xfs_attr.c +++ b/fs/xfs/libxfs/xfs_attr.c @@ -1077,21 +1077,18 @@ xfs_attr_node_hasname( state = xfs_da_state_alloc(args); if (statep != NULL) - *statep = NULL; + *statep = state; /* * Search to see if name exists, and get back a pointer to it. */ error = xfs_da3_node_lookup_int(state, &retval); - if (error) { - xfs_da_state_free(state); - return error; - } + if (error) + retval = error; - if (statep != NULL) - *statep = state; - else + if (!statep) xfs_da_state_free(state); + return retval; } @@ -1112,7 +1109,7 @@ xfs_attr_node_addname_find_attr( */ retval = xfs_attr_node_hasname(args, &dac->da_state); if (retval != -ENOATTR && retval != -EEXIST) - return retval; + goto error; if (retval == -ENOATTR && (args->attr_flags & XATTR_REPLACE)) goto error; @@ -1337,7 +1334,7 @@ int xfs_attr_node_removename_setup( error = xfs_attr_node_hasname(args, state); if (error != -EEXIST) - return error; + goto out; error = 0; ASSERT((*state)->path.blk[(*state)->path.active - 1].bp != NULL); diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index e1472004170e..da4af2142a2b 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -289,22 +289,6 @@ xfs_perag_clear_inode_tag( trace_xfs_perag_clear_inode_tag(mp, pag->pag_agno, tag, _RET_IP_); } -static inline void -xfs_inew_wait( - struct xfs_inode *ip) -{ - wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT); - DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT); - - do { - prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); - if (!xfs_iflags_test(ip, XFS_INEW)) - break; - schedule(); - } while (true); - finish_wait(wq, &wait.wq_entry); -} - /* * When we recycle a reclaimable inode, we need to re-initialise the VFS inode * part of the structure. 
This is made more complex by the fact we store @@ -368,18 +352,13 @@ xfs_iget_recycle( ASSERT(!rwsem_is_locked(&inode->i_rwsem)); error = xfs_reinit_inode(mp, inode); if (error) { - bool wake; - /* * Re-initializing the inode failed, and we are in deep * trouble. Try to re-add it to the reclaim list. */ rcu_read_lock(); spin_lock(&ip->i_flags_lock); - wake = !!__xfs_iflags_test(ip, XFS_INEW); ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM); - if (wake) - wake_up_bit(&ip->i_flags, __XFS_INEW_BIT); ASSERT(ip->i_flags & XFS_IRECLAIMABLE); spin_unlock(&ip->i_flags_lock); rcu_read_unlock(); diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 64b9bf334806..6771f357ad2c 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -3122,7 +3122,6 @@ xfs_rename( * appropriately. */ if (flags & RENAME_WHITEOUT) { - ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE))); error = xfs_rename_alloc_whiteout(mnt_userns, target_dp, &wip); if (error) return error; diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index e635a3d64cba..c447bf04205a 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -231,8 +231,7 @@ static inline bool xfs_inode_has_bigtime(struct xfs_inode *ip) #define XFS_IRECLAIM (1 << 0) /* started reclaiming this inode */ #define XFS_ISTALE (1 << 1) /* inode has been staled */ #define XFS_IRECLAIMABLE (1 << 2) /* inode can be reclaimed */ -#define __XFS_INEW_BIT 3 /* inode has just been allocated */ -#define XFS_INEW (1 << __XFS_INEW_BIT) +#define XFS_INEW (1 << 3) /* inode has just been allocated */ #define XFS_IPRESERVE_DM_FIELDS (1 << 4) /* has legacy DMAPI fields set */ #define XFS_ITRUNCATED (1 << 5) /* truncated down so flush-on-close */ #define XFS_IDIRTY_RELEASE (1 << 6) /* dirty release already seen */ @@ -492,7 +491,6 @@ static inline void xfs_finish_inode_setup(struct xfs_inode *ip) xfs_iflags_clear(ip, XFS_INEW); barrier(); unlock_new_inode(VFS_I(ip)); - wake_up_bit(&ip->i_flags, __XFS_INEW_BIT); } static inline void xfs_setup_existing_inode(struct xfs_inode *ip) diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index e21459f9923a..778b57b1f020 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -1765,7 +1765,10 @@ static int xfs_remount_ro( struct xfs_mount *mp) { - int error; + struct xfs_icwalk icw = { + .icw_flags = XFS_ICWALK_FLAG_SYNC, + }; + int error; /* * Cancel background eofb scanning so it cannot race with the final @@ -1773,8 +1776,13 @@ xfs_remount_ro( */ xfs_blockgc_stop(mp); - /* Get rid of any leftover CoW reservations... */ - error = xfs_blockgc_free_space(mp, NULL); + /* + * Clear out all remaining COW staging extents and speculative post-EOF + * preallocations so that we don't leave inodes requiring inactivation + * cleanups during reclaim on a read-only mount. We must process every + * cached inode, so this requires a synchronous cache scan. 
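The on-stack control structure is what turns the walk from opportunistic into exhaustive. A hedged sketch of the two calling conventions; xfs_blockgc_free_space() and XFS_ICWALK_FLAG_SYNC are the real names, the surrounding context is illustrative:

struct xfs_icwalk icw = {
	.icw_flags = XFS_ICWALK_FLAG_SYNC,	/* visit every cached inode, waiting on each */
};
int error;

error = xfs_blockgc_free_space(mp, NULL);	/* default walk: fine for background trimming */
error = xfs_blockgc_free_space(mp, &icw);	/* remount-ro walk: guaranteed full, synchronous scan */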
+ */ + error = xfs_blockgc_free_space(mp, &icw); if (error) { xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); return error; diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h index fedc0dfa4877..4f07afacbc23 100644 --- a/include/asm-generic/cacheflush.h +++ b/include/asm-generic/cacheflush.h @@ -50,13 +50,7 @@ static inline void flush_dcache_page(struct page *page) { } -static inline void flush_dcache_folio(struct folio *folio) { } #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 -#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO -#endif - -#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO -void flush_dcache_folio(struct folio *folio); #endif #ifndef flush_dcache_mmap_lock diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 379746d3266f..b501d0badaea 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -27,6 +27,7 @@ #include <linux/llist.h> #include <linux/ctype.h> #include <linux/hdmi.h> +#include <linux/notifier.h> #include <drm/drm_mode_object.h> #include <drm/drm_util.h> @@ -40,6 +41,7 @@ struct drm_encoder; struct drm_property; struct drm_property_blob; struct drm_printer; +struct drm_privacy_screen; struct edid; struct i2c_adapter; @@ -320,6 +322,30 @@ struct drm_monitor_range_info { u8 max_vfreq; }; +/** + * enum drm_privacy_screen_status - privacy screen status + * + * This enum is used to track and control the state of the integrated privacy + * screen present on some display panels, via the "privacy-screen sw-state" + * and "privacy-screen hw-state" properties. Note the _LOCKED enum values + * are only valid for the "privacy-screen hw-state" property. + * + * @PRIVACY_SCREEN_DISABLED: + * The privacy-screen on the panel is disabled + * @PRIVACY_SCREEN_ENABLED: + * The privacy-screen on the panel is enabled + * @PRIVACY_SCREEN_DISABLED_LOCKED: + * The privacy-screen on the panel is disabled and locked (cannot be changed) + * @PRIVACY_SCREEN_ENABLED_LOCKED: + * The privacy-screen on the panel is enabled and locked (cannot be changed) + */ +enum drm_privacy_screen_status { + PRIVACY_SCREEN_DISABLED = 0, + PRIVACY_SCREEN_ENABLED, + PRIVACY_SCREEN_DISABLED_LOCKED, + PRIVACY_SCREEN_ENABLED_LOCKED, +}; + /* * This is a consolidated colorimetry list supported by HDMI and * DP protocol standard. The respective connectors will register @@ -794,6 +820,12 @@ struct drm_connector_state { u8 max_bpc; /** + * @privacy_screen_sw_state: See :ref:`Standard Connector + * Properties<standard_connector_properties>` + */ + enum drm_privacy_screen_status privacy_screen_sw_state; + + /** * @hdr_output_metadata: * DRM blob property for HDR output metadata */ @@ -1421,6 +1453,24 @@ struct drm_connector { */ struct drm_property *max_bpc_property; + /** @privacy_screen: drm_privacy_screen for this connector, or NULL. */ + struct drm_privacy_screen *privacy_screen; + + /** @privacy_screen_notifier: privacy-screen notifier_block */ + struct notifier_block privacy_screen_notifier; + + /** + * @privacy_screen_sw_state_property: Optional atomic property for the + * connector to control the integrated privacy screen. + */ + struct drm_property *privacy_screen_sw_state_property; + + /** + * @privacy_screen_hw_state_property: Optional atomic property for the + * connector to report the actual integrated privacy screen state. 
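Because only the hw-state property can report the _LOCKED values, deciding whether a user-space request can still take effect boils down to testing for those two values. A minimal sketch; the helper name is hypothetical:

/* True if the panel currently refuses sw-state changes. */
static bool foo_privacy_screen_locked(enum drm_privacy_screen_status hw_state)
{
	return hw_state == PRIVACY_SCREEN_DISABLED_LOCKED ||
	       hw_state == PRIVACY_SCREEN_ENABLED_LOCKED;
}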
+ */ + struct drm_property *privacy_screen_hw_state_property; + #define DRM_CONNECTOR_POLL_HPD (1 << 0) #define DRM_CONNECTOR_POLL_CONNECT (1 << 1) #define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2) @@ -1744,6 +1794,11 @@ int drm_connector_set_panel_orientation_with_quirk( int width, int height); int drm_connector_attach_max_bpc_property(struct drm_connector *connector, int min, int max); +void drm_connector_create_privacy_screen_properties(struct drm_connector *conn); +void drm_connector_attach_privacy_screen_properties(struct drm_connector *conn); +void drm_connector_attach_privacy_screen_provider( + struct drm_connector *connector, struct drm_privacy_screen *priv); +void drm_connector_update_privacy_screen(const struct drm_connector_state *connector_state); /** * struct drm_tile_group - Tile group metadata diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h index 604b1d1b2d72..9923c7a6885e 100644 --- a/include/drm/drm_device.h +++ b/include/drm/drm_device.h @@ -6,16 +6,13 @@ #include <linux/mutex.h> #include <linux/idr.h> -#include <drm/drm_hashtab.h> +#include <drm/drm_legacy.h> #include <drm/drm_mode_config.h> struct drm_driver; struct drm_minor; struct drm_master; -struct drm_device_dma; struct drm_vblank_crtc; -struct drm_sg_mem; -struct drm_local_map; struct drm_vma_offset_manager; struct drm_vram_mm; struct drm_fb_helper; diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index b52df4db3e8f..8b2ed4199284 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -1114,8 +1114,15 @@ struct drm_panel; # define DP_UHBR20 (1 << 1) # define DP_UHBR13_5 (1 << 2) -#define DP_128B132B_TRAINING_AUX_RD_INTERVAL 0x2216 /* 2.0 */ -# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK 0x7f +#define DP_128B132B_TRAINING_AUX_RD_INTERVAL 0x2216 /* 2.0 */ +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK 0x7f +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_400_US 0x00 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_4_MS 0x01 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_8_MS 0x02 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_12_MS 0x03 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_16_MS 0x04 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_32_MS 0x05 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_64_MS 0x06 #define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0x2230 #define DP_TEST_264BIT_CUSTOM_PATTERN_263_256 0x2250 @@ -1389,6 +1396,11 @@ enum drm_dp_phy { # define DP_VOLTAGE_SWING_LEVEL_3_SUPPORTED BIT(0) # define DP_PRE_EMPHASIS_LEVEL_3_SUPPORTED BIT(1) +#define DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 0xf0022 /* 2.0 */ +#define DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy) \ + DP_LTTPR_REG(dp_phy, DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1) +/* see DP_128B132B_TRAINING_AUX_RD_INTERVAL for values */ + #define DP_LANE0_1_STATUS_PHY_REPEATER1 0xf0030 /* 1.3 */ #define DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy) \ DP_LTTPR_REG(dp_phy, DP_LANE0_1_STATUS_PHY_REPEATER1) @@ -1527,6 +1539,11 @@ u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZ #define DP_LTTPR_COMMON_CAP_SIZE 8 #define DP_LTTPR_PHY_CAP_SIZE 3 +int drm_dp_read_clock_recovery_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE], + enum drm_dp_phy dp_phy, bool uhbr); +int drm_dp_read_channel_eq_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE], + enum drm_dp_phy dp_phy, bool uhbr); + void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]); void 
drm_dp_lttpr_link_train_clock_recovery_delay(void); @@ -1851,7 +1868,7 @@ drm_dp_sink_can_do_video_without_timing_msa(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) * * Note that currently this function will return %false for panels which support various DPCD * backlight features but which require the brightness be set through PWM, and don't support setting - * the brightness level via the DPCD. This is a TODO. + * the brightness level via the DPCD. * * Returns: %True if @edp_dpcd indicates that VESA backlight controls are supported, %false * otherwise @@ -1859,8 +1876,7 @@ drm_dp_sink_can_do_video_without_timing_msa(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) static inline bool drm_edp_backlight_supported(const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]) { - return (edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP) && - (edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP); + return !!(edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP); } /* @@ -2221,6 +2237,7 @@ drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk) * @max: The maximum backlight level that may be set * @lsb_reg_used: Do we also write values to the DP_EDP_BACKLIGHT_BRIGHTNESS_LSB register? * @aux_enable: Does the panel support the AUX enable cap? + * @aux_set: Does the panel support setting the brightness through AUX? * * This structure contains various data about an eDP backlight, which can be populated by using * drm_edp_backlight_init(). @@ -2232,6 +2249,7 @@ struct drm_edp_backlight_info { bool lsb_reg_used : 1; bool aux_enable : 1; + bool aux_set : 1; }; int diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 0cd95953cdf5..f6159acb8856 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -291,8 +291,9 @@ struct drm_driver { /** * @gem_create_object: constructor for gem objects * - * Hook for allocating the GEM object struct, for use by the CMA and - * SHMEM GEM helpers. + * Hook for allocating the GEM object struct, for use by the CMA + * and SHMEM GEM helpers. Returns a GEM object on success, or an + * ERR_PTR()-encoded error code otherwise. */ struct drm_gem_object *(*gem_create_object)(struct drm_device *dev, size_t size); @@ -345,11 +346,14 @@ struct drm_driver { * mmap hook for GEM drivers, used to implement dma-buf mmap in the * PRIME helpers. * - * FIXME: There's way too much duplication going on here, and also moved - * to &drm_gem_object_funcs. + * This hook only exists for historical reasons. Drivers must use + * drm_gem_prime_mmap() to implement it. + * + * FIXME: Convert all drivers to implement mmap in struct + * &drm_gem_object_funcs and inline drm_gem_prime_mmap() into + * its callers. This hook should be removed afterwards. 
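In other words, until the hook can be removed it is expected to be wired up verbatim. A hedged sketch of a driver declaration following that rule; foo_driver is hypothetical, drm_gem_prime_mmap() is the real helper:

static const struct drm_driver foo_driver = {
	/* ... feature flags, fops and GEM callbacks elided ... */
	.gem_prime_mmap = drm_gem_prime_mmap,	/* the one supported implementation */
};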
*/ - int (*gem_prime_mmap)(struct drm_gem_object *obj, - struct vm_area_struct *vma); + int (*gem_prime_mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma); /** * @dumb_create: @@ -598,5 +602,6 @@ static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev) int drm_dev_set_unique(struct drm_device *dev, const char *name); +extern bool drm_firmware_drivers_only(void); #endif diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h index e86925cf07b9..b30ed5de0a33 100644 --- a/include/drm/drm_format_helper.h +++ b/include/drm/drm_format_helper.h @@ -6,38 +6,41 @@ #ifndef __LINUX_DRM_FORMAT_HELPER_H #define __LINUX_DRM_FORMAT_HELPER_H +struct drm_format_info; struct drm_framebuffer; struct drm_rect; -void drm_fb_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb, - struct drm_rect *clip); -void drm_fb_memcpy_dstclip(void __iomem *dst, unsigned int dst_pitch, void *vaddr, - struct drm_framebuffer *fb, - struct drm_rect *clip); -void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb, - struct drm_rect *clip, bool cached); -void drm_fb_xrgb8888_to_rgb332(void *dst, void *vaddr, struct drm_framebuffer *fb, - struct drm_rect *clip); -void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr, - struct drm_framebuffer *fb, - struct drm_rect *clip, bool swab); -void drm_fb_xrgb8888_to_rgb565_dstclip(void __iomem *dst, unsigned int dst_pitch, - void *vaddr, struct drm_framebuffer *fb, - struct drm_rect *clip, bool swab); -void drm_fb_xrgb8888_to_rgb888(void *dst, void *src, struct drm_framebuffer *fb, - struct drm_rect *clip); -void drm_fb_xrgb8888_to_rgb888_dstclip(void __iomem *dst, unsigned int dst_pitch, - void *vaddr, struct drm_framebuffer *fb, - struct drm_rect *clip); -void drm_fb_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb, - struct drm_rect *clip); +unsigned int drm_fb_clip_offset(unsigned int pitch, const struct drm_format_info *format, + const struct drm_rect *clip); -int drm_fb_blit_rect_dstclip(void __iomem *dst, unsigned int dst_pitch, - uint32_t dst_format, void *vmap, - struct drm_framebuffer *fb, - struct drm_rect *rect); -int drm_fb_blit_dstclip(void __iomem *dst, unsigned int dst_pitch, - uint32_t dst_format, void *vmap, - struct drm_framebuffer *fb); +void drm_fb_memcpy(void *dst, unsigned int dst_pitch, const void *vaddr, + const struct drm_framebuffer *fb, const struct drm_rect *clip); +void drm_fb_memcpy_toio(void __iomem *dst, unsigned int dst_pitch, const void *vaddr, + const struct drm_framebuffer *fb, const struct drm_rect *clip); +void drm_fb_swab(void *dst, unsigned int dst_pitch, const void *src, + const struct drm_framebuffer *fb, const struct drm_rect *clip, + bool cached); +void drm_fb_xrgb8888_to_rgb332(void *dst, unsigned int dst_pitch, const void *vaddr, + const struct drm_framebuffer *fb, const struct drm_rect *clip); +void drm_fb_xrgb8888_to_rgb565(void *dst, unsigned int dst_pitch, const void *vaddr, + const struct drm_framebuffer *fb, const struct drm_rect *clip, + bool swab); +void drm_fb_xrgb8888_to_rgb565_toio(void __iomem *dst, unsigned int dst_pitch, + const void *vaddr, const struct drm_framebuffer *fb, + const struct drm_rect *clip, bool swab); +void drm_fb_xrgb8888_to_rgb888(void *dst, unsigned int dst_pitch, const void *src, + const struct drm_framebuffer *fb, const struct drm_rect *clip); +void drm_fb_xrgb8888_to_rgb888_toio(void __iomem *dst, unsigned int dst_pitch, + const void *vaddr, const struct drm_framebuffer *fb, + const struct drm_rect *clip); +void 
drm_fb_xrgb8888_to_xrgb2101010_toio(void __iomem *dst, unsigned int dst_pitch, + const void *vaddr, const struct drm_framebuffer *fb, + const struct drm_rect *clip); +void drm_fb_xrgb8888_to_gray8(void *dst, unsigned int dst_pitch, const void *vaddr, + const struct drm_framebuffer *fb, const struct drm_rect *clip); + +int drm_fb_blit_toio(void __iomem *dst, unsigned int dst_pitch, uint32_t dst_format, + const void *vmap, const struct drm_framebuffer *fb, + const struct drm_rect *rect); #endif /* __LINUX_DRM_FORMAT_HELPER_H */ diff --git a/include/drm/drm_gem_atomic_helper.h b/include/drm/drm_gem_atomic_helper.h index 48222a107873..0b1e2dd2ac3f 100644 --- a/include/drm/drm_gem_atomic_helper.h +++ b/include/drm/drm_gem_atomic_helper.h @@ -23,6 +23,24 @@ int drm_gem_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe, */ /** + * DRM_SHADOW_PLANE_MAX_WIDTH - Maximum width of a plane's shadow buffer in pixels + * + * For drivers with shadow planes, the maximum width of the framebuffer is + * usually independent of hardware limitations. Drivers can initialize struct + * drm_mode_config.max_width from DRM_SHADOW_PLANE_MAX_WIDTH. + */ +#define DRM_SHADOW_PLANE_MAX_WIDTH (4096u) + +/** + * DRM_SHADOW_PLANE_MAX_HEIGHT - Maximum height of a plane's shadow buffer in scanlines + * + * For drivers with shadow planes, the maximum height of the framebuffer is + * usually independent of hardware limitations. Drivers can initialize struct + * drm_mode_config.max_height from DRM_SHADOW_PLANE_MAX_HEIGHT. + */ +#define DRM_SHADOW_PLANE_MAX_HEIGHT (4096u) + +/** * struct drm_shadow_plane_state - plane state for planes with shadow buffers * * For planes that use a shadow buffer, struct drm_shadow_plane_state diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index cd13508acbc1..adb507a9dbf0 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -32,42 +32,108 @@ struct drm_gem_cma_object { #define to_drm_gem_cma_obj(gem_obj) \ container_of(gem_obj, struct drm_gem_cma_object, base) -#ifndef CONFIG_MMU -#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \ - .get_unmapped_area = drm_gem_cma_get_unmapped_area, -#else -#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS -#endif +struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, + size_t size); +void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj); +void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj, + struct drm_printer *p, unsigned int indent); +struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj); +int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj, struct dma_buf_map *map); +int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma); + +extern const struct vm_operations_struct drm_gem_cma_vm_ops; + +/* + * GEM object functions + */ /** - * DEFINE_DRM_GEM_CMA_FOPS() - macro to generate file operations for CMA drivers - * @name: name for the generated structure + * drm_gem_cma_object_free - GEM object function for drm_gem_cma_free() + * @obj: GEM object to free * - * This macro autogenerates a suitable &struct file_operations for CMA based - * drivers, which can be assigned to &drm_driver.fops. Note that this structure - * cannot be shared between drivers, because it contains a reference to the - * current module using THIS_MODULE. + * This function wraps drm_gem_cma_free(). Drivers that employ the CMA helpers + * should use it as their &drm_gem_object_funcs.free handler.
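The kernel-doc above describes drm_gem_cma_object_free(), whose body follows; together with the sibling wrappers defined next it is meant to be plugged straight into a driver's &drm_gem_object_funcs. A hedged sketch of the resulting table — the foo_ prefix is hypothetical, everything else is declared in this header:

static const struct drm_gem_object_funcs foo_gem_object_funcs = {
	.free = drm_gem_cma_object_free,
	.print_info = drm_gem_cma_object_print_info,
	.get_sg_table = drm_gem_cma_object_get_sg_table,
	.vmap = drm_gem_cma_object_vmap,
	.mmap = drm_gem_cma_object_mmap,
	.vm_ops = &drm_gem_cma_vm_ops,
};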
+ */ +static inline void drm_gem_cma_object_free(struct drm_gem_object *obj) +{ + struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); + + drm_gem_cma_free(cma_obj); +} + +/** + * drm_gem_cma_object_print_info() - Print &drm_gem_cma_object info for debugfs + * @p: DRM printer + * @indent: Tab indentation level + * @obj: GEM object * - * Note that the declaration is already marked as static - if you need a - * non-static version of this you're probably doing it wrong and will break the - * THIS_MODULE reference by accident. + * This function wraps drm_gem_cma_print_info(). Drivers that employ the CMA helpers + * should use this function as their &drm_gem_object_funcs.print_info handler. */ -#define DEFINE_DRM_GEM_CMA_FOPS(name) \ - static const struct file_operations name = {\ - .owner = THIS_MODULE,\ - .open = drm_open,\ - .release = drm_release,\ - .unlocked_ioctl = drm_ioctl,\ - .compat_ioctl = drm_compat_ioctl,\ - .poll = drm_poll,\ - .read = drm_read,\ - .llseek = noop_llseek,\ - .mmap = drm_gem_mmap,\ - DRM_GEM_CMA_UNMAPPED_AREA_FOPS \ - } +static inline void drm_gem_cma_object_print_info(struct drm_printer *p, unsigned int indent, + const struct drm_gem_object *obj) +{ + const struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); + + drm_gem_cma_print_info(cma_obj, p, indent); +} + +/** + * drm_gem_cma_object_get_sg_table - GEM object function for drm_gem_cma_get_sg_table() + * @obj: GEM object + * + * This function wraps drm_gem_cma_get_sg_table(). Drivers that employ the CMA helpers should + * use it as their &drm_gem_object_funcs.get_sg_table handler. + * + * Returns: + * A pointer to the scatter/gather table of pinned pages or NULL on failure. + */ +static inline struct sg_table *drm_gem_cma_object_get_sg_table(struct drm_gem_object *obj) +{ + struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); + + return drm_gem_cma_get_sg_table(cma_obj); +} + +/** + * drm_gem_cma_object_vmap - GEM object function for drm_gem_cma_vmap() + * @obj: GEM object + * @map: Returns the kernel virtual address of the CMA GEM object's backing store. + * + * This function wraps drm_gem_cma_vmap(). Drivers that employ the CMA helpers should + * use it as their &drm_gem_object_funcs.vmap handler. + * + * Returns: + * 0 on success or a negative error code on failure. + */ +static inline int drm_gem_cma_object_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +{ + struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); + + return drm_gem_cma_vmap(cma_obj, map); +} + +/** + * drm_gem_cma_object_mmap - GEM object function for drm_gem_cma_mmap() + * @obj: GEM object + * @vma: VMA for the area to be mapped + * + * This function wraps drm_gem_cma_mmap(). Drivers that employ the CMA helpers should + * use it as their &drm_gem_object_funcs.mmap handler. + * + * Returns: + * 0 on success or a negative error code on failure.
+ */ +static inline int drm_gem_cma_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); + + return drm_gem_cma_mmap(cma_obj, vma); +} -/* free GEM object */ -void drm_gem_cma_free_object(struct drm_gem_object *gem_obj); +/* + * Driver ops + */ /* create memory region for DRM framebuffer */ int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv, @@ -79,30 +145,10 @@ int drm_gem_cma_dumb_create(struct drm_file *file_priv, struct drm_device *drm, struct drm_mode_create_dumb *args); -/* allocate physical memory */ -struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, - size_t size); - -extern const struct vm_operations_struct drm_gem_cma_vm_ops; - -#ifndef CONFIG_MMU -unsigned long drm_gem_cma_get_unmapped_area(struct file *filp, - unsigned long addr, - unsigned long len, - unsigned long pgoff, - unsigned long flags); -#endif - -void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent, - const struct drm_gem_object *obj); - -struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object * drm_gem_cma_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); -int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); -int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); /** * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE - CMA GEM driver operations @@ -185,4 +231,47 @@ drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *drm, struct dma_buf_attachment *attach, struct sg_table *sgt); +/* + * File ops + */ + +#ifndef CONFIG_MMU +unsigned long drm_gem_cma_get_unmapped_area(struct file *filp, + unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags); +#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \ + .get_unmapped_area = drm_gem_cma_get_unmapped_area, +#else +#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS +#endif + +/** + * DEFINE_DRM_GEM_CMA_FOPS() - macro to generate file operations for CMA drivers + * @name: name for the generated structure + * + * This macro autogenerates a suitable &struct file_operations for CMA based + * drivers, which can be assigned to &drm_driver.fops. Note that this structure + * cannot be shared between drivers, because it contains a reference to the + * current module using THIS_MODULE. + * + * Note that the declaration is already marked as static - if you need a + * non-static version of this you're probably doing it wrong and will break the + * THIS_MODULE reference by accident. 
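Typical usage of the macro defined just below pairs the generated file_operations with the driver declaration; a brief hedged sketch with hypothetical foo_ names:

DEFINE_DRM_GEM_CMA_FOPS(foo_fops);	/* expands to a static struct file_operations */

static const struct drm_driver foo_cma_driver = {
	/* ... feature flags and GEM callbacks elided ... */
	.fops = &foo_fops,
};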
+ */ +#define DEFINE_DRM_GEM_CMA_FOPS(name) \ + static const struct file_operations name = {\ + .owner = THIS_MODULE,\ + .open = drm_open,\ + .release = drm_release,\ + .unlocked_ioctl = drm_ioctl,\ + .compat_ioctl = drm_compat_ioctl,\ + .poll = drm_poll,\ + .read = drm_read,\ + .llseek = noop_llseek,\ + .mmap = drm_gem_mmap,\ + DRM_GEM_CMA_UNMAPPED_AREA_FOPS \ + } + #endif /* __DRM_GEM_CMA_HELPER_H__ */ diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index 434328d8a0d9..311d66c9cf4b 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -107,16 +107,17 @@ struct drm_gem_shmem_object { container_of(obj, struct drm_gem_shmem_object, base) struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size); -void drm_gem_shmem_free_object(struct drm_gem_object *obj); +void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem); int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem); -int drm_gem_shmem_pin(struct drm_gem_object *obj); -void drm_gem_shmem_unpin(struct drm_gem_object *obj); -int drm_gem_shmem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); -void drm_gem_shmem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); +int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem); +void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem); +int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map); +void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map); +int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma); -int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv); +int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv); static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem) { @@ -125,29 +126,156 @@ static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem !shmem->base.dma_buf && !shmem->base.import_attach; } -void drm_gem_shmem_purge_locked(struct drm_gem_object *obj); -bool drm_gem_shmem_purge(struct drm_gem_object *obj); +void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem); +bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem); -struct drm_gem_shmem_object * -drm_gem_shmem_create_with_handle(struct drm_file *file_priv, - struct drm_device *dev, size_t size, - uint32_t *handle); +struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem); +struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem); -int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev, - struct drm_mode_create_dumb *args); +void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem, + struct drm_printer *p, unsigned int indent); + +/* + * GEM object functions + */ + +/** + * drm_gem_shmem_object_free - GEM object function for drm_gem_shmem_free() + * @obj: GEM object to free + * + * This function wraps drm_gem_shmem_free(). Drivers that employ the shmem helpers + * should use it as their &drm_gem_object_funcs.free handler. 
+ */ +static inline void drm_gem_shmem_object_free(struct drm_gem_object *obj) +{ + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + drm_gem_shmem_free(shmem); +} + +/** + * drm_gem_shmem_object_print_info() - Print &drm_gem_shmem_object info for debugfs + * @p: DRM printer + * @indent: Tab indentation level + * @obj: GEM object + * + * This function wraps drm_gem_shmem_print_info(). Drivers that employ the shmem helpers should + * use this function as their &drm_gem_object_funcs.print_info handler. + */ +static inline void drm_gem_shmem_object_print_info(struct drm_printer *p, unsigned int indent, + const struct drm_gem_object *obj) +{ + const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + drm_gem_shmem_print_info(shmem, p, indent); +} + +/** + * drm_gem_shmem_object_pin - GEM object function for drm_gem_shmem_pin() + * @obj: GEM object + * + * This function wraps drm_gem_shmem_pin(). Drivers that employ the shmem helpers should + * use it as their &drm_gem_object_funcs.pin handler. + */ +static inline int drm_gem_shmem_object_pin(struct drm_gem_object *obj) +{ + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + return drm_gem_shmem_pin(shmem); +} + +/** + * drm_gem_shmem_object_unpin - GEM object function for drm_gem_shmem_unpin() + * @obj: GEM object + * + * This function wraps drm_gem_shmem_unpin(). Drivers that employ the shmem helpers should + * use it as their &drm_gem_object_funcs.unpin handler. + */ +static inline void drm_gem_shmem_object_unpin(struct drm_gem_object *obj) +{ + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + drm_gem_shmem_unpin(shmem); +} -int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); +/** + * drm_gem_shmem_object_get_sg_table - GEM object function for drm_gem_shmem_get_sg_table() + * @obj: GEM object + * + * This function wraps drm_gem_shmem_get_sg_table(). Drivers that employ the shmem helpers should + * use it as their &drm_gem_object_funcs.get_sg_table handler. + * + * Returns: + * A pointer to the scatter/gather table of pinned pages or NULL on failure. + */ +static inline struct sg_table *drm_gem_shmem_object_get_sg_table(struct drm_gem_object *obj) +{ + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + return drm_gem_shmem_get_sg_table(shmem); +} + +/** + * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap() + * @obj: GEM object + * @map: Returns the kernel virtual address of the SHMEM GEM object's backing store. + * + * This function wraps drm_gem_shmem_vmap(). Drivers that employ the shmem helpers should + * use it as their &drm_gem_object_funcs.vmap handler. + * + * Returns: + * 0 on success or a negative error code on failure. + */ +static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +{ + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + return drm_gem_shmem_vmap(shmem, map); +} + +/** + * drm_gem_shmem_object_vunmap - GEM object function for drm_gem_shmem_vunmap() + * @obj: GEM object + * @map: Kernel virtual address where the SHMEM GEM object was mapped + * + * This function wraps drm_gem_shmem_vunmap(). Drivers that employ the shmem helpers should + * use it as their &drm_gem_object_funcs.vunmap handler.
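Drivers rarely need to spell these wrappers out one by one; the DRM_GEM_SHMEM_DRIVER_OPS convenience macro near the end of this header fills in the defaults. A hedged sketch with a hypothetical driver:

static const struct drm_driver foo_shmem_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	/* ... name, fops, etc. elided ... */
	DRM_GEM_SHMEM_DRIVER_OPS,
};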
+ */ +static inline void drm_gem_shmem_object_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) +{ + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + drm_gem_shmem_vunmap(shmem, map); +} + +/** + * drm_gem_shmem_object_mmap - GEM object function for drm_gem_shmem_mmap() + * @obj: GEM object + * @vma: VMA for the area to be mapped + * + * This function wraps drm_gem_shmem_mmap(). Drivers that employ the shmem helpers should + * use it as their &drm_gem_object_funcs.mmap handler. + * + * Returns: + * 0 on success or a negative error code on failure. + */ +static inline int drm_gem_shmem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + return drm_gem_shmem_mmap(shmem, vma); +} -void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent, - const struct drm_gem_object *obj); +/* + * Driver ops + */ -struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object * drm_gem_shmem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); - -struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj); +int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args); /** * DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations diff --git a/include/drm/drm_gem_ttm_helper.h b/include/drm/drm_gem_ttm_helper.h index c1aa02bd4c89..78040f6cc6f3 100644 --- a/include/drm/drm_gem_ttm_helper.h +++ b/include/drm/drm_gem_ttm_helper.h @@ -3,7 +3,7 @@ #ifndef DRM_GEM_TTM_HELPER_H #define DRM_GEM_TTM_HELPER_H -#include <linux/kernel.h> +#include <linux/container_of.h> #include <drm/drm_device.h> #include <drm/drm_gem.h> diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h index d3cf06c9af65..b4ce27a72773 100644 --- a/include/drm/drm_gem_vram_helper.h +++ b/include/drm/drm_gem_vram_helper.h @@ -11,8 +11,8 @@ #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> +#include <linux/container_of.h> #include <linux/dma-buf-map.h> -#include <linux/kernel.h> /* for container_of() */ struct drm_mode_create_dumb; struct drm_plane; diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h index 58dc8d8cc907..0fc85418aad8 100644 --- a/include/drm/drm_legacy.h +++ b/include/drm/drm_legacy.h @@ -37,7 +37,6 @@ #include <drm/drm.h> #include <drm/drm_auth.h> -#include <drm/drm_hashtab.h> struct drm_device; struct drm_driver; @@ -51,6 +50,20 @@ struct pci_driver; * you're doing it terribly wrong. */ +/* + * Hash-table Support + */ + +struct drm_hash_item { + struct hlist_node head; + unsigned long key; +}; + +struct drm_open_hash { + struct hlist_head *table; + u8 order; +}; + /** * DMA buffer. 
*/ diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index 9b4292f229c6..ac33ba1b18bc 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -39,13 +39,15 @@ */ #include <linux/bug.h> #include <linux/rbtree.h> -#include <linux/kernel.h> +#include <linux/limits.h> #include <linux/mm_types.h> #include <linux/list.h> #include <linux/spinlock.h> #ifdef CONFIG_DRM_DEBUG_MM #include <linux/stackdepot.h> #endif +#include <linux/types.h> + #include <drm/drm_print.h> #ifdef CONFIG_DRM_DEBUG_MM diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index 48b7de80daf5..91ca575a78de 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -359,6 +359,19 @@ struct drm_mode_config_funcs { * Core mode resource tracking structure. All CRTC, encoders, and connectors * enumerated by the driver are added here, as are global properties. Some * global restrictions are also here, e.g. dimension restrictions. + * + * Framebuffer sizes refer to the virtual screen that can be displayed by + * the CRTC. This can be different from the physical resolution programmed. + * The minimum width and height, stored in @min_width and @min_height, + * describe the smallest size of the framebuffer. It correlates to the + * minimum programmable resolution. + * The maximum width, stored in @max_width, is typically limited by the + * maximum pitch between two adjacent scanlines. The maximum height, stored + * in @max_height, is usually only limited by the amount of addressable video + * memory. For hardware that has no real maximum, drivers should pick a + * reasonable default. + * + * See also @DRM_SHADOW_PLANE_MAX_WIDTH and @DRM_SHADOW_PLANE_MAX_HEIGHT. */ struct drm_mode_config { /** diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h index b9b093add92e..99f79ac8b4cd 100644 --- a/include/drm/drm_of.h +++ b/include/drm/drm_of.h @@ -49,6 +49,7 @@ int drm_of_find_panel_or_bridge(const struct device_node *np, struct drm_bridge **bridge); int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1, const struct device_node *port2); +int drm_of_lvds_get_data_mapping(const struct device_node *port); #else static inline uint32_t drm_of_crtc_port_mask(struct drm_device *dev, struct device_node *port) @@ -98,6 +99,12 @@ drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1, { return -EINVAL; } + +static inline int +drm_of_lvds_get_data_mapping(const struct device_node *port) +{ + return -EINVAL; +} #endif /* diff --git a/include/drm/drm_privacy_screen_consumer.h b/include/drm/drm_privacy_screen_consumer.h new file mode 100644 index 000000000000..7f66a90d15b7 --- /dev/null +++ b/include/drm/drm_privacy_screen_consumer.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2020 Red Hat, Inc. 
+ * + * Authors: + * Hans de Goede <hdegoede@redhat.com> + */ + +#ifndef __DRM_PRIVACY_SCREEN_CONSUMER_H__ +#define __DRM_PRIVACY_SCREEN_CONSUMER_H__ + +#include <linux/device.h> +#include <drm/drm_connector.h> + +struct drm_privacy_screen; + +#if IS_ENABLED(CONFIG_DRM_PRIVACY_SCREEN) +struct drm_privacy_screen *drm_privacy_screen_get(struct device *dev, + const char *con_id); +void drm_privacy_screen_put(struct drm_privacy_screen *priv); + +int drm_privacy_screen_set_sw_state(struct drm_privacy_screen *priv, + enum drm_privacy_screen_status sw_state); +void drm_privacy_screen_get_state(struct drm_privacy_screen *priv, + enum drm_privacy_screen_status *sw_state_ret, + enum drm_privacy_screen_status *hw_state_ret); + +int drm_privacy_screen_register_notifier(struct drm_privacy_screen *priv, + struct notifier_block *nb); +int drm_privacy_screen_unregister_notifier(struct drm_privacy_screen *priv, + struct notifier_block *nb); +#else +static inline struct drm_privacy_screen *drm_privacy_screen_get(struct device *dev, + const char *con_id) +{ + return ERR_PTR(-ENODEV); +} +static inline void drm_privacy_screen_put(struct drm_privacy_screen *priv) +{ +} +static inline int drm_privacy_screen_set_sw_state(struct drm_privacy_screen *priv, + enum drm_privacy_screen_status sw_state) +{ + return -ENODEV; +} +static inline void drm_privacy_screen_get_state(struct drm_privacy_screen *priv, + enum drm_privacy_screen_status *sw_state_ret, + enum drm_privacy_screen_status *hw_state_ret) +{ + *sw_state_ret = PRIVACY_SCREEN_DISABLED; + *hw_state_ret = PRIVACY_SCREEN_DISABLED; +} +static inline int drm_privacy_screen_register_notifier(struct drm_privacy_screen *priv, + struct notifier_block *nb) +{ + return -ENODEV; +} +static inline int drm_privacy_screen_unregister_notifier(struct drm_privacy_screen *priv, + struct notifier_block *nb) +{ + return -ENODEV; +} +#endif + +#endif diff --git a/include/drm/drm_privacy_screen_driver.h b/include/drm/drm_privacy_screen_driver.h new file mode 100644 index 000000000000..24591b607675 --- /dev/null +++ b/include/drm/drm_privacy_screen_driver.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2020 Red Hat, Inc. + * + * Authors: + * Hans de Goede <hdegoede@redhat.com> + */ + +#ifndef __DRM_PRIVACY_SCREEN_DRIVER_H__ +#define __DRM_PRIVACY_SCREEN_DRIVER_H__ + +#include <linux/device.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <drm/drm_connector.h> + +struct drm_privacy_screen; + +/** + * struct drm_privacy_screen_ops - drm_privacy_screen operations + * + * Defines the operations which the privacy-screen class code may call. + * These functions should be implemented by the privacy-screen driver. + */ +struct drm_privacy_screen_ops { + /** + * @set_sw_state: Called to request a change of the privacy-screen + * state. The privacy-screen class code contains a check to avoid this + * getting called when the hw_state reports the state is locked. + * It is the driver's responsibility to update sw_state and hw_state. + * This is always called with the drm_privacy_screen's lock held. + */ + int (*set_sw_state)(struct drm_privacy_screen *priv, + enum drm_privacy_screen_status sw_state); + /** + * @get_hw_state: Called to request that the driver gets the current + * privacy-screen state from the hardware and then updates sw_state and + * hw_state accordingly. This will be called by the core just before + * the privacy-screen is registered in sysfs. 
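Taken together, the consumer flow is: look the privacy screen up at probe time, then hand it to the connector. A minimal hedged sketch, with error handling trimmed and the dev/connector variables assumed to exist; drm_connector_attach_privacy_screen_provider() is declared in the drm_connector.h changes above:

struct drm_privacy_screen *priv;

priv = drm_privacy_screen_get(dev, NULL);	/* NULL matches any connector name */
if (IS_ERR(priv))
	return PTR_ERR(priv);	/* -ENODEV with the stubs above */

/* The connector code then manages the reference and the property updates. */
drm_connector_attach_privacy_screen_provider(connector, priv);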
+ */ + void (*get_hw_state)(struct drm_privacy_screen *priv); +}; + +/** + * struct drm_privacy_screen - central privacy-screen structure + * + * Central privacy-screen structure; it contains the struct device used + * to register the screen in sysfs, the screen's state, ops, etc. + */ +struct drm_privacy_screen { + /** @dev: device used to register the privacy-screen in sysfs. */ + struct device dev; + /** @lock: mutex protecting all fields in this struct. */ + struct mutex lock; + /** @list: privacy-screen devices list list-entry. */ + struct list_head list; + /** @notifier_head: privacy-screen notifier head. */ + struct blocking_notifier_head notifier_head; + /** + * @ops: &struct drm_privacy_screen_ops for this privacy-screen. + * This is NULL if the driver has unregistered the privacy-screen. + */ + const struct drm_privacy_screen_ops *ops; + /** + * @sw_state: The privacy-screen's software state, see + * :ref:`Standard Connector Properties<standard_connector_properties>` + * for more info. + */ + enum drm_privacy_screen_status sw_state; + /** + * @hw_state: The privacy-screen's hardware state, see + * :ref:`Standard Connector Properties<standard_connector_properties>` + * for more info. + */ + enum drm_privacy_screen_status hw_state; +}; + +struct drm_privacy_screen *drm_privacy_screen_register( + struct device *parent, const struct drm_privacy_screen_ops *ops); +void drm_privacy_screen_unregister(struct drm_privacy_screen *priv); + +void drm_privacy_screen_call_notifier_chain(struct drm_privacy_screen *priv); + +#endif diff --git a/include/drm/drm_privacy_screen_machine.h b/include/drm/drm_privacy_screen_machine.h new file mode 100644 index 000000000000..02e5371904d3 --- /dev/null +++ b/include/drm/drm_privacy_screen_machine.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2020 Red Hat, Inc. + * + * Authors: + * Hans de Goede <hdegoede@redhat.com> + */ + +#ifndef __DRM_PRIVACY_SCREEN_MACHINE_H__ +#define __DRM_PRIVACY_SCREEN_MACHINE_H__ + +#include <linux/list.h> + +/** + * struct drm_privacy_screen_lookup - static privacy-screen lookup list entry + * + * Used for the static lookup-list for mapping privacy-screen consumer + * dev-connector pairs to a privacy-screen provider. + */ +struct drm_privacy_screen_lookup { + /** @list: Lookup list list-entry. */ + struct list_head list; + /** @dev_id: Consumer device name or NULL to match all devices. */ + const char *dev_id; + /** @con_id: Consumer connector name or NULL to match all connectors. */ + const char *con_id; + /** @provider: dev_name() of the privacy_screen provider.
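On the provider side the pattern is the mirror image: implement the two ops against the hardware, then register against the parent device. A hedged sketch — the foo_ names are hypothetical and the hardware access is elided:

static int foo_set_sw_state(struct drm_privacy_screen *priv,
			    enum drm_privacy_screen_status sw_state)
{
	/* Called with priv->lock held and hw_state known not to be locked. */
	/* ... program the panel here ... */
	priv->sw_state = sw_state;
	priv->hw_state = sw_state;
	return 0;
}

static void foo_get_hw_state(struct drm_privacy_screen *priv)
{
	/* ... read the real state back instead of hardcoding it ... */
	priv->sw_state = PRIVACY_SCREEN_ENABLED;
	priv->hw_state = PRIVACY_SCREEN_ENABLED;
}

static const struct drm_privacy_screen_ops foo_ops = {
	.set_sw_state = foo_set_sw_state,
	.get_hw_state = foo_get_hw_state,
};

/* At probe time: */
priv = drm_privacy_screen_register(parent, &foo_ops);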
*/ + const char *provider; +}; + +void drm_privacy_screen_lookup_add(struct drm_privacy_screen_lookup *lookup); +void drm_privacy_screen_lookup_remove(struct drm_privacy_screen_lookup *lookup); + +#if IS_ENABLED(CONFIG_DRM_PRIVACY_SCREEN) && IS_ENABLED(CONFIG_X86) +void drm_privacy_screen_lookup_init(void); +void drm_privacy_screen_lookup_exit(void); +#else +static inline void drm_privacy_screen_lookup_init(void) +{ +} +static inline void drm_privacy_screen_lookup_exit(void) +{ +} +#endif + +#endif diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h index 04c57564c397..48300aa6ca71 100644 --- a/include/drm/drm_probe_helper.h +++ b/include/drm/drm_probe_helper.h @@ -20,6 +20,7 @@ void drm_kms_helper_poll_fini(struct drm_device *dev); bool drm_helper_hpd_irq_event(struct drm_device *dev); bool drm_connector_helper_hpd_irq_event(struct drm_connector *connector); void drm_kms_helper_hotplug_event(struct drm_device *dev); +void drm_kms_helper_connector_hotplug_event(struct drm_connector *connector); void drm_kms_helper_poll_disable(struct drm_device *dev); void drm_kms_helper_poll_enable(struct drm_device *dev); diff --git a/include/drm/drm_sysfs.h b/include/drm/drm_sysfs.h index d454ef617b2c..6273cac44e47 100644 --- a/include/drm/drm_sysfs.h +++ b/include/drm/drm_sysfs.h @@ -11,6 +11,7 @@ int drm_class_device_register(struct device *dev); void drm_class_device_unregister(struct device *dev); void drm_sysfs_hotplug_event(struct drm_device *dev); +void drm_sysfs_connector_hotplug_event(struct drm_connector *connector); void drm_sysfs_connector_status_event(struct drm_connector *connector, struct drm_property *property); #endif diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index f011e4c407f2..bbc22fad8d80 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -28,6 +28,7 @@ #include <linux/dma-fence.h> #include <linux/completion.h> #include <linux/xarray.h> +#include <linux/irq_work.h> #define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000) @@ -286,7 +287,16 @@ struct drm_sched_job { struct list_head list; struct drm_gpu_scheduler *sched; struct drm_sched_fence *s_fence; - struct dma_fence_cb finish_cb; + + /* + * work is used only after finish_cb has been used and will not be + * accessed anymore. 
+ */ + union { + struct dma_fence_cb finish_cb; + struct irq_work work; + }; + uint64_t id; atomic_t karma; enum drm_sched_priority s_priority; diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index c00ac54692d7..baf3d1d3d566 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -666,4 +666,13 @@ INTEL_VGA_DEVICE(0x46C2, info), \ INTEL_VGA_DEVICE(0x46C3, info) +/* RPL-S */ +#define INTEL_RPLS_IDS(info) \ + INTEL_VGA_DEVICE(0xA780, info), \ + INTEL_VGA_DEVICE(0xA781, info), \ + INTEL_VGA_DEVICE(0xA782, info), \ + INTEL_VGA_DEVICE(0xA783, info), \ + INTEL_VGA_DEVICE(0xA788, info), \ + INTEL_VGA_DEVICE(0xA789, info) + #endif /* _I915_PCIIDS_H */ diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h index abfefaaf897a..67530bfef129 100644 --- a/include/drm/intel-gtt.h +++ b/include/drm/intel-gtt.h @@ -4,9 +4,11 @@ #ifndef _DRM_INTEL_GTT_H #define _DRM_INTEL_GTT_H -#include <linux/agp_backend.h> -#include <linux/intel-iommu.h> -#include <linux/kernel.h> +#include <linux/types.h> + +struct agp_bridge_data; +struct pci_dev; +struct sg_table; void intel_gtt_get(u64 *gtt_total, phys_addr_t *mappable_base, diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index cd785cfa3123..c17b2df9178b 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -32,7 +32,6 @@ #define _TTM_BO_API_H_ #include <drm/drm_gem.h> -#include <drm/drm_hashtab.h> #include <drm/drm_vma_manager.h> #include <linux/kref.h> #include <linux/list.h> diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h index 76d1b9119a2b..8074d0f6cae5 100644 --- a/include/drm/ttm/ttm_placement.h +++ b/include/drm/ttm/ttm_placement.h @@ -35,6 +35,17 @@ /* * Memory regions for data placement. + * + * Buffers placed in TTM_PL_SYSTEM are considered under TTM's control and can + * be swapped out whenever TTM thinks it is a good idea. + * In cases where drivers would like to use TTM_PL_SYSTEM as a valid + * placement, they need to be able to handle the issues that arise from + * this manually. + * + * For BOs which reside in system memory but for which the accelerator + * requires direct access (i.e. their usage needs to be synchronized + * between the CPU and accelerator via fences), a new, driver-private + * placement that can handle such scenarios is a good idea.
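The customary way to get such a driver-private placement is to claim one of the TTM_PL_PRIV slots. A hedged sketch — FOO_PL_GART is a made-up name, TTM_PL_PRIV is the real base definition from this header:

/* Hypothetical private domain for fence-synchronized system pages. */
#define FOO_PL_GART	(TTM_PL_PRIV + 0)

static const struct ttm_place foo_gart_place = {
	.fpfn = 0,	/* no placement range restriction */
	.lpfn = 0,
	.mem_type = FOO_PL_GART,
	.flags = 0,
};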
*/ #define TTM_PL_SYSTEM 0 diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 143ce7e0bee1..b28f8790192a 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -974,6 +974,15 @@ static inline int acpi_get_local_address(acpi_handle handle, u32 *addr) return -ENODEV; } +static inline int acpi_register_wakeup_handler(int wake_irq, + bool (*wakeup)(void *context), void *context) +{ + return -ENXIO; +} + +static inline void acpi_unregister_wakeup_handler( + bool (*wakeup)(void *context), void *context) { } + #endif /* !CONFIG_ACPI */ #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC @@ -1173,7 +1182,6 @@ int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname, struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, struct fwnode_handle *child); -struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode); struct acpi_probe_entry; typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *, @@ -1279,12 +1287,6 @@ acpi_get_next_subnode(const struct fwnode_handle *fwnode, } static inline struct fwnode_handle * -acpi_node_get_parent(const struct fwnode_handle *fwnode) -{ - return NULL; -} - -static inline struct fwnode_handle * acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode, struct fwnode_handle *prev) { diff --git a/include/linux/bpf.h b/include/linux/bpf.h index f715e8863f4d..755f38e893be 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -193,7 +193,7 @@ struct bpf_map { atomic64_t usercnt; struct work_struct work; struct mutex freeze_mutex; - u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */ + atomic64_t writecnt; }; static inline bool map_value_has_spin_lock(const struct bpf_map *map) @@ -732,6 +732,7 @@ int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr) struct bpf_trampoline *bpf_trampoline_get(u64 key, struct bpf_attach_target_info *tgt_info); void bpf_trampoline_put(struct bpf_trampoline *tr); +int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs); #define BPF_DISPATCHER_INIT(_name) { \ .mutex = __MUTEX_INITIALIZER(_name.mutex), \ .func = &_name##_func, \ @@ -1352,28 +1353,16 @@ extern struct mutex bpf_stats_enabled_mutex; * kprobes, tracepoints) to prevent deadlocks on map operations as any of * these events can happen inside a region which holds a map bucket lock * and can deadlock on it. - * - * Use the preemption safe inc/dec variants on RT because migrate disable - * is preemptible on RT and preemption in the middle of the RMW operation - * might lead to inconsistent state. Use the raw variants for non RT - * kernels as migrate_disable() maps to preempt_disable() so the slightly - * more expensive save operation can be avoided. 
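The calling pattern for the two helpers defined just below is unchanged by this simplification; a hedged sketch of how a map operation brackets its critical section (the surrounding function is illustrative):

static void foo_map_update(void)
{
	bpf_disable_instrumentation();
	/*
	 * Touch map internals here; a tracing BPF program that fires now
	 * sees bpf_prog_active != 0 on this CPU and backs off instead of
	 * re-entering the map code.
	 */
	bpf_enable_instrumentation();
}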
*/ static inline void bpf_disable_instrumentation(void) { migrate_disable(); - if (IS_ENABLED(CONFIG_PREEMPT_RT)) - this_cpu_inc(bpf_prog_active); - else - __this_cpu_inc(bpf_prog_active); + this_cpu_inc(bpf_prog_active); } static inline void bpf_enable_instrumentation(void) { - if (IS_ENABLED(CONFIG_PREEMPT_RT)) - this_cpu_dec(bpf_prog_active); - else - __this_cpu_dec(bpf_prog_active); + this_cpu_dec(bpf_prog_active); migrate_enable(); } @@ -1419,6 +1408,7 @@ void bpf_map_put(struct bpf_map *map); void *bpf_map_area_alloc(u64 size, int numa_node); void *bpf_map_area_mmapable_alloc(u64 size, int numa_node); void bpf_map_area_free(void *base); +bool bpf_map_write_active(const struct bpf_map *map); void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); int generic_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, diff --git a/include/linux/btf.h b/include/linux/btf.h index 203eef993d76..0e1b6281fd8f 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -245,7 +245,10 @@ struct kfunc_btf_id_set { struct module *owner; }; -struct kfunc_btf_id_list; +struct kfunc_btf_id_list { + struct list_head list; + struct mutex mutex; +}; #ifdef CONFIG_DEBUG_INFO_BTF_MODULES void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l, @@ -254,6 +257,9 @@ void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l, struct kfunc_btf_id_set *s); bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id, struct module *owner); + +extern struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list; +extern struct kfunc_btf_id_list prog_test_kfunc_list; #else static inline void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l, struct kfunc_btf_id_set *s) @@ -268,13 +274,13 @@ static inline bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, { return false; } + +static struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list __maybe_unused; +static struct kfunc_btf_id_list prog_test_kfunc_list __maybe_unused; #endif #define DEFINE_KFUNC_BTF_ID_SET(set, name) \ struct kfunc_btf_id_set name = { LIST_HEAD_INIT(name.list), (set), \ THIS_MODULE } -extern struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list; -extern struct kfunc_btf_id_list prog_test_kfunc_list; - #endif diff --git a/include/linux/cacheflush.h b/include/linux/cacheflush.h new file mode 100644 index 000000000000..fef8b607f97e --- /dev/null +++ b/include/linux/cacheflush.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CACHEFLUSH_H +#define _LINUX_CACHEFLUSH_H + +#include <asm/cacheflush.h> + +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO +void flush_dcache_folio(struct folio *folio); +#endif +#else +static inline void flush_dcache_folio(struct folio *folio) +{ +} +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO 0 +#endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */ + +#endif /* _LINUX_CACHEFLUSH_H */ diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index 2f909ed084c6..4ff37cb763ae 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -3,7 +3,6 @@ #define _LINUX_CACHEINFO_H #include <linux/bitops.h> -#include <linux/cpu.h> #include <linux/cpumask.h> #include <linux/smp.h> diff --git a/include/linux/console.h b/include/linux/console.h index a97f277cfdfa..7cd758a4f44e 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -219,12 +219,6 @@ extern atomic_t ignore_console_lock_warning; #define VESA_HSYNC_SUSPEND 2 #define VESA_POWERDOWN 3 -#ifdef CONFIG_VGA_CONSOLE -extern bool vgacon_text_force(void); -#else 
-static inline bool vgacon_text_force(void) { return false; } -#endif - extern void console_init(void); /* For deferred console takeover */ diff --git a/include/linux/delay.h b/include/linux/delay.h index 8eacf67eb212..039e7e0c7378 100644 --- a/include/linux/delay.h +++ b/include/linux/delay.h @@ -20,6 +20,7 @@ */ #include <linux/math.h> +#include <linux/sched.h> extern unsigned long loops_per_jiffy; @@ -58,7 +59,18 @@ void calibrate_delay(void); void __attribute__((weak)) calibration_delay_done(void); void msleep(unsigned int msecs); unsigned long msleep_interruptible(unsigned int msecs); -void usleep_range(unsigned long min, unsigned long max); +void usleep_range_state(unsigned long min, unsigned long max, + unsigned int state); + +static inline void usleep_range(unsigned long min, unsigned long max) +{ + usleep_range_state(min, max, TASK_UNINTERRUPTIBLE); +} + +static inline void usleep_idle_range(unsigned long min, unsigned long max) +{ + usleep_range_state(min, max, TASK_IDLE); +} static inline void ssleep(unsigned int seconds) { diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h index a498ebcf4993..15e7c5e15d62 100644 --- a/include/linux/device/driver.h +++ b/include/linux/device/driver.h @@ -18,6 +18,7 @@ #include <linux/klist.h> #include <linux/pm.h> #include <linux/device/bus.h> +#include <linux/module.h> /** * enum probe_type - device driver probe type to try diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 42a323a73c61..7ab50076e7a6 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -433,8 +433,8 @@ struct dma_buf { /** @poll: for userspace poll support */ wait_queue_head_t poll; - /** @cb_excl: for userspace poll support */ - /** @cb_shared: for userspace poll support */ + /** @cb_in: for userspace poll support */ + /** @cb_out: for userspace poll support */ struct dma_buf_poll_cb_t { struct dma_fence_cb cb; wait_queue_head_t *poll; diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index a706b7bf51d7..1ea691753bd3 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -264,6 +264,7 @@ void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, void dma_fence_release(struct kref *kref); void dma_fence_free(struct dma_fence *fence); +void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq); /** * dma_fence_put - decreases refcount of the fence diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index dbd235ab447f..eebf04325b34 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -441,32 +441,6 @@ dma_resv_excl_fence(struct dma_resv *obj) } /** - * dma_resv_get_excl_unlocked - get the reservation object's - * exclusive fence, without lock held. - * @obj: the reservation object - * - * If there is an exclusive fence, this atomically increments it's - * reference count and returns it. 
- * - * RETURNS - * The exclusive fence or NULL if none - */ -static inline struct dma_fence * -dma_resv_get_excl_unlocked(struct dma_resv *obj) -{ - struct dma_fence *fence; - - if (!rcu_access_pointer(obj->fence_excl)) - return NULL; - - rcu_read_lock(); - fence = dma_fence_get_rcu_safe(&obj->fence_excl); - rcu_read_unlock(); - - return fence; -} - -/** * dma_resv_shared_list - get the reservation object's shared fence list * @obj: the reservation object * @@ -490,5 +464,6 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src); long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr, unsigned long timeout); bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all); +void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq); #endif /* _LINUX_RESERVATION_H */ diff --git a/include/linux/filter.h b/include/linux/filter.h index 24b7ed2677af..7f1e88e3e2b5 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -6,6 +6,7 @@ #define __LINUX_FILTER_H__ #include <linux/atomic.h> +#include <linux/bpf.h> #include <linux/refcount.h> #include <linux/compat.h> #include <linux/skbuff.h> @@ -26,7 +27,6 @@ #include <asm/byteorder.h> #include <uapi/linux/filter.h> -#include <uapi/linux/bpf.h> struct sk_buff; struct sock; @@ -640,9 +640,6 @@ static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void * This uses migrate_disable/enable() explicitly to document that the * invocation of a BPF program does not require reentrancy protection * against a BPF program which is invoked from a preempting task. - * - * For non RT enabled kernels migrate_disable/enable() maps to - * preempt_disable/enable(), i.e. it disables also preemption. */ static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog, const void *ctx) diff --git a/include/linux/fs.h b/include/linux/fs.h index 1cb616fc1105..bbf812ce89a8 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2518,7 +2518,6 @@ struct file_system_type { #define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */ #define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */ #define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */ -#define FS_THP_SUPPORT 8192 /* Remove once all fs converted */ #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */ int (*init_fs_context)(struct fs_context *); const struct fs_parameter_spec *parameters; diff --git a/include/linux/hid.h b/include/linux/hid.h index 9e067f937dbc..f453be385bd4 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -840,6 +840,11 @@ static inline bool hid_is_using_ll_driver(struct hid_device *hdev, return hdev->ll_driver == driver; } +static inline bool hid_is_usb(struct hid_device *hdev) +{ + return hid_is_using_ll_driver(hdev, &usb_hid_driver); +} + #define PM_HINT_FULLON 1<<5 #define PM_HINT_NORMAL 1<<1 diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 25aff0f2ed0b..39bb9b47fa9c 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -5,12 +5,11 @@ #include <linux/fs.h> #include <linux/kernel.h> #include <linux/bug.h> +#include <linux/cacheflush.h> #include <linux/mm.h> #include <linux/uaccess.h> #include <linux/hardirq.h> -#include <asm/cacheflush.h> - #include "highmem-internal.h" /** @@ -231,10 +230,10 @@ static inline void tag_clear_highpage(struct page *page) * If we pass in a base or tail page, we can zero up to PAGE_SIZE. 
* If we pass in a head page, we can zero up to the size of the compound page. */ -#if defined(CONFIG_HIGHMEM) && defined(CONFIG_TRANSPARENT_HUGEPAGE) +#ifdef CONFIG_HIGHMEM void zero_user_segments(struct page *page, unsigned start1, unsigned end1, unsigned start2, unsigned end2); -#else /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */ +#else static inline void zero_user_segments(struct page *page, unsigned start1, unsigned end1, unsigned start2, unsigned end2) @@ -254,7 +253,7 @@ static inline void zero_user_segments(struct page *page, for (i = 0; i < compound_nr(page); i++) flush_dcache_page(page + i); } -#endif /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */ +#endif static inline void zero_user_segment(struct page *page, unsigned start, unsigned end) @@ -364,4 +363,42 @@ static inline void memzero_page(struct page *page, size_t offset, size_t len) kunmap_local(addr); } +/** + * folio_zero_segments() - Zero two byte ranges in a folio. + * @folio: The folio to write to. + * @start1: The first byte to zero. + * @xend1: One more than the last byte in the first range. + * @start2: The first byte to zero in the second range. + * @xend2: One more than the last byte in the second range. + */ +static inline void folio_zero_segments(struct folio *folio, + size_t start1, size_t xend1, size_t start2, size_t xend2) +{ + zero_user_segments(&folio->page, start1, xend1, start2, xend2); +} + +/** + * folio_zero_segment() - Zero a byte range in a folio. + * @folio: The folio to write to. + * @start: The first byte to zero. + * @xend: One more than the last byte to zero. + */ +static inline void folio_zero_segment(struct folio *folio, + size_t start, size_t xend) +{ + zero_user_segments(&folio->page, start, xend, 0, 0); +} + +/** + * folio_zero_range() - Zero a byte range in a folio. + * @folio: The folio to write to. + * @start: The first byte to zero. + * @length: The number of bytes to zero. + */ +static inline void folio_zero_range(struct folio *folio, + size_t start, size_t length) +{ + zero_user_segments(&folio->page, start, start + length, 0, 0); +} + #endif /* _LINUX_HIGHMEM_H */ diff --git a/include/linux/host1x.h b/include/linux/host1x.h index 7bccf589aba7..e8dc5bc41f79 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -7,6 +7,8 @@ #define __LINUX_HOST1X_H #include <linux/device.h> +#include <linux/dma-direction.h> +#include <linux/spinlock.h> #include <linux/types.h> enum host1x_class { @@ -15,6 +17,8 @@ enum host1x_class { HOST1X_CLASS_GR2D_SB = 0x52, HOST1X_CLASS_VIC = 0x5D, HOST1X_CLASS_GR3D = 0x60, + HOST1X_CLASS_NVDEC = 0xF0, + HOST1X_CLASS_NVDEC1 = 0xF5, }; struct host1x; @@ -24,6 +28,28 @@ struct iommu_group; u64 host1x_get_dma_mask(struct host1x *host1x); /** + * struct host1x_bo_cache - host1x buffer object cache + * @mappings: list of mappings + * @lock: synchronizes accesses to the list of mappings + */ +struct host1x_bo_cache { + struct list_head mappings; + struct mutex lock; +}; + +static inline void host1x_bo_cache_init(struct host1x_bo_cache *cache) +{ + INIT_LIST_HEAD(&cache->mappings); + mutex_init(&cache->lock); +} + +static inline void host1x_bo_cache_destroy(struct host1x_bo_cache *cache) +{ + /* XXX warn if not empty? 
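*/ + mutex_destroy(&cache->lock); +} +

As a usage sketch of the new host1x mapping-cache API above (a hedged illustration, not code from this series: "dev" and "bo" are assumed to exist, ERR_PTR-style error returns are assumed, and real error handling is elided):

    struct host1x_bo_cache cache;
    struct host1x_bo_mapping *map;

    host1x_bo_cache_init(&cache);
    /* Pin through the cache so repeated pins can reuse the same mapping. */
    map = host1x_bo_pin(dev, bo, DMA_TO_DEVICE, &cache);
    if (!IS_ERR(map))
            host1x_bo_unpin(map);           /* drops the mapping reference */
    /* All mappings must be gone before the cache is destroyed. */
    host1x_bo_cache_destroy(&cache);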
+/** * struct host1x_client_ops - host1x client operations * @early_init: host1x client early initialization code * @init: host1x client initialization code @@ -73,6 +99,8 @@ struct host1x_client { struct host1x_client *parent; unsigned int usecount; struct mutex lock; + + struct host1x_bo_cache cache; }; /* @@ -82,23 +110,48 @@ struct host1x_client { struct host1x_bo; struct sg_table; +struct host1x_bo_mapping { + struct kref ref; + struct dma_buf_attachment *attach; + enum dma_data_direction direction; + struct list_head list; + struct host1x_bo *bo; + struct sg_table *sgt; + unsigned int chunks; + struct device *dev; + dma_addr_t phys; + size_t size; + + struct host1x_bo_cache *cache; + struct list_head entry; +}; + +static inline struct host1x_bo_mapping *to_host1x_bo_mapping(struct kref *ref) +{ + return container_of(ref, struct host1x_bo_mapping, ref); +} + struct host1x_bo_ops { struct host1x_bo *(*get)(struct host1x_bo *bo); void (*put)(struct host1x_bo *bo); - struct sg_table *(*pin)(struct device *dev, struct host1x_bo *bo, - dma_addr_t *phys); - void (*unpin)(struct device *dev, struct sg_table *sgt); + struct host1x_bo_mapping *(*pin)(struct device *dev, struct host1x_bo *bo, + enum dma_data_direction dir); + void (*unpin)(struct host1x_bo_mapping *map); void *(*mmap)(struct host1x_bo *bo); void (*munmap)(struct host1x_bo *bo, void *addr); }; struct host1x_bo { const struct host1x_bo_ops *ops; + struct list_head mappings; + spinlock_t lock; }; static inline void host1x_bo_init(struct host1x_bo *bo, const struct host1x_bo_ops *ops) { + INIT_LIST_HEAD(&bo->mappings); + spin_lock_init(&bo->lock); bo->ops = ops; } @@ -112,18 +165,10 @@ static inline void host1x_bo_put(struct host1x_bo *bo) bo->ops->put(bo); } -static inline struct sg_table *host1x_bo_pin(struct device *dev, - struct host1x_bo *bo, - dma_addr_t *phys) -{ - return bo->ops->pin(dev, bo, phys); -} - -static inline void host1x_bo_unpin(struct device *dev, struct host1x_bo *bo, - struct sg_table *sgt) -{ - bo->ops->unpin(dev, sgt); -} +struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo, + enum dma_data_direction dir, + struct host1x_bo_cache *cache); +void host1x_bo_unpin(struct host1x_bo_mapping *map); static inline void *host1x_bo_mmap(struct host1x_bo *bo) { @@ -181,6 +226,7 @@ struct host1x_job; struct host1x_channel *host1x_channel_request(struct host1x_client *client); struct host1x_channel *host1x_channel_get(struct host1x_channel *channel); +void host1x_channel_stop(struct host1x_channel *channel); void host1x_channel_put(struct host1x_channel *channel); int host1x_job_submit(struct host1x_job *job); diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index c137396129db..ba025ae27882 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h @@ -128,6 +128,13 @@ static inline void resv_map_dup_hugetlb_cgroup_uncharge_info( css_get(resv_map->css); } +static inline void resv_map_put_hugetlb_cgroup_uncharge_info( + struct resv_map *resv_map) +{ + if (resv_map->css) + css_put(resv_map->css); +} + extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, struct hugetlb_cgroup **ptr); extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages, @@ -211,6 +218,11 @@ static inline void resv_map_dup_hugetlb_cgroup_uncharge_info( { } +static inline void resv_map_put_hugetlb_cgroup_uncharge_info( + struct resv_map *resv_map) +{ +} + static inline int
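hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, struct hugetlb_cgroup **ptr) {

The new resv_map_put_hugetlb_cgroup_uncharge_info() helper exists to balance the css_get() taken by the _dup_ variant on error paths. A minimal sketch of the intended pairing (the wrapper function and its failure condition are hypothetical):

    static int dup_resv_refs(struct resv_map *resv_map, bool failed)
    {
            resv_map_dup_hugetlb_cgroup_uncharge_info(resv_map);    /* css_get() */
            if (failed) {
                    /* Undo the reference instead of leaking the css. */
                    resv_map_put_hugetlb_cgroup_uncharge_info(resv_map);
                    return -ENOMEM;
            }
            return 0;
    }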
diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h index aee8ff4739b1..f45f13304add 100644 --- a/include/linux/intel-ish-client-if.h +++ b/include/linux/intel-ish-client-if.h @@ -9,7 +9,7 @@ #define _INTEL_ISH_CLIENT_IF_H_ #include <linux/device.h> -#include <linux/uuid.h> +#include <linux/mod_devicetable.h> struct ishtp_cl_device; struct ishtp_device; @@ -40,7 +40,7 @@ enum cl_state { struct ishtp_cl_driver { struct device_driver driver; const char *name; - const guid_t *guid; + const struct ishtp_device_id *id; int (*probe)(struct ishtp_cl_device *dev); void (*remove)(struct ishtp_cl_device *dev); int (*reset)(struct ishtp_cl_device *dev); diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index 05e22770af51..b75395ec8d52 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h @@ -131,6 +131,16 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns) return ns; } +static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns) +{ + if (ns) { + if (refcount_inc_not_zero(&ns->ns.count)) + return ns; + } + + return NULL; +} + extern void put_ipc_ns(struct ipc_namespace *ns); #else static inline struct ipc_namespace *copy_ipcs(unsigned long flags, @@ -147,6 +157,11 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns) return ns; } +static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns) +{ + return ns; +} + static inline void put_ipc_ns(struct ipc_namespace *ns) { } diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index e974caf39d3e..8c8f7a4d93af 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -153,6 +153,8 @@ struct kretprobe { struct kretprobe_holder *rph; }; +#define KRETPROBE_MAX_DATA_SIZE 4096 + struct kretprobe_instance { union { struct freelist_node freelist; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 9e0667e3723e..c310648cc8f1 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -874,7 +874,7 @@ void kvm_release_pfn_dirty(kvm_pfn_t pfn); void kvm_set_pfn_dirty(kvm_pfn_t pfn); void kvm_set_pfn_accessed(kvm_pfn_t pfn); -void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache); +void kvm_release_pfn(kvm_pfn_t pfn, bool dirty); int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, int len); int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); @@ -950,12 +950,8 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn); kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn); int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map); -int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map, - struct gfn_to_pfn_cache *cache, bool atomic); struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn); void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty); -int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, - struct gfn_to_pfn_cache *cache, bool dirty, bool atomic); unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn); unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable); int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
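int offset,

The new get_ipc_ns_not_zero() above only takes a reference if the namespace is not already on its way down, so a lookup racing with teardown can fail gracefully. A hedged sketch of the expected calling pattern ("candidate" is a hypothetical pointer obtained without holding a reference):

    struct ipc_namespace *ns = get_ipc_ns_not_zero(candidate);

    if (ns) {
            /* The reference pins ns; safe to dereference here. */
            put_ipc_ns(ns);
    } else {
            /* Refcount already hit zero: treat the namespace as gone. */
    }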
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index 2237abb93ccd..234eab059839 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -53,13 +53,6 @@ struct gfn_to_hva_cache { struct kvm_memory_slot *memslot; }; -struct gfn_to_pfn_cache { - u64 generation; - gfn_t gfn; - kvm_pfn_t pfn; - bool dirty; -}; - #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE /* * Memory caches are used to preallocate memory ahead of various MMU flows, diff --git a/include/linux/mhi.h b/include/linux/mhi.h index 723985879035..a5cc4cdf9cc8 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -664,6 +664,19 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl); int mhi_pm_resume(struct mhi_controller *mhi_cntrl); /** + * mhi_pm_resume_force - Force resume MHI from suspended state + * @mhi_cntrl: MHI controller + * + * Resume the device irrespective of its MHI state. As per the MHI spec, devices + * have to be in M3 state during resume. But some devices seem to be in an + * MHI state other than M3, yet they continue working fine if allowed. + * This API is intended to be used for such devices. + * + * Return: 0 if the resume succeeds, a negative error code otherwise + */ +int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl); + +/** * mhi_download_rddm_image - Download ramdump image from device for * debugging purpose. * @mhi_cntrl: MHI controller diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h index 97afcea39a7b..8b18fe9771f9 100644 --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h @@ -145,13 +145,13 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw, GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, \ ESW_TUN_OPTS_OFFSET + 1) -u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev); +u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev); u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev); struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw); #else /* CONFIG_MLX5_ESWITCH */ -static inline u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev) +static inline u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev) { return MLX5_ESWITCH_NONE; } diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 3636df90899a..fbaab440a484 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -9698,7 +9698,10 @@ struct mlx5_ifc_mcam_access_reg_bits { u8 regs_84_to_68[0x11]; u8 tracer_registers[0x4]; - u8 regs_63_to_32[0x20]; + u8 regs_63_to_46[0x12]; + u8 mrtc[0x1]; + u8 regs_44_to_32[0xd]; + u8 regs_31_to_0[0x20]; }; diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index bb8c6f5f19bc..c3a6e6209600 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -105,7 +105,18 @@ struct page { struct page_pool *pp; unsigned long _pp_mapping_pad; unsigned long dma_addr; - atomic_long_t pp_frag_count; + union { + /** + * dma_addr_upper: might require a 64-bit + * value on 32-bit architectures. + */ + unsigned long dma_addr_upper; + /** + * For frag page support, not supported in + * 32-bit architectures with 64-bit DMA.
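+ */ + atomic_long_t pp_frag_count; + }; }; struct { /* slab, slob and slub */ union {

The union above lets 32-bit architectures with 64-bit DMA store the upper half of a page_pool DMA address in the word otherwise used for the fragment count. A sketch of how the two words combine, mirroring the page_pool accessors changed later in this series (the helpers below are illustrative, not the actual implementation):

    /* 32-bit arch, 64-bit dma_addr_t: split the address across both words. */
    static void example_set_dma_addr(struct page *page, dma_addr_t addr)
    {
            page->dma_addr = lower_32_bits(addr);
            page->dma_addr_upper = upper_32_bits(addr);
    }

    static dma_addr_t example_get_dma_addr(const struct page *page)
    {
            return ((dma_addr_t)page->dma_addr_upper << 32) | page->dma_addr;
    }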
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index ae2e75d15b21..4bb71979a8fd 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -895,4 +895,18 @@ struct dfl_device_id { kernel_ulong_t driver_data; }; +/* ISHTP (Integrated Sensor Hub Transport Protocol) */ + +#define ISHTP_MODULE_PREFIX "ishtp:" + +/** + * struct ishtp_device_id - ISHTP device identifier + * @guid: GUID of the device. + * @driver_data: pointer to driver specific data + */ +struct ishtp_device_id { + guid_t guid; + kernel_ulong_t driver_data; +}; + #endif /* LINUX_MOD_DEVICETABLE_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3ec42495a43a..be5cb3360b94 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4404,7 +4404,8 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) { spin_lock(&txq->_xmit_lock); - txq->xmit_lock_owner = cpu; + /* Pairs with READ_ONCE() in __dev_queue_xmit() */ + WRITE_ONCE(txq->xmit_lock_owner, cpu); } static inline bool __netif_tx_acquire(struct netdev_queue *txq) @@ -4421,26 +4422,32 @@ static inline void __netif_tx_release(struct netdev_queue *txq) static inline void __netif_tx_lock_bh(struct netdev_queue *txq) { spin_lock_bh(&txq->_xmit_lock); - txq->xmit_lock_owner = smp_processor_id(); + /* Pairs with READ_ONCE() in __dev_queue_xmit() */ + WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); } static inline bool __netif_tx_trylock(struct netdev_queue *txq) { bool ok = spin_trylock(&txq->_xmit_lock); - if (likely(ok)) - txq->xmit_lock_owner = smp_processor_id(); + + if (likely(ok)) { + /* Pairs with READ_ONCE() in __dev_queue_xmit() */ + WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); + } return ok; } static inline void __netif_tx_unlock(struct netdev_queue *txq) { - txq->xmit_lock_owner = -1; + /* Pairs with READ_ONCE() in __dev_queue_xmit() */ + WRITE_ONCE(txq->xmit_lock_owner, -1); spin_unlock(&txq->_xmit_lock); } static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) { - txq->xmit_lock_owner = -1; + /* Pairs with READ_ONCE() in __dev_queue_xmit() */ + WRITE_ONCE(txq->xmit_lock_owner, -1); spin_unlock_bh(&txq->_xmit_lock); } diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 52ec4b5e5615..b5f14d581113 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -686,13 +686,13 @@ static inline bool test_set_page_writeback(struct page *page) __PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY) -/* Whether there are one or multiple pages in a folio */ -static inline bool folio_test_single(struct folio *folio) -{ - return !folio_test_head(folio); -} - -static inline bool folio_test_multi(struct folio *folio) +/** + * folio_test_large() - Does this folio contain more than one page? + * @folio: The folio to test. + * + * Return: True if the folio is larger than one page.
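+ */ +static inline bool folio_test_large(struct folio *folio) { return folio_test_head(folio); }

folio_test_large() replaces the folio_test_single()/folio_test_multi() pair with a single positive predicate. A short sketch of the kind of caller this simplifies (the function itself is illustrative; folio_nr_pages() from the folio API is assumed to be available):

    static size_t folio_bytes(struct folio *folio)
    {
            if (!folio_test_large(folio))
                    return PAGE_SIZE;                       /* single page */
            return folio_nr_pages(folio) * PAGE_SIZE;       /* compound head */
    }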
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 1a0c646eb6ff..605246452305 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -84,7 +84,7 @@ enum mapping_flags { AS_EXITING = 4, /* final truncate in progress */ /* writeback related tags are not used */ AS_NO_WRITEBACK_TAGS = 5, - AS_THP_SUPPORT = 6, /* THPs supported */ + AS_LARGE_FOLIO_SUPPORT = 6, }; /** @@ -176,9 +176,25 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) m->gfp_mask = mask; } -static inline bool mapping_thp_support(struct address_space *mapping) +/** + * mapping_set_large_folios() - Indicate the file supports large folios. + * @mapping: The file. + * + * The filesystem should call this function in its inode constructor to + * indicate that the VFS can use large folios to cache the contents of + * the file. + * + * Context: This should not be called while the inode is active as it + * is non-atomic. + */ +static inline void mapping_set_large_folios(struct address_space *mapping) +{ + __set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags); +} + +static inline bool mapping_large_folio_support(struct address_space *mapping) { - return test_bit(AS_THP_SUPPORT, &mapping->flags); + return test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags); } static inline int filemap_nr_thps(struct address_space *mapping) @@ -193,7 +209,7 @@ static inline int filemap_nr_thps(struct address_space *mapping) static inline void filemap_nr_thps_inc(struct address_space *mapping) { #ifdef CONFIG_READ_ONLY_THP_FOR_FS - if (!mapping_thp_support(mapping)) + if (!mapping_large_folio_support(mapping)) atomic_inc(&mapping->nr_thps); #else WARN_ON_ONCE(1); @@ -203,7 +219,7 @@ static inline void filemap_nr_thps_inc(struct address_space *mapping) static inline void filemap_nr_thps_dec(struct address_space *mapping) { #ifdef CONFIG_READ_ONLY_THP_FOR_FS - if (!mapping_thp_support(mapping)) + if (!mapping_large_folio_support(mapping)) atomic_dec(&mapping->nr_thps); #else WARN_ON_ONCE(1); diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index b31d3f3312ce..d73a1c08c3e3 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -51,9 +51,9 @@ #define _LINUX_PERCPU_REFCOUNT_H #include <linux/atomic.h> -#include <linux/kernel.h> #include <linux/percpu.h> #include <linux/rcupdate.h> +#include <linux/types.h> #include <linux/gfp.h> struct percpu_ref; diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 98a9371133f8..ae4004e7957e 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -6,7 +6,6 @@ #include <linux/preempt.h> #include <linux/smp.h> #include <linux/cpumask.h> -#include <linux/printk.h> #include <linux/pfn.h> #include <linux/init.h> diff --git a/include/linux/phy.h b/include/linux/phy.h index 96e43fbb2dd8..cbf03a5f9cf5 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -538,11 +538,12 @@ struct macsec_ops; * @mac_managed_pm: Set true if MAC driver takes of suspending/resuming PHY * @state: State of the PHY for management purposes * @dev_flags: Device-specific flags used by the PHY driver. - * Bits [15:0] are free to use by the PHY driver to communicate - * driver specific behavior. - * Bits [23:16] are currently reserved for future use. - * Bits [31:24] are reserved for defining generic - * PHY driver behavior.
+ * + * - Bits [15:0] are free to use by the PHY driver to communicate + * driver specific behavior. + * - Bits [23:16] are currently reserved for future use. + * - Bits [31:24] are reserved for defining generic + * PHY driver behavior. * @irq: IRQ number of the PHY's interrupt (-1 if none) * @phy_timer: The timer for handling the state machine * @phylink: Pointer to phylink instance for this PHY diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 222da43b7096..eddd66d426ca 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -129,7 +129,7 @@ static inline bool pm_runtime_suspended(struct device *dev) * pm_runtime_active - Check whether or not a device is runtime-active. * @dev: Target device. * - * Return %true if runtime PM is enabled for @dev and its runtime PM status is + * Return %true if runtime PM is disabled for @dev or its runtime PM status is * %RPM_ACTIVE, or %false otherwise. * * Note that the return value of this function can only be trusted if it is diff --git a/include/linux/printk.h b/include/linux/printk.h index 85b656f82d75..9497f6b98339 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -198,6 +198,7 @@ void dump_stack_print_info(const char *log_lvl); void show_regs_print_info(const char *log_lvl); extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold; extern asmlinkage void dump_stack(void) __cold; +void printk_trigger_flush(void); #else static inline __printf(1, 0) int vprintk(const char *s, va_list args) @@ -274,6 +275,9 @@ static inline void dump_stack_lvl(const char *log_lvl) static inline void dump_stack(void) { } +static inline void printk_trigger_flush(void) +{ +} #endif #ifdef CONFIG_SMP diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h index ae04968a3a47..9afd34a2d36c 100644 --- a/include/linux/ptp_classify.h +++ b/include/linux/ptp_classify.h @@ -37,6 +37,7 @@ #define PTP_MSGTYPE_PDELAY_RESP 0x3 #define PTP_EV_PORT 319 +#define PTP_GEN_PORT 320 #define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */ #define OFF_PTP_SOURCE_UUID 22 /* PTPv1 only */ diff --git a/include/linux/pwm.h b/include/linux/pwm.h index e6dac95e4960..9771a0761a40 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -414,6 +414,8 @@ struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *pc, const struct of_phandle_args *args); +struct pwm_device *of_pwm_single_xlate(struct pwm_chip *pc, + const struct of_phandle_args *args); struct pwm_device *pwm_get(struct device *dev, const char *con_id); struct pwm_device *of_pwm_get(struct device *dev, struct device_node *np, diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index bd7a73db2e66..54cf566616ae 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -499,7 +499,8 @@ struct regulator_irq_data { * best to shut-down regulator(s) or reboot the SOC if error * handling is repeatedly failing. If fatal_cnt is given the IRQ * handling is aborted if it fails for fatal_cnt times and die() - * callback (if populated) or BUG() is called to try to prevent + * callback (if populated) is called. If die() is not populated + * poweroff for the system is attempted in order to prevent any * further damage. * @reread_ms: The time which is waited before attempting to re-read status * at the worker if IC reading fails. 
Immediate re-read is done @@ -516,11 +517,12 @@ struct regulator_irq_data { * @data: Driver private data pointer which will be passed as such to * the renable, map_event and die callbacks in regulator_irq_data. * @die: Protection callback. If IC status reading or recovery actions - * fail fatal_cnt times this callback or BUG() is called. This - * callback should implement a final protection attempt like - * disabling the regulator. If protection succeeded this may - * return 0. If anything else is returned the core assumes final - * protection failed and calls BUG() as a last resort. + * fail fatal_cnt times this callback is called or system is + * powered off. This callback should implement a final protection + * attempt like disabling the regulator. If protection succeeded + * die() may return 0. If anything else is returned the core + * assumes final protection failed and attempts to perform a + * poweroff as a last resort. * @map_event: Driver callback to map IRQ status into regulator devices with * events / errors. NOTE: callback MUST initialize both the * errors and notifs for all rdevs which it signals having diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h index 6c9f19a33865..ce3c58286062 100644 --- a/include/linux/sched/cputime.h +++ b/include/linux/sched/cputime.h @@ -18,15 +18,16 @@ #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN -extern void task_cputime(struct task_struct *t, +extern bool task_cputime(struct task_struct *t, u64 *utime, u64 *stime); extern u64 task_gtime(struct task_struct *t); #else -static inline void task_cputime(struct task_struct *t, +static inline bool task_cputime(struct task_struct *t, u64 *utime, u64 *stime) { *utime = t->utime; *stime = t->stime; + return false; } static inline u64 task_gtime(struct task_struct *t) diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 23505394ef70..33a50642cf41 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -352,6 +352,7 @@ extern __must_check bool do_notify_parent(struct task_struct *, int); extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); extern void force_sig(int); extern void force_fatal_sig(int); +extern void force_exit_sig(int); extern int send_sig(int, struct task_struct *, int); extern int zap_other_threads(struct task_struct *p); extern struct sigqueue *sigqueue_alloc(void); diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index ba88a6987400..058d7f371e25 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -158,7 +158,7 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring * subscriptions and synchronises with wait4(). Also used in procfs. Also * pins the final release of task.io_context. Also protects ->cpuset and - * ->cgroup.subsys[]. And ->vfork_done. + * ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist. * * Nests both inside and outside of read_lock(&tasklist_lock). 
diff --git a/include/linux/sdb.h b/include/linux/sdb.h deleted file mode 100644 index a2404a2bbd10..000000000000 --- a/include/linux/sdb.h +++ /dev/null @@ -1,160 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * This is the official version 1.1 of sdb.h - */ -#ifndef __SDB_H__ -#define __SDB_H__ -#ifdef __KERNEL__ -#include <linux/types.h> -#else -#include <stdint.h> -#endif - -/* - * All structures are 64 bytes long and are expected - * to live in an array, one for each interconnect. - * Most fields of the structures are shared among the - * various types, and most-specific fields are at the - * beginning (for alignment reasons, and to keep the - * magic number at the head of the interconnect record - */ - -/* Product, 40 bytes at offset 24, 8-byte aligned - * - * device_id is vendor-assigned; version is device-specific, - * date is hex (e.g 0x20120501), name is UTF-8, blank-filled - * and not terminated with a 0 byte. - */ -struct sdb_product { - uint64_t vendor_id; /* 0x18..0x1f */ - uint32_t device_id; /* 0x20..0x23 */ - uint32_t version; /* 0x24..0x27 */ - uint32_t date; /* 0x28..0x2b */ - uint8_t name[19]; /* 0x2c..0x3e */ - uint8_t record_type; /* 0x3f */ -}; - -/* - * Component, 56 bytes at offset 8, 8-byte aligned - * - * The address range is first to last, inclusive - * (for example 0x100000 - 0x10ffff) - */ -struct sdb_component { - uint64_t addr_first; /* 0x08..0x0f */ - uint64_t addr_last; /* 0x10..0x17 */ - struct sdb_product product; /* 0x18..0x3f */ -}; - -/* Type of the SDB record */ -enum sdb_record_type { - sdb_type_interconnect = 0x00, - sdb_type_device = 0x01, - sdb_type_bridge = 0x02, - sdb_type_integration = 0x80, - sdb_type_repo_url = 0x81, - sdb_type_synthesis = 0x82, - sdb_type_empty = 0xFF, -}; - -/* Type 0: interconnect (first of the array) - * - * sdb_records is the length of the table including this first - * record, version is 1. The bus type is enumerated later. - */ -#define SDB_MAGIC 0x5344422d /* "SDB-" */ -struct sdb_interconnect { - uint32_t sdb_magic; /* 0x00-0x03 */ - uint16_t sdb_records; /* 0x04-0x05 */ - uint8_t sdb_version; /* 0x06 */ - uint8_t sdb_bus_type; /* 0x07 */ - struct sdb_component sdb_component; /* 0x08-0x3f */ -}; - -/* Type 1: device - * - * class is 0 for "custom device", other values are - * to be standardized; ABI version is for the driver, - * bus-specific bits are defined by each bus (see below) - */ -struct sdb_device { - uint16_t abi_class; /* 0x00-0x01 */ - uint8_t abi_ver_major; /* 0x02 */ - uint8_t abi_ver_minor; /* 0x03 */ - uint32_t bus_specific; /* 0x04-0x07 */ - struct sdb_component sdb_component; /* 0x08-0x3f */ -}; - -/* Type 2: bridge - * - * child is the address of the nested SDB table - */ -struct sdb_bridge { - uint64_t sdb_child; /* 0x00-0x07 */ - struct sdb_component sdb_component; /* 0x08-0x3f */ -}; - -/* Type 0x80: integration - * - * all types with bit 7 set are meta-information, so - * software can ignore the types it doesn't know.
Here we - * just provide product information for an aggregate device - */ -struct sdb_integration { - uint8_t reserved[24]; /* 0x00-0x17 */ - struct sdb_product product; /* 0x08-0x3f */ -}; - -/* Type 0x81: Top module repository url - * - * again, an informative field that software can ignore - */ -struct sdb_repo_url { - uint8_t repo_url[63]; /* 0x00-0x3e */ - uint8_t record_type; /* 0x3f */ -}; - -/* Type 0x82: Synthesis tool information - * - * this informative record - */ -struct sdb_synthesis { - uint8_t syn_name[16]; /* 0x00-0x0f */ - uint8_t commit_id[16]; /* 0x10-0x1f */ - uint8_t tool_name[8]; /* 0x20-0x27 */ - uint32_t tool_version; /* 0x28-0x2b */ - uint32_t date; /* 0x2c-0x2f */ - uint8_t user_name[15]; /* 0x30-0x3e */ - uint8_t record_type; /* 0x3f */ -}; - -/* Type 0xff: empty - * - * this allows keeping empty slots during development, - * so they can be filled later with minimal efforts and - * no misleading description is ever shipped -- hopefully. - * It can also be used to pad a table to a desired length. - */ -struct sdb_empty { - uint8_t reserved[63]; /* 0x00-0x3e */ - uint8_t record_type; /* 0x3f */ -}; - -/* The type of bus, for bus-specific flags */ -enum sdb_bus_type { - sdb_wishbone = 0x00, - sdb_data = 0x01, -}; - -#define SDB_WB_WIDTH_MASK 0x0f -#define SDB_WB_ACCESS8 0x01 -#define SDB_WB_ACCESS16 0x02 -#define SDB_WB_ACCESS32 0x04 -#define SDB_WB_ACCESS64 0x08 -#define SDB_WB_LITTLE_ENDIAN 0x80 - -#define SDB_DATA_READ 0x04 -#define SDB_DATA_WRITE 0x02 -#define SDB_DATA_EXEC 0x01 - -#endif /* __SDB_H__ */ diff --git a/include/linux/siphash.h b/include/linux/siphash.h index bf21591a9e5e..0cda61855d90 100644 --- a/include/linux/siphash.h +++ b/include/linux/siphash.h @@ -27,9 +27,7 @@ static inline bool siphash_key_is_zero(const siphash_key_t *key) } u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key); -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key); -#endif u64 siphash_1u64(const u64 a, const siphash_key_t *key); u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key); @@ -82,10 +80,9 @@ static inline u64 ___siphash_aligned(const __le64 *data, size_t len, static inline u64 siphash(const void *data, size_t len, const siphash_key_t *key) { -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS - if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT)) + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || + !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT)) return __siphash_unaligned(data, len, key); -#endif return ___siphash_aligned(data, len, key); } @@ -96,10 +93,8 @@ typedef struct { u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key); -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_unaligned(const void *data, size_t len, const hsiphash_key_t *key); -#endif u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key); u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key); @@ -135,10 +130,9 @@ static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len, static inline u32 hsiphash(const void *data, size_t len, const hsiphash_key_t *key) { -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS - if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT)) + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || + !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT)) return __hsiphash_unaligned(data, len, key); -#endif return ___hsiphash_aligned(data, len, key); } diff --git 
a/include/linux/skbuff.h b/include/linux/skbuff.h index 686a666d073d..c8cb7e697d47 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -4226,7 +4226,7 @@ static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr, return; } - if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) { + if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) { __skb_checksum_complete(skb); skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data); } diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 50453b287615..2d167ac3452c 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -673,7 +673,7 @@ struct trace_event_file { #define PERF_MAX_TRACE_SIZE 8192 -#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ +#define MAX_FILTER_STR_VAL 256U /* Should handle KSYM_SYMBOL_LEN */ enum event_trigger_type { ETT_NONE = (0), diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 44d0e09da2d9..41edbc01ffa4 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -152,7 +152,6 @@ size_t virtio_max_dma_size(struct virtio_device *vdev); * @feature_table_size: number of entries in the feature table array. * @feature_table_legacy: same as feature_table but when working in legacy mode. * @feature_table_size_legacy: number of entries in feature table legacy array. - * @suppress_used_validation: set to not have core validate used length * @probe: the function to call when a device is found. Returns 0 or -errno. * @scan: optional function to call after successful probe; intended * for virtio-scsi to invoke a scan. @@ -169,7 +168,6 @@ struct virtio_driver { unsigned int feature_table_size; const unsigned int *feature_table_legacy; unsigned int feature_table_size_legacy; - bool suppress_used_validation; int (*validate)(struct virtio_device *dev); int (*probe)(struct virtio_device *dev); void (*scan)(struct virtio_device *dev); diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index b465f8f3e554..04e87f4b9417 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -120,10 +120,15 @@ retry: if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size); + unsigned int nh_off = p_off; struct skb_shared_info *shinfo = skb_shinfo(skb); + /* UFO may not include transport header in gso_size. */ + if (gso_type & SKB_GSO_UDP) + nh_off -= thlen; + /* Too small packets are not really GSO ones. 
*/ - if (skb->len - p_off > gso_size) { + if (skb->len - nh_off > gso_size) { shinfo->gso_size = gso_size; shinfo->gso_type = gso_type; diff --git a/include/linux/wait.h b/include/linux/wait.h index 2d0df57c9902..851e07da2583 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -217,6 +217,7 @@ void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr); void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode); +void __wake_up_pollfree(struct wait_queue_head *wq_head); #define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL) #define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL) @@ -245,6 +246,31 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode); #define wake_up_interruptible_sync_poll_locked(x, m) \ __wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m)) +/** + * wake_up_pollfree - signal that a polled waitqueue is going away + * @wq_head: the wait queue head + * + * In the very rare cases where a ->poll() implementation uses a waitqueue whose + * lifetime is tied to a task rather than to the 'struct file' being polled, + * this function must be called before the waitqueue is freed so that + * non-blocking polls (e.g. epoll) are notified that the queue is going away. + * + * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via + * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU. + */ +static inline void wake_up_pollfree(struct wait_queue_head *wq_head) +{ + /* + * For performance reasons, we don't always take the queue lock here. + * Therefore, we might race with someone removing the last entry from + * the queue, and proceed while they still hold the queue lock. + * However, rcu_read_lock() is required to be held in such cases, so we + * can safely proceed with an RCU-delayed free. + */ + if (waitqueue_active(wq_head)) + __wake_up_pollfree(wq_head); +} + #define ___wait_cond_timeout(condition) \ ({ \ bool __cond = (condition); \ diff --git a/include/net/bond_alb.h b/include/net/bond_alb.h index f6af76c87a6c..191c36afa1f4 100644 --- a/include/net/bond_alb.h +++ b/include/net/bond_alb.h @@ -126,7 +126,7 @@ struct tlb_slave_info { struct alb_bond_info { struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */ u32 unbalanced_load; - int tx_rebalance_counter; + atomic_t tx_rebalance_counter; int lp_counter; /* -------- rlb parameters -------- */ int rlb_enabled; diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index 4202c609bb0b..c4898fcbf923 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -133,6 +133,19 @@ static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb) if (unlikely(READ_ONCE(sk->sk_napi_id) != skb->napi_id)) WRITE_ONCE(sk->sk_napi_id, skb->napi_id); #endif + sk_rx_queue_update(sk, skb); +} + +/* Variant of sk_mark_napi_id() for passive flow setup, + * as sk->sk_napi_id and sk->sk_rx_queue_mapping content + * needs to be set. 
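+ */ +static inline void sk_mark_napi_id_set(struct sock *sk, + const struct sk_buff *skb) +{ +#ifdef CONFIG_NET_RX_BUSY_POLL + WRITE_ONCE(sk->sk_napi_id, skb->napi_id); +#endif sk_rx_queue_set(sk, skb); }

The wake_up_pollfree() helper added above comes with a strict two-step contract: notify pollers, then RCU-delay the free. A minimal sketch under those rules ("struct my_ctx" and its release path are hypothetical):

    struct my_ctx {
            struct wait_queue_head wqh;     /* handed out via ->poll() */
    };

    static void my_ctx_release(struct my_ctx *ctx)
    {
            /* Step 1: detach any non-blocking pollers (e.g. epoll). */
            wake_up_pollfree(&ctx->wqh);
            /* Step 2: RCU-delay the free, as the kerneldoc requires. */
            synchronize_rcu();
            kfree(ctx);
    }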
diff --git a/include/net/dst_cache.h b/include/net/dst_cache.h index 67634675e919..df6622a5fe98 100644 --- a/include/net/dst_cache.h +++ b/include/net/dst_cache.h @@ -80,6 +80,17 @@ static inline void dst_cache_reset(struct dst_cache *dst_cache) } /** + * dst_cache_reset_now - invalidate the cache contents immediately + * @dst_cache: the cache + * + * The caller must be sure there are no concurrent users, as this frees + * all dst_cache users immediately, rather than waiting for the next + * per-cpu usage like dst_cache_reset does. Most callers should use the + * higher speed lazily-freed dst_cache_reset function instead. + */ +void dst_cache_reset_now(struct dst_cache *dst_cache); + +/** * dst_cache_init - initialize the cache, allocating the required storage * @dst_cache: the cache * @gfp: allocation flags diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index 4b10676c69d1..bd07484ab9dd 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -69,7 +69,7 @@ struct fib_rules_ops { int (*action)(struct fib_rule *, struct flowi *, int, struct fib_lookup_arg *); - bool (*suppress)(struct fib_rule *, + bool (*suppress)(struct fib_rule *, int, struct fib_lookup_arg *); int (*match)(struct fib_rule *, struct flowi *, int); @@ -218,7 +218,9 @@ INDIRECT_CALLABLE_DECLARE(int fib4_rule_action(struct fib_rule *rule, struct fib_lookup_arg *arg)); INDIRECT_CALLABLE_DECLARE(bool fib6_rule_suppress(struct fib_rule *rule, + int flags, struct fib_lookup_arg *arg)); INDIRECT_CALLABLE_DECLARE(bool fib4_rule_suppress(struct fib_rule *rule, + int flags, struct fib_lookup_arg *arg)); #endif diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index c412dde4d67d..83b8070d1cc9 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -485,6 +485,7 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh, struct fib6_config *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack); void fib6_nh_release(struct fib6_nh *fib6_nh); +void fib6_nh_release_dsts(struct fib6_nh *fib6_nh); int call_fib6_entry_notifiers(struct net *net, enum fib_event_type event_type, diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index ab5348e57db1..3417ba2d27ad 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -438,7 +438,7 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, #ifdef CONFIG_IP_ROUTE_CLASSID static inline int fib_num_tclassid_users(struct net *net) { - return net->ipv4.fib_num_tclassid_users; + return atomic_read(&net->ipv4.fib_num_tclassid_users); } #else static inline int fib_num_tclassid_users(struct net *net) diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h index afbce90c4480..45e0339be6fa 100644 --- a/include/net/ipv6_stubs.h +++ b/include/net/ipv6_stubs.h @@ -47,6 +47,7 @@ struct ipv6_stub { struct fib6_config *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack); void (*fib6_nh_release)(struct fib6_nh *fib6_nh); + void (*fib6_nh_release_dsts)(struct fib6_nh *fib6_nh); void (*fib6_update_sernum)(struct net *net, struct fib6_info *rt); int (*ip6_del_rt)(struct net *net, struct fib6_info *rt, bool skip_notify); void (*fib6_rt_update)(struct net *net, struct fib6_info *rt, diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index cc663c68ddc4..d24b0a34c8f0 100644 ---
a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -276,14 +276,14 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb) /* jiffies until ct expires, 0 if already expired */ static inline unsigned long nf_ct_expires(const struct nf_conn *ct) { - s32 timeout = ct->timeout - nfct_time_stamp; + s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp; return timeout > 0 ? timeout : 0; } static inline bool nf_ct_is_expired(const struct nf_conn *ct) { - return (__s32)(ct->timeout - nfct_time_stamp) <= 0; + return (__s32)(READ_ONCE(ct->timeout) - nfct_time_stamp) <= 0; } /* use after obtaining a reference count */ @@ -302,7 +302,7 @@ static inline bool nf_ct_should_gc(const struct nf_conn *ct) static inline void nf_ct_offload_timeout(struct nf_conn *ct) { if (nf_ct_expires(ct) < NF_CT_DAY / 2) - ct->timeout = nfct_time_stamp + NF_CT_DAY; + WRITE_ONCE(ct->timeout, nfct_time_stamp + NF_CT_DAY); } struct kernel_param; diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 2f65701a43c9..6c5b2efc4f17 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -65,7 +65,7 @@ struct netns_ipv4 { bool fib_has_custom_local_routes; bool fib_offload_disabled; #ifdef CONFIG_IP_ROUTE_CLASSID - int fib_num_tclassid_users; + atomic_t fib_num_tclassid_users; #endif struct hlist_head *fib_table_hash; struct sock *fibnl; diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h index a964daedc17b..ea8595651c38 100644 --- a/include/net/nfc/nci_core.h +++ b/include/net/nfc/nci_core.h @@ -30,6 +30,7 @@ enum nci_flag { NCI_UP, NCI_DATA_EXCHANGE, NCI_DATA_EXCHANGE_TO, + NCI_UNREG, }; /* NCI device states */ diff --git a/include/net/nl802154.h b/include/net/nl802154.h index ddcee128f5d9..145acb8f2509 100644 --- a/include/net/nl802154.h +++ b/include/net/nl802154.h @@ -19,6 +19,8 @@ * */ +#include <linux/types.h> + #define NL802154_GENL_NAME "nl802154" enum nl802154_commands { @@ -150,10 +152,9 @@ enum nl802154_attrs { }; enum nl802154_iftype { - /* for backwards compatibility TODO */ - NL802154_IFTYPE_UNSPEC = -1, + NL802154_IFTYPE_UNSPEC = (~(__u32)0), - NL802154_IFTYPE_NODE, + NL802154_IFTYPE_NODE = 0, NL802154_IFTYPE_MONITOR, NL802154_IFTYPE_COORD, diff --git a/include/net/page_pool.h b/include/net/page_pool.h index 3855f069627f..a4082406a003 100644 --- a/include/net/page_pool.h +++ b/include/net/page_pool.h @@ -216,14 +216,24 @@ static inline void page_pool_recycle_direct(struct page_pool *pool, page_pool_put_full_page(pool, page, true); } +#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT \ + (sizeof(dma_addr_t) > sizeof(unsigned long)) + static inline dma_addr_t page_pool_get_dma_addr(struct page *page) { - return page->dma_addr; + dma_addr_t ret = page->dma_addr; + + if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT) + ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16; + + return ret; } static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr) { page->dma_addr = addr; + if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT) + page->dma_addr_upper = upper_32_bits(addr); } static inline void page_pool_set_frag_count(struct page *page, long nr) diff --git a/include/net/sock.h b/include/net/sock.h index b32906e1ab55..bea21ff70e74 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1913,18 +1913,31 @@ static inline int sk_tx_queue_get(const struct sock *sk) return -1; } -static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb) +static inline void __sk_rx_queue_set(struct sock *sk, + const struct sk_buff *skb, + bool 
force_set) { #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING if (skb_rx_queue_recorded(skb)) { u16 rx_queue = skb_get_rx_queue(skb); - if (unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue)) + if (force_set || + unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue)) WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue); } #endif } +static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb) +{ + __sk_rx_queue_set(sk, skb, true); +} + +static inline void sk_rx_queue_update(struct sock *sk, const struct sk_buff *skb) +{ + __sk_rx_queue_set(sk, skb, false); +} + static inline void sk_rx_queue_clear(struct sock *sk) { #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING @@ -2430,19 +2443,22 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk) * @sk: socket * * Use the per task page_frag instead of the per socket one for - * optimization when we know that we're in the normal context and owns + * optimization when we know that we're in process context and own * everything that's associated with %current. * - * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest - * inside other socket operations and end up recursing into sk_page_frag() - * while it's already in use. + * Both direct reclaim and page faults can nest inside other + * socket operations and end up recursing into sk_page_frag() + * while it's already in use: explicitly avoid task page_frag + * usage if the caller is potentially doing any of them. + * This assumes that page fault handlers use the GFP_NOFS flags. * * Return: a per task page_frag if context allows that, * otherwise a per socket one. */ static inline struct page_frag *sk_page_frag(struct sock *sk) { - if (gfpflags_normal_context(sk->sk_allocation)) + if ((sk->sk_allocation & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC | __GFP_FS)) == + (__GFP_DIRECT_RECLAIM | __GFP_FS)) return ¤t->task_frag; return &sk->sk_frag; diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h index 2758d9df71ee..c2a79aeee113 100644 --- a/include/rdma/rdma_netlink.h +++ b/include/rdma/rdma_netlink.h @@ -30,7 +30,7 @@ enum rdma_nl_flags { * constant as well and the compiler checks they are the same. 
*/ #define MODULE_ALIAS_RDMA_NETLINK(_index, _val) \ - static inline void __chk_##_index(void) \ + static inline void __maybe_unused __chk_##_index(void) \ { \ BUILD_BUG_ON(_index != _val); \ } \ diff --git a/include/soc/mscc/ocelot_vcap.h b/include/soc/mscc/ocelot_vcap.h index eeb1142aa1b1..4d1dfa1136b2 100644 --- a/include/soc/mscc/ocelot_vcap.h +++ b/include/soc/mscc/ocelot_vcap.h @@ -703,6 +703,8 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot, struct netlink_ext_ack *extack); int ocelot_vcap_filter_del(struct ocelot *ocelot, struct ocelot_vcap_filter *rule); +int ocelot_vcap_filter_replace(struct ocelot *ocelot, + struct ocelot_vcap_filter *filter); struct ocelot_vcap_filter * ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, unsigned long cookie, bool tc_offload); diff --git a/include/soc/tegra/common.h b/include/soc/tegra/common.h index af41ad80ec21..8ec1ac07fc85 100644 --- a/include/soc/tegra/common.h +++ b/include/soc/tegra/common.h @@ -39,4 +39,19 @@ devm_tegra_core_dev_init_opp_table(struct device *dev, } #endif +static inline int +devm_tegra_core_dev_init_opp_table_common(struct device *dev) +{ + struct tegra_core_opp_params opp_params = {}; + int err; + + opp_params.init_state = true; + + err = devm_tegra_core_dev_init_opp_table(dev, &opp_params); + if (err != -ENODEV) + return err; + + return 0; +} + #endif /* __SOC_TEGRA_COMMON_H__ */ diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h index 31f4c4f9aeea..ac0893df9c76 100644 --- a/include/sound/soc-acpi.h +++ b/include/sound/soc-acpi.h @@ -147,7 +147,7 @@ struct snd_soc_acpi_link_adr { */ /* Descriptor for SST ASoC machine driver */ struct snd_soc_acpi_mach { - const u8 id[ACPI_ID_LEN]; + u8 id[ACPI_ID_LEN]; const struct snd_soc_acpi_codecs *comp_ids; const u32 link_mask; const struct snd_soc_acpi_link_adr *links; diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h index 3ba63319af3c..c9048f3e471b 100644 --- a/include/trace/events/rpcgss.h +++ b/include/trace/events/rpcgss.h @@ -8,7 +8,7 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM rpcgss -#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ) +#if !defined(_TRACE_RPCGSS_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_RPCGSS_H #include <linux/tracepoint.h> diff --git a/include/uapi/asm-generic/poll.h b/include/uapi/asm-generic/poll.h index 41b509f410bf..f9c520ce4bf4 100644 --- a/include/uapi/asm-generic/poll.h +++ b/include/uapi/asm-generic/poll.h @@ -29,7 +29,7 @@ #define POLLRDHUP 0x2000 #endif -#define POLLFREE (__force __poll_t)0x4000 /* currently only for epoll */ +#define POLLFREE (__force __poll_t)0x4000 #define POLL_BUSY_LOOP (__force __poll_t)0x8000 diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 26e45fc5eb1a..0b94ec7b73e7 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -80,7 +80,7 @@ extern "C" { * * %AMDGPU_GEM_DOMAIN_GTT GPU accessible system memory, mapped into the * GPU's virtual address space via gart. Gart memory linearizes non-contiguous - * pages of system memory, allows GPU access system memory in a linezrized + * pages of system memory, allows GPU access system memory in a linearized * fashion. * * %AMDGPU_GEM_DOMAIN_VRAM Local video memory. 
For APUs, it is memory diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 3b810b53ba8b..642808520d92 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -1096,6 +1096,24 @@ extern "C" { #define DRM_IOCTL_SYNCOBJ_TRANSFER DRM_IOWR(0xCC, struct drm_syncobj_transfer) #define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array) +/** + * DRM_IOCTL_MODE_GETFB2 - Get framebuffer metadata. + * + * This queries metadata about a framebuffer. User-space fills + * &drm_mode_fb_cmd2.fb_id as the input, and the kernel fills the rest of the + * struct as the output. + * + * If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles + * will be filled with GEM buffer handles. Planes are valid until one has a + * zero handle -- this can be used to compute the number of planes. + * + * Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid + * until one has a zero &drm_mode_fb_cmd2.pitches. + * + * If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set + * in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the + * modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier. + */ #define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2) /* diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 7f652c96845b..fc0c1454d275 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -314,6 +314,13 @@ extern "C" { */ #define DRM_FORMAT_P016 fourcc_code('P', '0', '1', '6') /* 2x2 subsampled Cr:Cb plane 16 bits per channel */ +/* 2 plane YCbCr420. + * 3 10 bit components and 2 padding bits packed into 4 bytes. + * index 0 = Y plane, [31:0] x:Y2:Y1:Y0 2:10:10:10 little endian + * index 1 = Cr:Cb plane, [63:0] x:Cr2:Cb2:Cr1:x:Cb1:Cr0:Cb0 [2:10:10:10:2:10:10:10] little endian + */ +#define DRM_FORMAT_P030 fourcc_code('P', '0', '3', '0') /* 2x2 subsampled Cr:Cb plane 10 bits per channel packed */ + /* 3 plane non-subsampled (444) YCbCr * 16 bits per component, but only 10 bits are used and 6 bits are padded * index 0: Y plane, [15:0] Y:x [10:6] little endian @@ -854,6 +861,10 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) * and UV. Some SAND-using hardware stores UV in a separate tiled * image from Y to reduce the column height, which is not supported * with these modifiers. + * + * The DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT modifier is also + * supported for DRM_FORMAT_P030 where the columns remain as 128 bytes + * wide, but as this is a 10 bpp format that translates to 96 pixels. */ #define DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(v) \ diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h index a13e20cc66b4..0512fde5e697 100644 --- a/include/uapi/drm/virtgpu_drm.h +++ b/include/uapi/drm/virtgpu_drm.h @@ -196,6 +196,13 @@ struct drm_virtgpu_context_init { __u64 ctx_set_params; }; +/* + * Event code that's given when VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK is in + * effect. The event size is sizeof(drm_event), since there is no additional + * payload.
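The plane-counting rule in the DRM_IOCTL_MODE_GETFB2 comment above can be shown in a few lines of user space. This is a hedged sketch: fb_info and fb_num_planes() are invented stand-ins for struct drm_mode_fb_cmd2 and a caller-side helper, assuming the standard four-plane maximum:

#include <stddef.h>
#include <stdint.h>

struct fb_info {
	uint32_t handles[4];
	uint32_t pitches[4];
};

/* Planes are valid until the first zero handle (DRM master /
 * CAP_SYS_ADMIN case) or, otherwise, the first zero pitch. */
static size_t fb_num_planes(const struct fb_info *fb, int have_handles)
{
	size_t n;

	for (n = 0; n < 4; n++) {
		uint32_t v = have_handles ? fb->handles[n] : fb->pitches[n];

		if (!v)
			break;
	}
	return n;
}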
+ */ +#define VIRTGPU_EVENT_FENCE_SIGNALED 0x90000000 + #define DRM_IOCTL_VIRTGPU_MAP \ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map) diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index 9078775feb51..8277644c1144 100644 --- a/include/uapi/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h @@ -110,6 +110,7 @@ extern "C" { #define DRM_VMW_PARAM_HW_CAPS2 13 #define DRM_VMW_PARAM_SM4_1 14 #define DRM_VMW_PARAM_SM5 15 +#define DRM_VMW_PARAM_GL43 16 /** * enum drm_vmw_handle_type - handle type for ref ioctls diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index 5da4ee234e0b..c0c2f3ed5729 100644 --- a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h @@ -117,7 +117,7 @@ #define ETH_P_IFE 0xED3E /* ForCES inter-FE LFB type */ #define ETH_P_AF_IUCV 0xFBFB /* IBM af_iucv [ NOT AN OFFICIALLY REGISTERED ID ] */ -#define ETH_P_802_3_MIN 0x0600 /* If the value in the ethernet type is less than this value +#define ETH_P_802_3_MIN 0x0600 /* If the value in the ethernet type is not less than this value * then the frame is Ethernet II. Else it is 802.3 */ /* diff --git a/include/uapi/linux/kfd_sysfs.h b/include/uapi/linux/kfd_sysfs.h new file mode 100644 index 000000000000..e1fb78b4bf09 --- /dev/null +++ b/include/uapi/linux/kfd_sysfs.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT WITH Linux-syscall-note */ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE.
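As an aside on the ETH_P_802_3_MIN comment fixed above, the classic type/length disambiguation fits in a single predicate; frame_kind() below is an invented name in a user-space sketch:

#include <stdint.h>
#include <stdio.h>

#define ETH_P_802_3_MIN 0x0600

/* Type/length values of at least 0x0600 are an EtherType (Ethernet II
 * framing); smaller values are an IEEE 802.3 payload length. */
static const char *frame_kind(uint16_t type_or_len)
{
	return type_or_len >= ETH_P_802_3_MIN ? "Ethernet II" : "802.3";
}

int main(void)
{
	printf("0x0800 -> %s\n", frame_kind(0x0800)); /* IPv4 EtherType */
	printf("0x0100 -> %s\n", frame_kind(0x0100)); /* 256-byte length */
	return 0;
}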
+ */ + +#ifndef KFD_SYSFS_H_INCLUDED +#define KFD_SYSFS_H_INCLUDED + +/* Capability bits in node properties */ +#define HSA_CAP_HOT_PLUGGABLE 0x00000001 +#define HSA_CAP_ATS_PRESENT 0x00000002 +#define HSA_CAP_SHARED_WITH_GRAPHICS 0x00000004 +#define HSA_CAP_QUEUE_SIZE_POW2 0x00000008 +#define HSA_CAP_QUEUE_SIZE_32BIT 0x00000010 +#define HSA_CAP_QUEUE_IDLE_EVENT 0x00000020 +#define HSA_CAP_VA_LIMIT 0x00000040 +#define HSA_CAP_WATCH_POINTS_SUPPORTED 0x00000080 +#define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK 0x00000f00 +#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 8 +#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK 0x00003000 +#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT 12 + +#define HSA_CAP_DOORBELL_TYPE_PRE_1_0 0x0 +#define HSA_CAP_DOORBELL_TYPE_1_0 0x1 +#define HSA_CAP_DOORBELL_TYPE_2_0 0x2 +#define HSA_CAP_AQL_QUEUE_DOUBLE_MAP 0x00004000 + +/* Old buggy user mode depends on this being 0 */ +#define HSA_CAP_RESERVED_WAS_SRAM_EDCSUPPORTED 0x00080000 + +#define HSA_CAP_MEM_EDCSUPPORTED 0x00100000 +#define HSA_CAP_RASEVENTNOTIFY 0x00200000 +#define HSA_CAP_ASIC_REVISION_MASK 0x03c00000 +#define HSA_CAP_ASIC_REVISION_SHIFT 22 +#define HSA_CAP_SRAM_EDCSUPPORTED 0x04000000 +#define HSA_CAP_SVMAPI_SUPPORTED 0x08000000 +#define HSA_CAP_FLAGS_COHERENTHOSTACCESS 0x10000000 +#define HSA_CAP_RESERVED 0xe00f8000 + +/* Heap types in memory properties */ +#define HSA_MEM_HEAP_TYPE_SYSTEM 0 +#define HSA_MEM_HEAP_TYPE_FB_PUBLIC 1 +#define HSA_MEM_HEAP_TYPE_FB_PRIVATE 2 +#define HSA_MEM_HEAP_TYPE_GPU_GDS 3 +#define HSA_MEM_HEAP_TYPE_GPU_LDS 4 +#define HSA_MEM_HEAP_TYPE_GPU_SCRATCH 5 + +/* Flag bits in memory properties */ +#define HSA_MEM_FLAGS_HOT_PLUGGABLE 0x00000001 +#define HSA_MEM_FLAGS_NON_VOLATILE 0x00000002 +#define HSA_MEM_FLAGS_RESERVED 0xfffffffc + +/* Cache types in cache properties */ +#define HSA_CACHE_TYPE_DATA 0x00000001 +#define HSA_CACHE_TYPE_INSTRUCTION 0x00000002 +#define HSA_CACHE_TYPE_CPU 0x00000004 +#define HSA_CACHE_TYPE_HSACU 0x00000008 +#define HSA_CACHE_TYPE_RESERVED 0xfffffff0 + +/* Link types in IO link properties (matches CRAT link types) */ +#define HSA_IOLINK_TYPE_UNDEFINED 0 +#define HSA_IOLINK_TYPE_HYPERTRANSPORT 1 +#define HSA_IOLINK_TYPE_PCIEXPRESS 2 +#define HSA_IOLINK_TYPE_AMBA 3 +#define HSA_IOLINK_TYPE_MIPI 4 +#define HSA_IOLINK_TYPE_QPI_1_1 5 +#define HSA_IOLINK_TYPE_RESERVED1 6 +#define HSA_IOLINK_TYPE_RESERVED2 7 +#define HSA_IOLINK_TYPE_RAPID_IO 8 +#define HSA_IOLINK_TYPE_INFINIBAND 9 +#define HSA_IOLINK_TYPE_RESERVED3 10 +#define HSA_IOLINK_TYPE_XGMI 11 +#define HSA_IOLINK_TYPE_XGOP 12 +#define HSA_IOLINK_TYPE_GZ 13 +#define HSA_IOLINK_TYPE_ETHERNET_RDMA 14 +#define HSA_IOLINK_TYPE_RDMA_OTHER 15 +#define HSA_IOLINK_TYPE_OTHER 16 + +/* Flag bits in IO link properties (matches CRAT flags, excluding the + * bi-directional flag, which is not officially part of the CRAT spec, and + * only used internally in KFD) + */ +#define HSA_IOLINK_FLAGS_ENABLED (1 << 0) +#define HSA_IOLINK_FLAGS_NON_COHERENT (1 << 1) +#define HSA_IOLINK_FLAGS_NO_ATOMICS_32_BIT (1 << 2) +#define HSA_IOLINK_FLAGS_NO_ATOMICS_64_BIT (1 << 3) +#define HSA_IOLINK_FLAGS_NO_PEER_TO_PEER_DMA (1 << 4) +#define HSA_IOLINK_FLAGS_RESERVED 0xffffffe0 + +#endif diff --git a/include/uapi/linux/resource.h b/include/uapi/linux/resource.h index 74ef57b38f9f..ac5d6a3031db 100644 --- a/include/uapi/linux/resource.h +++ b/include/uapi/linux/resource.h @@ -66,10 +66,17 @@ struct rlimit64 { #define _STK_LIM (8*1024*1024) /* - * GPG2 wants 64kB of mlocked memory, to make sure pass phrases - * and other sensitive information
are never written to disk. + * Limit the amount of locked memory to a sane default: + * root can always increase this limit if needed. + * + * The main use-cases are (1) preventing sensitive memory + * from being swapped; (2) real-time operations; (3) buffers + * registered via IORING_REGISTER_BUFFERS. + * + * The first two don't need much. The latter will take as + * much as it can get. 8MB is a reasonably sane default. */ -#define MLOCK_LIMIT ((PAGE_SIZE > 64*1024) ? PAGE_SIZE : 64*1024) +#define MLOCK_LIMIT (8*1024*1024) /* * Due to binary compatibility, the actual resource numbers diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h index b94074c82772..b13eb86395e0 100644 --- a/include/xen/xenbus.h +++ b/include/xen/xenbus.h @@ -112,6 +112,7 @@ struct xenbus_driver { const char *name; /* defaults to ids[0].devicetype */ const struct xenbus_device_id *ids; bool allow_rebind; /* avoid setting xenstore closed during remove */ + bool not_essential; /* is not mandatory for boot progress */ int (*probe)(struct xenbus_device *dev, const struct xenbus_device_id *id); void (*otherend_changed)(struct xenbus_device *dev, diff --git a/init/Kconfig b/init/Kconfig index 036b750e8d8a..4b7bac10c72d 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -887,7 +887,7 @@ config CC_HAS_INT128 config CC_IMPLICIT_FALLTHROUGH string - default "-Wimplicit-fallthrough=5" if CC_IS_GCC + default "-Wimplicit-fallthrough=5" if CC_IS_GCC && $(cc-option,-Wimplicit-fallthrough=5) default "-Wimplicit-fallthrough" if CC_IS_CLANG && $(cc-option,-Wunreachable-code-fallthrough) # diff --git a/ipc/shm.c b/ipc/shm.c index 4942bdd65748..b3048ebd5c31 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -62,9 +62,18 @@ struct shmid_kernel /* private to the kernel */ struct pid *shm_lprid; struct ucounts *mlock_ucounts; - /* The task created the shm object. NULL if the task is dead. */ + /* + * The task that created the shm object; used for + * task_lock(shp->shm_creator) + */ struct task_struct *shm_creator; - struct list_head shm_clist; /* list by creator */ + + /* + * List by creator. task_lock(->shm_creator) required for read/write. + * If list_empty(), then the creator is dead already. + */ + struct list_head shm_clist; + struct ipc_namespace *ns; } __randomize_layout; /* shm_mode upper byte flags */ @@ -115,6 +124,7 @@ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) struct shmid_kernel *shp; shp = container_of(ipcp, struct shmid_kernel, shm_perm); + WARN_ON(ns != shp->ns); if (shp->shm_nattch) { shp->shm_perm.mode |= SHM_DEST; @@ -225,10 +235,43 @@ static void shm_rcu_free(struct rcu_head *head) kfree(shp); } -static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s) +/* + * It has to be called with shp locked. + * It must be called before ipc_rmid() + */ +static inline void shm_clist_rm(struct shmid_kernel *shp) { - list_del(&s->shm_clist); - ipc_rmid(&shm_ids(ns), &s->shm_perm); + struct task_struct *creator; + + /* ensure that shm_creator does not disappear */ + rcu_read_lock(); + + /* + * A concurrent exit_shm may do a list_del_init() as well. + * Just do nothing if exit_shm already did the work. + */ + if (!list_empty(&shp->shm_clist)) { + /* + * shp->shm_creator is guaranteed to be valid *only* + * if shp->shm_clist is not empty. + */ + creator = shp->shm_creator; + + task_lock(creator); + /* + * list_del_init() is a nop if the entry was already removed + * from the list.
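The "nop if already removed" property that the comment above relies on is a well-known consequence of list_del_init(): once re-initialized, the entry points at itself, so a second unlink only touches the entry itself. A user-space reimplementation sketch (struct node and its helpers are stand-ins for the kernel's list_head API):

#include <assert.h>

struct node { struct node *next, *prev; };

static void init(struct node *n) { n->next = n->prev = n; }
static int empty(const struct node *n) { return n->next == n; }

static void add_tail(struct node *head, struct node *n)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* Unlink and re-initialize: calling this twice is harmless. */
static void del_init(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	init(n);
}

int main(void)
{
	struct node head, e;

	init(&head);
	add_tail(&head, &e);
	del_init(&e);   /* first racing path wins */
	del_init(&e);   /* second path: self-referential no-op */
	assert(empty(&head) && empty(&e));
	return 0;
}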
+ */ + list_del_init(&shp->shm_clist); + task_unlock(creator); + } + rcu_read_unlock(); +} + +static inline void shm_rmid(struct shmid_kernel *s) +{ + shm_clist_rm(s); + ipc_rmid(&shm_ids(s->ns), &s->shm_perm); } @@ -283,7 +326,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) shm_file = shp->shm_file; shp->shm_file = NULL; ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT; - shm_rmid(ns, shp); + shm_rmid(shp); shm_unlock(shp); if (!is_file_hugepages(shm_file)) shmem_lock(shm_file, 0, shp->mlock_ucounts); @@ -303,10 +346,10 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) * * 2) sysctl kernel.shm_rmid_forced is set to 1. */ -static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) +static bool shm_may_destroy(struct shmid_kernel *shp) { return (shp->shm_nattch == 0) && - (ns->shm_rmid_forced || + (shp->ns->shm_rmid_forced || (shp->shm_perm.mode & SHM_DEST)); } @@ -337,7 +380,7 @@ static void shm_close(struct vm_area_struct *vma) ipc_update_pid(&shp->shm_lprid, task_tgid(current)); shp->shm_dtim = ktime_get_real_seconds(); shp->shm_nattch--; - if (shm_may_destroy(ns, shp)) + if (shm_may_destroy(shp)) shm_destroy(ns, shp); else shm_unlock(shp); @@ -358,10 +401,10 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data) * * As shp->* are changed under rwsem, it's safe to skip shp locking. */ - if (shp->shm_creator != NULL) + if (!list_empty(&shp->shm_clist)) return 0; - if (shm_may_destroy(ns, shp)) { + if (shm_may_destroy(shp)) { shm_lock_by_ptr(shp); shm_destroy(ns, shp); } @@ -379,48 +422,97 @@ void shm_destroy_orphaned(struct ipc_namespace *ns) /* Locking assumes this will only be called with task == current */ void exit_shm(struct task_struct *task) { - struct ipc_namespace *ns = task->nsproxy->ipc_ns; - struct shmid_kernel *shp, *n; + for (;;) { + struct shmid_kernel *shp; + struct ipc_namespace *ns; - if (list_empty(&task->sysvshm.shm_clist)) - return; + task_lock(task); + + if (list_empty(&task->sysvshm.shm_clist)) { + task_unlock(task); + break; + } + + shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel, + shm_clist); - /* - * If kernel.shm_rmid_forced is not set then only keep track of - * which shmids are orphaned, so that a later set of the sysctl - * can clean them up. - */ - if (!ns->shm_rmid_forced) { - down_read(&shm_ids(ns).rwsem); - list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist) - shp->shm_creator = NULL; /* - * Only under read lock but we are only called on current - * so no entry on the list will be shared. + * 1) Get a pointer to the ipc namespace. It is worth noting + * that this pointer is guaranteed to be valid because + * shp's lifetime is always shorter than that of the namespace + * in which shp lives. + * Since we have taken task_lock, shp won't be freed. */ - list_del(&task->sysvshm.shm_clist); - up_read(&shm_ids(ns).rwsem); - return; - } + ns = shp->ns; - /* - * Destroy all already created segments, that were not yet mapped, - * and mark any mapped as orphan to cover the sysctl toggling. - * Destroy is skipped if shm_may_destroy() returns false. - */ - down_write(&shm_ids(ns).rwsem); - list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) { - shp->shm_creator = NULL; + /* + * 2) If kernel.shm_rmid_forced is not set then only keep track of + * which shmids are orphaned, so that a later set of the sysctl + * can clean them up.
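For context, the "later set of the sysctl" mentioned above is just a write to kernel.shm_rmid_forced; a minimal sketch using the procfs interface (error handling trimmed, needs root):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/shm_rmid_forced", "w");

	if (!f)
		return 1;
	fputs("1\n", f);    /* destroy orphaned segments from now on */
	return fclose(f) ? 1 : 0;
}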
+ */ + if (!ns->shm_rmid_forced) + goto unlink_continue; - if (shm_may_destroy(ns, shp)) { - shm_lock_by_ptr(shp); - shm_destroy(ns, shp); + /* + * 3) Get a reference to the namespace. + * The refcount could be already 0. If it is 0, then + * the shm objects will be freed by free_ipc_work(). + */ + ns = get_ipc_ns_not_zero(ns); + if (!ns) { +unlink_continue: + list_del_init(&shp->shm_clist); + task_unlock(task); + continue; } - } - /* Remove the list head from any segments still attached. */ - list_del(&task->sysvshm.shm_clist); - up_write(&shm_ids(ns).rwsem); + /* + * 4) Get a reference to shp. + * This cannot fail: shm_clist_rm() is called before + * ipc_rmid(), thus the refcount cannot be 0. + */ + WARN_ON(!ipc_rcu_getref(&shp->shm_perm)); + + /* + * 5) Unlink the shm segment from the list of segments + * created by current. + * This must be done last. After unlinking, + * only the refcounts obtained above prevent IPC_RMID + * from destroying the segment or the namespace. + */ + list_del_init(&shp->shm_clist); + + task_unlock(task); + + /* + * 6) We now have all the references we need. + * Thus lock and, if needed, destroy shp. + */ + down_write(&shm_ids(ns).rwsem); + shm_lock_by_ptr(shp); + /* + * rcu_read_lock was implicitly taken in shm_lock_by_ptr, it's + * safe to call ipc_rcu_putref here + */ + ipc_rcu_putref(&shp->shm_perm, shm_rcu_free); + + if (ipc_valid_object(&shp->shm_perm)) { + if (shm_may_destroy(shp)) + shm_destroy(ns, shp); + else + shm_unlock(shp); + } else { + /* + * Someone else deleted the shp from namespace + * idr/kht while we were waiting. + * Just unlock and continue. + */ + shm_unlock(shp); + } + + up_write(&shm_ids(ns).rwsem); + put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */ + } } static vm_fault_t shm_fault(struct vm_fault *vmf) @@ -676,7 +768,11 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) if (error < 0) goto no_id; + shp->ns = ns; + + task_lock(current); list_add(&shp->shm_clist, &current->sysvshm.shm_clist); + task_unlock(current); /* * shmid gets reported as "inode#" in /proc/pid/maps.
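The SHM_DEST half of shm_may_destroy() is observable from plain user space: after IPC_RMID the segment survives only while it stays attached. A small sketch against the standard SysV API (error handling trimmed):

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	void *p = shmat(id, NULL, 0);

	shmctl(id, IPC_RMID, NULL);  /* marks SHM_DEST; the id is gone */
	printf("still usable at %p\n", p);
	shmdt(p);                    /* nattch drops to 0 -> destroyed */
	return 0;
}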
@@ -1567,7 +1663,8 @@ out_nattch: down_write(&shm_ids(ns).rwsem); shp = shm_lock(ns, shmid); shp->shm_nattch--; - if (shm_may_destroy(ns, shp)) + + if (shm_may_destroy(shp)) shm_destroy(ns, shp); else shm_unlock(shp); diff --git a/ipc/util.c b/ipc/util.c index d48d8cfa1f3f..fa2d86ef3fb8 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -447,8 +447,8 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids, static void ipc_kht_remove(struct ipc_ids *ids, struct kern_ipc_perm *ipcp) { if (ipcp->key != IPC_PRIVATE) - rhashtable_remove_fast(&ids->key_ht, &ipcp->khtnode, - ipc_kht_params); + WARN_ON_ONCE(rhashtable_remove_fast(&ids->key_ht, &ipcp->khtnode, + ipc_kht_params)); } /** @@ -498,7 +498,7 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp) { int idx = ipcid_to_idx(ipcp->id); - idr_remove(&ids->ipcs_idr, idx); + WARN_ON_ONCE(idr_remove(&ids->ipcs_idr, idx) != ipcp); ipc_kht_remove(ids, ipcp); ids->in_use--; ipcp->deleted = true; diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index dbc3ad07e21b..9bdb03767db5 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -6346,11 +6346,6 @@ BTF_ID_LIST_GLOBAL_SINGLE(btf_task_struct_ids, struct, task_struct) /* BTF ID set registration API for modules */ -struct kfunc_btf_id_list { - struct list_head list; - struct mutex mutex; -}; - #ifdef CONFIG_DEBUG_INFO_BTF_MODULES void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l, @@ -6376,8 +6371,6 @@ bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id, { struct kfunc_btf_id_set *s; - if (!owner) - return false; mutex_lock(&klist->mutex); list_for_each_entry(s, &klist->list, list) { if (s->owner == owner && btf_id_set_contains(s->set, kfunc_id)) { @@ -6389,8 +6382,6 @@ bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id, return false; } -#endif - #define DEFINE_KFUNC_BTF_ID_LIST(name) \ struct kfunc_btf_id_list name = { LIST_HEAD_INIT(name.list), \ __MUTEX_INITIALIZER(name.mutex) }; \ @@ -6398,3 +6389,5 @@ bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id, DEFINE_KFUNC_BTF_ID_LIST(bpf_tcp_ca_kfunc_list); DEFINE_KFUNC_BTF_ID_LIST(prog_test_kfunc_list); + +#endif diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 2ca643af9a54..43eb3501721b 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -1809,6 +1809,8 @@ sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_sysctl_get_new_value_proto; case BPF_FUNC_sysctl_set_new_value: return &bpf_sysctl_set_new_value_proto; + case BPF_FUNC_ktime_get_coarse_ns: + return &bpf_ktime_get_coarse_ns_proto; default: return cgroup_base_func_proto(func_id, prog); } diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 1ffd469c217f..649f07623df6 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1364,8 +1364,6 @@ bpf_base_func_proto(enum bpf_func_id func_id) return &bpf_ktime_get_ns_proto; case BPF_FUNC_ktime_get_boot_ns: return &bpf_ktime_get_boot_ns_proto; - case BPF_FUNC_ktime_get_coarse_ns: - return &bpf_ktime_get_coarse_ns_proto; case BPF_FUNC_ringbuf_output: return &bpf_ringbuf_output_proto; case BPF_FUNC_ringbuf_reserve: diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 50f96ea4452a..1033ee8c0caf 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -132,6 +132,21 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr) return map; } +static void bpf_map_write_active_inc(struct bpf_map *map) +{ + atomic64_inc(&map->writecnt); +} + +static void 
bpf_map_write_active_dec(struct bpf_map *map) +{ + atomic64_dec(&map->writecnt); +} + +bool bpf_map_write_active(const struct bpf_map *map) +{ + return atomic64_read(&map->writecnt) != 0; +} + static u32 bpf_map_value_size(const struct bpf_map *map) { if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || @@ -601,11 +616,8 @@ static void bpf_map_mmap_open(struct vm_area_struct *vma) { struct bpf_map *map = vma->vm_file->private_data; - if (vma->vm_flags & VM_MAYWRITE) { - mutex_lock(&map->freeze_mutex); - map->writecnt++; - mutex_unlock(&map->freeze_mutex); - } + if (vma->vm_flags & VM_MAYWRITE) + bpf_map_write_active_inc(map); } /* called for all unmapped memory region (including initial) */ @@ -613,11 +625,8 @@ static void bpf_map_mmap_close(struct vm_area_struct *vma) { struct bpf_map *map = vma->vm_file->private_data; - if (vma->vm_flags & VM_MAYWRITE) { - mutex_lock(&map->freeze_mutex); - map->writecnt--; - mutex_unlock(&map->freeze_mutex); - } + if (vma->vm_flags & VM_MAYWRITE) + bpf_map_write_active_dec(map); } static const struct vm_operations_struct bpf_map_default_vmops = { @@ -668,7 +677,7 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma) goto out; if (vma->vm_flags & VM_MAYWRITE) - map->writecnt++; + bpf_map_write_active_inc(map); out: mutex_unlock(&map->freeze_mutex); return err; @@ -1139,6 +1148,7 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr) map = __bpf_map_get(f); if (IS_ERR(map)) return PTR_ERR(map); + bpf_map_write_active_inc(map); if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { err = -EPERM; goto err_put; @@ -1174,6 +1184,7 @@ free_value: free_key: kvfree(key); err_put: + bpf_map_write_active_dec(map); fdput(f); return err; } @@ -1196,6 +1207,7 @@ static int map_delete_elem(union bpf_attr *attr) map = __bpf_map_get(f); if (IS_ERR(map)) return PTR_ERR(map); + bpf_map_write_active_inc(map); if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { err = -EPERM; goto err_put; @@ -1226,6 +1238,7 @@ static int map_delete_elem(union bpf_attr *attr) out: kvfree(key); err_put: + bpf_map_write_active_dec(map); fdput(f); return err; } @@ -1533,6 +1546,7 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr) map = __bpf_map_get(f); if (IS_ERR(map)) return PTR_ERR(map); + bpf_map_write_active_inc(map); if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) || !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { err = -EPERM; @@ -1597,6 +1611,7 @@ free_value: free_key: kvfree(key); err_put: + bpf_map_write_active_dec(map); fdput(f); return err; } @@ -1624,8 +1639,7 @@ static int map_freeze(const union bpf_attr *attr) } mutex_lock(&map->freeze_mutex); - - if (map->writecnt) { + if (bpf_map_write_active(map)) { err = -EBUSY; goto err_put; } @@ -4171,6 +4185,9 @@ static int bpf_map_do_batch(const union bpf_attr *attr, union bpf_attr __user *uattr, int cmd) { + bool has_read = cmd == BPF_MAP_LOOKUP_BATCH || + cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH; + bool has_write = cmd != BPF_MAP_LOOKUP_BATCH; struct bpf_map *map; int err, ufd; struct fd f; @@ -4183,16 +4200,13 @@ static int bpf_map_do_batch(const union bpf_attr *attr, map = __bpf_map_get(f); if (IS_ERR(map)) return PTR_ERR(map); - - if ((cmd == BPF_MAP_LOOKUP_BATCH || - cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) && - !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { + if (has_write) + bpf_map_write_active_inc(map); + if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { err = -EPERM; goto err_put; } - - if (cmd != BPF_MAP_LOOKUP_BATCH && - !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) 
{ + if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { err = -EPERM; goto err_put; } @@ -4205,8 +4219,9 @@ static int bpf_map_do_batch(const union bpf_attr *attr, BPF_DO_BATCH(map->ops->map_update_batch); else BPF_DO_BATCH(map->ops->map_delete_batch); - err_put: + if (has_write) + bpf_map_write_active_dec(map); fdput(f); return err; } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 890b3ec375a3..f3001937bbb9 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1151,7 +1151,8 @@ static void mark_ptr_not_null_reg(struct bpf_reg_state *reg) /* transfer reg's id which is unique for every map_lookup_elem * as UID of the inner map. */ - reg->map_uid = reg->id; + if (map_value_has_timer(map->inner_map_meta)) + reg->map_uid = reg->id; } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) { reg->type = PTR_TO_XDP_SOCK; } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP || @@ -4055,7 +4056,22 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size) static bool bpf_map_is_rdonly(const struct bpf_map *map) { - return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen; + /* A map is considered read-only if the following conditions are true: + * + * 1) BPF program side cannot change any of the map content. The + * BPF_F_RDONLY_PROG flag was set at map creation time and stays + * set throughout the lifetime of the map. + * 2) The map value(s) have been initialized from user space by a + * loader and then "frozen", such that no new map update/delete + * operations from syscall side are possible for the rest of + * the map's lifetime from that point onwards. + * 3) Any parallel/pending map update/delete operations from syscall + * side have been completed. Only after that point is it safe to + * assume that map value(s) are immutable. + */ + return (map->map_flags & BPF_F_RDONLY_PROG) && + READ_ONCE(map->frozen) && + !bpf_map_write_active(map); } static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val) @@ -8406,7 +8422,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, new_range = dst_reg->off; if (range_right_open) - new_range--; + new_range++; /* Examples for register markings: * @@ -11631,6 +11647,13 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env, } } + if (map_value_has_timer(map)) { + if (is_tracing_prog_type(prog_type)) { + verbose(env, "tracing progs cannot use bpf_timer yet\n"); + return -EINVAL; + } + } + if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) && !bpf_offload_prog_map_match(prog, map)) { verbose(env, "offload device mismatch between prog and map\n"); diff --git a/kernel/cpu.c b/kernel/cpu.c index 192e43a87407..407a2568f35e 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -31,6 +31,7 @@ #include <linux/smpboot.h> #include <linux/relay.h> #include <linux/slab.h> +#include <linux/scs.h> #include <linux/percpu-rwsem.h> #include <linux/cpuset.h> @@ -588,6 +589,12 @@ static int bringup_cpu(unsigned int cpu) int ret; /* + * Reset stale stack state from the last time this CPU was online. + */ + scs_task_reset(idle); + kasan_unpoison_task_stack(idle); + + /* * Some architectures have to walk the irq descriptors to * setup the vector space for the cpu which comes online. * Prevent irq alloc/free across the bringup.
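Pulling the bpf_map writecnt pieces out of the diff, the scheme reduces to a small pattern. The sketch below is a user-space model with invented names (struct map, map_is_rdonly()), not the kernel structures:

#include <stdatomic.h>
#include <stdbool.h>

struct map {
	atomic_long writecnt;   /* in-flight syscall-side writers */
	bool frozen;            /* set by BPF_MAP_FREEZE */
	bool rdonly_prog;       /* BPF_F_RDONLY_PROG at creation */
};

static void write_active_inc(struct map *m) { atomic_fetch_add(&m->writecnt, 1); }
static void write_active_dec(struct map *m) { atomic_fetch_sub(&m->writecnt, 1); }

/* A frozen read-only map counts as immutable only once no writer is
 * still in flight -- the third condition listed above. */
static bool map_is_rdonly(struct map *m)
{
	return m->rdonly_prog && m->frozen &&
	       atomic_load(&m->writecnt) == 0;
}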
diff --git a/kernel/entry/syscall_user_dispatch.c b/kernel/entry/syscall_user_dispatch.c index 4508201847d2..0b6379adff6b 100644 --- a/kernel/entry/syscall_user_dispatch.c +++ b/kernel/entry/syscall_user_dispatch.c @@ -48,7 +48,7 @@ bool syscall_user_dispatch(struct pt_regs *regs) * the selector is loaded by userspace. */ if (unlikely(__get_user(state, sd->selector))) { - force_fatal_sig(SIGSEGV); + force_exit_sig(SIGSEGV); return true; } @@ -56,7 +56,7 @@ bool syscall_user_dispatch(struct pt_regs *regs) return false; if (state != SYSCALL_DISPATCH_FILTER_BLOCK) { - force_fatal_sig(SIGSYS); + force_exit_sig(SIGSYS); return true; } } diff --git a/kernel/events/core.c b/kernel/events/core.c index 523106a506ee..30d94f68c5bd 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -9759,6 +9759,9 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size, continue; if (event->attr.config != entry->type) continue; + /* Cannot deliver synchronous signal to other task. */ + if (event->attr.sigtrap) + continue; if (perf_tp_event_match(event, &data, regs)) perf_swevent_event(event, count, &data, regs); } diff --git a/kernel/kprobes.c b/kernel/kprobes.c index e9db0c810554..21eccc961bba 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -2086,6 +2086,9 @@ int register_kretprobe(struct kretprobe *rp) } } + if (rp->data_size > KRETPROBE_MAX_DATA_SIZE) + return -E2BIG; + rp->kp.pre_handler = pre_handler_kretprobe; rp->kp.post_handler = NULL; diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index c51387a43265..04a74d040a6d 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -105,9 +105,9 @@ * atomic_long_cmpxchg() will be used to obtain writer lock. * * There are three places where the lock handoff bit may be set or cleared. - * 1) rwsem_mark_wake() for readers. - * 2) rwsem_try_write_lock() for writers. - * 3) Error path of rwsem_down_write_slowpath(). + * 1) rwsem_mark_wake() for readers -- set, clear + * 2) rwsem_try_write_lock() for writers -- set, clear + * 3) rwsem_del_waiter() -- clear * * For all the above cases, wait_lock will be held. A writer must also * be the first one in the wait_list to be eligible for setting the handoff @@ -334,6 +334,9 @@ struct rwsem_waiter { struct task_struct *task; enum rwsem_waiter_type type; unsigned long timeout; + + /* Writer only, not initialized in reader */ + bool handoff_set; }; #define rwsem_first_waiter(sem) \ list_first_entry(&sem->wait_list, struct rwsem_waiter, list) @@ -344,12 +347,6 @@ enum rwsem_wake_type { RWSEM_WAKE_READ_OWNED /* Waker thread holds the read lock */ }; -enum writer_wait_state { - WRITER_NOT_FIRST, /* Writer is not first in wait list */ - WRITER_FIRST, /* Writer is first in wait list */ - WRITER_HANDOFF /* Writer is first & handoff needed */ -}; - /* * The typical HZ value is either 250 or 1000. So set the minimum waiting * time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait @@ -365,6 +362,31 @@ enum writer_wait_state { */ #define MAX_READERS_WAKEUP 0x100 +static inline void +rwsem_add_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter) +{ + lockdep_assert_held(&sem->wait_lock); + list_add_tail(&waiter->list, &sem->wait_list); + /* caller will set RWSEM_FLAG_WAITERS */ +} + +/* + * Remove a waiter from the wait_list and clear flags. + * + * Both rwsem_mark_wake() and rwsem_try_write_lock() contain a full 'copy' of + * this function. Modify with care. 
+ */ +static inline void +rwsem_del_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter) +{ + lockdep_assert_held(&sem->wait_lock); + list_del(&waiter->list); + if (likely(!list_empty(&sem->wait_list))) + return; + + atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count); +} + /* * handle the lock release when processes blocked on it that can now run * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must @@ -376,6 +398,8 @@ enum writer_wait_state { * preferably when the wait_lock is released * - woken process blocks are discarded from the list after having task zeroed * - writers are only marked woken if downgrading is false + * + * Implies rwsem_del_waiter() for all woken readers. */ static void rwsem_mark_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type, @@ -490,18 +514,25 @@ static void rwsem_mark_wake(struct rw_semaphore *sem, adjustment = woken * RWSEM_READER_BIAS - adjustment; lockevent_cond_inc(rwsem_wake_reader, woken); + + oldcount = atomic_long_read(&sem->count); if (list_empty(&sem->wait_list)) { - /* hit end of list above */ + /* + * Combined with list_move_tail() above, this implies + * rwsem_del_waiter(). + */ adjustment -= RWSEM_FLAG_WAITERS; + if (oldcount & RWSEM_FLAG_HANDOFF) + adjustment -= RWSEM_FLAG_HANDOFF; + } else if (woken) { + /* + * When we've woken a reader, we no longer need to force + * writers to give up the lock and we can clear HANDOFF. + */ + if (oldcount & RWSEM_FLAG_HANDOFF) + adjustment -= RWSEM_FLAG_HANDOFF; } - /* - * When we've woken a reader, we no longer need to force writers - * to give up the lock and we can clear HANDOFF. - */ - if (woken && (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF)) - adjustment -= RWSEM_FLAG_HANDOFF; - if (adjustment) atomic_long_add(adjustment, &sem->count); @@ -532,12 +563,12 @@ static void rwsem_mark_wake(struct rw_semaphore *sem, * race conditions between checking the rwsem wait list and setting the * sem->count accordingly. * - * If wstate is WRITER_HANDOFF, it will make sure that either the handoff - * bit is set or the lock is acquired with handoff bit cleared. + * Implies rwsem_del_waiter() on success. */ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem, - enum writer_wait_state wstate) + struct rwsem_waiter *waiter) { + bool first = rwsem_first_waiter(sem) == waiter; long count, new; lockdep_assert_held(&sem->wait_lock); @@ -546,13 +577,19 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem, do { bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF); - if (has_handoff && wstate == WRITER_NOT_FIRST) - return false; + if (has_handoff) { + if (!first) + return false; + + /* First waiter inherits a previously set handoff bit */ + waiter->handoff_set = true; + } new = count; if (count & RWSEM_LOCK_MASK) { - if (has_handoff || (wstate != WRITER_HANDOFF)) + if (has_handoff || (!rt_task(waiter->task) && + !time_after(jiffies, waiter->timeout))) return false; new |= RWSEM_FLAG_HANDOFF; @@ -569,9 +606,17 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem, * We have either acquired the lock with handoff bit cleared or * set the handoff bit. */ - if (new & RWSEM_FLAG_HANDOFF) + if (new & RWSEM_FLAG_HANDOFF) { + waiter->handoff_set = true; + lockevent_inc(rwsem_wlock_handoff); return false; + } + /* + * Have rwsem_try_write_lock() fully imply rwsem_del_waiter() on + * success. 
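The shape of the rewritten rwsem_try_write_lock() is a single CAS loop. Here is a compact user-space approximation with invented bit values (not the kernel's count layout), where "first" says whether we head the wait list and "may_handoff" stands in for the RT-task/timeout test:

#include <stdatomic.h>
#include <stdbool.h>

#define LOCK_MASK    0x00ffu
#define WRITER_BIT   0x0001u
#define FLAG_HANDOFF 0x0100u

static bool try_write_lock(atomic_uint *count, bool first, bool may_handoff)
{
	unsigned int cur = atomic_load(count), new;

	do {
		if ((cur & FLAG_HANDOFF) && !first)
			return false;   /* handoff is reserved for the first waiter */
		new = cur;
		if (cur & LOCK_MASK) {
			if ((cur & FLAG_HANDOFF) || !may_handoff)
				return false;
			new |= FLAG_HANDOFF;   /* reserve the lock instead of taking it */
		} else {
			new = (cur | WRITER_BIT) & ~FLAG_HANDOFF;
		}
	} while (!atomic_compare_exchange_weak(count, &cur, new));

	return !(new & FLAG_HANDOFF);   /* true only if we actually took the lock */
}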
+ */ + list_del(&waiter->list); rwsem_set_owner(sem); return true; } @@ -956,7 +1001,7 @@ queue: } adjustment += RWSEM_FLAG_WAITERS; } - list_add_tail(&waiter.list, &sem->wait_list); + rwsem_add_waiter(sem, &waiter); /* we're now waiting on the lock, but no longer actively locking */ count = atomic_long_add_return(adjustment, &sem->count); @@ -1002,11 +1047,7 @@ queue: return sem; out_nolock: - list_del(&waiter.list); - if (list_empty(&sem->wait_list)) { - atomic_long_andnot(RWSEM_FLAG_WAITERS|RWSEM_FLAG_HANDOFF, - &sem->count); - } + rwsem_del_waiter(sem, &waiter); raw_spin_unlock_irq(&sem->wait_lock); __set_current_state(TASK_RUNNING); lockevent_inc(rwsem_rlock_fail); @@ -1020,9 +1061,7 @@ static struct rw_semaphore * rwsem_down_write_slowpath(struct rw_semaphore *sem, int state) { long count; - enum writer_wait_state wstate; struct rwsem_waiter waiter; - struct rw_semaphore *ret = sem; DEFINE_WAKE_Q(wake_q); /* do optimistic spinning and steal lock if possible */ @@ -1038,16 +1077,13 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state) waiter.task = current; waiter.type = RWSEM_WAITING_FOR_WRITE; waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT; + waiter.handoff_set = false; raw_spin_lock_irq(&sem->wait_lock); - - /* account for this before adding a new element to the list */ - wstate = list_empty(&sem->wait_list) ? WRITER_FIRST : WRITER_NOT_FIRST; - - list_add_tail(&waiter.list, &sem->wait_list); + rwsem_add_waiter(sem, &waiter); /* we're now waiting on the lock */ - if (wstate == WRITER_NOT_FIRST) { + if (rwsem_first_waiter(sem) != &waiter) { count = atomic_long_read(&sem->count); /* @@ -1083,13 +1119,16 @@ wait: /* wait until we successfully acquire the lock */ set_current_state(state); for (;;) { - if (rwsem_try_write_lock(sem, wstate)) { + if (rwsem_try_write_lock(sem, &waiter)) { /* rwsem_try_write_lock() implies ACQUIRE on success */ break; } raw_spin_unlock_irq(&sem->wait_lock); + if (signal_pending_state(state, current)) + goto out_nolock; + /* * After setting the handoff bit and failing to acquire * the lock, attempt to spin on owner to accelerate lock @@ -1098,7 +1137,7 @@ wait: * In this case, we attempt to acquire the lock again * without sleeping. */ - if (wstate == WRITER_HANDOFF) { + if (waiter.handoff_set) { enum owner_state owner_state; preempt_disable(); @@ -1109,66 +1148,26 @@ wait: goto trylock_again; } - /* Block until there are no active lockers. */ - for (;;) { - if (signal_pending_state(state, current)) - goto out_nolock; - - schedule(); - lockevent_inc(rwsem_sleep_writer); - set_current_state(state); - /* - * If HANDOFF bit is set, unconditionally do - * a trylock. - */ - if (wstate == WRITER_HANDOFF) - break; - - if ((wstate == WRITER_NOT_FIRST) && - (rwsem_first_waiter(sem) == &waiter)) - wstate = WRITER_FIRST; - - count = atomic_long_read(&sem->count); - if (!(count & RWSEM_LOCK_MASK)) - break; - - /* - * The setting of the handoff bit is deferred - * until rwsem_try_write_lock() is called. 
- */ - if ((wstate == WRITER_FIRST) && (rt_task(current) || - time_after(jiffies, waiter.timeout))) { - wstate = WRITER_HANDOFF; - lockevent_inc(rwsem_wlock_handoff); - break; - } - } + schedule(); + lockevent_inc(rwsem_sleep_writer); + set_current_state(state); trylock_again: raw_spin_lock_irq(&sem->wait_lock); } __set_current_state(TASK_RUNNING); - list_del(&waiter.list); raw_spin_unlock_irq(&sem->wait_lock); lockevent_inc(rwsem_wlock); - - return ret; + return sem; out_nolock: __set_current_state(TASK_RUNNING); raw_spin_lock_irq(&sem->wait_lock); - list_del(&waiter.list); - - if (unlikely(wstate == WRITER_HANDOFF)) - atomic_long_add(-RWSEM_FLAG_HANDOFF, &sem->count); - - if (list_empty(&sem->wait_list)) - atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count); - else + rwsem_del_waiter(sem, &waiter); + if (!list_empty(&sem->wait_list)) rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q); raw_spin_unlock_irq(&sem->wait_lock); wake_up_q(&wake_q); lockevent_inc(rwsem_wlock_fail); - return ERR_PTR(-EINTR); } @@ -1249,17 +1248,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem); - /* - * Optimize for the case when the rwsem is not locked at all. - */ - tmp = RWSEM_UNLOCKED_VALUE; - do { + tmp = atomic_long_read(&sem->count); + while (!(tmp & RWSEM_READ_FAILED_MASK)) { if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, - tmp + RWSEM_READER_BIAS)) { + tmp + RWSEM_READER_BIAS)) { rwsem_set_reader_owned(sem); return 1; } - } while (!(tmp & RWSEM_READ_FAILED_MASK)); + } return 0; } diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 9ed9b744876c..e6af502c2fd7 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -693,7 +693,7 @@ static int load_image_and_restore(void) goto Unlock; error = swsusp_read(&flags); - swsusp_close(FMODE_READ); + swsusp_close(FMODE_READ | FMODE_EXCL); if (!error) error = hibernation_restore(flags & SF_PLATFORM_MODE); @@ -983,7 +983,7 @@ static int software_resume(void) /* The snapshot device should not be opened while we're running */ if (!hibernate_acquire()) { error = -EBUSY; - swsusp_close(FMODE_READ); + swsusp_close(FMODE_READ | FMODE_EXCL); goto Unlock; } @@ -1018,7 +1018,7 @@ static int software_resume(void) pm_pr_dbg("Hibernation image not present or could not be loaded.\n"); return error; Close_Finish: - swsusp_close(FMODE_READ); + swsusp_close(FMODE_READ | FMODE_EXCL); goto Finish; } diff --git a/kernel/power/user.c b/kernel/power/user.c index 740723bb3885..ad241b4ff64c 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c @@ -177,7 +177,7 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf, if (res <= 0) goto unlock; } else { - res = PAGE_SIZE - pg_offp; + res = PAGE_SIZE; } if (!data_of(data->handle)) { diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 013bfd6dcc34..57b132b658e1 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -3253,6 +3253,11 @@ void defer_console_output(void) preempt_enable(); } +void printk_trigger_flush(void) +{ + defer_console_output(); +} + int vprintk_deferred(const char *fmt, va_list args) { int r; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 3c9b0fda64ac..77563109c0ea 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1918,7 +1918,7 @@ static void __init init_uclamp_rq(struct rq *rq) }; } - rq->uclamp_flags = 0; + rq->uclamp_flags = UCLAMP_FLAG_IDLE; } static void __init init_uclamp(void) @@ -6617,11 +6617,11 @@ static int __init 
setup_preempt_mode(char *str) int mode = sched_dynamic_mode(str); if (mode < 0) { pr_warn("Dynamic Preempt: unsupported mode: %s\n", str); - return 1; + return 0; } sched_dynamic_update(mode); - return 0; + return 1; } __setup("preempt=", setup_preempt_mode); @@ -8619,9 +8619,6 @@ void __init init_idle(struct task_struct *idle, int cpu) idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY; kthread_set_per_cpu(idle, cpu); - scs_task_reset(idle); - kasan_unpoison_task_stack(idle); - #ifdef CONFIG_SMP /* * It's possible that init_idle() gets called multiple times on a task, @@ -8777,7 +8774,6 @@ void idle_task_exit(void) finish_arch_post_lock_switch(); } - scs_task_reset(current); /* finish_cpu(), as ran on the BP, will clean up the active_mm state */ } diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 872e481d5098..9392aea1804e 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -615,7 +615,8 @@ void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st) .sum_exec_runtime = p->se.sum_exec_runtime, }; - task_cputime(p, &cputime.utime, &cputime.stime); + if (task_cputime(p, &cputime.utime, &cputime.stime)) + cputime.sum_exec_runtime = task_sched_runtime(p); cputime_adjust(&cputime, &p->prev_cputime, ut, st); } EXPORT_SYMBOL_GPL(task_cputime_adjusted); @@ -828,19 +829,21 @@ u64 task_gtime(struct task_struct *t) * add up the pending nohz execution time since the last * cputime snapshot. */ -void task_cputime(struct task_struct *t, u64 *utime, u64 *stime) +bool task_cputime(struct task_struct *t, u64 *utime, u64 *stime) { struct vtime *vtime = &t->vtime; unsigned int seq; u64 delta; + int ret; if (!vtime_accounting_enabled()) { *utime = t->utime; *stime = t->stime; - return; + return false; } do { + ret = false; seq = read_seqcount_begin(&vtime->seqcount); *utime = t->utime; @@ -850,6 +853,7 @@ void task_cputime(struct task_struct *t, u64 *utime, u64 *stime) if (vtime->state < VTIME_SYS) continue; + ret = true; delta = vtime_delta(vtime); /* @@ -861,6 +865,8 @@ void task_cputime(struct task_struct *t, u64 *utime, u64 *stime) else *utime += vtime->utime + delta; } while (read_seqcount_retry(&vtime->seqcount, seq)); + + return ret; } static int vtime_state_fetch(struct vtime *vtime, int cpu) diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index 76577d1642a5..eca38107b32f 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c @@ -238,6 +238,13 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode) } EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ +void __wake_up_pollfree(struct wait_queue_head *wq_head) +{ + __wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE)); + /* POLLFREE must have cleared the queue. */ + WARN_ON_ONCE(waitqueue_active(wq_head)); +} + /* * Note: we use "set_current_state()" _after_ the wait-queue add, * because we need a memory barrier there on SMP, so that any diff --git a/kernel/signal.c b/kernel/signal.c index 7c4b7ae714d4..a629b11bf3e0 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1298,6 +1298,12 @@ int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p return ret; } +enum sig_handler { + HANDLER_CURRENT, /* If reachable use the current handler */ + HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */ + HANDLER_EXIT, /* Only visible as the process exit code */ +}; + /* * Force a signal that the process can't ignore: if necessary * we unblock the signal and change any SIG_IGN to SIG_DFL. 
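The three forcing modes added by the sig_handler enum above boil down to two independent decisions, which the next hunk wires into force_sig_info_to_task(). A stand-alone sketch (pick_disposition() is an invented name):

#include <stdbool.h>

enum sig_handler { HANDLER_CURRENT, HANDLER_SIG_DFL, HANDLER_EXIT };

/* Reset to SIG_DFL when the current handler cannot run (blocked or
 * ignored) or when default/exit semantics were requested; only
 * HANDLER_EXIT also locks the action with SA_IMMUTABLE, and then only
 * on the reset path, as in the real code. */
static void pick_disposition(bool blocked, bool ignored,
			     enum sig_handler handler,
			     bool *reset_to_dfl, bool *set_immutable)
{
	*reset_to_dfl = blocked || ignored || handler != HANDLER_CURRENT;
	*set_immutable = *reset_to_dfl && handler == HANDLER_EXIT;
}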
@@ -1310,7 +1316,8 @@ int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p * that is why we also clear SIGNAL_UNKILLABLE. */ static int -force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t, bool sigdfl) +force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t, + enum sig_handler handler) { unsigned long int flags; int ret, blocked, ignored; @@ -1321,9 +1328,10 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t, bool action = &t->sighand->action[sig-1]; ignored = action->sa.sa_handler == SIG_IGN; blocked = sigismember(&t->blocked, sig); - if (blocked || ignored || sigdfl) { + if (blocked || ignored || (handler != HANDLER_CURRENT)) { action->sa.sa_handler = SIG_DFL; - action->sa.sa_flags |= SA_IMMUTABLE; + if (handler == HANDLER_EXIT) + action->sa.sa_flags |= SA_IMMUTABLE; if (blocked) { sigdelset(&t->blocked, sig); recalc_sigpending_and_wake(t); @@ -1343,7 +1351,7 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t, bool int force_sig_info(struct kernel_siginfo *info) { - return force_sig_info_to_task(info, current, false); + return force_sig_info_to_task(info, current, HANDLER_CURRENT); } /* @@ -1660,7 +1668,20 @@ void force_fatal_sig(int sig) info.si_code = SI_KERNEL; info.si_pid = 0; info.si_uid = 0; - force_sig_info_to_task(&info, current, true); + force_sig_info_to_task(&info, current, HANDLER_SIG_DFL); +} + +void force_exit_sig(int sig) +{ + struct kernel_siginfo info; + + clear_siginfo(&info); + info.si_signo = sig; + info.si_errno = 0; + info.si_code = SI_KERNEL; + info.si_pid = 0; + info.si_uid = 0; + force_sig_info_to_task(&info, current, HANDLER_EXIT); } /* @@ -1693,7 +1714,7 @@ int force_sig_fault_to_task(int sig, int code, void __user *addr info.si_flags = flags; info.si_isr = isr; #endif - return force_sig_info_to_task(&info, t, false); + return force_sig_info_to_task(&info, t, HANDLER_CURRENT); } int force_sig_fault(int sig, int code, void __user *addr @@ -1813,7 +1834,8 @@ int force_sig_seccomp(int syscall, int reason, bool force_coredump) info.si_errno = reason; info.si_arch = syscall_get_arch(current); info.si_syscall = syscall; - return force_sig_info_to_task(&info, current, force_coredump); + return force_sig_info_to_task(&info, current, + force_coredump ? HANDLER_EXIT : HANDLER_CURRENT); } /* For the crazy architectures that include trap information in diff --git a/kernel/softirq.c b/kernel/softirq.c index 322b65d45676..41f470929e99 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -595,7 +595,8 @@ void irq_enter_rcu(void) { __irq_enter_raw(); - if (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)) + if (tick_nohz_full_cpu(smp_processor_id()) || + (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET))) tick_irq_enter(); account_hardirq_enter(current); diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 6bffe5af8cb1..17a283ce2b20 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -1375,6 +1375,13 @@ static inline void tick_nohz_irq_enter(void) now = ktime_get(); if (ts->idle_active) tick_nohz_stop_idle(ts, now); + /* + * If all CPUs are idle, we may need to update a stale jiffies value. + * Note nohz_full is a special case: a timekeeper is guaranteed to stay + * alive but it might be busy looping with interrupts disabled in some + * rare cases (typically stop machine). So we must make sure we have a + * last resort.
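Condensing the irq_enter_rcu() change above into one predicate: nohz_full CPUs now always give the tick a chance to catch up jiffies, not just idle CPUs taking their first nested hardirq. A sketch with invented names:

#include <stdbool.h>

static bool should_run_tick_irq_enter(bool nohz_full_cpu, bool idle_task,
				      bool first_hardirq)
{
	return nohz_full_cpu || (idle_task && first_hardirq);
}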
+ */ if (ts->tick_stopped) tick_nohz_update_jiffies(now); } diff --git a/kernel/time/timer.c b/kernel/time/timer.c index e3d2c23c413d..85f1021ad459 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -2054,26 +2054,28 @@ unsigned long msleep_interruptible(unsigned int msecs) EXPORT_SYMBOL(msleep_interruptible); /** - * usleep_range - Sleep for an approximate time - * @min: Minimum time in usecs to sleep - * @max: Maximum time in usecs to sleep + * usleep_range_state - Sleep for an approximate time in a given state + * @min: Minimum time in usecs to sleep + * @max: Maximum time in usecs to sleep + * @state: State the current task will be in while sleeping * * In non-atomic context where the exact wakeup time is flexible, use - * usleep_range() instead of udelay(). The sleep improves responsiveness + * usleep_range_state() instead of udelay(). The sleep improves responsiveness * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces * power usage by allowing hrtimers to take advantage of an already- * scheduled interrupt instead of scheduling a new one just for this sleep. */ -void __sched usleep_range(unsigned long min, unsigned long max) +void __sched usleep_range_state(unsigned long min, unsigned long max, + unsigned int state) { ktime_t exp = ktime_add_us(ktime_get(), min); u64 delta = (u64)(max - min) * NSEC_PER_USEC; for (;;) { - __set_current_state(TASK_UNINTERRUPTIBLE); + __set_current_state(state); /* Do not return before the requested sleep time has elapsed */ if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS)) break; } } -EXPORT_SYMBOL(usleep_range); +EXPORT_SYMBOL(usleep_range_state); diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 7396488793ff..ae9755037b7e 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1111,8 +1111,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_ktime_get_ns_proto; case BPF_FUNC_ktime_get_boot_ns: return &bpf_ktime_get_boot_ns_proto; - case BPF_FUNC_ktime_get_coarse_ns: - return &bpf_ktime_get_coarse_ns_proto; case BPF_FUNC_tail_call: return &bpf_tail_call_proto; case BPF_FUNC_get_current_pid_tgid: diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 30bc880c3849..be5f6b32a012 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -5217,6 +5217,7 @@ int unregister_ftrace_direct(unsigned long ip, unsigned long addr) { struct ftrace_direct_func *direct; struct ftrace_func_entry *entry; + struct ftrace_hash *hash; int ret = -ENODEV; mutex_lock(&direct_mutex); @@ -5225,7 +5226,8 @@ int unregister_ftrace_direct(unsigned long ip, unsigned long addr) if (!entry) goto out_unlock; - if (direct_functions->count == 1) + hash = direct_ops.func_hash->filter_hash; + if (hash->count == 1) unregister_ftrace_function(&direct_ops); ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0); @@ -5540,6 +5542,10 @@ int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr) err = unregister_ftrace_function(ops); remove_direct_functions_hash(hash, addr); mutex_unlock(&direct_mutex); + + /* clean up for a possible later register call */ + ops->func = NULL; + ops->trampoline = 0; return err; } EXPORT_SYMBOL_GPL(unregister_ftrace_direct_multi); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index f9139dc1262c..88de94da596b 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3812,6 +3812,18 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt, iter->fmt[i] = '\0';
trace_seq_vprintf(&iter->seq, iter->fmt, ap); + /* + * If iter->seq is full, the above call no longer guarantees + * that ap is in sync with fmt processing, and further calls + * to va_arg() can return wrong positional arguments. + * + * Ensure that ap is no longer used in this case. + */ + if (iter->seq.full) { + p = ""; + break; + } + if (star) len = va_arg(ap, int); @@ -6706,9 +6718,7 @@ waitagain: cnt = PAGE_SIZE - 1; /* reset all but tr, trace, and overruns */ - memset(&iter->seq, 0, - sizeof(struct trace_iterator) - - offsetof(struct trace_iterator, seq)); + memset_startat(iter, 0, seq); cpumask_clear(iter->started); trace_seq_init(&iter->seq); iter->pos = -1; diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 6b60ab9475ed..38715aa6cfdf 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -1366,14 +1366,26 @@ __event_trigger_test_discard(struct trace_event_file *file, if (eflags & EVENT_FILE_FL_TRIGGER_COND) *tt = event_triggers_call(file, buffer, entry, event); - if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) || - (unlikely(file->flags & EVENT_FILE_FL_FILTERED) && - !filter_match_preds(file->filter, entry))) { - __trace_event_discard_commit(buffer, event); - return true; - } + if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED | + EVENT_FILE_FL_FILTERED | + EVENT_FILE_FL_PID_FILTER)))) + return false; + + if (file->flags & EVENT_FILE_FL_SOFT_DISABLED) + goto discard; + + if (file->flags & EVENT_FILE_FL_FILTERED && + !filter_match_preds(file->filter, entry)) + goto discard; + + if ((file->flags & EVENT_FILE_FL_PID_FILTER) && + trace_event_ignore_this_pid(file)) + goto discard; return false; + discard: + __trace_event_discard_commit(buffer, event); + return true; } /** diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 4021b9a79f93..92be9cb1d7d4 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -2678,12 +2678,24 @@ static struct trace_event_file * trace_create_new_event(struct trace_event_call *call, struct trace_array *tr) { + struct trace_pid_list *no_pid_list; + struct trace_pid_list *pid_list; struct trace_event_file *file; + unsigned int first; file = kmem_cache_alloc(file_cachep, GFP_TRACE); if (!file) return NULL; + pid_list = rcu_dereference_protected(tr->filtered_pids, + lockdep_is_held(&event_mutex)); + no_pid_list = rcu_dereference_protected(tr->filtered_no_pids, + lockdep_is_held(&event_mutex)); + + if (!trace_pid_list_first(pid_list, &first) || + !trace_pid_list_first(no_pid_list, &first)) + file->flags |= EVENT_FILE_FL_PID_FILTER; + file->event_call = call; file->tr = tr; atomic_set(&file->sm_ref, 0); diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 8a10046c775f..319f9c8ca7e7 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -2576,28 +2576,27 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, /* Split the expression string at the root operator */ if (!sep) - goto free; + return ERR_PTR(-EINVAL); + *sep = '\0'; operand1_str = str; str = sep+1; /* Binary operator requires both operands */ if (*operand1_str == '\0' || *str == '\0') - goto free; + return ERR_PTR(-EINVAL); operand_flags = 0; /* LHS of string is an expression e.g. 
a+b in a+b+c */ operand1 = parse_expr(hist_data, file, operand1_str, operand_flags, NULL, n_subexprs); - if (IS_ERR(operand1)) { - ret = PTR_ERR(operand1); - operand1 = NULL; - goto free; - } + if (IS_ERR(operand1)) + return ERR_CAST(operand1); + if (operand1->flags & HIST_FIELD_FL_STRING) { hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(operand1_str)); ret = -EINVAL; - goto free; + goto free_op1; } /* RHS of string is another expression e.g. c in a+b+c */ @@ -2605,13 +2604,12 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, n_subexprs); if (IS_ERR(operand2)) { ret = PTR_ERR(operand2); - operand2 = NULL; - goto free; + goto free_op1; } if (operand2->flags & HIST_FIELD_FL_STRING) { hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str)); ret = -EINVAL; - goto free; + goto free_operands; } switch (field_op) { @@ -2629,12 +2627,12 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, break; default: ret = -EINVAL; - goto free; + goto free_operands; } ret = check_expr_operands(file->tr, operand1, operand2, &var1, &var2); if (ret) - goto free; + goto free_operands; operand_flags = var1 ? var1->flags : operand1->flags; operand2_flags = var2 ? var2->flags : operand2->flags; @@ -2653,12 +2651,13 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, expr = create_hist_field(hist_data, NULL, flags, var_name); if (!expr) { ret = -ENOMEM; - goto free; + goto free_operands; } operand1->read_once = true; operand2->read_once = true; + /* The operands are now owned and free'd by 'expr' */ expr->operands[0] = operand1; expr->operands[1] = operand2; @@ -2669,7 +2668,7 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, if (!divisor) { hist_err(file->tr, HIST_ERR_DIVISION_BY_ZERO, errpos(str)); ret = -EDOM; - goto free; + goto free_expr; } /* @@ -2709,18 +2708,22 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, expr->type = kstrdup_const(operand1->type, GFP_KERNEL); if (!expr->type) { ret = -ENOMEM; - goto free; + goto free_expr; } expr->name = expr_str(expr, 0); } return expr; -free: - destroy_hist_field(operand1, 0); + +free_operands: destroy_hist_field(operand2, 0); - destroy_hist_field(expr, 0); +free_op1: + destroy_hist_field(operand1, 0); + return ERR_PTR(ret); +free_expr: + destroy_hist_field(expr, 0); return ERR_PTR(ret); } @@ -3026,8 +3029,10 @@ static inline void __update_field_vars(struct tracing_map_elt *elt, if (val->flags & HIST_FIELD_FL_STRING) { char *str = elt_data->field_var_str[j++]; char *val_str = (char *)(uintptr_t)var_val; + unsigned int size; - strscpy(str, val_str, val->size); + size = min(val->size, STR_VAR_LEN_MAX); + strscpy(str, val_str, size); var_val = (u64)(uintptr_t)str; } tracing_map_set_var(elt, var_idx, var_val); @@ -3752,7 +3757,7 @@ static int check_synth_field(struct synth_event *event, if (strcmp(field->type, hist_field->type) != 0) { if (field->size != hist_field->size || - field->is_signed != hist_field->is_signed) + (!field->is_string && field->is_signed != hist_field->is_signed)) return -EINVAL; } @@ -4914,6 +4919,7 @@ static void hist_trigger_elt_update(struct hist_trigger_data *hist_data, if (hist_field->flags & HIST_FIELD_FL_STRING) { unsigned int str_start, var_str_idx, idx; char *str, *val_str; + unsigned int size; str_start = hist_data->n_field_var_str + hist_data->n_save_var_str; @@ -4922,7 +4928,9 @@ static void hist_trigger_elt_update(struct 
hist_trigger_data *hist_data, str = elt_data->field_var_str[idx]; val_str = (char *)(uintptr_t)hist_val; - strscpy(str, val_str, hist_field->size); + + size = min(hist_field->size, STR_VAR_LEN_MAX); + strscpy(str, val_str, size); hist_val = (u64)(uintptr_t)str; } diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c index 22db3ce95e74..ca9c13b2ecf4 100644 --- a/kernel/trace/trace_events_synth.c +++ b/kernel/trace/trace_events_synth.c @@ -1237,9 +1237,8 @@ static int __create_synth_event(const char *name, const char *raw_fields) argv + consumed, &consumed, &field_version); if (IS_ERR(field)) { - argv_free(argv); ret = PTR_ERR(field); - goto err; + goto err_free_arg; } /* @@ -1262,18 +1261,19 @@ static int __create_synth_event(const char *name, const char *raw_fields) if (cmd_version > 1 && n_fields_this_loop >= 1) { synth_err(SYNTH_ERR_INVALID_CMD, errpos(field_str)); ret = -EINVAL; - goto err; + goto err_free_arg; } fields[n_fields++] = field; if (n_fields == SYNTH_FIELDS_MAX) { synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0); ret = -EINVAL; - goto err; + goto err_free_arg; } n_fields_this_loop++; } + argv_free(argv); if (consumed < argc) { synth_err(SYNTH_ERR_INVALID_CMD, 0); @@ -1281,7 +1281,6 @@ static int __create_synth_event(const char *name, const char *raw_fields) goto err; } - argv_free(argv); } if (n_fields == 0) { @@ -1307,6 +1306,8 @@ static int __create_synth_event(const char *name, const char *raw_fields) kfree(saved_fields); return ret; + err_free_arg: + argv_free(argv); err: for (i = 0; i < n_fields; i++) free_synth_field(fields[i]); diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 0a5c0db3137e..f5f0039d31e5 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -1313,6 +1313,7 @@ static int uprobe_perf_open(struct trace_event_call *call, return 0; list_for_each_entry(pos, trace_probe_probe_list(tp), list) { + tu = container_of(pos, struct trace_uprobe, tp); err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true); if (err) { uprobe_perf_close(call, event); diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c index 39bb56d2dcbe..9628b5571846 100644 --- a/kernel/trace/tracing_map.c +++ b/kernel/trace/tracing_map.c @@ -15,6 +15,7 @@ #include <linux/jhash.h> #include <linux/slab.h> #include <linux/sort.h> +#include <linux/kmemleak.h> #include "tracing_map.h" #include "trace.h" @@ -307,6 +308,7 @@ static void tracing_map_array_free(struct tracing_map_array *a) for (i = 0; i < a->n_pages; i++) { if (!a->pages[i]) break; + kmemleak_free(a->pages[i]); free_page((unsigned long)a->pages[i]); } @@ -342,6 +344,7 @@ static struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts, a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL); if (!a->pages[i]) goto free; + kmemleak_alloc(a->pages[i], PAGE_SIZE, 1, GFP_KERNEL); } out: return a; diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 9ef7ce18b4f5..5e14e32056ad 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -316,6 +316,7 @@ config DEBUG_INFO_BTF bool "Generate BTF typeinfo" depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST + depends on BPF_SYSCALL help Generate deduplicated BTF type information from DWARF debug info. 
Turning this on expects presence of pahole tool, which will convert @@ -346,8 +347,9 @@ config FRAME_WARN int "Warn for stack frames larger than" range 0 8192 default 2048 if GCC_PLUGIN_LATENT_ENTROPY - default 1536 if (!64BIT && (PARISC || XTENSA)) - default 1024 if (!64BIT && !PARISC) + default 2048 if PARISC + default 1536 if (!64BIT && XTENSA) + default 1024 if !64BIT default 2048 if 64BIT help Tell gcc to warn at build time for stack frames larger than this. diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c index f9e89001b52e..199ab201d501 100644 --- a/lib/nmi_backtrace.c +++ b/lib/nmi_backtrace.c @@ -75,6 +75,12 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask, touch_softlockup_watchdog(); } + /* + * Force flush any remote buffers that might be stuck in IRQ context + * and therefore could not run their irq_work. + */ + printk_trigger_flush(); + clear_bit_unlock(0, &backtrace_flag); put_cpu(); } diff --git a/lib/siphash.c b/lib/siphash.c index a90112ee72a1..72b9068ab57b 100644 --- a/lib/siphash.c +++ b/lib/siphash.c @@ -49,6 +49,7 @@ SIPROUND; \ return (v0 ^ v1) ^ (v2 ^ v3); +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u64)); @@ -80,8 +81,8 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key) POSTAMBLE } EXPORT_SYMBOL(__siphash_aligned); +#endif -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u64)); @@ -113,7 +114,6 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key) POSTAMBLE } EXPORT_SYMBOL(__siphash_unaligned); -#endif /** * siphash_1u64 - compute 64-bit siphash PRF value of a u64 @@ -250,6 +250,7 @@ EXPORT_SYMBOL(siphash_3u32); HSIPROUND; \ return (v0 ^ v1) ^ (v2 ^ v3); +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u64)); @@ -280,8 +281,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) HPOSTAMBLE } EXPORT_SYMBOL(__hsiphash_aligned); +#endif -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_unaligned(const void *data, size_t len, const hsiphash_key_t *key) { @@ -313,7 +314,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len, HPOSTAMBLE } EXPORT_SYMBOL(__hsiphash_unaligned); -#endif /** * hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32 @@ -418,6 +418,7 @@ EXPORT_SYMBOL(hsiphash_4u32); HSIPROUND; \ return v1 ^ v3; +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u32)); @@ -438,8 +439,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) HPOSTAMBLE } EXPORT_SYMBOL(__hsiphash_aligned); +#endif -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_unaligned(const void *data, size_t len, const hsiphash_key_t *key) { @@ -461,7 +462,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len, HPOSTAMBLE } EXPORT_SYMBOL(__hsiphash_unaligned); -#endif /** * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32 diff --git a/lib/test_kasan.c b/lib/test_kasan.c index 67ed689a0b1b..0643573f8686 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -869,6 +869,7 @@ static void kasan_memchr(struct kunit *test) ptr = 
kmalloc(size, GFP_KERNEL | __GFP_ZERO); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); + OPTIMIZER_HIDE_VAR(size); KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = memchr(ptr, '1', size + 1)); @@ -894,6 +895,7 @@ static void kasan_memcmp(struct kunit *test) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); memset(arr, 0, sizeof(arr)); + OPTIMIZER_HIDE_VAR(size); KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = memcmp(ptr, arr, size+1)); kfree(ptr); diff --git a/lib/zstd/Makefile b/lib/zstd/Makefile index 65218ec5b8f2..fc45339fc3a3 100644 --- a/lib/zstd/Makefile +++ b/lib/zstd/Makefile @@ -11,8 +11,6 @@ obj-$(CONFIG_ZSTD_COMPRESS) += zstd_compress.o obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o -ccflags-y += -O3 - zstd_compress-y := \ zstd_compress_module.o \ common/debug.o \ diff --git a/lib/zstd/common/compiler.h b/lib/zstd/common/compiler.h index a1a051e4bce6..f5a9c70a228a 100644 --- a/lib/zstd/common/compiler.h +++ b/lib/zstd/common/compiler.h @@ -16,6 +16,7 @@ *********************************************************/ /* force inlining */ +#if !defined(ZSTD_NO_INLINE) #if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ # define INLINE_KEYWORD inline #else @@ -24,6 +25,12 @@ #define FORCE_INLINE_ATTR __attribute__((always_inline)) +#else + +#define INLINE_KEYWORD +#define FORCE_INLINE_ATTR + +#endif /* On MSVC qsort requires that functions passed into it use the __cdecl calling conversion(CC). diff --git a/lib/zstd/compress/zstd_compress_superblock.c b/lib/zstd/compress/zstd_compress_superblock.c index ee03e0aedb03..b0610b255653 100644 --- a/lib/zstd/compress/zstd_compress_superblock.c +++ b/lib/zstd/compress/zstd_compress_superblock.c @@ -411,6 +411,8 @@ static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* const seqDef* sp = sstart; size_t matchLengthSum = 0; size_t litLengthSum = 0; + /* Only used by assert(), suppress unused variable warnings in production. */ + (void)litLengthSum; while (send-sp > 0) { ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp); litLengthSum += seqLen.litLength; diff --git a/lib/zstd/compress/zstd_opt.c b/lib/zstd/compress/zstd_opt.c index 04337050fe9a..dfc55e3e8119 100644 --- a/lib/zstd/compress/zstd_opt.c +++ b/lib/zstd/compress/zstd_opt.c @@ -8,6 +8,18 @@ * You may select, at your option, one of the above-listed licenses. */ +/* + * Disable inlining for the optimal parser for the kernel build. + * It is unlikely to be used in the kernel, and where it is used + * latency shouldn't matter because it is very slow to begin with. + * We prefer a ~180KB binary size win over faster optimal parsing. + * + * TODO(https://github.com/facebook/zstd/issues/2862): + * Improve the code size of the optimal parser in general, so we + * don't need this hack for the kernel build. + */ +#define ZSTD_NO_INLINE 1 + #include "zstd_compress_internal.h" #include "hist.h" #include "zstd_opt.h" diff --git a/mm/Kconfig b/mm/Kconfig index 068ce591a13a..356f4f2c779e 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -428,7 +428,7 @@ config THP_SWAP # UP and nommu archs use km based percpu allocator # config NEED_PER_CPU_KM - depends on !SMP + depends on !SMP || !MMU bool default y @@ -890,6 +890,9 @@ config MAPPING_DIRTY_HELPERS config KMAP_LOCAL bool +config KMAP_LOCAL_NON_LINEAR_PTE_ARRAY + bool + # struct io_mapping based helper. 
Selected by drivers that need them config IO_MAPPING bool diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 1eead4761011..eae96dfe0261 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -945,6 +945,13 @@ void bdi_unregister(struct backing_dev_info *bdi) wb_shutdown(&bdi->wb); cgwb_bdi_unregister(bdi); + /* + * If this BDI's min ratio has been set, use bdi_set_min_ratio() to + * update the global bdi_min_ratio. + */ + if (bdi->min_ratio) + bdi_set_min_ratio(bdi, 0); + if (bdi->dev) { bdi_debug_unregister(bdi); device_unregister(bdi->dev); diff --git a/mm/damon/core.c b/mm/damon/core.c index c381b3c525d0..e92497895202 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -282,7 +282,6 @@ int damon_set_targets(struct damon_ctx *ctx, for (i = 0; i < nr_ids; i++) { t = damon_new_target(ids[i]); if (!t) { - pr_err("Failed to alloc damon_target\n"); /* The caller should do cleanup of the ids itself */ damon_for_each_target_safe(t, next, ctx) damon_destroy_target(t); @@ -312,16 +311,10 @@ int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int, unsigned long aggr_int, unsigned long primitive_upd_int, unsigned long min_nr_reg, unsigned long max_nr_reg) { - if (min_nr_reg < 3) { - pr_err("min_nr_regions (%lu) must be at least 3\n", - min_nr_reg); + if (min_nr_reg < 3) return -EINVAL; - } - if (min_nr_reg > max_nr_reg) { - pr_err("invalid nr_regions. min (%lu) > max (%lu)\n", - min_nr_reg, max_nr_reg); + if (min_nr_reg > max_nr_reg) return -EINVAL; - } ctx->sample_interval = sample_int; ctx->aggr_interval = aggr_int; @@ -980,10 +973,11 @@ static unsigned long damos_wmark_wait_us(struct damos *scheme) static void kdamond_usleep(unsigned long usecs) { - if (usecs > 100 * 1000) - schedule_timeout_interruptible(usecs_to_jiffies(usecs)); + /* See Documentation/timers/timers-howto.rst for the thresholds */ + if (usecs > 20 * USEC_PER_MSEC) + schedule_timeout_idle(usecs_to_jiffies(usecs)); else - usleep_range(usecs, usecs + 1); + usleep_idle_range(usecs, usecs + 1); } /* Returns negative error code if it's not activated but should return */ @@ -1038,7 +1032,7 @@ static int kdamond_fn(void *data) ctx->callback.after_sampling(ctx)) done = true; - usleep_range(ctx->sample_interval, ctx->sample_interval + 1); + kdamond_usleep(ctx->sample_interval); if (ctx->primitive.check_accesses) max_nr_accesses = ctx->primitive.check_accesses(ctx); diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c index eccc14b34901..1efac0022e9a 100644 --- a/mm/damon/dbgfs.c +++ b/mm/damon/dbgfs.c @@ -32,7 +32,7 @@ static char *user_input_str(const char __user *buf, size_t count, loff_t *ppos) if (*ppos) return ERR_PTR(-EINVAL); - kbuf = kmalloc(count + 1, GFP_KERNEL); + kbuf = kmalloc(count + 1, GFP_KERNEL | __GFP_NOWARN); if (!kbuf) return ERR_PTR(-ENOMEM); @@ -133,7 +133,7 @@ static ssize_t dbgfs_schemes_read(struct file *file, char __user *buf, char *kbuf; ssize_t len; - kbuf = kmalloc(count, GFP_KERNEL); + kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN); if (!kbuf) return -ENOMEM; @@ -210,10 +210,8 @@ static struct damos **str_to_schemes(const char *str, ssize_t len, &wmarks.low, &parsed); if (ret != 18) break; - if (!damos_action_valid(action)) { - pr_err("wrong action %d\n", action); + if (!damos_action_valid(action)) goto fail; - } pos += parsed; scheme = damon_new_scheme(min_sz, max_sz, min_nr_a, max_nr_a, @@ -452,7 +450,7 @@ static ssize_t dbgfs_init_regions_read(struct file *file, char __user *buf, char *kbuf; ssize_t len; - kbuf = kmalloc(count, GFP_KERNEL); + kbuf = kmalloc(count, GFP_KERNEL | 
__GFP_NOWARN); if (!kbuf) return -ENOMEM; @@ -578,7 +576,7 @@ static ssize_t dbgfs_kdamond_pid_read(struct file *file, char *kbuf; ssize_t len; - kbuf = kmalloc(count, GFP_KERNEL); + kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN); if (!kbuf) return -ENOMEM; @@ -877,12 +875,14 @@ static ssize_t dbgfs_monitor_on_write(struct file *file, return -EINVAL; } + mutex_lock(&damon_dbgfs_lock); if (!strncmp(kbuf, "on", count)) { int i; for (i = 0; i < dbgfs_nr_ctxs; i++) { if (damon_targets_empty(dbgfs_ctxs[i])) { kfree(kbuf); + mutex_unlock(&damon_dbgfs_lock); return -EINVAL; } } @@ -892,6 +892,7 @@ static ssize_t dbgfs_monitor_on_write(struct file *file, } else { ret = -EINVAL; } + mutex_unlock(&damon_dbgfs_lock); if (!ret) ret = count; @@ -944,15 +945,16 @@ static int __init __damon_dbgfs_init(void) static int __init damon_dbgfs_init(void) { - int rc; + int rc = -ENOMEM; + mutex_lock(&damon_dbgfs_lock); dbgfs_ctxs = kmalloc(sizeof(*dbgfs_ctxs), GFP_KERNEL); if (!dbgfs_ctxs) - return -ENOMEM; + goto out; dbgfs_ctxs[0] = dbgfs_new_ctx(); if (!dbgfs_ctxs[0]) { kfree(dbgfs_ctxs); - return -ENOMEM; + goto out; } dbgfs_nr_ctxs = 1; @@ -963,6 +965,8 @@ static int __init damon_dbgfs_init(void) pr_err("%s: dbgfs init failed\n", __func__); } +out: + mutex_unlock(&damon_dbgfs_lock); return rc; } diff --git a/mm/damon/vaddr-test.h b/mm/damon/vaddr-test.h index ecfd0b2ed222..6a1b9272ea12 100644 --- a/mm/damon/vaddr-test.h +++ b/mm/damon/vaddr-test.h @@ -135,7 +135,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test, struct damon_addr_range *three_regions, unsigned long *expected, int nr_expected) { - struct damon_ctx *ctx = damon_new_ctx(); struct damon_target *t; struct damon_region *r; int i; @@ -145,7 +144,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test, r = damon_new_region(regions[i * 2], regions[i * 2 + 1]); damon_add_region(r, t); } - damon_add_target(ctx, t); damon_va_apply_three_regions(t, three_regions); @@ -154,8 +152,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test, KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]); KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]); } - - damon_destroy_ctx(ctx); } /* @@ -252,60 +248,59 @@ static void damon_test_apply_three_regions4(struct kunit *test) new_three_regions, expected, ARRAY_SIZE(expected)); } -static void damon_test_split_evenly(struct kunit *test) +static void damon_test_split_evenly_fail(struct kunit *test, + unsigned long start, unsigned long end, unsigned int nr_pieces) { - struct damon_ctx *c = damon_new_ctx(); - struct damon_target *t; - struct damon_region *r; - unsigned long i; - - KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5), - -EINVAL); - - t = damon_new_target(42); - r = damon_new_region(0, 100); - KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 0), -EINVAL); + struct damon_target *t = damon_new_target(42); + struct damon_region *r = damon_new_region(start, end); damon_add_region(r, t); - KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 10), 0); - KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 10u); + KUNIT_EXPECT_EQ(test, + damon_va_evenly_split_region(t, r, nr_pieces), -EINVAL); + KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u); - i = 0; damon_for_each_region(r, t) { - KUNIT_EXPECT_EQ(test, r->ar.start, i++ * 10); - KUNIT_EXPECT_EQ(test, r->ar.end, i * 10); + KUNIT_EXPECT_EQ(test, r->ar.start, start); + KUNIT_EXPECT_EQ(test, r->ar.end, end); } + damon_free_target(t); +} + +static void damon_test_split_evenly_succ(struct kunit *test, + 
unsigned long start, unsigned long end, unsigned int nr_pieces) +{ + struct damon_target *t = damon_new_target(42); + struct damon_region *r = damon_new_region(start, end); + unsigned long expected_width = (end - start) / nr_pieces; + unsigned long i = 0; - t = damon_new_target(42); - r = damon_new_region(5, 59); damon_add_region(r, t); - KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 5), 0); - KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u); + KUNIT_EXPECT_EQ(test, + damon_va_evenly_split_region(t, r, nr_pieces), 0); + KUNIT_EXPECT_EQ(test, damon_nr_regions(t), nr_pieces); - i = 0; damon_for_each_region(r, t) { - if (i == 4) + if (i == nr_pieces - 1) break; - KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i++); - KUNIT_EXPECT_EQ(test, r->ar.end, 5 + 10 * i); + KUNIT_EXPECT_EQ(test, + r->ar.start, start + i++ * expected_width); + KUNIT_EXPECT_EQ(test, r->ar.end, start + i * expected_width); } - KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i); - KUNIT_EXPECT_EQ(test, r->ar.end, 59ul); + KUNIT_EXPECT_EQ(test, r->ar.start, start + i * expected_width); + KUNIT_EXPECT_EQ(test, r->ar.end, end); damon_free_target(t); +} - t = damon_new_target(42); - r = damon_new_region(5, 6); - damon_add_region(r, t); - KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 2), -EINVAL); - KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u); +static void damon_test_split_evenly(struct kunit *test) +{ + KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5), + -EINVAL); - damon_for_each_region(r, t) { - KUNIT_EXPECT_EQ(test, r->ar.start, 5ul); - KUNIT_EXPECT_EQ(test, r->ar.end, 6ul); - } - damon_free_target(t); - damon_destroy_ctx(c); + damon_test_split_evenly_fail(test, 0, 100, 0); + damon_test_split_evenly_succ(test, 0, 100, 10); + damon_test_split_evenly_succ(test, 5, 59, 5); + damon_test_split_evenly_fail(test, 5, 6, 2); } static struct kunit_case damon_test_cases[] = { diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index 35fe49080ee9..20a9a9d69eb1 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -13,6 +13,7 @@ #include <linux/mmu_notifier.h> #include <linux/page_idle.h> #include <linux/pagewalk.h> +#include <linux/sched/mm.h> #include "prmtv-common.h" @@ -626,7 +627,6 @@ int damon_va_apply_scheme(struct damon_ctx *ctx, struct damon_target *t, case DAMOS_STAT: return 0; default: - pr_warn("Wrong action %d\n", scheme->action); return -EINVAL; } diff --git a/mm/filemap.c b/mm/filemap.c index daa0e23a6ee6..39c4c46c6133 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3253,8 +3253,6 @@ static struct page *next_uptodate_page(struct page *page, goto skip; if (!PageUptodate(page) || PageReadahead(page)) goto skip; - if (PageHWPoison(page)) - goto skip; if (!trylock_page(page)) goto skip; if (page->mapping != mapping) diff --git a/mm/highmem.c b/mm/highmem.c index 88f65f155845..762679050c9a 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -359,7 +359,6 @@ void kunmap_high(struct page *page) } EXPORT_SYMBOL(kunmap_high); -#ifdef CONFIG_TRANSPARENT_HUGEPAGE void zero_user_segments(struct page *page, unsigned start1, unsigned end1, unsigned start2, unsigned end2) { @@ -416,7 +415,6 @@ void zero_user_segments(struct page *page, unsigned start1, unsigned end1, BUG_ON((start1 | start2 | end1 | end2) != 0); } EXPORT_SYMBOL(zero_user_segments); -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* CONFIG_HIGHMEM */ #ifdef CONFIG_KMAP_LOCAL @@ -503,16 +501,22 @@ static inline int kmap_local_calc_idx(int idx) static pte_t *__kmap_pte; -static pte_t *kmap_get_pte(void) +static pte_t *kmap_get_pte(unsigned 
long vaddr, int idx) { + if (IS_ENABLED(CONFIG_KMAP_LOCAL_NON_LINEAR_PTE_ARRAY)) + /* + * Set by the arch if __kmap_pte[-idx] does not produce + * the correct entry. + */ + return virt_to_kpte(vaddr); if (!__kmap_pte) __kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN)); - return __kmap_pte; + return &__kmap_pte[-idx]; } void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot) { - pte_t pteval, *kmap_pte = kmap_get_pte(); + pte_t pteval, *kmap_pte; unsigned long vaddr; int idx; @@ -524,9 +528,10 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot) preempt_disable(); idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); - BUG_ON(!pte_none(*(kmap_pte - idx))); + kmap_pte = kmap_get_pte(vaddr, idx); + BUG_ON(!pte_none(*kmap_pte)); pteval = pfn_pte(pfn, prot); - arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte - idx, pteval); + arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte, pteval); arch_kmap_local_post_map(vaddr, pteval); current->kmap_ctrl.pteval[kmap_local_idx()] = pteval; preempt_enable(); @@ -559,7 +564,7 @@ EXPORT_SYMBOL(__kmap_local_page_prot); void kunmap_local_indexed(void *vaddr) { unsigned long addr = (unsigned long) vaddr & PAGE_MASK; - pte_t *kmap_pte = kmap_get_pte(); + pte_t *kmap_pte; int idx; if (addr < __fix_to_virt(FIX_KMAP_END) || @@ -584,8 +589,9 @@ void kunmap_local_indexed(void *vaddr) idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr); WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); + kmap_pte = kmap_get_pte(addr, idx); arch_kmap_local_pre_unmap(addr); - pte_clear(&init_mm, addr, kmap_pte - idx); + pte_clear(&init_mm, addr, kmap_pte); arch_kmap_local_post_unmap(addr); current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0); kmap_local_idx_pop(); @@ -607,7 +613,7 @@ EXPORT_SYMBOL(kunmap_local_indexed); void __kmap_local_sched_out(void) { struct task_struct *tsk = current; - pte_t *kmap_pte = kmap_get_pte(); + pte_t *kmap_pte; int i; /* Clear kmaps */ @@ -634,8 +640,9 @@ void __kmap_local_sched_out(void) idx = arch_kmap_local_map_idx(i, pte_pfn(pteval)); addr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + kmap_pte = kmap_get_pte(addr, idx); arch_kmap_local_pre_unmap(addr); - pte_clear(&init_mm, addr, kmap_pte - idx); + pte_clear(&init_mm, addr, kmap_pte); arch_kmap_local_post_unmap(addr); } } @@ -643,7 +650,7 @@ void __kmap_local_sched_out(void) void __kmap_local_sched_in(void) { struct task_struct *tsk = current; - pte_t *kmap_pte = kmap_get_pte(); + pte_t *kmap_pte; int i; /* Restore kmaps */ @@ -663,7 +670,8 @@ void __kmap_local_sched_in(void) /* See comment in __kmap_local_sched_out() */ idx = arch_kmap_local_map_idx(i, pte_pfn(pteval)); addr = __fix_to_virt(FIX_KMAP_BEGIN + idx); - set_pte_at(&init_mm, addr, kmap_pte - idx, pteval); + kmap_pte = kmap_get_pte(addr, idx); + set_pte_at(&init_mm, addr, kmap_pte, pteval); arch_kmap_local_post_map(addr, pteval); } } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index e09159c957e3..a1baa198519a 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1037,8 +1037,10 @@ void clear_vma_resv_huge_pages(struct vm_area_struct *vma) */ struct resv_map *reservations = vma_resv_map(vma); - if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) + if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { + resv_map_put_hugetlb_cgroup_uncharge_info(reservations); kref_put(&reservations->refs, resv_map_release); + } reset_vma_resv_huge_pages(vma); } @@ -2971,7 +2973,7 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid) struct huge_bootmem_page *m = 
NULL; /* initialize for clang */ int nr_nodes, node; - if (nid >= nr_online_nodes) + if (nid != NUMA_NO_NODE && nid >= nr_online_nodes) return 0; /* do node specific alloc */ if (nid != NUMA_NO_NODE) { @@ -4917,9 +4919,9 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma, move_huge_pte(vma, old_addr, new_addr, src_pte); } - i_mmap_unlock_write(mapping); flush_tlb_range(vma, old_end - len, old_end); mmu_notifier_invalidate_range_end(&range); + i_mmap_unlock_write(mapping); return len + old_addr - old_end; } @@ -4937,6 +4939,7 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct struct hstate *h = hstate_vma(vma); unsigned long sz = huge_page_size(h); struct mmu_notifier_range range; + bool force_flush = false; WARN_ON(!is_vm_hugetlb_page(vma)); BUG_ON(start & ~huge_page_mask(h)); @@ -4965,10 +4968,8 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct ptl = huge_pte_lock(h, mm, ptep); if (huge_pmd_unshare(mm, vma, &address, ptep)) { spin_unlock(ptl); - /* - * We just unmapped a page of PMDs by clearing a PUD. - * The caller's TLB flush range should cover this area. - */ + tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE); + force_flush = true; continue; } @@ -5025,6 +5026,22 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct } mmu_notifier_invalidate_range_end(&range); tlb_end_vma(tlb, vma); + + /* + * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We + * could defer the flush until now, since by holding i_mmap_rwsem we + * guaranteed that the last reference would not be dropped. But we must + * do the flushing before we return, as otherwise i_mmap_rwsem will be + * dropped and the last reference to the shared PMDs page might be + * dropped as well. + * + * In theory we could defer the freeing of the PMD pages as well, but + * huge_pmd_unshare() relies on the exact page_count for the PMD page to + * detect sharing, so we cannot defer the release of the page either. + * Instead, do the flush now. + */ + if (force_flush) + tlb_flush_mmu_tlbonly(tlb); } void __unmap_hugepage_range_final(struct mmu_gather *tlb, @@ -5734,13 +5751,14 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, int ret = -ENOMEM; struct page *page; int writable; - bool new_pagecache_page = false; + bool page_in_pagecache = false; if (is_continue) { ret = -EFAULT; page = find_lock_page(mapping, idx); if (!page) goto out; + page_in_pagecache = true; } else if (!*pagep) { /* If a page already exists, then it's UFFDIO_COPY for * a non-missing case. Return -EEXIST. */ @@ -5828,7 +5846,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, ret = huge_add_to_page_cache(page, mapping, idx); if (ret) goto out_release_nounlock; - new_pagecache_page = true; + page_in_pagecache = true; ptl = huge_pte_lockptr(h, dst_mm, dst_pte); @@ -5892,7 +5910,7 @@ out_release_unlock: if (vm_shared || is_continue) unlock_page(page); out_release_nounlock: - if (!new_pagecache_page) + if (!page_in_pagecache) restore_reserve_on_error(h, dst_vma, dst_addr, page); put_page(page); goto out; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 781605e92015..2ed5f2a0879d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -776,24 +776,6 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val) rcu_read_unlock(); } -/* - * mod_objcg_mlstate() may be called with irq enabled, so - * mod_memcg_lruvec_state() should be used.
- */ -static inline void mod_objcg_mlstate(struct obj_cgroup *objcg, - struct pglist_data *pgdat, - enum node_stat_item idx, int nr) -{ - struct mem_cgroup *memcg; - struct lruvec *lruvec; - - rcu_read_lock(); - memcg = obj_cgroup_memcg(objcg); - lruvec = mem_cgroup_lruvec(memcg, pgdat); - mod_memcg_lruvec_state(lruvec, idx, nr); - rcu_read_unlock(); -} - /** * __count_memcg_events - account VM events in a cgroup * @memcg: the memory cgroup @@ -2137,41 +2119,6 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, } #endif -/* - * Most kmem_cache_alloc() calls are from user context. The irq disable/enable - * sequence used in this case to access content from object stock is slow. - * To optimize for user context access, there are now two object stocks for - * task context and interrupt context access respectively. - * - * The task context object stock can be accessed by disabling preemption only - * which is cheap in non-preempt kernel. The interrupt context object stock - * can only be accessed after disabling interrupt. User context code can - * access interrupt object stock, but not vice versa. - */ -static inline struct obj_stock *get_obj_stock(unsigned long *pflags) -{ - struct memcg_stock_pcp *stock; - - if (likely(in_task())) { - *pflags = 0UL; - preempt_disable(); - stock = this_cpu_ptr(&memcg_stock); - return &stock->task_obj; - } - - local_irq_save(*pflags); - stock = this_cpu_ptr(&memcg_stock); - return &stock->irq_obj; -} - -static inline void put_obj_stock(unsigned long flags) -{ - if (likely(in_task())) - preempt_enable(); - else - local_irq_restore(flags); -} - /** * consume_stock: Try to consume stocked charge on this cpu. * @memcg: memcg to consume from. @@ -2816,6 +2763,59 @@ retry: */ #define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT) +/* + * Most kmem_cache_alloc() calls are from user context. The irq disable/enable + * sequence used in this case to access content from object stock is slow. + * To optimize for user context access, there are now two object stocks for + * task context and interrupt context access respectively. + * + * The task context object stock can be accessed by disabling preemption only + * which is cheap in non-preempt kernel. The interrupt context object stock + * can only be accessed after disabling interrupt. User context code can + * access interrupt object stock, but not vice versa. + */ +static inline struct obj_stock *get_obj_stock(unsigned long *pflags) +{ + struct memcg_stock_pcp *stock; + + if (likely(in_task())) { + *pflags = 0UL; + preempt_disable(); + stock = this_cpu_ptr(&memcg_stock); + return &stock->task_obj; + } + + local_irq_save(*pflags); + stock = this_cpu_ptr(&memcg_stock); + return &stock->irq_obj; +} + +static inline void put_obj_stock(unsigned long flags) +{ + if (likely(in_task())) + preempt_enable(); + else + local_irq_restore(flags); +} + +/* + * mod_objcg_mlstate() may be called with irq enabled, so + * mod_memcg_lruvec_state() should be used. 
+ */ +static inline void mod_objcg_mlstate(struct obj_cgroup *objcg, + struct pglist_data *pgdat, + enum node_stat_item idx, int nr) +{ + struct mem_cgroup *memcg; + struct lruvec *lruvec; + + rcu_read_lock(); + memcg = obj_cgroup_memcg(objcg); + lruvec = mem_cgroup_lruvec(memcg, pgdat); + mod_memcg_lruvec_state(lruvec, idx, nr); + rcu_read_unlock(); +} + int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s, gfp_t gfp, bool new_page) { @@ -5558,7 +5558,7 @@ static int mem_cgroup_move_account(struct page *page, VM_BUG_ON(from == to); VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); - VM_BUG_ON(compound && !folio_test_multi(folio)); + VM_BUG_ON(compound && !folio_test_large(folio)); /* * Prevent mem_cgroup_migrate() from looking at diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 852041f6be41..2a9627dc784c 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -35,6 +35,7 @@ #include <linux/memblock.h> #include <linux/compaction.h> #include <linux/rmap.h> +#include <linux/module.h> #include <asm/tlbflush.h> diff --git a/mm/shmem.c b/mm/shmem.c index dc038ce78700..18f93c2d68f1 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2303,6 +2303,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode INIT_LIST_HEAD(&info->swaplist); simple_xattrs_init(&info->xattrs); cache_no_acl(inode); + mapping_set_large_folios(inode->i_mapping); switch (mode & S_IFMT) { default: @@ -3870,7 +3871,7 @@ static struct file_system_type shmem_fs_type = { .parameters = shmem_fs_parameters, #endif .kill_sb = kill_litter_super, - .fs_flags = FS_USERNS_MOUNT | FS_THP_SUPPORT, + .fs_flags = FS_USERNS_MOUNT, }; int __init shmem_init(void) diff --git a/mm/slab.c b/mm/slab.c index da132a9ae6f8..ca4822f6b2b6 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3733,14 +3733,13 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp) if (!cachep) return; + trace_kmem_cache_free(_RET_IP_, objp, cachep->name); local_irq_save(flags); debug_check_no_locks_freed(objp, cachep->object_size); if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) debug_check_no_obj_freed(objp, cachep->object_size); __cache_free(cachep, objp, _RET_IP_); local_irq_restore(flags); - - trace_kmem_cache_free(_RET_IP_, objp, cachep->name); } EXPORT_SYMBOL(kmem_cache_free); diff --git a/mm/slab.h b/mm/slab.h index 58c01a34e5b8..56ad7eea3ddf 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -147,7 +147,7 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size, #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \ SLAB_TEMPORARY | SLAB_ACCOUNT) #else -#define SLAB_CACHE_FLAGS (0) +#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE) #endif /* Common flags available with current configuration */ diff --git a/mm/slob.c b/mm/slob.c index 74d3f6e60666..03deee1e6a94 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -666,6 +666,7 @@ static void kmem_rcu_free(struct rcu_head *head) void kmem_cache_free(struct kmem_cache *c, void *b) { kmemleak_free_recursive(b, c->flags); + trace_kmem_cache_free(_RET_IP_, b, c->name); if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) { struct slob_rcu *slob_rcu; slob_rcu = b + (c->size - sizeof(struct slob_rcu)); @@ -674,8 +675,6 @@ void kmem_cache_free(struct kmem_cache *c, void *b) } else { __kmem_cache_free(b, c->size); } - - trace_kmem_cache_free(_RET_IP_, b, c->name); } EXPORT_SYMBOL(kmem_cache_free); diff --git a/mm/slub.c b/mm/slub.c index f7368bfffb7a..abe7db581d68 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3526,8 +3526,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x) s 
= cache_from_obj(s, x); if (!s) return; - slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_); trace_kmem_cache_free(_RET_IP_, x, s->name); + slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_); } EXPORT_SYMBOL(kmem_cache_free); @@ -5081,6 +5081,7 @@ struct loc_track { unsigned long max; unsigned long count; struct location *loc; + loff_t idx; }; static struct dentry *slab_debugfs_root; @@ -6052,11 +6053,11 @@ __initcall(slab_sysfs_init); #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS) static int slab_debugfs_show(struct seq_file *seq, void *v) { - - struct location *l; - unsigned int idx = *(unsigned int *)v; struct loc_track *t = seq->private; + struct location *l; + unsigned long idx; + idx = (unsigned long) t->idx; if (idx < t->count) { l = &t->loc[idx]; @@ -6105,16 +6106,18 @@ static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos) { struct loc_track *t = seq->private; - v = ppos; - ++*ppos; + t->idx = ++(*ppos); if (*ppos <= t->count) - return v; + return ppos; return NULL; } static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos) { + struct loc_track *t = seq->private; + + t->idx = *ppos; return ppos; } diff --git a/mm/swap.c b/mm/swap.c index 1841c24682f8..e8c9dc6d0377 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -156,6 +156,7 @@ void put_pages_list(struct list_head *pages) } free_unref_page_list(pages); + INIT_LIST_HEAD(pages); } EXPORT_SYMBOL(put_pages_list); diff --git a/mm/swap_slots.c b/mm/swap_slots.c index 16f706c55d92..2b5531840583 100644 --- a/mm/swap_slots.c +++ b/mm/swap_slots.c @@ -30,6 +30,7 @@ #include <linux/swap_slots.h> #include <linux/cpu.h> #include <linux/cpumask.h> +#include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mutex.h> #include <linux/mm.h> diff --git a/mm/util.c b/mm/util.c index e58151a61255..741ba32a43ac 100644 --- a/mm/util.c +++ b/mm/util.c @@ -670,7 +670,7 @@ bool folio_mapped(struct folio *folio) { long i, nr; - if (folio_test_single(folio)) + if (!folio_test_large(folio)) return atomic_read(&folio->_mapcount) >= 0; if (atomic_read(folio_mapcount_ptr(folio)) >= 0) return true; diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index a3a0a5e994f5..abaa5d96ded2 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -184,9 +184,6 @@ int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack) if (err) goto out_unregister_netdev; - /* Account for reference in struct vlan_dev_priv */ - dev_hold(real_dev); - vlan_stacked_transfer_operstate(real_dev, dev, vlan); linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */ diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index ab6dee28536d..a54535cbcf4c 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -615,6 +615,9 @@ static int vlan_dev_init(struct net_device *dev) if (!vlan->vlan_pcpu_stats) return -ENOMEM; + /* Get vlan's reference to real_dev */ + dev_hold(real_dev); + return 0; } diff --git a/net/core/dev.c b/net/core/dev.c index 15ac064b5562..2a352e668d10 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4210,7 +4210,10 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev) if (dev->flags & IFF_UP) { int cpu = smp_processor_id(); /* ok because BHs are off */ - if (txq->xmit_lock_owner != cpu) { + /* Other cpus might concurrently change txq->xmit_lock_owner + * to -1 or to their cpu id, but not to our id. 
+ */ + if (READ_ONCE(txq->xmit_lock_owner) != cpu) { if (dev_xmit_recursion()) goto recursion_alert; diff --git a/net/core/devlink.c b/net/core/devlink.c index 5ba4f9434acd..c06c9ba6e8c5 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -4110,14 +4110,6 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info) return err; } - if (info->attrs[DEVLINK_ATTR_NETNS_PID] || - info->attrs[DEVLINK_ATTR_NETNS_FD] || - info->attrs[DEVLINK_ATTR_NETNS_ID]) { - dest_net = devlink_netns_get(skb, info); - if (IS_ERR(dest_net)) - return PTR_ERR(dest_net); - } - if (info->attrs[DEVLINK_ATTR_RELOAD_ACTION]) action = nla_get_u8(info->attrs[DEVLINK_ATTR_RELOAD_ACTION]); else @@ -4160,6 +4152,14 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info) return -EINVAL; } } + if (info->attrs[DEVLINK_ATTR_NETNS_PID] || + info->attrs[DEVLINK_ATTR_NETNS_FD] || + info->attrs[DEVLINK_ATTR_NETNS_ID]) { + dest_net = devlink_netns_get(skb, info); + if (IS_ERR(dest_net)) + return PTR_ERR(dest_net); + } + err = devlink_reload(devlink, dest_net, action, limit, &actions_performed, info->extack); if (dest_net) @@ -4229,7 +4229,9 @@ static void __devlink_flash_update_notify(struct devlink *devlink, WARN_ON(cmd != DEVLINK_CMD_FLASH_UPDATE && cmd != DEVLINK_CMD_FLASH_UPDATE_END && cmd != DEVLINK_CMD_FLASH_UPDATE_STATUS); - WARN_ON(!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED)); + + if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED)) + return; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) diff --git a/net/core/dst_cache.c b/net/core/dst_cache.c index be74ab4551c2..0ccfd5fa5cb9 100644 --- a/net/core/dst_cache.c +++ b/net/core/dst_cache.c @@ -162,3 +162,22 @@ void dst_cache_destroy(struct dst_cache *dst_cache) free_percpu(dst_cache->cache); } EXPORT_SYMBOL_GPL(dst_cache_destroy); + +void dst_cache_reset_now(struct dst_cache *dst_cache) +{ + int i; + + if (!dst_cache->cache) + return; + + dst_cache->reset_ts = jiffies; + for_each_possible_cpu(i) { + struct dst_cache_pcpu *idst = per_cpu_ptr(dst_cache->cache, i); + struct dst_entry *dst = idst->dst; + + idst->cookie = 0; + idst->dst = NULL; + dst_release(dst); + } +} +EXPORT_SYMBOL_GPL(dst_cache_reset_now); diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 79df7cd9dbc1..1bb567a3b329 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -323,7 +323,7 @@ jumped: if (!err && ops->suppress && INDIRECT_CALL_MT(ops->suppress, fib6_rule_suppress, fib4_rule_suppress, - rule, arg)) + rule, flags, arg)) continue; if (err != -EAGAIN) { diff --git a/net/core/filter.c b/net/core/filter.c index e471c9b09670..6102f093d59a 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -7162,6 +7162,8 @@ sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) #endif case BPF_FUNC_sk_storage_get: return &bpf_sk_storage_get_cg_sock_proto; + case BPF_FUNC_ktime_get_coarse_ns: + return &bpf_ktime_get_coarse_ns_proto; default: return bpf_base_func_proto(func_id); } @@ -10327,6 +10329,8 @@ sk_reuseport_func_proto(enum bpf_func_id func_id, return &sk_reuseport_load_bytes_relative_proto; case BPF_FUNC_get_socket_cookie: return &bpf_get_socket_ptr_cookie_proto; + case BPF_FUNC_ktime_get_coarse_ns: + return &bpf_ktime_get_coarse_ns_proto; default: return bpf_base_func_proto(func_id); } @@ -10833,6 +10837,8 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id) case BPF_FUNC_skc_to_unix_sock: func = &bpf_skc_to_unix_sock_proto; break; + case BPF_FUNC_ktime_get_coarse_ns: + return 
&bpf_ktime_get_coarse_ns_proto; default: return bpf_base_func_proto(func_id); } diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 47931c8be04b..dda12fbd177b 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -763,11 +763,10 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, ASSERT_RTNL(); - n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL); + n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL); if (!n) goto out; - n->protocol = 0; write_pnet(&n->net, net); memcpy(n->key, pkey, key_len); n->dev = dev; @@ -1779,6 +1778,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl) { neigh_tables[index] = NULL; /* It is not clean... Fix it to unload IPv6 module safely */ + cancel_delayed_work_sync(&tbl->managed_work); cancel_delayed_work_sync(&tbl->gc_work); del_timer_sync(&tbl->proxy_timer); pneigh_queue_purge(&tbl->proxy_queue); diff --git a/net/core/page_pool.c b/net/core/page_pool.c index 9b60e4301a44..1a6978427d6c 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -49,12 +49,6 @@ static int page_pool_init(struct page_pool *pool, * which is the XDP_TX use-case. */ if (pool->p.flags & PP_FLAG_DMA_MAP) { - /* DMA-mapping is not supported on 32-bit systems with - * 64-bit DMA mapping. - */ - if (sizeof(dma_addr_t) > sizeof(unsigned long)) - return -EOPNOTSUPP; - if ((pool->p.dma_dir != DMA_FROM_DEVICE) && (pool->p.dma_dir != DMA_BIDIRECTIONAL)) return -EINVAL; @@ -75,6 +69,10 @@ static int page_pool_init(struct page_pool *pool, */ } + if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT && + pool->p.flags & PP_FLAG_PAGE_FRAG) + return -EINVAL; + if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) return -ENOMEM; diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 1ae52ac943f6..8eb671c827f9 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -1124,6 +1124,8 @@ void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock) void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock) { + psock_set_prog(&psock->progs.stream_parser, NULL); + if (!psock->saved_data_ready) return; @@ -1212,6 +1214,9 @@ void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock) void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock) { + psock_set_prog(&psock->progs.stream_verdict, NULL); + psock_set_prog(&psock->progs.skb_verdict, NULL); + if (!psock->saved_data_ready) return; diff --git a/net/core/sock.c b/net/core/sock.c index 8f2b2f2c0e7b..41e91d0f7061 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2124,8 +2124,10 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) newsk->sk_prot_creator = prot; /* SANITY */ - if (likely(newsk->sk_net_refcnt)) + if (likely(newsk->sk_net_refcnt)) { get_net(sock_net(newsk)); + sock_inuse_add(sock_net(newsk), 1); + } sk_node_init(&newsk->sk_node); sock_lock_init(newsk); bh_lock_sock(newsk); @@ -2197,8 +2199,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) newsk->sk_err_soft = 0; newsk->sk_priority = 0; newsk->sk_incoming_cpu = raw_smp_processor_id(); - if (likely(newsk->sk_net_refcnt)) - sock_inuse_add(sock_net(newsk), 1); /* Before updating sk_refcnt, we must commit prior changes to memory * (Documentation/RCU/rculist_nulls.rst for details) diff --git a/net/core/sock_map.c b/net/core/sock_map.c index f39ef79ced67..4ca4b11f4e5f 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -167,8 +167,11 @@ static void sock_map_del_link(struct sock *sk, write_lock_bh(&sk->sk_callback_lock); if (strp_stop) sk_psock_stop_strp(sk, psock); - else + if 
(verdict_stop) sk_psock_stop_verdict(sk, psock); + + if (psock->psock_update_sk_prot) + psock->psock_update_sk_prot(sk, psock, false); write_unlock_bh(&sk->sk_callback_lock); } } @@ -282,6 +285,12 @@ static int sock_map_link(struct bpf_map *map, struct sock *sk) if (msg_parser) psock_set_prog(&psock->progs.msg_parser, msg_parser); + if (stream_parser) + psock_set_prog(&psock->progs.stream_parser, stream_parser); + if (stream_verdict) + psock_set_prog(&psock->progs.stream_verdict, stream_verdict); + if (skb_verdict) + psock_set_prog(&psock->progs.skb_verdict, skb_verdict); ret = sock_map_init_proto(sk, psock); if (ret < 0) @@ -292,14 +301,10 @@ static int sock_map_link(struct bpf_map *map, struct sock *sk) ret = sk_psock_init_strp(sk, psock); if (ret) goto out_unlock_drop; - psock_set_prog(&psock->progs.stream_verdict, stream_verdict); - psock_set_prog(&psock->progs.stream_parser, stream_parser); sk_psock_start_strp(sk, psock); } else if (!stream_parser && stream_verdict && !psock->saved_data_ready) { - psock_set_prog(&psock->progs.stream_verdict, stream_verdict); sk_psock_start_verdict(sk,psock); } else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) { - psock_set_prog(&psock->progs.skb_verdict, skb_verdict); sk_psock_start_verdict(sk, psock); } write_unlock_bh(&sk->sk_callback_lock); diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c index 65e9bc1058b5..20bcf86970ff 100644 --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c @@ -1719,7 +1719,7 @@ static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, struct ethtool_coalesce coalesce; int ret; - if (!dev->ethtool_ops->set_coalesce && !dev->ethtool_ops->get_coalesce) + if (!dev->ethtool_ops->set_coalesce || !dev->ethtool_ops->get_coalesce) return -EOPNOTSUPP; ret = dev->ethtool_ops->get_coalesce(dev, &coalesce, &kernel_coalesce, diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c index 38b44c0291b1..96f4180aabd2 100644 --- a/net/ethtool/netlink.c +++ b/net/ethtool/netlink.c @@ -40,7 +40,8 @@ int ethnl_ops_begin(struct net_device *dev) if (dev->dev.parent) pm_runtime_get_sync(dev->dev.parent); - if (!netif_device_present(dev)) { + if (!netif_device_present(dev) || + dev->reg_state == NETREG_UNREGISTERING) { ret = -ENODEV; goto err; } diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c index 2cf02b4d77fb..4bb9401b0a3f 100644 --- a/net/ipv4/bpf_tcp_ca.c +++ b/net/ipv4/bpf_tcp_ca.c @@ -205,6 +205,8 @@ bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id, offsetof(struct tcp_congestion_ops, release)) return &bpf_sk_getsockopt_proto; return NULL; + case BPF_FUNC_ktime_get_coarse_ns: + return &bpf_ktime_get_coarse_ns_proto; default: return bpf_base_func_proto(func_id); } diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index ec73a0d52d3e..323e622ff9b7 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -2591,7 +2591,7 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name, free: kfree(t); out: - return -ENOBUFS; + return -ENOMEM; } static void __devinet_sysctl_unregister(struct net *net, diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 9fe13e4f5d08..4d61ddd8a0ec 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -1582,7 +1582,7 @@ static int __net_init fib_net_init(struct net *net) int error; #ifdef CONFIG_IP_ROUTE_CLASSID - net->ipv4.fib_num_tclassid_users = 0; + atomic_set(&net->ipv4.fib_num_tclassid_users, 0); #endif error = ip_fib_net_init(net); if (error < 0) diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index 
ce54a30c2ef1..d279cb8ac158 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -141,6 +141,7 @@ INDIRECT_CALLABLE_SCOPE int fib4_rule_action(struct fib_rule *rule, } INDIRECT_CALLABLE_SCOPE bool fib4_rule_suppress(struct fib_rule *rule, + int flags, struct fib_lookup_arg *arg) { struct fib_result *result = (struct fib_result *) arg->result; @@ -263,7 +264,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb, if (tb[FRA_FLOW]) { rule4->tclassid = nla_get_u32(tb[FRA_FLOW]); if (rule4->tclassid) - net->ipv4.fib_num_tclassid_users++; + atomic_inc(&net->ipv4.fib_num_tclassid_users); } #endif @@ -295,7 +296,7 @@ static int fib4_rule_delete(struct fib_rule *rule) #ifdef CONFIG_IP_ROUTE_CLASSID if (((struct fib4_rule *)rule)->tclassid) - net->ipv4.fib_num_tclassid_users--; + atomic_dec(&net->ipv4.fib_num_tclassid_users); #endif net->ipv4.fib_has_custom_rules = true; diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 3364cb9c67e0..fde7797b5806 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -220,7 +220,7 @@ void fib_nh_release(struct net *net, struct fib_nh *fib_nh) { #ifdef CONFIG_IP_ROUTE_CLASSID if (fib_nh->nh_tclassid) - net->ipv4.fib_num_tclassid_users--; + atomic_dec(&net->ipv4.fib_num_tclassid_users); #endif fib_nh_common_release(&fib_nh->nh_common); } @@ -632,7 +632,7 @@ int fib_nh_init(struct net *net, struct fib_nh *nh, #ifdef CONFIG_IP_ROUTE_CLASSID nh->nh_tclassid = cfg->fc_flow; if (nh->nh_tclassid) - net->ipv4.fib_num_tclassid_users++; + atomic_inc(&net->ipv4.fib_num_tclassid_users); #endif #ifdef CONFIG_IP_ROUTE_MULTIPATH nh->fib_nh_weight = nh_weight; diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index f7fea3a7c5e6..62a67fdc344c 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -721,7 +721,7 @@ static struct request_sock *inet_reqsk_clone(struct request_sock *req, sk_node_init(&nreq_sk->sk_node); nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping; -#ifdef CONFIG_XPS +#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping; #endif nreq_sk->sk_incoming_cpu = req_sk->sk_incoming_cpu; diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c index 9e8100728d46..5dbd4b5505eb 100644 --- a/net/ipv4/nexthop.c +++ b/net/ipv4/nexthop.c @@ -1899,15 +1899,36 @@ static void remove_nexthop(struct net *net, struct nexthop *nh, /* if any FIB entries reference this nexthop, any dst entries * need to be regenerated */ -static void nh_rt_cache_flush(struct net *net, struct nexthop *nh) +static void nh_rt_cache_flush(struct net *net, struct nexthop *nh, + struct nexthop *replaced_nh) { struct fib6_info *f6i; + struct nh_group *nhg; + int i; if (!list_empty(&nh->fi_list)) rt_cache_flush(net); list_for_each_entry(f6i, &nh->f6i_list, nh_list) ipv6_stub->fib6_update_sernum(net, f6i); + + /* if an IPv6 group was replaced, we have to release all old + * dsts to make sure all refcounts are released + */ + if (!replaced_nh->is_group) + return; + + /* new dsts must use only the new nexthop group */ + synchronize_net(); + + nhg = rtnl_dereference(replaced_nh->nh_grp); + for (i = 0; i < nhg->num_nh; i++) { + struct nh_grp_entry *nhge = &nhg->nh_entries[i]; + struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info); + + if (nhi->family == AF_INET6) + ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh); + } } static int replace_nexthop_grp(struct net *net, struct nexthop *old, @@ -2247,7 +2268,7 @@ static int 
replace_nexthop(struct net *net, struct nexthop *old, err = replace_nexthop_single(net, old, new, extack); if (!err) { - nh_rt_cache_flush(net, old); + nh_rt_cache_flush(net, old, new); __remove_nexthop(net, new, NULL); nexthop_put(new); @@ -2544,11 +2565,15 @@ static int nh_create_ipv6(struct net *net, struct nexthop *nh, /* sets nh_dev if successful */ err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL, extack); - if (err) + if (err) { + /* IPv6 is not enabled, don't call fib6_nh_release */ + if (err == -EAFNOSUPPORT) + goto out; ipv6_stub->fib6_nh_release(fib6_nh); - else + } else { nh->nh_flags = fib6_nh->fib_nh_flags; - + } +out: return err; } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index b7796b4cf0a0..bbb3d39c69af 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1758,6 +1758,9 @@ static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb, { skb_frag_t *frag; + if (unlikely(offset_skb >= skb->len)) + return NULL; + offset_skb -= skb_headlen(skb); if ((int)offset_skb < 0 || skb_has_frag_list(skb)) return NULL; diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 5e9d9c51164c..e07837e23b3f 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -330,8 +330,6 @@ static void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) return; if (tcp_in_slow_start(tp)) { - if (hystart && after(ack, ca->end_seq)) - bictcp_hystart_reset(sk); acked = tcp_slow_start(tp, acked); if (!acked) return; @@ -391,6 +389,9 @@ static void hystart_update(struct sock *sk, u32 delay) struct bictcp *ca = inet_csk_ca(sk); u32 threshold; + if (after(tp->snd_una, ca->end_seq)) + bictcp_hystart_reset(sk); + if (hystart_detect & HYSTART_ACK_TRAIN) { u32 now = bictcp_clock_us(sk); diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index cf913a66df17..7c2d3ac2363a 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -829,8 +829,8 @@ int tcp_child_process(struct sock *parent, struct sock *child, int ret = 0; int state = child->sk_state; - /* record NAPI ID of child */ - sk_mark_napi_id(child, skb); + /* record sk_napi_id and sk_rx_queue_mapping of child. 
*/ + sk_mark_napi_id_set(child, skb); tcp_segs_in(tcp_sk(child), skb); if (!sock_owned_by_user(child)) { diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 319dd7bbfe33..23b05e28490b 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -916,7 +916,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4, kfree_skb(skb); return -EINVAL; } - if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) { + if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) { kfree_skb(skb); return -EINVAL; } @@ -1807,6 +1807,17 @@ int udp_read_sock(struct sock *sk, read_descriptor_t *desc, skb = skb_recv_udp(sk, 0, 1, &err); if (!skb) return err; + + if (udp_lib_checksum_complete(skb)) { + __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, + IS_UDPLITE(sk)); + __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, + IS_UDPLITE(sk)); + atomic_inc(&sk->sk_drops); + kfree_skb(skb); + continue; + } + used = recv_actor(desc, skb, 0, skb->len); if (used <= 0) { if (!copied) diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 0c4da163535a..dab4a047590b 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -1026,6 +1026,7 @@ static const struct ipv6_stub ipv6_stub_impl = { .ip6_mtu_from_fib6 = ip6_mtu_from_fib6, .fib6_nh_init = fib6_nh_init, .fib6_nh_release = fib6_nh_release, + .fib6_nh_release_dsts = fib6_nh_release_dsts, .fib6_update_sernum = fib6_update_sernum_stub, .fib6_rt_update = fib6_rt_update, .ip6_del_rt = ip6_del_rt, diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index ed2f061b8768..f0bac6f7ab6b 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -808,6 +808,12 @@ int esp6_input_done2(struct sk_buff *skb, int err) struct tcphdr *th; offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off); + + if (offset < 0) { + err = -EINVAL; + goto out; + } + uh = (void *)(skb->data + offset); th = (void *)(skb->data + offset); hdr_len += offset; diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 40f3e4f9f33a..dcedfe29d9d9 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -267,6 +267,7 @@ INDIRECT_CALLABLE_SCOPE int fib6_rule_action(struct fib_rule *rule, } INDIRECT_CALLABLE_SCOPE bool fib6_rule_suppress(struct fib_rule *rule, + int flags, struct fib_lookup_arg *arg) { struct fib6_result *res = arg->result; @@ -294,8 +295,7 @@ INDIRECT_CALLABLE_SCOPE bool fib6_rule_suppress(struct fib_rule *rule, return false; suppress_route: - if (!(arg->flags & FIB_LOOKUP_NOREF)) - ip6_rt_put(rt); + ip6_rt_put_flags(rt, flags); return true; } diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 1b9827ff8ccf..1cbd49d5788d 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -248,9 +248,9 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head, * memcmp() alone below is sufficient, right? 
*/ if ((first_word & htonl(0xF00FFFFF)) || - !ipv6_addr_equal(&iph->saddr, &iph2->saddr) || - !ipv6_addr_equal(&iph->daddr, &iph2->daddr) || - *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) { + !ipv6_addr_equal(&iph->saddr, &iph2->saddr) || + !ipv6_addr_equal(&iph->daddr, &iph2->daddr) || + *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) { not_same_flow: NAPI_GRO_CB(p)->same_flow = 0; continue; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 2f044a49afa8..ff4e83e2a506 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -174,7 +174,7 @@ static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) /* Policy lookup after SNAT yielded a new policy */ if (skb_dst(skb)->xfrm) { - IPCB(skb)->flags |= IPSKB_REROUTED; + IP6CB(skb)->flags |= IP6SKB_REROUTED; return dst_output(net, sk, skb); } #endif diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 3ae25b8ffbd6..42d60c76d30a 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3680,6 +3680,25 @@ void fib6_nh_release(struct fib6_nh *fib6_nh) fib_nh_common_release(&fib6_nh->nh_common); } +void fib6_nh_release_dsts(struct fib6_nh *fib6_nh) +{ + int cpu; + + if (!fib6_nh->rt6i_pcpu) + return; + + for_each_possible_cpu(cpu) { + struct rt6_info *pcpu_rt, **ppcpu_rt; + + ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu); + pcpu_rt = xchg(ppcpu_rt, NULL); + if (pcpu_rt) { + dst_dev_put(&pcpu_rt->dst); + dst_release(&pcpu_rt->dst); + } + } +} + static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack) diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index 3adc5d9211ad..d64855010948 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -161,6 +161,14 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto) hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb)); memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); + + /* the control block has been erased, so we have to set the + * iif once again. + * We read the receiving interface index directly from the + * skb->skb_iif as it is done in the IPv4 receiving path (i.e.: + * ip_rcv_core(...)). 
+ */ + IP6CB(skb)->iif = skb->skb_iif; } hdr->nexthdr = NEXTHDR_ROUTING; diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index e2b791c37591..bd3d3195097f 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -80,7 +80,8 @@ static int ieee80211_set_mon_options(struct ieee80211_sub_if_data *sdata, } /* also validate MU-MIMO change */ - monitor_sdata = rtnl_dereference(local->monitor_sdata); + monitor_sdata = wiphy_dereference(local->hw.wiphy, + local->monitor_sdata); if (!monitor_sdata && (params->vht_mumimo_groups || params->vht_mumimo_follow_addr)) @@ -840,7 +841,8 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy, mutex_lock(&local->mtx); if (local->use_chanctx) { - sdata = rtnl_dereference(local->monitor_sdata); + sdata = wiphy_dereference(local->hw.wiphy, + local->monitor_sdata); if (sdata) { ieee80211_vif_release_channel(sdata); ret = ieee80211_vif_use_channel(sdata, chandef, @@ -2707,7 +2709,8 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy, sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); if (sdata->vif.type == NL80211_IFTYPE_MONITOR) { - sdata = rtnl_dereference(local->monitor_sdata); + sdata = wiphy_dereference(local->hw.wiphy, + local->monitor_sdata); if (!sdata) return -EOPNOTSUPP; } @@ -2767,7 +2770,8 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy, mutex_unlock(&local->iflist_mtx); if (has_monitor) { - sdata = rtnl_dereference(local->monitor_sdata); + sdata = wiphy_dereference(local->hw.wiphy, + local->monitor_sdata); if (sdata) { sdata->user_power_level = local->user_power_level; if (txp_type != sdata->vif.bss_conf.txpower_type) diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 9a2145c8192b..20aa5cc31f77 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -588,7 +588,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do */ if (local->suspended) { WARN_ON(local->wowlan); - WARN_ON(rtnl_dereference(local->monitor_sdata)); + WARN_ON(rcu_access_pointer(local->monitor_sdata)); return; } @@ -961,6 +961,7 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local) return 0; ASSERT_RTNL(); + lockdep_assert_wiphy(local->hw.wiphy); if (local->monitor_sdata) return 0; @@ -1028,6 +1029,7 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local) return; ASSERT_RTNL(); + lockdep_assert_wiphy(local->hw.wiphy); mutex_lock(&local->iflist_mtx); diff --git a/net/mac80211/led.h b/net/mac80211/led.h index fb3aaa3c5606..b71a1428d883 100644 --- a/net/mac80211/led.h +++ b/net/mac80211/led.h @@ -72,19 +72,19 @@ static inline void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local, #endif static inline void -ieee80211_tpt_led_trig_tx(struct ieee80211_local *local, __le16 fc, int bytes) +ieee80211_tpt_led_trig_tx(struct ieee80211_local *local, int bytes) { #ifdef CONFIG_MAC80211_LEDS - if (ieee80211_is_data(fc) && atomic_read(&local->tpt_led_active)) + if (atomic_read(&local->tpt_led_active)) local->tpt_led_trigger->tx_bytes += bytes; #endif } static inline void -ieee80211_tpt_led_trig_rx(struct ieee80211_local *local, __le16 fc, int bytes) +ieee80211_tpt_led_trig_rx(struct ieee80211_local *local, int bytes) { #ifdef CONFIG_MAC80211_LEDS - if (ieee80211_is_data(fc) && atomic_read(&local->tpt_led_active)) + if (atomic_read(&local->tpt_led_active)) local->tpt_led_trigger->rx_bytes += bytes; #endif } diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index fc5c608d02e2..9541a4c30aca 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -364,7 +364,7 @@ 
ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, * the compiler to think we have walked past the end of the * struct member. */ - pos = (void *)&rthdr->it_optional[it_present - rthdr->it_optional]; + pos = (void *)&rthdr->it_optional[it_present + 1 - rthdr->it_optional]; /* the order of the following fields is important */ @@ -1952,7 +1952,8 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) int keyid = rx->sta->ptk_idx; sta_ptk = rcu_dereference(rx->sta->ptk[keyid]); - if (ieee80211_has_protected(fc)) { + if (ieee80211_has_protected(fc) && + !(status->flag & RX_FLAG_IV_STRIPPED)) { cs = rx->sta->cipher_scheme; keyid = ieee80211_get_keyid(rx->skb, cs); @@ -4863,6 +4864,7 @@ void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, struct ieee80211_rate *rate = NULL; struct ieee80211_supported_band *sband; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; WARN_ON_ONCE(softirq_count() == 0); @@ -4959,9 +4961,9 @@ void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, if (!(status->flag & RX_FLAG_8023)) skb = ieee80211_rx_monitor(local, skb, rate); if (skb) { - ieee80211_tpt_led_trig_rx(local, - ((struct ieee80211_hdr *)skb->data)->frame_control, - skb->len); + if ((status->flag & RX_FLAG_8023) || + ieee80211_is_data_present(hdr->frame_control)) + ieee80211_tpt_led_trig_rx(local, skb->len); if (status->flag & RX_FLAG_8023) __ieee80211_rx_handle_8023(hw, pubsta, skb, list); diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index a756a197c770..278945e3e08a 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1721,21 +1721,19 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local, * Returns false if the frame couldn't be transmitted but was queued instead. 
*/ static bool __ieee80211_tx(struct ieee80211_local *local, - struct sk_buff_head *skbs, int led_len, - struct sta_info *sta, bool txpending) + struct sk_buff_head *skbs, struct sta_info *sta, + bool txpending) { struct ieee80211_tx_info *info; struct ieee80211_sub_if_data *sdata; struct ieee80211_vif *vif; struct sk_buff *skb; bool result; - __le16 fc; if (WARN_ON(skb_queue_empty(skbs))) return true; skb = skb_peek(skbs); - fc = ((struct ieee80211_hdr *)skb->data)->frame_control; info = IEEE80211_SKB_CB(skb); sdata = vif_to_sdata(info->control.vif); if (sta && !sta->uploaded) @@ -1769,8 +1767,6 @@ static bool __ieee80211_tx(struct ieee80211_local *local, result = ieee80211_tx_frags(local, vif, sta, skbs, txpending); - ieee80211_tpt_led_trig_tx(local, fc, led_len); - WARN_ON_ONCE(!skb_queue_empty(skbs)); return result; @@ -1920,7 +1916,6 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, ieee80211_tx_result res_prepare; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); bool result = true; - int led_len; if (unlikely(skb->len < 10)) { dev_kfree_skb(skb); @@ -1928,7 +1923,6 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, } /* initialises tx */ - led_len = skb->len; res_prepare = ieee80211_tx_prepare(sdata, &tx, sta, skb); if (unlikely(res_prepare == TX_DROP)) { @@ -1951,8 +1945,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, return true; if (!invoke_tx_handlers_late(&tx)) - result = __ieee80211_tx(local, &tx.skbs, led_len, - tx.sta, txpending); + result = __ieee80211_tx(local, &tx.skbs, tx.sta, txpending); return result; } @@ -4175,6 +4168,7 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb, struct ieee80211_local *local = sdata->local; struct sta_info *sta; struct sk_buff *next; + int len = skb->len; if (unlikely(skb->len < ETH_HLEN)) { kfree_skb(skb); @@ -4221,10 +4215,8 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb, } } else { /* we cannot process non-linear frames on this path */ - if (skb_linearize(skb)) { - kfree_skb(skb); - goto out; - } + if (skb_linearize(skb)) + goto out_free; /* the frame could be fragmented, software-encrypted, and other * things so we cannot really handle checksum offload with it - @@ -4258,7 +4250,10 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb, goto out; out_free: kfree_skb(skb); + len = 0; out: + if (len) + ieee80211_tpt_led_trig_tx(local, len); rcu_read_unlock(); } @@ -4396,8 +4391,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, } static bool ieee80211_tx_8023(struct ieee80211_sub_if_data *sdata, - struct sk_buff *skb, int led_len, - struct sta_info *sta, + struct sk_buff *skb, struct sta_info *sta, bool txpending) { struct ieee80211_local *local = sdata->local; @@ -4410,6 +4404,8 @@ static bool ieee80211_tx_8023(struct ieee80211_sub_if_data *sdata, if (sta) sk_pacing_shift_update(skb->sk, local->hw.tx_sk_pacing_shift); + ieee80211_tpt_led_trig_tx(local, skb->len); + if (ieee80211_queue_skb(local, sdata, sta, skb)) return true; @@ -4498,7 +4494,7 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata, if (key) info->control.hw_key = &key->conf; - ieee80211_tx_8023(sdata, skb, skb->len, sta, false); + ieee80211_tx_8023(sdata, skb, sta, false); return; @@ -4637,7 +4633,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local, if (IS_ERR(sta) || (sta && !sta->uploaded)) sta = NULL; - result = ieee80211_tx_8023(sdata, skb, skb->len, sta, true); + result = ieee80211_tx_8023(sdata, skb, sta, true); } else { struct sk_buff_head 
skbs; @@ -4647,7 +4643,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local, hdr = (struct ieee80211_hdr *)skb->data; sta = sta_info_get(sdata, hdr->addr1); - result = __ieee80211_tx(local, &skbs, skb->len, sta, true); + result = __ieee80211_tx(local, &skbs, sta, true); } return result; diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 39fa2a50385d..43df2f0c5db9 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -796,7 +796,7 @@ static void __iterate_interfaces(struct ieee80211_local *local, sdata = rcu_dereference_check(local->monitor_sdata, lockdep_is_held(&local->iflist_mtx) || - lockdep_rtnl_is_held()); + lockdep_is_held(&local->hw.wiphy->mtx)); if (sdata && (iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL || !active_only || sdata->flags & IEEE80211_SDATA_IN_DRIVER)) @@ -2381,7 +2381,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) IEEE80211_TPT_LEDTRIG_FL_RADIO, 0); /* add interfaces */ - sdata = rtnl_dereference(local->monitor_sdata); + sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata); if (sdata) { /* in HW restart it exists already */ WARN_ON(local->resuming); @@ -2426,7 +2426,8 @@ int ieee80211_reconfig(struct ieee80211_local *local) WARN_ON(drv_add_chanctx(local, ctx)); mutex_unlock(&local->chanctx_mtx); - sdata = rtnl_dereference(local->monitor_sdata); + sdata = wiphy_dereference(local->hw.wiphy, + local->monitor_sdata); if (sdata && ieee80211_sdata_running(sdata)) ieee80211_assign_chanctx(local, sdata); } diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index 9ea6004abe1b..62c6733e0792 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c @@ -143,7 +143,6 @@ u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata, u16 __ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct sk_buff *skb) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct mac80211_qos_map *qos_map; bool qos; @@ -156,7 +155,7 @@ u16 __ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, else qos = false; - if (!qos || (info->control.flags & IEEE80211_TX_CTRL_DONT_REORDER)) { + if (!qos) { skb->priority = 0; /* required for correct WPA/11i MIC */ return IEEE80211_AC_BE; } diff --git a/net/mctp/route.c b/net/mctp/route.c index 46c44823edb7..cdf09c2a7007 100644 --- a/net/mctp/route.c +++ b/net/mctp/route.c @@ -952,7 +952,7 @@ static int mctp_route_add(struct mctp_dev *mdev, mctp_eid_t daddr_start, } static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start, - unsigned int daddr_extent) + unsigned int daddr_extent, unsigned char type) { struct net *net = dev_net(mdev->dev); struct mctp_route *rt, *tmp; @@ -969,7 +969,8 @@ static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start, list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) { if (rt->dev == mdev && - rt->min == daddr_start && rt->max == daddr_end) { + rt->min == daddr_start && rt->max == daddr_end && + rt->type == type) { list_del_rcu(&rt->list); /* TODO: immediate RTM_DELROUTE */ mctp_route_release(rt); @@ -987,7 +988,7 @@ int mctp_route_add_local(struct mctp_dev *mdev, mctp_eid_t addr) int mctp_route_remove_local(struct mctp_dev *mdev, mctp_eid_t addr) { - return mctp_route_remove(mdev, addr, 0); + return mctp_route_remove(mdev, addr, 0, RTN_LOCAL); } /* removes all entries for a given device */ @@ -1195,7 +1196,7 @@ static int mctp_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, if (rtm->rtm_type != RTN_UNICAST) return -EINVAL; - rc = mctp_route_remove(mdev, daddr_start, 
rtm->rtm_dst_len); + rc = mctp_route_remove(mdev, daddr_start, rtm->rtm_dst_len, RTN_UNICAST); return rc; } diff --git a/net/mctp/test/utils.c b/net/mctp/test/utils.c index cc6b8803aa9d..7b7918702592 100644 --- a/net/mctp/test/utils.c +++ b/net/mctp/test/utils.c @@ -12,7 +12,7 @@ static netdev_tx_t mctp_test_dev_tx(struct sk_buff *skb, struct net_device *ndev) { - kfree(skb); + kfree_skb(skb); return NETDEV_TX_OK; } diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index ffeb2df8be7a..0c7bde1c14a6 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -409,7 +409,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, goto err; /* Find the output device */ - out_dev = rcu_dereference(nh->nh_dev); + out_dev = nh->nh_dev; if (!mpls_output_possible(out_dev)) goto tx_err; @@ -698,7 +698,7 @@ static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt, (dev->addr_len != nh->nh_via_alen)) goto errout; - RCU_INIT_POINTER(nh->nh_dev, dev); + nh->nh_dev = dev; if (!(dev->flags & IFF_UP)) { nh->nh_flags |= RTNH_F_DEAD; @@ -1491,26 +1491,53 @@ static void mpls_dev_destroy_rcu(struct rcu_head *head) kfree(mdev); } -static void mpls_ifdown(struct net_device *dev, int event) +static int mpls_ifdown(struct net_device *dev, int event) { struct mpls_route __rcu **platform_label; struct net *net = dev_net(dev); - u8 alive, deleted; unsigned index; platform_label = rtnl_dereference(net->mpls.platform_label); for (index = 0; index < net->mpls.platform_labels; index++) { struct mpls_route *rt = rtnl_dereference(platform_label[index]); + bool nh_del = false; + u8 alive = 0; if (!rt) continue; - alive = 0; - deleted = 0; + if (event == NETDEV_UNREGISTER) { + u8 deleted = 0; + + for_nexthops(rt) { + if (!nh->nh_dev || nh->nh_dev == dev) + deleted++; + if (nh->nh_dev == dev) + nh_del = true; + } endfor_nexthops(rt); + + /* if there are no more nexthops, delete the route */ + if (deleted == rt->rt_nhn) { + mpls_route_update(net, index, NULL, NULL); + continue; + } + + if (nh_del) { + size_t size = sizeof(*rt) + rt->rt_nhn * + rt->rt_nh_size; + struct mpls_route *orig = rt; + + rt = kmalloc(size, GFP_KERNEL); + if (!rt) + return -ENOMEM; + memcpy(rt, orig, size); + } + } + change_nexthops(rt) { unsigned int nh_flags = nh->nh_flags; - if (rtnl_dereference(nh->nh_dev) != dev) + if (nh->nh_dev != dev) goto next; switch (event) { @@ -1523,23 +1550,22 @@ static void mpls_ifdown(struct net_device *dev, int event) break; } if (event == NETDEV_UNREGISTER) - RCU_INIT_POINTER(nh->nh_dev, NULL); + nh->nh_dev = NULL; if (nh->nh_flags != nh_flags) WRITE_ONCE(nh->nh_flags, nh_flags); next: if (!(nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))) alive++; - if (!rtnl_dereference(nh->nh_dev)) - deleted++; } endfor_nexthops(rt); WRITE_ONCE(rt->rt_nhn_alive, alive); - /* if there are no more nexthops, delete the route */ - if (event == NETDEV_UNREGISTER && deleted == rt->rt_nhn) - mpls_route_update(net, index, NULL, NULL); + if (nh_del) + mpls_route_update(net, index, rt, NULL); } + + return 0; } static void mpls_ifup(struct net_device *dev, unsigned int flags) @@ -1559,14 +1585,12 @@ static void mpls_ifup(struct net_device *dev, unsigned int flags) alive = 0; change_nexthops(rt) { unsigned int nh_flags = nh->nh_flags; - struct net_device *nh_dev = - rtnl_dereference(nh->nh_dev); if (!(nh_flags & flags)) { alive++; continue; } - if (nh_dev != dev) + if (nh->nh_dev != dev) continue; alive++; nh_flags &= ~flags; @@ -1597,8 +1621,12 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long 
event, return NOTIFY_OK; switch (event) { + int err; + case NETDEV_DOWN: - mpls_ifdown(dev, event); + err = mpls_ifdown(dev, event); + if (err) + return notifier_from_errno(err); break; case NETDEV_UP: flags = dev_get_flags(dev); @@ -1609,13 +1637,18 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event, break; case NETDEV_CHANGE: flags = dev_get_flags(dev); - if (flags & (IFF_RUNNING | IFF_LOWER_UP)) + if (flags & (IFF_RUNNING | IFF_LOWER_UP)) { mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN); - else - mpls_ifdown(dev, event); + } else { + err = mpls_ifdown(dev, event); + if (err) + return notifier_from_errno(err); + } break; case NETDEV_UNREGISTER: - mpls_ifdown(dev, event); + err = mpls_ifdown(dev, event); + if (err) + return notifier_from_errno(err); mdev = mpls_dev_get(dev); if (mdev) { mpls_dev_sysctl_unregister(dev, mdev); @@ -1626,8 +1659,6 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event, case NETDEV_CHANGENAME: mdev = mpls_dev_get(dev); if (mdev) { - int err; - mpls_dev_sysctl_unregister(dev, mdev); err = mpls_dev_sysctl_register(dev, mdev); if (err) @@ -1994,7 +2025,7 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event, nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh), nh->nh_via_alen)) goto nla_put_failure; - dev = rtnl_dereference(nh->nh_dev); + dev = nh->nh_dev; if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex)) goto nla_put_failure; if (nh->nh_flags & RTNH_F_LINKDOWN) @@ -2012,7 +2043,7 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event, goto nla_put_failure; for_nexthops(rt) { - dev = rtnl_dereference(nh->nh_dev); + dev = nh->nh_dev; if (!dev) continue; @@ -2123,18 +2154,14 @@ static int mpls_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh, static bool mpls_rt_uses_dev(struct mpls_route *rt, const struct net_device *dev) { - struct net_device *nh_dev; - if (rt->rt_nhn == 1) { struct mpls_nh *nh = rt->rt_nh; - nh_dev = rtnl_dereference(nh->nh_dev); - if (dev == nh_dev) + if (nh->nh_dev == dev) return true; } else { for_nexthops(rt) { - nh_dev = rtnl_dereference(nh->nh_dev); - if (nh_dev == dev) + if (nh->nh_dev == dev) return true; } endfor_nexthops(rt); } @@ -2222,7 +2249,7 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt) size_t nhsize = 0; for_nexthops(rt) { - if (!rtnl_dereference(nh->nh_dev)) + if (!nh->nh_dev) continue; nhsize += nla_total_size(sizeof(struct rtnexthop)); /* RTA_VIA */ @@ -2468,7 +2495,7 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh), nh->nh_via_alen)) goto nla_put_failure; - dev = rtnl_dereference(nh->nh_dev); + dev = nh->nh_dev; if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex)) goto nla_put_failure; @@ -2507,7 +2534,7 @@ static int resize_platform_label_table(struct net *net, size_t limit) rt0 = mpls_rt_alloc(1, lo->addr_len, 0); if (IS_ERR(rt0)) goto nort0; - RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo); + rt0->rt_nh->nh_dev = lo; rt0->rt_protocol = RTPROT_KERNEL; rt0->rt_payload_type = MPT_IPV4; rt0->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT; @@ -2521,7 +2548,7 @@ static int resize_platform_label_table(struct net *net, size_t limit) rt2 = mpls_rt_alloc(1, lo->addr_len, 0); if (IS_ERR(rt2)) goto nort2; - RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo); + rt2->rt_nh->nh_dev = lo; rt2->rt_protocol = RTPROT_KERNEL; rt2->rt_payload_type = MPT_IPV6; rt2->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT; diff --git a/net/mpls/internal.h 
b/net/mpls/internal.h index 838cdfc10e47..893df00b77b6 100644 --- a/net/mpls/internal.h +++ b/net/mpls/internal.h @@ -87,7 +87,7 @@ enum mpls_payload_type { }; struct mpls_nh { /* next hop label forwarding entry */ - struct net_device __rcu *nh_dev; + struct net_device *nh_dev; /* nh_flags is accessed under RCU in the packet path; it is * modified handling netdev events with rtnl lock held diff --git a/net/mptcp/options.c b/net/mptcp/options.c index 7c3420afb1a0..fe98e4f475ba 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -422,28 +422,6 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb, return false; } -/* MP_JOIN client subflow must wait for 4th ack before sending any data: - * TCP can't schedule delack timer before the subflow is fully established. - * MPTCP uses the delack timer to do 3rd ack retransmissions - */ -static void schedule_3rdack_retransmission(struct sock *sk) -{ - struct inet_connection_sock *icsk = inet_csk(sk); - struct tcp_sock *tp = tcp_sk(sk); - unsigned long timeout; - - /* reschedule with a timeout above RTT, as we must look only for drop */ - if (tp->srtt_us) - timeout = tp->srtt_us << 1; - else - timeout = TCP_TIMEOUT_INIT; - - WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER); - icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; - icsk->icsk_ack.timeout = timeout; - sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); -} - static void clear_3rdack_retransmission(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); @@ -526,7 +504,15 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb, *size = TCPOLEN_MPTCP_MPJ_ACK; pr_debug("subflow=%p", subflow); - schedule_3rdack_retransmission(sk); + /* we can use the full delegate action helper only from BH context + * If we are in process context - sk is flushing the backlog at + * socket lock release time - just set the appropriate flag, will + * be handled by the release callback + */ + if (sock_owned_by_user(sk)) + set_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status); + else + mptcp_subflow_delegate(subflow, MPTCP_DELEGATE_ACK); return true; } return false; diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index b7e32e316738..c82a76d2d0bf 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1596,7 +1596,8 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk) if (!xmit_ssk) goto out; if (xmit_ssk != ssk) { - mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk)); + mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk), + MPTCP_DELEGATE_SEND); goto out; } @@ -2943,7 +2944,7 @@ void __mptcp_check_push(struct sock *sk, struct sock *ssk) if (xmit_ssk == ssk) __mptcp_subflow_push_pending(sk, ssk); else if (xmit_ssk) - mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk)); + mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk), MPTCP_DELEGATE_SEND); } else { set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags); } @@ -2993,18 +2994,50 @@ static void mptcp_release_cb(struct sock *sk) __mptcp_update_rmem(sk); } +/* MP_JOIN client subflow must wait for 4th ack before sending any data: + * TCP can't schedule delack timer before the subflow is fully established. 
+ * MPTCP uses the delack timer to do 3rd ack retransmissions + */ +static void schedule_3rdack_retransmission(struct sock *ssk) +{ + struct inet_connection_sock *icsk = inet_csk(ssk); + struct tcp_sock *tp = tcp_sk(ssk); + unsigned long timeout; + + if (mptcp_subflow_ctx(ssk)->fully_established) + return; + + /* reschedule with a timeout above RTT, as we must look only for drop */ + if (tp->srtt_us) + timeout = usecs_to_jiffies(tp->srtt_us >> (3 - 1)); + else + timeout = TCP_TIMEOUT_INIT; + timeout += jiffies; + + WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER); + icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; + icsk->icsk_ack.timeout = timeout; + sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout); +} + void mptcp_subflow_process_delegated(struct sock *ssk) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); struct sock *sk = subflow->conn; - mptcp_data_lock(sk); - if (!sock_owned_by_user(sk)) - __mptcp_subflow_push_pending(sk, ssk); - else - set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags); - mptcp_data_unlock(sk); - mptcp_subflow_delegated_done(subflow); + if (test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) { + mptcp_data_lock(sk); + if (!sock_owned_by_user(sk)) + __mptcp_subflow_push_pending(sk, ssk); + else + set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags); + mptcp_data_unlock(sk); + mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_SEND); + } + if (test_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status)) { + schedule_3rdack_retransmission(ssk); + mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_ACK); + } } static int mptcp_hash(struct sock *sk) diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 67a61ac48b20..d87cc040352e 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -387,6 +387,7 @@ struct mptcp_delegated_action { DECLARE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions); #define MPTCP_DELEGATE_SEND 0 +#define MPTCP_DELEGATE_ACK 1 /* MPTCP subflow context */ struct mptcp_subflow_context { @@ -492,23 +493,23 @@ static inline void mptcp_add_pending_subflow(struct mptcp_sock *msk, void mptcp_subflow_process_delegated(struct sock *ssk); -static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow) +static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow, int action) { struct mptcp_delegated_action *delegated; bool schedule; + /* the caller held the subflow bh socket lock */ + lockdep_assert_in_softirq(); + /* The implied barrier pairs with mptcp_subflow_delegated_done(), and * ensures the below list check sees list updates done prior to status * bit changes */ - if (!test_and_set_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) { + if (!test_and_set_bit(action, &subflow->delegated_status)) { /* still on delegated list from previous scheduling */ if (!list_empty(&subflow->delegated_node)) return; - /* the caller held the subflow bh socket lock */ - lockdep_assert_in_softirq(); - delegated = this_cpu_ptr(&mptcp_delegated_actions); schedule = list_empty(&delegated->head); list_add_tail(&subflow->delegated_node, &delegated->head); @@ -533,16 +534,16 @@ mptcp_subflow_delegated_next(struct mptcp_delegated_action *delegated) static inline bool mptcp_subflow_has_delegated_action(const struct mptcp_subflow_context *subflow) { - return test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status); + return !!READ_ONCE(subflow->delegated_status); } -static inline void mptcp_subflow_delegated_done(struct mptcp_subflow_context *subflow) +static inline void 
mptcp_subflow_delegated_done(struct mptcp_subflow_context *subflow, int action) { /* pairs with mptcp_subflow_delegate, ensures delegate_node is updated before * touching the status bit */ smp_wmb(); - clear_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status); + clear_bit(action, &subflow->delegated_status); } int mptcp_is_enabled(const struct net *net); diff --git a/net/ncsi/ncsi-cmd.c b/net/ncsi/ncsi-cmd.c index ba9ae482141b..dda8b76b7798 100644 --- a/net/ncsi/ncsi-cmd.c +++ b/net/ncsi/ncsi-cmd.c @@ -18,6 +18,8 @@ #include "internal.h" #include "ncsi-pkt.h" +static const int padding_bytes = 26; + u32 ncsi_calculate_checksum(unsigned char *data, int len) { u32 checksum = 0; @@ -213,12 +215,17 @@ static int ncsi_cmd_handler_oem(struct sk_buff *skb, { struct ncsi_cmd_oem_pkt *cmd; unsigned int len; + int payload; + /* NC-SI spec DSP_0222_1.2.0, section 8.2.2.2 + * requires payload to be padded with 0 to + * 32-bit boundary before the checksum field. + * Ensure the padding bytes are accounted for in + * skb allocation + */ + payload = ALIGN(nca->payload, 4); len = sizeof(struct ncsi_cmd_pkt_hdr) + 4; - if (nca->payload < 26) - len += 26; - else - len += nca->payload; + len += max(payload, padding_bytes); cmd = skb_put_zero(skb, len); memcpy(&cmd->mfr_id, nca->data, nca->payload); @@ -272,6 +279,7 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca) struct net_device *dev = nd->dev; int hlen = LL_RESERVED_SPACE(dev); int tlen = dev->needed_tailroom; + int payload; int len = hlen + tlen; struct sk_buff *skb; struct ncsi_request *nr; @@ -281,14 +289,14 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca) return NULL; /* NCSI command packet has 16-bytes header, payload, 4 bytes checksum. + * Payload needs padding so that the checksum field following payload is + * aligned to 32-bit boundary. * The packet needs padding if its payload is less than 26 bytes to * meet 64 bytes minimal ethernet frame length. 
*/ len += sizeof(struct ncsi_cmd_pkt_hdr) + 4; - if (nca->payload < 26) - len += 26; - else - len += nca->payload; + payload = ALIGN(nca->payload, 4); + len += max(payload, padding_bytes); /* Allocate skb */ skb = alloc_skb(len, GFP_ATOMIC); diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index e93c937a8bf0..51ad557a525b 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -1919,7 +1919,6 @@ ip_vs_in_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state struct ip_vs_proto_data *pd; struct ip_vs_conn *cp; int ret, pkts; - int conn_reuse_mode; struct sock *sk; int af = state->pf; @@ -1997,15 +1996,16 @@ ip_vs_in_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto, ipvs, af, skb, &iph); - conn_reuse_mode = sysctl_conn_reuse_mode(ipvs); - if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) { + if (!iph.fragoffs && is_new_conn(skb, &iph) && cp) { + int conn_reuse_mode = sysctl_conn_reuse_mode(ipvs); bool old_ct = false, resched = false; if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest && unlikely(!atomic_read(&cp->dest->weight))) { resched = true; old_ct = ip_vs_conn_uses_old_conntrack(cp, skb); - } else if (is_new_conn_expected(cp, conn_reuse_mode)) { + } else if (conn_reuse_mode && + is_new_conn_expected(cp, conn_reuse_mode)) { old_ct = ip_vs_conn_uses_old_conntrack(cp, skb); if (!atomic_read(&cp->n_control)) { resched = true; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 770a63103c7a..4712a90a1820 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -684,7 +684,7 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report) tstamp = nf_conn_tstamp_find(ct); if (tstamp) { - s32 timeout = ct->timeout - nfct_time_stamp; + s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp; tstamp->stop = ktime_get_real_ns(); if (timeout < 0) @@ -1036,7 +1036,7 @@ static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx) } /* We want the clashing entry to go away real soon: 1 second timeout. */ - loser_ct->timeout = nfct_time_stamp + HZ; + WRITE_ONCE(loser_ct->timeout, nfct_time_stamp + HZ); /* IPS_NAT_CLASH removes the entry automatically on the first * reply. 
Also prevents UDP tracker from moving the entry to @@ -1560,7 +1560,7 @@ __nf_conntrack_alloc(struct net *net, /* save hash for reusing when confirming */ *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash; ct->status = 0; - ct->timeout = 0; + WRITE_ONCE(ct->timeout, 0); write_pnet(&ct->ct_net, net); memset(&ct->__nfct_init_offset, 0, offsetof(struct nf_conn, proto) - diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index f1e5443fe7c7..81d03acf68d4 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1011,11 +1011,9 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family) CTA_TUPLE_REPLY, filter->family, &filter->zone, - filter->orig_flags); - if (err < 0) { - err = -EINVAL; + filter->reply_flags); + if (err < 0) goto err_filter; - } } return filter; @@ -2000,7 +1998,7 @@ static int ctnetlink_change_timeout(struct nf_conn *ct, if (timeout > INT_MAX) timeout = INT_MAX; - ct->timeout = nfct_time_stamp + (u32)timeout; + WRITE_ONCE(ct->timeout, nfct_time_stamp + (u32)timeout); if (test_bit(IPS_DYING_BIT, &ct->status)) return -ETIME; diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c index 87a7388b6c89..ed37bb9b4e58 100644 --- a/net/netfilter/nf_flow_table_core.c +++ b/net/netfilter/nf_flow_table_core.c @@ -201,8 +201,8 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct) if (timeout < 0) timeout = 0; - if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout) - ct->timeout = nfct_time_stamp + timeout; + if (nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout) + WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout); } static void flow_offload_fixup_ct_state(struct nf_conn *ct) diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c index d6bf1b2cd541..b561e0a44a45 100644 --- a/net/netfilter/nf_flow_table_offload.c +++ b/net/netfilter/nf_flow_table_offload.c @@ -65,11 +65,11 @@ static void nf_flow_rule_lwt_match(struct nf_flow_match *match, sizeof(struct in6_addr)); if (memcmp(&key->enc_ipv6.src, &in6addr_any, sizeof(struct in6_addr))) - memset(&key->enc_ipv6.src, 0xff, + memset(&mask->enc_ipv6.src, 0xff, sizeof(struct in6_addr)); if (memcmp(&key->enc_ipv6.dst, &in6addr_any, sizeof(struct in6_addr))) - memset(&key->enc_ipv6.dst, 0xff, + memset(&mask->enc_ipv6.dst, 0xff, sizeof(struct in6_addr)); enc_keys |= BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS); key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 4acc4b8e9fe5..5837e8efc9c2 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -387,7 +387,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, struct net_device *indev; struct net_device *outdev; struct nf_conn *ct = NULL; - enum ip_conntrack_info ctinfo; + enum ip_conntrack_info ctinfo = 0; struct nfnl_ct_hook *nfnl_ct; bool csum_verify; char *secdata = NULL; diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index af4ee874a067..dbe1f2e7dd9e 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c @@ -236,7 +236,7 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr, tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len); if (!tcph) - return; + goto err; opt = (u8 *)tcph; for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) { @@ -251,16 +251,16 @@ static void 
nft_exthdr_tcp_set_eval(const struct nft_expr *expr, continue; if (i + optl > tcphdr_len || priv->len + priv->offset > optl) - return; + goto err; if (skb_ensure_writable(pkt->skb, nft_thoff(pkt) + i + priv->len)) - return; + goto err; tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len); if (!tcph) - return; + goto err; offset = i + priv->offset; @@ -303,6 +303,9 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr, return; } + return; +err: + regs->verdict.code = NFT_BREAK; } static void nft_exthdr_sctp_eval(const struct nft_expr *expr, diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c index cbfe4e4a4ad7..bd689938a2e0 100644 --- a/net/netfilter/nft_payload.c +++ b/net/netfilter/nft_payload.c @@ -22,7 +22,6 @@ #include <linux/icmpv6.h> #include <linux/ip.h> #include <linux/ipv6.h> -#include <linux/ip.h> #include <net/sctp/checksum.h> static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off, diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c index e517663e0cd1..6f4116e72958 100644 --- a/net/netfilter/nft_set_pipapo_avx2.c +++ b/net/netfilter/nft_set_pipapo_avx2.c @@ -886,7 +886,7 @@ static int nft_pipapo_avx2_lookup_8b_6(unsigned long *map, unsigned long *fill, NFT_PIPAPO_AVX2_BUCKET_LOAD8(4, lt, 4, pkt[4], bsize); NFT_PIPAPO_AVX2_AND(5, 0, 1); - NFT_PIPAPO_AVX2_BUCKET_LOAD8(6, lt, 6, pkt[5], bsize); + NFT_PIPAPO_AVX2_BUCKET_LOAD8(6, lt, 5, pkt[5], bsize); NFT_PIPAPO_AVX2_AND(7, 2, 3); /* Stall */ diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c index 2f7cf5ecebf4..0f8bb0bf558f 100644 --- a/net/netfilter/xt_IDLETIMER.c +++ b/net/netfilter/xt_IDLETIMER.c @@ -85,9 +85,9 @@ static ssize_t idletimer_tg_show(struct device *dev, mutex_unlock(&list_mutex); if (time_after(expires, jiffies) || ktimespec.tv_sec > 0) - return snprintf(buf, PAGE_SIZE, "%ld\n", time_diff); + return sysfs_emit(buf, "%ld\n", time_diff); - return snprintf(buf, PAGE_SIZE, "0\n"); + return sysfs_emit(buf, "0\n"); } static void idletimer_tg_work(struct work_struct *work) diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 4c575324a985..9eba2e648385 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1852,6 +1852,11 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; + if (len == 0) { + pr_warn_once("Zero length message leads to an empty skb\n"); + return -ENODATA; + } + err = scm_send(sock, msg, &scm, true); if (err < 0) return err; diff --git a/net/nfc/core.c b/net/nfc/core.c index 3c645c1d99c9..dc7a2404efdf 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -94,13 +94,13 @@ int nfc_dev_up(struct nfc_dev *dev) device_lock(&dev->dev); - if (dev->rfkill && rfkill_blocked(dev->rfkill)) { - rc = -ERFKILL; + if (!device_is_registered(&dev->dev)) { + rc = -ENODEV; goto error; } - if (!device_is_registered(&dev->dev)) { - rc = -ENODEV; + if (dev->rfkill && rfkill_blocked(dev->rfkill)) { + rc = -ERFKILL; goto error; } @@ -1125,11 +1125,7 @@ int nfc_register_device(struct nfc_dev *dev) if (rc) pr_err("Could not register llcp device\n"); - rc = nfc_genl_device_added(dev); - if (rc) - pr_debug("The userspace won't be notified that the device %s was added\n", - dev_name(&dev->dev)); - + device_lock(&dev->dev); dev->rfkill = rfkill_alloc(dev_name(&dev->dev), &dev->dev, RFKILL_TYPE_NFC, &nfc_rfkill_ops, dev); if (dev->rfkill) { @@ -1138,6 +1134,12 @@ int nfc_register_device(struct 
nfc_dev *dev) dev->rfkill = NULL; } } + device_unlock(&dev->dev); + + rc = nfc_genl_device_added(dev); + if (rc) + pr_debug("The userspace won't be notified that the device %s was added\n", + dev_name(&dev->dev)); return 0; } @@ -1154,10 +1156,17 @@ void nfc_unregister_device(struct nfc_dev *dev) pr_debug("dev_name=%s\n", dev_name(&dev->dev)); + rc = nfc_genl_device_removed(dev); + if (rc) + pr_debug("The userspace won't be notified that the device %s " + "was removed\n", dev_name(&dev->dev)); + + device_lock(&dev->dev); if (dev->rfkill) { rfkill_unregister(dev->rfkill); rfkill_destroy(dev->rfkill); } + device_unlock(&dev->dev); if (dev->ops->check_presence) { device_lock(&dev->dev); @@ -1167,11 +1176,6 @@ void nfc_unregister_device(struct nfc_dev *dev) cancel_work_sync(&dev->check_pres_work); } - rc = nfc_genl_device_removed(dev); - if (rc) - pr_debug("The userspace won't be notified that the device %s " - "was removed\n", dev_name(&dev->dev)); - nfc_llcp_unregister_device(dev); mutex_lock(&nfc_devlist_mutex); diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 6fd873aa86be..d2537383a3e8 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -144,12 +144,15 @@ inline int nci_request(struct nci_dev *ndev, { int rc; - if (!test_bit(NCI_UP, &ndev->flags)) - return -ENETDOWN; - /* Serialize all requests */ mutex_lock(&ndev->req_lock); - rc = __nci_request(ndev, req, opt, timeout); + /* check the state after obtaing the lock against any races + * from nci_close_device when the device gets removed. + */ + if (test_bit(NCI_UP, &ndev->flags)) + rc = __nci_request(ndev, req, opt, timeout); + else + rc = -ENETDOWN; mutex_unlock(&ndev->req_lock); return rc; @@ -473,6 +476,11 @@ static int nci_open_device(struct nci_dev *ndev) mutex_lock(&ndev->req_lock); + if (test_bit(NCI_UNREG, &ndev->flags)) { + rc = -ENODEV; + goto done; + } + if (test_bit(NCI_UP, &ndev->flags)) { rc = -EALREADY; goto done; @@ -545,6 +553,10 @@ done: static int nci_close_device(struct nci_dev *ndev) { nci_req_cancel(ndev, ENODEV); + + /* This mutex needs to be held as a barrier for + * caller nci_unregister_device + */ mutex_lock(&ndev->req_lock); if (!test_and_clear_bit(NCI_UP, &ndev->flags)) { @@ -582,8 +594,8 @@ static int nci_close_device(struct nci_dev *ndev) del_timer_sync(&ndev->cmd_timer); - /* Clear flags */ - ndev->flags = 0; + /* Clear flags except NCI_UNREG */ + ndev->flags &= BIT(NCI_UNREG); mutex_unlock(&ndev->req_lock); @@ -1266,6 +1278,12 @@ void nci_unregister_device(struct nci_dev *ndev) { struct nci_conn_info *conn_info, *n; + /* This set_bit is not protected with specialized barrier, + * However, it is fine because the mutex_lock(&ndev->req_lock); + * in nci_close_device() will help to emit one. 
+ */ + set_bit(NCI_UNREG, &ndev->flags); + nci_close_device(ndev); destroy_workqueue(ndev->cmd_wq); diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 334f63c9529e..f184b0db79d4 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -636,8 +636,10 @@ static int nfc_genl_dump_devices_done(struct netlink_callback *cb) { struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; - nfc_device_iter_exit(iter); - kfree(iter); + if (iter) { + nfc_device_iter_exit(iter); + kfree(iter); + } return 0; } @@ -1392,8 +1394,10 @@ static int nfc_genl_dump_ses_done(struct netlink_callback *cb) { struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; - nfc_device_iter_exit(iter); - kfree(iter); + if (iter) { + nfc_device_iter_exit(iter); + kfree(iter); + } return 0; } diff --git a/net/rds/tcp.c b/net/rds/tcp.c index abf19c0e3ba0..5327d130c4b5 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -500,7 +500,7 @@ void rds_tcp_tune(struct socket *sock) sk->sk_userlocks |= SOCK_SNDBUF_LOCK; } if (rtn->rcvbuf_size > 0) { - sk->sk_sndbuf = rtn->rcvbuf_size; + sk->sk_rcvbuf = rtn->rcvbuf_size; sk->sk_userlocks |= SOCK_RCVBUF_LOCK; } release_sock(sk); diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index dbea0bfee48e..8120138dac01 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c @@ -135,16 +135,20 @@ struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle) return bundle; } +static void rxrpc_free_bundle(struct rxrpc_bundle *bundle) +{ + rxrpc_put_peer(bundle->params.peer); + kfree(bundle); +} + void rxrpc_put_bundle(struct rxrpc_bundle *bundle) { unsigned int d = bundle->debug_id; unsigned int u = atomic_dec_return(&bundle->usage); _debug("PUT B=%x %u", d, u); - if (u == 0) { - rxrpc_put_peer(bundle->params.peer); - kfree(bundle); - } + if (u == 0) + rxrpc_free_bundle(bundle); } /* @@ -328,7 +332,7 @@ static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *c return candidate; found_bundle_free: - kfree(candidate); + rxrpc_free_bundle(candidate); found_bundle: rxrpc_get_bundle(bundle); spin_unlock(&local->client_bundles_lock); diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c index 68396d052052..0298fe2ad6d3 100644 --- a/net/rxrpc/peer_object.c +++ b/net/rxrpc/peer_object.c @@ -299,6 +299,12 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx, return peer; } +static void rxrpc_free_peer(struct rxrpc_peer *peer) +{ + rxrpc_put_local(peer->local); + kfree_rcu(peer, rcu); +} + /* * Set up a new incoming peer. 
There shouldn't be any other matching peers * since we've already done a search in the list from the non-reentrant context @@ -365,7 +371,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx, spin_unlock_bh(&rxnet->peer_hash_lock); if (peer) - kfree(candidate); + rxrpc_free_peer(candidate); else peer = candidate; } @@ -420,8 +426,7 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer) list_del_init(&peer->keepalive_link); spin_unlock_bh(&rxnet->peer_hash_lock); - rxrpc_put_local(peer->local); - kfree_rcu(peer, rcu); + rxrpc_free_peer(peer); } /* @@ -457,8 +462,7 @@ void rxrpc_put_peer_locked(struct rxrpc_peer *peer) if (n == 0) { hash_del_rcu(&peer->hash_link); list_del_init(&peer->keepalive_link); - rxrpc_put_local(peer->local); - kfree_rcu(peer, rcu); + rxrpc_free_peer(peer); } } diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index d64b0eeccbe4..efc963ab995a 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -19,6 +19,7 @@ #include <linux/if_arp.h> #include <net/net_namespace.h> #include <net/netlink.h> +#include <net/dst.h> #include <net/pkt_sched.h> #include <net/pkt_cls.h> #include <linux/tc_act/tc_mirred.h> @@ -228,6 +229,7 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a, bool want_ingress; bool is_redirect; bool expects_nh; + bool at_ingress; int m_eaction; int mac_len; bool at_nh; @@ -263,7 +265,8 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a, * ingress - that covers the TC S/W datapath. */ is_redirect = tcf_mirred_is_act_redirect(m_eaction); - use_reinsert = skb_at_tc_ingress(skb) && is_redirect && + at_ingress = skb_at_tc_ingress(skb); + use_reinsert = at_ingress && is_redirect && tcf_mirred_can_reinsert(retval); if (!use_reinsert) { skb2 = skb_clone(skb, GFP_ATOMIC); @@ -271,10 +274,12 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a, goto out; } + want_ingress = tcf_mirred_act_wants_ingress(m_eaction); + /* All mirred/redirected skbs should clear previous ct info */ nf_reset_ct(skb2); - - want_ingress = tcf_mirred_act_wants_ingress(m_eaction); + if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */ + skb_dst_drop(skb2); expects_nh = want_ingress || !m_mac_header_xmit; at_nh = skb->data == skb_network_header(skb); diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c index 0eae9ff5edf6..e007fc75ef2f 100644 --- a/net/sched/sch_ets.c +++ b/net/sched/sch_ets.c @@ -665,12 +665,14 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt, q->classes[i].deficit = quanta[i]; } } + for (i = q->nbands; i < oldbands; i++) { + qdisc_tree_flush_backlog(q->classes[i].qdisc); + if (i >= q->nstrict) + list_del(&q->classes[i].alist); + } q->nstrict = nstrict; memcpy(q->prio2band, priomap, sizeof(priomap)); - for (i = q->nbands; i < oldbands; i++) - qdisc_tree_flush_backlog(q->classes[i].qdisc); - for (i = 0; i < q->nbands; i++) q->classes[i].quantum = quanta[i]; diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c index 830f3559f727..d6aba6edd16e 100644 --- a/net/sched/sch_fq_pie.c +++ b/net/sched/sch_fq_pie.c @@ -531,6 +531,7 @@ static void fq_pie_destroy(struct Qdisc *sch) struct fq_pie_sched_data *q = qdisc_priv(sch); tcf_block_put(q->block); + q->p_params.tupdate = 0; del_timer_sync(&q->adapt_timer); kvfree(q->flows); } diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 59284da9116d..230072f9ec48 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -566,6 +566,10 @@ static void smc_stat_fallback(struct smc_sock *smc) 
static void smc_switch_to_fallback(struct smc_sock *smc, int reason_code) { + wait_queue_head_t *smc_wait = sk_sleep(&smc->sk); + wait_queue_head_t *clc_wait = sk_sleep(smc->clcsock->sk); + unsigned long flags; + smc->use_fallback = true; smc->fallback_rsn = reason_code; smc_stat_fallback(smc); @@ -575,6 +579,16 @@ static void smc_switch_to_fallback(struct smc_sock *smc, int reason_code) smc->clcsock->file->private_data = smc->clcsock; smc->clcsock->wq.fasync_list = smc->sk.sk_socket->wq.fasync_list; + + /* There may be some entries remaining in + * smc socket->wq, which should be removed + * to clcsocket->wq during the fallback. + */ + spin_lock_irqsave(&smc_wait->lock, flags); + spin_lock_nested(&clc_wait->lock, SINGLE_DEPTH_NESTING); + list_splice_init(&smc_wait->head, &clc_wait->head); + spin_unlock(&clc_wait->lock); + spin_unlock_irqrestore(&smc_wait->lock, flags); } } @@ -2120,8 +2134,10 @@ static int smc_listen(struct socket *sock, int backlog) smc->clcsock->sk->sk_user_data = (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY); rc = kernel_listen(smc->clcsock, backlog); - if (rc) + if (rc) { + smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready; goto out; + } sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; sk->sk_state = SMC_LISTEN; @@ -2354,8 +2370,10 @@ static __poll_t smc_poll(struct file *file, struct socket *sock, static int smc_shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; + bool do_shutdown = true; struct smc_sock *smc; int rc = -EINVAL; + int old_state; int rc1 = 0; smc = smc_sk(sk); @@ -2382,7 +2400,11 @@ static int smc_shutdown(struct socket *sock, int how) } switch (how) { case SHUT_RDWR: /* shutdown in both directions */ + old_state = sk->sk_state; rc = smc_close_active(smc); + if (old_state == SMC_ACTIVE && + sk->sk_state == SMC_PEERCLOSEWAIT1) + do_shutdown = false; break; case SHUT_WR: rc = smc_close_shutdown_write(smc); @@ -2392,7 +2414,7 @@ static int smc_shutdown(struct socket *sock, int how) /* nothing more to do because peer is not involved */ break; } - if (smc->clcsock) + if (do_shutdown && smc->clcsock) rc1 = kernel_sock_shutdown(smc->clcsock, how); /* map sock_shutdown_cmd constants to sk_shutdown value range */ sk->sk_shutdown |= how + 1; diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c index 0f9ffba07d26..292e4d904ab6 100644 --- a/net/smc/smc_close.c +++ b/net/smc/smc_close.c @@ -195,6 +195,7 @@ int smc_close_active(struct smc_sock *smc) int old_state; long timeout; int rc = 0; + int rc1 = 0; timeout = current->flags & PF_EXITING ? 0 : sock_flag(sk, SOCK_LINGER) ? @@ -228,6 +229,15 @@ again: /* send close request */ rc = smc_close_final(conn); sk->sk_state = SMC_PEERCLOSEWAIT1; + + /* actively shutdown clcsock before peer close it, + * prevent peer from entering TIME_WAIT state. + */ + if (smc->clcsock && smc->clcsock->sk) { + rc1 = kernel_sock_shutdown(smc->clcsock, + SHUT_RDWR); + rc = rc ? 
rc : rc1; + } } else { /* peer event has changed the state */ goto again; @@ -354,9 +364,9 @@ static void smc_close_passive_work(struct work_struct *work) if (rxflags->peer_conn_abort) { /* peer has not received all data */ smc_close_passive_abort_received(smc); - release_sock(&smc->sk); + release_sock(sk); cancel_delayed_work_sync(&conn->tx_work); - lock_sock(&smc->sk); + lock_sock(sk); goto wakeup; } diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 49b8ba3bb683..387d28b2f8dd 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -625,18 +625,17 @@ int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb) void smc_lgr_cleanup_early(struct smc_connection *conn) { struct smc_link_group *lgr = conn->lgr; - struct list_head *lgr_list; spinlock_t *lgr_lock; if (!lgr) return; smc_conn_free(conn); - lgr_list = smc_lgr_list_head(lgr, &lgr_lock); + smc_lgr_list_head(lgr, &lgr_lock); spin_lock_bh(lgr_lock); /* do not use this link group for new connections */ - if (!list_empty(lgr_list)) - list_del_init(lgr_list); + if (!list_empty(&lgr->list)) + list_del_init(&lgr->list); spin_unlock_bh(lgr_lock); __smc_lgr_terminate(lgr, true); } @@ -708,13 +707,14 @@ static u8 smcr_next_link_id(struct smc_link_group *lgr) int i; while (1) { +again: link_id = ++lgr->next_link_id; if (!link_id) /* skip zero as link_id */ link_id = ++lgr->next_link_id; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (smc_link_usable(&lgr->lnk[i]) && lgr->lnk[i].link_id == link_id) - continue; + goto again; } break; } @@ -1671,14 +1671,26 @@ static void smc_link_down_work(struct work_struct *work) mutex_unlock(&lgr->llc_conf_mutex); } -/* Determine vlan of internal TCP socket. - * @vlan_id: address to store the determined vlan id into - */ +static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev, + struct netdev_nested_priv *priv) +{ + unsigned short *vlan_id = (unsigned short *)priv->data; + + if (is_vlan_dev(lower_dev)) { + *vlan_id = vlan_dev_vlan_id(lower_dev); + return 1; + } + + return 0; +} + +/* Determine vlan of internal TCP socket. 
*/ int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini) { struct dst_entry *dst = sk_dst_get(clcsock->sk); + struct netdev_nested_priv priv; struct net_device *ndev; - int i, nest_lvl, rc = 0; + int rc = 0; ini->vlan_id = 0; if (!dst) { @@ -1696,20 +1708,9 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini) goto out_rel; } + priv.data = (void *)&ini->vlan_id; rtnl_lock(); - nest_lvl = ndev->lower_level; - for (i = 0; i < nest_lvl; i++) { - struct list_head *lower = &ndev->adj_list.lower; - - if (list_empty(lower)) - break; - lower = lower->next; - ndev = (struct net_device *)netdev_lower_get_next(ndev, &lower); - if (is_vlan_dev(ndev)) { - ini->vlan_id = vlan_dev_vlan_id(ndev); - break; - } - } + netdev_walk_all_lower_dev(ndev, smc_vlan_by_tcpsk_walk, &priv); rtnl_unlock(); out_rel: diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index ae48c9c84ee1..d8ee06a9650a 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1720,15 +1720,15 @@ static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port) } #ifdef CONFIG_DEBUG_LOCK_ALLOC -static struct lock_class_key xs_key[2]; -static struct lock_class_key xs_slock_key[2]; +static struct lock_class_key xs_key[3]; +static struct lock_class_key xs_slock_key[3]; static inline void xs_reclassify_socketu(struct socket *sock) { struct sock *sk = sock->sk; sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC", - &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]); + &xs_slock_key[0], "sk_lock-AF_LOCAL-RPC", &xs_key[0]); } static inline void xs_reclassify_socket4(struct socket *sock) @@ -1736,7 +1736,7 @@ static inline void xs_reclassify_socket4(struct socket *sock) struct sock *sk = sock->sk; sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC", - &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]); + &xs_slock_key[1], "sk_lock-AF_INET-RPC", &xs_key[1]); } static inline void xs_reclassify_socket6(struct socket *sock) @@ -1744,7 +1744,7 @@ static inline void xs_reclassify_socket6(struct socket *sock) struct sock *sk = sock->sk; sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC", - &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]); + &xs_slock_key[2], "sk_lock-AF_INET6-RPC", &xs_key[2]); } static inline void xs_reclassify_socket(int family, struct socket *sock) diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c index dc60c32bb70d..b4d9419a015b 100644 --- a/net/tipc/crypto.c +++ b/net/tipc/crypto.c @@ -524,7 +524,7 @@ static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey, return -EEXIST; /* Allocate a new AEAD */ - tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC); + tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); if (unlikely(!tmp)) return -ENOMEM; @@ -597,6 +597,10 @@ static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey, tmp->cloned = NULL; tmp->authsize = TIPC_AES_GCM_TAG_SIZE; tmp->key = kmemdup(ukey, tipc_aead_key_size(ukey), GFP_KERNEL); + if (!tmp->key) { + tipc_aead_free(&tmp->rcu); + return -ENOMEM; + } memcpy(&tmp->salt, ukey->key + keylen, TIPC_AES_GCM_SALT_SIZE); atomic_set(&tmp->users, 0); atomic64_set(&tmp->seqno, 0); @@ -1470,7 +1474,7 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net, return -EEXIST; /* Allocate crypto */ - c = kzalloc(sizeof(*c), GFP_ATOMIC); + c = kzalloc(sizeof(*c), GFP_KERNEL); if (!c) return -ENOMEM; @@ -1484,7 +1488,7 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net, } /* Allocate statistic structure */ - c->stats = alloc_percpu_gfp(struct 
tipc_crypto_stats, GFP_ATOMIC); + c->stats = alloc_percpu(struct tipc_crypto_stats); if (!c->stats) { if (c->wq) destroy_workqueue(c->wq); @@ -2457,7 +2461,7 @@ static void tipc_crypto_work_tx(struct work_struct *work) } /* Let's duplicate it first */ - skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC); + skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_KERNEL); rcu_read_unlock(); /* Now, generate new key, initiate & distribute it */ diff --git a/net/tipc/link.c b/net/tipc/link.c index 1b7a487c8841..09ae8448f394 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1298,8 +1298,11 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb, return false; #ifdef CONFIG_TIPC_CRYPTO case MSG_CRYPTO: - tipc_crypto_msg_rcv(l->net, skb); - return true; + if (TIPC_SKB_CB(skb)->decrypted) { + tipc_crypto_msg_rcv(l->net, skb); + return true; + } + fallthrough; #endif default: pr_warn("Dropping received illegal msg type\n"); diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index acfba9f1ba72..6bc2879ba637 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -61,7 +61,7 @@ static DEFINE_MUTEX(tcpv6_prot_mutex); static const struct proto *saved_tcpv4_prot; static DEFINE_MUTEX(tcpv4_prot_mutex); static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG]; -static struct proto_ops tls_sw_proto_ops; +static struct proto_ops tls_proto_ops[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG]; static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG], const struct proto *base); @@ -71,6 +71,8 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx) WRITE_ONCE(sk->sk_prot, &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]); + WRITE_ONCE(sk->sk_socket->ops, + &tls_proto_ops[ip_ver][ctx->tx_conf][ctx->rx_conf]); } int wait_on_pending_writer(struct sock *sk, long *timeo) @@ -669,8 +671,6 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval, if (tx) { ctx->sk_write_space = sk->sk_write_space; sk->sk_write_space = tls_write_space; - } else { - sk->sk_socket->ops = &tls_sw_proto_ops; } goto out; @@ -728,6 +728,39 @@ struct tls_context *tls_ctx_create(struct sock *sk) return ctx; } +static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG], + const struct proto_ops *base) +{ + ops[TLS_BASE][TLS_BASE] = *base; + + ops[TLS_SW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE]; + ops[TLS_SW ][TLS_BASE].sendpage_locked = tls_sw_sendpage_locked; + + ops[TLS_BASE][TLS_SW ] = ops[TLS_BASE][TLS_BASE]; + ops[TLS_BASE][TLS_SW ].splice_read = tls_sw_splice_read; + + ops[TLS_SW ][TLS_SW ] = ops[TLS_SW ][TLS_BASE]; + ops[TLS_SW ][TLS_SW ].splice_read = tls_sw_splice_read; + +#ifdef CONFIG_TLS_DEVICE + ops[TLS_HW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE]; + ops[TLS_HW ][TLS_BASE].sendpage_locked = NULL; + + ops[TLS_HW ][TLS_SW ] = ops[TLS_BASE][TLS_SW ]; + ops[TLS_HW ][TLS_SW ].sendpage_locked = NULL; + + ops[TLS_BASE][TLS_HW ] = ops[TLS_BASE][TLS_SW ]; + + ops[TLS_SW ][TLS_HW ] = ops[TLS_SW ][TLS_SW ]; + + ops[TLS_HW ][TLS_HW ] = ops[TLS_HW ][TLS_SW ]; + ops[TLS_HW ][TLS_HW ].sendpage_locked = NULL; +#endif +#ifdef CONFIG_TLS_TOE + ops[TLS_HW_RECORD][TLS_HW_RECORD] = *base; +#endif +} + static void tls_build_proto(struct sock *sk) { int ip_ver = sk->sk_family == AF_INET6 ? 
TLSV6 : TLSV4; @@ -739,6 +772,8 @@ static void tls_build_proto(struct sock *sk) mutex_lock(&tcpv6_prot_mutex); if (likely(prot != saved_tcpv6_prot)) { build_protos(tls_prots[TLSV6], prot); + build_proto_ops(tls_proto_ops[TLSV6], + sk->sk_socket->ops); smp_store_release(&saved_tcpv6_prot, prot); } mutex_unlock(&tcpv6_prot_mutex); @@ -749,6 +784,8 @@ static void tls_build_proto(struct sock *sk) mutex_lock(&tcpv4_prot_mutex); if (likely(prot != saved_tcpv4_prot)) { build_protos(tls_prots[TLSV4], prot); + build_proto_ops(tls_proto_ops[TLSV4], + sk->sk_socket->ops); smp_store_release(&saved_tcpv4_prot, prot); } mutex_unlock(&tcpv4_prot_mutex); @@ -959,10 +996,6 @@ static int __init tls_register(void) if (err) return err; - tls_sw_proto_ops = inet_stream_ops; - tls_sw_proto_ops.splice_read = tls_sw_splice_read; - tls_sw_proto_ops.sendpage_locked = tls_sw_sendpage_locked; - tls_device_init(); tcp_register_ulp(&tcp_tls_ulp_ops); diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index d81564078557..dfe623a4e72f 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -521,7 +521,7 @@ static int tls_do_encryption(struct sock *sk, memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv, prot->iv_size + prot->salt_size); - xor_iv_with_seq(prot, rec->iv_data, tls_ctx->tx.rec_seq); + xor_iv_with_seq(prot, rec->iv_data + iv_offset, tls_ctx->tx.rec_seq); sge->offset += prot->prepend_size; sge->length -= prot->prepend_size; @@ -1499,7 +1499,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb, else memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size); - xor_iv_with_seq(prot, iv, tls_ctx->rx.rec_seq); + xor_iv_with_seq(prot, iv + iv_offset, tls_ctx->rx.rec_seq); /* Prepare AAD */ tls_make_aad(aad, rxm->full_len - prot->overhead_size + @@ -2005,6 +2005,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, struct sock *sk = sock->sk; struct sk_buff *skb; ssize_t copied = 0; + bool from_queue; int err = 0; long timeo; int chunk; @@ -2014,25 +2015,28 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK); - skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, &err); - if (!skb) - goto splice_read_end; - - if (!ctx->decrypted) { - err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false); - - /* splice does not support reading control messages */ - if (ctx->control != TLS_RECORD_TYPE_DATA) { - err = -EINVAL; + from_queue = !skb_queue_empty(&ctx->rx_list); + if (from_queue) { + skb = __skb_dequeue(&ctx->rx_list); + } else { + skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, + &err); + if (!skb) goto splice_read_end; - } + err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false); if (err < 0) { tls_err_abort(sk, -EBADMSG); goto splice_read_end; } - ctx->decrypted = 1; } + + /* splice does not support reading control messages */ + if (ctx->control != TLS_RECORD_TYPE_DATA) { + err = -EINVAL; + goto splice_read_end; + } + rxm = strp_msg(skb); chunk = min_t(unsigned int, rxm->full_len, len); @@ -2040,7 +2044,17 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, if (copied < 0) goto splice_read_end; - tls_sw_advance_skb(sk, skb, copied); + if (!from_queue) { + ctx->recv_pkt = NULL; + __strp_unpause(&ctx->strp); + } + if (chunk < rxm->full_len) { + __skb_queue_head(&ctx->rx_list, skb); + rxm->offset += len; + rxm->full_len -= len; + } else { + consume_skb(skb); + } splice_read_end: release_sock(sk); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 78e08e82c08c..b0bfc78e421c 100644 
--- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2882,9 +2882,6 @@ static int unix_shutdown(struct socket *sock, int mode) unix_state_lock(sk); sk->sk_shutdown |= mode; - if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && - mode == SHUTDOWN_MASK) - sk->sk_state = TCP_CLOSE; other = unix_peer(sk); if (other) sock_hold(other); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 81232b73df8f..a27b3b5fa210 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -936,33 +936,37 @@ nl80211_packet_pattern_policy[MAX_NL80211_PKTPAT + 1] = { [NL80211_PKTPAT_OFFSET] = { .type = NLA_U32 }, }; -int nl80211_prepare_wdev_dump(struct netlink_callback *cb, - struct cfg80211_registered_device **rdev, - struct wireless_dev **wdev) +static int nl80211_prepare_wdev_dump(struct netlink_callback *cb, + struct cfg80211_registered_device **rdev, + struct wireless_dev **wdev, + struct nlattr **attrbuf) { int err; if (!cb->args[0]) { - struct nlattr **attrbuf; + struct nlattr **attrbuf_free = NULL; - attrbuf = kcalloc(NUM_NL80211_ATTR, sizeof(*attrbuf), - GFP_KERNEL); - if (!attrbuf) - return -ENOMEM; + if (!attrbuf) { + attrbuf = kcalloc(NUM_NL80211_ATTR, sizeof(*attrbuf), + GFP_KERNEL); + if (!attrbuf) + return -ENOMEM; + attrbuf_free = attrbuf; + } err = nlmsg_parse_deprecated(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, attrbuf, nl80211_fam.maxattr, nl80211_policy, NULL); if (err) { - kfree(attrbuf); + kfree(attrbuf_free); return err; } rtnl_lock(); *wdev = __cfg80211_wdev_from_attrs(NULL, sock_net(cb->skb->sk), attrbuf); - kfree(attrbuf); + kfree(attrbuf_free); if (IS_ERR(*wdev)) { rtnl_unlock(); return PTR_ERR(*wdev); @@ -6197,7 +6201,7 @@ static int nl80211_dump_station(struct sk_buff *skb, int sta_idx = cb->args[2]; int err; - err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev); + err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, NULL); if (err) return err; /* nl80211_prepare_wdev_dump acquired it in the successful case */ @@ -7092,7 +7096,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb, int path_idx = cb->args[2]; int err; - err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev); + err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, NULL); if (err) return err; /* nl80211_prepare_wdev_dump acquired it in the successful case */ @@ -7292,7 +7296,7 @@ static int nl80211_dump_mpp(struct sk_buff *skb, int path_idx = cb->args[2]; int err; - err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev); + err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, NULL); if (err) return err; /* nl80211_prepare_wdev_dump acquired it in the successful case */ @@ -9718,7 +9722,7 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb) int start = cb->args[2], idx = 0; int err; - err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev); + err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, NULL); if (err) return err; /* nl80211_prepare_wdev_dump acquired it in the successful case */ @@ -9851,7 +9855,7 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb) if (!attrbuf) return -ENOMEM; - res = nl80211_prepare_wdev_dump(cb, &rdev, &wdev); + res = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, attrbuf); if (res) { kfree(attrbuf); return res; diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h index a3f387770f1b..d642e3be4ee7 100644 --- a/net/wireless/nl80211.h +++ b/net/wireless/nl80211.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* * Portions of this file - * Copyright (C) 2018, 2020 Intel Corporation + * Copyright (C) 2018, 
2020-2021 Intel Corporation */ #ifndef __NET_WIRELESS_NL80211_H #define __NET_WIRELESS_NL80211_H @@ -22,10 +22,6 @@ static inline u64 wdev_id(struct wireless_dev *wdev) ((u64)wiphy_to_rdev(wdev->wiphy)->wiphy_idx << 32); } -int nl80211_prepare_wdev_dump(struct netlink_callback *cb, - struct cfg80211_registered_device **rdev, - struct wireless_dev **wdev); - int nl80211_parse_chandef(struct cfg80211_registered_device *rdev, struct genl_info *info, struct cfg80211_chan_def *chandef); diff --git a/net/wireless/util.c b/net/wireless/util.c index 5ff1f8726faf..41ea65deb6e1 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -1046,6 +1046,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, switch (otype) { case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_P2P_GO: cfg80211_stop_ap(rdev, dev, true); break; case NL80211_IFTYPE_ADHOC: diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c index 90c4e1e819d3..bc4ad48ea4f0 100644 --- a/net/xdp/xsk_buff_pool.c +++ b/net/xdp/xsk_buff_pool.c @@ -500,7 +500,7 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool) pool->free_list_cnt--; xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, free_list_node); - list_del(&xskb->free_list_node); + list_del_init(&xskb->free_list_node); } xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM; @@ -568,7 +568,7 @@ static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u3 i = nb_entries; while (i--) { xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, free_list_node); - list_del(&xskb->free_list_node); + list_del_init(&xskb->free_list_node); *xdp = &xskb->xdp; xdp++; @@ -615,6 +615,9 @@ EXPORT_SYMBOL(xp_can_alloc); void xp_free(struct xdp_buff_xsk *xskb) { + if (!list_empty(&xskb->free_list_node)) + return; + xskb->pool->free_list_cnt++; list_add(&xskb->free_list_node, &xskb->pool->free_list); } diff --git a/samples/Kconfig b/samples/Kconfig index bec3528aa2de..43d2e9aa557f 100644 --- a/samples/Kconfig +++ b/samples/Kconfig @@ -31,6 +31,15 @@ config SAMPLE_FTRACE_DIRECT This builds an ftrace direct function example that hooks to wake_up_process and prints the parameters. +config SAMPLE_FTRACE_DIRECT_MULTI + tristate "Build register_ftrace_direct_multi() example" + depends on DYNAMIC_FTRACE_WITH_DIRECT_CALLS && m + depends on HAVE_SAMPLE_FTRACE_DIRECT_MULTI + help + This builds an ftrace direct function example + that hooks to wake_up_process and schedule, and prints + the function addresses. 
+ config SAMPLE_TRACE_ARRAY tristate "Build sample module for kernel access to Ftrace instances" depends on EVENT_TRACING && m @@ -237,5 +246,5 @@ endif # SAMPLES config HAVE_SAMPLE_FTRACE_DIRECT bool -config HAVE_SAMPLE_FTRACE_MULTI_DIRECT +config HAVE_SAMPLE_FTRACE_DIRECT_MULTI bool diff --git a/samples/Makefile b/samples/Makefile index b7b98307c2b4..4bcd6b93bffa 100644 --- a/samples/Makefile +++ b/samples/Makefile @@ -22,7 +22,7 @@ subdir-$(CONFIG_SAMPLE_TIMER) += timers obj-$(CONFIG_SAMPLE_TRACE_EVENTS) += trace_events/ obj-$(CONFIG_SAMPLE_TRACE_PRINTK) += trace_printk/ obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace/ -obj-$(CONFIG_SAMPLE_FTRACE_MULTI_DIRECT) += ftrace/ +obj-$(CONFIG_SAMPLE_FTRACE_DIRECT_MULTI) += ftrace/ obj-$(CONFIG_SAMPLE_TRACE_ARRAY) += ftrace/ subdir-$(CONFIG_SAMPLE_UHID) += uhid obj-$(CONFIG_VIDEO_PCI_SKELETON) += v4l/ diff --git a/samples/bpf/hbm_kern.h b/samples/bpf/hbm_kern.h index 722b3fadb467..1752a46a2b05 100644 --- a/samples/bpf/hbm_kern.h +++ b/samples/bpf/hbm_kern.h @@ -9,8 +9,6 @@ * Include file for sample Host Bandwidth Manager (HBM) BPF programs */ #define KBUILD_MODNAME "foo" -#include <stddef.h> -#include <stdbool.h> #include <uapi/linux/bpf.h> #include <uapi/linux/if_ether.h> #include <uapi/linux/if_packet.h> diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c index d84e6949007c..a81704d3317b 100644 --- a/samples/bpf/xdp_redirect_cpu_user.c +++ b/samples/bpf/xdp_redirect_cpu_user.c @@ -309,7 +309,6 @@ int main(int argc, char **argv) const char *mprog_filename = NULL, *mprog_name = NULL; struct xdp_redirect_cpu *skel; struct bpf_map_info info = {}; - char ifname_buf[IF_NAMESIZE]; struct bpf_cpumap_val value; __u32 infosz = sizeof(info); int ret = EXIT_FAIL_OPTION; @@ -390,10 +389,10 @@ int main(int argc, char **argv) case 'd': if (strlen(optarg) >= IF_NAMESIZE) { fprintf(stderr, "-d/--dev name too long\n"); + usage(argv, long_options, __doc__, mask, true, skel->obj); goto end_cpu; } - safe_strncpy(ifname_buf, optarg, strlen(ifname_buf)); - ifindex = if_nametoindex(ifname_buf); + ifindex = if_nametoindex(optarg); if (!ifindex) ifindex = strtoul(optarg, NULL, 0); if (!ifindex) { diff --git a/samples/bpf/xdp_sample_user.c b/samples/bpf/xdp_sample_user.c index b32d82178199..8740838e7767 100644 --- a/samples/bpf/xdp_sample_user.c +++ b/samples/bpf/xdp_sample_user.c @@ -120,7 +120,10 @@ struct sample_output { __u64 xmit; } totals; struct { - __u64 pps; + union { + __u64 pps; + __u64 num; + }; __u64 drop; __u64 err; } rx_cnt; @@ -1322,7 +1325,7 @@ int sample_install_xdp(struct bpf_program *xdp_prog, int ifindex, bool generic, static void sample_summary_print(void) { - double period = sample_out.rx_cnt.pps; + double num = sample_out.rx_cnt.num; if (sample_out.totals.rx) { double pkts = sample_out.totals.rx; @@ -1330,7 +1333,7 @@ static void sample_summary_print(void) print_always(" Packets received : %'-10llu\n", sample_out.totals.rx); print_always(" Average packets/s : %'-10.0f\n", - sample_round(pkts / period)); + sample_round(pkts / num)); } if (sample_out.totals.redir) { double pkts = sample_out.totals.redir; @@ -1338,7 +1341,7 @@ static void sample_summary_print(void) print_always(" Packets redirected : %'-10llu\n", sample_out.totals.redir); print_always(" Average redir/s : %'-10.0f\n", - sample_round(pkts / period)); + sample_round(pkts / num)); } if (sample_out.totals.drop) print_always(" Rx dropped : %'-10llu\n", @@ -1355,7 +1358,7 @@ static void sample_summary_print(void) print_always(" Packets transmitted : 
%'-10llu\n", sample_out.totals.xmit); print_always(" Average transmit/s : %'-10.0f\n", - sample_round(pkts / period)); + sample_round(pkts / num)); } } @@ -1422,7 +1425,7 @@ static int sample_stats_collect(struct stats_record *rec) return 0; } -static void sample_summary_update(struct sample_output *out, int interval) +static void sample_summary_update(struct sample_output *out) { sample_out.totals.rx += out->totals.rx; sample_out.totals.redir += out->totals.redir; @@ -1430,12 +1433,11 @@ static void sample_summary_update(struct sample_output *out, int interval) sample_out.totals.drop_xmit += out->totals.drop_xmit; sample_out.totals.err += out->totals.err; sample_out.totals.xmit += out->totals.xmit; - sample_out.rx_cnt.pps += interval; + sample_out.rx_cnt.num++; } static void sample_stats_print(int mask, struct stats_record *cur, - struct stats_record *prev, char *prog_name, - int interval) + struct stats_record *prev, char *prog_name) { struct sample_output out = {}; @@ -1452,7 +1454,7 @@ static void sample_stats_print(int mask, struct stats_record *cur, else if (mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI) stats_get_devmap_xmit_multi(cur, prev, 0, &out, mask & SAMPLE_DEVMAP_XMIT_CNT); - sample_summary_update(&out, interval); + sample_summary_update(&out); stats_print(prog_name, mask, cur, prev, &out); } @@ -1495,7 +1497,7 @@ static void swap(struct stats_record **a, struct stats_record **b) } static int sample_timer_cb(int timerfd, struct stats_record **rec, - struct stats_record **prev, int interval) + struct stats_record **prev) { char line[64] = "Summary"; int ret; @@ -1524,7 +1526,7 @@ static int sample_timer_cb(int timerfd, struct stats_record **rec, snprintf(line, sizeof(line), "%s->%s", f ?: "?", t ?: "?"); } - sample_stats_print(sample_mask, *rec, *prev, line, interval); + sample_stats_print(sample_mask, *rec, *prev, line); return 0; } @@ -1579,7 +1581,7 @@ int sample_run(int interval, void (*post_cb)(void *), void *ctx) if (pfd[0].revents & POLLIN) ret = sample_signal_cb(); else if (pfd[1].revents & POLLIN) - ret = sample_timer_cb(timerfd, &rec, &prev, interval); + ret = sample_timer_cb(timerfd, &rec, &prev); if (ret) break; diff --git a/samples/ftrace/Makefile b/samples/ftrace/Makefile index e8a3f8520a44..faf8cdb79c5f 100644 --- a/samples/ftrace/Makefile +++ b/samples/ftrace/Makefile @@ -3,7 +3,8 @@ obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct.o obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct-too.o obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct-modify.o -obj-$(CONFIG_SAMPLE_FTRACE_MULTI_DIRECT) += ftrace-direct-multi.o +obj-$(CONFIG_SAMPLE_FTRACE_DIRECT_MULTI) += ftrace-direct-multi.o +obj-$(CONFIG_SAMPLE_FTRACE_DIRECT_MULTI) += ftrace-direct-multi-modify.o CFLAGS_sample-trace-array.o := -I$(src) obj-$(CONFIG_SAMPLE_TRACE_ARRAY) += sample-trace-array.o diff --git a/samples/ftrace/ftrace-direct-multi-modify.c b/samples/ftrace/ftrace-direct-multi-modify.c new file mode 100644 index 000000000000..91bc42a7adb9 --- /dev/null +++ b/samples/ftrace/ftrace-direct-multi-modify.c @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <linux/module.h> +#include <linux/kthread.h> +#include <linux/ftrace.h> +#include <asm/asm-offsets.h> + +void my_direct_func1(unsigned long ip) +{ + trace_printk("my direct func1 ip %lx\n", ip); +} + +void my_direct_func2(unsigned long ip) +{ + trace_printk("my direct func2 ip %lx\n", ip); +} + +extern void my_tramp1(void *); +extern void my_tramp2(void *); + +#ifdef CONFIG_X86_64 + +asm ( +" .pushsection .text, \"ax\", @progbits\n" +" 
.type my_tramp1, @function\n" +" .globl my_tramp1\n" +" my_tramp1:" +" pushq %rbp\n" +" movq %rsp, %rbp\n" +" pushq %rdi\n" +" movq 8(%rbp), %rdi\n" +" call my_direct_func1\n" +" popq %rdi\n" +" leave\n" +" ret\n" +" .size my_tramp1, .-my_tramp1\n" +" .type my_tramp2, @function\n" +"\n" +" .globl my_tramp2\n" +" my_tramp2:" +" pushq %rbp\n" +" movq %rsp, %rbp\n" +" pushq %rdi\n" +" movq 8(%rbp), %rdi\n" +" call my_direct_func2\n" +" popq %rdi\n" +" leave\n" +" ret\n" +" .size my_tramp2, .-my_tramp2\n" +" .popsection\n" +); + +#endif /* CONFIG_X86_64 */ + +#ifdef CONFIG_S390 + +asm ( +" .pushsection .text, \"ax\", @progbits\n" +" .type my_tramp1, @function\n" +" .globl my_tramp1\n" +" my_tramp1:" +" lgr %r1,%r15\n" +" stmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n" +" stg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n" +" aghi %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n" +" stg %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n" +" lgr %r2,%r0\n" +" brasl %r14,my_direct_func1\n" +" aghi %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n" +" lmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n" +" lg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n" +" lgr %r1,%r0\n" +" br %r1\n" +" .size my_tramp1, .-my_tramp1\n" +"\n" +" .type my_tramp2, @function\n" +" .globl my_tramp2\n" +" my_tramp2:" +" lgr %r1,%r15\n" +" stmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n" +" stg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n" +" aghi %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n" +" stg %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n" +" lgr %r2,%r0\n" +" brasl %r14,my_direct_func2\n" +" aghi %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n" +" lmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n" +" lg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n" +" lgr %r1,%r0\n" +" br %r1\n" +" .size my_tramp2, .-my_tramp2\n" +" .popsection\n" +); + +#endif /* CONFIG_S390 */ + +static unsigned long my_tramp = (unsigned long)my_tramp1; +static unsigned long tramps[2] = { + (unsigned long)my_tramp1, + (unsigned long)my_tramp2, +}; + +static struct ftrace_ops direct; + +static int simple_thread(void *arg) +{ + static int t; + int ret = 0; + + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(2 * HZ); + + if (ret) + continue; + t ^= 1; + ret = modify_ftrace_direct_multi(&direct, tramps[t]); + if (!ret) + my_tramp = tramps[t]; + WARN_ON_ONCE(ret); + } + + return 0; +} + +static struct task_struct *simple_tsk; + +static int __init ftrace_direct_multi_init(void) +{ + int ret; + + ftrace_set_filter_ip(&direct, (unsigned long) wake_up_process, 0, 0); + ftrace_set_filter_ip(&direct, (unsigned long) schedule, 0, 0); + + ret = register_ftrace_direct_multi(&direct, my_tramp); + + if (!ret) + simple_tsk = kthread_run(simple_thread, NULL, "event-sample-fn"); + return ret; +} + +static void __exit ftrace_direct_multi_exit(void) +{ + kthread_stop(simple_tsk); + unregister_ftrace_direct_multi(&direct, my_tramp); +} + +module_init(ftrace_direct_multi_init); +module_exit(ftrace_direct_multi_exit); + +MODULE_AUTHOR("Jiri Olsa"); +MODULE_DESCRIPTION("Example use case of using modify_ftrace_direct_multi()"); +MODULE_LICENSE("GPL"); diff --git a/samples/ftrace/ftrace-direct-multi.c b/samples/ftrace/ftrace-direct-multi.c index b6d7806b400e..2fafc9afcbf0 100644 --- a/samples/ftrace/ftrace-direct-multi.c +++ b/samples/ftrace/ftrace-direct-multi.c @@ -4,6 +4,7 @@ #include <linux/mm.h> /* for handle_mm_fault() */ #include <linux/ftrace.h> #include <linux/sched/stat.h> +#include <asm/asm-offsets.h> extern void my_direct_func(unsigned long ip); @@ -14,6 +15,8 @@ void 
my_direct_func(unsigned long ip) extern void my_tramp(void *); +#ifdef CONFIG_X86_64 + asm ( " .pushsection .text, \"ax\", @progbits\n" " .type my_tramp, @function\n" @@ -31,6 +34,33 @@ asm ( " .popsection\n" ); +#endif /* CONFIG_X86_64 */ + +#ifdef CONFIG_S390 + +asm ( +" .pushsection .text, \"ax\", @progbits\n" +" .type my_tramp, @function\n" +" .globl my_tramp\n" +" my_tramp:" +" lgr %r1,%r15\n" +" stmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n" +" stg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n" +" aghi %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n" +" stg %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n" +" lgr %r2,%r0\n" +" brasl %r14,my_direct_func\n" +" aghi %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n" +" lmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n" +" lg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n" +" lgr %r1,%r0\n" +" br %r1\n" +" .size my_tramp, .-my_tramp\n" +" .popsection\n" +); + +#endif /* CONFIG_S390 */ + static struct ftrace_ops direct; static int __init ftrace_direct_multi_init(void) diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c index cc3625617a0e..c0d3bcb99138 100644 --- a/scripts/mod/devicetable-offsets.c +++ b/scripts/mod/devicetable-offsets.c @@ -259,5 +259,8 @@ int main(void) DEVID_FIELD(dfl_device_id, type); DEVID_FIELD(dfl_device_id, feature_id); + DEVID(ishtp_device_id); + DEVID_FIELD(ishtp_device_id, guid); + return 0; } diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 49aba862073e..5258247d78ac 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -115,6 +115,17 @@ static inline void add_uuid(char *str, uuid_le uuid) uuid.b[12], uuid.b[13], uuid.b[14], uuid.b[15]); } +static inline void add_guid(char *str, guid_t guid) +{ + int len = strlen(str); + + sprintf(str + len, "%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X", + guid.b[3], guid.b[2], guid.b[1], guid.b[0], + guid.b[5], guid.b[4], guid.b[7], guid.b[6], + guid.b[8], guid.b[9], guid.b[10], guid.b[11], + guid.b[12], guid.b[13], guid.b[14], guid.b[15]); +} + /** * Check that sizeof(device_id type) is consistent with the size of the section * in the .o file. If inconsistent, then userspace and kernel do not agree @@ -1380,6 +1391,18 @@ static int do_mhi_entry(const char *filename, void *symval, char *alias) return 1; } +/* Looks like: ishtp:{guid} */ +static int do_ishtp_entry(const char *filename, void *symval, char *alias) +{ + DEF_FIELD(symval, ishtp_device_id, guid); + + strcpy(alias, ISHTP_MODULE_PREFIX "{"); + add_guid(alias, guid); + strcat(alias, "}"); + + return 1; +} + static int do_auxiliary_entry(const char *filename, void *symval, char *alias) { DEF_FIELD_ADDR(symval, auxiliary_device_id, name); @@ -1499,6 +1522,7 @@ static const struct devtable devtable[] = { {"auxiliary", SIZE_auxiliary_device_id, do_auxiliary_entry}, {"ssam", SIZE_ssam_device_id, do_ssam_entry}, {"dfl", SIZE_dfl_device_id, do_dfl_entry}, + {"ishtp", SIZE_ishtp_device_id, do_ishtp_entry}, }; /* Create MODULE_ALIAS() statements. 
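For context on the new "ishtp" table support above: a client driver declares an ishtp_device_id table and exports it with MODULE_DEVICE_TABLE(), and modpost then emits one ishtp:{GUID} alias per entry via do_ishtp_entry() and add_guid(). A minimal sketch of the driver side; the table name and GUID value here are purely illustrative:

	#include <linux/module.h>
	#include <linux/mod_devicetable.h>
	#include <linux/uuid.h>

	/* Hypothetical ISHTP client: the GUID names the firmware client to bind to. */
	static const struct ishtp_device_id my_ishtp_ids[] = {
		{ .guid = GUID_INIT(0x33AECD58, 0xB679, 0x4E54,
				    0x9B, 0xD9, 0xA0, 0x4D, 0x34, 0xF0, 0xC2, 0x26), },
		{ }
	};
	MODULE_DEVICE_TABLE(ishtp, my_ishtp_ids);

For the entry above, add_guid()'s byte order yields MODULE_ALIAS("ishtp:{33AECD58-B679-4E54-9BD9-A04D34F0C226}"), assuming ISHTP_MODULE_PREFIX expands to "ishtp:".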
diff --git a/security/selinux/ss/hashtab.c b/security/selinux/ss/hashtab.c index 727c3b484bd3..0ae4e4e57a40 100644 --- a/security/selinux/ss/hashtab.c +++ b/security/selinux/ss/hashtab.c @@ -31,13 +31,20 @@ static u32 hashtab_compute_size(u32 nel) int hashtab_init(struct hashtab *h, u32 nel_hint) { - h->size = hashtab_compute_size(nel_hint); + u32 size = hashtab_compute_size(nel_hint); + + /* should already be zeroed, but better be safe */ h->nel = 0; - if (!h->size) - return 0; + h->size = 0; + h->htable = NULL; - h->htable = kcalloc(h->size, sizeof(*h->htable), GFP_KERNEL); - return h->htable ? 0 : -ENOMEM; + if (size) { + h->htable = kcalloc(size, sizeof(*h->htable), GFP_KERNEL); + if (!h->htable) + return -ENOMEM; + h->size = size; + } + return 0; } int __hashtab_insert(struct hashtab *h, struct hashtab_node **dst, diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c index 470dabc60aa0..edff063e088d 100644 --- a/sound/core/control_compat.c +++ b/sound/core/control_compat.c @@ -264,6 +264,7 @@ static int copy_ctl_value_to_user(void __user *userdata, struct snd_ctl_elem_value *data, int type, int count) { + struct snd_ctl_elem_value32 __user *data32 = userdata; int i, size; if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN || @@ -280,6 +281,8 @@ static int copy_ctl_value_to_user(void __user *userdata, if (copy_to_user(valuep, data->value.bytes.data, size)) return -EFAULT; } + if (copy_to_user(&data32->id, &data->id, sizeof(data32->id))) + return -EFAULT; return 0; } diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index 82a818734a5f..20a0a4771b9a 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -147,7 +147,7 @@ snd_pcm_hw_param_value_min(const struct snd_pcm_hw_params *params, * * Return the maximum value for field PAR. 
*/ -static unsigned int +static int snd_pcm_hw_param_value_max(const struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, int *dir) { @@ -682,18 +682,24 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *oss_params, struct snd_pcm_hw_params *slave_params) { - size_t s; - size_t oss_buffer_size, oss_period_size, oss_periods; - size_t min_period_size, max_period_size; + ssize_t s; + ssize_t oss_buffer_size; + ssize_t oss_period_size, oss_periods; + ssize_t min_period_size, max_period_size; struct snd_pcm_runtime *runtime = substream->runtime; size_t oss_frame_size; oss_frame_size = snd_pcm_format_physical_width(params_format(oss_params)) * params_channels(oss_params) / 8; + oss_buffer_size = snd_pcm_hw_param_value_max(slave_params, + SNDRV_PCM_HW_PARAM_BUFFER_SIZE, + NULL); + if (oss_buffer_size <= 0) + return -EINVAL; oss_buffer_size = snd_pcm_plug_client_size(substream, - snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, NULL)) * oss_frame_size; - if (!oss_buffer_size) + oss_buffer_size * oss_frame_size); + if (oss_buffer_size <= 0) return -EINVAL; oss_buffer_size = rounddown_pow_of_two(oss_buffer_size); if (atomic_read(&substream->mmap_count)) { @@ -730,7 +736,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream, min_period_size = snd_pcm_plug_client_size(substream, snd_pcm_hw_param_value_min(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL)); - if (min_period_size) { + if (min_period_size > 0) { min_period_size *= oss_frame_size; min_period_size = roundup_pow_of_two(min_period_size); if (oss_period_size < min_period_size) @@ -739,7 +745,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream, max_period_size = snd_pcm_plug_client_size(substream, snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL)); - if (max_period_size) { + if (max_period_size > 0) { max_period_size *= oss_frame_size; max_period_size = rounddown_pow_of_two(max_period_size); if (oss_period_size > max_period_size) @@ -752,7 +758,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream, oss_periods = substream->oss.setup.periods; s = snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIODS, NULL); - if (runtime->oss.maxfrags && s > runtime->oss.maxfrags) + if (s > 0 && runtime->oss.maxfrags && s > runtime->oss.maxfrags) s = runtime->oss.maxfrags; if (oss_periods > s) oss_periods = s; @@ -878,8 +884,15 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream) err = -EINVAL; goto failure; } - choose_rate(substream, sparams, runtime->oss.rate); - snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_CHANNELS, runtime->oss.channels, NULL); + + err = choose_rate(substream, sparams, runtime->oss.rate); + if (err < 0) + goto failure; + err = snd_pcm_hw_param_near(substream, sparams, + SNDRV_PCM_HW_PARAM_CHANNELS, + runtime->oss.channels, NULL); + if (err < 0) + goto failure; format = snd_pcm_oss_format_from(runtime->oss.format); @@ -1956,7 +1969,7 @@ static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsign if (runtime->oss.subdivision || runtime->oss.fragshift) return -EINVAL; fragshift = val & 0xffff; - if (fragshift >= 31) + if (fragshift >= 25) /* should be large enough */ return -EINVAL; runtime->oss.fragshift = fragshift; runtime->oss.maxfrags = (val >> 16) & 0xffff; diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c index b9ac9e9e45a4..4208fa8a4db5 100644 --- 
a/sound/hda/intel-dsp-config.c +++ b/sound/hda/intel-dsp-config.c @@ -252,6 +252,11 @@ static const struct config_entry config_table[] = { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = 0x02c8, }, + { + .flags = FLAG_SOF, + .device = 0x02c8, + .codec_hid = "ESSX8336", + }, /* Cometlake-H */ { .flags = FLAG_SOF, @@ -276,6 +281,11 @@ static const struct config_entry config_table[] = { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = 0x06c8, }, + { + .flags = FLAG_SOF, + .device = 0x06c8, + .codec_hid = "ESSX8336", + }, #endif /* Icelake */ @@ -299,6 +309,15 @@ static const struct config_entry config_table[] = { }, #endif +/* JasperLake */ +#if IS_ENABLED(CONFIG_SND_SOC_SOF_JASPERLAKE) + { + .flags = FLAG_SOF, + .device = 0x4dc8, + .codec_hid = "ESSX8336", + }, +#endif + /* Tigerlake */ #if IS_ENABLED(CONFIG_SND_SOC_SOF_TIGERLAKE) { diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c index ea20236f35db..9a678b5cf285 100644 --- a/sound/pci/cmipci.c +++ b/sound/pci/cmipci.c @@ -3218,7 +3218,6 @@ static int snd_cmipci_probe(struct pci_dev *pci, { static int dev; struct snd_card *card; - struct cmipci *cm; int err; if (dev >= SNDRV_CARDS) @@ -3229,10 +3228,9 @@ static int snd_cmipci_probe(struct pci_dev *pci, } err = snd_devm_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE, - sizeof(*cm), &card); + sizeof(struct cmipci), &card); if (err < 0) return err; - cm = card->private_data; switch (pci->device) { case PCI_DEVICE_ID_CMEDIA_CM8738: diff --git a/sound/pci/ctxfi/ctamixer.c b/sound/pci/ctxfi/ctamixer.c index da6e6350ceaf..d074727c3e21 100644 --- a/sound/pci/ctxfi/ctamixer.c +++ b/sound/pci/ctxfi/ctamixer.c @@ -23,16 +23,15 @@ #define BLANK_SLOT 4094 -static int amixer_master(struct rsc *rsc) +static void amixer_master(struct rsc *rsc) { rsc->conj = 0; - return rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0]; + rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0]; } -static int amixer_next_conj(struct rsc *rsc) +static void amixer_next_conj(struct rsc *rsc) { rsc->conj++; - return container_of(rsc, struct amixer, rsc)->idx[rsc->conj]; } static int amixer_index(const struct rsc *rsc) @@ -331,16 +330,15 @@ int amixer_mgr_destroy(struct amixer_mgr *amixer_mgr) /* SUM resource management */ -static int sum_master(struct rsc *rsc) +static void sum_master(struct rsc *rsc) { rsc->conj = 0; - return rsc->idx = container_of(rsc, struct sum, rsc)->idx[0]; + rsc->idx = container_of(rsc, struct sum, rsc)->idx[0]; } -static int sum_next_conj(struct rsc *rsc) +static void sum_next_conj(struct rsc *rsc) { rsc->conj++; - return container_of(rsc, struct sum, rsc)->idx[rsc->conj]; } static int sum_index(const struct rsc *rsc) diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c index f589da045342..7fc720046ce2 100644 --- a/sound/pci/ctxfi/ctdaio.c +++ b/sound/pci/ctxfi/ctdaio.c @@ -51,12 +51,12 @@ static const struct daio_rsc_idx idx_20k2[NUM_DAIOTYP] = { [SPDIFIO] = {.left = 0x05, .right = 0x85}, }; -static int daio_master(struct rsc *rsc) +static void daio_master(struct rsc *rsc) { /* Actually, this is not the resource index of DAIO. * For DAO, it is the input mapper index. And, for DAI, * it is the output time-slot index. 
*/ - return rsc->conj = rsc->idx; + rsc->conj = rsc->idx; } static int daio_index(const struct rsc *rsc) @@ -64,19 +64,19 @@ static int daio_index(const struct rsc *rsc) return rsc->conj; } -static int daio_out_next_conj(struct rsc *rsc) +static void daio_out_next_conj(struct rsc *rsc) { - return rsc->conj += 2; + rsc->conj += 2; } -static int daio_in_next_conj_20k1(struct rsc *rsc) +static void daio_in_next_conj_20k1(struct rsc *rsc) { - return rsc->conj += 0x200; + rsc->conj += 0x200; } -static int daio_in_next_conj_20k2(struct rsc *rsc) +static void daio_in_next_conj_20k2(struct rsc *rsc) { - return rsc->conj += 0x100; + rsc->conj += 0x100; } static const struct rsc_ops daio_out_rsc_ops = { diff --git a/sound/pci/ctxfi/ctresource.c b/sound/pci/ctxfi/ctresource.c index 81ad26934518..be1d3e61309c 100644 --- a/sound/pci/ctxfi/ctresource.c +++ b/sound/pci/ctxfi/ctresource.c @@ -109,18 +109,17 @@ static int audio_ring_slot(const struct rsc *rsc) return (rsc->conj << 4) + offset_in_audio_slot_block[rsc->type]; } -static int rsc_next_conj(struct rsc *rsc) +static void rsc_next_conj(struct rsc *rsc) { unsigned int i; for (i = 0; (i < 8) && (!(rsc->msr & (0x1 << i))); ) i++; rsc->conj += (AUDIO_SLOT_BLOCK_NUM >> i); - return rsc->conj; } -static int rsc_master(struct rsc *rsc) +static void rsc_master(struct rsc *rsc) { - return rsc->conj = rsc->idx; + rsc->conj = rsc->idx; } static const struct rsc_ops rsc_generic_ops = { diff --git a/sound/pci/ctxfi/ctresource.h b/sound/pci/ctxfi/ctresource.h index fdbfd808816d..58553bda44f4 100644 --- a/sound/pci/ctxfi/ctresource.h +++ b/sound/pci/ctxfi/ctresource.h @@ -39,8 +39,8 @@ struct rsc { }; struct rsc_ops { - int (*master)(struct rsc *rsc); /* Move to master resource */ - int (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */ + void (*master)(struct rsc *rsc); /* Move to master resource */ + void (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */ int (*index)(const struct rsc *rsc); /* Return the index of resource */ /* Return the output slot number */ int (*output_slot)(const struct rsc *rsc); diff --git a/sound/pci/ctxfi/ctsrc.c b/sound/pci/ctxfi/ctsrc.c index bd4697b44233..4a94b4708a77 100644 --- a/sound/pci/ctxfi/ctsrc.c +++ b/sound/pci/ctxfi/ctsrc.c @@ -590,16 +590,15 @@ int src_mgr_destroy(struct src_mgr *src_mgr) /* SRCIMP resource manager operations */ -static int srcimp_master(struct rsc *rsc) +static void srcimp_master(struct rsc *rsc) { rsc->conj = 0; - return rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0]; + rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0]; } -static int srcimp_next_conj(struct rsc *rsc) +static void srcimp_next_conj(struct rsc *rsc) { rsc->conj++; - return container_of(rsc, struct srcimp, rsc)->idx[rsc->conj]; } static int srcimp_index(const struct rsc *rsc) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index fe51163f2d82..1b46b599a5cf 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -335,7 +335,10 @@ enum { ((pci)->device == 0x0c0c) || \ ((pci)->device == 0x0d0c) || \ ((pci)->device == 0x160c) || \ - ((pci)->device == 0x490d)) + ((pci)->device == 0x490d) || \ + ((pci)->device == 0x4f90) || \ + ((pci)->device == 0x4f91) || \ + ((pci)->device == 0x4f92)) #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) @@ -2473,6 +2476,13 @@ static const struct pci_device_id azx_ids[] = { /* DG1 */ { PCI_DEVICE(0x8086, 0x490d), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, + /* DG2 */ + { 
PCI_DEVICE(0x8086, 0x4f90), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, + { PCI_DEVICE(0x8086, 0x4f91), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, + { PCI_DEVICE(0x8086, 0x4f92), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, /* Alderlake-S */ { PCI_DEVICE(0x8086, 0x7ad0), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h index ea8ab8b43337..d22c96eb2f8f 100644 --- a/sound/pci/hda/hda_local.h +++ b/sound/pci/hda/hda_local.h @@ -438,6 +438,15 @@ int snd_hda_codec_set_pin_target(struct hda_codec *codec, hda_nid_t nid, #define for_each_hda_codec_node(nid, codec) \ for ((nid) = (codec)->core.start_nid; (nid) < (codec)->core.end_nid; (nid)++) +/* Set the codec power_state flag to indicate that unsol event handling is allowed; + * see hda_codec_unsol_event() in hda_bind.c. Calling this might confuse the + * state tracking, so use with care. + */ +static inline void snd_hda_codec_allow_unsol_events(struct hda_codec *codec) +{ + codec->core.dev.power.power_state = PMSG_ON; +} + /* * get widget capabilities */ diff --git a/sound/pci/hda/patch_cs8409.c b/sound/pci/hda/patch_cs8409.c index 31ff11ab868e..039b9f2f8e94 100644 --- a/sound/pci/hda/patch_cs8409.c +++ b/sound/pci/hda/patch_cs8409.c @@ -750,6 +750,11 @@ static void cs42l42_resume(struct sub_codec *cs42l42) if (cs42l42->full_scale_vol) cs8409_i2c_write(cs42l42, 0x2001, 0x01); + /* we have to explicitly allow unsol event handling even during the + * resume phase so that the jack event is processed properly + */ + snd_hda_codec_allow_unsol_events(cs42l42->codec); + cs42l42_enable_jack_detect(cs42l42); } diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 65d2c5539919..415701bd10ac 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -4380,10 +4380,11 @@ HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI", patch_i915_icl_hdmi), HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI", patch_i915_tgl_hdmi), HDA_CODEC_ENTRY(0x80862814, "DG1 HDMI", patch_i915_tgl_hdmi), HDA_CODEC_ENTRY(0x80862815, "Alderlake HDMI", patch_i915_tgl_hdmi), -HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_tgl_hdmi), HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI", patch_i915_tgl_hdmi), +HDA_CODEC_ENTRY(0x80862819, "DG2 HDMI", patch_i915_tgl_hdmi), HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi), HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI", patch_i915_icl_hdmi), +HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_tgl_hdmi), HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi), HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi), HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 2f1727faec69..3599f4c85ebf 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6503,22 +6503,47 @@ static void alc287_fixup_legion_15imhg05_speakers(struct hda_codec *codec, /* for alc285_fixup_ideapad_s740_coef() */ #include "ideapad_s740_helper.c" -static void alc256_fixup_tongfang_reset_persistent_settings(struct hda_codec *codec, - const struct hda_fixup *fix, - int action) +static const struct coef_fw alc256_fixup_set_coef_defaults_coefs[] = { + WRITE_COEF(0x10, 0x0020), WRITE_COEF(0x24, 0x0000), + WRITE_COEF(0x26, 0x0000), WRITE_COEF(0x29, 0x3000), + WRITE_COEF(0x37, 0xfe05), WRITE_COEF(0x45, 0x5089), + {} +}; + +static void 
alc256_fixup_set_coef_defaults(struct hda_codec *codec, + const struct hda_fixup *fix, + int action) { /* - * A certain other OS sets these coeffs to different values. On at least one TongFang - * barebone these settings might survive even a cold reboot. So to restore a clean slate the - * values are explicitly reset to default here. Without this, the external microphone is - * always in a plugged-in state, while the internal microphone is always in an unplugged - * state, breaking the ability to use the internal microphone. - */ - alc_write_coef_idx(codec, 0x24, 0x0000); - alc_write_coef_idx(codec, 0x26, 0x0000); - alc_write_coef_idx(codec, 0x29, 0x3000); - alc_write_coef_idx(codec, 0x37, 0xfe05); - alc_write_coef_idx(codec, 0x45, 0x5089); + * A certain other OS sets these coeffs to different values. On at least + * one TongFang barebone these settings might survive even a cold + * reboot. So to restore a clean slate the values are explicitly reset + * to default here. Without this, the external microphone is always in a + * plugged-in state, while the internal microphone is always in an + * unplugged state, breaking the ability to use the internal microphone. + */ + alc_process_coef_fw(codec, alc256_fixup_set_coef_defaults_coefs); +} + +static const struct coef_fw alc233_fixup_no_audio_jack_coefs[] = { + WRITE_COEF(0x1a, 0x9003), WRITE_COEF(0x1b, 0x0e2b), WRITE_COEF(0x37, 0xfe06), + WRITE_COEF(0x38, 0x4981), WRITE_COEF(0x45, 0xd489), WRITE_COEF(0x46, 0x0074), + WRITE_COEF(0x49, 0x0149), + {} +}; + +static void alc233_fixup_no_audio_jack(struct hda_codec *codec, + const struct hda_fixup *fix, + int action) +{ + /* + * The audio jack input and output is not detected on the ASRock NUC Box + * 1100 series when cold booting without this fix. Warm rebooting from a + * certain other OS makes the audio functional, as COEF settings are + * preserved in this case. This fix sets these altered COEF values as + * the default. 
+ */ + alc_process_coef_fw(codec, alc233_fixup_no_audio_jack_coefs); } enum { @@ -6738,8 +6763,9 @@ enum { ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE, ALC287_FIXUP_YOGA7_14ITL_SPEAKERS, ALC287_FIXUP_13S_GEN2_SPEAKERS, - ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS, + ALC256_FIXUP_SET_COEF_DEFAULTS, ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE, + ALC233_FIXUP_NO_AUDIO_JACK, }; static const struct hda_fixup alc269_fixups[] = { @@ -8443,9 +8469,9 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE, }, - [ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS] = { + [ALC256_FIXUP_SET_COEF_DEFAULTS] = { .type = HDA_FIXUP_FUNC, - .v.func = alc256_fixup_tongfang_reset_persistent_settings, + .v.func = alc256_fixup_set_coef_defaults, }, [ALC245_FIXUP_HP_GPIO_LED] = { .type = HDA_FIXUP_FUNC, @@ -8460,6 +8486,10 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, }, + [ALC233_FIXUP_NO_AUDIO_JACK] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc233_fixup_no_audio_jack, + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -8639,6 +8669,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8728, "HP EliteBook 840 G7", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), + SND_PCI_QUIRK(0x103c, 0x8735, "HP ProBook 435 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED), @@ -8894,6 +8925,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), + SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK), SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS), SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20), SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI), @@ -8901,7 +8933,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */ SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802), SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X), - SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS), + SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_SET_COEF_DEFAULTS), SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC), SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC), @@ -10203,6 +10235,27 @@ static void alc671_fixup_hp_headset_mic2(struct hda_codec *codec, } } +static void alc897_hp_automute_hook(struct hda_codec *codec, + struct hda_jack_callback *jack) +{ + struct alc_spec *spec = codec->spec; + int vref; + + snd_hda_gen_hp_automute(codec, jack); + vref = spec->gen.hp_jack_present ? 
(PIN_HP | AC_PINCTL_VREF_100) : PIN_HP; + snd_hda_codec_write(codec, 0x1b, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, + vref); +} + +static void alc897_fixup_lenovo_headset_mic(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + struct alc_spec *spec = codec->spec; + if (action == HDA_FIXUP_ACT_PRE_PROBE) { + spec->gen.hp_automute_hook = alc897_hp_automute_hook; + } +} + static const struct coef_fw alc668_coefs[] = { WRITE_COEF(0x01, 0xbebe), WRITE_COEF(0x02, 0xaaaa), WRITE_COEF(0x03, 0x0), WRITE_COEF(0x04, 0x0180), WRITE_COEF(0x06, 0x0), WRITE_COEF(0x07, 0x0f80), @@ -10283,6 +10336,8 @@ enum { ALC668_FIXUP_ASUS_NO_HEADSET_MIC, ALC668_FIXUP_HEADSET_MIC, ALC668_FIXUP_MIC_DET_COEF, + ALC897_FIXUP_LENOVO_HEADSET_MIC, + ALC897_FIXUP_HEADSET_MIC_PIN, }; static const struct hda_fixup alc662_fixups[] = { @@ -10689,6 +10744,19 @@ static const struct hda_fixup alc662_fixups[] = { {} }, }, + [ALC897_FIXUP_LENOVO_HEADSET_MIC] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc897_fixup_lenovo_headset_mic, + }, + [ALC897_FIXUP_HEADSET_MIC_PIN] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x1a, 0x03a11050 }, + { } + }, + .chained = true, + .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MIC + }, }; static const struct snd_pci_quirk alc662_fixup_tbl[] = { @@ -10733,6 +10801,10 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE), SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS), + SND_PCI_QUIRK(0x17aa, 0x32ca, "Lenovo ThinkCentre M80", ALC897_FIXUP_HEADSET_MIC_PIN), + SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN), + SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN), + SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", ALC897_FIXUP_HEADSET_MIC_PIN), SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO), diff --git a/sound/soc/amd/yc/pci-acp6x.c b/sound/soc/amd/yc/pci-acp6x.c index 957eeb6fb8e3..7e9a9a9d8ddd 100644 --- a/sound/soc/amd/yc/pci-acp6x.c +++ b/sound/soc/amd/yc/pci-acp6x.c @@ -146,10 +146,11 @@ static int snd_acp6x_probe(struct pci_dev *pci, { struct acp6x_dev_data *adata; struct platform_device_info pdevinfo[ACP6x_DEVS]; - int ret, index; + int index = 0; int val = 0x00; u32 addr; unsigned int irqflags; + int ret; irqflags = IRQF_SHARED; /* Yellow Carp device check */ diff --git a/sound/soc/codecs/cs35l41-spi.c b/sound/soc/codecs/cs35l41-spi.c index 90a921f726c3..3fa99741779a 100644 --- a/sound/soc/codecs/cs35l41-spi.c +++ b/sound/soc/codecs/cs35l41-spi.c @@ -42,34 +42,6 @@ static const struct spi_device_id cs35l41_id_spi[] = { MODULE_DEVICE_TABLE(spi, cs35l41_id_spi); -static void cs35l41_spi_otp_setup(struct cs35l41_private *cs35l41, - bool is_pre_setup, unsigned int *freq) -{ - struct spi_device *spi; - u32 orig_spi_freq; - - spi = to_spi_device(cs35l41->dev); - - if (!spi) { - dev_err(cs35l41->dev, "%s: No SPI device\n", __func__); - return; - } - - if (is_pre_setup) { - orig_spi_freq = spi->max_speed_hz; - if (orig_spi_freq > CS35L41_SPI_MAX_FREQ_OTP) { - spi->max_speed_hz = CS35L41_SPI_MAX_FREQ_OTP; - spi_setup(spi); - } - *freq = orig_spi_freq; - } else { - if (spi->max_speed_hz != *freq) { - spi->max_speed_hz = *freq; - spi_setup(spi); - 
} - } -} - static int cs35l41_spi_probe(struct spi_device *spi) { const struct regmap_config *regmap_config = &cs35l41_regmap_spi; @@ -81,6 +53,9 @@ static int cs35l41_spi_probe(struct spi_device *spi) if (!cs35l41) return -ENOMEM; + spi->max_speed_hz = CS35L41_SPI_MAX_FREQ; + spi_setup(spi); + spi_set_drvdata(spi, cs35l41); cs35l41->regmap = devm_regmap_init_spi(spi, regmap_config); if (IS_ERR(cs35l41->regmap)) { @@ -91,7 +66,6 @@ static int cs35l41_spi_probe(struct spi_device *spi) cs35l41->dev = &spi->dev; cs35l41->irq = spi->irq; - cs35l41->otp_setup = cs35l41_spi_otp_setup; return cs35l41_probe(cs35l41, pdata); } diff --git a/sound/soc/codecs/cs35l41.c b/sound/soc/codecs/cs35l41.c index 94ed21d7676f..9c4d481f7614 100644 --- a/sound/soc/codecs/cs35l41.c +++ b/sound/soc/codecs/cs35l41.c @@ -302,7 +302,6 @@ static int cs35l41_otp_unpack(void *data) const struct cs35l41_otp_packed_element_t *otp_map; struct cs35l41_private *cs35l41 = data; int bit_offset, word_offset, ret, i; - unsigned int orig_spi_freq; unsigned int bit_sum = 8; u32 otp_val, otp_id_reg; u32 *otp_mem; @@ -326,9 +325,6 @@ static int cs35l41_otp_unpack(void *data) goto err_otp_unpack; } - if (cs35l41->otp_setup) - cs35l41->otp_setup(cs35l41, true, &orig_spi_freq); - ret = regmap_bulk_read(cs35l41->regmap, CS35L41_OTP_MEM0, otp_mem, CS35L41_OTP_SIZE_WORDS); if (ret < 0) { @@ -336,9 +332,6 @@ static int cs35l41_otp_unpack(void *data) goto err_otp_unpack; } - if (cs35l41->otp_setup) - cs35l41->otp_setup(cs35l41, false, &orig_spi_freq); - otp_map = otp_map_match->map; bit_offset = otp_map_match->bit_offset; @@ -612,6 +605,12 @@ static const struct snd_soc_dapm_widget cs35l41_dapm_widgets[] = { SND_SOC_DAPM_AIF_OUT("ASPTX3", NULL, 0, CS35L41_SP_ENABLES, 2, 0), SND_SOC_DAPM_AIF_OUT("ASPTX4", NULL, 0, CS35L41_SP_ENABLES, 3, 0), + SND_SOC_DAPM_SIGGEN("VSENSE"), + SND_SOC_DAPM_SIGGEN("ISENSE"), + SND_SOC_DAPM_SIGGEN("VP"), + SND_SOC_DAPM_SIGGEN("VBST"), + SND_SOC_DAPM_SIGGEN("TEMP"), + SND_SOC_DAPM_ADC("VMON ADC", NULL, CS35L41_PWR_CTRL2, 12, 0), SND_SOC_DAPM_ADC("IMON ADC", NULL, CS35L41_PWR_CTRL2, 13, 0), SND_SOC_DAPM_ADC("VPMON ADC", NULL, CS35L41_PWR_CTRL2, 8, 0), @@ -623,12 +622,6 @@ static const struct snd_soc_dapm_widget cs35l41_dapm_widgets[] = { cs35l41_main_amp_event, SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_POST_PMU), - SND_SOC_DAPM_INPUT("VP"), - SND_SOC_DAPM_INPUT("VBST"), - SND_SOC_DAPM_INPUT("ISENSE"), - SND_SOC_DAPM_INPUT("VSENSE"), - SND_SOC_DAPM_INPUT("TEMP"), - SND_SOC_DAPM_MUX("ASP TX1 Source", SND_SOC_NOPM, 0, 0, &asp_tx1_mux), SND_SOC_DAPM_MUX("ASP TX2 Source", SND_SOC_NOPM, 0, 0, &asp_tx2_mux), SND_SOC_DAPM_MUX("ASP TX3 Source", SND_SOC_NOPM, 0, 0, &asp_tx3_mux), @@ -674,8 +667,8 @@ static const struct snd_soc_dapm_route cs35l41_audio_map[] = { {"VMON ADC", NULL, "VSENSE"}, {"IMON ADC", NULL, "ISENSE"}, {"VPMON ADC", NULL, "VP"}, - {"TEMPMON ADC", NULL, "TEMP"}, {"VBSTMON ADC", NULL, "VBST"}, + {"TEMPMON ADC", NULL, "TEMP"}, {"ASPRX1", NULL, "AMP Playback"}, {"ASPRX2", NULL, "AMP Playback"}, diff --git a/sound/soc/codecs/cs35l41.h b/sound/soc/codecs/cs35l41.h index 6cffe8a55beb..48485b08a6f1 100644 --- a/sound/soc/codecs/cs35l41.h +++ b/sound/soc/codecs/cs35l41.h @@ -726,7 +726,7 @@ #define CS35L41_FS2_WINDOW_MASK 0x00FFF800 #define CS35L41_FS2_WINDOW_SHIFT 12 -#define CS35L41_SPI_MAX_FREQ_OTP 4000000 +#define CS35L41_SPI_MAX_FREQ 4000000 #define CS35L41_RX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE) #define CS35L41_TX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE) @@ -764,8 +764,6 @@ 
struct cs35l41_private { int irq; /* GPIO for /RST */ struct gpio_desc *reset_gpio; - void (*otp_setup)(struct cs35l41_private *cs35l41, bool is_pre_setup, - unsigned int *freq); }; int cs35l41_probe(struct cs35l41_private *cs35l41, diff --git a/sound/soc/codecs/lpass-rx-macro.c b/sound/soc/codecs/lpass-rx-macro.c index 2bed5cf229be..aec5127260fd 100644 --- a/sound/soc/codecs/lpass-rx-macro.c +++ b/sound/soc/codecs/lpass-rx-macro.c @@ -2188,7 +2188,7 @@ static int rx_macro_config_classh(struct snd_soc_component *component, snd_soc_component_update_bits(component, CDC_RX_CLSH_DECAY_CTRL, CDC_RX_CLSH_DECAY_RATE_MASK, 0x0); - snd_soc_component_update_bits(component, + snd_soc_component_write_field(component, CDC_RX_RX1_RX_PATH_CFG0, CDC_RX_RXn_CLSH_EN_MASK, 0x1); break; diff --git a/sound/soc/codecs/rk817_codec.c b/sound/soc/codecs/rk817_codec.c index 943d7d933e81..03f24edfe4f6 100644 --- a/sound/soc/codecs/rk817_codec.c +++ b/sound/soc/codecs/rk817_codec.c @@ -539,3 +539,4 @@ module_platform_driver(rk817_codec_driver); MODULE_DESCRIPTION("ASoC RK817 codec driver"); MODULE_AUTHOR("binyuan <kevan.lan@rock-chips.com>"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:rk817-codec"); diff --git a/sound/soc/codecs/rt1011.c b/sound/soc/codecs/rt1011.c index 297af7ff824c..b62301a6281f 100644 --- a/sound/soc/codecs/rt1011.c +++ b/sound/soc/codecs/rt1011.c @@ -1311,13 +1311,54 @@ static int rt1011_r0_load_info(struct snd_kcontrol *kcontrol, .put = rt1011_r0_load_mode_put \ } -static const char * const rt1011_i2s_ref_texts[] = { - "Left Channel", "Right Channel" +static const char * const rt1011_i2s_ref[] = { + "None", "Left Channel", "Right Channel" }; -static SOC_ENUM_SINGLE_DECL(rt1011_i2s_ref_enum, - RT1011_TDM1_SET_1, 7, - rt1011_i2s_ref_texts); +static SOC_ENUM_SINGLE_DECL(rt1011_i2s_ref_enum, 0, 0, + rt1011_i2s_ref); + +static int rt1011_i2s_ref_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *component = + snd_soc_kcontrol_component(kcontrol); + struct rt1011_priv *rt1011 = + snd_soc_component_get_drvdata(component); + + rt1011->i2s_ref = ucontrol->value.enumerated.item[0]; + switch (rt1011->i2s_ref) { + case RT1011_I2S_REF_LEFT_CH: + regmap_write(rt1011->regmap, RT1011_TDM_TOTAL_SET, 0x0240); + regmap_write(rt1011->regmap, RT1011_TDM1_SET_2, 0x8); + regmap_write(rt1011->regmap, RT1011_TDM1_SET_1, 0x1022); + regmap_write(rt1011->regmap, RT1011_ADCDAT_OUT_SOURCE, 0x4); + break; + case RT1011_I2S_REF_RIGHT_CH: + regmap_write(rt1011->regmap, RT1011_TDM_TOTAL_SET, 0x0240); + regmap_write(rt1011->regmap, RT1011_TDM1_SET_2, 0x8); + regmap_write(rt1011->regmap, RT1011_TDM1_SET_1, 0x10a2); + regmap_write(rt1011->regmap, RT1011_ADCDAT_OUT_SOURCE, 0x4); + break; + default: + dev_info(component->dev, "I2S Reference: Do nothing\n"); + } + + return 0; +} + +static int rt1011_i2s_ref_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *component = + snd_soc_kcontrol_component(kcontrol); + struct rt1011_priv *rt1011 = + snd_soc_component_get_drvdata(component); + + ucontrol->value.enumerated.item[0] = rt1011->i2s_ref; + + return 0; +} static const struct snd_kcontrol_new rt1011_snd_controls[] = { /* I2S Data In Selection */ @@ -1358,7 +1399,8 @@ static const struct snd_kcontrol_new rt1011_snd_controls[] = { SOC_SINGLE("R0 Temperature", RT1011_STP_INITIAL_RESISTANCE_TEMP, 2, 255, 0), /* I2S Reference */ - SOC_ENUM("I2S Reference", rt1011_i2s_ref_enum), + SOC_ENUM_EXT("I2S Reference", 
rt1011_i2s_ref_enum, + rt1011_i2s_ref_get, rt1011_i2s_ref_put), }; static int rt1011_is_sys_clk_from_pll(struct snd_soc_dapm_widget *source, @@ -2017,6 +2059,7 @@ static int rt1011_probe(struct snd_soc_component *component) schedule_work(&rt1011->cali_work); + rt1011->i2s_ref = 0; rt1011->bq_drc_params = devm_kcalloc(component->dev, RT1011_ADVMODE_NUM, sizeof(struct rt1011_bq_drc_params *), GFP_KERNEL); diff --git a/sound/soc/codecs/rt1011.h b/sound/soc/codecs/rt1011.h index 68fadc15fa8c..4d6e7492d99c 100644 --- a/sound/soc/codecs/rt1011.h +++ b/sound/soc/codecs/rt1011.h @@ -654,6 +654,12 @@ enum { RT1011_AIFS }; +enum { + RT1011_I2S_REF_NONE, + RT1011_I2S_REF_LEFT_CH, + RT1011_I2S_REF_RIGHT_CH, +}; + /* BiQual & DRC related settings */ #define RT1011_BQ_DRC_NUM 128 struct rt1011_bq_drc_params { @@ -692,6 +698,7 @@ struct rt1011_priv { unsigned int r0_reg, cali_done; unsigned int r0_calib, temperature_calib; int recv_spk_mode; + int i2s_ref; }; #endif /* end of _RT1011_H_ */ diff --git a/sound/soc/codecs/rt5682-i2c.c b/sound/soc/codecs/rt5682-i2c.c index 983347b65127..20e0f90ea498 100644 --- a/sound/soc/codecs/rt5682-i2c.c +++ b/sound/soc/codecs/rt5682-i2c.c @@ -198,6 +198,7 @@ static int rt5682_i2c_probe(struct i2c_client *i2c, } mutex_init(&rt5682->calibrate_mutex); + mutex_init(&rt5682->jdet_mutex); rt5682_calibrate(rt5682); rt5682_apply_patch_list(rt5682, &i2c->dev); diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c index 78b4cb5fb6c8..5224123d0d3b 100644 --- a/sound/soc/codecs/rt5682.c +++ b/sound/soc/codecs/rt5682.c @@ -48,6 +48,8 @@ static const struct reg_sequence patch_list[] = { {RT5682_SAR_IL_CMD_6, 0x0110}, {RT5682_CHARGE_PUMP_1, 0x0210}, {RT5682_HP_LOGIC_CTRL_2, 0x0007}, + {RT5682_SAR_IL_CMD_2, 0xac00}, + {RT5682_CBJ_CTRL_7, 0x0104}, }; void rt5682_apply_patch_list(struct rt5682_priv *rt5682, struct device *dev) @@ -940,6 +942,10 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert) snd_soc_component_update_bits(component, RT5682_HP_CHARGE_PUMP_1, RT5682_OSW_L_MASK | RT5682_OSW_R_MASK, 0); + rt5682_enable_push_button_irq(component, false); + snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1, + RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_LOW); + usleep_range(55000, 60000); snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1, RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_HIGH); @@ -1092,6 +1098,7 @@ void rt5682_jack_detect_handler(struct work_struct *work) while (!rt5682->component->card->instantiated) usleep_range(10000, 15000); + mutex_lock(&rt5682->jdet_mutex); mutex_lock(&rt5682->calibrate_mutex); val = snd_soc_component_read(rt5682->component, RT5682_AJD1_CTRL) @@ -1165,6 +1172,7 @@ void rt5682_jack_detect_handler(struct work_struct *work) } mutex_unlock(&rt5682->calibrate_mutex); + mutex_unlock(&rt5682->jdet_mutex); } EXPORT_SYMBOL_GPL(rt5682_jack_detect_handler); @@ -1514,6 +1522,7 @@ static int rt5682_hp_event(struct snd_soc_dapm_widget *w, { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); + struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component); switch (event) { case SND_SOC_DAPM_PRE_PMU: @@ -1525,12 +1534,17 @@ static int rt5682_hp_event(struct snd_soc_dapm_widget *w, RT5682_DEPOP_1, 0x60, 0x60); snd_soc_component_update_bits(component, RT5682_DAC_ADC_DIG_VOL1, 0x00c0, 0x0080); + + mutex_lock(&rt5682->jdet_mutex); + snd_soc_component_update_bits(component, RT5682_HP_CTRL_2, RT5682_HP_C2_DAC_L_EN | RT5682_HP_C2_DAC_R_EN, RT5682_HP_C2_DAC_L_EN | RT5682_HP_C2_DAC_R_EN); usleep_range(5000, 
10000); snd_soc_component_update_bits(component, RT5682_CHARGE_PUMP_1, RT5682_CP_SW_SIZE_MASK, RT5682_CP_SW_SIZE_L); + + mutex_unlock(&rt5682->jdet_mutex); break; case SND_SOC_DAPM_POST_PMD: @@ -2844,6 +2858,8 @@ int rt5682_register_dai_clks(struct rt5682_priv *rt5682) for (i = 0; i < RT5682_DAI_NUM_CLKS; ++i) { struct clk_init_data init = { }; + struct clk_parent_data parent_data; + const struct clk_hw *parent; dai_clk_hw = &rt5682->dai_clks_hw[i]; @@ -2851,17 +2867,17 @@ int rt5682_register_dai_clks(struct rt5682_priv *rt5682) case RT5682_DAI_WCLK_IDX: /* Make MCLK the parent of WCLK */ if (rt5682->mclk) { - init.parent_data = &(struct clk_parent_data){ + parent_data = (struct clk_parent_data){ .fw_name = "mclk", }; + init.parent_data = &parent_data; init.num_parents = 1; } break; case RT5682_DAI_BCLK_IDX: /* Make WCLK the parent of BCLK */ - init.parent_hws = &(const struct clk_hw *){ - &rt5682->dai_clks_hw[RT5682_DAI_WCLK_IDX] - }; + parent = &rt5682->dai_clks_hw[RT5682_DAI_WCLK_IDX]; + init.parent_hws = &parent; init.num_parents = 1; break; default: @@ -2942,10 +2958,7 @@ static int rt5682_suspend(struct snd_soc_component *component) cancel_delayed_work_sync(&rt5682->jack_detect_work); cancel_delayed_work_sync(&rt5682->jd_check_work); - if (rt5682->hs_jack && rt5682->jack_type == SND_JACK_HEADSET) { - snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1, - RT5682_MB1_PATH_MASK | RT5682_MB2_PATH_MASK, - RT5682_CTRL_MB1_REG | RT5682_CTRL_MB2_REG); + if (rt5682->hs_jack && (rt5682->jack_type & SND_JACK_HEADSET) == SND_JACK_HEADSET) { val = snd_soc_component_read(component, RT5682_CBJ_CTRL_2) & RT5682_JACK_TYPE_MASK; @@ -2967,10 +2980,17 @@ static int rt5682_suspend(struct snd_soc_component *component) /* enter SAR ADC power saving mode */ snd_soc_component_update_bits(component, RT5682_SAR_IL_CMD_1, RT5682_SAR_BUTT_DET_MASK | RT5682_SAR_BUTDET_MODE_MASK | - RT5682_SAR_BUTDET_RST_MASK | RT5682_SAR_SEL_MB1_MB2_MASK, 0); + RT5682_SAR_SEL_MB1_MB2_MASK, 0); + usleep_range(5000, 6000); + snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1, + RT5682_MB1_PATH_MASK | RT5682_MB2_PATH_MASK, + RT5682_CTRL_MB1_REG | RT5682_CTRL_MB2_REG); + usleep_range(10000, 12000); snd_soc_component_update_bits(component, RT5682_SAR_IL_CMD_1, - RT5682_SAR_BUTT_DET_MASK | RT5682_SAR_BUTDET_MODE_MASK | RT5682_SAR_BUTDET_RST_MASK, - RT5682_SAR_BUTT_DET_EN | RT5682_SAR_BUTDET_POW_SAV | RT5682_SAR_BUTDET_RST_NORMAL); + RT5682_SAR_BUTT_DET_MASK | RT5682_SAR_BUTDET_MODE_MASK, + RT5682_SAR_BUTT_DET_EN | RT5682_SAR_BUTDET_POW_SAV); + snd_soc_component_update_bits(component, RT5682_HP_CHARGE_PUMP_1, + RT5682_OSW_L_MASK | RT5682_OSW_R_MASK, 0); } regcache_cache_only(rt5682->regmap, true); @@ -2988,10 +3008,11 @@ static int rt5682_resume(struct snd_soc_component *component) regcache_cache_only(rt5682->regmap, false); regcache_sync(rt5682->regmap); - if (rt5682->hs_jack && rt5682->jack_type == SND_JACK_HEADSET) { + if (rt5682->hs_jack && (rt5682->jack_type & SND_JACK_HEADSET) == SND_JACK_HEADSET) { snd_soc_component_update_bits(component, RT5682_SAR_IL_CMD_1, RT5682_SAR_BUTDET_MODE_MASK | RT5682_SAR_SEL_MB1_MB2_MASK, RT5682_SAR_BUTDET_POW_NORM | RT5682_SAR_SEL_MB1_MB2_AUTO); + usleep_range(5000, 6000); snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1, RT5682_MB1_PATH_MASK | RT5682_MB2_PATH_MASK, RT5682_CTRL_MB1_FSM | RT5682_CTRL_MB2_FSM); @@ -2999,8 +3020,9 @@ static int rt5682_resume(struct snd_soc_component *component) RT5682_PWR_CBJ, RT5682_PWR_CBJ); } + rt5682->jack_type = 0; 
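/* jack_type is cleared first so that the detect worker, kicked below
 * with no delay, re-runs a full detection pass after resume instead of
 * trusting the cached pre-suspend state */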
mod_delayed_work(system_power_efficient_wq, - &rt5682->jack_detect_work, msecs_to_jiffies(250)); + &rt5682->jack_detect_work, msecs_to_jiffies(0)); return 0; } diff --git a/sound/soc/codecs/rt5682.h b/sound/soc/codecs/rt5682.h index d93829c35585..c917c76200ea 100644 --- a/sound/soc/codecs/rt5682.h +++ b/sound/soc/codecs/rt5682.h @@ -1463,6 +1463,7 @@ struct rt5682_priv { int jack_type; int irq_work_delay_time; + struct mutex jdet_mutex; }; extern const char *rt5682_supply_names[RT5682_NUM_SUPPLIES]; diff --git a/sound/soc/codecs/rt5682s.c b/sound/soc/codecs/rt5682s.c index 470957fcad6b..d49a4f68566d 100644 --- a/sound/soc/codecs/rt5682s.c +++ b/sound/soc/codecs/rt5682s.c @@ -2693,6 +2693,8 @@ static int rt5682s_register_dai_clks(struct snd_soc_component *component) for (i = 0; i < RT5682S_DAI_NUM_CLKS; ++i) { struct clk_init_data init = { }; + struct clk_parent_data parent_data; + const struct clk_hw *parent; dai_clk_hw = &rt5682s->dai_clks_hw[i]; @@ -2700,17 +2702,17 @@ static int rt5682s_register_dai_clks(struct snd_soc_component *component) case RT5682S_DAI_WCLK_IDX: /* Make MCLK the parent of WCLK */ if (rt5682s->mclk) { - init.parent_data = &(struct clk_parent_data){ + parent_data = (struct clk_parent_data){ .fw_name = "mclk", }; + init.parent_data = &parent_data; init.num_parents = 1; } break; case RT5682S_DAI_BCLK_IDX: /* Make WCLK the parent of BCLK */ - init.parent_hws = &(const struct clk_hw *){ - &rt5682s->dai_clks_hw[RT5682S_DAI_WCLK_IDX] - }; + parent = &rt5682s->dai_clks_hw[RT5682S_DAI_WCLK_IDX]; + init.parent_hws = &parent; init.num_parents = 1; break; default: diff --git a/sound/soc/codecs/rt9120.c b/sound/soc/codecs/rt9120.c index f9574980a407..7aa1772a915f 100644 --- a/sound/soc/codecs/rt9120.c +++ b/sound/soc/codecs/rt9120.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/bits.h> +#include <linux/bitfield.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> @@ -23,9 +24,11 @@ #define RT9120_REG_ERRRPT 0x10 #define RT9120_REG_MSVOL 0x20 #define RT9120_REG_SWRESET 0x40 +#define RT9120_REG_INTERCFG 0x63 #define RT9120_REG_INTERNAL0 0x65 #define RT9120_REG_INTERNAL1 0x69 #define RT9120_REG_UVPOPT 0x6C +#define RT9120_REG_DIGCFG 0xF8 #define RT9120_VID_MASK GENMASK(15, 8) #define RT9120_SWRST_MASK BIT(7) @@ -46,8 +49,10 @@ #define RT9120_CFG_WORDLEN_24 24 #define RT9120_CFG_WORDLEN_32 32 #define RT9120_DVDD_UVSEL_MASK GENMASK(5, 4) +#define RT9120_AUTOSYNC_MASK BIT(6) -#define RT9120_VENDOR_ID 0x4200 +#define RT9120_VENDOR_ID 0x42 +#define RT9120S_VENDOR_ID 0x43 #define RT9120_RESET_WAITMS 20 #define RT9120_CHIPON_WAITMS 20 #define RT9120_AMPON_WAITMS 50 @@ -61,9 +66,16 @@ SNDRV_PCM_FMTBIT_S24_LE |\ SNDRV_PCM_FMTBIT_S32_LE) +enum { + CHIP_IDX_RT9120 = 0, + CHIP_IDX_RT9120S, + CHIP_IDX_MAX +}; + struct rt9120_data { struct device *dev; struct regmap *regmap; + int chip_idx; }; /* 11bit [min,max,step] = [-103.9375dB, 24dB, 0.0625dB] */ @@ -149,8 +161,12 @@ static int rt9120_codec_probe(struct snd_soc_component *comp) snd_soc_component_init_regmap(comp, data->regmap); /* Internal setting */ - snd_soc_component_write(comp, RT9120_REG_INTERNAL1, 0x03); - snd_soc_component_write(comp, RT9120_REG_INTERNAL0, 0x69); + if (data->chip_idx == CHIP_IDX_RT9120S) { + snd_soc_component_write(comp, RT9120_REG_INTERCFG, 0xde); + snd_soc_component_write(comp, RT9120_REG_INTERNAL0, 0x66); + } else + snd_soc_component_write(comp, RT9120_REG_INTERNAL0, 0x04); + return 0; } @@ -201,8 +217,8 @@ static int rt9120_hw_params(struct snd_pcm_substream 
*substream, struct snd_soc_dai *dai) { struct snd_soc_component *comp = dai->component; - unsigned int param_width, param_slot_width; - int width; + unsigned int param_width, param_slot_width, auto_sync; + int width, fs; switch (width = params_width(param)) { case 16: @@ -240,6 +256,16 @@ static int rt9120_hw_params(struct snd_pcm_substream *substream, snd_soc_component_update_bits(comp, RT9120_REG_I2SWL, RT9120_AUDWL_MASK, param_slot_width); + + fs = width * params_channels(param); + /* If fs is divisible by 48, disable auto sync */ + if (fs % 48 == 0) + auto_sync = 0; + else + auto_sync = RT9120_AUTOSYNC_MASK; + + snd_soc_component_update_bits(comp, RT9120_REG_DIGCFG, + RT9120_AUTOSYNC_MASK, auto_sync); return 0; } @@ -279,9 +305,11 @@ static const struct regmap_range rt9120_rd_yes_ranges[] = { regmap_reg_range(0x20, 0x27), regmap_reg_range(0x30, 0x38), regmap_reg_range(0x3A, 0x40), + regmap_reg_range(0x63, 0x63), regmap_reg_range(0x65, 0x65), regmap_reg_range(0x69, 0x69), - regmap_reg_range(0x6C, 0x6C) + regmap_reg_range(0x6C, 0x6C), + regmap_reg_range(0xF8, 0xF8) }; static const struct regmap_access_table rt9120_rd_table = { @@ -297,9 +325,11 @@ static const struct regmap_range rt9120_wr_yes_ranges[] = { regmap_reg_range(0x30, 0x38), regmap_reg_range(0x3A, 0x3D), regmap_reg_range(0x40, 0x40), + regmap_reg_range(0x63, 0x63), regmap_reg_range(0x65, 0x65), regmap_reg_range(0x69, 0x69), - regmap_reg_range(0x6C, 0x6C) + regmap_reg_range(0x6C, 0x6C), + regmap_reg_range(0xF8, 0xF8) }; static const struct regmap_access_table rt9120_wr_table = { @@ -370,7 +400,7 @@ static int rt9120_reg_write(void *context, unsigned int reg, unsigned int val) static const struct regmap_config rt9120_regmap_config = { .reg_bits = 8, .val_bits = 32, - .max_register = RT9120_REG_UVPOPT, + .max_register = RT9120_REG_DIGCFG, .reg_read = rt9120_reg_read, .reg_write = rt9120_reg_write, @@ -388,8 +418,16 @@ static int rt9120_check_vendor_info(struct rt9120_data *data) if (ret) return ret; - if ((devid & RT9120_VID_MASK) != RT9120_VENDOR_ID) { - dev_err(data->dev, "DEVID not correct [0x%04x]\n", devid); + devid = FIELD_GET(RT9120_VID_MASK, devid); + switch (devid) { + case RT9120_VENDOR_ID: + data->chip_idx = CHIP_IDX_RT9120; + break; + case RT9120S_VENDOR_ID: + data->chip_idx = CHIP_IDX_RT9120S; + break; + default: + dev_err(data->dev, "DEVID not correct [0x%x]\n", devid); return -ENODEV; } diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c index c496b359f2f4..e63c6b723d76 100644 --- a/sound/soc/codecs/wcd934x.c +++ b/sound/soc/codecs/wcd934x.c @@ -1896,9 +1896,8 @@ static int wcd934x_hw_params(struct snd_pcm_substream *substream, } wcd->dai[dai->id].sconfig.rate = params_rate(params); - wcd934x_slim_set_hw_params(wcd, &wcd->dai[dai->id], substream->stream); - return 0; + return wcd934x_slim_set_hw_params(wcd, &wcd->dai[dai->id], substream->stream); } static int wcd934x_hw_free(struct snd_pcm_substream *substream, @@ -3257,6 +3256,9 @@ static int wcd934x_compander_set(struct snd_kcontrol *kc, int value = ucontrol->value.integer.value[0]; int sel; + if (wcd->comp_enabled[comp] == value) + return 0; + wcd->comp_enabled[comp] = value; sel = value ?
WCD934X_HPH_GAIN_SRC_SEL_COMPANDER : WCD934X_HPH_GAIN_SRC_SEL_REGISTER; @@ -3280,10 +3282,10 @@ static int wcd934x_compander_set(struct snd_kcontrol *kc, case COMPANDER_8: break; default: - break; + return 0; } - return 0; + return 1; } static int wcd934x_rx_hph_mode_get(struct snd_kcontrol *kc, @@ -3327,6 +3329,31 @@ static int slim_rx_mux_get(struct snd_kcontrol *kc, return 0; } +static int slim_rx_mux_to_dai_id(int mux) +{ + int aif_id; + + switch (mux) { + case 1: + aif_id = AIF1_PB; + break; + case 2: + aif_id = AIF2_PB; + break; + case 3: + aif_id = AIF3_PB; + break; + case 4: + aif_id = AIF4_PB; + break; + default: + aif_id = -1; + break; + } + + return aif_id; +} + static int slim_rx_mux_put(struct snd_kcontrol *kc, struct snd_ctl_elem_value *ucontrol) { @@ -3334,43 +3361,59 @@ static int slim_rx_mux_put(struct snd_kcontrol *kc, struct wcd934x_codec *wcd = dev_get_drvdata(w->dapm->dev); struct soc_enum *e = (struct soc_enum *)kc->private_value; struct snd_soc_dapm_update *update = NULL; + struct wcd934x_slim_ch *ch, *c; u32 port_id = w->shift; + bool found = false; + int mux_idx; + int prev_mux_idx = wcd->rx_port_value[port_id]; + int aif_id; - if (wcd->rx_port_value[port_id] == ucontrol->value.enumerated.item[0]) - return 0; + mux_idx = ucontrol->value.enumerated.item[0]; - wcd->rx_port_value[port_id] = ucontrol->value.enumerated.item[0]; + if (mux_idx == prev_mux_idx) + return 0; - switch (wcd->rx_port_value[port_id]) { + switch(mux_idx) { case 0: - list_del_init(&wcd->rx_chs[port_id].list); - break; - case 1: - list_add_tail(&wcd->rx_chs[port_id].list, - &wcd->dai[AIF1_PB].slim_ch_list); - break; - case 2: - list_add_tail(&wcd->rx_chs[port_id].list, - &wcd->dai[AIF2_PB].slim_ch_list); - break; - case 3: - list_add_tail(&wcd->rx_chs[port_id].list, - &wcd->dai[AIF3_PB].slim_ch_list); + aif_id = slim_rx_mux_to_dai_id(prev_mux_idx); + if (aif_id < 0) + return 0; + + list_for_each_entry_safe(ch, c, &wcd->dai[aif_id].slim_ch_list, list) { + if (ch->port == port_id + WCD934X_RX_START) { + found = true; + list_del_init(&ch->list); + break; + } + } + if (!found) + return 0; + break; - case 4: - list_add_tail(&wcd->rx_chs[port_id].list, - &wcd->dai[AIF4_PB].slim_ch_list); + case 1 ... 
4: + aif_id = slim_rx_mux_to_dai_id(mux_idx); + if (aif_id < 0) + return 0; + + if (list_empty(&wcd->rx_chs[port_id].list)) { + list_add_tail(&wcd->rx_chs[port_id].list, + &wcd->dai[aif_id].slim_ch_list); + } else { + dev_err(wcd->dev ,"SLIM_RX%d PORT is busy\n", port_id); + return 0; + } break; + default: - dev_err(wcd->dev, "Unknown AIF %d\n", - wcd->rx_port_value[port_id]); + dev_err(wcd->dev, "Unknown AIF %d\n", mux_idx); goto err; } + wcd->rx_port_value[port_id] = mux_idx; snd_soc_dapm_mux_update_power(w->dapm, kc, wcd->rx_port_value[port_id], e, update); - return 0; + return 1; err: return -EINVAL; } @@ -3816,6 +3859,7 @@ static int slim_tx_mixer_put(struct snd_kcontrol *kc, struct soc_mixer_control *mixer = (struct soc_mixer_control *)kc->private_value; int enable = ucontrol->value.integer.value[0]; + struct wcd934x_slim_ch *ch, *c; int dai_id = widget->shift; int port_id = mixer->shift; @@ -3823,17 +3867,32 @@ static int slim_tx_mixer_put(struct snd_kcontrol *kc, if (enable == wcd->tx_port_value[port_id]) return 0; - wcd->tx_port_value[port_id] = enable; - - if (enable) - list_add_tail(&wcd->tx_chs[port_id].list, - &wcd->dai[dai_id].slim_ch_list); - else - list_del_init(&wcd->tx_chs[port_id].list); + if (enable) { + if (list_empty(&wcd->tx_chs[port_id].list)) { + list_add_tail(&wcd->tx_chs[port_id].list, + &wcd->dai[dai_id].slim_ch_list); + } else { + dev_err(wcd->dev ,"SLIM_TX%d PORT is busy\n", port_id); + return 0; + } + } else { + bool found = false; + + list_for_each_entry_safe(ch, c, &wcd->dai[dai_id].slim_ch_list, list) { + if (ch->port == port_id) { + found = true; + list_del_init(&wcd->tx_chs[port_id].list); + break; + } + } + if (!found) + return 0; + } + wcd->tx_port_value[port_id] = enable; snd_soc_dapm_mixer_update_power(widget->dapm, kc, enable, update); - return 0; + return 1; } static const struct snd_kcontrol_new aif1_slim_cap_mixer[] = { diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c index 52de7d14b139..67151c7770c6 100644 --- a/sound/soc/codecs/wcd938x.c +++ b/sound/soc/codecs/wcd938x.c @@ -1174,6 +1174,9 @@ static bool wcd938x_readonly_register(struct device *dev, unsigned int reg) case WCD938X_DIGITAL_INTR_STATUS_0: case WCD938X_DIGITAL_INTR_STATUS_1: case WCD938X_DIGITAL_INTR_STATUS_2: + case WCD938X_DIGITAL_INTR_CLEAR_0: + case WCD938X_DIGITAL_INTR_CLEAR_1: + case WCD938X_DIGITAL_INTR_CLEAR_2: case WCD938X_DIGITAL_SWR_HM_TEST_0: case WCD938X_DIGITAL_SWR_HM_TEST_1: case WCD938X_DIGITAL_EFUSE_T_DATA_0: diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index d4f0d72cbcc8..6cb01a8e08fb 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c @@ -617,8 +617,9 @@ static int wm_adsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl) switch (cs_dsp->fw_ver) { case 0: case 1: - snprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s %s %x", - cs_dsp->name, region_name, cs_ctl->alg_region.alg); + ret = scnprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, + "%s %s %x", cs_dsp->name, region_name, + cs_ctl->alg_region.alg); break; case 2: ret = scnprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c index 2da4a5fa7a18..564b78f3cdd0 100644 --- a/sound/soc/codecs/wsa881x.c +++ b/sound/soc/codecs/wsa881x.c @@ -772,7 +772,8 @@ static int wsa881x_put_pa_gain(struct snd_kcontrol *kc, usleep_range(1000, 1010); } - return 0; + + return 1; } static int wsa881x_get_port(struct snd_kcontrol *kcontrol, @@ -816,15 +817,22 @@ static int wsa881x_set_port(struct snd_kcontrol *kcontrol, 
(struct soc_mixer_control *)kcontrol->private_value; int portidx = mixer->reg; - if (ucontrol->value.integer.value[0]) + if (ucontrol->value.integer.value[0]) { + if (data->port_enable[portidx]) + return 0; + data->port_enable[portidx] = true; - else + } else { + if (!data->port_enable[portidx]) + return 0; + data->port_enable[portidx] = false; + } if (portidx == WSA881X_PORT_BOOST) /* Boost Switch */ wsa881x_boost_ctrl(comp, data->port_enable[portidx]); - return 0; + return 1; } static const char * const smart_boost_lvl_text[] = { diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c index f10496206cee..77219c3f8766 100644 --- a/sound/soc/intel/boards/sof_sdw.c +++ b/sound/soc/intel/boards/sof_sdw.c @@ -248,6 +248,75 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = { SOF_BT_OFFLOAD_SSP(2) | SOF_SSP_BT_OFFLOAD_PRESENT), }, + { + .callback = sof_sdw_quirk_cb, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0AF3"), + }, + /* No Jack */ + .driver_data = (void *)(SOF_SDW_TGL_HDMI | + SOF_SDW_FOUR_SPK), + }, + { + .callback = sof_sdw_quirk_cb, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B00") + }, + .driver_data = (void *)(SOF_SDW_TGL_HDMI | + RT711_JD2 | + SOF_SDW_FOUR_SPK), + }, + { + .callback = sof_sdw_quirk_cb, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B01") + }, + .driver_data = (void *)(SOF_SDW_TGL_HDMI | + RT711_JD2 | + SOF_SDW_FOUR_SPK), + }, + { + .callback = sof_sdw_quirk_cb, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B11") + }, + .driver_data = (void *)(SOF_SDW_TGL_HDMI | + RT711_JD2 | + SOF_SDW_FOUR_SPK), + }, + { + .callback = sof_sdw_quirk_cb, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B12") + }, + .driver_data = (void *)(SOF_SDW_TGL_HDMI | + RT711_JD2 | + SOF_SDW_FOUR_SPK), + }, + { + .callback = sof_sdw_quirk_cb, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B13"), + }, + /* No Jack */ + .driver_data = (void *)SOF_SDW_TGL_HDMI, + }, + { + .callback = sof_sdw_quirk_cb, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B29"), + }, + .driver_data = (void *)(SOF_SDW_TGL_HDMI | + RT711_JD2 | + SOF_SDW_FOUR_SPK), + }, {} }; diff --git a/sound/soc/intel/common/soc-acpi-intel-adl-match.c b/sound/soc/intel/common/soc-acpi-intel-adl-match.c index 06f503452aa5..b61a778a9d26 100644 --- a/sound/soc/intel/common/soc-acpi-intel-adl-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-adl-match.c @@ -74,6 +74,15 @@ static const struct snd_soc_acpi_adr_device rt711_sdca_0_adr[] = { } }; +static const struct snd_soc_acpi_adr_device rt711_sdca_2_adr[] = { + { + .adr = 0x000230025D071101ull, + .num_endpoints = 1, + .endpoints = &single_endpoint, + .name_prefix = "rt711" + } +}; + static const struct snd_soc_acpi_adr_device rt1316_1_group1_adr[] = { { .adr = 0x000131025D131601ull, /* unique ID is set for some reason */ @@ -101,6 +110,24 @@ static const struct snd_soc_acpi_adr_device rt1316_3_group1_adr[] = { } }; +static const struct snd_soc_acpi_adr_device rt1316_0_group2_adr[] = { + { + .adr = 0x000031025D131601ull, + .num_endpoints = 1, + .endpoints = &spk_l_endpoint, + .name_prefix = "rt1316-1" + } +}; + +static const struct snd_soc_acpi_adr_device rt1316_1_group2_adr[] = { + { + .adr = 0x000130025D131601ull, + .num_endpoints = 
1, + .endpoints = &spk_r_endpoint, + .name_prefix = "rt1316-2" + } +}; + static const struct snd_soc_acpi_adr_device rt1316_2_single_adr[] = { { .adr = 0x000230025D131601ull, @@ -209,6 +236,63 @@ static const struct snd_soc_acpi_link_adr adl_sdca_3_in_1[] = { {} }; +static const struct snd_soc_acpi_link_adr adl_sdw_rt711_link2_rt1316_link01_rt714_link3[] = { + { + .mask = BIT(2), + .num_adr = ARRAY_SIZE(rt711_sdca_2_adr), + .adr_d = rt711_sdca_2_adr, + }, + { + .mask = BIT(0), + .num_adr = ARRAY_SIZE(rt1316_0_group2_adr), + .adr_d = rt1316_0_group2_adr, + }, + { + .mask = BIT(1), + .num_adr = ARRAY_SIZE(rt1316_1_group2_adr), + .adr_d = rt1316_1_group2_adr, + }, + { + .mask = BIT(3), + .num_adr = ARRAY_SIZE(rt714_3_adr), + .adr_d = rt714_3_adr, + }, + {} +}; + +static const struct snd_soc_acpi_link_adr adl_sdw_rt1316_link12_rt714_link0[] = { + { + .mask = BIT(1), + .num_adr = ARRAY_SIZE(rt1316_1_group1_adr), + .adr_d = rt1316_1_group1_adr, + }, + { + .mask = BIT(2), + .num_adr = ARRAY_SIZE(rt1316_2_group1_adr), + .adr_d = rt1316_2_group1_adr, + }, + { + .mask = BIT(0), + .num_adr = ARRAY_SIZE(rt714_0_adr), + .adr_d = rt714_0_adr, + }, + {} +}; + +static const struct snd_soc_acpi_link_adr adl_sdw_rt1316_link2_rt714_link3[] = { + { + .mask = BIT(2), + .num_adr = ARRAY_SIZE(rt1316_2_single_adr), + .adr_d = rt1316_2_single_adr, + }, + { + .mask = BIT(3), + .num_adr = ARRAY_SIZE(rt714_3_adr), + .adr_d = rt714_3_adr, + }, + {} +}; + static const struct snd_soc_acpi_link_adr adl_sdw_rt1316_link2_rt714_link0[] = { { .mask = BIT(2), @@ -340,6 +424,27 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[] = { .sof_tplg_filename = "sof-adl-rt711-l0-rt1316-l13-rt714-l2.tplg", }, { + .link_mask = 0xF, /* 4 active links required */ + .links = adl_sdw_rt711_link2_rt1316_link01_rt714_link3, + .drv_name = "sof_sdw", + .sof_fw_filename = "sof-adl.ri", + .sof_tplg_filename = "sof-adl-rt711-l2-rt1316-l01-rt714-l3.tplg", + }, + { + .link_mask = 0xC, /* rt1316 on link2 & rt714 on link3 */ + .links = adl_sdw_rt1316_link2_rt714_link3, + .drv_name = "sof_sdw", + .sof_fw_filename = "sof-adl.ri", + .sof_tplg_filename = "sof-adl-rt1316-l2-mono-rt714-l3.tplg", + }, + { + .link_mask = 0x7, /* rt714 on link0 & two rt1316s on link1 and link2 */ + .links = adl_sdw_rt1316_link12_rt714_link0, + .drv_name = "sof_sdw", + .sof_fw_filename = "sof-adl.ri", + .sof_tplg_filename = "sof-adl-rt1316-l12-rt714-l0.tplg", + }, + { .link_mask = 0x5, /* 2 active links required */ .links = adl_sdw_rt1316_link2_rt714_link0, .drv_name = "sof_sdw", diff --git a/sound/soc/intel/common/soc-acpi-intel-cml-match.c b/sound/soc/intel/common/soc-acpi-intel-cml-match.c index b4eb0c97edf1..4eebc79d4b48 100644 --- a/sound/soc/intel/common/soc-acpi-intel-cml-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-cml-match.c @@ -81,6 +81,12 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_machines[] = { .sof_fw_filename = "sof-cml.ri", .sof_tplg_filename = "sof-cml-da7219-max98390.tplg", }, + { + .id = "ESSX8336", + .drv_name = "sof-essx8336", + .sof_fw_filename = "sof-cml.ri", + .sof_tplg_filename = "sof-cml-es8336.tplg", + }, {}, }; EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_cml_machines); diff --git a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c index 6350390414d4..31494930433f 100644 --- a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c +++ b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c @@ -1054,6 +1054,7 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev) int irq_id; struct mtk_base_afe *afe; 
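/* separate components are allocated for the PCM and HDMI DAIs below so
 * that each can be registered with its own debugfs prefix */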
struct mt8173_afe_private *afe_priv; + struct snd_soc_component *comp_pcm, *comp_hdmi; ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(33)); if (ret) @@ -1142,23 +1143,55 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev) if (ret) goto err_pm_disable; - ret = devm_snd_soc_register_component(&pdev->dev, - &mt8173_afe_pcm_dai_component, - mt8173_afe_pcm_dais, - ARRAY_SIZE(mt8173_afe_pcm_dais)); + comp_pcm = devm_kzalloc(&pdev->dev, sizeof(*comp_pcm), GFP_KERNEL); + if (!comp_pcm) { + ret = -ENOMEM; + goto err_pm_disable; + } + + ret = snd_soc_component_initialize(comp_pcm, + &mt8173_afe_pcm_dai_component, + &pdev->dev); if (ret) goto err_pm_disable; - ret = devm_snd_soc_register_component(&pdev->dev, - &mt8173_afe_hdmi_dai_component, - mt8173_afe_hdmi_dais, - ARRAY_SIZE(mt8173_afe_hdmi_dais)); +#ifdef CONFIG_DEBUG_FS + comp_pcm->debugfs_prefix = "pcm"; +#endif + + ret = snd_soc_add_component(comp_pcm, + mt8173_afe_pcm_dais, + ARRAY_SIZE(mt8173_afe_pcm_dais)); + if (ret) + goto err_pm_disable; + + comp_hdmi = devm_kzalloc(&pdev->dev, sizeof(*comp_hdmi), GFP_KERNEL); + if (!comp_hdmi) { + ret = -ENOMEM; + goto err_pm_disable; + } + + ret = snd_soc_component_initialize(comp_hdmi, + &mt8173_afe_hdmi_dai_component, + &pdev->dev); if (ret) goto err_pm_disable; +#ifdef CONFIG_DEBUG_FS + comp_hdmi->debugfs_prefix = "hdmi"; +#endif + + ret = snd_soc_add_component(comp_hdmi, + mt8173_afe_hdmi_dais, + ARRAY_SIZE(mt8173_afe_hdmi_dais)); + if (ret) + goto err_cleanup_components; + dev_info(&pdev->dev, "MT8173 AFE driver initialized.\n"); return 0; +err_cleanup_components: + snd_soc_unregister_component(&pdev->dev); err_pm_disable: pm_runtime_disable(&pdev->dev); return ret; @@ -1166,6 +1199,8 @@ err_pm_disable: static int mt8173_afe_pcm_dev_remove(struct platform_device *pdev) { + snd_soc_unregister_component(&pdev->dev); + pm_runtime_disable(&pdev->dev); if (!pm_runtime_status_suspended(&pdev->dev)) mt8173_afe_runtime_suspend(&pdev->dev); diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650.c b/sound/soc/mediatek/mt8173/mt8173-rt5650.c index c28ebf891cb0..2cbf679f5c74 100644 --- a/sound/soc/mediatek/mt8173/mt8173-rt5650.c +++ b/sound/soc/mediatek/mt8173/mt8173-rt5650.c @@ -30,15 +30,15 @@ static struct mt8173_rt5650_platform_data mt8173_rt5650_priv = { }; static const struct snd_soc_dapm_widget mt8173_rt5650_widgets[] = { - SND_SOC_DAPM_SPK("Speaker", NULL), + SND_SOC_DAPM_SPK("Ext Spk", NULL), SND_SOC_DAPM_MIC("Int Mic", NULL), SND_SOC_DAPM_HP("Headphone", NULL), SND_SOC_DAPM_MIC("Headset Mic", NULL), }; static const struct snd_soc_dapm_route mt8173_rt5650_routes[] = { - {"Speaker", NULL, "SPOL"}, - {"Speaker", NULL, "SPOR"}, + {"Ext Spk", NULL, "SPOL"}, + {"Ext Spk", NULL, "SPOR"}, {"DMIC L1", NULL, "Int Mic"}, {"DMIC R1", NULL, "Int Mic"}, {"Headphone", NULL, "HPOL"}, @@ -48,7 +48,7 @@ static const struct snd_soc_dapm_route mt8173_rt5650_routes[] = { }; static const struct snd_kcontrol_new mt8173_rt5650_controls[] = { - SOC_DAPM_PIN_SWITCH("Speaker"), + SOC_DAPM_PIN_SWITCH("Ext Spk"), SOC_DAPM_PIN_SWITCH("Int Mic"), SOC_DAPM_PIN_SWITCH("Headphone"), SOC_DAPM_PIN_SWITCH("Headset Mic"), diff --git a/sound/soc/qcom/qdsp6/audioreach.h b/sound/soc/qcom/qdsp6/audioreach.h index 4f693a2660b5..3ee8bfcd0121 100644 --- a/sound/soc/qcom/qdsp6/audioreach.h +++ b/sound/soc/qcom/qdsp6/audioreach.h @@ -550,6 +550,10 @@ struct audio_hw_clk_cfg { uint32_t clock_root; } __packed; +struct audio_hw_clk_rel_cfg { + uint32_t clock_id; +} __packed; + #define PARAM_ID_HW_EP_POWER_MODE_CFG 
0x8001176 #define AR_HW_EP_POWER_MODE_0 0 /* default */ #define AR_HW_EP_POWER_MODE_1 1 /* XO Shutdown allowed */ diff --git a/sound/soc/qcom/qdsp6/q6adm.c b/sound/soc/qcom/qdsp6/q6adm.c index 3d831b635524..72c5719f1d25 100644 --- a/sound/soc/qcom/qdsp6/q6adm.c +++ b/sound/soc/qcom/qdsp6/q6adm.c @@ -390,7 +390,7 @@ struct q6copp *q6adm_open(struct device *dev, int port_id, int path, int rate, int ret = 0; if (port_id < 0) { - dev_err(dev, "Invalid port_id 0x%x\n", port_id); + dev_err(dev, "Invalid port_id %d\n", port_id); return ERR_PTR(-EINVAL); } @@ -508,7 +508,7 @@ int q6adm_matrix_map(struct device *dev, int path, int port_idx = payload_map.port_id[i]; if (port_idx < 0) { - dev_err(dev, "Invalid port_id 0x%x\n", + dev_err(dev, "Invalid port_id %d\n", payload_map.port_id[i]); kfree(pkt); return -EINVAL; diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c index 46f365528d50..b74b67720ef4 100644 --- a/sound/soc/qcom/qdsp6/q6asm-dai.c +++ b/sound/soc/qcom/qdsp6/q6asm-dai.c @@ -269,9 +269,7 @@ static int q6asm_dai_prepare(struct snd_soc_component *component, if (ret < 0) { dev_err(dev, "%s: q6asm_open_write failed\n", __func__); - q6asm_audio_client_free(prtd->audio_client); - prtd->audio_client = NULL; - return -ENOMEM; + goto open_err; } prtd->session_id = q6asm_get_session_id(prtd->audio_client); @@ -279,7 +277,7 @@ static int q6asm_dai_prepare(struct snd_soc_component *component, prtd->session_id, substream->stream); if (ret) { dev_err(dev, "%s: stream reg failed ret:%d\n", __func__, ret); - return ret; + goto routing_err; } if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { @@ -301,10 +299,19 @@ static int q6asm_dai_prepare(struct snd_soc_component *component, } if (ret < 0) dev_info(dev, "%s: CMD Format block failed\n", __func__); + else + prtd->state = Q6ASM_STREAM_RUNNING; - prtd->state = Q6ASM_STREAM_RUNNING; + return ret; - return 0; +routing_err: + q6asm_cmd(prtd->audio_client, prtd->stream_id, CMD_CLOSE); +open_err: + q6asm_unmap_memory_regions(substream->stream, prtd->audio_client); + q6asm_audio_client_free(prtd->audio_client); + prtd->audio_client = NULL; + + return ret; } static int q6asm_dai_trigger(struct snd_soc_component *component, diff --git a/sound/soc/qcom/qdsp6/q6prm.c b/sound/soc/qcom/qdsp6/q6prm.c index 82c40f2d4e1d..cda33ded29be 100644 --- a/sound/soc/qcom/qdsp6/q6prm.c +++ b/sound/soc/qcom/qdsp6/q6prm.c @@ -42,6 +42,12 @@ struct prm_cmd_request_rsc { struct audio_hw_clk_cfg clock_id; } __packed; +struct prm_cmd_release_rsc { + struct apm_module_param_data param_data; + uint32_t num_clk_id; + struct audio_hw_clk_rel_cfg clock_id; +} __packed; + static int q6prm_send_cmd_sync(struct q6prm *prm, struct gpr_pkt *pkt, uint32_t rsp_opcode) { return audioreach_send_cmd_sync(prm->dev, prm->gdev, &prm->result, &prm->lock, @@ -102,8 +108,8 @@ int q6prm_unvote_lpass_core_hw(struct device *dev, uint32_t hw_block_id, uint32_ } EXPORT_SYMBOL_GPL(q6prm_unvote_lpass_core_hw); -int q6prm_set_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_root, - unsigned int freq) +static int q6prm_request_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_root, + unsigned int freq) { struct q6prm *prm = dev_get_drvdata(dev->parent); struct apm_module_param_data *param_data; @@ -138,6 +144,49 @@ int q6prm_set_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_ return rc; } + +static int q6prm_release_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_root, + unsigned int freq) +{ + struct q6prm *prm = 
dev_get_drvdata(dev->parent); + struct apm_module_param_data *param_data; + struct prm_cmd_release_rsc *rel; + gpr_device_t *gdev = prm->gdev; + struct gpr_pkt *pkt; + int rc; + + pkt = audioreach_alloc_cmd_pkt(sizeof(*rel), PRM_CMD_RELEASE_HW_RSC, 0, gdev->svc.id, + GPR_PRM_MODULE_IID); + if (IS_ERR(pkt)) + return PTR_ERR(pkt); + + rel = (void *)pkt + GPR_HDR_SIZE + APM_CMD_HDR_SIZE; + + param_data = &rel->param_data; + + param_data->module_instance_id = GPR_PRM_MODULE_IID; + param_data->error_code = 0; + param_data->param_id = PARAM_ID_RSC_AUDIO_HW_CLK; + param_data->param_size = sizeof(*rel) - APM_MODULE_PARAM_DATA_SIZE; + + rel->num_clk_id = 1; + rel->clock_id.clock_id = clk_id; + + rc = q6prm_send_cmd_sync(prm, pkt, PRM_CMD_RSP_RELEASE_HW_RSC); + + kfree(pkt); + + return rc; +} + +int q6prm_set_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_root, + unsigned int freq) +{ + if (freq) + return q6prm_request_lpass_clock(dev, clk_id, clk_attr, clk_root, freq); + + return q6prm_release_lpass_clock(dev, clk_id, clk_attr, clk_root, freq); +} EXPORT_SYMBOL_GPL(q6prm_set_lpass_clock); static int prm_callback(struct gpr_resp_pkt *data, void *priv, int op) diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c index 3390ebef9549..928fd23e2c27 100644 --- a/sound/soc/qcom/qdsp6/q6routing.c +++ b/sound/soc/qcom/qdsp6/q6routing.c @@ -372,6 +372,12 @@ int q6routing_stream_open(int fedai_id, int perf_mode, } session = &routing_data->sessions[stream_id - 1]; + if (session->port_id < 0) { + dev_err(routing_data->dev, "Routing not setup for MultiMedia%d Session\n", + session->fedai_id); + return -EINVAL; + } + pdata = &routing_data->port_data[session->port_id]; mutex_lock(&routing_data->lock); @@ -492,9 +498,15 @@ static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol, struct session_data *session = &data->sessions[session_id]; if (ucontrol->value.integer.value[0]) { + if (session->port_id == be_id) + return 0; + session->port_id = be_id; snd_soc_dapm_mixer_update_power(dapm, kcontrol, 1, update); } else { + if (session->port_id == -1 || session->port_id != be_id) + return 0; + session->port_id = -1; snd_soc_dapm_mixer_update_power(dapm, kcontrol, 0, update); } diff --git a/sound/soc/rockchip/rockchip_i2s_tdm.c b/sound/soc/rockchip/rockchip_i2s_tdm.c index 17b9b287853a..5f9cb5c4c7f0 100644 --- a/sound/soc/rockchip/rockchip_i2s_tdm.c +++ b/sound/soc/rockchip/rockchip_i2s_tdm.c @@ -95,6 +95,7 @@ struct rk_i2s_tdm_dev { spinlock_t lock; /* xfer lock */ bool has_playback; bool has_capture; + struct snd_soc_dai_driver *dai; }; static int to_ch_num(unsigned int val) @@ -1310,19 +1311,14 @@ static const struct of_device_id rockchip_i2s_tdm_match[] = { {}, }; -static struct snd_soc_dai_driver i2s_tdm_dai = { +static const struct snd_soc_dai_driver i2s_tdm_dai = { .probe = rockchip_i2s_tdm_dai_probe, - .playback = { - .stream_name = "Playback", - }, - .capture = { - .stream_name = "Capture", - }, .ops = &rockchip_i2s_tdm_dai_ops, }; -static void rockchip_i2s_tdm_init_dai(struct rk_i2s_tdm_dev *i2s_tdm) +static int rockchip_i2s_tdm_init_dai(struct rk_i2s_tdm_dev *i2s_tdm) { + struct snd_soc_dai_driver *dai; struct property *dma_names; const char *dma_name; u64 formats = (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE | @@ -1337,19 +1333,33 @@ static void rockchip_i2s_tdm_init_dai(struct rk_i2s_tdm_dev *i2s_tdm) i2s_tdm->has_capture = true; } + dai = devm_kmemdup(i2s_tdm->dev, &i2s_tdm_dai, + sizeof(*dai), GFP_KERNEL); + if (!dai) + return -ENOMEM; + if
(i2s_tdm->has_playback) { - i2s_tdm_dai.playback.channels_min = 2; - i2s_tdm_dai.playback.channels_max = 8; - i2s_tdm_dai.playback.rates = SNDRV_PCM_RATE_8000_192000; - i2s_tdm_dai.playback.formats = formats; + dai->playback.stream_name = "Playback"; + dai->playback.channels_min = 2; + dai->playback.channels_max = 8; + dai->playback.rates = SNDRV_PCM_RATE_8000_192000; + dai->playback.formats = formats; } if (i2s_tdm->has_capture) { - i2s_tdm_dai.capture.channels_min = 2; - i2s_tdm_dai.capture.channels_max = 8; - i2s_tdm_dai.capture.rates = SNDRV_PCM_RATE_8000_192000; - i2s_tdm_dai.capture.formats = formats; + dai->capture.stream_name = "Capture"; + dai->capture.channels_min = 2; + dai->capture.channels_max = 8; + dai->capture.rates = SNDRV_PCM_RATE_8000_192000; + dai->capture.formats = formats; } + + if (i2s_tdm->clk_trcm != TRCM_TXRX) + dai->symmetric_rate = 1; + + i2s_tdm->dai = dai; + + return 0; } static int rockchip_i2s_tdm_path_check(struct rk_i2s_tdm_dev *i2s_tdm, @@ -1541,8 +1551,6 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev) spin_lock_init(&i2s_tdm->lock); i2s_tdm->soc_data = (struct rk_i2s_soc_data *)of_id->data; - rockchip_i2s_tdm_init_dai(i2s_tdm); - i2s_tdm->frame_width = 64; i2s_tdm->clk_trcm = TRCM_TXRX; @@ -1555,8 +1563,10 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev) } i2s_tdm->clk_trcm = TRCM_RX; } - if (i2s_tdm->clk_trcm != TRCM_TXRX) - i2s_tdm_dai.symmetric_rate = 1; + + ret = rockchip_i2s_tdm_init_dai(i2s_tdm); + if (ret) + return ret; i2s_tdm->grf = syscon_regmap_lookup_by_phandle(node, "rockchip,grf"); if (IS_ERR(i2s_tdm->grf)) @@ -1678,7 +1688,7 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev) ret = devm_snd_soc_register_component(&pdev->dev, &rockchip_i2s_tdm_component, - &i2s_tdm_dai, 1); + i2s_tdm->dai, 1); if (ret) { dev_err(&pdev->dev, "Could not register DAI\n"); diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c index 16c6e0265749..03e0d4eca781 100644 --- a/sound/soc/sh/rcar/dma.c +++ b/sound/soc/sh/rcar/dma.c @@ -102,7 +102,7 @@ static int rsnd_dmaen_stop(struct rsnd_mod *mod, struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma); if (dmaen->chan) - dmaengine_terminate_sync(dmaen->chan); + dmaengine_terminate_async(dmaen->chan); return 0; } diff --git a/sound/soc/soc-acpi.c b/sound/soc/soc-acpi.c index 2ae99b49d3f5..cbd7ea48837b 100644 --- a/sound/soc/soc-acpi.c +++ b/sound/soc/soc-acpi.c @@ -20,8 +20,10 @@ static bool snd_soc_acpi_id_present(struct snd_soc_acpi_mach *machine) if (comp_ids) { for (i = 0; i < comp_ids->num_codecs; i++) { - if (acpi_dev_present(comp_ids->codecs[i], NULL, -1)) + if (acpi_dev_present(comp_ids->codecs[i], NULL, -1)) { + strscpy(machine->id, comp_ids->codecs[i], ACPI_ID_LEN); return true; + } } } diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 2892b0aba151..b06c5682445c 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -2559,8 +2559,13 @@ static struct snd_soc_dapm_widget *dapm_find_widget( return NULL; } -static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm, - const char *pin, int status) +/* + * set the DAPM pin status: + * returns 1 when the value has been updated, 0 when unchanged, or a negative + * error code; called from kcontrol put callback + */ +static int __snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm, + const char *pin, int status) { struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true); int ret = 0; @@ -2586,6 +2591,18 @@ static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm, 
return ret; } +/* + * similar to __snd_soc_dapm_set_pin(), but returns 0 when successful; + * called from several API functions below + */ +static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm, + const char *pin, int status) +{ + int ret = __snd_soc_dapm_set_pin(dapm, pin, status); + + return ret < 0 ? ret : 0; +} + /** * snd_soc_dapm_sync_unlocked - scan and power dapm paths * @dapm: DAPM context @@ -3589,10 +3606,10 @@ int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol, const char *pin = (const char *)kcontrol->private_value; int ret; - if (ucontrol->value.integer.value[0]) - ret = snd_soc_dapm_enable_pin(&card->dapm, pin); - else - ret = snd_soc_dapm_disable_pin(&card->dapm, pin); + mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); + ret = __snd_soc_dapm_set_pin(&card->dapm, pin, + !!ucontrol->value.integer.value[0]); + mutex_unlock(&card->dapm_mutex); snd_soc_dapm_sync(&card->dapm); return ret; diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index 557e22c5254c..f5b9e66ac3b8 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c @@ -2700,6 +2700,7 @@ EXPORT_SYMBOL_GPL(snd_soc_tplg_component_load); /* remove dynamic controls from the component driver */ int snd_soc_tplg_component_remove(struct snd_soc_component *comp) { + struct snd_card *card = comp->card->snd_card; struct snd_soc_dobj *dobj, *next_dobj; int pass = SOC_TPLG_PASS_END; @@ -2707,6 +2708,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp) while (pass >= SOC_TPLG_PASS_START) { /* remove mixer controls */ + down_write(&card->controls_rwsem); list_for_each_entry_safe(dobj, next_dobj, &comp->dobj_list, list) { @@ -2745,6 +2747,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp) break; } } + up_write(&card->controls_rwsem); pass--; } diff --git a/sound/soc/sof/Kconfig b/sound/soc/sof/Kconfig index 6bb4db87af03..041c54639c4d 100644 --- a/sound/soc/sof/Kconfig +++ b/sound/soc/sof/Kconfig @@ -47,7 +47,7 @@ config SND_SOC_SOF_OF Say Y if you need this option. If unsure select "N".
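Several hunks in this series (the DAPM pin helper above, the wcd934x and wsa881x changes earlier, and the Tegra control rework below) converge on the same kcontrol contract: a put handler returns 1 when it actually changed state, 0 when the write was a no-op, and a negative errno on failure, so the ALSA core only emits change notifications for real updates. A minimal sketch of that contract, with hypothetical names for the driver-private state:

#include <sound/control.h>

struct example_priv {
	unsigned int cached_val;	/* last value applied to the hardware */
};

/* hypothetical put handler following the 1/0/-errno convention */
static int example_put(struct snd_kcontrol *kcontrol,
		       struct snd_ctl_elem_value *ucontrol)
{
	struct example_priv *priv = snd_kcontrol_chip(kcontrol);
	unsigned int value = ucontrol->value.enumerated.item[0];

	if (value == priv->cached_val)
		return 0;		/* unchanged: suppress the notification */

	priv->cached_val = value;
	/* ... program the new value into the hardware here ... */

	return 1;			/* changed: the core notifies listeners */
}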
config SND_SOC_SOF_COMPRESS - tristate + bool select SND_SOC_COMPRESS config SND_SOC_SOF_DEBUG_PROBES diff --git a/sound/soc/sof/control.c b/sound/soc/sof/control.c index 58bb89af4de1..bb1dfe4f6d40 100644 --- a/sound/soc/sof/control.c +++ b/sound/soc/sof/control.c @@ -69,7 +69,7 @@ static void snd_sof_refresh_control(struct snd_sof_control *scontrol) { struct sof_ipc_ctrl_data *cdata = scontrol->control_data; struct snd_soc_component *scomp = scontrol->scomp; - enum sof_ipc_ctrl_type ctrl_type; + u32 ipc_cmd; int ret; if (!scontrol->comp_data_dirty) @@ -79,9 +79,9 @@ static void snd_sof_refresh_control(struct snd_sof_control *scontrol) return; if (scontrol->cmd == SOF_CTRL_CMD_BINARY) - ctrl_type = SOF_IPC_COMP_GET_DATA; + ipc_cmd = SOF_IPC_COMP_GET_DATA; else - ctrl_type = SOF_IPC_COMP_GET_VALUE; + ipc_cmd = SOF_IPC_COMP_GET_VALUE; /* set the ABI header values */ cdata->data->magic = SOF_ABI_MAGIC; @@ -89,7 +89,7 @@ static void snd_sof_refresh_control(struct snd_sof_control *scontrol) /* refresh the component data from DSP */ scontrol->comp_data_dirty = false; - ret = snd_sof_ipc_set_get_comp_data(scontrol, ctrl_type, + ret = snd_sof_ipc_set_get_comp_data(scontrol, ipc_cmd, SOF_CTRL_TYPE_VALUE_CHAN_GET, scontrol->cmd, false); if (ret < 0) { diff --git a/sound/soc/sof/intel/hda-bus.c b/sound/soc/sof/intel/hda-bus.c index 30025d3c16b6..0862ff8b6627 100644 --- a/sound/soc/sof/intel/hda-bus.c +++ b/sound/soc/sof/intel/hda-bus.c @@ -10,6 +10,8 @@ #include <linux/io.h> #include <sound/hdaudio.h> #include <sound/hda_i915.h> +#include <sound/hda_codec.h> +#include <sound/hda_register.h> #include "../sof-priv.h" #include "hda.h" @@ -21,6 +23,18 @@ #endif #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA) +static void update_codec_wake_enable(struct hdac_bus *bus, unsigned int addr, bool link_power) +{ + unsigned int mask = snd_hdac_chip_readw(bus, WAKEEN); + + if (link_power) + mask &= ~BIT(addr); + else + mask |= BIT(addr); + + snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, mask); +} + static void sof_hda_bus_link_power(struct hdac_device *codec, bool enable) { struct hdac_bus *bus = codec->bus; @@ -41,6 +55,9 @@ static void sof_hda_bus_link_power(struct hdac_device *codec, bool enable) */ if (codec->addr == HDA_IDISP_ADDR && !enable) snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false); + + /* WAKEEN needs to be set for disabled links */ + update_codec_wake_enable(bus, codec->addr, enable); } static const struct hdac_bus_ops bus_core_ops = { diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c index 6744318de612..13cd96e6724a 100644 --- a/sound/soc/sof/intel/hda-codec.c +++ b/sound/soc/sof/intel/hda-codec.c @@ -22,6 +22,7 @@ #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC) #define IDISP_VID_INTEL 0x80860000 +#define CODEC_PROBE_RETRIES 3 /* load the legacy HDA codec driver */ static int request_codec_module(struct hda_codec *codec) @@ -121,12 +122,15 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address, u32 hda_cmd = (address << 28) | (AC_NODE_ROOT << 20) | (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID; u32 resp = -1; - int ret; + int ret, retry = 0; + + do { + mutex_lock(&hbus->core.cmd_mutex); + snd_hdac_bus_send_cmd(&hbus->core, hda_cmd); + snd_hdac_bus_get_response(&hbus->core, address, &resp); + mutex_unlock(&hbus->core.cmd_mutex); + } while (resp == -1 && retry++ < CODEC_PROBE_RETRIES); - mutex_lock(&hbus->core.cmd_mutex); - snd_hdac_bus_send_cmd(&hbus->core, hda_cmd); - snd_hdac_bus_get_response(&hbus->core, address, &resp); - 
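/* resp starts at -1 and is only overwritten when the codec answers the
 * vendor-ID verb, so the loop above retries the probe up to
 * CODEC_PROBE_RETRIES times before the -EIO below */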
mutex_unlock(&hbus->core.cmd_mutex); if (resp == -1) return -EIO; dev_dbg(sdev->dev, "HDA codec #%d probed OK: response: %x\n", diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c index 058baca2cd0e..287dc0eb6686 100644 --- a/sound/soc/sof/intel/hda-dsp.c +++ b/sound/soc/sof/intel/hda-dsp.c @@ -622,8 +622,7 @@ static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend) hda_dsp_ipc_int_disable(sdev); #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA) - if (runtime_suspend) - hda_codec_jack_wake_enable(sdev, true); + hda_codec_jack_wake_enable(sdev, runtime_suspend); /* power down all hda link */ snd_hdac_ext_bus_link_power_down_all(bus); diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c index 883d78dd01b5..2c0d4d06ab36 100644 --- a/sound/soc/sof/intel/hda.c +++ b/sound/soc/sof/intel/hda.c @@ -58,6 +58,13 @@ int hda_ctrl_dai_widget_setup(struct snd_soc_dapm_widget *w) return -EINVAL; } + /* DAI already configured, reset it before reconfiguring it */ + if (sof_dai->configured) { + ret = hda_ctrl_dai_widget_free(w); + if (ret < 0) + return ret; + } + config = &sof_dai->dai_config[sof_dai->current_config]; /* @@ -810,6 +817,20 @@ skip_soundwire: return 0; } +static void hda_check_for_state_change(struct snd_sof_dev *sdev) +{ +#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA) + struct hdac_bus *bus = sof_to_bus(sdev); + unsigned int codec_mask; + + codec_mask = snd_hdac_chip_readw(bus, STATESTS); + if (codec_mask) { + hda_codec_jack_check(sdev); + snd_hdac_chip_writew(bus, STATESTS, codec_mask); + } +#endif +} + static irqreturn_t hda_dsp_interrupt_handler(int irq, void *context) { struct snd_sof_dev *sdev = context; @@ -851,6 +872,8 @@ static irqreturn_t hda_dsp_interrupt_thread(int irq, void *context) if (hda_sdw_check_wakeen_irq(sdev)) hda_sdw_process_wakeen(sdev); + hda_check_for_state_change(sdev); + /* enable GIE interrupt */ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL, diff --git a/sound/soc/stm/stm32_i2s.c b/sound/soc/stm/stm32_i2s.c index 6254bacad6eb..717f45a83445 100644 --- a/sound/soc/stm/stm32_i2s.c +++ b/sound/soc/stm/stm32_i2s.c @@ -700,7 +700,7 @@ static int stm32_i2s_configure_clock(struct snd_soc_dai *cpu_dai, if (ret < 0) return ret; - nb_bits = frame_len * ((cgfr & I2S_CGFR_CHLEN) + 1); + nb_bits = frame_len * (FIELD_GET(I2S_CGFR_CHLEN, cgfr) + 1); ret = stm32_i2s_calc_clk_div(i2s, i2s_clock_rate, (nb_bits * rate)); if (ret) diff --git a/sound/soc/tegra/tegra186_dspk.c b/sound/soc/tegra/tegra186_dspk.c index 8ee9a77bd83d..a74c980ee775 100644 --- a/sound/soc/tegra/tegra186_dspk.c +++ b/sound/soc/tegra/tegra186_dspk.c @@ -26,51 +26,162 @@ static const struct reg_default tegra186_dspk_reg_defaults[] = { { TEGRA186_DSPK_CODEC_CTRL, 0x03000000 }, }; -static int tegra186_dspk_get_control(struct snd_kcontrol *kcontrol, +static int tegra186_dspk_get_fifo_th(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec); - if (strstr(kcontrol->id.name, "FIFO Threshold")) - ucontrol->value.integer.value[0] = dspk->rx_fifo_th; - else if (strstr(kcontrol->id.name, "OSR Value")) - ucontrol->value.integer.value[0] = dspk->osr_val; - else if (strstr(kcontrol->id.name, "LR Polarity Select")) - ucontrol->value.integer.value[0] = dspk->lrsel; - else if (strstr(kcontrol->id.name, "Channel Select")) - ucontrol->value.integer.value[0] = dspk->ch_sel; - else if (strstr(kcontrol->id.name, "Mono To Stereo")) - 
ucontrol->value.integer.value[0] = dspk->mono_to_stereo; - else if (strstr(kcontrol->id.name, "Stereo To Mono")) - ucontrol->value.integer.value[0] = dspk->stereo_to_mono; + ucontrol->value.integer.value[0] = dspk->rx_fifo_th; return 0; } -static int tegra186_dspk_put_control(struct snd_kcontrol *kcontrol, +static int tegra186_dspk_put_fifo_th(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec); - int val = ucontrol->value.integer.value[0]; - - if (strstr(kcontrol->id.name, "FIFO Threshold")) - dspk->rx_fifo_th = val; - else if (strstr(kcontrol->id.name, "OSR Value")) - dspk->osr_val = val; - else if (strstr(kcontrol->id.name, "LR Polarity Select")) - dspk->lrsel = val; - else if (strstr(kcontrol->id.name, "Channel Select")) - dspk->ch_sel = val; - else if (strstr(kcontrol->id.name, "Mono To Stereo")) - dspk->mono_to_stereo = val; - else if (strstr(kcontrol->id.name, "Stereo To Mono")) - dspk->stereo_to_mono = val; + int value = ucontrol->value.integer.value[0]; + + if (value == dspk->rx_fifo_th) + return 0; + + dspk->rx_fifo_th = value; + + return 1; +} + +static int tegra186_dspk_get_osr_val(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); + struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec); + + ucontrol->value.enumerated.item[0] = dspk->osr_val; return 0; } +static int tegra186_dspk_put_osr_val(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); + struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == dspk->osr_val) + return 0; + + dspk->osr_val = value; + + return 1; +} + +static int tegra186_dspk_get_pol_sel(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); + struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec); + + ucontrol->value.enumerated.item[0] = dspk->lrsel; + + return 0; +} + +static int tegra186_dspk_put_pol_sel(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); + struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == dspk->lrsel) + return 0; + + dspk->lrsel = value; + + return 1; +} + +static int tegra186_dspk_get_ch_sel(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); + struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec); + + ucontrol->value.enumerated.item[0] = dspk->ch_sel; + + return 0; +} + +static int tegra186_dspk_put_ch_sel(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); + struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == dspk->ch_sel) + return 0; + + dspk->ch_sel = value; + + return 1; +} + +static int tegra186_dspk_get_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component 
*codec = snd_soc_kcontrol_component(kcontrol); + struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec); + + ucontrol->value.enumerated.item[0] = dspk->mono_to_stereo; + + return 0; +} + +static int tegra186_dspk_put_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); + struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == dspk->mono_to_stereo) + return 0; + + dspk->mono_to_stereo = value; + + return 1; +} + +static int tegra186_dspk_get_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); + struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec); + + ucontrol->value.enumerated.item[0] = dspk->stereo_to_mono; + + return 0; +} + +static int tegra186_dspk_put_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol); + struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == dspk->stereo_to_mono) + return 0; + + dspk->stereo_to_mono = value; + + return 1; +} + static int __maybe_unused tegra186_dspk_runtime_suspend(struct device *dev) { struct tegra186_dspk *dspk = dev_get_drvdata(dev); @@ -279,17 +390,19 @@ static const struct soc_enum tegra186_dspk_lrsel_enum = static const struct snd_kcontrol_new tegrat186_dspk_controls[] = { SOC_SINGLE_EXT("FIFO Threshold", SND_SOC_NOPM, 0, TEGRA186_DSPK_RX_FIFO_DEPTH - 1, 0, - tegra186_dspk_get_control, tegra186_dspk_put_control), + tegra186_dspk_get_fifo_th, tegra186_dspk_put_fifo_th), SOC_ENUM_EXT("OSR Value", tegra186_dspk_osr_enum, - tegra186_dspk_get_control, tegra186_dspk_put_control), + tegra186_dspk_get_osr_val, tegra186_dspk_put_osr_val), SOC_ENUM_EXT("LR Polarity Select", tegra186_dspk_lrsel_enum, - tegra186_dspk_get_control, tegra186_dspk_put_control), + tegra186_dspk_get_pol_sel, tegra186_dspk_put_pol_sel), SOC_ENUM_EXT("Channel Select", tegra186_dspk_ch_sel_enum, - tegra186_dspk_get_control, tegra186_dspk_put_control), + tegra186_dspk_get_ch_sel, tegra186_dspk_put_ch_sel), SOC_ENUM_EXT("Mono To Stereo", tegra186_dspk_mono_conv_enum, - tegra186_dspk_get_control, tegra186_dspk_put_control), + tegra186_dspk_get_mono_to_stereo, + tegra186_dspk_put_mono_to_stereo), SOC_ENUM_EXT("Stereo To Mono", tegra186_dspk_stereo_conv_enum, - tegra186_dspk_get_control, tegra186_dspk_put_control), + tegra186_dspk_get_stereo_to_mono, + tegra186_dspk_put_stereo_to_mono), }; static const struct snd_soc_component_driver tegra186_dspk_cmpnt = { diff --git a/sound/soc/tegra/tegra210_admaif.c b/sound/soc/tegra/tegra210_admaif.c index bcccdf3ddc52..1a2e868a6220 100644 --- a/sound/soc/tegra/tegra210_admaif.c +++ b/sound/soc/tegra/tegra210_admaif.c @@ -424,46 +424,122 @@ static const struct snd_soc_dai_ops tegra_admaif_dai_ops = { .trigger = tegra_admaif_trigger, }; -static int tegra_admaif_get_control(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) +static int tegra210_admaif_pget_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); + struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt); + struct soc_enum *ec = (struct 
soc_enum *)kcontrol->private_value; + + ucontrol->value.enumerated.item[0] = + admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg]; + + return 0; +} + +static int tegra210_admaif_pput_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); + struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt); + struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value; + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg]) + return 0; + + admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg] = value; + + return 1; +} + +static int tegra210_admaif_cget_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); + struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt); + struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value; + + ucontrol->value.enumerated.item[0] = + admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg]; + + return 0; +} + +static int tegra210_admaif_cput_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); + struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt); struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value; + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg]) + return 0; + + admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg] = value; + + return 1; +} + +static int tegra210_admaif_pget_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt); - long *uctl_val = &ucontrol->value.integer.value[0]; + struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value; - if (strstr(kcontrol->id.name, "Playback Mono To Stereo")) - *uctl_val = admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg]; - else if (strstr(kcontrol->id.name, "Capture Mono To Stereo")) - *uctl_val = admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg]; - else if (strstr(kcontrol->id.name, "Playback Stereo To Mono")) - *uctl_val = admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg]; - else if (strstr(kcontrol->id.name, "Capture Stereo To Mono")) - *uctl_val = admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg]; + ucontrol->value.enumerated.item[0] = + admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg]; return 0; } -static int tegra_admaif_put_control(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) +static int tegra210_admaif_pput_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); + struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt); struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value; + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg]) + return 0; + + admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg] = value; + + return 1; +} + +static int tegra210_admaif_cget_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); struct tegra_admaif 
*admaif = snd_soc_component_get_drvdata(cmpnt); - int value = ucontrol->value.integer.value[0]; + struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value; - if (strstr(kcontrol->id.name, "Playback Mono To Stereo")) - admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg] = value; - else if (strstr(kcontrol->id.name, "Capture Mono To Stereo")) - admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg] = value; - else if (strstr(kcontrol->id.name, "Playback Stereo To Mono")) - admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg] = value; - else if (strstr(kcontrol->id.name, "Capture Stereo To Mono")) - admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg] = value; + ucontrol->value.enumerated.item[0] = + admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg]; return 0; } +static int tegra210_admaif_cput_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); + struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt); + struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value; + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg]) + return 0; + + admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg] = value; + + return 1; +} + static int tegra_admaif_dai_probe(struct snd_soc_dai *dai) { struct tegra_admaif *admaif = snd_soc_dai_get_drvdata(dai); @@ -559,17 +635,21 @@ static const char * const tegra_admaif_mono_conv_text[] = { } #define TEGRA_ADMAIF_CIF_CTRL(reg) \ - NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Mono To Stereo", reg - 1,\ - tegra_admaif_get_control, tegra_admaif_put_control, \ + NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Mono To Stereo", reg - 1, \ + tegra210_admaif_pget_mono_to_stereo, \ + tegra210_admaif_pput_mono_to_stereo, \ tegra_admaif_mono_conv_text), \ - NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Stereo To Mono", reg - 1,\ - tegra_admaif_get_control, tegra_admaif_put_control, \ + NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Stereo To Mono", reg - 1, \ + tegra210_admaif_pget_stereo_to_mono, \ + tegra210_admaif_pput_stereo_to_mono, \ tegra_admaif_stereo_conv_text), \ - NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Mono To Stereo", reg - 1, \ - tegra_admaif_get_control, tegra_admaif_put_control, \ + NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Mono To Stereo", reg - 1, \ + tegra210_admaif_cget_mono_to_stereo, \ + tegra210_admaif_cput_mono_to_stereo, \ tegra_admaif_mono_conv_text), \ - NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Stereo To Mono", reg - 1, \ - tegra_admaif_get_control, tegra_admaif_put_control, \ + NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Stereo To Mono", reg - 1, \ + tegra210_admaif_cget_stereo_to_mono, \ + tegra210_admaif_cput_stereo_to_mono, \ tegra_admaif_stereo_conv_text) static struct snd_kcontrol_new tegra210_admaif_controls[] = { diff --git a/sound/soc/tegra/tegra210_adx.c b/sound/soc/tegra/tegra210_adx.c index d7c7849c2f92..3785cade2d9a 100644 --- a/sound/soc/tegra/tegra210_adx.c +++ b/sound/soc/tegra/tegra210_adx.c @@ -193,6 +193,9 @@ static int tegra210_adx_put_byte_map(struct snd_kcontrol *kcontrol, struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value;; + if (value == bytes_map[mc->reg]) + return 0; + if (value >= 0 && value <= 255) { /* update byte map and enable slot */ bytes_map[mc->reg] = value; @@ -511,8 +514,8 @@ static int tegra210_adx_platform_remove(struct platform_device *pdev) static const struct dev_pm_ops tegra210_adx_pm_ops = { 
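A second change rides along in the dev_pm_ops block opened here and recurs in the AMX, mixer, MVC and SFC hunks below: SET_LATE_SYSTEM_SLEEP_PM_OPS becomes SET_SYSTEM_SLEEP_PM_OPS, moving pm_runtime_force_suspend()/pm_runtime_force_resume() from the suspend_late/resume_early slots into the normal suspend/resume phase. The hunks themselves do not state the motivation, so treat any ordering rationale as an assumption; the resulting shape, with hypothetical callback names, is:

static const struct dev_pm_ops example_pm_ops = {
	SET_RUNTIME_PM_OPS(example_runtime_suspend,
			   example_runtime_resume, NULL)
	/* normal suspend phase rather than suspend_late/resume_early */
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};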
SET_RUNTIME_PM_OPS(tegra210_adx_runtime_suspend, tegra210_adx_runtime_resume, NULL) - SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, - pm_runtime_force_resume) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) }; static struct platform_driver tegra210_adx_driver = { diff --git a/sound/soc/tegra/tegra210_ahub.c b/sound/soc/tegra/tegra210_ahub.c index a1989eae2b52..388b815443c7 100644 --- a/sound/soc/tegra/tegra210_ahub.c +++ b/sound/soc/tegra/tegra210_ahub.c @@ -62,6 +62,7 @@ static int tegra_ahub_put_value_enum(struct snd_kcontrol *kctl, unsigned int *item = uctl->value.enumerated.item; unsigned int value = e->values[item[0]]; unsigned int i, bit_pos, reg_idx = 0, reg_val = 0; + int change = 0; if (item[0] >= e->items) return -EINVAL; @@ -86,12 +87,14 @@ static int tegra_ahub_put_value_enum(struct snd_kcontrol *kctl, /* Update widget power if state has changed */ if (snd_soc_component_test_bits(cmpnt, update[i].reg, - update[i].mask, update[i].val)) - snd_soc_dapm_mux_update_power(dapm, kctl, item[0], e, - &update[i]); + update[i].mask, + update[i].val)) + change |= snd_soc_dapm_mux_update_power(dapm, kctl, + item[0], e, + &update[i]); } - return 0; + return change; } static struct snd_soc_dai_driver tegra210_ahub_dais[] = { diff --git a/sound/soc/tegra/tegra210_amx.c b/sound/soc/tegra/tegra210_amx.c index af9bddfc3120..d064cc67fea6 100644 --- a/sound/soc/tegra/tegra210_amx.c +++ b/sound/soc/tegra/tegra210_amx.c @@ -222,6 +222,9 @@ static int tegra210_amx_put_byte_map(struct snd_kcontrol *kcontrol, int reg = mc->reg; int value = ucontrol->value.integer.value[0]; + if (value == bytes_map[reg]) + return 0; + if (value >= 0 && value <= 255) { /* Update byte map and enable slot */ bytes_map[reg] = value; @@ -580,8 +583,8 @@ static int tegra210_amx_platform_remove(struct platform_device *pdev) static const struct dev_pm_ops tegra210_amx_pm_ops = { SET_RUNTIME_PM_OPS(tegra210_amx_runtime_suspend, tegra210_amx_runtime_resume, NULL) - SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, - pm_runtime_force_resume) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) }; static struct platform_driver tegra210_amx_driver = { diff --git a/sound/soc/tegra/tegra210_dmic.c b/sound/soc/tegra/tegra210_dmic.c index b096478cd2ef..db95794530f4 100644 --- a/sound/soc/tegra/tegra210_dmic.c +++ b/sound/soc/tegra/tegra210_dmic.c @@ -156,51 +156,162 @@ static int tegra210_dmic_hw_params(struct snd_pcm_substream *substream, return 0; } -static int tegra210_dmic_get_control(struct snd_kcontrol *kcontrol, +static int tegra210_dmic_get_boost_gain(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol); + struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp); + + ucontrol->value.integer.value[0] = dmic->boost_gain; + + return 0; +} + +static int tegra210_dmic_put_boost_gain(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol); + struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp); + int value = ucontrol->value.integer.value[0]; + + if (value == dmic->boost_gain) + return 0; + + dmic->boost_gain = value; + + return 1; +} + +static int tegra210_dmic_get_ch_select(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol); + struct tegra210_dmic *dmic = 
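tegra_ahub_put_value_enum() above now accumulates the result of snd_soc_dapm_mux_update_power(), which returns non-zero when a widget's power state really changed, instead of returning a hard-coded 0. The same pattern in isolation, with the update descriptor left as a placeholder:

static int example_put_mux(struct snd_kcontrol *kctl,
			   struct snd_ctl_elem_value *uctl)
{
	struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kctl);
	struct soc_enum *e = (struct soc_enum *)kctl->private_value;
	struct snd_soc_dapm_update update = { 0 };	/* reg/mask/val elided */
	int change = 0;

	if (uctl->value.enumerated.item[0] >= e->items)
		return -EINVAL;

	/* non-zero return means the routing power state actually moved */
	change |= snd_soc_dapm_mux_update_power(dapm, kctl,
						uctl->value.enumerated.item[0],
						e, &update);

	return change;
}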
snd_soc_component_get_drvdata(comp); + + ucontrol->value.enumerated.item[0] = dmic->ch_select; + + return 0; +} + +static int tegra210_dmic_put_ch_select(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol); + struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == dmic->ch_select) + return 0; + + dmic->ch_select = value; + + return 1; +} + +static int tegra210_dmic_get_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol); + struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp); + + ucontrol->value.enumerated.item[0] = dmic->mono_to_stereo; + + return 0; +} + +static int tegra210_dmic_put_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol); + struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == dmic->mono_to_stereo) + return 0; + + dmic->mono_to_stereo = value; + + return 1; +} + +static int tegra210_dmic_get_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol); + struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp); + + ucontrol->value.enumerated.item[0] = dmic->stereo_to_mono; + + return 0; +} + +static int tegra210_dmic_put_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol); + struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == dmic->stereo_to_mono) + return 0; + + dmic->stereo_to_mono = value; + + return 1; +} + +static int tegra210_dmic_get_osr_val(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol); struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp); - if (strstr(kcontrol->id.name, "Boost Gain Volume")) - ucontrol->value.integer.value[0] = dmic->boost_gain; - else if (strstr(kcontrol->id.name, "Channel Select")) - ucontrol->value.integer.value[0] = dmic->ch_select; - else if (strstr(kcontrol->id.name, "Mono To Stereo")) - ucontrol->value.integer.value[0] = dmic->mono_to_stereo; - else if (strstr(kcontrol->id.name, "Stereo To Mono")) - ucontrol->value.integer.value[0] = dmic->stereo_to_mono; - else if (strstr(kcontrol->id.name, "OSR Value")) - ucontrol->value.integer.value[0] = dmic->osr_val; - else if (strstr(kcontrol->id.name, "LR Polarity Select")) - ucontrol->value.integer.value[0] = dmic->lrsel; + ucontrol->value.enumerated.item[0] = dmic->osr_val; return 0; } -static int tegra210_dmic_put_control(struct snd_kcontrol *kcontrol, +static int tegra210_dmic_put_osr_val(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol); struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp); - int value = ucontrol->value.integer.value[0]; + unsigned int value = ucontrol->value.enumerated.item[0]; - if (strstr(kcontrol->id.name, "Boost Gain Volume")) - dmic->boost_gain = value; - else if 
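The deleted get/put pairs in this DMIC hunk, like those in the other drivers, dispatched on kcontrol->id.name via strstr(). That matching is order-sensitive because the shorter patterns are substrings of longer control names; a standalone illustration (plain userspace C, names lifted from the ADMAIF tables):

#include <assert.h>
#include <string.h>

int main(void)
{
	/* the bare pattern matches both the playback and capture controls */
	assert(strstr("ADMAIF1 Playback Mono To Stereo", "Mono To Stereo"));
	assert(strstr("ADMAIF1 Capture Mono To Stereo", "Mono To Stereo"));
	return 0;
}

One handler per control removes the string matching entirely and lets each callback report its own change status.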
(strstr(kcontrol->id.name, "Channel Select")) - dmic->ch_select = ucontrol->value.integer.value[0]; - else if (strstr(kcontrol->id.name, "Mono To Stereo")) - dmic->mono_to_stereo = value; - else if (strstr(kcontrol->id.name, "Stereo To Mono")) - dmic->stereo_to_mono = value; - else if (strstr(kcontrol->id.name, "OSR Value")) - dmic->osr_val = value; - else if (strstr(kcontrol->id.name, "LR Polarity Select")) - dmic->lrsel = value; + if (value == dmic->osr_val) + return 0; + + dmic->osr_val = value; + + return 1; +} + +static int tegra210_dmic_get_pol_sel(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol); + struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp); + + ucontrol->value.enumerated.item[0] = dmic->lrsel; return 0; } +static int tegra210_dmic_put_pol_sel(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol); + struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == dmic->lrsel) + return 0; + + dmic->lrsel = value; + + return 1; +} + static const struct snd_soc_dai_ops tegra210_dmic_dai_ops = { .hw_params = tegra210_dmic_hw_params, }; @@ -287,19 +398,22 @@ static const struct soc_enum tegra210_dmic_lrsel_enum = static const struct snd_kcontrol_new tegra210_dmic_controls[] = { SOC_SINGLE_EXT("Boost Gain Volume", 0, 0, MAX_BOOST_GAIN, 0, - tegra210_dmic_get_control, tegra210_dmic_put_control), + tegra210_dmic_get_boost_gain, + tegra210_dmic_put_boost_gain), SOC_ENUM_EXT("Channel Select", tegra210_dmic_ch_enum, - tegra210_dmic_get_control, tegra210_dmic_put_control), + tegra210_dmic_get_ch_select, tegra210_dmic_put_ch_select), SOC_ENUM_EXT("Mono To Stereo", - tegra210_dmic_mono_conv_enum, tegra210_dmic_get_control, - tegra210_dmic_put_control), + tegra210_dmic_mono_conv_enum, + tegra210_dmic_get_mono_to_stereo, + tegra210_dmic_put_mono_to_stereo), SOC_ENUM_EXT("Stereo To Mono", - tegra210_dmic_stereo_conv_enum, tegra210_dmic_get_control, - tegra210_dmic_put_control), + tegra210_dmic_stereo_conv_enum, + tegra210_dmic_get_stereo_to_mono, + tegra210_dmic_put_stereo_to_mono), SOC_ENUM_EXT("OSR Value", tegra210_dmic_osr_enum, - tegra210_dmic_get_control, tegra210_dmic_put_control), + tegra210_dmic_get_osr_val, tegra210_dmic_put_osr_val), SOC_ENUM_EXT("LR Polarity Select", tegra210_dmic_lrsel_enum, - tegra210_dmic_get_control, tegra210_dmic_put_control), + tegra210_dmic_get_pol_sel, tegra210_dmic_put_pol_sel), }; static const struct snd_soc_component_driver tegra210_dmic_compnt = { diff --git a/sound/soc/tegra/tegra210_i2s.c b/sound/soc/tegra/tegra210_i2s.c index 45f31ccb49d8..9552bbb939dd 100644 --- a/sound/soc/tegra/tegra210_i2s.c +++ b/sound/soc/tegra/tegra210_i2s.c @@ -302,85 +302,235 @@ static int tegra210_i2s_set_tdm_slot(struct snd_soc_dai *dai, return 0; } -static int tegra210_i2s_set_dai_bclk_ratio(struct snd_soc_dai *dai, - unsigned int ratio) +static int tegra210_i2s_get_loopback(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) { - struct tegra210_i2s *i2s = snd_soc_dai_get_drvdata(dai); + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); - i2s->bclk_ratio = ratio; + ucontrol->value.integer.value[0] = i2s->loopback; return 0; } -static int tegra210_i2s_get_control(struct snd_kcontrol 
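Where a control maps directly onto a register field, the new put handlers also program the hardware in the same pass, as the I2S Loopback handler just below does with regmap_update_bits(). The general shape, with placeholder register macros and a hypothetical private struct:

#define EXAMPLE_CTRL		0x0	/* placeholder register offset */
#define EXAMPLE_LPBK_SHIFT	8	/* placeholder field position */
#define EXAMPLE_LPBK_MASK	(1 << EXAMPLE_LPBK_SHIFT)

struct example_i2s {
	struct regmap *regmap;
	int loopback;			/* cached control state */
};

static int example_put_loopback(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
	struct example_i2s *i2s = snd_soc_component_get_drvdata(c);
	int value = ucontrol->value.integer.value[0];

	if (value == i2s->loopback)
		return 0;

	i2s->loopback = value;

	regmap_update_bits(i2s->regmap, EXAMPLE_CTRL, EXAMPLE_LPBK_MASK,
			   value << EXAMPLE_LPBK_SHIFT);

	return 1;
}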
*kcontrol, - struct snd_ctl_elem_value *ucontrol) +static int tegra210_i2s_put_loopback(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + int value = ucontrol->value.integer.value[0]; + + if (value == i2s->loopback) + return 0; + + i2s->loopback = value; + + regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL, I2S_CTRL_LPBK_MASK, + i2s->loopback << I2S_CTRL_LPBK_SHIFT); + + return 1; +} + +static int tegra210_i2s_get_fsync_width(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); - long *uctl_val = &ucontrol->value.integer.value[0]; - - if (strstr(kcontrol->id.name, "Loopback")) - *uctl_val = i2s->loopback; - else if (strstr(kcontrol->id.name, "FSYNC Width")) - *uctl_val = i2s->fsync_width; - else if (strstr(kcontrol->id.name, "Capture Stereo To Mono")) - *uctl_val = i2s->stereo_to_mono[I2S_TX_PATH]; - else if (strstr(kcontrol->id.name, "Capture Mono To Stereo")) - *uctl_val = i2s->mono_to_stereo[I2S_TX_PATH]; - else if (strstr(kcontrol->id.name, "Playback Stereo To Mono")) - *uctl_val = i2s->stereo_to_mono[I2S_RX_PATH]; - else if (strstr(kcontrol->id.name, "Playback Mono To Stereo")) - *uctl_val = i2s->mono_to_stereo[I2S_RX_PATH]; - else if (strstr(kcontrol->id.name, "Playback FIFO Threshold")) - *uctl_val = i2s->rx_fifo_th; - else if (strstr(kcontrol->id.name, "BCLK Ratio")) - *uctl_val = i2s->bclk_ratio; + + ucontrol->value.integer.value[0] = i2s->fsync_width; return 0; } -static int tegra210_i2s_put_control(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) +static int tegra210_i2s_put_fsync_width(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); int value = ucontrol->value.integer.value[0]; - if (strstr(kcontrol->id.name, "Loopback")) { - i2s->loopback = value; + if (value == i2s->fsync_width) + return 0; - regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL, - I2S_CTRL_LPBK_MASK, - i2s->loopback << I2S_CTRL_LPBK_SHIFT); + i2s->fsync_width = value; - } else if (strstr(kcontrol->id.name, "FSYNC Width")) { - /* - * Frame sync width is used only for FSYNC modes and not - * applicable for LRCK modes. Reset value for this field is "0", - * which means the width is one bit clock wide. - * The width requirement may depend on the codec and in such - * cases mixer control is used to update custom values. A value - * of "N" here means, width is "N + 1" bit clock wide. 
- */ - i2s->fsync_width = value; - - regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL, - I2S_CTRL_FSYNC_WIDTH_MASK, - i2s->fsync_width << I2S_FSYNC_WIDTH_SHIFT); - - } else if (strstr(kcontrol->id.name, "Capture Stereo To Mono")) { - i2s->stereo_to_mono[I2S_TX_PATH] = value; - } else if (strstr(kcontrol->id.name, "Capture Mono To Stereo")) { - i2s->mono_to_stereo[I2S_TX_PATH] = value; - } else if (strstr(kcontrol->id.name, "Playback Stereo To Mono")) { - i2s->stereo_to_mono[I2S_RX_PATH] = value; - } else if (strstr(kcontrol->id.name, "Playback Mono To Stereo")) { - i2s->mono_to_stereo[I2S_RX_PATH] = value; - } else if (strstr(kcontrol->id.name, "Playback FIFO Threshold")) { - i2s->rx_fifo_th = value; - } else if (strstr(kcontrol->id.name, "BCLK Ratio")) { - i2s->bclk_ratio = value; - } + /* + * Frame sync width is used only for FSYNC modes and not + * applicable for LRCK modes. Reset value for this field is "0", + * which means the width is one bit clock wide. + * The width requirement may depend on the codec and in such + * cases mixer control is used to update custom values. A value + * of "N" here means, width is "N + 1" bit clock wide. + */ + regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL, + I2S_CTRL_FSYNC_WIDTH_MASK, + i2s->fsync_width << I2S_FSYNC_WIDTH_SHIFT); + + return 1; +} + +static int tegra210_i2s_cget_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + + ucontrol->value.enumerated.item[0] = i2s->stereo_to_mono[I2S_TX_PATH]; + + return 0; +} + +static int tegra210_i2s_cput_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == i2s->stereo_to_mono[I2S_TX_PATH]) + return 0; + + i2s->stereo_to_mono[I2S_TX_PATH] = value; + + return 1; +} + +static int tegra210_i2s_cget_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + + ucontrol->value.enumerated.item[0] = i2s->mono_to_stereo[I2S_TX_PATH]; + + return 0; +} + +static int tegra210_i2s_cput_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == i2s->mono_to_stereo[I2S_TX_PATH]) + return 0; + + i2s->mono_to_stereo[I2S_TX_PATH] = value; + + return 1; +} + +static int tegra210_i2s_pget_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + + ucontrol->value.enumerated.item[0] = i2s->stereo_to_mono[I2S_RX_PATH]; + + return 0; +} + +static int tegra210_i2s_pput_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + unsigned int value 
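A detail worth keeping in view through the rest of these hunks: controls built with SOC_SINGLE_EXT carry their payload in ucontrol->value.integer.value[0] (a long), while SOC_ENUM_EXT controls must use ucontrol->value.enumerated.item[0] (an unsigned int). Mixing the two arms is the bug fixed in the MVC curve-type hunk further down. A simplified sketch of the relevant arms of the value union (see include/uapi/sound/asound.h for the real layout):

union {
	struct { long value[128]; } integer;		/* SOC_SINGLE_EXT */
	struct { unsigned int item[128]; } enumerated;	/* SOC_ENUM_EXT */
} value;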
= ucontrol->value.enumerated.item[0]; + + if (value == i2s->stereo_to_mono[I2S_RX_PATH]) + return 0; + + i2s->stereo_to_mono[I2S_RX_PATH] = value; + + return 1; +} + +static int tegra210_i2s_pget_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + + ucontrol->value.enumerated.item[0] = i2s->mono_to_stereo[I2S_RX_PATH]; + + return 0; +} + +static int tegra210_i2s_pput_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == i2s->mono_to_stereo[I2S_RX_PATH]) + return 0; + + i2s->mono_to_stereo[I2S_RX_PATH] = value; + + return 1; +} + +static int tegra210_i2s_pget_fifo_th(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + + ucontrol->value.integer.value[0] = i2s->rx_fifo_th; + + return 0; +} + +static int tegra210_i2s_pput_fifo_th(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + int value = ucontrol->value.integer.value[0]; + + if (value == i2s->rx_fifo_th) + return 0; + + i2s->rx_fifo_th = value; + + return 1; +} + +static int tegra210_i2s_get_bclk_ratio(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + + ucontrol->value.integer.value[0] = i2s->bclk_ratio; + + return 0; +} + +static int tegra210_i2s_put_bclk_ratio(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt); + int value = ucontrol->value.integer.value[0]; + + if (value == i2s->bclk_ratio) + return 0; + + i2s->bclk_ratio = value; + + return 1; +} + +static int tegra210_i2s_set_dai_bclk_ratio(struct snd_soc_dai *dai, + unsigned int ratio) +{ + struct tegra210_i2s *i2s = snd_soc_dai_get_drvdata(dai); + + i2s->bclk_ratio = ratio; return 0; } @@ -598,22 +748,28 @@ static const struct soc_enum tegra210_i2s_stereo_conv_enum = tegra210_i2s_stereo_conv_text); static const struct snd_kcontrol_new tegra210_i2s_controls[] = { - SOC_SINGLE_EXT("Loopback", 0, 0, 1, 0, tegra210_i2s_get_control, - tegra210_i2s_put_control), - SOC_SINGLE_EXT("FSYNC Width", 0, 0, 255, 0, tegra210_i2s_get_control, - tegra210_i2s_put_control), + SOC_SINGLE_EXT("Loopback", 0, 0, 1, 0, tegra210_i2s_get_loopback, + tegra210_i2s_put_loopback), + SOC_SINGLE_EXT("FSYNC Width", 0, 0, 255, 0, + tegra210_i2s_get_fsync_width, + tegra210_i2s_put_fsync_width), SOC_ENUM_EXT("Capture Stereo To Mono", tegra210_i2s_stereo_conv_enum, - tegra210_i2s_get_control, tegra210_i2s_put_control), + tegra210_i2s_cget_stereo_to_mono, + tegra210_i2s_cput_stereo_to_mono), SOC_ENUM_EXT("Capture Mono To Stereo", tegra210_i2s_mono_conv_enum, - tegra210_i2s_get_control, tegra210_i2s_put_control), + 
tegra210_i2s_cget_mono_to_stereo, + tegra210_i2s_cput_mono_to_stereo), SOC_ENUM_EXT("Playback Stereo To Mono", tegra210_i2s_stereo_conv_enum, - tegra210_i2s_get_control, tegra210_i2s_put_control), + tegra210_i2s_pget_stereo_to_mono, + tegra210_i2s_pput_stereo_to_mono), SOC_ENUM_EXT("Playback Mono To Stereo", tegra210_i2s_mono_conv_enum, - tegra210_i2s_get_control, tegra210_i2s_put_control), + tegra210_i2s_pget_mono_to_stereo, + tegra210_i2s_pput_mono_to_stereo), SOC_SINGLE_EXT("Playback FIFO Threshold", 0, 0, I2S_RX_FIFO_DEPTH - 1, - 0, tegra210_i2s_get_control, tegra210_i2s_put_control), + 0, tegra210_i2s_pget_fifo_th, tegra210_i2s_pput_fifo_th), - SOC_SINGLE_EXT("BCLK Ratio", 0, 0, INT_MAX, 0, tegra210_i2s_get_control, - tegra210_i2s_put_control), + SOC_SINGLE_EXT("BCLK Ratio", 0, 0, INT_MAX, 0, + tegra210_i2s_get_bclk_ratio, + tegra210_i2s_put_bclk_ratio), }; static const struct snd_soc_dapm_widget tegra210_i2s_widgets[] = { diff --git a/sound/soc/tegra/tegra210_mixer.c b/sound/soc/tegra/tegra210_mixer.c index 55e61776c565..16e679a95658 100644 --- a/sound/soc/tegra/tegra210_mixer.c +++ b/sound/soc/tegra/tegra210_mixer.c @@ -192,24 +192,24 @@ static int tegra210_mixer_get_gain(struct snd_kcontrol *kcontrol, return 0; } -static int tegra210_mixer_put_gain(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) +static int tegra210_mixer_apply_gain(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol, + bool instant_gain) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); struct tegra210_mixer *mixer = snd_soc_component_get_drvdata(cmpnt); unsigned int reg = mc->reg, id; - bool instant_gain = false; int err; - if (strstr(kcontrol->id.name, "Instant Gain Volume")) - instant_gain = true; - /* Save gain value for specific MIXER input */ id = (reg - TEGRA210_MIXER_GAIN_CFG_RAM_ADDR_0) / TEGRA210_MIXER_GAIN_CFG_RAM_ADDR_STRIDE; + if (mixer->gain_value[id] == ucontrol->value.integer.value[0]) + return 0; + mixer->gain_value[id] = ucontrol->value.integer.value[0]; err = tegra210_mixer_configure_gain(cmpnt, id, instant_gain); @@ -221,6 +221,18 @@ static int tegra210_mixer_put_gain(struct snd_kcontrol *kcontrol, return 1; } +static int tegra210_mixer_put_gain(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + return tegra210_mixer_apply_gain(kcontrol, ucontrol, false); +} + +static int tegra210_mixer_put_instant_gain(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + return tegra210_mixer_apply_gain(kcontrol, ucontrol, true); +} + static int tegra210_mixer_set_audio_cif(struct tegra210_mixer *mixer, struct snd_pcm_hw_params *params, unsigned int reg, @@ -388,7 +400,7 @@ ADDER_CTRL_DECL(adder5, TEGRA210_MIXER_TX5_ADDER_CONFIG); SOC_SINGLE_EXT("RX" #id " Instant Gain Volume", \ MIXER_GAIN_CFG_RAM_ADDR((id) - 1), 0, \ 0x20000, 0, tegra210_mixer_get_gain, \ - tegra210_mixer_put_gain), + tegra210_mixer_put_instant_gain), /* Volume controls for all MIXER inputs */ static const struct snd_kcontrol_new tegra210_mixer_gain_ctls[] = { @@ -654,8 +666,8 @@ static int tegra210_mixer_platform_remove(struct platform_device *pdev) static const struct dev_pm_ops tegra210_mixer_pm_ops = { SET_RUNTIME_PM_OPS(tegra210_mixer_runtime_suspend, tegra210_mixer_runtime_resume, NULL) - SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, - pm_runtime_force_resume) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) 
}; static struct platform_driver tegra210_mixer_driver = { diff --git a/sound/soc/tegra/tegra210_mvc.c b/sound/soc/tegra/tegra210_mvc.c index 7b9c7006e419..acf59328dcb6 100644 --- a/sound/soc/tegra/tegra210_mvc.c +++ b/sound/soc/tegra/tegra210_mvc.c @@ -136,7 +136,7 @@ static int tegra210_mvc_put_mute(struct snd_kcontrol *kcontrol, struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); struct tegra210_mvc *mvc = snd_soc_component_get_drvdata(cmpnt); unsigned int value; - u8 mute_mask; + u8 new_mask, old_mask; int err; pm_runtime_get_sync(cmpnt->dev); @@ -148,15 +148,23 @@ static int tegra210_mvc_put_mute(struct snd_kcontrol *kcontrol, if (err < 0) goto end; - mute_mask = ucontrol->value.integer.value[0]; + regmap_read(mvc->regmap, TEGRA210_MVC_CTRL, &value); + + old_mask = (value >> TEGRA210_MVC_MUTE_SHIFT) & TEGRA210_MUTE_MASK_EN; + new_mask = ucontrol->value.integer.value[0]; + + if (new_mask == old_mask) { + err = 0; + goto end; + } err = regmap_update_bits(mvc->regmap, mc->reg, TEGRA210_MVC_MUTE_MASK, - mute_mask << TEGRA210_MVC_MUTE_SHIFT); + new_mask << TEGRA210_MVC_MUTE_SHIFT); if (err < 0) goto end; - return 1; + err = 1; end: pm_runtime_put(cmpnt->dev); @@ -195,7 +203,7 @@ static int tegra210_mvc_put_vol(struct snd_kcontrol *kcontrol, unsigned int reg = mc->reg; unsigned int value; u8 chan; - int err; + int err, old_volume; pm_runtime_get_sync(cmpnt->dev); @@ -207,10 +215,16 @@ static int tegra210_mvc_put_vol(struct snd_kcontrol *kcontrol, goto end; chan = (reg - TEGRA210_MVC_TARGET_VOL) / REG_SIZE; + old_volume = mvc->volume[chan]; tegra210_mvc_conv_vol(mvc, chan, ucontrol->value.integer.value[0]); + if (mvc->volume[chan] == old_volume) { + err = 0; + goto end; + } + /* Configure init volume same as target volume */ regmap_write(mvc->regmap, TEGRA210_MVC_REG_OFFSET(TEGRA210_MVC_INIT_VOL, chan), @@ -222,7 +236,7 @@ static int tegra210_mvc_put_vol(struct snd_kcontrol *kcontrol, TEGRA210_MVC_VOLUME_SWITCH_MASK, TEGRA210_MVC_VOLUME_SWITCH_TRIGGER); - return 1; + err = 1; end: pm_runtime_put(cmpnt->dev); @@ -275,7 +289,7 @@ static int tegra210_mvc_get_curve_type(struct snd_kcontrol *kcontrol, struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); struct tegra210_mvc *mvc = snd_soc_component_get_drvdata(cmpnt); - ucontrol->value.integer.value[0] = mvc->curve_type; + ucontrol->value.enumerated.item[0] = mvc->curve_type; return 0; } @@ -285,7 +299,7 @@ static int tegra210_mvc_put_curve_type(struct snd_kcontrol *kcontrol, { struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); struct tegra210_mvc *mvc = snd_soc_component_get_drvdata(cmpnt); - int value; + unsigned int value; regmap_read(mvc->regmap, TEGRA210_MVC_ENABLE, &value); if (value & TEGRA210_MVC_EN) { @@ -294,10 +308,10 @@ static int tegra210_mvc_put_curve_type(struct snd_kcontrol *kcontrol, return -EINVAL; } - if (mvc->curve_type == ucontrol->value.integer.value[0]) + if (mvc->curve_type == ucontrol->value.enumerated.item[0]) return 0; - mvc->curve_type = ucontrol->value.integer.value[0]; + mvc->curve_type = ucontrol->value.enumerated.item[0]; tegra210_mvc_reset_vol_settings(mvc, cmpnt->dev); @@ -625,8 +639,8 @@ static int tegra210_mvc_platform_remove(struct platform_device *pdev) static const struct dev_pm_ops tegra210_mvc_pm_ops = { SET_RUNTIME_PM_OPS(tegra210_mvc_runtime_suspend, tegra210_mvc_runtime_resume, NULL) - SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, - pm_runtime_force_resume) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) 
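tegra210_mvc_put_mute() above reads the live register first, so the compare is against what the hardware holds rather than a cached copy, and only a successful regmap_update_bits() yields 1. Stripped to its control flow, with a hypothetical private struct and placeholder register macros:

#define EXAMPLE_CTRL		0x0	/* placeholder register offset */
#define EXAMPLE_MUTE_SHIFT	8	/* placeholder field layout */
#define EXAMPLE_MUTE_EN		0xff
#define EXAMPLE_MUTE_MASK	(EXAMPLE_MUTE_EN << EXAMPLE_MUTE_SHIFT)

struct example_mvc {
	struct regmap *regmap;
};

static int example_put_mute(struct snd_kcontrol *kcontrol,
			    struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
	struct example_mvc *mvc = snd_soc_component_get_drvdata(c);
	unsigned int ctrl;
	u8 old_mask, new_mask;
	int err;

	pm_runtime_get_sync(c->dev);

	regmap_read(mvc->regmap, EXAMPLE_CTRL, &ctrl);
	old_mask = (ctrl >> EXAMPLE_MUTE_SHIFT) & EXAMPLE_MUTE_EN;
	new_mask = ucontrol->value.integer.value[0];

	if (new_mask == old_mask) {
		err = 0;			/* nothing would change */
		goto end;
	}

	err = regmap_update_bits(mvc->regmap, EXAMPLE_CTRL,
				 EXAMPLE_MUTE_MASK,
				 new_mask << EXAMPLE_MUTE_SHIFT);
	if (err < 0)
		goto end;

	err = 1;				/* applied and changed */
end:
	pm_runtime_put(c->dev);

	return err;
}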
}; static struct platform_driver tegra210_mvc_driver = { diff --git a/sound/soc/tegra/tegra210_sfc.c b/sound/soc/tegra/tegra210_sfc.c index dc477ee1b82c..368f077e7bee 100644 --- a/sound/soc/tegra/tegra210_sfc.c +++ b/sound/soc/tegra/tegra210_sfc.c @@ -3244,46 +3244,107 @@ static int tegra210_sfc_init(struct snd_soc_dapm_widget *w, return tegra210_sfc_write_coeff_ram(cmpnt); } -static int tegra210_sfc_get_control(struct snd_kcontrol *kcontrol, +static int tegra210_sfc_iget_stereo_to_mono(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt); - if (strstr(kcontrol->id.name, "Input Stereo To Mono")) - ucontrol->value.integer.value[0] = - sfc->stereo_to_mono[SFC_RX_PATH]; - else if (strstr(kcontrol->id.name, "Input Mono To Stereo")) - ucontrol->value.integer.value[0] = - sfc->mono_to_stereo[SFC_RX_PATH]; - else if (strstr(kcontrol->id.name, "Output Stereo To Mono")) - ucontrol->value.integer.value[0] = - sfc->stereo_to_mono[SFC_TX_PATH]; - else if (strstr(kcontrol->id.name, "Output Mono To Stereo")) - ucontrol->value.integer.value[0] = - sfc->mono_to_stereo[SFC_TX_PATH]; + ucontrol->value.enumerated.item[0] = sfc->stereo_to_mono[SFC_RX_PATH]; return 0; } -static int tegra210_sfc_put_control(struct snd_kcontrol *kcontrol, +static int tegra210_sfc_iput_stereo_to_mono(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt); - int value = ucontrol->value.integer.value[0]; - - if (strstr(kcontrol->id.name, "Input Stereo To Mono")) - sfc->stereo_to_mono[SFC_RX_PATH] = value; - else if (strstr(kcontrol->id.name, "Input Mono To Stereo")) - sfc->mono_to_stereo[SFC_RX_PATH] = value; - else if (strstr(kcontrol->id.name, "Output Stereo To Mono")) - sfc->stereo_to_mono[SFC_TX_PATH] = value; - else if (strstr(kcontrol->id.name, "Output Mono To Stereo")) - sfc->mono_to_stereo[SFC_TX_PATH] = value; - else + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == sfc->stereo_to_mono[SFC_RX_PATH]) + return 0; + + sfc->stereo_to_mono[SFC_RX_PATH] = value; + + return 1; +} + +static int tegra210_sfc_iget_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt); + + ucontrol->value.enumerated.item[0] = sfc->mono_to_stereo[SFC_RX_PATH]; + + return 0; +} + +static int tegra210_sfc_iput_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == sfc->mono_to_stereo[SFC_RX_PATH]) return 0; + sfc->mono_to_stereo[SFC_RX_PATH] = value; + + return 1; +} + +static int tegra210_sfc_oget_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt); + + ucontrol->value.enumerated.item[0] = sfc->stereo_to_mono[SFC_TX_PATH]; + + return 0; +} + +static int tegra210_sfc_oput_stereo_to_mono(struct snd_kcontrol *kcontrol, + struct 
snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == sfc->stereo_to_mono[SFC_TX_PATH]) + return 0; + + sfc->stereo_to_mono[SFC_TX_PATH] = value; + + return 1; +} + +static int tegra210_sfc_oget_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt); + + ucontrol->value.enumerated.item[0] = sfc->mono_to_stereo[SFC_TX_PATH]; + + return 0; +} + +static int tegra210_sfc_oput_mono_to_stereo(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); + struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt); + unsigned int value = ucontrol->value.enumerated.item[0]; + + if (value == sfc->mono_to_stereo[SFC_TX_PATH]) + return 0; + + sfc->mono_to_stereo[SFC_TX_PATH] = value; + return 1; } @@ -3384,13 +3445,17 @@ static const struct soc_enum tegra210_sfc_mono_conv_enum = static const struct snd_kcontrol_new tegra210_sfc_controls[] = { SOC_ENUM_EXT("Input Stereo To Mono", tegra210_sfc_stereo_conv_enum, - tegra210_sfc_get_control, tegra210_sfc_put_control), + tegra210_sfc_iget_stereo_to_mono, + tegra210_sfc_iput_stereo_to_mono), SOC_ENUM_EXT("Input Mono To Stereo", tegra210_sfc_mono_conv_enum, - tegra210_sfc_get_control, tegra210_sfc_put_control), + tegra210_sfc_iget_mono_to_stereo, + tegra210_sfc_iput_mono_to_stereo), SOC_ENUM_EXT("Output Stereo To Mono", tegra210_sfc_stereo_conv_enum, - tegra210_sfc_get_control, tegra210_sfc_put_control), + tegra210_sfc_oget_stereo_to_mono, + tegra210_sfc_oput_stereo_to_mono), SOC_ENUM_EXT("Output Mono To Stereo", tegra210_sfc_mono_conv_enum, - tegra210_sfc_get_control, tegra210_sfc_put_control), + tegra210_sfc_oget_mono_to_stereo, + tegra210_sfc_oput_mono_to_stereo), }; static const struct snd_soc_component_driver tegra210_sfc_cmpnt = { @@ -3529,8 +3594,8 @@ static int tegra210_sfc_platform_remove(struct platform_device *pdev) static const struct dev_pm_ops tegra210_sfc_pm_ops = { SET_RUNTIME_PM_OPS(tegra210_sfc_runtime_suspend, tegra210_sfc_runtime_resume, NULL) - SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, - pm_runtime_force_resume) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) }; static struct platform_driver tegra210_sfc_driver = { diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index d489c1de3bae..823b6b8de942 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -3016,11 +3016,11 @@ static const struct snd_djm_ctl snd_djm_ctls_750mk2[] = { static const struct snd_djm_device snd_djm_devices[] = { - SND_DJM_DEVICE(250mk2), - SND_DJM_DEVICE(750), - SND_DJM_DEVICE(750mk2), - SND_DJM_DEVICE(850), - SND_DJM_DEVICE(900nxs2) + [SND_DJM_250MK2_IDX] = SND_DJM_DEVICE(250mk2), + [SND_DJM_750_IDX] = SND_DJM_DEVICE(750), + [SND_DJM_850_IDX] = SND_DJM_DEVICE(850), + [SND_DJM_900NXS2_IDX] = SND_DJM_DEVICE(900nxs2), + [SND_DJM_750MK2_IDX] = SND_DJM_DEVICE(750mk2), }; diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 95ec8eec1bb0..cec6e91afea2 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -581,6 +581,12 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream) return 0; } +/* free-wheeling mode? (e.g. 
dmix) */ +static int in_free_wheeling_mode(struct snd_pcm_runtime *runtime) +{ + return runtime->stop_threshold > runtime->buffer_size; +} + /* check whether early start is needed for playback stream */ static int lowlatency_playback_available(struct snd_pcm_runtime *runtime, struct snd_usb_substream *subs) @@ -592,8 +598,7 @@ static int lowlatency_playback_available(struct snd_pcm_runtime *runtime, /* disabled via module option? */ if (!chip->lowlatency) return false; - /* free-wheeling mode? (e.g. dmix) */ - if (runtime->stop_threshold > runtime->buffer_size) + if (in_free_wheeling_mode(runtime)) return false; /* implicit feedback mode has own operation mode */ if (snd_usb_endpoint_implicit_feedback_sink(subs->data_endpoint)) @@ -635,7 +640,8 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream) runtime->delay = 0; subs->lowlatency_playback = lowlatency_playback_available(runtime, subs); - if (!subs->lowlatency_playback) + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && + !subs->lowlatency_playback) ret = start_endpoints(subs); unlock: @@ -1552,6 +1558,8 @@ static int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substrea subs); if (subs->lowlatency_playback && cmd == SNDRV_PCM_TRIGGER_START) { + if (in_free_wheeling_mode(substream->runtime)) + subs->lowlatency_playback = false; err = start_endpoints(subs); if (err < 0) { snd_usb_endpoint_set_callback(subs->data_endpoint, diff --git a/sound/xen/xen_snd_front.c b/sound/xen/xen_snd_front.c index 2cb0a19be2b8..4041748c12e5 100644 --- a/sound/xen/xen_snd_front.c +++ b/sound/xen/xen_snd_front.c @@ -358,6 +358,7 @@ static struct xenbus_driver xen_driver = { .probe = xen_drv_probe, .remove = xen_drv_remove, .otherend_changed = sndback_changed, + .not_essential = true, }; static int __init xen_drv_init(void) diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index d0ce5cfd3ac1..d5b5f2ab87a0 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -277,6 +277,7 @@ #define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC instruction */ #define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */ #define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */ +#define X86_FEATURE_XFD (10*32+ 4) /* "" eXtended Feature Disabling */ /* * Extended auxiliary flags: Linux defined - for features scattered in various @@ -298,6 +299,7 @@ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ +#define X86_FEATURE_AMX_TILE (18*32+24) /* AMX tile Support */ /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h index 2ef1f6513c68..5a776a08f78c 100644 --- a/tools/arch/x86/include/uapi/asm/kvm.h +++ b/tools/arch/x86/include/uapi/asm/kvm.h @@ -504,4 +504,8 @@ struct kvm_pmu_event_filter { #define KVM_PMU_EVENT_ALLOW 0 #define KVM_PMU_EVENT_DENY 1 +/* for KVM_{GET,SET,HAS}_DEVICE_ATTR */ +#define KVM_VCPU_TSC_CTRL 0 /* control group for the timestamp counter (TSC) */ +#define KVM_VCPU_TSC_OFFSET 0 /* attribute for the TSC offset */ + #endif /* _ASM_X86_KVM_H */ diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c index a59cb0ee609c..73409e27be01 100644 --- 
a/tools/bpf/resolve_btfids/main.c +++ b/tools/bpf/resolve_btfids/main.c @@ -83,6 +83,7 @@ struct btf_id { int cnt; }; int addr_cnt; + bool is_set; Elf64_Addr addr[ADDR_CNT]; }; @@ -451,8 +452,10 @@ static int symbols_collect(struct object *obj) * in symbol's size, together with 'cnt' field hence * that - 1. */ - if (id) + if (id) { id->cnt = sym.st_size / sizeof(int) - 1; + id->is_set = true; + } } else { pr_err("FAILED unsupported prefix %s\n", prefix); return -1; @@ -568,9 +571,8 @@ static int id_patch(struct object *obj, struct btf_id *id) int *ptr = data->d_buf; int i; - if (!id->id) { + if (!id->id && !id->is_set) pr_err("WARN: resolve_btfids: unresolved symbol %s\n", id->name); - } for (i = 0; i < id->addr_cnt; i++) { unsigned long addr = id->addr[i]; diff --git a/tools/bpf/runqslower/Makefile b/tools/bpf/runqslower/Makefile index bbd1150578f7..8791d0e2762b 100644 --- a/tools/bpf/runqslower/Makefile +++ b/tools/bpf/runqslower/Makefile @@ -88,5 +88,4 @@ $(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(BPFOBJ_OU $(DEFAULT_BPFTOOL): $(BPFOBJ) | $(BPFTOOL_OUTPUT) $(Q)$(MAKE) $(submake_extras) -C ../bpftool OUTPUT=$(BPFTOOL_OUTPUT) \ - LIBBPF_OUTPUT=$(BPFOBJ_OUTPUT) \ - LIBBPF_DESTDIR=$(BPF_DESTDIR) CC=$(HOSTCC) LD=$(HOSTLD) + CC=$(HOSTCC) LD=$(HOSTLD) diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature index 45a9a59828c3..ae61f464043a 100644 --- a/tools/build/Makefile.feature +++ b/tools/build/Makefile.feature @@ -48,7 +48,6 @@ FEATURE_TESTS_BASIC := \ numa_num_possible_cpus \ libperl \ libpython \ - libpython-version \ libslang \ libslang-include-subdir \ libtraceevent \ diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index 0a3244ad9673..1480910c792e 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -32,7 +32,6 @@ FILES= \ test-numa_num_possible_cpus.bin \ test-libperl.bin \ test-libpython.bin \ - test-libpython-version.bin \ test-libslang.bin \ test-libslang-include-subdir.bin \ test-libtraceevent.bin \ @@ -227,9 +226,6 @@ $(OUTPUT)test-libperl.bin: $(OUTPUT)test-libpython.bin: $(BUILD) $(FLAGS_PYTHON_EMBED) -$(OUTPUT)test-libpython-version.bin: - $(BUILD) - $(OUTPUT)test-libbfd.bin: $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c index 920439527291..5ffafb967b6e 100644 --- a/tools/build/feature/test-all.c +++ b/tools/build/feature/test-all.c @@ -14,10 +14,6 @@ # include "test-libpython.c" #undef main -#define main main_test_libpython_version -# include "test-libpython-version.c" -#undef main - #define main main_test_libperl # include "test-libperl.c" #undef main @@ -177,7 +173,6 @@ int main(int argc, char *argv[]) { main_test_libpython(); - main_test_libpython_version(); main_test_libperl(); main_test_hello(); main_test_libelf(); @@ -200,7 +195,6 @@ int main(int argc, char *argv[]) main_test_timerfd(); main_test_stackprotector_all(); main_test_libdw_dwarf_unwind(); - main_test_sync_compare_and_swap(argc, argv); main_test_zlib(); main_test_pthread_attr_setaffinity_np(); main_test_pthread_barrier(); diff --git a/tools/build/feature/test-libpython-version.c b/tools/build/feature/test-libpython-version.c deleted file mode 100644 index 47714b942d4d..000000000000 --- a/tools/build/feature/test-libpython-version.c +++ /dev/null @@ -1,11 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <Python.h> - -#if PY_VERSION_HEX >= 0x03000000 - #error -#endif - -int main(void) -{ - return 0; -} diff --git 
a/tools/include/linux/debug_locks.h b/tools/include/linux/debug_locks.h deleted file mode 100644 index 72d595ce764a..000000000000 --- a/tools/include/linux/debug_locks.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LIBLOCKDEP_DEBUG_LOCKS_H_ -#define _LIBLOCKDEP_DEBUG_LOCKS_H_ - -#include <stddef.h> -#include <linux/compiler.h> -#include <asm/bug.h> - -#define DEBUG_LOCKS_WARN_ON(x) WARN_ON(x) - -extern bool debug_locks; -extern bool debug_locks_silent; - -#endif diff --git a/tools/include/linux/hardirq.h b/tools/include/linux/hardirq.h deleted file mode 100644 index b25580b6a9be..000000000000 --- a/tools/include/linux/hardirq.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LIBLOCKDEP_LINUX_HARDIRQ_H_ -#define _LIBLOCKDEP_LINUX_HARDIRQ_H_ - -#define SOFTIRQ_BITS 0UL -#define HARDIRQ_BITS 0UL -#define SOFTIRQ_SHIFT 0UL -#define HARDIRQ_SHIFT 0UL -#define hardirq_count() 0UL -#define softirq_count() 0UL - -#endif diff --git a/tools/include/linux/irqflags.h b/tools/include/linux/irqflags.h deleted file mode 100644 index 501262aee8ff..000000000000 --- a/tools/include/linux/irqflags.h +++ /dev/null @@ -1,39 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_ -#define _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_ - -# define lockdep_hardirq_context() 0 -# define lockdep_softirq_context(p) 0 -# define lockdep_hardirqs_enabled() 0 -# define lockdep_softirqs_enabled(p) 0 -# define lockdep_hardirq_enter() do { } while (0) -# define lockdep_hardirq_exit() do { } while (0) -# define lockdep_softirq_enter() do { } while (0) -# define lockdep_softirq_exit() do { } while (0) -# define INIT_TRACE_IRQFLAGS - -# define stop_critical_timings() do { } while (0) -# define start_critical_timings() do { } while (0) - -#define raw_local_irq_disable() do { } while (0) -#define raw_local_irq_enable() do { } while (0) -#define raw_local_irq_save(flags) ((flags) = 0) -#define raw_local_irq_restore(flags) ((void)(flags)) -#define raw_local_save_flags(flags) ((flags) = 0) -#define raw_irqs_disabled_flags(flags) ((void)(flags)) -#define raw_irqs_disabled() 0 -#define raw_safe_halt() - -#define local_irq_enable() do { } while (0) -#define local_irq_disable() do { } while (0) -#define local_irq_save(flags) ((flags) = 0) -#define local_irq_restore(flags) ((void)(flags)) -#define local_save_flags(flags) ((flags) = 0) -#define irqs_disabled() (1) -#define irqs_disabled_flags(flags) ((void)(flags), 0) -#define safe_halt() do { } while (0) - -#define trace_lock_release(x, y) -#define trace_lock_acquire(a, b, c, d, e, f, g) - -#endif diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h index a7e54a08fb54..3e8df500cfbd 100644 --- a/tools/include/linux/kernel.h +++ b/tools/include/linux/kernel.h @@ -7,6 +7,7 @@ #include <assert.h> #include <linux/build_bug.h> #include <linux/compiler.h> +#include <linux/math.h> #include <endian.h> #include <byteswap.h> @@ -14,8 +15,6 @@ #define UINT_MAX (~0U) #endif -#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) - #define PERF_ALIGN(x, a) __PERF_ALIGN_MASK(x, (typeof(x))(a)-1) #define __PERF_ALIGN_MASK(x, mask) (((x)+(mask))&~(mask)) @@ -52,15 +51,6 @@ _min1 < _min2 ? 
_min1 : _min2; }) #endif -#ifndef roundup -#define roundup(x, y) ( \ -{ \ - const typeof(y) __y = y; \ - (((x) + (__y - 1)) / __y) * __y; \ -} \ -) -#endif - #ifndef BUG_ON #ifdef NDEBUG #define BUG_ON(cond) do { if (cond) {} } while (0) @@ -104,16 +94,6 @@ int scnprintf_pad(char * buf, size_t size, const char * fmt, ...); #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) -/* - * This looks more complex than it should be. But we need to - * get the type for the ~ right in round_down (it needs to be - * as wide as the result!), and we want to evaluate the macro - * arguments just once each. - */ -#define __round_mask(x, y) ((__typeof__(x))((y)-1)) -#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) -#define round_down(x, y) ((x) & ~__round_mask(x, y)) - #define current_gfp_context(k) 0 #define synchronize_rcu() diff --git a/tools/include/linux/lockdep.h b/tools/include/linux/lockdep.h deleted file mode 100644 index e56997288f2b..000000000000 --- a/tools/include/linux/lockdep.h +++ /dev/null @@ -1,72 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LIBLOCKDEP_LOCKDEP_H_ -#define _LIBLOCKDEP_LOCKDEP_H_ - -#include <sys/prctl.h> -#include <sys/syscall.h> -#include <string.h> -#include <limits.h> -#include <linux/utsname.h> -#include <linux/compiler.h> -#include <linux/export.h> -#include <linux/kern_levels.h> -#include <linux/err.h> -#include <linux/rcu.h> -#include <linux/list.h> -#include <linux/hardirq.h> -#include <unistd.h> - -#define MAX_LOCK_DEPTH 63UL - -#define asmlinkage -#define __visible - -#include "../../../include/linux/lockdep.h" - -struct task_struct { - u64 curr_chain_key; - int lockdep_depth; - unsigned int lockdep_recursion; - struct held_lock held_locks[MAX_LOCK_DEPTH]; - gfp_t lockdep_reclaim_gfp; - int pid; - int state; - char comm[17]; -}; - -#define TASK_RUNNING 0 - -extern struct task_struct *__curr(void); - -#define current (__curr()) - -static inline int debug_locks_off(void) -{ - return 1; -} - -#define task_pid_nr(tsk) ((tsk)->pid) - -#define KSYM_NAME_LEN 128 -#define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__) -#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__) -#define pr_warn pr_err -#define pr_cont pr_err - -#define list_del_rcu list_del - -#define atomic_t unsigned long -#define atomic_inc(x) ((*(x))++) - -#define print_tainted() "" -#define static_obj(x) 1 - -#define debug_show_all_locks() -extern void debug_check_no_locks_held(void); - -static __used bool __is_kernel_percpu_address(unsigned long addr, void *can_addr) -{ - return false; -} - -#endif diff --git a/tools/include/linux/math.h b/tools/include/linux/math.h new file mode 100644 index 000000000000..4e7af99ec9eb --- /dev/null +++ b/tools/include/linux/math.h @@ -0,0 +1,25 @@ +#ifndef _TOOLS_MATH_H +#define _TOOLS_MATH_H + +/* + * This looks more complex than it should be. But we need to + * get the type for the ~ right in round_down (it needs to be + * as wide as the result!), and we want to evaluate the macro + * arguments just once each. 
+ */ +#define __round_mask(x, y) ((__typeof__(x))((y)-1)) +#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) +#define round_down(x, y) ((x) & ~__round_mask(x, y)) + +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) + +#ifndef roundup +#define roundup(x, y) ( \ +{ \ + const typeof(y) __y = y; \ + (((x) + (__y - 1)) / __y) * __y; \ +} \ +) +#endif + +#endif diff --git a/tools/include/linux/proc_fs.h b/tools/include/linux/proc_fs.h deleted file mode 100644 index 8b3b03b64fda..000000000000 --- a/tools/include/linux/proc_fs.h +++ /dev/null @@ -1,4 +0,0 @@ -#ifndef _TOOLS_INCLUDE_LINUX_PROC_FS_H -#define _TOOLS_INCLUDE_LINUX_PROC_FS_H - -#endif /* _TOOLS_INCLUDE_LINUX_PROC_FS_H */ diff --git a/tools/include/linux/spinlock.h b/tools/include/linux/spinlock.h index c934572d935c..622266b197d0 100644 --- a/tools/include/linux/spinlock.h +++ b/tools/include/linux/spinlock.h @@ -37,6 +37,4 @@ static inline bool arch_spin_is_locked(arch_spinlock_t *mutex) return true; } -#include <linux/lockdep.h> - #endif diff --git a/tools/include/linux/stacktrace.h b/tools/include/linux/stacktrace.h deleted file mode 100644 index ae343ac35bfa..000000000000 --- a/tools/include/linux/stacktrace.h +++ /dev/null @@ -1,33 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LIBLOCKDEP_LINUX_STACKTRACE_H_ -#define _LIBLOCKDEP_LINUX_STACKTRACE_H_ - -#include <execinfo.h> - -struct stack_trace { - unsigned int nr_entries, max_entries; - unsigned long *entries; - int skip; -}; - -static inline void print_stack_trace(struct stack_trace *trace, int spaces) -{ - backtrace_symbols_fd((void **)trace->entries, trace->nr_entries, 1); -} - -#define save_stack_trace(trace) \ - ((trace)->nr_entries = \ - backtrace((void **)(trace)->entries, (trace)->max_entries)) - -static inline int dump_stack(void) -{ - void *array[64]; - size_t size; - - size = backtrace(array, 64); - backtrace_symbols_fd(array, size, 1); - - return 0; -} - -#endif diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h index b3610fdd1fee..eebd3894fe89 100644 --- a/tools/include/uapi/linux/if_link.h +++ b/tools/include/uapi/linux/if_link.h @@ -7,24 +7,23 @@ /* This struct should be in sync with struct rtnl_link_stats64 */ struct rtnl_link_stats { - __u32 rx_packets; /* total packets received */ - __u32 tx_packets; /* total packets transmitted */ - __u32 rx_bytes; /* total bytes received */ - __u32 tx_bytes; /* total bytes transmitted */ - __u32 rx_errors; /* bad packets received */ - __u32 tx_errors; /* packet transmit problems */ - __u32 rx_dropped; /* no space in linux buffers */ - __u32 tx_dropped; /* no space available in linux */ - __u32 multicast; /* multicast packets received */ + __u32 rx_packets; + __u32 tx_packets; + __u32 rx_bytes; + __u32 tx_bytes; + __u32 rx_errors; + __u32 tx_errors; + __u32 rx_dropped; + __u32 tx_dropped; + __u32 multicast; __u32 collisions; - /* detailed rx_errors: */ __u32 rx_length_errors; - __u32 rx_over_errors; /* receiver ring buff overflow */ - __u32 rx_crc_errors; /* recved pkt with crc error */ - __u32 rx_frame_errors; /* recv'd frame alignment error */ - __u32 rx_fifo_errors; /* recv'r fifo overrun */ - __u32 rx_missed_errors; /* receiver missed packet */ + __u32 rx_over_errors; + __u32 rx_crc_errors; + __u32 rx_frame_errors; + __u32 rx_fifo_errors; + __u32 rx_missed_errors; /* detailed tx_errors */ __u32 tx_aborted_errors; @@ -37,29 +36,201 @@ struct rtnl_link_stats { __u32 rx_compressed; __u32 tx_compressed; - __u32 rx_nohandler; /* dropped, no handler found */ + __u32 
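The helpers gathered into the new tools/include/linux/math.h keep their kernel semantics: round_up() and round_down() lean on the __round_mask() bit trick and therefore require a power-of-two alignment, while DIV_ROUND_UP() and roundup() accept any positive divisor. A few compile-time spot checks (C11 _Static_assert; roundup() expands to a GCC statement expression, so it cannot appear in a constant expression):

#include <linux/math.h>		/* the tools copy added above */

_Static_assert(round_up(10, 4) == 12, "power-of-two round up");
_Static_assert(round_down(10, 4) == 8, "power-of-two round down");
_Static_assert(DIV_ROUND_UP(10, 4) == 3, "ceiling division");
/* roundup(10, 3) evaluates to 12, but only at run time */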
rx_nohandler; }; -/* The main device statistics structure */ +/** + * struct rtnl_link_stats64 - The main device statistics structure. + * + * @rx_packets: Number of good packets received by the interface. + * For hardware interfaces counts all good packets received from the device + * by the host, including packets which host had to drop at various stages + * of processing (even in the driver). + * + * @tx_packets: Number of packets successfully transmitted. + * For hardware interfaces counts packets which host was able to successfully + * hand over to the device, which does not necessarily mean that packets + * had been successfully transmitted out of the device, only that device + * acknowledged it copied them out of host memory. + * + * @rx_bytes: Number of good received bytes, corresponding to @rx_packets. + * + * For IEEE 802.3 devices should count the length of Ethernet Frames + * excluding the FCS. + * + * @tx_bytes: Number of good transmitted bytes, corresponding to @tx_packets. + * + * For IEEE 802.3 devices should count the length of Ethernet Frames + * excluding the FCS. + * + * @rx_errors: Total number of bad packets received on this network device. + * This counter must include events counted by @rx_length_errors, + * @rx_crc_errors, @rx_frame_errors and other errors not otherwise + * counted. + * + * @tx_errors: Total number of transmit problems. + * This counter must include events counted by @tx_aborted_errors, + * @tx_carrier_errors, @tx_fifo_errors, @tx_heartbeat_errors, + * @tx_window_errors and other errors not otherwise counted. + * + * @rx_dropped: Number of packets received but not processed, + * e.g. due to lack of resources or unsupported protocol. + * For hardware interfaces this counter may include packets discarded + * due to L2 address filtering but should not include packets dropped + * by the device due to buffer exhaustion which are counted separately in + * @rx_missed_errors (since procfs folds those two counters together). + * + * @tx_dropped: Number of packets dropped on their way to transmission, + * e.g. due to lack of resources. + * + * @multicast: Multicast packets received. + * For hardware interfaces this statistic is commonly calculated + * at the device level (unlike @rx_packets) and therefore may include + * packets which did not reach the host. + * + * For IEEE 802.3 devices this counter may be equivalent to: + * + * - 30.3.1.1.21 aMulticastFramesReceivedOK + * + * @collisions: Number of collisions during packet transmissions. + * + * @rx_length_errors: Number of packets dropped due to invalid length. + * Part of aggregate "frame" errors in `/proc/net/dev`. + * + * For IEEE 802.3 devices this counter should be equivalent to a sum + * of the following attributes: + * + * - 30.3.1.1.23 aInRangeLengthErrors + * - 30.3.1.1.24 aOutOfRangeLengthField + * - 30.3.1.1.25 aFrameTooLongErrors + * + * @rx_over_errors: Receiver FIFO overflow event counter. + * + * Historically the count of overflow events. Such events may be + * reported in the receive descriptors or via interrupts, and may + * not correspond one-to-one with dropped packets. + * + * The recommended interpretation for high speed interfaces is the + * number of packets dropped because they did not fit into buffers + * provided by the host, e.g. packets larger than MTU or next buffer + * in the ring was not available for a scatter transfer. + * + * Part of aggregate "frame" errors in `/proc/net/dev`. + * + * This statistic was historically used interchangeably with + * @rx_fifo_errors. 
+ * + * This statistic corresponds to hardware events and is not commonly used + * on software devices. + * + * @rx_crc_errors: Number of packets received with a CRC error. + * Part of aggregate "frame" errors in `/proc/net/dev`. + * + * For IEEE 802.3 devices this counter must be equivalent to: + * + * - 30.3.1.1.6 aFrameCheckSequenceErrors + * + * @rx_frame_errors: Receiver frame alignment errors. + * Part of aggregate "frame" errors in `/proc/net/dev`. + * + * For IEEE 802.3 devices this counter should be equivalent to: + * + * - 30.3.1.1.7 aAlignmentErrors + * + * @rx_fifo_errors: Receiver FIFO error counter. + * + * Historically the count of overflow events. Those events may be + * reported in the receive descriptors or via interrupts, and may + * not correspond one-to-one with dropped packets. + * + * This statistic was used interchangeably with @rx_over_errors. + * Not recommended for use in drivers for high speed interfaces. + * + * This statistic is used on software devices, e.g. to count software + * packet queue overflow (can) or sequencing errors (GRE). + * + * @rx_missed_errors: Count of packets missed by the host. + * Folded into the "drop" counter in `/proc/net/dev`. + * + * Counts the number of packets dropped by the device due to lack + * of buffer space. This usually indicates that the host interface + * is slower than the network interface, or host is not keeping up + * with the receive packet rate. + * + * This statistic corresponds to hardware events and is not used + * on software devices. + * + * @tx_aborted_errors: + * Part of aggregate "carrier" errors in `/proc/net/dev`. + * For IEEE 802.3 devices capable of half-duplex operation this counter + * must be equivalent to: + * + * - 30.3.1.1.11 aFramesAbortedDueToXSColls + * + * High speed interfaces may use this counter as a general device + * discard counter. + * + * @tx_carrier_errors: Number of frame transmission errors due to loss + * of carrier during transmission. + * Part of aggregate "carrier" errors in `/proc/net/dev`. + * + * For IEEE 802.3 devices this counter must be equivalent to: + * + * - 30.3.1.1.13 aCarrierSenseErrors + * + * @tx_fifo_errors: Number of frame transmission errors due to device + * FIFO underrun / underflow. This condition occurs when the device + * begins transmission of a frame but is unable to deliver the + * entire frame to the transmitter in time for transmission. + * Part of aggregate "carrier" errors in `/proc/net/dev`. + * + * @tx_heartbeat_errors: Number of Heartbeat / SQE Test errors for + * old half-duplex Ethernet. + * Part of aggregate "carrier" errors in `/proc/net/dev`. + * + * For IEEE 802.3 devices possibly equivalent to: + * + * - 30.3.2.1.4 aSQETestErrors + * + * @tx_window_errors: Number of frame transmission errors due + * to late collisions (for Ethernet - after the first 64B of transmission). + * Part of aggregate "carrier" errors in `/proc/net/dev`. + * + * For IEEE 802.3 devices this counter must be equivalent to: + * + * - 30.3.1.1.10 aLateCollisions + * + * @rx_compressed: Number of correctly received compressed packets. + * This counter is only meaningful for interfaces which support + * packet compression (e.g. CSLIP, PPP). + * + * @tx_compressed: Number of transmitted compressed packets. + * This counter is only meaningful for interfaces which support + * packet compression (e.g. CSLIP, PPP). 
+ * + * @rx_nohandler: Number of packets received on the interface + * but dropped by the networking stack because the device is + * not designated to receive packets (e.g. backup link in a bond). + */ struct rtnl_link_stats64 { - __u64 rx_packets; /* total packets received */ - __u64 tx_packets; /* total packets transmitted */ - __u64 rx_bytes; /* total bytes received */ - __u64 tx_bytes; /* total bytes transmitted */ - __u64 rx_errors; /* bad packets received */ - __u64 tx_errors; /* packet transmit problems */ - __u64 rx_dropped; /* no space in linux buffers */ - __u64 tx_dropped; /* no space available in linux */ - __u64 multicast; /* multicast packets received */ + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 rx_errors; + __u64 tx_errors; + __u64 rx_dropped; + __u64 tx_dropped; + __u64 multicast; __u64 collisions; /* detailed rx_errors: */ __u64 rx_length_errors; - __u64 rx_over_errors; /* receiver ring buff overflow */ - __u64 rx_crc_errors; /* recved pkt with crc error */ - __u64 rx_frame_errors; /* recv'd frame alignment error */ - __u64 rx_fifo_errors; /* recv'r fifo overrun */ - __u64 rx_missed_errors; /* receiver missed packet */ + __u64 rx_over_errors; + __u64 rx_crc_errors; + __u64 rx_frame_errors; + __u64 rx_fifo_errors; + __u64 rx_missed_errors; /* detailed tx_errors */ __u64 tx_aborted_errors; @@ -71,8 +242,7 @@ struct rtnl_link_stats64 { /* for cslip etc */ __u64 rx_compressed; __u64 tx_compressed; - - __u64 rx_nohandler; /* dropped, no handler found */ + __u64 rx_nohandler; }; /* The struct should be in sync with struct ifmap */ @@ -170,12 +340,29 @@ enum { IFLA_PROP_LIST, IFLA_ALT_IFNAME, /* Alternative ifname */ IFLA_PERM_ADDRESS, + IFLA_PROTO_DOWN_REASON, + + /* device (sysfs) name as parent, used instead + * of IFLA_LINK where there's no parent netdev + */ + IFLA_PARENT_DEV_NAME, + IFLA_PARENT_DEV_BUS_NAME, + __IFLA_MAX }; #define IFLA_MAX (__IFLA_MAX - 1) +enum { + IFLA_PROTO_DOWN_REASON_UNSPEC, + IFLA_PROTO_DOWN_REASON_MASK, /* u32, mask for reason bits */ + IFLA_PROTO_DOWN_REASON_VALUE, /* u32, reason bit value */ + + __IFLA_PROTO_DOWN_REASON_CNT, + IFLA_PROTO_DOWN_REASON_MAX = __IFLA_PROTO_DOWN_REASON_CNT - 1 +}; + /* backwards compatibility for userspace */ #ifndef __KERNEL__ #define IFLA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifinfomsg)))) @@ -293,6 +480,7 @@ enum { IFLA_BR_MCAST_MLD_VERSION, IFLA_BR_VLAN_STATS_PER_PORT, IFLA_BR_MULTI_BOOLOPT, + IFLA_BR_MCAST_QUERIER_STATE, __IFLA_BR_MAX, }; @@ -346,6 +534,8 @@ enum { IFLA_BRPORT_BACKUP_PORT, IFLA_BRPORT_MRP_RING_OPEN, IFLA_BRPORT_MRP_IN_OPEN, + IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT, + IFLA_BRPORT_MCAST_EHT_HOSTS_CNT, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) @@ -433,6 +623,7 @@ enum macvlan_macaddr_mode { }; #define MACVLAN_FLAG_NOPROMISC 1 +#define MACVLAN_FLAG_NODST 2 /* skip dst macvlan if matching src macvlan */ /* VRF section */ enum { @@ -597,6 +788,18 @@ enum ifla_geneve_df { GENEVE_DF_MAX = __GENEVE_DF_END - 1, }; +/* Bareudp section */ +enum { + IFLA_BAREUDP_UNSPEC, + IFLA_BAREUDP_PORT, + IFLA_BAREUDP_ETHERTYPE, + IFLA_BAREUDP_SRCPORT_MIN, + IFLA_BAREUDP_MULTIPROTO_MODE, + __IFLA_BAREUDP_MAX +}; + +#define IFLA_BAREUDP_MAX (__IFLA_BAREUDP_MAX - 1) + /* PPP section */ enum { IFLA_PPP_UNSPEC, @@ -899,7 +1102,14 @@ enum { #define IFLA_IPOIB_MAX (__IFLA_IPOIB_MAX - 1) -/* HSR section */ +/* HSR/PRP section, both use the same interface */ + +/* Different redundancy protocols for hsr device */ +enum { + HSR_PROTOCOL_HSR, + 
HSR_PROTOCOL_PRP, + HSR_PROTOCOL_MAX, +}; enum { IFLA_HSR_UNSPEC, @@ -909,6 +1119,9 @@ enum { IFLA_HSR_SUPERVISION_ADDR, /* Supervision frame multicast addr */ IFLA_HSR_SEQ_NR, IFLA_HSR_VERSION, /* HSR version */ + IFLA_HSR_PROTOCOL, /* Indicate different protocol than + * HSR. For example PRP. + */ __IFLA_HSR_MAX, }; @@ -1033,6 +1246,8 @@ enum { #define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1) #define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2) #define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3) +#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 (1U << 4) +#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5 (1U << 5) enum { IFLA_RMNET_UNSPEC, @@ -1048,4 +1263,14 @@ struct ifla_rmnet_flags { __u32 mask; }; +/* MCTP section */ + +enum { + IFLA_MCTP_UNSPEC, + IFLA_MCTP_NET, + __IFLA_MCTP_MAX, +}; + +#define IFLA_MCTP_MAX (__IFLA_MCTP_MAX - 1) + #endif /* _UAPI_LINUX_IF_LINK_H */ diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index a067410ebea5..1daa45268de2 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -269,6 +269,7 @@ struct kvm_xen_exit { #define KVM_EXIT_AP_RESET_HOLD 32 #define KVM_EXIT_X86_BUS_LOCK 33 #define KVM_EXIT_XEN 34 +#define KVM_EXIT_RISCV_SBI 35 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -397,13 +398,23 @@ struct kvm_run { * "ndata" is correct, that new fields are enumerated in "flags", * and that each flag enumerates fields that are 64-bit aligned * and sized (so that ndata+internal.data[] is valid/accurate). + * + * Space beyond the defined fields may be used to store arbitrary + * debug information relating to the emulation failure. It is + * accounted for in "ndata" but the format is unspecified and is + * not represented in "flags". Any such information is *not* ABI! */ struct { __u32 suberror; __u32 ndata; __u64 flags; - __u8 insn_size; - __u8 insn_bytes[15]; + union { + struct { + __u8 insn_size; + __u8 insn_bytes[15]; + }; + }; + /* Arbitrary debug data may follow. */ } emulation_failure; /* KVM_EXIT_OSI */ struct { @@ -469,6 +480,13 @@ struct kvm_run { } msr; /* KVM_EXIT_XEN */ struct kvm_xen_exit xen; + /* KVM_EXIT_RISCV_SBI */ + struct { + unsigned long extension_id; + unsigned long function_id; + unsigned long args[6]; + unsigned long ret[2]; + } riscv_sbi; /* Fix the size of the union. */ char padding[256]; }; @@ -1112,6 +1130,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_BINARY_STATS_FD 203 #define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204 #define KVM_CAP_ARM_MTE 205 +#define KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM 206 #ifdef KVM_CAP_IRQ_ROUTING @@ -1223,11 +1242,16 @@ struct kvm_irqfd { /* Do not use 1, KVM_CHECK_EXTENSION returned it before we had flags. 
*/ #define KVM_CLOCK_TSC_STABLE 2 +#define KVM_CLOCK_REALTIME (1 << 2) +#define KVM_CLOCK_HOST_TSC (1 << 3) struct kvm_clock_data { __u64 clock; __u32 flags; - __u32 pad[9]; + __u32 pad0; + __u64 realtime; + __u64 host_tsc; + __u32 pad[4]; }; /* For KVM_CAP_SW_TLB */ diff --git a/tools/lib/bpf/bpf_gen_internal.h b/tools/lib/bpf/bpf_gen_internal.h index d26e5472fe50..6f3df004479b 100644 --- a/tools/lib/bpf/bpf_gen_internal.h +++ b/tools/lib/bpf/bpf_gen_internal.h @@ -45,8 +45,8 @@ struct bpf_gen { int nr_fd_array; }; -void bpf_gen__init(struct bpf_gen *gen, int log_level); -int bpf_gen__finish(struct bpf_gen *gen); +void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps); +int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps); void bpf_gen__free(struct bpf_gen *gen); void bpf_gen__load_btf(struct bpf_gen *gen, const void *raw_data, __u32 raw_size); void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_params *map_attr, int map_idx); diff --git a/tools/lib/bpf/gen_loader.c b/tools/lib/bpf/gen_loader.c index 502dea53a742..9934851ccde7 100644 --- a/tools/lib/bpf/gen_loader.c +++ b/tools/lib/bpf/gen_loader.c @@ -18,7 +18,7 @@ #define MAX_USED_MAPS 64 #define MAX_USED_PROGS 32 #define MAX_KFUNC_DESCS 256 -#define MAX_FD_ARRAY_SZ (MAX_USED_PROGS + MAX_KFUNC_DESCS) +#define MAX_FD_ARRAY_SZ (MAX_USED_MAPS + MAX_KFUNC_DESCS) /* The following structure describes the stack layout of the loader program. * In addition R6 contains the pointer to context. @@ -33,8 +33,8 @@ */ struct loader_stack { __u32 btf_fd; - __u32 prog_fd[MAX_USED_PROGS]; __u32 inner_map_fd; + __u32 prog_fd[MAX_USED_PROGS]; }; #define stack_off(field) \ @@ -42,6 +42,11 @@ struct loader_stack { #define attr_field(attr, field) (attr + offsetof(union bpf_attr, field)) +static int blob_fd_array_off(struct bpf_gen *gen, int index) +{ + return gen->fd_array + index * sizeof(int); +} + static int realloc_insn_buf(struct bpf_gen *gen, __u32 size) { size_t off = gen->insn_cur - gen->insn_start; @@ -102,11 +107,15 @@ static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn in emit(gen, insn2); } -void bpf_gen__init(struct bpf_gen *gen, int log_level) +static int add_data(struct bpf_gen *gen, const void *data, __u32 size); +static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off); + +void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps) { - size_t stack_sz = sizeof(struct loader_stack); + size_t stack_sz = sizeof(struct loader_stack), nr_progs_sz; int i; + gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int)); gen->log_level = log_level; /* save ctx pointer into R6 */ emit(gen, BPF_MOV64_REG(BPF_REG_6, BPF_REG_1)); @@ -118,19 +127,27 @@ void bpf_gen__init(struct bpf_gen *gen, int log_level) emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0)); emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel)); + /* amount of stack actually used, only used to calculate iterations, not stack offset */ + nr_progs_sz = offsetof(struct loader_stack, prog_fd[nr_progs]); /* jump over cleanup code */ emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, - /* size of cleanup code below */ - (stack_sz / 4) * 3 + 2)); + /* size of cleanup code below (including map fd cleanup) */ + (nr_progs_sz / 4) * 3 + 2 + + /* 6 insns for emit_sys_close_blob, + * 6 insns for debug_regs in emit_sys_close_blob + */ + nr_maps * (6 + (gen->log_level ? 
6 : 0)))); /* remember the label where all error branches will jump to */ gen->cleanup_label = gen->insn_cur - gen->insn_start; /* emit cleanup code: close all temp FDs */ - for (i = 0; i < stack_sz; i += 4) { + for (i = 0; i < nr_progs_sz; i += 4) { emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -stack_sz + i)); emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, 1)); emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close)); } + for (i = 0; i < nr_maps; i++) + emit_sys_close_blob(gen, blob_fd_array_off(gen, i)); /* R7 contains the error code from sys_bpf. Copy it into R0 and exit. */ emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7)); emit(gen, BPF_EXIT_INSN()); @@ -160,8 +177,6 @@ static int add_data(struct bpf_gen *gen, const void *data, __u32 size) */ static int add_map_fd(struct bpf_gen *gen) { - if (!gen->fd_array) - gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int)); if (gen->nr_maps == MAX_USED_MAPS) { pr_warn("Total maps exceeds %d\n", MAX_USED_MAPS); gen->error = -E2BIG; @@ -174,8 +189,6 @@ static int add_kfunc_btf_fd(struct bpf_gen *gen) { int cur; - if (!gen->fd_array) - gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int)); if (gen->nr_fd_array == MAX_KFUNC_DESCS) { cur = add_data(gen, NULL, sizeof(int)); return (cur - gen->fd_array) / sizeof(int); @@ -183,11 +196,6 @@ static int add_kfunc_btf_fd(struct bpf_gen *gen) return MAX_USED_MAPS + gen->nr_fd_array++; } -static int blob_fd_array_off(struct bpf_gen *gen, int index) -{ - return gen->fd_array + index * sizeof(int); -} - static int insn_bytes_to_bpf_size(__u32 sz) { switch (sz) { @@ -359,10 +367,15 @@ static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off) __emit_sys_close(gen); } -int bpf_gen__finish(struct bpf_gen *gen) +int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps) { int i; + if (nr_progs != gen->nr_progs || nr_maps != gen->nr_maps) { + pr_warn("progs/maps mismatch\n"); + gen->error = -EFAULT; + return gen->error; + } emit_sys_close_stack(gen, stack_off(btf_fd)); for (i = 0; i < gen->nr_progs; i++) move_stack2ctx(gen, diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index a1bea1953df6..7c74342bb668 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -7258,7 +7258,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr) } if (obj->gen_loader) - bpf_gen__init(obj->gen_loader, attr->log_level); + bpf_gen__init(obj->gen_loader, attr->log_level, obj->nr_programs, obj->nr_maps); err = bpf_object__probe_loading(obj); err = err ? 
: bpf_object__load_vmlinux_btf(obj, false); @@ -7277,7 +7277,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr) for (i = 0; i < obj->nr_maps; i++) obj->maps[i].fd = -1; if (!err) - err = bpf_gen__finish(obj->gen_loader); + err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps); } /* clean up fd_array */ diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index 81a4c543ff7e..4b384c907027 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -375,6 +375,7 @@ static int read_symbols(struct elf *elf) return -1; } memset(sym, 0, sizeof(*sym)); + INIT_LIST_HEAD(&sym->pv_target); sym->alias = sym; sym->idx = i; diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c index c90c7084e45a..bdf699f6552b 100644 --- a/tools/objtool/objtool.c +++ b/tools/objtool/objtool.c @@ -153,6 +153,10 @@ void objtool_pv_add(struct objtool_file *f, int idx, struct symbol *func) !strcmp(func->name, "_paravirt_ident_64")) return; + /* already added this function */ + if (!list_empty(&func->pv_target)) + return; + list_add(&func->pv_target, &f->pv_ops[idx].targets); f->pv_ops[idx].clean = false; } diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index 07e65a061fd3..3df74cf5651a 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -271,8 +271,6 @@ endif FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS) FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS) -FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS) -FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS) FEATURE_CHECK_LDFLAGS-libaio = -lrt @@ -1010,6 +1008,9 @@ ifndef NO_AUXTRACE ifndef NO_AUXTRACE $(call detected,CONFIG_AUXTRACE) CFLAGS += -DHAVE_AUXTRACE_SUPPORT + ifeq ($(feature-reallocarray), 0) + CFLAGS += -DCOMPAT_NEED_REALLOCARRAY + endif endif endif diff --git a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl index 1ca7bc337932..e2c481fcede6 100644 --- a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl +++ b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl @@ -363,3 +363,4 @@ 446 n64 landlock_restrict_self sys_landlock_restrict_self # 447 reserved for memfd_secret 448 n64 process_mrelease sys_process_mrelease +449 n64 futex_waitv sys_futex_waitv diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl index 7bef917cc84e..15109af9d075 100644 --- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl +++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl @@ -528,3 +528,4 @@ 446 common landlock_restrict_self sys_landlock_restrict_self # 447 reserved for memfd_secret 448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl index df5261e5cfe1..ed9c5c2eafad 100644 --- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl +++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl @@ -451,3 +451,4 @@ 446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self # 447 reserved for memfd_secret 448 common process_mrelease sys_process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv sys_futex_waitv diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index bc5259db5fd9..b9d6306cc14e 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c @@ -820,7 +820,7 @@ static int __cmd_inject(struct perf_inject 
*inject) inject->tool.ordered_events = true; inject->tool.ordering_requires_timestamps = true; /* Allow space in the header for new attributes */ - output_data_offset = 4096; + output_data_offset = roundup(8192 + session->header.data_offset, 4096); if (inject->strip) strip_init(inject); } diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 8167ebfe776a..8ae400429870 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -619,14 +619,17 @@ static int report__browse_hists(struct report *rep) int ret; struct perf_session *session = rep->session; struct evlist *evlist = session->evlist; - const char *help = perf_tip(system_path(TIPDIR)); + char *help = NULL, *path = NULL; - if (help == NULL) { + path = system_path(TIPDIR); + if (perf_tip(&help, path) || help == NULL) { /* fallback for people who don't install perf ;-) */ - help = perf_tip(DOCDIR); - if (help == NULL) - help = "Cannot load tips.txt file, please install perf!"; + free(path); + path = system_path(DOCDIR); + if (perf_tip(&help, path) || help == NULL) + help = strdup("Cannot load tips.txt file, please install perf!"); } + free(path); switch (use_browser) { case 1: @@ -651,7 +654,7 @@ static int report__browse_hists(struct report *rep) ret = evlist__tty_browse_hists(evlist, rep, help); break; } - + free(help); return ret; } diff --git a/tools/perf/tests/event_update.c b/tools/perf/tests/event_update.c index fbb68deba59f..d01532d40acb 100644 --- a/tools/perf/tests/event_update.c +++ b/tools/perf/tests/event_update.c @@ -88,7 +88,6 @@ static int test__event_update(struct test_suite *test __maybe_unused, int subtes struct evsel *evsel; struct event_name tmp; struct evlist *evlist = evlist__new_default(); - char *unit = strdup("KRAVA"); TEST_ASSERT_VAL("failed to get evlist", evlist); @@ -99,7 +98,8 @@ static int test__event_update(struct test_suite *test __maybe_unused, int subtes perf_evlist__id_add(&evlist->core, &evsel->core, 0, 0, 123); - evsel->unit = unit; + free((char *)evsel->unit); + evsel->unit = strdup("KRAVA"); TEST_ASSERT_VAL("failed to synthesize attr update unit", !perf_event__synthesize_event_update_unit(NULL, evsel, process_event_unit)); @@ -119,7 +119,6 @@ static int test__event_update(struct test_suite *test __maybe_unused, int subtes TEST_ASSERT_VAL("failed to synthesize attr update cpus", !perf_event__synthesize_event_update_cpus(&tmp.tool, evsel, process_event_cpus)); - free(unit); evlist__delete(evlist); return 0; } diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c index c895de481fe1..d54c5371c6a6 100644 --- a/tools/perf/tests/expr.c +++ b/tools/perf/tests/expr.c @@ -169,7 +169,9 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u TEST_ASSERT_VAL("#num_dies", expr__parse(&num_dies, ctx, "#num_dies") == 0); TEST_ASSERT_VAL("#num_cores >= #num_dies", num_cores >= num_dies); TEST_ASSERT_VAL("#num_packages", expr__parse(&num_packages, ctx, "#num_packages") == 0); - TEST_ASSERT_VAL("#num_dies >= #num_packages", num_dies >= num_packages); + + if (num_dies) // Some platforms do not have CPU die support, for example s390 + TEST_ASSERT_VAL("#num_dies >= #num_packages", num_dies >= num_packages); /* * Source count returns the number of events aggregating in a leader diff --git a/tools/perf/tests/parse-metric.c b/tools/perf/tests/parse-metric.c index 574b7e4efd3a..07b6f4ec024f 100644 --- a/tools/perf/tests/parse-metric.c +++ b/tools/perf/tests/parse-metric.c @@ -109,6 +109,7 @@ static void load_runtime_stat(struct runtime_stat *st, 
struct evlist *evlist, struct evsel *evsel; u64 count; + perf_stat__reset_shadow_stats(); evlist__for_each_entry(evlist, evsel) { count = find_value(evsel->name, vals); perf_stat__update_shadow_stats(evsel, count, 0, st); diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c index b669d22f2b13..07f2411b0ad4 100644 --- a/tools/perf/tests/sample-parsing.c +++ b/tools/perf/tests/sample-parsing.c @@ -36,7 +36,7 @@ * These are based on the input value (213) specified * in branch_stack variable. */ -#define BS_EXPECTED_BE 0xa00d000000000000 +#define BS_EXPECTED_BE 0xa000d00000000000 #define BS_EXPECTED_LE 0xd5000000 #define FLAG(s) s->branch_stack->entries[i].flags diff --git a/tools/perf/tests/wp.c b/tools/perf/tests/wp.c index 820d942b30c3..9d4c45184e71 100644 --- a/tools/perf/tests/wp.c +++ b/tools/perf/tests/wp.c @@ -21,6 +21,7 @@ do { \ volatile u64 data1; volatile u8 data2[3]; +#ifndef __s390x__ static int wp_read(int fd, long long *count, int size) { int ret = read(fd, count, size); @@ -48,7 +49,6 @@ static void get__perf_event_attr(struct perf_event_attr *attr, int wp_type, attr->exclude_hv = 1; } -#ifndef __s390x__ static int __event(int wp_type, void *wp_addr, unsigned long wp_len) { int fd; diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index c1f24d004852..5075ecead5f3 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c @@ -535,6 +535,18 @@ struct perf_hpp_list perf_hpp_list = { #undef __HPP_SORT_ACC_FN #undef __HPP_SORT_RAW_FN +static void fmt_free(struct perf_hpp_fmt *fmt) +{ + /* + * At this point fmt should be completely + * unhooked, if not it's a bug. + */ + BUG_ON(!list_empty(&fmt->list)); + BUG_ON(!list_empty(&fmt->sort_list)); + + if (fmt->free) + fmt->free(fmt); +} void perf_hpp__init(void) { @@ -598,9 +610,10 @@ void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list, list_add(&format->sort_list, &list->sorts); } -void perf_hpp__column_unregister(struct perf_hpp_fmt *format) +static void perf_hpp__column_unregister(struct perf_hpp_fmt *format) { list_del_init(&format->list); + fmt_free(format); } void perf_hpp__cancel_cumulate(void) @@ -672,19 +685,6 @@ next: } -static void fmt_free(struct perf_hpp_fmt *fmt) -{ - /* - * At this point fmt should be completely - * unhooked, if not it's a bug. 
- */ - BUG_ON(!list_empty(&fmt->list)); - BUG_ON(!list_empty(&fmt->sort_list)); - - if (fmt->free) - fmt->free(fmt); -} - void perf_hpp__reset_output_field(struct perf_hpp_list *list) { struct perf_hpp_fmt *fmt, *tmp; diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c index 4748bcfe61de..fccac06b573a 100644 --- a/tools/perf/util/arm-spe.c +++ b/tools/perf/util/arm-spe.c @@ -51,6 +51,7 @@ struct arm_spe { u8 timeless_decoding; u8 data_queued; + u64 sample_type; u8 sample_flc; u8 sample_llc; u8 sample_tlb; @@ -287,6 +288,12 @@ static void arm_spe_prep_sample(struct arm_spe *spe, event->sample.header.size = sizeof(struct perf_event_header); } +static int arm_spe__inject_event(union perf_event *event, struct perf_sample *sample, u64 type) +{ + event->header.size = perf_event__sample_event_size(sample, type, 0); + return perf_event__synthesize_sample(event, type, 0, sample); +} + static inline int arm_spe_deliver_synth_event(struct arm_spe *spe, struct arm_spe_queue *speq __maybe_unused, @@ -295,6 +302,12 @@ arm_spe_deliver_synth_event(struct arm_spe *spe, { int ret; + if (spe->synth_opts.inject) { + ret = arm_spe__inject_event(event, sample, spe->sample_type); + if (ret) + return ret; + } + ret = perf_session__deliver_synth_event(spe->session, event, sample); if (ret) pr_err("ARM SPE: failed to deliver event, error %d\n", ret); @@ -986,6 +999,8 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session) else attr.sample_type |= PERF_SAMPLE_TIME; + spe->sample_type = attr.sample_type; + attr.exclude_user = evsel->core.attr.exclude_user; attr.exclude_kernel = evsel->core.attr.exclude_kernel; attr.exclude_hv = evsel->core.attr.exclude_hv; diff --git a/tools/perf/util/bpf_skel/bperf.h b/tools/perf/util/bpf_skel/bperf.h deleted file mode 100644 index 186a5551ddb9..000000000000 --- a/tools/perf/util/bpf_skel/bperf.h +++ /dev/null @@ -1,14 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -// Copyright (c) 2021 Facebook - -#ifndef __BPERF_STAT_H -#define __BPERF_STAT_H - -typedef struct { - __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); - __uint(key_size, sizeof(__u32)); - __uint(value_size, sizeof(struct bpf_perf_event_value)); - __uint(max_entries, 1); -} reading_map; - -#endif /* __BPERF_STAT_H */ diff --git a/tools/perf/util/bpf_skel/bperf_follower.bpf.c b/tools/perf/util/bpf_skel/bperf_follower.bpf.c index b8fa3cb2da23..f193998530d4 100644 --- a/tools/perf/util/bpf_skel/bperf_follower.bpf.c +++ b/tools/perf/util/bpf_skel/bperf_follower.bpf.c @@ -1,14 +1,23 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) // Copyright (c) 2021 Facebook -#include <linux/bpf.h> -#include <linux/perf_event.h> +#include "vmlinux.h" #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include "bperf.h" #include "bperf_u.h" -reading_map diff_readings SEC(".maps"); -reading_map accum_readings SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(struct bpf_perf_event_value)); + __uint(max_entries, 1); +} diff_readings SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(struct bpf_perf_event_value)); + __uint(max_entries, 1); +} accum_readings SEC(".maps"); struct { __uint(type, BPF_MAP_TYPE_HASH); diff --git a/tools/perf/util/bpf_skel/bperf_leader.bpf.c b/tools/perf/util/bpf_skel/bperf_leader.bpf.c index 4f70d1459e86..e2a2d4cd7779 100644 --- a/tools/perf/util/bpf_skel/bperf_leader.bpf.c +++ 
b/tools/perf/util/bpf_skel/bperf_leader.bpf.c @@ -1,10 +1,8 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) // Copyright (c) 2021 Facebook -#include <linux/bpf.h> -#include <linux/perf_event.h> +#include "vmlinux.h" #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include "bperf.h" struct { __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); @@ -13,8 +11,19 @@ struct { __uint(map_flags, BPF_F_PRESERVE_ELEMS); } events SEC(".maps"); -reading_map prev_readings SEC(".maps"); -reading_map diff_readings SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(struct bpf_perf_event_value)); + __uint(max_entries, 1); +} prev_readings SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(struct bpf_perf_event_value)); + __uint(max_entries, 1); +} diff_readings SEC(".maps"); SEC("raw_tp/sched_switch") int BPF_PROG(on_switch) diff --git a/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c b/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c index ab12b4c4ece2..97037d3b3d9f 100644 --- a/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c +++ b/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) // Copyright (c) 2020 Facebook -#include <linux/bpf.h> +#include "vmlinux.h" #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 95ffed66369c..c59331eea1d9 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -44,13 +44,16 @@ struct perf_event_attr; /* perf sample has a 16-bit size limit */ #define PERF_SAMPLE_MAX_SIZE (1 << 16) +/* number of registers is bound by the number of bits in regs_dump::mask (64) */ +#define PERF_SAMPLE_REGS_CACHE_SIZE (8 * sizeof(u64)) + struct regs_dump { u64 abi; u64 mask; u64 *regs; /* Cached values/mask filled by first register access. */ - u64 cache_regs[PERF_REGS_MAX]; + u64 cache_regs[PERF_SAMPLE_REGS_CACHE_SIZE]; u64 cache_mask; }; diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index a59fb2ecb84e..ac0127be0459 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -241,7 +241,7 @@ void evsel__init(struct evsel *evsel, { perf_evsel__init(&evsel->core, attr, idx); evsel->tracking = !idx; - evsel->unit = ""; + evsel->unit = strdup(""); evsel->scale = 1.0; evsel->max_events = ULONG_MAX; evsel->evlist = NULL; @@ -276,13 +276,8 @@ struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx) } if (evsel__is_clock(evsel)) { - /* - * The evsel->unit points to static alias->unit - * so it's ok to use static string in here. 
- */ - static const char *unit = "msec"; - - evsel->unit = unit; + free((char *)evsel->unit); + evsel->unit = strdup("msec"); evsel->scale = 1e-6; } @@ -420,7 +415,11 @@ struct evsel *evsel__clone(struct evsel *orig) evsel->max_events = orig->max_events; evsel->tool_event = orig->tool_event; - evsel->unit = orig->unit; + free((char *)evsel->unit); + evsel->unit = strdup(orig->unit); + if (evsel->unit == NULL) + goto out_err; + evsel->scale = orig->scale; evsel->snapshot = orig->snapshot; evsel->per_pkg = orig->per_pkg; @@ -1441,6 +1440,7 @@ void evsel__exit(struct evsel *evsel) zfree(&evsel->group_name); zfree(&evsel->name); zfree(&evsel->pmu_name); + zfree(&evsel->unit); zfree(&evsel->metric_id); evsel__zero_per_pkg(evsel); hashmap__free(evsel->per_pkg_mask); diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index fda8d14c891f..e3c1a532d059 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -2321,6 +2321,7 @@ out: #define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \ static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \ {\ + free(ff->ph->env.__feat_env); \ ff->ph->env.__feat_env = do_read_string(ff); \ return ff->ph->env.__feat_env ? 0 : -ENOMEM; \ } @@ -4124,6 +4125,7 @@ int perf_event__process_feature(struct perf_session *session, struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event; int type = fe->header.type; u64 feat = fe->feat_id; + int ret = 0; if (type < 0 || type >= PERF_RECORD_HEADER_MAX) { pr_warning("invalid record type %d in pipe-mode\n", type); @@ -4141,11 +4143,13 @@ int perf_event__process_feature(struct perf_session *session, ff.size = event->header.size - sizeof(*fe); ff.ph = &session->header; - if (feat_ops[feat].process(&ff, NULL)) - return -1; + if (feat_ops[feat].process(&ff, NULL)) { + ret = -1; + goto out; + } if (!feat_ops[feat].print || !tool->show_feat_hdr) - return 0; + goto out; if (!feat_ops[feat].full_only || tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) { @@ -4154,8 +4158,9 @@ int perf_event__process_feature(struct perf_session *session, fprintf(stdout, "# %s info available, use -I to display\n", feat_ops[feat].name); } - - return 0; +out: + free_event_desc(ff.events); + return ret; } size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp) @@ -4257,9 +4262,11 @@ int perf_event__process_event_update(struct perf_tool *tool __maybe_unused, switch (ev->type) { case PERF_EVENT_UPDATE__UNIT: + free((char *)evsel->unit); evsel->unit = strdup(ev->data); break; case PERF_EVENT_UPDATE__NAME: + free(evsel->name); evsel->name = strdup(ev->data); break; case PERF_EVENT_UPDATE__SCALE: @@ -4268,11 +4275,11 @@ int perf_event__process_event_update(struct perf_tool *tool __maybe_unused, break; case PERF_EVENT_UPDATE__CPUS: ev_cpus = (struct perf_record_event_update_cpus *)ev->data; - map = cpu_map__new_data(&ev_cpus->cpus); - if (map) + if (map) { + perf_cpu_map__put(evsel->core.own_cpus); evsel->core.own_cpus = map; - else + } else pr_err("failed to get event_update cpus\n"); default: break; diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 65fe65ba03c2..b776465e04ef 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -289,15 +289,10 @@ static long hist_time(unsigned long htime) return htime; } -static void he_stat__add_period(struct he_stat *he_stat, u64 period, - u64 weight, u64 ins_lat, u64 p_stage_cyc) +static void he_stat__add_period(struct he_stat *he_stat, u64 period) { - he_stat->period += period; - he_stat->weight += weight; 
he_stat->nr_events += 1; - he_stat->ins_lat += ins_lat; - he_stat->p_stage_cyc += p_stage_cyc; } static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src) @@ -308,9 +303,6 @@ static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src) dest->period_guest_sys += src->period_guest_sys; dest->period_guest_us += src->period_guest_us; dest->nr_events += src->nr_events; - dest->weight += src->weight; - dest->ins_lat += src->ins_lat; - dest->p_stage_cyc += src->p_stage_cyc; } static void he_stat__decay(struct he_stat *he_stat) @@ -598,9 +590,6 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists, struct hist_entry *he; int64_t cmp; u64 period = entry->stat.period; - u64 weight = entry->stat.weight; - u64 ins_lat = entry->stat.ins_lat; - u64 p_stage_cyc = entry->stat.p_stage_cyc; bool leftmost = true; p = &hists->entries_in->rb_root.rb_node; @@ -619,11 +608,11 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists, if (!cmp) { if (sample_self) { - he_stat__add_period(&he->stat, period, weight, ins_lat, p_stage_cyc); + he_stat__add_period(&he->stat, period); hist_entry__add_callchain_period(he, period); } if (symbol_conf.cumulate_callchain) - he_stat__add_period(he->stat_acc, period, weight, ins_lat, p_stage_cyc); + he_stat__add_period(he->stat_acc, period); /* * This mem info was allocated from sample__resolve_mem @@ -733,9 +722,6 @@ __hists__add_entry(struct hists *hists, .stat = { .nr_events = 1, .period = sample->period, - .weight = sample->weight, - .ins_lat = sample->ins_lat, - .p_stage_cyc = sample->p_stage_cyc, }, .parent = sym_parent, .filtered = symbol__parent_filter(sym_parent) | al->filtered, @@ -748,6 +734,9 @@ __hists__add_entry(struct hists *hists, .raw_size = sample->raw_size, .ops = ops, .time = hist_time(sample->time), + .weight = sample->weight, + .ins_lat = sample->ins_lat, + .p_stage_cyc = sample->p_stage_cyc, }, *he = hists__findnew_entry(hists, &entry, al, sample_self); if (!hists->has_callchains && he && he->callchain_size != 0) diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 5343b62476e6..621f35ae1efa 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -369,7 +369,6 @@ enum { }; void perf_hpp__init(void); -void perf_hpp__column_unregister(struct perf_hpp_fmt *format); void perf_hpp__cancel_cumulate(void); void perf_hpp__setup_output_field(struct perf_hpp_list *list); void perf_hpp__reset_output_field(struct perf_hpp_list *list); diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c index 5f83937bf8f3..0e013c2d9eb4 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c @@ -1205,61 +1205,69 @@ out_no_progress: static bool intel_pt_fup_event(struct intel_pt_decoder *decoder) { + enum intel_pt_sample_type type = decoder->state.type; bool ret = false; + decoder->state.type &= ~INTEL_PT_BRANCH; + if (decoder->set_fup_tx_flags) { decoder->set_fup_tx_flags = false; decoder->tx_flags = decoder->fup_tx_flags; - decoder->state.type = INTEL_PT_TRANSACTION; + decoder->state.type |= INTEL_PT_TRANSACTION; if (decoder->fup_tx_flags & INTEL_PT_ABORT_TX) decoder->state.type |= INTEL_PT_BRANCH; - decoder->state.from_ip = decoder->ip; - decoder->state.to_ip = 0; decoder->state.flags = decoder->fup_tx_flags; - return true; + ret = true; } if (decoder->set_fup_ptw) { decoder->set_fup_ptw = false; - decoder->state.type = INTEL_PT_PTW; + decoder->state.type |= INTEL_PT_PTW; 
decoder->state.flags |= INTEL_PT_FUP_IP; - decoder->state.from_ip = decoder->ip; - decoder->state.to_ip = 0; decoder->state.ptw_payload = decoder->fup_ptw_payload; - return true; + ret = true; } if (decoder->set_fup_mwait) { decoder->set_fup_mwait = false; - decoder->state.type = INTEL_PT_MWAIT_OP; - decoder->state.from_ip = decoder->ip; - decoder->state.to_ip = 0; + decoder->state.type |= INTEL_PT_MWAIT_OP; decoder->state.mwait_payload = decoder->fup_mwait_payload; ret = true; } if (decoder->set_fup_pwre) { decoder->set_fup_pwre = false; decoder->state.type |= INTEL_PT_PWR_ENTRY; - decoder->state.type &= ~INTEL_PT_BRANCH; - decoder->state.from_ip = decoder->ip; - decoder->state.to_ip = 0; decoder->state.pwre_payload = decoder->fup_pwre_payload; ret = true; } if (decoder->set_fup_exstop) { decoder->set_fup_exstop = false; decoder->state.type |= INTEL_PT_EX_STOP; - decoder->state.type &= ~INTEL_PT_BRANCH; decoder->state.flags |= INTEL_PT_FUP_IP; - decoder->state.from_ip = decoder->ip; - decoder->state.to_ip = 0; ret = true; } if (decoder->set_fup_bep) { decoder->set_fup_bep = false; decoder->state.type |= INTEL_PT_BLK_ITEMS; - decoder->state.type &= ~INTEL_PT_BRANCH; + ret = true; + } + if (decoder->overflow) { + decoder->overflow = false; + if (!ret && !decoder->pge) { + if (decoder->hop) { + decoder->state.type = 0; + decoder->pkt_state = INTEL_PT_STATE_RESAMPLE; + } + decoder->pge = true; + decoder->state.type |= INTEL_PT_BRANCH | INTEL_PT_TRACE_BEGIN; + decoder->state.from_ip = 0; + decoder->state.to_ip = decoder->ip; + return true; + } + } + if (ret) { decoder->state.from_ip = decoder->ip; decoder->state.to_ip = 0; - ret = true; + } else { + decoder->state.type = type; } return ret; } @@ -1608,7 +1616,16 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder) intel_pt_clear_tx_flags(decoder); intel_pt_set_nr(decoder); decoder->timestamp_insn_cnt = 0; - decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC; + decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; + decoder->state.from_ip = decoder->ip; + decoder->ip = 0; + decoder->pge = false; + decoder->set_fup_tx_flags = false; + decoder->set_fup_ptw = false; + decoder->set_fup_mwait = false; + decoder->set_fup_pwre = false; + decoder->set_fup_exstop = false; + decoder->set_fup_bep = false; decoder->overflow = true; return -EOVERFLOW; } @@ -2666,6 +2683,8 @@ static int intel_pt_scan_for_psb(struct intel_pt_decoder *decoder); /* Hop mode: Ignore TNT, do not walk code, but get ip from FUPs and TIPs */ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, int *err) { + *err = 0; + /* Leap from PSB to PSB, getting ip from FUP within PSB+ */ if (decoder->leap && !decoder->in_psb && decoder->packet.type != INTEL_PT_PSB) { *err = intel_pt_scan_for_psb(decoder); @@ -2678,6 +2697,7 @@ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, in return HOP_IGNORE; case INTEL_PT_TIP_PGD: + decoder->pge = false; if (!decoder->packet.count) { intel_pt_set_nr(decoder); return HOP_IGNORE; @@ -2705,18 +2725,21 @@ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, in if (!decoder->packet.count) return HOP_IGNORE; intel_pt_set_ip(decoder); - if (intel_pt_fup_event(decoder)) - return HOP_RETURN; - if (!decoder->branch_enable) + if (decoder->set_fup_mwait || decoder->set_fup_pwre) + *no_tip = true; + if (!decoder->branch_enable || !decoder->pge) *no_tip = true; if (*no_tip) { decoder->state.type = INTEL_PT_INSTRUCTION; decoder->state.from_ip = decoder->ip; decoder->state.to_ip = 0; + 
intel_pt_fup_event(decoder); return HOP_RETURN; } + intel_pt_fup_event(decoder); + decoder->state.type |= INTEL_PT_INSTRUCTION | INTEL_PT_BRANCH; *err = intel_pt_walk_fup_tip(decoder); - if (!*err) + if (!*err && decoder->state.to_ip) decoder->pkt_state = INTEL_PT_STATE_RESAMPLE; return HOP_RETURN; @@ -2897,7 +2920,7 @@ static bool intel_pt_psb_with_fup(struct intel_pt_decoder *decoder, int *err) { struct intel_pt_psb_info data = { .fup = false }; - if (!decoder->branch_enable || !decoder->pge) + if (!decoder->branch_enable) return false; intel_pt_pkt_lookahead(decoder, intel_pt_psb_lookahead_cb, &data); @@ -2924,6 +2947,7 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder) if (err) return err; next: + err = 0; if (decoder->cyc_threshold) { if (decoder->sample_cyc && last_packet_type != INTEL_PT_CYC) decoder->sample_cyc = false; @@ -2962,6 +2986,7 @@ next: case INTEL_PT_TIP_PGE: { decoder->pge = true; + decoder->overflow = false; intel_pt_mtc_cyc_cnt_pge(decoder); intel_pt_set_nr(decoder); if (decoder->packet.count == 0) { @@ -2999,7 +3024,7 @@ next: break; } intel_pt_set_last_ip(decoder); - if (!decoder->branch_enable) { + if (!decoder->branch_enable || !decoder->pge) { decoder->ip = decoder->last_ip; if (intel_pt_fup_event(decoder)) return 0; @@ -3467,10 +3492,10 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder) decoder->set_fup_pwre = false; decoder->set_fup_exstop = false; decoder->set_fup_bep = false; + decoder->overflow = false; if (!decoder->branch_enable) { decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; - decoder->overflow = false; decoder->state.type = 0; /* Do not have a sample */ return 0; } @@ -3485,7 +3510,6 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder) decoder->pkt_state = INTEL_PT_STATE_RESAMPLE; else decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; - decoder->overflow = false; decoder->state.from_ip = 0; decoder->state.to_ip = decoder->ip; @@ -3607,7 +3631,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder) } decoder->have_last_ip = true; - decoder->pkt_state = INTEL_PT_STATE_NO_IP; + decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; err = intel_pt_walk_psb(decoder); if (err) @@ -3704,7 +3728,8 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder) if (err) { decoder->state.err = intel_pt_ext_err(err); - decoder->state.from_ip = decoder->ip; + if (err != -EOVERFLOW) + decoder->state.from_ip = decoder->ip; intel_pt_update_sample_time(decoder); decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt; intel_pt_set_nr(decoder); diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c index 556a893508da..10c3187e4c5a 100644 --- a/tools/perf/util/intel-pt.c +++ b/tools/perf/util/intel-pt.c @@ -2565,6 +2565,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp) ptq->sync_switch = false; intel_pt_next_tid(pt, ptq); } + ptq->timestamp = state->est_timestamp; if (pt->synth_opts.errors) { err = intel_ptq_synth_error(ptq, state); if (err) diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 5bfb6f892489..ba74fdf74af9 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -402,8 +402,10 @@ static int add_event_tool(struct list_head *list, int *idx, if (!evsel) return -ENOMEM; evsel->tool_event = tool_event; - if (tool_event == PERF_TOOL_DURATION_TIME) - evsel->unit = "ns"; + if (tool_event == PERF_TOOL_DURATION_TIME) { + free((char *)evsel->unit); + evsel->unit = strdup("ns"); + } return 0; } @@ -1630,7 +1632,8 @@ int 
parse_events_add_pmu(struct parse_events_state *parse_state, if (parse_state->fake_pmu) return 0; - evsel->unit = info.unit; + free((char *)evsel->unit); + evsel->unit = strdup(info.unit); evsel->scale = info.scale; evsel->per_pkg = info.per_pkg; evsel->snapshot = info.snapshot; diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c index 5ee47ae1509c..06a7461ba864 100644 --- a/tools/perf/util/perf_regs.c +++ b/tools/perf/util/perf_regs.c @@ -25,6 +25,9 @@ int perf_reg_value(u64 *valp, struct regs_dump *regs, int id) int i, idx = 0; u64 mask = regs->mask; + if ((u64)id >= PERF_SAMPLE_REGS_CACHE_SIZE) + return -EINVAL; + if (regs->cache_mask & (1ULL << id)) goto out; diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index 563a9ba8954f..7f782a31bda3 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -461,7 +461,7 @@ get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name) struct tep_event *tp_format; tp_format = trace_event__tp_format_id(evsel->core.attr.config); - if (!tp_format) + if (IS_ERR_OR_NULL(tp_format)) return NULL; evsel->tp_format = tp_format; diff --git a/tools/perf/util/smt.c b/tools/perf/util/smt.c index 20bacd5972ad..34f1b1b1176c 100644 --- a/tools/perf/util/smt.c +++ b/tools/perf/util/smt.c @@ -15,7 +15,7 @@ int smt_on(void) if (cached) return cached_result; - if (sysfs__read_int("devices/system/cpu/smt/active", &cached_result) > 0) + if (sysfs__read_int("devices/system/cpu/smt/active", &cached_result) >= 0) goto done; ncpu = sysconf(_SC_NPROCESSORS_CONF); diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 568a88c001c6..a111065b484e 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -1325,88 +1325,68 @@ struct sort_entry sort_mispredict = { .se_width_idx = HISTC_MISPREDICT, }; -static u64 he_weight(struct hist_entry *he) -{ - return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0; -} - static int64_t -sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right) +sort__weight_cmp(struct hist_entry *left, struct hist_entry *right) { - return he_weight(left) - he_weight(right); + return left->weight - right->weight; } static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { - return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he)); + return repsep_snprintf(bf, size, "%-*llu", width, he->weight); } struct sort_entry sort_local_weight = { .se_header = "Local Weight", - .se_cmp = sort__local_weight_cmp, + .se_cmp = sort__weight_cmp, .se_snprintf = hist_entry__local_weight_snprintf, .se_width_idx = HISTC_LOCAL_WEIGHT, }; -static int64_t -sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right) -{ - return left->stat.weight - right->stat.weight; -} - static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { - return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight); + return repsep_snprintf(bf, size, "%-*llu", width, + he->weight * he->stat.nr_events); } struct sort_entry sort_global_weight = { .se_header = "Weight", - .se_cmp = sort__global_weight_cmp, + .se_cmp = sort__weight_cmp, .se_snprintf = hist_entry__global_weight_snprintf, .se_width_idx = HISTC_GLOBAL_WEIGHT, }; -static u64 he_ins_lat(struct hist_entry *he) -{ - return he->stat.nr_events ? 
he->stat.ins_lat / he->stat.nr_events : 0; -} - static int64_t -sort__local_ins_lat_cmp(struct hist_entry *left, struct hist_entry *right) +sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right) { - return he_ins_lat(left) - he_ins_lat(right); + return left->ins_lat - right->ins_lat; } static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { - return repsep_snprintf(bf, size, "%-*u", width, he_ins_lat(he)); + return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat); } struct sort_entry sort_local_ins_lat = { .se_header = "Local INSTR Latency", - .se_cmp = sort__local_ins_lat_cmp, + .se_cmp = sort__ins_lat_cmp, .se_snprintf = hist_entry__local_ins_lat_snprintf, .se_width_idx = HISTC_LOCAL_INS_LAT, }; -static int64_t -sort__global_ins_lat_cmp(struct hist_entry *left, struct hist_entry *right) -{ - return left->stat.ins_lat - right->stat.ins_lat; -} - static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { - return repsep_snprintf(bf, size, "%-*u", width, he->stat.ins_lat); + return repsep_snprintf(bf, size, "%-*u", width, + he->ins_lat * he->stat.nr_events); } struct sort_entry sort_global_ins_lat = { .se_header = "INSTR Latency", - .se_cmp = sort__global_ins_lat_cmp, + .se_cmp = sort__ins_lat_cmp, .se_snprintf = hist_entry__global_ins_lat_snprintf, .se_width_idx = HISTC_GLOBAL_INS_LAT, }; @@ -1414,13 +1394,13 @@ struct sort_entry sort_global_ins_lat = { static int64_t sort__global_p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right) { - return left->stat.p_stage_cyc - right->stat.p_stage_cyc; + return left->p_stage_cyc - right->p_stage_cyc; } static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { - return repsep_snprintf(bf, size, "%-*u", width, he->stat.p_stage_cyc); + return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc); } struct sort_entry sort_p_stage_cyc = { diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index b67c469aba79..7b7145501933 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -49,9 +49,6 @@ struct he_stat { u64 period_us; u64 period_guest_sys; u64 period_guest_us; - u64 weight; - u64 ins_lat; - u64 p_stage_cyc; u32 nr_events; }; @@ -109,6 +106,9 @@ struct hist_entry { s32 socket; s32 cpu; u64 code_page_size; + u64 weight; + u64 ins_lat; + u64 p_stage_cyc; u8 cpumode; u8 depth; diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c index 37a9492edb3e..df3c4671be72 100644 --- a/tools/perf/util/util.c +++ b/tools/perf/util/util.c @@ -379,32 +379,32 @@ fetch_kernel_version(unsigned int *puint, char *str, return 0; } -const char *perf_tip(const char *dirpath) +int perf_tip(char **strp, const char *dirpath) { struct strlist *tips; struct str_node *node; - char *tip = NULL; struct strlist_config conf = { .dirname = dirpath, .file_only = true, }; + int ret = 0; + *strp = NULL; tips = strlist__new("tips.txt", &conf); if (tips == NULL) - return errno == ENOENT ? NULL : - "Tip: check path of tips.txt or get more memory! ;-p"; + return -errno; if (strlist__nr_entries(tips) == 0) goto out; node = strlist__entry(tips, random() % strlist__nr_entries(tips)); - if (asprintf(&tip, "Tip: %s", node->s) < 0) - tip = (char *)"Tip: get more memory! 
;-)"; + if (asprintf(strp, "Tip: %s", node->s) < 0) + ret = -ENOMEM; out: strlist__delete(tips); - return tip; + return ret; } char *perf_exe(char *buf, int len) diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index ad737052e597..9f0d36ba77f2 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h @@ -39,7 +39,7 @@ int fetch_kernel_version(unsigned int *puint, #define KVER_FMT "%d.%d.%d" #define KVER_PARAM(x) KVER_VERSION(x), KVER_PATCHLEVEL(x), KVER_SUBLEVEL(x) -const char *perf_tip(const char *dirpath); +int perf_tip(char **strp, const char *dirpath); #ifndef HAVE_SCHED_GETCPU_SUPPORT int sched_getcpu(void); diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config index 331f6d30f472..cd7106876a5f 100644 --- a/tools/power/acpi/Makefile.config +++ b/tools/power/acpi/Makefile.config @@ -69,6 +69,7 @@ KERNEL_INCLUDE := $(OUTPUT)include ACPICA_INCLUDE := $(srctree)/../../../drivers/acpi/acpica CFLAGS += -D_LINUX -I$(KERNEL_INCLUDE) -I$(ACPICA_INCLUDE) CFLAGS += $(WARNINGS) +MKDIR = mkdir ifeq ($(strip $(V)),false) QUIET=@ diff --git a/tools/power/acpi/Makefile.rules b/tools/power/acpi/Makefile.rules index 2a6c170b57cd..1d7616f5d0ae 100644 --- a/tools/power/acpi/Makefile.rules +++ b/tools/power/acpi/Makefile.rules @@ -21,6 +21,7 @@ $(KERNEL_INCLUDE): $(objdir)%.o: %.c $(KERNEL_INCLUDE) $(ECHO) " CC " $(subst $(OUTPUT),,$@) + $(QUIET) $(MKDIR) -p $(objdir) 2>/dev/null $(QUIET) $(CC) -c $(CFLAGS) -o $@ $< all: $(OUTPUT)$(TOOL) diff --git a/tools/testing/radix-tree/linux/lockdep.h b/tools/testing/radix-tree/linux/lockdep.h index 565fccdfe6e9..016cff473cfc 100644 --- a/tools/testing/radix-tree/linux/lockdep.h +++ b/tools/testing/radix-tree/linux/lockdep.h @@ -1,5 +1,8 @@ #ifndef _LINUX_LOCKDEP_H #define _LINUX_LOCKDEP_H + +#include <linux/spinlock.h> + struct lock_class_key { unsigned int a; }; diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 54b0a41a3775..62fafbeb4672 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -187,7 +187,7 @@ DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool $(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) $(RUNQSLOWER_OUTPUT) $(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower \ OUTPUT=$(RUNQSLOWER_OUTPUT) VMLINUX_BTF=$(VMLINUX_BTF) \ - BPFTOOL_OUTPUT=$(BUILD_DIR)/bpftool/ \ + BPFTOOL_OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \ BPFOBJ_OUTPUT=$(BUILD_DIR)/libbpf \ BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) && \ cp $(RUNQSLOWER_OUTPUT)runqslower $@ diff --git a/tools/testing/selftests/bpf/prog_tests/helper_restricted.c b/tools/testing/selftests/bpf/prog_tests/helper_restricted.c new file mode 100644 index 000000000000..e1de5f80c3b2 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/helper_restricted.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> +#include "test_helper_restricted.skel.h" + +void test_helper_restricted(void) +{ + int prog_i = 0, prog_cnt; + int duration = 0; + + do { + struct test_helper_restricted *test; + int maybeOK; + + test = test_helper_restricted__open(); + if (!ASSERT_OK_PTR(test, "open")) + return; + + prog_cnt = test->skeleton->prog_cnt; + + for (int j = 0; j < prog_cnt; ++j) { + struct bpf_program *prog = *test->skeleton->progs[j].prog; + + maybeOK = bpf_program__set_autoload(prog, prog_i == j); + ASSERT_OK(maybeOK, "set autoload"); + } + + maybeOK = test_helper_restricted__load(test); + CHECK(!maybeOK, test->skeleton->progs[prog_i].name, "helper isn't 
restricted"); + + test_helper_restricted__destroy(test); + } while (++prog_i < prog_cnt); +} diff --git a/tools/testing/selftests/bpf/progs/test_helper_restricted.c b/tools/testing/selftests/bpf/progs/test_helper_restricted.c new file mode 100644 index 000000000000..68d64c365f90 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_helper_restricted.c @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <time.h> +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +struct timer { + struct bpf_timer t; +}; + +struct lock { + struct bpf_spin_lock l; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, struct timer); +} timers SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, struct lock); +} locks SEC(".maps"); + +static int timer_cb(void *map, int *key, struct timer *timer) +{ + return 0; +} + +static void timer_work(void) +{ + struct timer *timer; + const int key = 0; + + timer = bpf_map_lookup_elem(&timers, &key); + if (timer) { + bpf_timer_init(&timer->t, &timers, CLOCK_MONOTONIC); + bpf_timer_set_callback(&timer->t, timer_cb); + bpf_timer_start(&timer->t, 10E9, 0); + bpf_timer_cancel(&timer->t); + } +} + +static void spin_lock_work(void) +{ + const int key = 0; + struct lock *lock; + + lock = bpf_map_lookup_elem(&locks, &key); + if (lock) { + bpf_spin_lock(&lock->l); + bpf_spin_unlock(&lock->l); + } +} + +SEC("raw_tp/sys_enter") +int raw_tp_timer(void *ctx) +{ + timer_work(); + + return 0; +} + +SEC("tp/syscalls/sys_enter_nanosleep") +int tp_timer(void *ctx) +{ + timer_work(); + + return 0; +} + +SEC("kprobe/sys_nanosleep") +int kprobe_timer(void *ctx) +{ + timer_work(); + + return 0; +} + +SEC("perf_event") +int perf_event_timer(void *ctx) +{ + timer_work(); + + return 0; +} + +SEC("raw_tp/sys_enter") +int raw_tp_spin_lock(void *ctx) +{ + spin_lock_work(); + + return 0; +} + +SEC("tp/syscalls/sys_enter_nanosleep") +int tp_spin_lock(void *ctx) +{ + spin_lock_work(); + + return 0; +} + +SEC("kprobe/sys_nanosleep") +int kprobe_spin_lock(void *ctx) +{ + spin_lock_work(); + + return 0; +} + +SEC("perf_event") +int perf_event_spin_lock(void *ctx) +{ + spin_lock_work(); + + return 0; +} + +const char LICENSE[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 25afe423b3f0..465ef3f112c0 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -92,6 +92,7 @@ struct bpf_test { int fixup_map_event_output[MAX_FIXUPS]; int fixup_map_reuseport_array[MAX_FIXUPS]; int fixup_map_ringbuf[MAX_FIXUPS]; + int fixup_map_timer[MAX_FIXUPS]; /* Expected verifier log output for result REJECT or VERBOSE_ACCEPT. * Can be a tab-separated sequence of expected strings. An empty string * means no log verification. 
@@ -604,8 +605,15 @@ static int create_cgroup_storage(bool percpu) * int cnt; * struct bpf_spin_lock l; * }; + * struct bpf_timer { + * __u64 :64; + * __u64 :64; + * } __attribute__((aligned(8))); + * struct timer { + * struct bpf_timer t; + * }; */ -static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l"; +static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t"; static __u32 btf_raw_types[] = { /* int */ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ @@ -616,6 +624,11 @@ static __u32 btf_raw_types[] = { BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8), BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */ BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */ + /* struct bpf_timer */ /* [4] */ + BTF_TYPE_ENC(25, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0), 16), + /* struct timer */ /* [5] */ + BTF_TYPE_ENC(35, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16), + BTF_MEMBER_ENC(41, 4, 0), /* struct bpf_timer t; */ }; static int load_btf(void) @@ -696,6 +709,29 @@ static int create_sk_storage_map(void) return fd; } +static int create_map_timer(void) +{ + struct bpf_create_map_attr attr = { + .name = "test_map", + .map_type = BPF_MAP_TYPE_ARRAY, + .key_size = 4, + .value_size = 16, + .max_entries = 1, + .btf_key_type_id = 1, + .btf_value_type_id = 5, + }; + int fd, btf_fd; + + btf_fd = load_btf(); + if (btf_fd < 0) + return -1; + attr.btf_fd = btf_fd; + fd = bpf_create_map_xattr(&attr); + if (fd < 0) + printf("Failed to create map with timer\n"); + return fd; +} + static char bpf_vlog[UINT_MAX >> 8]; static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type, @@ -722,6 +758,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type, int *fixup_map_event_output = test->fixup_map_event_output; int *fixup_map_reuseport_array = test->fixup_map_reuseport_array; int *fixup_map_ringbuf = test->fixup_map_ringbuf; + int *fixup_map_timer = test->fixup_map_timer; if (test->fill_helper) { test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn)); @@ -907,6 +944,13 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type, fixup_map_ringbuf++; } while (*fixup_map_ringbuf); } + if (*fixup_map_timer) { + map_fds[21] = create_map_timer(); + do { + prog[*fixup_map_timer].imm = map_fds[21]; + fixup_map_timer++; + } while (*fixup_map_timer); + } } struct libcap { diff --git a/tools/testing/selftests/bpf/verifier/helper_restricted.c b/tools/testing/selftests/bpf/verifier/helper_restricted.c new file mode 100644 index 000000000000..a067b7098b97 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/helper_restricted.c @@ -0,0 +1,196 @@ +{ + "bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_KPROBE", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "unknown func bpf_ktime_get_coarse_ns", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_KPROBE, +}, +{ + "bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_TRACEPOINT", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "unknown func bpf_ktime_get_coarse_ns", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_PERF_EVENT", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = 
"unknown func bpf_ktime_get_coarse_ns", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_PERF_EVENT, +}, +{ + "bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "unknown func bpf_ktime_get_coarse_ns", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT, +}, +{ + "bpf_timer_init isn restricted in BPF_PROG_TYPE_KPROBE", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_3, 1), + BPF_EMIT_CALL(BPF_FUNC_timer_init), + BPF_EXIT_INSN(), + }, + .fixup_map_timer = { 3, 8 }, + .errstr = "tracing progs cannot use bpf_timer yet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_KPROBE, +}, +{ + "bpf_timer_init is forbidden in BPF_PROG_TYPE_PERF_EVENT", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_3, 1), + BPF_EMIT_CALL(BPF_FUNC_timer_init), + BPF_EXIT_INSN(), + }, + .fixup_map_timer = { 3, 8 }, + .errstr = "tracing progs cannot use bpf_timer yet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_PERF_EVENT, +}, +{ + "bpf_timer_init is forbidden in BPF_PROG_TYPE_TRACEPOINT", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_3, 1), + BPF_EMIT_CALL(BPF_FUNC_timer_init), + BPF_EXIT_INSN(), + }, + .fixup_map_timer = { 3, 8 }, + .errstr = "tracing progs cannot use bpf_timer yet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "bpf_timer_init is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_3, 1), + BPF_EMIT_CALL(BPF_FUNC_timer_init), + BPF_EXIT_INSN(), + }, + .fixup_map_timer = { 3, 8 }, + .errstr = "tracing progs cannot use bpf_timer yet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT, +}, +{ + "bpf_spin_lock is forbidden in BPF_PROG_TYPE_KPROBE", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_EMIT_CALL(BPF_FUNC_spin_lock), + BPF_EXIT_INSN(), + }, + .fixup_map_spin_lock = { 3 }, + .errstr = "tracing progs 
cannot use bpf_spin_lock yet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_KPROBE, +}, +{ + "bpf_spin_lock is forbidden in BPF_PROG_TYPE_TRACEPOINT", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_EMIT_CALL(BPF_FUNC_spin_lock), + BPF_EXIT_INSN(), + }, + .fixup_map_spin_lock = { 3 }, + .errstr = "tracing progs cannot use bpf_spin_lock yet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "bpf_spin_lock is forbidden in BPF_PROG_TYPE_PERF_EVENT", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_EMIT_CALL(BPF_FUNC_spin_lock), + BPF_EXIT_INSN(), + }, + .fixup_map_spin_lock = { 3 }, + .errstr = "tracing progs cannot use bpf_spin_lock yet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_PERF_EVENT, +}, +{ + "bpf_spin_lock is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_EMIT_CALL(BPF_FUNC_spin_lock), + BPF_EXIT_INSN(), + }, + .fixup_map_spin_lock = { 3 }, + .errstr = "tracing progs cannot use bpf_spin_lock yet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT, +}, diff --git a/tools/testing/selftests/bpf/verifier/map_in_map.c b/tools/testing/selftests/bpf/verifier/map_in_map.c index 2798927ee9ff..128a348b762d 100644 --- a/tools/testing/selftests/bpf/verifier/map_in_map.c +++ b/tools/testing/selftests/bpf/verifier/map_in_map.c @@ -19,6 +19,40 @@ .result = ACCEPT, }, { + "map in map state pruning", + .insns = { + BPF_ST_MEM(0, BPF_REG_10, -4, 0), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 11), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_in_map = { 4, 14 }, + .flags = BPF_F_TEST_STATE_FREQ, + .result = VERBOSE_ACCEPT, + .errstr = "processed 25 insns", + .prog_type = BPF_PROG_TYPE_XDP, +}, +{ "invalid inner map pointer", .insns = { BPF_ST_MEM(0, BPF_REG_10, -4, 0), diff --git a/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c 
b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c index bfb97383e6b5..b4ec228eb95d 100644 --- a/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c +++ b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c @@ -35,7 +35,7 @@ .prog_type = BPF_PROG_TYPE_XDP, }, { - "XDP pkt read, pkt_data' > pkt_end, good access", + "XDP pkt read, pkt_data' > pkt_end, corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, @@ -88,6 +88,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_data' > pkt_end, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' > pkt_end, corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_end > pkt_data', good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), @@ -106,16 +141,16 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_end > pkt_data', bad access 1", + "XDP pkt read, pkt_end > pkt_data', corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -143,6 +178,42 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_end > pkt_data', corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end > pkt_data', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, 
data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_data' < pkt_end, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), @@ -161,16 +232,16 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data' < pkt_end, bad access 1", + "XDP pkt read, pkt_data' < pkt_end, corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -198,7 +269,43 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_end < pkt_data', good access", + "XDP pkt read, pkt_data' < pkt_end, corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' < pkt_end, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end < pkt_data', corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, @@ -251,6 +358,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_end < pkt_data', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end < pkt_data', corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + 
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_data' >= pkt_end, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), @@ -268,15 +410,15 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data' >= pkt_end, bad access 1", + "XDP pkt read, pkt_data' >= pkt_end, corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -304,7 +446,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_end >= pkt_data', good access", + "XDP pkt read, pkt_data' >= pkt_end, corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' >= pkt_end, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end >= pkt_data', corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, @@ -359,7 +535,44 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data' <= pkt_end, good access", + "XDP pkt read, pkt_end >= pkt_data', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end >= pkt_data', corner case -1, bad access", + .insns = { + 
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' <= pkt_end, corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, @@ -414,6 +627,43 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_data' <= pkt_end, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' <= pkt_end, corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_end <= pkt_data', good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), @@ -431,15 +681,15 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_end <= pkt_data', bad access 1", + "XDP pkt read, pkt_end <= pkt_data', corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -467,7 +717,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_meta' > pkt_data, good access", + "XDP pkt read, pkt_end <= pkt_data', corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + 
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end <= pkt_data', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' > pkt_data, corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), @@ -520,6 +804,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_meta' > pkt_data, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' > pkt_data, corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_data > pkt_meta', good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, @@ -538,16 +857,16 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data > pkt_meta', bad access 1", + "XDP pkt read, pkt_data > pkt_meta', corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -575,6 +894,42 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_data > pkt_meta', corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt 
read, pkt_data > pkt_meta', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_meta' < pkt_data, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, @@ -593,16 +948,16 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_meta' < pkt_data, bad access 1", + "XDP pkt read, pkt_meta' < pkt_data, corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -630,7 +985,43 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data < pkt_meta', good access", + "XDP pkt read, pkt_meta' < pkt_data, corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' < pkt_data, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data < pkt_meta', corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), @@ -683,6 +1074,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_data < pkt_meta', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + 
"XDP pkt read, pkt_data < pkt_meta', corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_meta' >= pkt_data, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, @@ -700,15 +1126,15 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_meta' >= pkt_data, bad access 1", + "XDP pkt read, pkt_meta' >= pkt_data, corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -736,7 +1162,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data >= pkt_meta', good access", + "XDP pkt read, pkt_meta' >= pkt_data, corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' >= pkt_data, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data >= pkt_meta', corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), @@ -791,7 +1251,44 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_meta' <= pkt_data, good access", + "XDP pkt read, pkt_data >= pkt_meta', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = 
F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data >= pkt_meta', corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' <= pkt_data, corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), @@ -846,6 +1343,43 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_meta' <= pkt_data, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' <= pkt_data, corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_data <= pkt_meta', good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, @@ -863,15 +1397,15 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data <= pkt_meta', bad access 1", + "XDP pkt read, pkt_data <= pkt_meta', corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -898,3 +1432,37 @@ .prog_type = BPF_PROG_TYPE_XDP, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, +{ + "XDP pkt read, pkt_data <= pkt_meta', corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + 
.result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data <= pkt_meta', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, diff --git a/tools/testing/selftests/damon/.gitignore b/tools/testing/selftests/damon/.gitignore new file mode 100644 index 000000000000..c6c2965a6607 --- /dev/null +++ b/tools/testing/selftests/damon/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +huge_count_read_write diff --git a/tools/testing/selftests/damon/Makefile b/tools/testing/selftests/damon/Makefile index 8a3f2cd9fec0..937d36ae9a69 100644 --- a/tools/testing/selftests/damon/Makefile +++ b/tools/testing/selftests/damon/Makefile @@ -1,7 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for damon selftests -TEST_FILES = _chk_dependency.sh -TEST_PROGS = debugfs_attrs.sh +TEST_GEN_FILES += huge_count_read_write + +TEST_FILES = _chk_dependency.sh _debugfs_common.sh +TEST_PROGS = debugfs_attrs.sh debugfs_schemes.sh debugfs_target_ids.sh +TEST_PROGS += debugfs_empty_targets.sh debugfs_huge_count_read_write.sh include ../lib.mk diff --git a/tools/testing/selftests/damon/_debugfs_common.sh b/tools/testing/selftests/damon/_debugfs_common.sh new file mode 100644 index 000000000000..48989d4813ae --- /dev/null +++ b/tools/testing/selftests/damon/_debugfs_common.sh @@ -0,0 +1,52 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +test_write_result() { + file=$1 + content=$2 + orig_content=$3 + expect_reason=$4 + expected=$5 + + echo "$content" > "$file" + if [ $? -ne "$expected" ] + then + echo "writing $content to $file doesn't return $expected" + echo "expected because: $expect_reason" + echo "$orig_content" > "$file" + exit 1 + fi +} + +test_write_succ() { + test_write_result "$1" "$2" "$3" "$4" 0 +} + +test_write_fail() { + test_write_result "$1" "$2" "$3" "$4" 1 +} + +test_content() { + file=$1 + orig_content=$2 + expected=$3 + expect_reason=$4 + + content=$(cat "$file") + if [ "$content" != "$expected" ] + then + echo "reading $file expected $expected but $content" + echo "expected because: $expect_reason" + echo "$orig_content" > "$file" + exit 1 + fi +} + +source ./_chk_dependency.sh + +damon_onoff="$DBGFS/monitor_on" +if [ $(cat "$damon_onoff") = "on" ] +then + echo "monitoring is on" + exit $ksft_skip +fi diff --git a/tools/testing/selftests/damon/debugfs_attrs.sh b/tools/testing/selftests/damon/debugfs_attrs.sh index 196b6640bf37..902e312bca89 100644 --- a/tools/testing/selftests/damon/debugfs_attrs.sh +++ b/tools/testing/selftests/damon/debugfs_attrs.sh @@ -1,48 +1,7 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 -test_write_result() { - file=$1 - content=$2 - orig_content=$3 - expect_reason=$4 - expected=$5 - - echo "$content" > "$file" - if [ $? 
-ne "$expected" ] - then - echo "writing $content to $file doesn't return $expected" - echo "expected because: $expect_reason" - echo "$orig_content" > "$file" - exit 1 - fi -} - -test_write_succ() { - test_write_result "$1" "$2" "$3" "$4" 0 -} - -test_write_fail() { - test_write_result "$1" "$2" "$3" "$4" 1 -} - -test_content() { - file=$1 - orig_content=$2 - expected=$3 - expect_reason=$4 - - content=$(cat "$file") - if [ "$content" != "$expected" ] - then - echo "reading $file expected $expected but $content" - echo "expected because: $expect_reason" - echo "$orig_content" > "$file" - exit 1 - fi -} - -source ./_chk_dependency.sh +source _debugfs_common.sh # Test attrs file # =============== @@ -56,33 +15,3 @@ test_write_fail "$file" "1 2 3 5 4" "$orig_content" \ "min_nr_regions > max_nr_regions" test_content "$file" "$orig_content" "1 2 3 4 5" "successfully written" echo "$orig_content" > "$file" - -# Test schemes file -# ================= - -file="$DBGFS/schemes" -orig_content=$(cat "$file") - -test_write_succ "$file" "1 2 3 4 5 6 4 0 0 0 1 2 3 1 100 3 2 1" \ - "$orig_content" "valid input" -test_write_fail "$file" "1 2 -3 4 5 6 3 0 0 0 1 2 3 1 100 3 2 1" "$orig_content" "multi lines" -test_write_succ "$file" "" "$orig_content" "disabling" -echo "$orig_content" > "$file" - -# Test target_ids file -# ==================== - -file="$DBGFS/target_ids" -orig_content=$(cat "$file") - -test_write_succ "$file" "1 2 3 4" "$orig_content" "valid input" -test_write_succ "$file" "1 2 abc 4" "$orig_content" "still valid input" -test_content "$file" "$orig_content" "1 2" "non-integer was there" -test_write_succ "$file" "abc 2 3" "$orig_content" "the file allows wrong input" -test_content "$file" "$orig_content" "" "wrong input written" -test_write_succ "$file" "" "$orig_content" "empty input" -test_content "$file" "$orig_content" "" "empty input written" -echo "$orig_content" > "$file" - -echo "PASS" diff --git a/tools/testing/selftests/damon/debugfs_empty_targets.sh b/tools/testing/selftests/damon/debugfs_empty_targets.sh new file mode 100644 index 000000000000..87aff8083822 --- /dev/null +++ b/tools/testing/selftests/damon/debugfs_empty_targets.sh @@ -0,0 +1,13 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +source _debugfs_common.sh + +# Test empty targets case +# ======================= + +orig_target_ids=$(cat "$DBGFS/target_ids") +echo "" > "$DBGFS/target_ids" +orig_monitor_on=$(cat "$DBGFS/monitor_on") +test_write_fail "$DBGFS/monitor_on" "on" "orig_monitor_on" "empty target ids" +echo "$orig_target_ids" > "$DBGFS/target_ids" diff --git a/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh b/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh new file mode 100644 index 000000000000..922cadac2950 --- /dev/null +++ b/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +source _debugfs_common.sh + +# Test huge count read write +# ========================== + +dmesg -C + +for file in "$DBGFS/"* +do + ./huge_count_read_write "$file" +done + +if dmesg | grep -q WARNING +then + dmesg + exit 1 +else + exit 0 +fi diff --git a/tools/testing/selftests/damon/debugfs_schemes.sh b/tools/testing/selftests/damon/debugfs_schemes.sh new file mode 100644 index 000000000000..5b39ab44731c --- /dev/null +++ b/tools/testing/selftests/damon/debugfs_schemes.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +source _debugfs_common.sh + +# Test schemes file +# ================= + 
+file="$DBGFS/schemes" +orig_content=$(cat "$file") + +test_write_succ "$file" "1 2 3 4 5 6 4 0 0 0 1 2 3 1 100 3 2 1" \ + "$orig_content" "valid input" +test_write_fail "$file" "1 2 +3 4 5 6 3 0 0 0 1 2 3 1 100 3 2 1" "$orig_content" "multi lines" +test_write_succ "$file" "" "$orig_content" "disabling" +test_write_fail "$file" "2 1 2 1 10 1 3 10 1 1 1 1 1 1 1 1 2 3" \ + "$orig_content" "wrong condition ranges" +echo "$orig_content" > "$file" diff --git a/tools/testing/selftests/damon/debugfs_target_ids.sh b/tools/testing/selftests/damon/debugfs_target_ids.sh new file mode 100644 index 000000000000..49aeabdb0aae --- /dev/null +++ b/tools/testing/selftests/damon/debugfs_target_ids.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +source _debugfs_common.sh + +# Test target_ids file +# ==================== + +file="$DBGFS/target_ids" +orig_content=$(cat "$file") + +test_write_succ "$file" "1 2 3 4" "$orig_content" "valid input" +test_write_succ "$file" "1 2 abc 4" "$orig_content" "still valid input" +test_content "$file" "$orig_content" "1 2" "non-integer was there" +test_write_succ "$file" "abc 2 3" "$orig_content" "the file allows wrong input" +test_content "$file" "$orig_content" "" "wrong input written" +test_write_succ "$file" "" "$orig_content" "empty input" +test_content "$file" "$orig_content" "" "empty input written" +echo "$orig_content" > "$file" diff --git a/tools/testing/selftests/damon/huge_count_read_write.c b/tools/testing/selftests/damon/huge_count_read_write.c new file mode 100644 index 000000000000..ad7a6b4cf338 --- /dev/null +++ b/tools/testing/selftests/damon/huge_count_read_write.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Author: SeongJae Park <sj@kernel.org> + */ + +#include <fcntl.h> +#include <stdlib.h> +#include <unistd.h> +#include <stdio.h> + +void write_read_with_huge_count(char *file) +{ + int filedesc = open(file, O_RDWR); + char buf[25]; + int ret; + + printf("%s %s\n", __func__, file); + if (filedesc < 0) { + fprintf(stderr, "failed opening %s\n", file); + exit(1); + } + + write(filedesc, "", 0xfffffffful); + perror("after write: "); + ret = read(filedesc, buf, 0xfffffffful); + perror("after read: "); + close(filedesc); +} + +int main(int argc, char *argv[]) +{ + if (argc != 2) { + fprintf(stderr, "Usage: %s <file>\n", argv[0]); + exit(1); + } + write_read_with_huge_count(argv[1]); + + return 0; +} diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile index 39f2bbe8dd3d..d7b312b44a62 100644 --- a/tools/testing/selftests/gpio/Makefile +++ b/tools/testing/selftests/gpio/Makefile @@ -3,5 +3,6 @@ TEST_PROGS := gpio-mockup.sh TEST_FILES := gpio-mockup-sysfs.sh TEST_GEN_PROGS_EXTENDED := gpio-mockup-cdev +CFLAGS += -O2 -g -Wall -I../../../../usr/include/ include ../lib.mk diff --git a/tools/testing/selftests/gpio/gpio-mockup-cdev.c b/tools/testing/selftests/gpio/gpio-mockup-cdev.c index e83eac71621a..d1640f44f8ac 100644 --- a/tools/testing/selftests/gpio/gpio-mockup-cdev.c +++ b/tools/testing/selftests/gpio/gpio-mockup-cdev.c @@ -117,7 +117,7 @@ int main(int argc, char *argv[]) { char *chip; int opt, ret, cfd, lfd; - unsigned int offset, val, abiv; + unsigned int offset, val = 0, abiv; uint32_t flags_v1; uint64_t flags_v2; diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index d4a830139683..00814c0f87a6 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -23,12 +23,14 @@ /x86_64/platform_info_test 
/x86_64/set_boot_cpu_id /x86_64/set_sregs_test +/x86_64/sev_migrate_tests /x86_64/smm_test /x86_64/state_test /x86_64/svm_vmcall_test /x86_64/svm_int_ctl_test /x86_64/sync_regs_test /x86_64/tsc_msrs_test +/x86_64/userspace_io_test /x86_64/userspace_msr_exit_test /x86_64/vmx_apic_access_test /x86_64/vmx_close_while_nested_test diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index c4e34717826a..f307c9f61981 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -59,6 +59,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test +TEST_GEN_PROGS_x86_64 += x86_64/userspace_io_test TEST_GEN_PROGS_x86_64 += x86_64/userspace_msr_exit_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_apic_access_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test diff --git a/tools/testing/selftests/kvm/access_tracking_perf_test.c b/tools/testing/selftests/kvm/access_tracking_perf_test.c index 5d95113c7b7c..d8909032317a 100644 --- a/tools/testing/selftests/kvm/access_tracking_perf_test.c +++ b/tools/testing/selftests/kvm/access_tracking_perf_test.c @@ -47,7 +47,7 @@ #include "guest_modes.h" /* Global variable used to synchronize all of the vCPU threads. */ -static int iteration = -1; +static int iteration; /* Defines what vCPU threads should do during a given iteration. */ static enum { @@ -215,12 +215,11 @@ static bool spin_wait_for_next_iteration(int *current_iteration) return true; } -static void *vcpu_thread_main(void *arg) +static void vcpu_thread_main(struct perf_test_vcpu_args *vcpu_args) { - struct perf_test_vcpu_args *vcpu_args = arg; struct kvm_vm *vm = perf_test_args.vm; int vcpu_id = vcpu_args->vcpu_id; - int current_iteration = -1; + int current_iteration = 0; while (spin_wait_for_next_iteration(¤t_iteration)) { switch (READ_ONCE(iteration_work)) { @@ -235,8 +234,6 @@ static void *vcpu_thread_main(void *arg) vcpu_last_completed_iteration[vcpu_id] = current_iteration; } - - return NULL; } static void spin_wait_for_vcpu(int vcpu_id, int target_iteration) @@ -277,8 +274,7 @@ static void run_iteration(struct kvm_vm *vm, int vcpus, const char *description) static void access_memory(struct kvm_vm *vm, int vcpus, enum access_type access, const char *description) { - perf_test_args.wr_fract = (access == ACCESS_READ) ? INT_MAX : 1; - sync_global_to_guest(vm, perf_test_args); + perf_test_set_wr_fract(vm, (access == ACCESS_READ) ? 
INT_MAX : 1); iteration_work = ITERATION_ACCESS_MEMORY; run_iteration(vm, vcpus, description); } @@ -296,48 +292,16 @@ static void mark_memory_idle(struct kvm_vm *vm, int vcpus) run_iteration(vm, vcpus, "Mark memory idle"); } -static pthread_t *create_vcpu_threads(int vcpus) -{ - pthread_t *vcpu_threads; - int i; - - vcpu_threads = malloc(vcpus * sizeof(vcpu_threads[0])); - TEST_ASSERT(vcpu_threads, "Failed to allocate vcpu_threads."); - - for (i = 0; i < vcpus; i++) { - vcpu_last_completed_iteration[i] = iteration; - pthread_create(&vcpu_threads[i], NULL, vcpu_thread_main, - &perf_test_args.vcpu_args[i]); - } - - return vcpu_threads; -} - -static void terminate_vcpu_threads(pthread_t *vcpu_threads, int vcpus) -{ - int i; - - /* Set done to signal the vCPU threads to exit */ - done = true; - - for (i = 0; i < vcpus; i++) - pthread_join(vcpu_threads[i], NULL); -} - static void run_test(enum vm_guest_mode mode, void *arg) { struct test_params *params = arg; struct kvm_vm *vm; - pthread_t *vcpu_threads; int vcpus = params->vcpus; vm = perf_test_create_vm(mode, vcpus, params->vcpu_memory_bytes, 1, - params->backing_src); + params->backing_src, !overlap_memory_access); - perf_test_setup_vcpus(vm, vcpus, params->vcpu_memory_bytes, - !overlap_memory_access); - - vcpu_threads = create_vcpu_threads(vcpus); + perf_test_start_vcpu_threads(vcpus, vcpu_thread_main); pr_info("\n"); access_memory(vm, vcpus, ACCESS_WRITE, "Populating memory"); @@ -352,8 +316,10 @@ static void run_test(enum vm_guest_mode mode, void *arg) mark_memory_idle(vm, vcpus); access_memory(vm, vcpus, ACCESS_READ, "Reading from idle memory"); - terminate_vcpu_threads(vcpu_threads, vcpus); - free(vcpu_threads); + /* Set done to signal the vCPU threads to exit */ + done = true; + + perf_test_join_vcpu_threads(vcpus); perf_test_destroy_vm(vm); } diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c index 1510b21e6306..6a719d065599 100644 --- a/tools/testing/selftests/kvm/demand_paging_test.c +++ b/tools/testing/selftests/kvm/demand_paging_test.c @@ -42,10 +42,9 @@ static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; static size_t demand_paging_size; static char *guest_data_prototype; -static void *vcpu_worker(void *data) +static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args) { int ret; - struct perf_test_vcpu_args *vcpu_args = (struct perf_test_vcpu_args *)data; int vcpu_id = vcpu_args->vcpu_id; struct kvm_vm *vm = perf_test_args.vm; struct kvm_run *run; @@ -68,8 +67,6 @@ static void *vcpu_worker(void *data) ts_diff = timespec_elapsed(start); PER_VCPU_DEBUG("vCPU %d execution time: %ld.%.9lds\n", vcpu_id, ts_diff.tv_sec, ts_diff.tv_nsec); - - return NULL; } static int handle_uffd_page_request(int uffd_mode, int uffd, uint64_t addr) @@ -282,7 +279,6 @@ struct test_params { static void run_test(enum vm_guest_mode mode, void *arg) { struct test_params *p = arg; - pthread_t *vcpu_threads; pthread_t *uffd_handler_threads = NULL; struct uffd_handler_args *uffd_args = NULL; struct timespec start; @@ -293,9 +289,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) int r; vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1, - p->src_type); - - perf_test_args.wr_fract = 1; + p->src_type, p->partition_vcpu_memory_access); demand_paging_size = get_backing_src_pagesz(p->src_type); @@ -304,12 +298,6 @@ static void run_test(enum vm_guest_mode mode, void *arg) "Failed to allocate buffer for guest data pattern"); memset(guest_data_prototype, 0xAB, 
demand_paging_size); - vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads)); - TEST_ASSERT(vcpu_threads, "Memory allocation failed"); - - perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size, - p->partition_vcpu_memory_access); - if (p->uffd_mode) { uffd_handler_threads = malloc(nr_vcpus * sizeof(*uffd_handler_threads)); @@ -322,26 +310,15 @@ static void run_test(enum vm_guest_mode mode, void *arg) TEST_ASSERT(pipefds, "Unable to allocate memory for pipefd"); for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) { - vm_paddr_t vcpu_gpa; + struct perf_test_vcpu_args *vcpu_args; void *vcpu_hva; void *vcpu_alias; - uint64_t vcpu_mem_size; - - if (p->partition_vcpu_memory_access) { - vcpu_gpa = guest_test_phys_mem + - (vcpu_id * guest_percpu_mem_size); - vcpu_mem_size = guest_percpu_mem_size; - } else { - vcpu_gpa = guest_test_phys_mem; - vcpu_mem_size = guest_percpu_mem_size * nr_vcpus; - } - PER_VCPU_DEBUG("Added VCPU %d with test mem gpa [%lx, %lx)\n", - vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_mem_size); + vcpu_args = &perf_test_args.vcpu_args[vcpu_id]; /* Cache the host addresses of the region */ - vcpu_hva = addr_gpa2hva(vm, vcpu_gpa); - vcpu_alias = addr_gpa2alias(vm, vcpu_gpa); + vcpu_hva = addr_gpa2hva(vm, vcpu_args->gpa); + vcpu_alias = addr_gpa2alias(vm, vcpu_args->gpa); /* * Set up user fault fd to handle demand paging @@ -355,32 +332,18 @@ static void run_test(enum vm_guest_mode mode, void *arg) pipefds[vcpu_id * 2], p->uffd_mode, p->uffd_delay, &uffd_args[vcpu_id], vcpu_hva, vcpu_alias, - vcpu_mem_size); + vcpu_args->pages * perf_test_args.guest_page_size); } } - /* Export the shared variables to the guest */ - sync_global_to_guest(vm, perf_test_args); - pr_info("Finished creating vCPUs and starting uffd threads\n"); clock_gettime(CLOCK_MONOTONIC, &start); - - for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) { - pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker, - &perf_test_args.vcpu_args[vcpu_id]); - } - + perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker); pr_info("Started all vCPUs\n"); - /* Wait for the vcpu threads to quit */ - for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) { - pthread_join(vcpu_threads[vcpu_id], NULL); - PER_VCPU_DEBUG("Joined thread for vCPU %d\n", vcpu_id); - } - + perf_test_join_vcpu_threads(nr_vcpus); ts_diff = timespec_elapsed(start); - pr_info("All vCPU threads joined\n"); if (p->uffd_mode) { @@ -404,7 +367,6 @@ static void run_test(enum vm_guest_mode mode, void *arg) perf_test_destroy_vm(vm); free(guest_data_prototype); - free(vcpu_threads); if (p->uffd_mode) { free(uffd_handler_threads); free(uffd_args); diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c index 7ffab5bd5ce5..1954b964d1cf 100644 --- a/tools/testing/selftests/kvm/dirty_log_perf_test.c +++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c @@ -31,7 +31,7 @@ static bool host_quit; static int iteration; static int vcpu_last_completed_iteration[KVM_MAX_VCPUS]; -static void *vcpu_worker(void *data) +static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args) { int ret; struct kvm_vm *vm = perf_test_args.vm; @@ -41,7 +41,6 @@ static void *vcpu_worker(void *data) struct timespec ts_diff; struct timespec total = (struct timespec){0}; struct timespec avg; - struct perf_test_vcpu_args *vcpu_args = (struct perf_test_vcpu_args *)data; int vcpu_id = vcpu_args->vcpu_id; run = vcpu_state(vm, vcpu_id); @@ -83,8 +82,6 @@ static void *vcpu_worker(void *data) pr_debug("\nvCPU %d dirtied 0x%lx pages over %d iterations in 
%ld.%.9lds. (Avg %ld.%.9lds/iteration)\n", vcpu_id, pages_count, vcpu_last_completed_iteration[vcpu_id], total.tv_sec, total.tv_nsec, avg.tv_sec, avg.tv_nsec); - - return NULL; } struct test_params { @@ -170,7 +167,6 @@ static void free_bitmaps(unsigned long *bitmaps[], int slots) static void run_test(enum vm_guest_mode mode, void *arg) { struct test_params *p = arg; - pthread_t *vcpu_threads; struct kvm_vm *vm; unsigned long **bitmaps; uint64_t guest_num_pages; @@ -186,9 +182,10 @@ static void run_test(enum vm_guest_mode mode, void *arg) struct timespec clear_dirty_log_total = (struct timespec){0}; vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, - p->slots, p->backing_src); + p->slots, p->backing_src, + p->partition_vcpu_memory_access); - perf_test_args.wr_fract = p->wr_fract; + perf_test_set_wr_fract(vm, p->wr_fract); guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm); guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages); @@ -203,25 +200,15 @@ static void run_test(enum vm_guest_mode mode, void *arg) vm_enable_cap(vm, &cap); } - vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads)); - TEST_ASSERT(vcpu_threads, "Memory allocation failed"); - - perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size, - p->partition_vcpu_memory_access); - - sync_global_to_guest(vm, perf_test_args); - /* Start the iterations */ iteration = 0; host_quit = false; clock_gettime(CLOCK_MONOTONIC, &start); - for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) { + for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) vcpu_last_completed_iteration[vcpu_id] = -1; - pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker, - &perf_test_args.vcpu_args[vcpu_id]); - } + perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker); /* Allow the vCPUs to populate memory */ pr_debug("Starting iteration %d - Populating\n", iteration); @@ -290,8 +277,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) /* Tell the vcpu thread to quit */ host_quit = true; - for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) - pthread_join(vcpu_threads[vcpu_id], NULL); + perf_test_join_vcpu_threads(nr_vcpus); avg = timespec_div(get_dirty_log_total, p->iterations); pr_info("Get dirty log over %lu iterations took %ld.%.9lds. 
(Avg %ld.%.9lds/iteration)\n", @@ -306,7 +292,6 @@ static void run_test(enum vm_guest_mode mode, void *arg) } free_bitmaps(bitmaps, p->slots); - free(vcpu_threads); perf_test_destroy_vm(vm); } diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c index 792c60e1b17d..3fcd89e195c7 100644 --- a/tools/testing/selftests/kvm/dirty_log_test.c +++ b/tools/testing/selftests/kvm/dirty_log_test.c @@ -115,7 +115,7 @@ static void guest_code(void) addr = guest_test_virt_mem; addr += (READ_ONCE(random_array[i]) % guest_num_pages) * guest_page_size; - addr &= ~(host_page_size - 1); + addr = align_down(addr, host_page_size); *(uint64_t *)addr = READ_ONCE(iteration); } @@ -737,14 +737,14 @@ static void run_test(enum vm_guest_mode mode, void *arg) if (!p->phys_offset) { guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) * guest_page_size; - guest_test_phys_mem &= ~(host_page_size - 1); + guest_test_phys_mem = align_down(guest_test_phys_mem, host_page_size); } else { guest_test_phys_mem = p->phys_offset; } #ifdef __s390x__ /* Align to 1M (segment size) */ - guest_test_phys_mem &= ~((1 << 20) - 1); + guest_test_phys_mem = align_down(guest_test_phys_mem, 1 << 20); #endif pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem); diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index 6a1a37f30494..da2b702da71a 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -71,6 +71,15 @@ enum vm_guest_mode { #endif +#if defined(__x86_64__) +unsigned long vm_compute_max_gfn(struct kvm_vm *vm); +#else +static inline unsigned long vm_compute_max_gfn(struct kvm_vm *vm) +{ + return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1; +} +#endif + #define MIN_PAGE_SIZE (1U << MIN_PAGE_SHIFT) #define PTES_PER_MIN_PAGE ptes_per_page(MIN_PAGE_SIZE) diff --git a/tools/testing/selftests/kvm/include/perf_test_util.h b/tools/testing/selftests/kvm/include/perf_test_util.h index df9f1a3a3ffb..a86f953d8d36 100644 --- a/tools/testing/selftests/kvm/include/perf_test_util.h +++ b/tools/testing/selftests/kvm/include/perf_test_util.h @@ -8,6 +8,8 @@ #ifndef SELFTEST_KVM_PERF_TEST_UTIL_H #define SELFTEST_KVM_PERF_TEST_UTIL_H +#include <pthread.h> + #include "kvm_util.h" /* Default guest test virtual memory offset */ @@ -18,6 +20,7 @@ #define PERF_TEST_MEM_SLOT_INDEX 1 struct perf_test_vcpu_args { + uint64_t gpa; uint64_t gva; uint64_t pages; @@ -27,7 +30,7 @@ struct perf_test_vcpu_args { struct perf_test_args { struct kvm_vm *vm; - uint64_t host_page_size; + uint64_t gpa; uint64_t guest_page_size; int wr_fract; @@ -36,19 +39,15 @@ struct perf_test_args { extern struct perf_test_args perf_test_args; -/* - * Guest physical memory offset of the testing memory slot. - * This will be set to the topmost valid physical address minus - * the test memory size. 
- */ -extern uint64_t guest_test_phys_mem; - struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus, uint64_t vcpu_memory_bytes, int slots, - enum vm_mem_backing_src_type backing_src); + enum vm_mem_backing_src_type backing_src, + bool partition_vcpu_memory_access); void perf_test_destroy_vm(struct kvm_vm *vm); -void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, - uint64_t vcpu_memory_bytes, - bool partition_vcpu_memory_access); + +void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract); + +void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *)); +void perf_test_join_vcpu_threads(int vcpus); #endif /* SELFTEST_KVM_PERF_TEST_UTIL_H */ diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h index f8fddc84c0d3..99e0dcdc923f 100644 --- a/tools/testing/selftests/kvm/include/test_util.h +++ b/tools/testing/selftests/kvm/include/test_util.h @@ -104,6 +104,7 @@ size_t get_trans_hugepagesz(void); size_t get_def_hugetlb_pagesz(void); const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i); size_t get_backing_src_pagesz(uint32_t i); +bool is_backing_src_hugetlb(uint32_t i); void backing_src_help(const char *flag); enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name); long get_run_delay(void); @@ -117,4 +118,29 @@ static inline bool backing_src_is_shared(enum vm_mem_backing_src_type t) return vm_mem_backing_src_alias(t)->flag & MAP_SHARED; } +/* Aligns x up to the next multiple of size. Size must be a power of 2. */ +static inline uint64_t align_up(uint64_t x, uint64_t size) +{ + uint64_t mask = size - 1; + + TEST_ASSERT(size != 0 && !(size & (size - 1)), + "size not a power of 2: %lu", size); + return ((x + mask) & ~mask); +} + +static inline uint64_t align_down(uint64_t x, uint64_t size) +{ + uint64_t x_aligned_up = align_up(x, size); + + if (x == x_aligned_up) + return x; + else + return x_aligned_up - size; +} + +static inline void *align_ptr_up(void *x, size_t size) +{ + return (void *)align_up((unsigned long)x, size); +} + #endif /* SELFTEST_KVM_TEST_UTIL_H */ diff --git a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c index f968dfd4ee88..aed9dc3ca1e9 100644 --- a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c +++ b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c @@ -12,6 +12,7 @@ #include <stdio.h> #include <stdlib.h> #include <string.h> +#include <sys/resource.h> #include "test_util.h" @@ -40,11 +41,40 @@ int main(int argc, char *argv[]) { int kvm_max_vcpu_id = kvm_check_cap(KVM_CAP_MAX_VCPU_ID); int kvm_max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS); + /* + * Number of file descriptors required, KVM_CAP_MAX_VCPUS for vCPU fds + + * an arbitrary number for everything else. + */ + int nr_fds_wanted = kvm_max_vcpus + 100; + struct rlimit rl; pr_info("KVM_CAP_MAX_VCPU_ID: %d\n", kvm_max_vcpu_id); pr_info("KVM_CAP_MAX_VCPUS: %d\n", kvm_max_vcpus); /* + * Check that we're allowed to open nr_fds_wanted file descriptors and + * try raising the limits if needed.
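The limit-raising flow that follows (raise the soft limit, and the hard limit only when it is also too low) is worth seeing in isolation. Below is a simplified standalone sketch of the same logic; the fd budget is a hypothetical stand-in for the KVM_CAP_MAX_VCPUS-derived value used by the test, and the error handling is condensed.

#include <stdlib.h>
#include <sys/resource.h>

#define NR_FDS_WANTED 4196	/* hypothetical; the test derives this from KVM_CAP_MAX_VCPUS */
#define KSFT_SKIP 4		/* kselftest "skip" exit code */

static void ensure_fd_limit(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_NOFILE, &rl))
		exit(EXIT_FAILURE);

	if (rl.rlim_cur >= NR_FDS_WANTED)
		return;			/* soft limit already high enough */

	rl.rlim_cur = NR_FDS_WANTED;
	if (rl.rlim_max < NR_FDS_WANTED)
		rl.rlim_max = NR_FDS_WANTED;	/* raising the hard limit needs CAP_SYS_RESOURCE */

	if (setrlimit(RLIMIT_NOFILE, &rl))
		exit(KSFT_SKIP);	/* unprivileged and hard limit too low: skip, don't fail */
}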
+ */ + TEST_ASSERT(!getrlimit(RLIMIT_NOFILE, &rl), "getrlimit() failed!"); + + if (rl.rlim_cur < nr_fds_wanted) { + rl.rlim_cur = nr_fds_wanted; + if (rl.rlim_max < nr_fds_wanted) { + int old_rlim_max = rl.rlim_max; + rl.rlim_max = nr_fds_wanted; + + int r = setrlimit(RLIMIT_NOFILE, &rl); + if (r < 0) { + printf("RLIMIT_NOFILE hard limit is too low (%d, wanted %d)\n", + old_rlim_max, nr_fds_wanted); + exit(KSFT_SKIP); + } + } else { + TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!"); + } + } + + /* * Upstream KVM prior to 4.8 does not support KVM_CAP_MAX_VCPU_ID. * Userspace is supposed to use KVM_CAP_MAX_VCPUS as the maximum ID * in this case. diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c index 36407cb0ec85..ba1fdc3dcf4a 100644 --- a/tools/testing/selftests/kvm/kvm_page_table_test.c +++ b/tools/testing/selftests/kvm/kvm_page_table_test.c @@ -280,7 +280,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg) #ifdef __s390x__ alignment = max(0x100000, alignment); #endif - guest_test_phys_mem &= ~(alignment - 1); + guest_test_phys_mem = align_down(guest_test_phys_mem, alignment); /* Set up the shared data structure test_args */ test_args.vm = vm; diff --git a/tools/testing/selftests/kvm/lib/elf.c b/tools/testing/selftests/kvm/lib/elf.c index eac44f5d0db0..13e8e3dcf984 100644 --- a/tools/testing/selftests/kvm/lib/elf.c +++ b/tools/testing/selftests/kvm/lib/elf.c @@ -157,8 +157,7 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename) "memsize of 0,\n" " phdr index: %u p_memsz: 0x%" PRIx64, n1, (uint64_t) phdr.p_memsz); - vm_vaddr_t seg_vstart = phdr.p_vaddr; - seg_vstart &= ~(vm_vaddr_t)(vm->page_size - 1); + vm_vaddr_t seg_vstart = align_down(phdr.p_vaddr, vm->page_size); vm_vaddr_t seg_vend = phdr.p_vaddr + phdr.p_memsz - 1; seg_vend |= vm->page_size - 1; size_t seg_size = seg_vend - seg_vstart + 1; diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 14bb4d5b6bb7..daf6fdb217a7 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -22,15 +22,6 @@ static int vcpu_mmap_sz(void); -/* Aligns x up to the next multiple of size. Size must be a power of 2. 
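To make the power-of-2 rounding concrete, here are a few worked values for the align_up()/align_down() helpers added to test_util.h above (a 4 KiB size is assumed, and the snippet presumes test_util.h is included):

#include <assert.h>

static void align_examples(void)
{
	/* align_up() rounds to the next multiple, align_down() to the previous one. */
	assert(align_up(0x1234, 0x1000) == 0x2000);
	assert(align_up(0x1000, 0x1000) == 0x1000);	/* already aligned: unchanged */
	assert(align_down(0x1234, 0x1000) == 0x1000);
	assert(align_down(0x1000, 0x1000) == 0x1000);	/* already aligned: unchanged */
}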
*/ -static void *align(void *x, size_t size) -{ - size_t mask = size - 1; - TEST_ASSERT(size != 0 && !(size & (size - 1)), - "size not a power of 2: %lu", size); - return (void *) (((size_t) x + mask) & ~mask); -} - int open_path_or_exit(const char *path, int flags) { int fd; @@ -191,15 +182,15 @@ const char *vm_guest_mode_string(uint32_t i) } const struct vm_guest_mode_params vm_guest_mode_params[] = { - { 52, 48, 0x1000, 12 }, - { 52, 48, 0x10000, 16 }, - { 48, 48, 0x1000, 12 }, - { 48, 48, 0x10000, 16 }, - { 40, 48, 0x1000, 12 }, - { 40, 48, 0x10000, 16 }, - { 0, 0, 0x1000, 12 }, - { 47, 64, 0x1000, 12 }, - { 44, 64, 0x1000, 12 }, + [VM_MODE_P52V48_4K] = { 52, 48, 0x1000, 12 }, + [VM_MODE_P52V48_64K] = { 52, 48, 0x10000, 16 }, + [VM_MODE_P48V48_4K] = { 48, 48, 0x1000, 12 }, + [VM_MODE_P48V48_64K] = { 48, 48, 0x10000, 16 }, + [VM_MODE_P40V48_4K] = { 40, 48, 0x1000, 12 }, + [VM_MODE_P40V48_64K] = { 40, 48, 0x10000, 16 }, + [VM_MODE_PXXV48_4K] = { 0, 0, 0x1000, 12 }, + [VM_MODE_P47V64_4K] = { 47, 64, 0x1000, 12 }, + [VM_MODE_P44V64_4K] = { 44, 64, 0x1000, 12 }, }; _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES, "Missing new mode params?"); @@ -311,7 +302,7 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm) (1ULL << (vm->va_bits - 1)) >> vm->page_shift); /* Limit physical addresses to PA-bits. */ - vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1; + vm->max_gfn = vm_compute_max_gfn(vm); /* Allocate and setup memory for guest. */ vm->vpages_mapped = sparsebit_alloc(); @@ -879,9 +870,17 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, alignment = 1; #endif + /* + * When using THP mmap is not guaranteed to return a hugepage aligned + * address so we have to pad the mmap. Padding is not needed for HugeTLB + * because mmap will always return an address aligned to the HugeTLB + * page size. + */ if (src_type == VM_MEM_SRC_ANONYMOUS_THP) alignment = max(backing_src_pagesz, alignment); + ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz)); + /* Add enough memory to align up if necessary */ if (alignment > 1) region->mmap_size += alignment; @@ -914,8 +913,13 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, "test_malloc failed, mmap_start: %p errno: %i", region->mmap_start, errno); + TEST_ASSERT(!is_backing_src_hugetlb(src_type) || + region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz), + "mmap_start %p is not aligned to HugeTLB page size 0x%lx", + region->mmap_start, backing_src_pagesz); + /* Align host address */ - region->host_mem = align(region->mmap_start, alignment); + region->host_mem = align_ptr_up(region->mmap_start, alignment); /* As needed perform madvise */ if ((src_type == VM_MEM_SRC_ANONYMOUS || @@ -958,7 +962,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, "mmap of alias failed, errno: %i", errno); /* Align host alias address */ - region->host_alias = align(region->mmap_alias, alignment); + region->host_alias = align_ptr_up(region->mmap_alias, alignment); } } diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c index 0ef80dbdc116..722df3a28791 100644 --- a/tools/testing/selftests/kvm/lib/perf_test_util.c +++ b/tools/testing/selftests/kvm/lib/perf_test_util.c @@ -10,21 +10,40 @@ struct perf_test_args perf_test_args; -uint64_t guest_test_phys_mem; - /* * Guest virtual memory offset of the testing memory slot. * Must not conflict with identity mapped test code.
*/ static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM; +struct vcpu_thread { + /* The id of the vCPU. */ + int vcpu_id; + + /* The pthread backing the vCPU. */ + pthread_t thread; + + /* Set to true once the vCPU thread is up and running. */ + bool running; +}; + +/* The vCPU threads involved in this test. */ +static struct vcpu_thread vcpu_threads[KVM_MAX_VCPUS]; + +/* The function run by each vCPU thread, as provided by the test. */ +static void (*vcpu_thread_fn)(struct perf_test_vcpu_args *); + +/* Set to true once all vCPU threads are up and running. */ +static bool all_vcpu_threads_running; + /* * Continuously write to the first 8 bytes of each page in the * specified region. */ static void guest_code(uint32_t vcpu_id) { - struct perf_test_vcpu_args *vcpu_args = &perf_test_args.vcpu_args[vcpu_id]; + struct perf_test_args *pta = &perf_test_args; + struct perf_test_vcpu_args *vcpu_args = &pta->vcpu_args[vcpu_id]; uint64_t gva; uint64_t pages; int i; @@ -37,9 +56,9 @@ static void guest_code(uint32_t vcpu_id) while (true) { for (i = 0; i < pages; i++) { - uint64_t addr = gva + (i * perf_test_args.guest_page_size); + uint64_t addr = gva + (i * pta->guest_page_size); - if (i % perf_test_args.wr_fract == 0) + if (i % pta->wr_fract == 0) *(uint64_t *)addr = 0x0123456789ABCDEF; else READ_ONCE(*(uint64_t *)addr); @@ -49,35 +68,81 @@ static void guest_code(uint32_t vcpu_id) } } +void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, + uint64_t vcpu_memory_bytes, + bool partition_vcpu_memory_access) +{ + struct perf_test_args *pta = &perf_test_args; + struct perf_test_vcpu_args *vcpu_args; + int vcpu_id; + + for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) { + vcpu_args = &pta->vcpu_args[vcpu_id]; + + vcpu_args->vcpu_id = vcpu_id; + if (partition_vcpu_memory_access) { + vcpu_args->gva = guest_test_virt_mem + + (vcpu_id * vcpu_memory_bytes); + vcpu_args->pages = vcpu_memory_bytes / + pta->guest_page_size; + vcpu_args->gpa = pta->gpa + (vcpu_id * vcpu_memory_bytes); + } else { + vcpu_args->gva = guest_test_virt_mem; + vcpu_args->pages = (vcpus * vcpu_memory_bytes) / + pta->guest_page_size; + vcpu_args->gpa = pta->gpa; + } + + vcpu_args_set(vm, vcpu_id, 1, vcpu_id); + + pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n", + vcpu_id, vcpu_args->gpa, vcpu_args->gpa + + (vcpu_args->pages * pta->guest_page_size)); + } +} + struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus, uint64_t vcpu_memory_bytes, int slots, - enum vm_mem_backing_src_type backing_src) + enum vm_mem_backing_src_type backing_src, + bool partition_vcpu_memory_access) { + struct perf_test_args *pta = &perf_test_args; struct kvm_vm *vm; uint64_t guest_num_pages; + uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src); int i; pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode)); - perf_test_args.host_page_size = getpagesize(); - perf_test_args.guest_page_size = vm_guest_mode_params[mode].page_size; + /* By default vCPUs will write to memory. */ + pta->wr_fract = 1; + + /* + * Snapshot the non-huge page size. This is used by the guest code to + * access/dirty pages at the logging granularity. 
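The wr_fract knob that guest_code() consults controls the write/read mix over those pages. A small illustrative helper (the name pages_written is invented here) makes the resulting access pattern easy to reason about:

#include <stdint.h>

/* How many of 'pages' one guest pass dirties for a given wr_fract (ceil division). */
static inline uint64_t pages_written(uint64_t pages, int wr_fract)
{
	return (pages + wr_fract - 1) / wr_fract;
}

/*
 * e.g. with 512 pages: wr_fract == 1 dirties all 512 (the default set by
 * perf_test_create_vm), wr_fract == 4 dirties 128 (pages 0, 4, 8, ...),
 * and the INT_MAX value used by access_tracking_perf_test dirties only
 * page 0, i.e. an effectively read-only pass.
 */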
+ */ + pta->guest_page_size = vm_guest_mode_params[mode].page_size; guest_num_pages = vm_adjust_num_guest_pages(mode, - (vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size); + (vcpus * vcpu_memory_bytes) / pta->guest_page_size); - TEST_ASSERT(vcpu_memory_bytes % perf_test_args.host_page_size == 0, + TEST_ASSERT(vcpu_memory_bytes % getpagesize() == 0, "Guest memory size is not host page size aligned."); - TEST_ASSERT(vcpu_memory_bytes % perf_test_args.guest_page_size == 0, + TEST_ASSERT(vcpu_memory_bytes % pta->guest_page_size == 0, "Guest memory size is not guest page size aligned."); TEST_ASSERT(guest_num_pages % slots == 0, "Guest memory cannot be evenly divided into %d slots.", slots); + /* + * Pass guest_num_pages to populate the page tables for test memory. + * The memory is also added to memslot 0, but that's a benign side + * effect as KVM allows aliasing HVAs in memslots. + */ vm = vm_create_with_vcpus(mode, vcpus, DEFAULT_GUEST_PHY_PAGES, - (vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size, - 0, guest_code, NULL); + guest_num_pages, 0, guest_code, NULL); - perf_test_args.vm = vm; + pta->vm = vm; /* * If there should be more memory in the guest test region than there @@ -90,20 +155,18 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus, guest_num_pages, vm_get_max_gfn(vm), vcpus, vcpu_memory_bytes); - guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) * - perf_test_args.guest_page_size; - guest_test_phys_mem &= ~(perf_test_args.host_page_size - 1); + pta->gpa = (vm_get_max_gfn(vm) - guest_num_pages) * pta->guest_page_size; + pta->gpa = align_down(pta->gpa, backing_src_pagesz); #ifdef __s390x__ /* Align to 1M (segment size) */ - guest_test_phys_mem &= ~((1 << 20) - 1); + pta->gpa = align_down(pta->gpa, 1 << 20); #endif - pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem); + pr_info("guest physical test memory offset: 0x%lx\n", pta->gpa); /* Add extra memory slots for testing */ for (i = 0; i < slots; i++) { uint64_t region_pages = guest_num_pages / slots; - vm_paddr_t region_start = guest_test_phys_mem + - region_pages * perf_test_args.guest_page_size * i; + vm_paddr_t region_start = pta->gpa + region_pages * pta->guest_page_size * i; vm_userspace_mem_region_add(vm, backing_src, region_start, PERF_TEST_MEM_SLOT_INDEX + i, @@ -111,10 +174,15 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus, } /* Do mapping for the demand paging memory slot */ - virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages); + virt_map(vm, guest_test_virt_mem, pta->gpa, guest_num_pages); + + perf_test_setup_vcpus(vm, vcpus, vcpu_memory_bytes, partition_vcpu_memory_access); ucall_init(vm, NULL); + /* Export the shared variables to the guest. */ + sync_global_to_guest(vm, perf_test_args); + return vm; } @@ -124,36 +192,60 @@ void perf_test_destroy_vm(struct kvm_vm *vm) kvm_vm_free(vm); } -void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, - uint64_t vcpu_memory_bytes, - bool partition_vcpu_memory_access) +void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract) +{ + perf_test_args.wr_fract = wr_fract; + sync_global_to_guest(vm, perf_test_args); +} + +static void *vcpu_thread_main(void *data) +{ + struct vcpu_thread *vcpu = data; + + WRITE_ONCE(vcpu->running, true); + + /* + * Wait for all vCPU threads to be up and running before calling the test- + * provided vCPU thread function.
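This startup handshake, whose rationale the rest of the comment below explains, is a simple two-phase barrier built from per-thread flags plus one global flag. A condensed userspace sketch of the same idea (names shortened, two workers assumed, GCC/Clang __atomic builtins standing in for READ_ONCE/WRITE_ONCE):

#include <pthread.h>
#include <stdbool.h>

static bool thread_running[2];
static bool all_running;

static void *worker(void *arg)
{
	int id = *(int *)arg;

	/* Phase 1: announce this worker exists. */
	__atomic_store_n(&thread_running[id], true, __ATOMIC_RELAXED);

	/* Phase 2: spin until the spawner has observed every worker. */
	while (!__atomic_load_n(&all_running, __ATOMIC_RELAXED))
		;

	/* ... real work starts here, after all threads have been created ... */
	return NULL;
}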
This prevents thread creation (which + * requires taking the mmap_sem in write mode) from interfering with the + * guest faulting in its memory. + */ + while (!READ_ONCE(all_vcpu_threads_running)) + ; + + vcpu_thread_fn(&perf_test_args.vcpu_args[vcpu->vcpu_id]); + + return NULL; +} + +void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *)) { - vm_paddr_t vcpu_gpa; - struct perf_test_vcpu_args *vcpu_args; int vcpu_id; + vcpu_thread_fn = vcpu_fn; + WRITE_ONCE(all_vcpu_threads_running, false); + for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) { - vcpu_args = &perf_test_args.vcpu_args[vcpu_id]; + struct vcpu_thread *vcpu = &vcpu_threads[vcpu_id]; - vcpu_args->vcpu_id = vcpu_id; - if (partition_vcpu_memory_access) { - vcpu_args->gva = guest_test_virt_mem + - (vcpu_id * vcpu_memory_bytes); - vcpu_args->pages = vcpu_memory_bytes / - perf_test_args.guest_page_size; - vcpu_gpa = guest_test_phys_mem + - (vcpu_id * vcpu_memory_bytes); - } else { - vcpu_args->gva = guest_test_virt_mem; - vcpu_args->pages = (vcpus * vcpu_memory_bytes) / - perf_test_args.guest_page_size; - vcpu_gpa = guest_test_phys_mem; - } + vcpu->vcpu_id = vcpu_id; + WRITE_ONCE(vcpu->running, false); - vcpu_args_set(vm, vcpu_id, 1, vcpu_id); + pthread_create(&vcpu->thread, NULL, vcpu_thread_main, vcpu); + } - pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n", - vcpu_id, vcpu_gpa, vcpu_gpa + - (vcpu_args->pages * perf_test_args.guest_page_size)); + for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) { + while (!READ_ONCE(vcpu_threads[vcpu_id].running)) + ; } + + WRITE_ONCE(all_vcpu_threads_running, true); +} + +void perf_test_join_vcpu_threads(int vcpus) +{ + int vcpu_id; + + for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) + pthread_join(vcpu_threads[vcpu_id].thread, NULL); } diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c index b72429108993..6d23878bbfe1 100644 --- a/tools/testing/selftests/kvm/lib/test_util.c +++ b/tools/testing/selftests/kvm/lib/test_util.c @@ -283,6 +283,11 @@ size_t get_backing_src_pagesz(uint32_t i) } } +bool is_backing_src_hugetlb(uint32_t i) +{ + return !!(vm_mem_backing_src_alias(i)->flag & MAP_HUGETLB); +} + static void print_available_backing_src_types(const char *prefix) { int i; diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index 82c39db91369..eef7b34756d5 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -1431,3 +1431,71 @@ struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpui return cpuid; } + +#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541 +#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163 +#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65 + +static inline unsigned x86_family(unsigned int eax) +{ + unsigned int x86; + + x86 = (eax >> 8) & 0xf; + + if (x86 == 0xf) + x86 += (eax >> 20) & 0xff; + + return x86; +} + +unsigned long vm_compute_max_gfn(struct kvm_vm *vm) +{ + const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */ + unsigned long ht_gfn, max_gfn, max_pfn; + uint32_t eax, ebx, ecx, edx, max_ext_leaf; + + max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1; + + /* Avoid reserved HyperTransport region on AMD processors. 
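The three X86EMUL_CPUID_VENDOR_AuthenticAMD_* constants used just below are simply the vendor string "AuthenticAMD" viewed as little-endian register words; CPUID leaf 0 returns the string in EBX, EDX, ECX order. A small standalone check of that decoding (values copied from the constants above):

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int ebx = 0x68747541;	/* "Auth" */
	unsigned int edx = 0x69746e65;	/* "enti" */
	unsigned int ecx = 0x444d4163;	/* "cAMD" */
	char vendor[13];

	/* CPUID.0 vendor string byte order: EBX, then EDX, then ECX. */
	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';

	puts(vendor);	/* prints "AuthenticAMD" on a little-endian host */
	return 0;
}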
*/ + eax = ecx = 0; + cpuid(&eax, &ebx, &ecx, &edx); + if (ebx != X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx || + ecx != X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx || + edx != X86EMUL_CPUID_VENDOR_AuthenticAMD_edx) + return max_gfn; + + /* On parts with <40 physical address bits, the area is fully hidden */ + if (vm->pa_bits < 40) + return max_gfn; + + /* Before family 17h, the HyperTransport area is just below 1T. */ + ht_gfn = (1 << 28) - num_ht_pages; + eax = 1; + cpuid(&eax, &ebx, &ecx, &edx); + if (x86_family(eax) < 0x17) + goto done; + + /* + * Otherwise it's at the top of the physical address space, possibly + * reduced due to SME by bits 11:6 of CPUID[0x8000001f].EBX. Use + * the old conservative value if MAXPHYADDR is not enumerated. + */ + eax = 0x80000000; + cpuid(&eax, &ebx, &ecx, &edx); + max_ext_leaf = eax; + if (max_ext_leaf < 0x80000008) + goto done; + + eax = 0x80000008; + cpuid(&eax, &ebx, &ecx, &edx); + max_pfn = (1ULL << ((eax & 0xff) - vm->page_shift)) - 1; + if (max_ext_leaf >= 0x8000001f) { + eax = 0x8000001f; + cpuid(&eax, &ebx, &ecx, &edx); + max_pfn >>= (ebx >> 6) & 0x3f; + } + + ht_gfn = max_pfn - num_ht_pages; +done: + return min(max_gfn, ht_gfn - 1); +} diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c index 4cfcafea9f5a..1410d0a9141a 100644 --- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c +++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c @@ -36,11 +36,9 @@ static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; static bool run_vcpus = true; -static void *vcpu_worker(void *data) +static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args) { int ret; - struct perf_test_vcpu_args *vcpu_args = - (struct perf_test_vcpu_args *)data; int vcpu_id = vcpu_args->vcpu_id; struct kvm_vm *vm = perf_test_args.vm; struct kvm_run *run; @@ -59,8 +57,6 @@ static void *vcpu_worker(void *data) "Invalid guest sync status: exit_reason=%s\n", exit_reason_str(run->exit_reason)); } - - return NULL; } struct memslot_antagonist_args { @@ -80,7 +76,7 @@ static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay, * Add the dummy memslot just below the perf_test_util memslot, which is * at the top of the guest physical address space. 
*/ - gpa = guest_test_phys_mem - pages * vm_get_page_size(vm); + gpa = perf_test_args.gpa - pages * vm_get_page_size(vm); for (i = 0; i < nr_modifications; i++) { usleep(delay); @@ -100,29 +96,15 @@ struct test_params { static void run_test(enum vm_guest_mode mode, void *arg) { struct test_params *p = arg; - pthread_t *vcpu_threads; struct kvm_vm *vm; - int vcpu_id; vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1, - VM_MEM_SRC_ANONYMOUS); - - perf_test_args.wr_fract = 1; - - vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads)); - TEST_ASSERT(vcpu_threads, "Memory allocation failed"); - - perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size, - p->partition_vcpu_memory_access); - - /* Export the shared variables to the guest */ - sync_global_to_guest(vm, perf_test_args); + VM_MEM_SRC_ANONYMOUS, + p->partition_vcpu_memory_access); pr_info("Finished creating vCPUs\n"); - for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) - pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker, - &perf_test_args.vcpu_args[vcpu_id]); + perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker); pr_info("Started all vCPUs\n"); @@ -131,16 +113,10 @@ static void run_test(enum vm_guest_mode mode, void *arg) run_vcpus = false; - /* Wait for the vcpu threads to quit */ - for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) - pthread_join(vcpu_threads[vcpu_id], NULL); - + perf_test_join_vcpu_threads(nr_vcpus); pr_info("All vCPU threads joined\n"); - ucall_uninit(vm); - kvm_vm_free(vm); - - free(vcpu_threads); + perf_test_destroy_vm(vm); } static void help(char *name) diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c index 91d88aaa9899..672915ce73d8 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c @@ -165,10 +165,10 @@ static void hv_set_cpuid(struct kvm_vm *vm, struct kvm_cpuid2 *cpuid, vcpu_set_cpuid(vm, VCPU_ID, cpuid); } -static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr, - struct kvm_cpuid2 *best) +static void guest_test_msrs_access(void) { struct kvm_run *run; + struct kvm_vm *vm; struct ucall uc; int stage = 0, r; struct kvm_cpuid_entry2 feat = { @@ -180,11 +180,34 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr, struct kvm_cpuid_entry2 dbg = { .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES }; - struct kvm_enable_cap cap = {0}; - - run = vcpu_state(vm, VCPU_ID); + struct kvm_cpuid2 *best; + vm_vaddr_t msr_gva; + struct kvm_enable_cap cap = { + .cap = KVM_CAP_HYPERV_ENFORCE_CPUID, + .args = {1} + }; + struct msr_data *msr; while (true) { + vm = vm_create_default(VCPU_ID, 0, guest_msr); + + msr_gva = vm_vaddr_alloc_page(vm); + memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize()); + msr = addr_gva2hva(vm, msr_gva); + + vcpu_args_set(vm, VCPU_ID, 1, msr_gva); + vcpu_enable_cap(vm, VCPU_ID, &cap); + + vcpu_set_hv_cpuid(vm, VCPU_ID); + + best = kvm_get_supported_hv_cpuid(); + + vm_init_descriptor_tables(vm); + vcpu_init_descriptor_tables(vm, VCPU_ID); + vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler); + + run = vcpu_state(vm, VCPU_ID); + switch (stage) { case 0: /* @@ -315,6 +338,7 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr, * capability enabled and guest visible CPUID bit unset. 
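The kvm_enable_cap shape recurs throughout this test. A hedged helper showing the pattern, built only on the vcpu_enable_cap() call already visible in this diff (the helper name is invented here, and the selftest headers are assumed to be included):

/* Enable a vCPU capability that takes a single scalar argument. */
static void enable_vcpu_cap_one_arg(struct kvm_vm *vm, uint32_t vcpu_id,
				    uint32_t capability, uint64_t arg)
{
	struct kvm_enable_cap cap = {
		.cap = capability,
		.args = { arg },
	};

	vcpu_enable_cap(vm, vcpu_id, &cap);
}

/* e.g. enable_vcpu_cap_one_arg(vm, VCPU_ID, KVM_CAP_HYPERV_ENFORCE_CPUID, 1); */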
*/ cap.cap = KVM_CAP_HYPERV_SYNIC2; + cap.args[0] = 0; vcpu_enable_cap(vm, VCPU_ID, &cap); break; case 22: @@ -461,9 +485,9 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr, switch (get_ucall(vm, VCPU_ID, &uc)) { case UCALL_SYNC: - TEST_ASSERT(uc.args[1] == stage, - "Unexpected stage: %ld (%d expected)\n", - uc.args[1], stage); + TEST_ASSERT(uc.args[1] == 0, + "Unexpected stage: %ld (0 expected)\n", + uc.args[1]); break; case UCALL_ABORT: TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], @@ -474,13 +498,14 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr, } stage++; + kvm_vm_free(vm); } } -static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall, - void *input, void *output, struct kvm_cpuid2 *best) +static void guest_test_hcalls_access(void) { struct kvm_run *run; + struct kvm_vm *vm; struct ucall uc; int stage = 0, r; struct kvm_cpuid_entry2 feat = { @@ -493,10 +518,38 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall struct kvm_cpuid_entry2 dbg = { .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES }; - - run = vcpu_state(vm, VCPU_ID); + struct kvm_enable_cap cap = { + .cap = KVM_CAP_HYPERV_ENFORCE_CPUID, + .args = {1} + }; + vm_vaddr_t hcall_page, hcall_params; + struct hcall_data *hcall; + struct kvm_cpuid2 *best; while (true) { + vm = vm_create_default(VCPU_ID, 0, guest_hcall); + + vm_init_descriptor_tables(vm); + vcpu_init_descriptor_tables(vm, VCPU_ID); + vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler); + + /* Hypercall input/output */ + hcall_page = vm_vaddr_alloc_pages(vm, 2); + hcall = addr_gva2hva(vm, hcall_page); + memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize()); + + hcall_params = vm_vaddr_alloc_page(vm); + memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize()); + + vcpu_args_set(vm, VCPU_ID, 2, addr_gva2gpa(vm, hcall_page), hcall_params); + vcpu_enable_cap(vm, VCPU_ID, &cap); + + vcpu_set_hv_cpuid(vm, VCPU_ID); + + best = kvm_get_supported_hv_cpuid(); + + run = vcpu_state(vm, VCPU_ID); + switch (stage) { case 0: hcall->control = 0xdeadbeef; @@ -606,9 +659,9 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall switch (get_ucall(vm, VCPU_ID, &uc)) { case UCALL_SYNC: - TEST_ASSERT(uc.args[1] == stage, - "Unexpected stage: %ld (%d expected)\n", - uc.args[1], stage); + TEST_ASSERT(uc.args[1] == 0, + "Unexpected stage: %ld (0 expected)\n", + uc.args[1]); break; case UCALL_ABORT: TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], @@ -619,66 +672,15 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall } stage++; + kvm_vm_free(vm); } } int main(void) { - struct kvm_cpuid2 *best; - struct kvm_vm *vm; - vm_vaddr_t msr_gva, hcall_page, hcall_params; - struct kvm_enable_cap cap = { - .cap = KVM_CAP_HYPERV_ENFORCE_CPUID, - .args = {1} - }; - - /* Test MSRs */ - vm = vm_create_default(VCPU_ID, 0, guest_msr); - - msr_gva = vm_vaddr_alloc_page(vm); - memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize()); - vcpu_args_set(vm, VCPU_ID, 1, msr_gva); - vcpu_enable_cap(vm, VCPU_ID, &cap); - - vcpu_set_hv_cpuid(vm, VCPU_ID); - - best = kvm_get_supported_hv_cpuid(); - - vm_init_descriptor_tables(vm); - vcpu_init_descriptor_tables(vm, VCPU_ID); - vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler); - pr_info("Testing access to Hyper-V specific MSRs\n"); - guest_test_msrs_access(vm, addr_gva2hva(vm, msr_gva), - best); - kvm_vm_free(vm); - - /* Test hypercalls */ - vm = 
vm_create_default(VCPU_ID, 0, guest_hcall); - - vm_init_descriptor_tables(vm); - vcpu_init_descriptor_tables(vm, VCPU_ID); - vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler); - - /* Hypercall input/output */ - hcall_page = vm_vaddr_alloc_pages(vm, 2); - memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize()); - - hcall_params = vm_vaddr_alloc_page(vm); - memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize()); - - vcpu_args_set(vm, VCPU_ID, 2, addr_gva2gpa(vm, hcall_page), hcall_params); - vcpu_enable_cap(vm, VCPU_ID, &cap); - - vcpu_set_hv_cpuid(vm, VCPU_ID); - - best = kvm_get_supported_hv_cpuid(); + guest_test_msrs_access(); pr_info("Testing access to Hyper-V hypercalls\n"); - guest_test_hcalls_access(vm, addr_gva2hva(vm, hcall_params), - addr_gva2hva(vm, hcall_page), - addr_gva2hva(vm, hcall_page) + getpagesize(), - best); - - kvm_vm_free(vm); + guest_test_hcalls_access(); } diff --git a/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c index 5ba325cd64bf..29b18d565cf4 100644 --- a/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c +++ b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c @@ -54,12 +54,15 @@ static struct kvm_vm *sev_vm_create(bool es) return vm; } -static struct kvm_vm *__vm_create(void) +static struct kvm_vm *aux_vm_create(bool with_vcpus) { struct kvm_vm *vm; int i; vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR); + if (!with_vcpus) + return vm; + for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i) vm_vcpu_add(vm, i); @@ -89,11 +92,11 @@ static void test_sev_migrate_from(bool es) { struct kvm_vm *src_vm; struct kvm_vm *dst_vms[NR_MIGRATE_TEST_VMS]; - int i; + int i, ret; src_vm = sev_vm_create(es); for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i) - dst_vms[i] = __vm_create(); + dst_vms[i] = aux_vm_create(true); /* Initial migration from the src to the first dst. */ sev_migrate_from(dst_vms[0]->fd, src_vm->fd); @@ -102,7 +105,10 @@ static void test_sev_migrate_from(bool es) sev_migrate_from(dst_vms[i]->fd, dst_vms[i - 1]->fd); /* Migrate the guest back to the original VM. */ - sev_migrate_from(src_vm->fd, dst_vms[NR_MIGRATE_TEST_VMS - 1]->fd); + ret = __sev_migrate_from(src_vm->fd, dst_vms[NR_MIGRATE_TEST_VMS - 1]->fd); + TEST_ASSERT(ret == -1 && errno == EIO, + "VM that was migrated from should be dead. ret %d, errno: %d\n", ret, + errno); kvm_vm_free(src_vm); for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i) @@ -146,6 +152,8 @@ static void test_sev_migrate_locking(void) for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i) pthread_join(pt[i], NULL); + for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i) + kvm_vm_free(input[i].vm); } static void test_sev_migrate_parameters(void) @@ -157,12 +165,11 @@ static void test_sev_migrate_parameters(void) sev_vm = sev_vm_create(/* es= */ false); sev_es_vm = sev_vm_create(/* es= */ true); vm_no_vcpu = vm_create(VM_MODE_DEFAULT, 0, O_RDWR); - vm_no_sev = __vm_create(); + vm_no_sev = aux_vm_create(true); sev_es_vm_no_vmsa = vm_create(VM_MODE_DEFAULT, 0, O_RDWR); sev_ioctl(sev_es_vm_no_vmsa->fd, KVM_SEV_ES_INIT, NULL); vm_vcpu_add(sev_es_vm_no_vmsa, 1); - ret = __sev_migrate_from(sev_vm->fd, sev_es_vm->fd); TEST_ASSERT( ret == -1 && errno == EINVAL, @@ -191,13 +198,151 @@ static void test_sev_migrate_parameters(void) TEST_ASSERT(ret == -1 && errno == EINVAL, "Migrations require SEV enabled. 
ret %d, errno: %d\n", ret, errno); + + kvm_vm_free(sev_vm); + kvm_vm_free(sev_es_vm); + kvm_vm_free(sev_es_vm_no_vmsa); + kvm_vm_free(vm_no_vcpu); + kvm_vm_free(vm_no_sev); +} + +static int __sev_mirror_create(int dst_fd, int src_fd) +{ + struct kvm_enable_cap cap = { + .cap = KVM_CAP_VM_COPY_ENC_CONTEXT_FROM, + .args = { src_fd } + }; + + return ioctl(dst_fd, KVM_ENABLE_CAP, &cap); +} + + +static void sev_mirror_create(int dst_fd, int src_fd) +{ + int ret; + + ret = __sev_mirror_create(dst_fd, src_fd); + TEST_ASSERT(!ret, "Copying context failed, ret: %d, errno: %d\n", ret, errno); +} + +static void test_sev_mirror(bool es) +{ + struct kvm_vm *src_vm, *dst_vm; + struct kvm_sev_launch_start start = { + .policy = es ? SEV_POLICY_ES : 0 + }; + int i; + + src_vm = sev_vm_create(es); + dst_vm = aux_vm_create(false); + + sev_mirror_create(dst_vm->fd, src_vm->fd); + + /* Check that we can complete creation of the mirror VM. */ + for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i) + vm_vcpu_add(dst_vm, i); + sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_START, &start); + if (es) + sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL); + + kvm_vm_free(src_vm); + kvm_vm_free(dst_vm); +} + +static void test_sev_mirror_parameters(void) +{ + struct kvm_vm *sev_vm, *sev_es_vm, *vm_no_vcpu, *vm_with_vcpu; + int ret; + + sev_vm = sev_vm_create(/* es= */ false); + sev_es_vm = sev_vm_create(/* es= */ true); + vm_with_vcpu = aux_vm_create(true); + vm_no_vcpu = aux_vm_create(false); + + ret = __sev_mirror_create(sev_vm->fd, sev_vm->fd); + TEST_ASSERT( + ret == -1 && errno == EINVAL, + "Should not be able to copy context to self. ret: %d, errno: %d\n", + ret, errno); + + ret = __sev_mirror_create(sev_vm->fd, sev_es_vm->fd); + TEST_ASSERT( + ret == -1 && errno == EINVAL, + "Should not be able to copy context to SEV enabled VM. ret: %d, errno: %d\n", + ret, errno); + + ret = __sev_mirror_create(sev_es_vm->fd, sev_vm->fd); + TEST_ASSERT( + ret == -1 && errno == EINVAL, + "Should not be able to copy context to SEV-ES enabled VM. ret: %d, errno: %d\n", + ret, errno); + + ret = __sev_mirror_create(vm_no_vcpu->fd, vm_with_vcpu->fd); + TEST_ASSERT(ret == -1 && errno == EINVAL, + "Copy context requires SEV enabled. ret %d, errno: %d\n", ret, + errno); + + ret = __sev_mirror_create(vm_with_vcpu->fd, sev_vm->fd); + TEST_ASSERT( + ret == -1 && errno == EINVAL, + "SEV copy context requires no vCPUs on the destination. ret: %d, errno: %d\n", + ret, errno); + + kvm_vm_free(sev_vm); + kvm_vm_free(sev_es_vm); + kvm_vm_free(vm_with_vcpu); + kvm_vm_free(vm_no_vcpu); +} + +static void test_sev_move_copy(void) +{ + struct kvm_vm *dst_vm, *sev_vm, *mirror_vm, *dst_mirror_vm; + int ret; + + sev_vm = sev_vm_create(/* es= */ false); + dst_vm = aux_vm_create(true); + mirror_vm = aux_vm_create(false); + dst_mirror_vm = aux_vm_create(false); + + sev_mirror_create(mirror_vm->fd, sev_vm->fd); + ret = __sev_migrate_from(dst_vm->fd, sev_vm->fd); + TEST_ASSERT(ret == -1 && errno == EBUSY, + "Cannot migrate VM that has mirrors. ret %d, errno: %d\n", ret, + errno); + + /* The mirror itself can be migrated. */ + sev_migrate_from(dst_mirror_vm->fd, mirror_vm->fd); + ret = __sev_migrate_from(dst_vm->fd, sev_vm->fd); + TEST_ASSERT(ret == -1 && errno == EBUSY, + "Cannot migrate VM that has mirrors. ret %d, errno: %d\n", ret, + errno); + + /* + * mirror_vm is not a mirror anymore, dst_mirror_vm is. Thus, + * the owner can be copied as soon as dst_mirror_vm is gone.
+ */ + kvm_vm_free(dst_mirror_vm); + sev_migrate_from(dst_vm->fd, sev_vm->fd); + + kvm_vm_free(mirror_vm); + kvm_vm_free(dst_vm); + kvm_vm_free(sev_vm); } int main(int argc, char *argv[]) { - test_sev_migrate_from(/* es= */ false); - test_sev_migrate_from(/* es= */ true); - test_sev_migrate_locking(); - test_sev_migrate_parameters(); + if (kvm_check_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM)) { + test_sev_migrate_from(/* es= */ false); + test_sev_migrate_from(/* es= */ true); + test_sev_migrate_locking(); + test_sev_migrate_parameters(); + if (kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) + test_sev_move_copy(); + } + if (kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) { + test_sev_mirror(/* es= */ false); + test_sev_mirror(/* es= */ true); + test_sev_mirror_parameters(); + } return 0; } diff --git a/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c b/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c index df04f56ce859..30a81038df46 100644 --- a/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c +++ b/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c @@ -75,7 +75,7 @@ static void l1_guest_code(struct svm_test_data *svm) vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; /* No intercepts for real and virtual interrupts */ - vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR | INTERCEPT_VINTR); + vmcb->control.intercept &= ~(BIT(INTERCEPT_INTR) | BIT(INTERCEPT_VINTR)); /* Make a virtual interrupt VINTR_IRQ_NUMBER pending */ vmcb->control.int_ctl |= V_IRQ_MASK | (0x1 << V_INTR_PRIO_SHIFT); diff --git a/tools/testing/selftests/kvm/x86_64/userspace_io_test.c b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c new file mode 100644 index 000000000000..e4bef2e05686 --- /dev/null +++ b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <fcntl.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/ioctl.h> + +#include "test_util.h" + +#include "kvm_util.h" +#include "processor.h" + +#define VCPU_ID 1 + +static void guest_ins_port80(uint8_t *buffer, unsigned int count) +{ + unsigned long end; + + if (count == 2) + end = (unsigned long)buffer + 1; + else + end = (unsigned long)buffer + 8192; + + asm volatile("cld; rep; insb" : "+D"(buffer), "+c"(count) : "d"(0x80) : "memory"); + GUEST_ASSERT_1(count == 0, count); + GUEST_ASSERT_2((unsigned long)buffer == end, buffer, end); +} + +static void guest_code(void) +{ + uint8_t buffer[8192]; + int i; + + /* + * Special case tests. main() will adjust RCX 2 => 1 and 3 => 8192 to + * test that KVM doesn't explode when userspace modifies the "count" on + * a userspace I/O exit. KVM isn't required to play nice with the I/O + * itself as KVM doesn't support manipulating the count, it just needs + * to not explode or overflow a buffer. + */ + guest_ins_port80(buffer, 2); + guest_ins_port80(buffer, 3); + + /* Verify KVM fills the buffer correctly when not stuffing RCX. 
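The test hinges on the register contract of "rep insb": RCX holds the remaining repeat count and RDI the destination pointer, and the CPU updates both as the string instruction makes progress, which is what lets main() further down rewrite RCX across a userspace I/O exit. A minimal sketch of that contract, matching the semantics of guest_ins_port80 above (port 0x80 as in the test):

static inline void ins_port80(void *buf, unsigned long count)
{
	/*
	 * "cld" clears the direction flag so RDI counts upward; once the
	 * instruction retires, count has reached 0 and buf has advanced
	 * by the number of bytes read from port 0x80.
	 */
	asm volatile("cld; rep insb"
		     : "+D"(buf), "+c"(count)	/* RDI and RCX: read and written */
		     : "d"(0x80)		/* DX selects the I/O port */
		     : "memory");
}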
*/ + memset(buffer, 0, sizeof(buffer)); + guest_ins_port80(buffer, 8192); + for (i = 0; i < 8192; i++) + GUEST_ASSERT_2(buffer[i] == 0xaa, i, buffer[i]); + + GUEST_DONE(); +} + +int main(int argc, char *argv[]) +{ + struct kvm_regs regs; + struct kvm_run *run; + struct kvm_vm *vm; + struct ucall uc; + int rc; + + /* Tell stdout not to buffer its content */ + setbuf(stdout, NULL); + + /* Create VM */ + vm = vm_create_default(VCPU_ID, 0, guest_code); + run = vcpu_state(vm, VCPU_ID); + + memset(&regs, 0, sizeof(regs)); + + while (1) { + rc = _vcpu_run(vm, VCPU_ID); + + TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc); + TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, + "Unexpected exit reason: %u (%s),\n", + run->exit_reason, + exit_reason_str(run->exit_reason)); + + if (get_ucall(vm, VCPU_ID, &uc)) + break; + + TEST_ASSERT(run->io.port == 0x80, + "Expected I/O at port 0x80, got port 0x%x\n", run->io.port); + + /* + * Modify the rep string count in RCX: 2 => 1 and 3 => 8192. + * Note, this abuses KVM's batching of rep string I/O to avoid + * getting stuck in an infinite loop. That behavior isn't in + * scope from a testing perspective as it's not ABI in any way, + * i.e. it really is abusing internal KVM knowledge. + */ + vcpu_regs_get(vm, VCPU_ID, &regs); + if (regs.rcx == 2) + regs.rcx = 1; + if (regs.rcx == 3) + regs.rcx = 8192; + memset((void *)run + run->io.data_offset, 0xaa, 4096); + vcpu_regs_set(vm, VCPU_ID, &regs); + } + + switch (uc.cmd) { + case UCALL_DONE: + break; + case UCALL_ABORT: + TEST_FAIL("%s at %s:%ld : argN+1 = 0x%lx, argN+2 = 0x%lx", + (const char *)uc.args[0], __FILE__, uc.args[1], + uc.args[2], uc.args[3]); + default: + TEST_FAIL("Unknown ucall %lu", uc.cmd); + } + + kvm_vm_free(vm); + return 0; +} diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c index eda0d2a51224..a0699f00b3d6 100644 --- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c +++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c @@ -24,8 +24,12 @@ #define PVTIME_ADDR (SHINFO_REGION_GPA + PAGE_SIZE) #define RUNSTATE_ADDR (SHINFO_REGION_GPA + PAGE_SIZE + 0x20) +#define VCPU_INFO_ADDR (SHINFO_REGION_GPA + 0x40) #define RUNSTATE_VADDR (SHINFO_REGION_GVA + PAGE_SIZE + 0x20) +#define VCPU_INFO_VADDR (SHINFO_REGION_GVA + 0x40) + +#define EVTCHN_VECTOR 0x10 static struct kvm_vm *vm; @@ -56,15 +60,44 @@ struct vcpu_runstate_info { uint64_t time[4]; }; +struct arch_vcpu_info { + unsigned long cr2; + unsigned long pad; /* sizeof(vcpu_info_t) == 64 */ +}; + +struct vcpu_info { + uint8_t evtchn_upcall_pending; + uint8_t evtchn_upcall_mask; + unsigned long evtchn_pending_sel; + struct arch_vcpu_info arch; + struct pvclock_vcpu_time_info time; +}; /* 64 bytes (x86) */ + #define RUNSTATE_running 0 #define RUNSTATE_runnable 1 #define RUNSTATE_blocked 2 #define RUNSTATE_offline 3 +static void evtchn_handler(struct ex_regs *regs) +{ + struct vcpu_info *vi = (void *)VCPU_INFO_VADDR; + vi->evtchn_upcall_pending = 0; + + GUEST_SYNC(0x20); +} + static void guest_code(void) { struct vcpu_runstate_info *rs = (void *)RUNSTATE_VADDR; + __asm__ __volatile__( + "sti\n" + "nop\n" + ); + + /* Trigger an interrupt injection */ + GUEST_SYNC(0); + /* Test having the host set runstates manually */ GUEST_SYNC(RUNSTATE_runnable); GUEST_ASSERT(rs->time[RUNSTATE_runnable] != 0); @@ -153,7 +186,7 @@ int main(int argc, char *argv[]) struct kvm_xen_vcpu_attr vi = { .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, - .u.gpa = SHINFO_REGION_GPA + 0x40, + .u.gpa = VCPU_INFO_ADDR, };
vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &vi); @@ -163,6 +196,16 @@ int main(int argc, char *argv[]) }; vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &pvclock); + struct kvm_xen_hvm_attr vec = { + .type = KVM_XEN_ATTR_TYPE_UPCALL_VECTOR, + .u.vector = EVTCHN_VECTOR, + }; + vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &vec); + + vm_init_descriptor_tables(vm); + vcpu_init_descriptor_tables(vm, VCPU_ID); + vm_install_exception_handler(vm, EVTCHN_VECTOR, evtchn_handler); + if (do_runstate_tests) { struct kvm_xen_vcpu_attr st = { .type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR, @@ -171,9 +214,14 @@ int main(int argc, char *argv[]) vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &st); } + struct vcpu_info *vinfo = addr_gpa2hva(vm, VCPU_INFO_VADDR); + vinfo->evtchn_upcall_pending = 0; + struct vcpu_runstate_info *rs = addr_gpa2hva(vm, RUNSTATE_ADDR); rs->state = 0x5a; + bool evtchn_irq_expected = false; + for (;;) { volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID); struct ucall uc; @@ -193,16 +241,21 @@ int main(int argc, char *argv[]) struct kvm_xen_vcpu_attr rst; long rundelay; - /* If no runstate support, bail out early */ - if (!do_runstate_tests) - goto done; - - TEST_ASSERT(rs->state_entry_time == rs->time[0] + - rs->time[1] + rs->time[2] + rs->time[3], - "runstate times don't add up"); + if (do_runstate_tests) + TEST_ASSERT(rs->state_entry_time == rs->time[0] + + rs->time[1] + rs->time[2] + rs->time[3], + "runstate times don't add up"); switch (uc.args[1]) { - case RUNSTATE_running...RUNSTATE_offline: + case 0: + evtchn_irq_expected = true; + vinfo->evtchn_upcall_pending = 1; + break; + + case RUNSTATE_runnable...RUNSTATE_offline: + TEST_ASSERT(!evtchn_irq_expected, "Event channel IRQ not seen"); + if (!do_runstate_tests) + goto done; rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT; rst.u.runstate.state = uc.args[1]; vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &rst); @@ -236,6 +289,10 @@ int main(int argc, char *argv[]) sched_yield(); } while (get_run_delay() < rundelay); break; + case 0x20: + TEST_ASSERT(evtchn_irq_expected, "Unexpected event channel IRQ"); + evtchn_irq_expected = false; + break; } break; } diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 7615f29831eb..9897fa9ab953 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -34,6 +34,7 @@ TEST_PROGS += srv6_end_dt46_l3vpn_test.sh TEST_PROGS += srv6_end_dt4_l3vpn_test.sh TEST_PROGS += srv6_end_dt6_l3vpn_test.sh TEST_PROGS += vrf_strict_mode_test.sh +TEST_PROGS += arp_ndisc_evict_nocarrier.sh TEST_PROGS_EXTENDED := in_netns.sh setup_loopback.sh setup_veth.sh TEST_PROGS_EXTENDED += toeplitz_client.sh toeplitz.sh TEST_GEN_FILES = socket nettest diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh index 3313566ce906..a1da013d847b 100755 --- a/tools/testing/selftests/net/fcnal-test.sh +++ b/tools/testing/selftests/net/fcnal-test.sh @@ -4002,8 +4002,8 @@ EOF ################################################################################ # main -TESTS_IPV4="ipv4_ping ipv4_tcp ipv4_udp ipv4_addr_bind ipv4_runtime ipv4_netfilter" -TESTS_IPV6="ipv6_ping ipv6_tcp ipv6_udp ipv6_addr_bind ipv6_runtime ipv6_netfilter" +TESTS_IPV4="ipv4_ping ipv4_tcp ipv4_udp ipv4_bind ipv4_runtime ipv4_netfilter" +TESTS_IPV6="ipv6_ping ipv6_tcp ipv6_udp ipv6_bind ipv6_runtime ipv6_netfilter" TESTS_OTHER="use_cases" PAUSE_ON_FAIL=no @@ -4077,3 +4077,11 @@ cleanup 2>/dev/null printf "\nTests passed: %3d\n" ${nsuccess} 
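The explicit exit statuses that fcnal-test.sh gains in the hunk just below follow the kselftest convention; for reference, the corresponding constants on the C harness side (values as defined by kselftest.h):

/* kselftest exit codes, matching the shell's "exit 1 # KSFT_FAIL" and "$ksft_skip". */
#define KSFT_PASS  0
#define KSFT_FAIL  1
#define KSFT_XFAIL 2
#define KSFT_XPASS 3
#define KSFT_SKIP  4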
printf "Tests failed: %3d\n" ${nfail} + +if [ $nfail -ne 0 ]; then + exit 1 # KSFT_FAIL +elif [ $nsuccess -eq 0 ]; then + exit $ksft_skip +fi + +exit 0 # KSFT_PASS diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh index b5a69ad191b0..d444ee6aa3cb 100755 --- a/tools/testing/selftests/net/fib_nexthops.sh +++ b/tools/testing/selftests/net/fib_nexthops.sh @@ -629,6 +629,66 @@ ipv6_fcnal() log_test $? 0 "Nexthops removed on admin down" } +ipv6_grp_refs() +{ + if [ ! -x "$(command -v mausezahn)" ]; then + echo "SKIP: Could not run test; need mausezahn tool" + return + fi + + run_cmd "$IP link set dev veth1 up" + run_cmd "$IP link add veth1.10 link veth1 up type vlan id 10" + run_cmd "$IP link add veth1.20 link veth1 up type vlan id 20" + run_cmd "$IP -6 addr add 2001:db8:91::1/64 dev veth1.10" + run_cmd "$IP -6 addr add 2001:db8:92::1/64 dev veth1.20" + run_cmd "$IP -6 neigh add 2001:db8:91::2 lladdr 00:11:22:33:44:55 dev veth1.10" + run_cmd "$IP -6 neigh add 2001:db8:92::2 lladdr 00:11:22:33:44:55 dev veth1.20" + run_cmd "$IP nexthop add id 100 via 2001:db8:91::2 dev veth1.10" + run_cmd "$IP nexthop add id 101 via 2001:db8:92::2 dev veth1.20" + run_cmd "$IP nexthop add id 102 group 100" + run_cmd "$IP route add 2001:db8:101::1/128 nhid 102" + + # create per-cpu dsts through nh 100 + run_cmd "ip netns exec me mausezahn -6 veth1.10 -B 2001:db8:101::1 -A 2001:db8:91::1 -c 5 -t tcp "dp=1-1023, flags=syn" >/dev/null 2>&1" + + # remove nh 100 from the group to delete the route potentially leaving + # a stale per-cpu dst which holds a reference to the nexthop's net + # device and to the IPv6 route + run_cmd "$IP nexthop replace id 102 group 101" + run_cmd "$IP route del 2001:db8:101::1/128" + + # add both nexthops to the group so a reference is taken on them + run_cmd "$IP nexthop replace id 102 group 100/101" + + # if the bug described in commit "net: nexthop: release IPv6 per-cpu + # dsts when replacing a nexthop group" exists at this point we have + # an unlinked IPv6 route (but not freed due to stale dst) with a + # reference over the group so we delete the group which will again + # only unlink it due to the route reference + run_cmd "$IP nexthop del id 102" + + # delete the nexthop with stale dst, since we have an unlinked + # group with a ref to it and an unlinked IPv6 route with ref to the + # group, the nh will only be unlinked and not freed so the stale dst + # remains forever and we get a net device refcount imbalance + run_cmd "$IP nexthop del id 100" + + # if a reference was lost this command will hang because the net device + # cannot be removed + timeout -s KILL 5 ip netns exec me ip link del veth1.10 >/dev/null 2>&1 + + # we can't cleanup if the command is hung trying to delete the netdev + if [ $? -eq 137 ]; then + return 1 + fi + + # cleanup + run_cmd "$IP link del veth1.20" + run_cmd "$IP nexthop flush" + + return 0 +} + ipv6_grp_fcnal() { local rc @@ -734,6 +794,9 @@ ipv6_grp_fcnal() run_cmd "$IP nexthop add id 108 group 31/24" log_test $? 2 "Nexthop group can not have a blackhole and another nexthop" + + ipv6_grp_refs + log_test $? 
0 "Nexthop group replace refcounts" } ipv6_res_grp_fcnal() diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh index 5abe92d55b69..996af1ae3d3d 100755 --- a/tools/testing/selftests/net/fib_tests.sh +++ b/tools/testing/selftests/net/fib_tests.sh @@ -444,24 +444,63 @@ fib_rp_filter_test() setup set -e + ip netns add ns2 + ip netns set ns2 auto + + ip -netns ns2 link set dev lo up + + $IP link add name veth1 type veth peer name veth2 + $IP link set dev veth2 netns ns2 + $IP address add 192.0.2.1/24 dev veth1 + ip -netns ns2 address add 192.0.2.1/24 dev veth2 + $IP link set dev veth1 up + ip -netns ns2 link set dev veth2 up + $IP link set dev lo address 52:54:00:6a:c7:5e - $IP link set dummy0 address 52:54:00:6a:c7:5e - $IP link add dummy1 type dummy - $IP link set dummy1 address 52:54:00:6a:c7:5e - $IP link set dev dummy1 up + $IP link set dev veth1 address 52:54:00:6a:c7:5e + ip -netns ns2 link set dev lo address 52:54:00:6a:c7:5e + ip -netns ns2 link set dev veth2 address 52:54:00:6a:c7:5e + + # 1. (ns2) redirect lo's egress to veth2's egress + ip netns exec ns2 tc qdisc add dev lo parent root handle 1: fq_codel + ip netns exec ns2 tc filter add dev lo parent 1: protocol arp basic \ + action mirred egress redirect dev veth2 + ip netns exec ns2 tc filter add dev lo parent 1: protocol ip basic \ + action mirred egress redirect dev veth2 + + # 2. (ns1) redirect veth1's ingress to lo's ingress + $NS_EXEC tc qdisc add dev veth1 ingress + $NS_EXEC tc filter add dev veth1 ingress protocol arp basic \ + action mirred ingress redirect dev lo + $NS_EXEC tc filter add dev veth1 ingress protocol ip basic \ + action mirred ingress redirect dev lo + + # 3. (ns1) redirect lo's egress to veth1's egress + $NS_EXEC tc qdisc add dev lo parent root handle 1: fq_codel + $NS_EXEC tc filter add dev lo parent 1: protocol arp basic \ + action mirred egress redirect dev veth1 + $NS_EXEC tc filter add dev lo parent 1: protocol ip basic \ + action mirred egress redirect dev veth1 + + # 4. (ns2) redirect veth2's ingress to lo's ingress + ip netns exec ns2 tc qdisc add dev veth2 ingress + ip netns exec ns2 tc filter add dev veth2 ingress protocol arp basic \ + action mirred ingress redirect dev lo + ip netns exec ns2 tc filter add dev veth2 ingress protocol ip basic \ + action mirred ingress redirect dev lo + $NS_EXEC sysctl -qw net.ipv4.conf.all.rp_filter=1 $NS_EXEC sysctl -qw net.ipv4.conf.all.accept_local=1 $NS_EXEC sysctl -qw net.ipv4.conf.all.route_localnet=1 - - $NS_EXEC tc qd add dev dummy1 parent root handle 1: fq_codel - $NS_EXEC tc filter add dev dummy1 parent 1: protocol arp basic action mirred egress redirect dev lo - $NS_EXEC tc filter add dev dummy1 parent 1: protocol ip basic action mirred egress redirect dev lo + ip netns exec ns2 sysctl -qw net.ipv4.conf.all.rp_filter=1 + ip netns exec ns2 sysctl -qw net.ipv4.conf.all.accept_local=1 + ip netns exec ns2 sysctl -qw net.ipv4.conf.all.route_localnet=1 set +e - run_cmd "ip netns exec ns1 ping -I dummy1 -w1 -c1 198.51.100.1" + run_cmd "ip netns exec ns2 ping -w1 -c1 192.0.2.1" log_test $? 0 "rp_filter passes local packets" - run_cmd "ip netns exec ns1 ping -I dummy1 -w1 -c1 127.0.0.1" + run_cmd "ip netns exec ns2 ping -w1 -c1 127.0.0.1" log_test $? 
0 "rp_filter passes loopback packets" cleanup diff --git a/tools/testing/selftests/net/forwarding/config b/tools/testing/selftests/net/forwarding/config index a4bd1b087303..697994a9278b 100644 --- a/tools/testing/selftests/net/forwarding/config +++ b/tools/testing/selftests/net/forwarding/config @@ -6,6 +6,7 @@ CONFIG_IPV6_MULTIPLE_TABLES=y CONFIG_NET_VRF=m CONFIG_BPF_SYSCALL=y CONFIG_CGROUP_BPF=y +CONFIG_NET_ACT_CT=m CONFIG_NET_ACT_MIRRED=m CONFIG_NET_ACT_MPLS=m CONFIG_NET_ACT_VLAN=m diff --git a/tools/testing/selftests/net/forwarding/tc_actions.sh b/tools/testing/selftests/net/forwarding/tc_actions.sh index d9eca227136b..de19eb6c38f0 100755 --- a/tools/testing/selftests/net/forwarding/tc_actions.sh +++ b/tools/testing/selftests/net/forwarding/tc_actions.sh @@ -3,7 +3,7 @@ ALL_TESTS="gact_drop_and_ok_test mirred_egress_redirect_test \ mirred_egress_mirror_test matchall_mirred_egress_mirror_test \ - gact_trap_test" + gact_trap_test mirred_egress_to_ingress_test" NUM_NETIFS=4 source tc_common.sh source lib.sh @@ -13,10 +13,12 @@ tcflags="skip_hw" h1_create() { simple_if_init $h1 192.0.2.1/24 + tc qdisc add dev $h1 clsact } h1_destroy() { + tc qdisc del dev $h1 clsact simple_if_fini $h1 192.0.2.1/24 } @@ -153,6 +155,49 @@ gact_trap_test() log_test "trap ($tcflags)" } +mirred_egress_to_ingress_test() +{ + RET=0 + + tc filter add dev $h1 protocol ip pref 100 handle 100 egress flower \ + ip_proto icmp src_ip 192.0.2.1 dst_ip 192.0.2.2 type 8 action \ + ct commit nat src addr 192.0.2.2 pipe \ + ct clear pipe \ + ct commit nat dst addr 192.0.2.1 pipe \ + mirred ingress redirect dev $h1 + + tc filter add dev $swp1 protocol ip pref 11 handle 111 ingress flower \ + ip_proto icmp src_ip 192.0.2.1 dst_ip 192.0.2.2 type 8 action drop + tc filter add dev $swp1 protocol ip pref 12 handle 112 ingress flower \ + ip_proto icmp src_ip 192.0.2.1 dst_ip 192.0.2.2 type 0 action pass + + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \ + -t icmp "ping,id=42,seq=10" -q + + tc_check_packets "dev $h1 egress" 100 1 + check_err $? "didn't mirror first packet" + + tc_check_packets "dev $swp1 ingress" 111 1 + check_fail $? "didn't redirect first packet" + tc_check_packets "dev $swp1 ingress" 112 1 + check_err $? "didn't receive reply to first packet" + + ping 192.0.2.2 -I$h1 -c1 -w1 -q 1>/dev/null 2>&1 + + tc_check_packets "dev $h1 egress" 100 2 + check_err $? "didn't mirror second packet" + tc_check_packets "dev $swp1 ingress" 111 1 + check_fail $? "didn't redirect second packet" + tc_check_packets "dev $swp1 ingress" 112 2 + check_err $? "didn't receive reply to second packet" + + tc filter del dev $h1 egress protocol ip pref 100 handle 100 flower + tc filter del dev $swp1 ingress protocol ip pref 11 handle 111 flower + tc filter del dev $swp1 ingress protocol ip pref 12 handle 112 flower + + log_test "mirred_egress_to_ingress ($tcflags)" +} + setup_prepare() { h1=${NETIFS[p1]} diff --git a/tools/testing/selftests/net/gre_gso.sh b/tools/testing/selftests/net/gre_gso.sh index fdeb44d621eb..3224651db97b 100755 --- a/tools/testing/selftests/net/gre_gso.sh +++ b/tools/testing/selftests/net/gre_gso.sh @@ -118,16 +118,18 @@ gre_gst_test_checks() local addr=$2 local proto=$3 - $NS_EXEC nc $proto -kl $port >/dev/null & + [ "$proto" == 6 ] && addr="[$addr]" + + $NS_EXEC socat - tcp${proto}-listen:$port,reuseaddr,fork >/dev/null & PID=$! while ! 
$NS_EXEC ss -ltn | grep -q $port; do ((i++)); sleep 0.01; done - cat $TMPFILE | timeout 1 nc $proto -N $addr $port + cat $TMPFILE | timeout 1 socat -u STDIN TCP:$addr:$port log_test $? 0 "$name - copy file w/ TSO" ethtool -K veth0 tso off - cat $TMPFILE | timeout 1 nc $proto -N $addr $port + cat $TMPFILE | timeout 1 socat -u STDIN TCP:$addr:$port log_test $? 0 "$name - copy file w/ GSO" ethtool -K veth0 tso on @@ -155,8 +157,8 @@ gre6_gso_test() sleep 2 - gre_gst_test_checks GREv6/v4 172.16.2.2 - gre_gst_test_checks GREv6/v6 2001:db8:1::2 -6 + gre_gst_test_checks GREv6/v4 172.16.2.2 4 + gre_gst_test_checks GREv6/v6 2001:db8:1::2 6 cleanup } @@ -212,8 +214,8 @@ if [ ! -x "$(command -v ip)" ]; then exit $ksft_skip fi -if [ ! -x "$(command -v nc)" ]; then - echo "SKIP: Could not run test without nc tool" +if [ ! -x "$(command -v socat)" ]; then + echo "SKIP: Could not run test without socat tool" exit $ksft_skip fi diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c index e61fc4c32ba2..6e468e0f42f7 100644 --- a/tools/testing/selftests/net/tls.c +++ b/tools/testing/selftests/net/tls.c @@ -31,6 +31,8 @@ struct tls_crypto_info_keys { struct tls12_crypto_info_chacha20_poly1305 chacha20; struct tls12_crypto_info_sm4_gcm sm4gcm; struct tls12_crypto_info_sm4_ccm sm4ccm; + struct tls12_crypto_info_aes_ccm_128 aesccm128; + struct tls12_crypto_info_aes_gcm_256 aesgcm256; }; size_t len; }; @@ -61,6 +63,16 @@ static void tls_crypto_info_init(uint16_t tls_version, uint16_t cipher_type, tls12->sm4ccm.info.version = tls_version; tls12->sm4ccm.info.cipher_type = cipher_type; break; + case TLS_CIPHER_AES_CCM_128: + tls12->len = sizeof(struct tls12_crypto_info_aes_ccm_128); + tls12->aesccm128.info.version = tls_version; + tls12->aesccm128.info.cipher_type = cipher_type; + break; + case TLS_CIPHER_AES_GCM_256: + tls12->len = sizeof(struct tls12_crypto_info_aes_gcm_256); + tls12->aesgcm256.info.version = tls_version; + tls12->aesgcm256.info.cipher_type = cipher_type; + break; default: break; } @@ -78,26 +90,21 @@ static void memrnd(void *s, size_t n) *byte++ = rand(); } -FIXTURE(tls_basic) -{ - int fd, cfd; - bool notls; -}; - -FIXTURE_SETUP(tls_basic) +static void ulp_sock_pair(struct __test_metadata *_metadata, + int *fd, int *cfd, bool *notls) { struct sockaddr_in addr; socklen_t len; int sfd, ret; - self->notls = false; + *notls = false; len = sizeof(addr); addr.sin_family = AF_INET; addr.sin_addr.s_addr = htonl(INADDR_ANY); addr.sin_port = 0; - self->fd = socket(AF_INET, SOCK_STREAM, 0); + *fd = socket(AF_INET, SOCK_STREAM, 0); sfd = socket(AF_INET, SOCK_STREAM, 0); ret = bind(sfd, &addr, sizeof(addr)); @@ -108,26 +115,96 @@ FIXTURE_SETUP(tls_basic) ret = getsockname(sfd, &addr, &len); ASSERT_EQ(ret, 0); - ret = connect(self->fd, &addr, sizeof(addr)); + ret = connect(*fd, &addr, sizeof(addr)); ASSERT_EQ(ret, 0); - self->cfd = accept(sfd, &addr, &len); - ASSERT_GE(self->cfd, 0); + *cfd = accept(sfd, &addr, &len); + ASSERT_GE(*cfd, 0); close(sfd); - ret = setsockopt(self->fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); + ret = setsockopt(*fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); if (ret != 0) { ASSERT_EQ(errno, ENOENT); - self->notls = true; + *notls = true; printf("Failure setting TCP_ULP, testing without tls\n"); return; } - ret = setsockopt(self->cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); + ret = setsockopt(*cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); ASSERT_EQ(ret, 0); } +/* Produce a basic cmsg */ +static int tls_send_cmsg(int fd, unsigned char 
record_type, + void *data, size_t len, int flags) +{ + char cbuf[CMSG_SPACE(sizeof(char))]; + int cmsg_len = sizeof(char); + struct cmsghdr *cmsg; + struct msghdr msg; + struct iovec vec; + + vec.iov_base = data; + vec.iov_len = len; + memset(&msg, 0, sizeof(struct msghdr)); + msg.msg_iov = &vec; + msg.msg_iovlen = 1; + msg.msg_control = cbuf; + msg.msg_controllen = sizeof(cbuf); + cmsg = CMSG_FIRSTHDR(&msg); + cmsg->cmsg_level = SOL_TLS; + /* test sending non-record types. */ + cmsg->cmsg_type = TLS_SET_RECORD_TYPE; + cmsg->cmsg_len = CMSG_LEN(cmsg_len); + *CMSG_DATA(cmsg) = record_type; + msg.msg_controllen = cmsg->cmsg_len; + + return sendmsg(fd, &msg, flags); +} + +static int tls_recv_cmsg(struct __test_metadata *_metadata, + int fd, unsigned char record_type, + void *data, size_t len, int flags) +{ + char cbuf[CMSG_SPACE(sizeof(char))]; + struct cmsghdr *cmsg; + unsigned char ctype; + struct msghdr msg; + struct iovec vec; + int n; + + vec.iov_base = data; + vec.iov_len = len; + memset(&msg, 0, sizeof(struct msghdr)); + msg.msg_iov = &vec; + msg.msg_iovlen = 1; + msg.msg_control = cbuf; + msg.msg_controllen = sizeof(cbuf); + + n = recvmsg(fd, &msg, flags); + + cmsg = CMSG_FIRSTHDR(&msg); + EXPECT_NE(cmsg, NULL); + EXPECT_EQ(cmsg->cmsg_level, SOL_TLS); + EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE); + ctype = *((unsigned char *)CMSG_DATA(cmsg)); + EXPECT_EQ(ctype, record_type); + + return n; +} + +FIXTURE(tls_basic) +{ + int fd, cfd; + bool notls; +}; + +FIXTURE_SETUP(tls_basic) +{ + ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls); +} + FIXTURE_TEARDOWN(tls_basic) { close(self->fd); @@ -196,63 +273,48 @@ FIXTURE_VARIANT_ADD(tls, 13_sm4_ccm) .cipher_type = TLS_CIPHER_SM4_CCM, }; +FIXTURE_VARIANT_ADD(tls, 12_aes_ccm) +{ + .tls_version = TLS_1_2_VERSION, + .cipher_type = TLS_CIPHER_AES_CCM_128, +}; + +FIXTURE_VARIANT_ADD(tls, 13_aes_ccm) +{ + .tls_version = TLS_1_3_VERSION, + .cipher_type = TLS_CIPHER_AES_CCM_128, +}; + +FIXTURE_VARIANT_ADD(tls, 12_aes_gcm_256) +{ + .tls_version = TLS_1_2_VERSION, + .cipher_type = TLS_CIPHER_AES_GCM_256, +}; + +FIXTURE_VARIANT_ADD(tls, 13_aes_gcm_256) +{ + .tls_version = TLS_1_3_VERSION, + .cipher_type = TLS_CIPHER_AES_GCM_256, +}; + FIXTURE_SETUP(tls) { struct tls_crypto_info_keys tls12; - struct sockaddr_in addr; - socklen_t len; - int sfd, ret; - - self->notls = false; - len = sizeof(addr); + int ret; tls_crypto_info_init(variant->tls_version, variant->cipher_type, &tls12); - addr.sin_family = AF_INET; - addr.sin_addr.s_addr = htonl(INADDR_ANY); - addr.sin_port = 0; - - self->fd = socket(AF_INET, SOCK_STREAM, 0); - sfd = socket(AF_INET, SOCK_STREAM, 0); + ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls); - ret = bind(sfd, &addr, sizeof(addr)); - ASSERT_EQ(ret, 0); - ret = listen(sfd, 10); - ASSERT_EQ(ret, 0); + if (self->notls) + return; - ret = getsockname(sfd, &addr, &len); + ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len); ASSERT_EQ(ret, 0); - ret = connect(self->fd, &addr, sizeof(addr)); + ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, tls12.len); ASSERT_EQ(ret, 0); - - ret = setsockopt(self->fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); - if (ret != 0) { - self->notls = true; - printf("Failure setting TCP_ULP, testing without tls\n"); - } - - if (!self->notls) { - ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, - tls12.len); - ASSERT_EQ(ret, 0); - } - - self->cfd = accept(sfd, &addr, &len); - ASSERT_GE(self->cfd, 0); - - if (!self->notls) { - ret = setsockopt(self->cfd, IPPROTO_TCP, 
TCP_ULP, "tls", - sizeof("tls")); - ASSERT_EQ(ret, 0); - - ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, - tls12.len); - ASSERT_EQ(ret, 0); - } - - close(sfd); } FIXTURE_TEARDOWN(tls) @@ -613,6 +675,95 @@ TEST_F(tls, splice_to_pipe) EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); } +TEST_F(tls, splice_cmsg_to_pipe) +{ + char *test_str = "test_read"; + char record_type = 100; + int send_len = 10; + char buf[10]; + int p[2]; + + ASSERT_GE(pipe(p), 0); + EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10); + EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, send_len, 0), -1); + EXPECT_EQ(errno, EINVAL); + EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1); + EXPECT_EQ(errno, EIO); + EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type, + buf, sizeof(buf), MSG_WAITALL), + send_len); + EXPECT_EQ(memcmp(test_str, buf, send_len), 0); +} + +TEST_F(tls, splice_dec_cmsg_to_pipe) +{ + char *test_str = "test_read"; + char record_type = 100; + int send_len = 10; + char buf[10]; + int p[2]; + + ASSERT_GE(pipe(p), 0); + EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10); + EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1); + EXPECT_EQ(errno, EIO); + EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, send_len, 0), -1); + EXPECT_EQ(errno, EINVAL); + EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type, + buf, sizeof(buf), MSG_WAITALL), + send_len); + EXPECT_EQ(memcmp(test_str, buf, send_len), 0); +} + +TEST_F(tls, recv_and_splice) +{ + int send_len = TLS_PAYLOAD_MAX_LEN; + char mem_send[TLS_PAYLOAD_MAX_LEN]; + char mem_recv[TLS_PAYLOAD_MAX_LEN]; + int half = send_len / 2; + int p[2]; + + ASSERT_GE(pipe(p), 0); + EXPECT_EQ(send(self->fd, mem_send, send_len, 0), send_len); + /* Recv hald of the record, splice the other half */ + EXPECT_EQ(recv(self->cfd, mem_recv, half, MSG_WAITALL), half); + EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, half, SPLICE_F_NONBLOCK), + half); + EXPECT_EQ(read(p[0], &mem_recv[half], half), half); + EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); +} + +TEST_F(tls, peek_and_splice) +{ + int send_len = TLS_PAYLOAD_MAX_LEN; + char mem_send[TLS_PAYLOAD_MAX_LEN]; + char mem_recv[TLS_PAYLOAD_MAX_LEN]; + int chunk = TLS_PAYLOAD_MAX_LEN / 4; + int n, i, p[2]; + + memrnd(mem_send, sizeof(mem_send)); + + ASSERT_GE(pipe(p), 0); + for (i = 0; i < 4; i++) + EXPECT_EQ(send(self->fd, &mem_send[chunk * i], chunk, 0), + chunk); + + EXPECT_EQ(recv(self->cfd, mem_recv, chunk * 5 / 2, + MSG_WAITALL | MSG_PEEK), + chunk * 5 / 2); + EXPECT_EQ(memcmp(mem_send, mem_recv, chunk * 5 / 2), 0); + + n = 0; + while (n < send_len) { + i = splice(self->cfd, NULL, p[1], NULL, send_len - n, 0); + EXPECT_GT(i, 0); + n += i; + } + EXPECT_EQ(n, send_len); + EXPECT_EQ(read(p[0], mem_recv, send_len), send_len); + EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); +} + TEST_F(tls, recvmsg_single) { char const *test_str = "test_recvmsg_single"; @@ -1193,60 +1344,30 @@ TEST_F(tls, mutliproc_sendpage_writers) TEST_F(tls, control_msg) { - if (self->notls) - return; - - char cbuf[CMSG_SPACE(sizeof(char))]; - char const *test_str = "test_read"; - int cmsg_len = sizeof(char); + char *test_str = "test_read"; char record_type = 100; - struct cmsghdr *cmsg; - struct msghdr msg; int send_len = 10; - struct iovec vec; char buf[10]; - vec.iov_base = (char *)test_str; - vec.iov_len = 10; - memset(&msg, 0, sizeof(struct msghdr)); - msg.msg_iov = &vec; - msg.msg_iovlen = 1; - msg.msg_control = cbuf; - msg.msg_controllen = sizeof(cbuf); - cmsg = CMSG_FIRSTHDR(&msg); - cmsg->cmsg_level = 
SOL_TLS; - /* test sending non-record types. */ - cmsg->cmsg_type = TLS_SET_RECORD_TYPE; - cmsg->cmsg_len = CMSG_LEN(cmsg_len); - *CMSG_DATA(cmsg) = record_type; - msg.msg_controllen = cmsg->cmsg_len; + if (self->notls) + SKIP(return, "no TLS support"); - EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len); + EXPECT_EQ(tls_send_cmsg(self->fd, record_type, test_str, send_len, 0), + send_len); /* Should fail because we didn't provide a control message */ EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1); - vec.iov_base = buf; - EXPECT_EQ(recvmsg(self->cfd, &msg, MSG_WAITALL | MSG_PEEK), send_len); - - cmsg = CMSG_FIRSTHDR(&msg); - EXPECT_NE(cmsg, NULL); - EXPECT_EQ(cmsg->cmsg_level, SOL_TLS); - EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE); - record_type = *((unsigned char *)CMSG_DATA(cmsg)); - EXPECT_EQ(record_type, 100); + EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type, + buf, sizeof(buf), MSG_WAITALL | MSG_PEEK), + send_len); EXPECT_EQ(memcmp(buf, test_str, send_len), 0); /* Recv the message again without MSG_PEEK */ - record_type = 0; memset(buf, 0, sizeof(buf)); - EXPECT_EQ(recvmsg(self->cfd, &msg, MSG_WAITALL), send_len); - cmsg = CMSG_FIRSTHDR(&msg); - EXPECT_NE(cmsg, NULL); - EXPECT_EQ(cmsg->cmsg_level, SOL_TLS); - EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE); - record_type = *((unsigned char *)CMSG_DATA(cmsg)); - EXPECT_EQ(record_type, 100); + EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type, + buf, sizeof(buf), MSG_WAITALL), + send_len); EXPECT_EQ(memcmp(buf, test_str, send_len), 0); } @@ -1301,6 +1422,160 @@ TEST_F(tls, shutdown_reuse) EXPECT_EQ(errno, EISCONN); } +FIXTURE(tls_err) +{ + int fd, cfd; + int fd2, cfd2; + bool notls; +}; + +FIXTURE_VARIANT(tls_err) +{ + uint16_t tls_version; +}; + +FIXTURE_VARIANT_ADD(tls_err, 12_aes_gcm) +{ + .tls_version = TLS_1_2_VERSION, +}; + +FIXTURE_VARIANT_ADD(tls_err, 13_aes_gcm) +{ + .tls_version = TLS_1_3_VERSION, +}; + +FIXTURE_SETUP(tls_err) +{ + struct tls_crypto_info_keys tls12; + int ret; + + tls_crypto_info_init(variant->tls_version, TLS_CIPHER_AES_GCM_128, + &tls12); + + ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls); + ulp_sock_pair(_metadata, &self->fd2, &self->cfd2, &self->notls); + if (self->notls) + return; + + ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len); + ASSERT_EQ(ret, 0); + + ret = setsockopt(self->cfd2, SOL_TLS, TLS_RX, &tls12, tls12.len); + ASSERT_EQ(ret, 0); +} + +FIXTURE_TEARDOWN(tls_err) +{ + close(self->fd); + close(self->cfd); + close(self->fd2); + close(self->cfd2); +} + +TEST_F(tls_err, bad_rec) +{ + char buf[64]; + + if (self->notls) + SKIP(return, "no TLS support"); + + memset(buf, 0x55, sizeof(buf)); + EXPECT_EQ(send(self->fd2, buf, sizeof(buf), 0), sizeof(buf)); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EMSGSIZE); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), MSG_DONTWAIT), -1); + EXPECT_EQ(errno, EAGAIN); +} + +TEST_F(tls_err, bad_auth) +{ + char buf[128]; + int n; + + if (self->notls) + SKIP(return, "no TLS support"); + + memrnd(buf, sizeof(buf) / 2); + EXPECT_EQ(send(self->fd, buf, sizeof(buf) / 2, 0), sizeof(buf) / 2); + n = recv(self->cfd, buf, sizeof(buf), 0); + EXPECT_GT(n, sizeof(buf) / 2); + + buf[n - 1]++; + + EXPECT_EQ(send(self->fd2, buf, n, 0), n); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EBADMSG); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EBADMSG); +} + +TEST_F(tls_err, bad_in_large_read) +{ + char txt[3][64]; + char cip[3][128]; + char buf[3 * 
128]; + int i, n; + + if (self->notls) + SKIP(return, "no TLS support"); + + /* Put 3 records in the sockets */ + for (i = 0; i < 3; i++) { + memrnd(txt[i], sizeof(txt[i])); + EXPECT_EQ(send(self->fd, txt[i], sizeof(txt[i]), 0), + sizeof(txt[i])); + n = recv(self->cfd, cip[i], sizeof(cip[i]), 0); + EXPECT_GT(n, sizeof(txt[i])); + /* Break the third message */ + if (i == 2) + cip[2][n - 1]++; + EXPECT_EQ(send(self->fd2, cip[i], n, 0), n); + } + + /* We should be able to receive the first two messages */ + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), sizeof(txt[0]) * 2); + EXPECT_EQ(memcmp(buf, txt[0], sizeof(txt[0])), 0); + EXPECT_EQ(memcmp(buf + sizeof(txt[0]), txt[1], sizeof(txt[1])), 0); + /* Third mesasge is bad */ + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EBADMSG); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EBADMSG); +} + +TEST_F(tls_err, bad_cmsg) +{ + char *test_str = "test_read"; + int send_len = 10; + char cip[128]; + char buf[128]; + char txt[64]; + int n; + + if (self->notls) + SKIP(return, "no TLS support"); + + /* Queue up one data record */ + memrnd(txt, sizeof(txt)); + EXPECT_EQ(send(self->fd, txt, sizeof(txt), 0), sizeof(txt)); + n = recv(self->cfd, cip, sizeof(cip), 0); + EXPECT_GT(n, sizeof(txt)); + EXPECT_EQ(send(self->fd2, cip, n, 0), n); + + EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10); + n = recv(self->cfd, cip, sizeof(cip), 0); + cip[n - 1]++; /* Break it */ + EXPECT_GT(n, send_len); + EXPECT_EQ(send(self->fd2, cip, n, 0), n); + + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), sizeof(txt)); + EXPECT_EQ(memcmp(buf, txt, sizeof(txt)), 0); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EBADMSG); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EBADMSG); +} + TEST(non_established) { struct tls12_crypto_info_aes_gcm_256 tls12; struct sockaddr_in addr; @@ -1355,64 +1630,82 @@ TEST(non_established) { TEST(keysizes) { struct tls12_crypto_info_aes_gcm_256 tls12; - struct sockaddr_in addr; - int sfd, ret, fd, cfd; - socklen_t len; + int ret, fd, cfd; bool notls; - notls = false; - len = sizeof(addr); - memset(&tls12, 0, sizeof(tls12)); tls12.info.version = TLS_1_2_VERSION; tls12.info.cipher_type = TLS_CIPHER_AES_GCM_256; - addr.sin_family = AF_INET; - addr.sin_addr.s_addr = htonl(INADDR_ANY); - addr.sin_port = 0; + ulp_sock_pair(_metadata, &fd, &cfd, ¬ls); - fd = socket(AF_INET, SOCK_STREAM, 0); - sfd = socket(AF_INET, SOCK_STREAM, 0); + if (!notls) { + ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12, + sizeof(tls12)); + EXPECT_EQ(ret, 0); + + ret = setsockopt(cfd, SOL_TLS, TLS_RX, &tls12, + sizeof(tls12)); + EXPECT_EQ(ret, 0); + } + + close(fd); + close(cfd); +} + +TEST(tls_v6ops) { + struct tls_crypto_info_keys tls12; + struct sockaddr_in6 addr, addr2; + int sfd, ret, fd; + socklen_t len, len2; + + tls_crypto_info_init(TLS_1_2_VERSION, TLS_CIPHER_AES_GCM_128, &tls12); + + addr.sin6_family = AF_INET6; + addr.sin6_addr = in6addr_any; + addr.sin6_port = 0; + + fd = socket(AF_INET6, SOCK_STREAM, 0); + sfd = socket(AF_INET6, SOCK_STREAM, 0); ret = bind(sfd, &addr, sizeof(addr)); ASSERT_EQ(ret, 0); ret = listen(sfd, 10); ASSERT_EQ(ret, 0); + len = sizeof(addr); ret = getsockname(sfd, &addr, &len); ASSERT_EQ(ret, 0); ret = connect(fd, &addr, sizeof(addr)); ASSERT_EQ(ret, 0); + len = sizeof(addr); + ret = getsockname(fd, &addr, &len); + ASSERT_EQ(ret, 0); + ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); - if (ret != 0) { - 
notls = true;
- printf("Failure setting TCP_ULP, testing without tls\n");
+ if (ret) {
+ ASSERT_EQ(errno, ENOENT);
+ SKIP(return, "no TLS support");
}
+ ASSERT_EQ(ret, 0);

- if (!notls) {
- ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12,
- sizeof(tls12));
- EXPECT_EQ(ret, 0);
- }
+ ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12, tls12.len);
+ ASSERT_EQ(ret, 0);

- cfd = accept(sfd, &addr, &len);
- ASSERT_GE(cfd, 0);
+ ret = setsockopt(fd, SOL_TLS, TLS_RX, &tls12, tls12.len);
+ ASSERT_EQ(ret, 0);

- if (!notls) {
- ret = setsockopt(cfd, IPPROTO_TCP, TCP_ULP, "tls",
- sizeof("tls"));
- EXPECT_EQ(ret, 0);
+ len2 = sizeof(addr2);
+ ret = getsockname(fd, &addr2, &len2);
+ ASSERT_EQ(ret, 0);

- ret = setsockopt(cfd, SOL_TLS, TLS_RX, &tls12,
- sizeof(tls12));
- EXPECT_EQ(ret, 0);
- }
+ EXPECT_EQ(len2, len);
+ EXPECT_EQ(memcmp(&addr, &addr2, len), 0);

- close(sfd);
close(fd);
- close(cfd);
+ close(sfd);
}

TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index 8748199ac109..ffca314897c4 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -5,7 +5,8 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \
nft_concat_range.sh nft_conntrack_helper.sh \
nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
- ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh
+ ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \
+ conntrack_vrf.sh
LDLIBS = -lmnl
TEST_GEN_FILES = nf-queue
diff --git a/tools/testing/selftests/netfilter/conntrack_vrf.sh b/tools/testing/selftests/netfilter/conntrack_vrf.sh
new file mode 100755
index 000000000000..8b5ea9234588
--- /dev/null
+++ b/tools/testing/selftests/netfilter/conntrack_vrf.sh
@@ -0,0 +1,241 @@
+#!/bin/sh
+
+# This script demonstrates the interaction of conntrack and vrf.
+# The vrf driver calls the netfilter hooks again, with oif/iif
+# pointing at the VRF device.
+#
+# For ingress, this means the first iteration has the iifname of the
+# lower/real device. In this script, that's veth0.
+# The second iteration has iifname set to the vrf device, tvrf in this script.
+#
+# For egress, this is reversed: the first iteration has the vrf device,
+# the second iteration is done with the lower/real/veth0 device.
+#
+# test_ct_zone_in demonstrates an unexpected change of nftables
+# behavior caused by commit 09e856d54bda5f28 "vrf: Reset skb conntrack
+# connection on VRF rcv".
+#
+# It was possible to assign a conntrack zone to a packet (or mark it for
+# `notracking`) in the prerouting chain before conntrack, based on the real iif.
+#
+# After the change, that zone assignment is lost; the zone is instead
+# assigned based on the VRF master interface (in case such a rule exists),
+# so it is impossible to distinguish packets based on the original
+# interface.
+#
+# test_masquerade_vrf and test_masquerade_veth demonstrate the problem
+# that the commit mentioned above was supposed to fix, to make sure
+# that any fix for test case 1 won't break masquerade again.
+
+ksft_skip=4
+
+IP0=172.30.30.1
+IP1=172.30.30.2
+PFXL=30
+ret=0
+
+sfx=$(mktemp -u "XXXXXXXX")
+ns0="ns0-$sfx"
+ns1="ns1-$sfx"
+
+cleanup()
+{
+ ip netns pids $ns0 | xargs kill 2>/dev/null
+ ip netns pids $ns1 | xargs kill 2>/dev/null
+
+ ip netns del $ns0 $ns1
+}
+
+nft --version > /dev/null 2>&1
+if [ $?
-ne 0 ];then + echo "SKIP: Could not run test without nft tool" + exit $ksft_skip +fi + +ip -Version > /dev/null 2>&1 +if [ $? -ne 0 ];then + echo "SKIP: Could not run test without ip tool" + exit $ksft_skip +fi + +ip netns add "$ns0" +if [ $? -ne 0 ];then + echo "SKIP: Could not create net namespace $ns0" + exit $ksft_skip +fi +ip netns add "$ns1" + +trap cleanup EXIT + +ip netns exec $ns0 sysctl -q -w net.ipv4.conf.default.rp_filter=0 +ip netns exec $ns0 sysctl -q -w net.ipv4.conf.all.rp_filter=0 +ip netns exec $ns0 sysctl -q -w net.ipv4.conf.all.rp_filter=0 + +ip link add veth0 netns "$ns0" type veth peer name veth0 netns "$ns1" > /dev/null 2>&1 +if [ $? -ne 0 ];then + echo "SKIP: Could not add veth device" + exit $ksft_skip +fi + +ip -net $ns0 li add tvrf type vrf table 9876 +if [ $? -ne 0 ];then + echo "SKIP: Could not add vrf device" + exit $ksft_skip +fi + +ip -net $ns0 li set lo up + +ip -net $ns0 li set veth0 master tvrf +ip -net $ns0 li set tvrf up +ip -net $ns0 li set veth0 up +ip -net $ns1 li set veth0 up + +ip -net $ns0 addr add $IP0/$PFXL dev veth0 +ip -net $ns1 addr add $IP1/$PFXL dev veth0 + +ip netns exec $ns1 iperf3 -s > /dev/null 2>&1& +if [ $? -ne 0 ];then + echo "SKIP: Could not start iperf3" + exit $ksft_skip +fi + +# test vrf ingress handling. +# The incoming connection should be placed in conntrack zone 1, +# as decided by the first iteration of the ruleset. +test_ct_zone_in() +{ +ip netns exec $ns0 nft -f - <<EOF +table testct { + chain rawpre { + type filter hook prerouting priority raw; + + iif { veth0, tvrf } counter meta nftrace set 1 + iif veth0 counter ct zone set 1 counter return + iif tvrf counter ct zone set 2 counter return + ip protocol icmp counter + notrack counter + } + + chain rawout { + type filter hook output priority raw; + + oif veth0 counter ct zone set 1 counter return + oif tvrf counter ct zone set 2 counter return + notrack counter + } +} +EOF + ip netns exec $ns1 ping -W 1 -c 1 -I veth0 $IP0 > /dev/null + + # should be in zone 1, not zone 2 + count=$(ip netns exec $ns0 conntrack -L -s $IP1 -d $IP0 -p icmp --zone 1 2>/dev/null | wc -l) + if [ $count -eq 1 ]; then + echo "PASS: entry found in conntrack zone 1" + else + echo "FAIL: entry not found in conntrack zone 1" + count=$(ip netns exec $ns0 conntrack -L -s $IP1 -d $IP0 -p icmp --zone 2 2> /dev/null | wc -l) + if [ $count -eq 1 ]; then + echo "FAIL: entry found in zone 2 instead" + else + echo "FAIL: entry not in zone 1 or 2, dumping table" + ip netns exec $ns0 conntrack -L + ip netns exec $ns0 nft list ruleset + fi + fi +} + +# add masq rule that gets evaluated w. outif set to vrf device. +# This tests the first iteration of the packet through conntrack, +# oifname is the vrf device. +test_masquerade_vrf() +{ + local qdisc=$1 + + if [ "$qdisc" != "default" ]; then + tc -net $ns0 qdisc add dev tvrf root $qdisc + fi + + ip netns exec $ns0 conntrack -F 2>/dev/null + +ip netns exec $ns0 nft -f - <<EOF +flush ruleset +table ip nat { + chain rawout { + type filter hook output priority raw; + + oif tvrf ct state untracked counter + } + chain postrouting2 { + type filter hook postrouting priority mangle; + + oif tvrf ct state untracked counter + } + chain postrouting { + type nat hook postrouting priority 0; + # NB: masquerade should always be combined with 'oif(name) bla', + # lack of this is intentional here, we want to exercise double-snat. + ip saddr 172.30.30.0/30 counter masquerade random + } +} +EOF + ip netns exec $ns0 ip vrf exec tvrf iperf3 -t 1 -c $IP1 >/dev/null + if [ $? 
-ne 0 ]; then + echo "FAIL: iperf3 connect failure with masquerade + sport rewrite on vrf device" + ret=1 + return + fi + + # must also check that nat table was evaluated on second (lower device) iteration. + ip netns exec $ns0 nft list table ip nat |grep -q 'counter packets 2' && + ip netns exec $ns0 nft list table ip nat |grep -q 'untracked counter packets [1-9]' + if [ $? -eq 0 ]; then + echo "PASS: iperf3 connect with masquerade + sport rewrite on vrf device ($qdisc qdisc)" + else + echo "FAIL: vrf rules have unexpected counter value" + ret=1 + fi + + if [ "$qdisc" != "default" ]; then + tc -net $ns0 qdisc del dev tvrf root + fi +} + +# add masq rule that gets evaluated w. outif set to veth device. +# This tests the 2nd iteration of the packet through conntrack, +# oifname is the lower device (veth0 in this case). +test_masquerade_veth() +{ + ip netns exec $ns0 conntrack -F 2>/dev/null +ip netns exec $ns0 nft -f - <<EOF +flush ruleset +table ip nat { + chain postrouting { + type nat hook postrouting priority 0; + meta oif veth0 ip saddr 172.30.30.0/30 counter masquerade random + } +} +EOF + ip netns exec $ns0 ip vrf exec tvrf iperf3 -t 1 -c $IP1 > /dev/null + if [ $? -ne 0 ]; then + echo "FAIL: iperf3 connect failure with masquerade + sport rewrite on veth device" + ret=1 + return + fi + + # must also check that nat table was evaluated on second (lower device) iteration. + ip netns exec $ns0 nft list table ip nat |grep -q 'counter packets 2' + if [ $? -eq 0 ]; then + echo "PASS: iperf3 connect with masquerade + sport rewrite on veth device" + else + echo "FAIL: vrf masq rule has unexpected counter value" + ret=1 + fi +} + +test_ct_zone_in +test_masquerade_vrf "default" +test_masquerade_vrf "pfifo" +test_masquerade_veth + +exit $ret diff --git a/tools/testing/selftests/netfilter/nft_concat_range.sh b/tools/testing/selftests/netfilter/nft_concat_range.sh index 5a4938d6dcf2..ed61f6cab60f 100755 --- a/tools/testing/selftests/netfilter/nft_concat_range.sh +++ b/tools/testing/selftests/netfilter/nft_concat_range.sh @@ -23,8 +23,8 @@ TESTS="reported_issues correctness concurrency timeout" # Set types, defined by TYPE_ variables below TYPES="net_port port_net net6_port port_proto net6_port_mac net6_port_mac_proto - net_port_net net_mac net_mac_icmp net6_mac_icmp net6_port_net6_port - net_port_mac_proto_net" + net_port_net net_mac mac_net net_mac_icmp net6_mac_icmp + net6_port_net6_port net_port_mac_proto_net" # Reported bugs, also described by TYPE_ variables below BUGS="flush_remove_add" @@ -277,6 +277,23 @@ perf_entries 1000 perf_proto ipv4 " +TYPE_mac_net=" +display mac,net +type_spec ether_addr . ipv4_addr +chain_spec ether saddr . ip saddr +dst +src mac addr4 +start 1 +count 5 +src_delta 2000 +tools sendip nc bash +proto udp + +race_repeat 0 + +perf_duration 0 +" + TYPE_net_mac_icmp=" display net,mac - ICMP type_spec ipv4_addr . ether_addr @@ -984,7 +1001,8 @@ format() { fi done for f in ${src}; do - __expr="${__expr} . " + [ "${__expr}" != "{ " ] && __expr="${__expr} . " + __start="$(eval format_"${f}" "${srcstart}")" __end="$(eval format_"${f}" "${srcend}")" diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh index da1c1e4b6c86..d88867d2fed7 100755 --- a/tools/testing/selftests/netfilter/nft_nat.sh +++ b/tools/testing/selftests/netfilter/nft_nat.sh @@ -759,19 +759,21 @@ test_port_shadow() local result="" local logmsg="" - echo ROUTER | ip netns exec "$ns0" nc -w 5 -u -l -p 1405 >/dev/null 2>&1 & - nc_r=$! 
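# (A rough nc-to-socat mapping for the rewrite below, assuming socat's
#  standard address syntax: `nc -u -l -p 1405` corresponds to
#  `socat -u STDIN UDP4-LISTEN:1405`, and a client invocation such as
#  `nc -u -p 41404 $addr 1405` to `socat - UDP:$addr:1405,sourceport=41404`;
#  the `reuseport` option lets a second listener bind the same port.)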
+ # make shadow entry, from client (ns2), going to (ns1), port 41404, sport 1405. + echo "fake-entry" | ip netns exec "$ns2" timeout 1 socat -u STDIN UDP:"$daddrc":41404,sourceport=1405 - echo CLIENT | ip netns exec "$ns2" nc -w 5 -u -l -p 1405 >/dev/null 2>&1 & - nc_c=$! + echo ROUTER | ip netns exec "$ns0" timeout 5 socat -u STDIN UDP4-LISTEN:1405 & + sc_r=$! - # make shadow entry, from client (ns2), going to (ns1), port 41404, sport 1405. - echo "fake-entry" | ip netns exec "$ns2" nc -w 1 -p 1405 -u "$daddrc" 41404 > /dev/null + echo CLIENT | ip netns exec "$ns2" timeout 5 socat -u STDIN UDP4-LISTEN:1405,reuseport & + sc_c=$! + + sleep 0.3 # ns1 tries to connect to ns0:1405. With default settings this should connect # to client, it matches the conntrack entry created above. - result=$(echo "" | ip netns exec "$ns1" nc -w 1 -p 41404 -u "$daddrs" 1405) + result=$(echo "data" | ip netns exec "$ns1" timeout 1 socat - UDP:"$daddrs":1405,sourceport=41404) if [ "$result" = "$expect" ] ;then echo "PASS: portshadow test $test: got reply from ${expect}${logmsg}" @@ -780,7 +782,7 @@ test_port_shadow() ret=1 fi - kill $nc_r $nc_c 2>/dev/null + kill $sc_r $sc_c 2>/dev/null # flush udp entries for next test round, if any ip netns exec "$ns0" conntrack -F >/dev/null 2>&1 @@ -816,11 +818,10 @@ table $family raw { chain prerouting { type filter hook prerouting priority -300; policy accept; meta iif veth0 udp dport 1405 notrack - udp dport 1405 notrack } chain output { type filter hook output priority -300; policy accept; - udp sport 1405 notrack + meta oif veth0 udp sport 1405 notrack } } EOF @@ -851,6 +852,18 @@ test_port_shadowing() { local family="ip" + conntrack -h >/dev/null 2>&1 + if [ $? -ne 0 ];then + echo "SKIP: Could not run nat port shadowing test without conntrack tool" + return + fi + + socat -h > /dev/null 2>&1 + if [ $? -ne 0 ];then + echo "SKIP: Could not run nat port shadowing test without socat tool" + return + fi + ip netns exec "$ns0" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null ip netns exec "$ns0" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null diff --git a/tools/testing/selftests/netfilter/nft_queue.sh b/tools/testing/selftests/netfilter/nft_queue.sh index 3d202b90b33d..7d27f1f3bc01 100755 --- a/tools/testing/selftests/netfilter/nft_queue.sh +++ b/tools/testing/selftests/netfilter/nft_queue.sh @@ -16,6 +16,10 @@ timeout=4 cleanup() { + ip netns pids ${ns1} | xargs kill 2>/dev/null + ip netns pids ${ns2} | xargs kill 2>/dev/null + ip netns pids ${nsrouter} | xargs kill 2>/dev/null + ip netns del ${ns1} ip netns del ${ns2} ip netns del ${nsrouter} @@ -332,6 +336,55 @@ EOF echo "PASS: tcp via loopback and re-queueing" } +test_icmp_vrf() { + ip -net $ns1 link add tvrf type vrf table 9876 + if [ $? -ne 0 ];then + echo "SKIP: Could not add vrf device" + return + fi + + ip -net $ns1 li set eth0 master tvrf + ip -net $ns1 li set tvrf up + + ip -net $ns1 route add 10.0.2.0/24 via 10.0.1.1 dev eth0 table 9876 +ip netns exec ${ns1} nft -f /dev/stdin <<EOF +flush ruleset +table inet filter { + chain output { + type filter hook output priority 0; policy accept; + meta oifname "tvrf" icmp type echo-request counter queue num 1 + meta oifname "eth0" icmp type echo-request counter queue num 1 + } + chain post { + type filter hook postrouting priority 0; policy accept; + meta oifname "tvrf" icmp type echo-request counter queue num 1 + meta oifname "eth0" icmp type echo-request counter queue num 1 + } +} +EOF + ip netns exec ${ns1} ./nf-queue -q 1 -t $timeout & + local nfqpid=$! 
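# (Timing note for the sleep below: nf-queue must be attached to queue 1
#  before the ping runs; with default queue semantics, i.e. no bypass
#  flag, a packet hitting `queue num 1` without a listener is dropped,
#  and the postrouting counters checked afterwards would stay at zero.)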
+ + sleep 1 + ip netns exec ${ns1} ip vrf exec tvrf ping -c 1 10.0.2.99 > /dev/null + + for n in output post; do + for d in tvrf eth0; do + ip netns exec ${ns1} nft list chain inet filter $n | grep -q "oifname \"$d\" icmp type echo-request counter packets 1" + if [ $? -ne 0 ] ; then + echo "FAIL: chain $n: icmp packet counter mismatch for device $d" 1>&2 + ip netns exec ${ns1} nft list ruleset + ret=1 + return + fi + done + done + + wait $nfqpid + [ $? -eq 0 ] && echo "PASS: icmp+nfqueue via vrf" + wait 2>/dev/null +} + ip netns exec ${nsrouter} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null @@ -372,5 +425,6 @@ test_queue 20 test_tcp_forward test_tcp_localhost test_tcp_localhost_requeue +test_icmp_vrf exit $ret diff --git a/tools/testing/selftests/netfilter/nft_zones_many.sh b/tools/testing/selftests/netfilter/nft_zones_many.sh index ac646376eb01..04633119b29a 100755 --- a/tools/testing/selftests/netfilter/nft_zones_many.sh +++ b/tools/testing/selftests/netfilter/nft_zones_many.sh @@ -18,11 +18,17 @@ cleanup() ip netns del $ns } -ip netns add $ns -if [ $? -ne 0 ];then - echo "SKIP: Could not create net namespace $gw" - exit $ksft_skip -fi +checktool (){ + if ! $1 > /dev/null 2>&1; then + echo "SKIP: Could not $2" + exit $ksft_skip + fi +} + +checktool "nft --version" "run test without nft tool" +checktool "ip -Version" "run test without ip tool" +checktool "socat -V" "run test without socat tool" +checktool "ip netns add $ns" "create net namespace" trap cleanup EXIT @@ -71,7 +77,8 @@ EOF local start=$(date +%s%3N) i=$((i + 10000)) j=$((j + 1)) - dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" nc -w 1 -q 1 -u -p 12345 127.0.0.1 12345 > /dev/null + # nft rule in output places each packet in a different zone. + dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" socat STDIN UDP:127.0.0.1:12345,sourceport=12345 if [ $? -ne 0 ] ;then ret=1 break diff --git a/tools/testing/selftests/tc-testing/config b/tools/testing/selftests/tc-testing/config index b71828df5a6d..a3239d5e40c7 100644 --- a/tools/testing/selftests/tc-testing/config +++ b/tools/testing/selftests/tc-testing/config @@ -60,6 +60,8 @@ CONFIG_NET_IFE_SKBTCINDEX=m CONFIG_NET_SCH_FIFO=y CONFIG_NET_SCH_ETS=m CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_FQ_PIE=m +CONFIG_NETDEVSIM=m # ## Network testing diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json index 503982b8f295..91832400ddbd 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json @@ -68,7 +68,7 @@ "cmdUnderTest": "$TC action add action bpf object-file $EBPFDIR/action.o section action-ok index 667", "expExitCode": "0", "verifyCmd": "$TC action get action bpf index 667", - "matchPattern": "action order [0-9]*: bpf action.o:\\[action-ok\\] id [0-9]* tag [0-9a-f]{16}( jited)? default-action pipe.*index 667 ref", + "matchPattern": "action order [0-9]*: bpf action.o:\\[action-ok\\] id [0-9].* tag [0-9a-f]{16}( jited)? 
default-action pipe.*index 667 ref", "matchCount": "1", "teardown": [ "$TC action flush action bpf" diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json index 88a20c781e49..c6046096d9db 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json +++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json @@ -15,7 +15,7 @@ "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: mq", "expExitCode": "0", "verifyCmd": "$TC qdisc show dev $ETH", - "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-4] bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1", + "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-4]", "matchCount": "4", "teardown": [ "echo \"1\" > /sys/bus/netdevsim/del_device" @@ -37,7 +37,7 @@ "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: mq", "expExitCode": "0", "verifyCmd": "$TC qdisc show dev $ETH", - "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-9,a-f][0-9,a-f]{0,2} bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1", + "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-9,a-f][0-9,a-f]{0,2}", "matchCount": "256", "teardown": [ "echo \"1\" > /sys/bus/netdevsim/del_device" @@ -60,7 +60,7 @@ "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: mq", "expExitCode": "2", "verifyCmd": "$TC qdisc show dev $ETH", - "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-4] bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1", + "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-4]", "matchCount": "4", "teardown": [ "echo \"1\" > /sys/bus/netdevsim/del_device" @@ -82,7 +82,7 @@ "cmdUnderTest": "$TC qdisc del dev $ETH root handle 1: mq", "expExitCode": "2", "verifyCmd": "$TC qdisc show dev $ETH", - "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-4] bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1", + "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-4]", "matchCount": "0", "teardown": [ "echo \"1\" > /sys/bus/netdevsim/del_device" @@ -106,7 +106,7 @@ "cmdUnderTest": "$TC qdisc del dev $ETH root handle 1: mq", "expExitCode": "2", "verifyCmd": "$TC qdisc show dev $ETH", - "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-4] bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1", + "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-4]", "matchCount": "0", "teardown": [ "echo \"1\" > /sys/bus/netdevsim/del_device" @@ -128,7 +128,7 @@ "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: mq", "expExitCode": "2", "verifyCmd": "$TC qdisc show dev $ETH", - "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-4] bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1", + "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-4]", "matchCount": "0", "teardown": [ "echo \"1\" > /sys/bus/netdevsim/del_device" diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py index a3e43189d940..ee22e3447ec7 100755 --- a/tools/testing/selftests/tc-testing/tdc.py +++ b/tools/testing/selftests/tc-testing/tdc.py @@ -716,6 +716,7 @@ def set_operation_mode(pm, parser, args, remaining): list_test_cases(alltests) exit(0) + exit_code = 0 # KSFT_PASS if len(alltests): req_plugins = pm.get_required_plugins(alltests) try: @@ -724,6 +725,8 @@ def set_operation_mode(pm, parser, args, remaining): print('The following plugins were not found:') print('{}'.format(pde.missing_pg)) catresults = test_runner(pm, args, alltests) + if catresults.count_failures() != 0: + exit_code = 1 # KSFT_FAIL if args.format == 'none': print('Test results output suppression requested\n') else: @@ -748,6 +751,8 @@ def 
set_operation_mode(pm, parser, args, remaining): gid=int(os.getenv('SUDO_GID'))) else: print('No tests found\n') + exit_code = 4 # KSFT_SKIP + exit(exit_code) def main(): """ @@ -767,8 +772,5 @@ def main(): set_operation_mode(pm, parser, args, remaining) - exit(0) - - if __name__ == "__main__": main() diff --git a/tools/testing/selftests/tc-testing/tdc.sh b/tools/testing/selftests/tc-testing/tdc.sh index 7fe38c76db44..afb0cd86fa3d 100755 --- a/tools/testing/selftests/tc-testing/tdc.sh +++ b/tools/testing/selftests/tc-testing/tdc.sh @@ -1,5 +1,6 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 +modprobe netdevsim ./tdc.py -c actions --nobuildebpf ./tdc.py -c qdisc diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh index ebc4ee0fe179..8a9461aa0878 100755 --- a/tools/testing/selftests/wireguard/netns.sh +++ b/tools/testing/selftests/wireguard/netns.sh @@ -276,7 +276,11 @@ n0 ping -W 1 -c 1 192.168.241.2 n1 wg set wg0 peer "$pub2" endpoint 192.168.241.2:7 ip2 link del wg0 ip2 link del wg1 -! n0 ping -W 1 -c 10 -f 192.168.241.2 || false # Should not crash kernel +read _ _ tx_bytes_before < <(n0 wg show wg1 transfer) +! n0 ping -W 1 -c 10 -f 192.168.241.2 || false +sleep 1 +read _ _ tx_bytes_after < <(n0 wg show wg1 transfer) +(( tx_bytes_after - tx_bytes_before < 70000 )) ip0 link del wg1 ip1 link del wg0 @@ -609,6 +613,28 @@ ip0 link set wg0 up kill $ncat_pid ip0 link del wg0 +# Ensure that dst_cache references don't outlive netns lifetime +ip1 link add dev wg0 type wireguard +ip2 link add dev wg0 type wireguard +configure_peers +ip1 link add veth1 type veth peer name veth2 +ip1 link set veth2 netns $netns2 +ip1 addr add fd00:aa::1/64 dev veth1 +ip2 addr add fd00:aa::2/64 dev veth2 +ip1 link set veth1 up +ip2 link set veth2 up +waitiface $netns1 veth1 +waitiface $netns2 veth2 +ip1 -6 route add default dev veth1 via fd00:aa::2 +ip2 -6 route add default dev veth2 via fd00:aa::1 +n1 wg set wg0 peer "$pub2" endpoint [fd00:aa::2]:2 +n2 wg set wg0 peer "$pub1" endpoint [fd00:aa::1]:1 +n1 ping6 -c 1 fd00::2 +pp ip netns delete $netns1 +pp ip netns delete $netns2 +pp ip netns add $netns1 +pp ip netns add $netns2 + # Ensure there aren't circular reference loops ip1 link add wg1 type wireguard ip2 link add wg2 type wireguard @@ -627,7 +653,7 @@ while read -t 0.1 -r line 2>/dev/null || [[ $? 
-ne 142 ]]; do done < /dev/kmsg alldeleted=1 for object in "${!objects[@]}"; do - if [[ ${objects["$object"]} != *createddestroyed ]]; then + if [[ ${objects["$object"]} != *createddestroyed && ${objects["$object"]} != *createdcreateddestroyeddestroyed ]]; then echo "Error: $object: merely ${objects["$object"]}" >&3 alldeleted=0 fi diff --git a/tools/testing/selftests/wireguard/qemu/debug.config b/tools/testing/selftests/wireguard/qemu/debug.config index fe07d97df9fa..2b321b8a96cf 100644 --- a/tools/testing/selftests/wireguard/qemu/debug.config +++ b/tools/testing/selftests/wireguard/qemu/debug.config @@ -47,7 +47,7 @@ CONFIG_DEBUG_ATOMIC_SLEEP=y CONFIG_TRACE_IRQFLAGS=y CONFIG_DEBUG_BUGVERBOSE=y CONFIG_DEBUG_LIST=y -CONFIG_DEBUG_PI_LIST=y +CONFIG_DEBUG_PLIST=y CONFIG_PROVE_RCU=y CONFIG_SPARSE_RCU_POINTER=y CONFIG_RCU_CPU_STALL_TIMEOUT=21 diff --git a/tools/testing/selftests/wireguard/qemu/kernel.config b/tools/testing/selftests/wireguard/qemu/kernel.config index 74db83a0aedd..a9b5a520a1d2 100644 --- a/tools/testing/selftests/wireguard/qemu/kernel.config +++ b/tools/testing/selftests/wireguard/qemu/kernel.config @@ -66,6 +66,7 @@ CONFIG_PROC_SYSCTL=y CONFIG_SYSFS=y CONFIG_TMPFS=y CONFIG_CONSOLE_LOGLEVEL_DEFAULT=15 +CONFIG_LOG_BUF_SHIFT=18 CONFIG_PRINTK_TIME=y CONFIG_BLK_DEV_INITRD=y CONFIG_LEGACY_VSYSCALL_NONE=y diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index d31724500501..72c4e6b39389 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1531,11 +1531,10 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old, static int kvm_set_memslot(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, - struct kvm_memory_slot *old, struct kvm_memory_slot *new, int as_id, enum kvm_mr_change change) { - struct kvm_memory_slot *slot; + struct kvm_memory_slot *slot, old; struct kvm_memslots *slots; int r; @@ -1566,7 +1565,7 @@ static int kvm_set_memslot(struct kvm *kvm, * Note, the INVALID flag needs to be in the appropriate entry * in the freshly allocated memslots, not in @old or @new. */ - slot = id_to_memslot(slots, old->id); + slot = id_to_memslot(slots, new->id); slot->flags |= KVM_MEMSLOT_INVALID; /* @@ -1597,6 +1596,26 @@ static int kvm_set_memslot(struct kvm *kvm, kvm_copy_memslots(slots, __kvm_memslots(kvm, as_id)); } + /* + * Make a full copy of the old memslot, the pointer will become stale + * when the memslots are re-sorted by update_memslots(), and the old + * memslot needs to be referenced after calling update_memslots(), e.g. + * to free its resources and for arch specific behavior. This needs to + * happen *after* (re)acquiring slots_arch_lock. + */ + slot = id_to_memslot(slots, new->id); + if (slot) { + old = *slot; + } else { + WARN_ON_ONCE(change != KVM_MR_CREATE); + memset(&old, 0, sizeof(old)); + old.id = new->id; + old.as_id = as_id; + } + + /* Copy the arch-specific data, again after (re)acquiring slots_arch_lock. */ + memcpy(&new->arch, &old.arch, sizeof(old.arch)); + r = kvm_arch_prepare_memory_region(kvm, new, mem, change); if (r) goto out_slots; @@ -1604,14 +1623,18 @@ static int kvm_set_memslot(struct kvm *kvm, update_memslots(slots, new, change); slots = install_new_memslots(kvm, as_id, slots); - kvm_arch_commit_memory_region(kvm, mem, old, new, change); + kvm_arch_commit_memory_region(kvm, mem, &old, new, change); + + /* Free the old memslot's metadata. Note, this is the full copy!!! 
*/ + if (change == KVM_MR_DELETE) + kvm_free_memslot(kvm, &old); kvfree(slots); return 0; out_slots: if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { - slot = id_to_memslot(slots, old->id); + slot = id_to_memslot(slots, new->id); slot->flags &= ~KVM_MEMSLOT_INVALID; slots = install_new_memslots(kvm, as_id, slots); } else { @@ -1626,7 +1649,6 @@ static int kvm_delete_memslot(struct kvm *kvm, struct kvm_memory_slot *old, int as_id) { struct kvm_memory_slot new; - int r; if (!old->npages) return -EINVAL; @@ -1639,12 +1661,7 @@ static int kvm_delete_memslot(struct kvm *kvm, */ new.as_id = as_id; - r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE); - if (r) - return r; - - kvm_free_memslot(kvm, old); - return 0; + return kvm_set_memslot(kvm, mem, &new, as_id, KVM_MR_DELETE); } /* @@ -1672,7 +1689,8 @@ int __kvm_set_memory_region(struct kvm *kvm, id = (u16)mem->slot; /* General sanity checks */ - if (mem->memory_size & (PAGE_SIZE - 1)) + if ((mem->memory_size & (PAGE_SIZE - 1)) || + (mem->memory_size != (unsigned long)mem->memory_size)) return -EINVAL; if (mem->guest_phys_addr & (PAGE_SIZE - 1)) return -EINVAL; @@ -1718,7 +1736,6 @@ int __kvm_set_memory_region(struct kvm *kvm, if (!old.npages) { change = KVM_MR_CREATE; new.dirty_bitmap = NULL; - memset(&new.arch, 0, sizeof(new.arch)); } else { /* Modify an existing slot. */ if ((new.userspace_addr != old.userspace_addr) || (new.npages != old.npages) || @@ -1732,9 +1749,8 @@ int __kvm_set_memory_region(struct kvm *kvm, else /* Nothing to change. */ return 0; - /* Copy dirty_bitmap and arch from the current memslot. */ + /* Copy dirty_bitmap from the current memslot. */ new.dirty_bitmap = old.dirty_bitmap; - memcpy(&new.arch, &old.arch, sizeof(new.arch)); } if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { @@ -1760,7 +1776,7 @@ int __kvm_set_memory_region(struct kvm *kvm, bitmap_set(new.dirty_bitmap, 0, new.npages); } - r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change); + r = kvm_set_memslot(kvm, mem, &new, as_id, change); if (r) goto out_bitmap; @@ -2548,72 +2564,36 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) } EXPORT_SYMBOL_GPL(gfn_to_page); -void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache) +void kvm_release_pfn(kvm_pfn_t pfn, bool dirty) { if (pfn == 0) return; - if (cache) - cache->pfn = cache->gfn = 0; - if (dirty) kvm_release_pfn_dirty(pfn); else kvm_release_pfn_clean(pfn); } -static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn, - struct gfn_to_pfn_cache *cache, u64 gen) -{ - kvm_release_pfn(cache->pfn, cache->dirty, cache); - - cache->pfn = gfn_to_pfn_memslot(slot, gfn); - cache->gfn = gfn; - cache->dirty = false; - cache->generation = gen; -} - -static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn, - struct kvm_host_map *map, - struct gfn_to_pfn_cache *cache, - bool atomic) +int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map) { kvm_pfn_t pfn; void *hva = NULL; struct page *page = KVM_UNMAPPED_PAGE; - struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn); - u64 gen = slots->generation; if (!map) return -EINVAL; - if (cache) { - if (!cache->pfn || cache->gfn != gfn || - cache->generation != gen) { - if (atomic) - return -EAGAIN; - kvm_cache_gfn_to_pfn(slot, gfn, cache, gen); - } - pfn = cache->pfn; - } else { - if (atomic) - return -EAGAIN; - pfn = gfn_to_pfn_memslot(slot, gfn); - } + pfn = gfn_to_pfn(vcpu->kvm, gfn); if (is_error_noslot_pfn(pfn)) return -EINVAL; if (pfn_valid(pfn)) { page = 
pfn_to_page(pfn); - if (atomic) - hva = kmap_atomic(page); - else - hva = kmap(page); + hva = kmap(page); #ifdef CONFIG_HAS_IOMEM - } else if (!atomic) { - hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB); } else { - return -EINVAL; + hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB); #endif } @@ -2627,27 +2607,9 @@ static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn, return 0; } - -int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map, - struct gfn_to_pfn_cache *cache, bool atomic) -{ - return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map, - cache, atomic); -} -EXPORT_SYMBOL_GPL(kvm_map_gfn); - -int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map) -{ - return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map, - NULL, false); -} EXPORT_SYMBOL_GPL(kvm_vcpu_map); -static void __kvm_unmap_gfn(struct kvm *kvm, - struct kvm_memory_slot *memslot, - struct kvm_host_map *map, - struct gfn_to_pfn_cache *cache, - bool dirty, bool atomic) +void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty) { if (!map) return; @@ -2655,45 +2617,21 @@ static void __kvm_unmap_gfn(struct kvm *kvm, if (!map->hva) return; - if (map->page != KVM_UNMAPPED_PAGE) { - if (atomic) - kunmap_atomic(map->hva); - else - kunmap(map->page); - } + if (map->page != KVM_UNMAPPED_PAGE) + kunmap(map->page); #ifdef CONFIG_HAS_IOMEM - else if (!atomic) - memunmap(map->hva); else - WARN_ONCE(1, "Unexpected unmapping in atomic context"); + memunmap(map->hva); #endif if (dirty) - mark_page_dirty_in_slot(kvm, memslot, map->gfn); + kvm_vcpu_mark_page_dirty(vcpu, map->gfn); - if (cache) - cache->dirty |= dirty; - else - kvm_release_pfn(map->pfn, dirty, NULL); + kvm_release_pfn(map->pfn, dirty); map->hva = NULL; map->page = NULL; } - -int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, - struct gfn_to_pfn_cache *cache, bool dirty, bool atomic) -{ - __kvm_unmap_gfn(vcpu->kvm, gfn_to_memslot(vcpu->kvm, map->gfn), map, - cache, dirty, atomic); - return 0; -} -EXPORT_SYMBOL_GPL(kvm_unmap_gfn); - -void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty) -{ - __kvm_unmap_gfn(vcpu->kvm, kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), - map, NULL, dirty, false); -} EXPORT_SYMBOL_GPL(kvm_vcpu_unmap); struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn) @@ -2993,7 +2931,8 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, int r; gpa_t gpa = ghc->gpa + offset; - BUG_ON(len + offset > ghc->len); + if (WARN_ON_ONCE(len + offset > ghc->len)) + return -EINVAL; if (slots->generation != ghc->generation) { if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) @@ -3030,7 +2969,8 @@ int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, int r; gpa_t gpa = ghc->gpa + offset; - BUG_ON(len + offset > ghc->len); + if (WARN_ON_ONCE(len + offset > ghc->len)) + return -EINVAL; if (slots->generation != ghc->generation) { if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len)) |