217 files changed, 4741 insertions, 1960 deletions
diff --git a/Documentation/devicetree/bindings/display/lvds-data-mapping.yaml b/Documentation/devicetree/bindings/display/lvds-data-mapping.yaml new file mode 100644 index 000000000000..d68982fe2e9b --- /dev/null +++ b/Documentation/devicetree/bindings/display/lvds-data-mapping.yaml @@ -0,0 +1,84 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/lvds-data-mapping.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: LVDS Data Mapping + +maintainers: + - Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com> + - Thierry Reding <thierry.reding@gmail.com> + +description: | + LVDS is a physical layer specification defined in ANSI/TIA/EIA-644-A. Multiple + incompatible data link layers have been used over time to transmit image data + to LVDS devices. This bindings supports devices compatible with the following + specifications. + + [JEIDA] "Digital Interface Standards for Monitor", JEIDA-59-1999, February + 1999 (Version 1.0), Japan Electronic Industry Development Association (JEIDA) + [LDI] "Open LVDS Display Interface", May 1999 (Version 0.95), National + Semiconductor + [VESA] "VESA Notebook Panel Standard", October 2007 (Version 1.0), Video + Electronics Standards Association (VESA) + + Device compatible with those specifications have been marketed under the + FPD-Link and FlatLink brands. + +properties: + data-mapping: + enum: + - jeida-18 + - jeida-24 + - vesa-24 + description: | + The color signals mapping order. + + LVDS data mappings are defined as follows. + + - "jeida-18" - 18-bit data mapping compatible with the [JEIDA], [LDI] and + [VESA] specifications. Data are transferred as follows on 3 LVDS lanes. + + Slot 0 1 2 3 4 5 6 + ________________ _________________ + Clock \_______________________/ + ______ ______ ______ ______ ______ ______ ______ + DATA0 ><__G0__><__R5__><__R4__><__R3__><__R2__><__R1__><__R0__>< + DATA1 ><__B1__><__B0__><__G5__><__G4__><__G3__><__G2__><__G1__>< + DATA2 ><_CTL2_><_CTL1_><_CTL0_><__B5__><__B4__><__B3__><__B2__>< + + - "jeida-24" - 24-bit data mapping compatible with the [DSIM] and [LDI] + specifications. Data are transferred as follows on 4 LVDS lanes. + + Slot 0 1 2 3 4 5 6 + ________________ _________________ + Clock \_______________________/ + ______ ______ ______ ______ ______ ______ ______ + DATA0 ><__G2__><__R7__><__R6__><__R5__><__R4__><__R3__><__R2__>< + DATA1 ><__B3__><__B2__><__G7__><__G6__><__G5__><__G4__><__G3__>< + DATA2 ><_CTL2_><_CTL1_><_CTL0_><__B7__><__B6__><__B5__><__B4__>< + DATA3 ><_CTL3_><__B1__><__B0__><__G1__><__G0__><__R1__><__R0__>< + + - "vesa-24" - 24-bit data mapping compatible with the [VESA] specification. + Data are transferred as follows on 4 LVDS lanes. + + Slot 0 1 2 3 4 5 6 + ________________ _________________ + Clock \_______________________/ + ______ ______ ______ ______ ______ ______ ______ + DATA0 ><__G0__><__R5__><__R4__><__R3__><__R2__><__R1__><__R0__>< + DATA1 ><__B1__><__B0__><__G5__><__G4__><__G3__><__G2__><__G1__>< + DATA2 ><_CTL2_><_CTL1_><_CTL0_><__B5__><__B4__><__B3__><__B2__>< + DATA3 ><_CTL3_><__B7__><__B6__><__G7__><__G6__><__R7__><__R6__>< + + Control signals are mapped as follows. + + CTL0: HSync + CTL1: VSync + CTL2: Data Enable + CTL3: 0 + +additionalProperties: true + +... 
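For reference, the three mappings above correspond to the LVDS media bus formats used by DRM: jeida-18 carries RGB666 on three lanes (the 18-bit layout is shared by the JEIDA, LDI and VESA specs), while jeida-24 and vesa-24 carry RGB888 on four lanes with different bit placements. A minimal C sketch of how a consumer driver could turn the data-mapping string into a bus format code follows; it is illustrative only (the function name is made up) and not part of this patch set.

/*
 * Illustrative sketch, not part of this series: translate the
 * "data-mapping" device tree string into a media bus format code.
 */
#include <linux/errno.h>
#include <linux/media-bus-format.h>
#include <linux/of.h>
#include <linux/string.h>

static int example_lvds_get_bus_format(const struct device_node *np)
{
	const char *mapping;
	int ret;

	ret = of_property_read_string(np, "data-mapping", &mapping);
	if (ret < 0)
		return ret;

	if (!strcmp(mapping, "jeida-18"))
		return MEDIA_BUS_FMT_RGB666_1X7X3_SPWG;
	if (!strcmp(mapping, "jeida-24"))
		return MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA;
	if (!strcmp(mapping, "vesa-24"))
		return MEDIA_BUS_FMT_RGB888_1X7X4_SPWG;

	return -EINVAL;
}

The existing drm_of_lvds_get_data_mapping() helper in drivers/gpu/drm/drm_of.c implements essentially this lookup, so panel and bridge drivers normally rely on it rather than open-coding the comparison.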
diff --git a/Documentation/devicetree/bindings/display/lvds.yaml b/Documentation/devicetree/bindings/display/lvds.yaml index 7cd2ce7e9c33..224db4932011 100644 --- a/Documentation/devicetree/bindings/display/lvds.yaml +++ b/Documentation/devicetree/bindings/display/lvds.yaml @@ -6,83 +6,24 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: LVDS Display Common Properties +allOf: + - $ref: lvds-data-mapping.yaml# + maintainers: - Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com> - Thierry Reding <thierry.reding@gmail.com> -description: |+ - LVDS is a physical layer specification defined in ANSI/TIA/EIA-644-A. Multiple - incompatible data link layers have been used over time to transmit image data - to LVDS devices. This bindings supports devices compatible with the following - specifications. - - [JEIDA] "Digital Interface Standards for Monitor", JEIDA-59-1999, February - 1999 (Version 1.0), Japan Electronic Industry Development Association (JEIDA) - [LDI] "Open LVDS Display Interface", May 1999 (Version 0.95), National - Semiconductor - [VESA] "VESA Notebook Panel Standard", October 2007 (Version 1.0), Video - Electronics Standards Association (VESA) - - Device compatible with those specifications have been marketed under the - FPD-Link and FlatLink brands. +description: + This binding extends the data mapping defined in lvds-data-mapping.yaml. + It supports reversing the bit order on the formats defined there in order + to accomodate for even more specialized data formats, since a variety of + data formats and layouts is used to drive LVDS displays. properties: - data-mapping: - enum: - - jeida-18 - - jeida-24 - - vesa-24 - description: | - The color signals mapping order. - - LVDS data mappings are defined as follows. - - - "jeida-18" - 18-bit data mapping compatible with the [JEIDA], [LDI] and - [VESA] specifications. Data are transferred as follows on 3 LVDS lanes. - - Slot 0 1 2 3 4 5 6 - ________________ _________________ - Clock \_______________________/ - ______ ______ ______ ______ ______ ______ ______ - DATA0 ><__G0__><__R5__><__R4__><__R3__><__R2__><__R1__><__R0__>< - DATA1 ><__B1__><__B0__><__G5__><__G4__><__G3__><__G2__><__G1__>< - DATA2 ><_CTL2_><_CTL1_><_CTL0_><__B5__><__B4__><__B3__><__B2__>< - - - "jeida-24" - 24-bit data mapping compatible with the [DSIM] and [LDI] - specifications. Data are transferred as follows on 4 LVDS lanes. - - Slot 0 1 2 3 4 5 6 - ________________ _________________ - Clock \_______________________/ - ______ ______ ______ ______ ______ ______ ______ - DATA0 ><__G2__><__R7__><__R6__><__R5__><__R4__><__R3__><__R2__>< - DATA1 ><__B3__><__B2__><__G7__><__G6__><__G5__><__G4__><__G3__>< - DATA2 ><_CTL2_><_CTL1_><_CTL0_><__B7__><__B6__><__B5__><__B4__>< - DATA3 ><_CTL3_><__B1__><__B0__><__G1__><__G0__><__R1__><__R0__>< - - - "vesa-24" - 24-bit data mapping compatible with the [VESA] specification. - Data are transferred as follows on 4 LVDS lanes. - - Slot 0 1 2 3 4 5 6 - ________________ _________________ - Clock \_______________________/ - ______ ______ ______ ______ ______ ______ ______ - DATA0 ><__G0__><__R5__><__R4__><__R3__><__R2__><__R1__><__R0__>< - DATA1 ><__B1__><__B0__><__G5__><__G4__><__G3__><__G2__><__G1__>< - DATA2 ><_CTL2_><_CTL1_><_CTL0_><__B5__><__B4__><__B3__><__B2__>< - DATA3 ><_CTL3_><__B7__><__B6__><__G7__><__G6__><__R7__><__R6__>< - - Control signals are mapped as follows. 
- - CTL0: HSync - CTL1: VSync - CTL2: Data Enable - CTL3: 0 - data-mirror: type: boolean description: - If set, reverse the bit order described in the data mappings below on all + If set, reverse the bit order described in the data mappings on all data lanes, transmitting bits for slots 6 to 0 instead of 0 to 6. additionalProperties: true diff --git a/Documentation/devicetree/bindings/display/panel/leadtek,ltk050h3146w.yaml b/Documentation/devicetree/bindings/display/panel/leadtek,ltk050h3146w.yaml index 3f6efbb942da..a40ab887ada7 100644 --- a/Documentation/devicetree/bindings/display/panel/leadtek,ltk050h3146w.yaml +++ b/Documentation/devicetree/bindings/display/panel/leadtek,ltk050h3146w.yaml @@ -17,6 +17,7 @@ properties: enum: - leadtek,ltk050h3146w - leadtek,ltk050h3146w-a2 + - leadtek,ltk050h3148w reg: true backlight: true reset-gpios: true diff --git a/Documentation/devicetree/bindings/display/panel/newvision,nv3051d.yaml b/Documentation/devicetree/bindings/display/panel/newvision,nv3051d.yaml index 116c1b6030a2..cce775a87f87 100644 --- a/Documentation/devicetree/bindings/display/panel/newvision,nv3051d.yaml +++ b/Documentation/devicetree/bindings/display/panel/newvision,nv3051d.yaml @@ -7,9 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: NewVision NV3051D based LCD panel description: | - The NewVision NV3051D is a driver chip used to drive DSI panels. For now, - this driver only supports the 640x480 panels found in the Anbernic RG353 - based devices. + The NewVision NV3051D is a driver chip used to drive DSI panels. maintainers: - Chris Morgan <macromorgan@hotmail.com> @@ -21,6 +19,7 @@ properties: compatible: items: - enum: + - anbernic,rg351v-panel - anbernic,rg353p-panel - anbernic,rg353v-panel - const: newvision,nv3051d diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml index 50143fe67471..81ac402ceed1 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml +++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml @@ -21,9 +21,9 @@ description: | allOf: - $ref: panel-common.yaml# + - $ref: ../lvds-data-mapping.yaml# properties: - compatible: enum: # compatible must be listed in alphabetical order, ordered by compatible. 
@@ -359,6 +359,17 @@ properties: power-supply: true no-hpd: true hpd-gpios: true + data-mapping: true + +if: + not: + properties: + compatible: + contains: + const: innolux,g101ice-l01 +then: + properties: + data-mapping: false additionalProperties: false @@ -378,3 +389,16 @@ examples: }; }; }; + - | + panel_lvds: panel-lvds { + compatible = "innolux,g101ice-l01"; + power-supply = <&vcc_lcd_reg>; + + data-mapping = "jeida-24"; + + port { + panel_in_lvds: endpoint { + remote-endpoint = <&ltdc_out_lvds>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/display/panel/raydium,rm692e5.yaml b/Documentation/devicetree/bindings/display/panel/raydium,rm692e5.yaml new file mode 100644 index 000000000000..f436ba6738ca --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/raydium,rm692e5.yaml @@ -0,0 +1,73 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/raydium,rm692e5.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Raydium RM692E5 based DSI display panels + +maintainers: + - Konrad Dybcio <konradybcio@kernel.org> + +description: + The Raydium RM692E5 is a generic DSI Panel IC used to control + AMOLED panels. + +allOf: + - $ref: panel-common.yaml# + +properties: + compatible: + items: + - const: fairphone,fp5-rm692e5-boe + - const: raydium,rm692e5 + + dvdd-supply: + description: Digital voltage rail + + vci-supply: + description: Analog voltage rail + + vddio-supply: + description: I/O voltage rail + + reg: true + port: true + +required: + - compatible + - reg + - reset-gpios + - dvdd-supply + - vci-supply + - vddio-supply + - port + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + + dsi { + #address-cells = <1>; + #size-cells = <0>; + + panel@0 { + compatible = "fairphone,fp5-rm692e5-boe", "raydium,rm692e5"; + reg = <0>; + + reset-gpios = <&tlmm 44 GPIO_ACTIVE_LOW>; + dvdd-supply = <&vreg_oled_vci>; + vci-supply = <&vreg_l12c>; + vddio-supply = <&vreg_oled_dvdd>; + + port { + panel_in_0: endpoint { + remote-endpoint = <&dsi0_out>; + }; + }; + }; + }; + +... diff --git a/Documentation/gpu/drivers.rst b/Documentation/gpu/drivers.rst index 3a52f48215a3..45a12e552091 100644 --- a/Documentation/gpu/drivers.rst +++ b/Documentation/gpu/drivers.rst @@ -18,6 +18,7 @@ GPU Driver Documentation xen-front afbc komeda-kms + panfrost .. only:: subproject and html diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst index c19b34b1c0ed..602010cb6894 100644 --- a/Documentation/gpu/drm-mm.rst +++ b/Documentation/gpu/drm-mm.rst @@ -466,40 +466,40 @@ DRM MM Range Allocator Function References .. kernel-doc:: drivers/gpu/drm/drm_mm.c :export: -DRM GPU VA Manager -================== +DRM GPUVM +========= Overview -------- -.. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c +.. kernel-doc:: drivers/gpu/drm/drm_gpuvm.c :doc: Overview Split and Merge --------------- -.. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c +.. kernel-doc:: drivers/gpu/drm/drm_gpuvm.c :doc: Split and Merge Locking ------- -.. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c +.. kernel-doc:: drivers/gpu/drm/drm_gpuvm.c :doc: Locking Examples -------- -.. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c +.. kernel-doc:: drivers/gpu/drm/drm_gpuvm.c :doc: Examples -DRM GPU VA Manager Function References --------------------------------------- +DRM GPUVM Function References +----------------------------- -.. kernel-doc:: include/drm/drm_gpuva_mgr.h +.. 
kernel-doc:: include/drm/drm_gpuvm.h :internal: -.. kernel-doc:: drivers/gpu/drm/drm_gpuva_mgr.c +.. kernel-doc:: drivers/gpu/drm/drm_gpuvm.c :export: DRM Buddy Allocator diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst index eef5fd19bc92..632989df3727 100644 --- a/Documentation/gpu/drm-uapi.rst +++ b/Documentation/gpu/drm-uapi.rst @@ -285,6 +285,83 @@ for GPU1 and GPU2 from different vendors, and a third handler for mmapped regular files. Threads cause additional pain with signal handling as well. +Device reset +============ + +The GPU stack is really complex and is prone to errors, from hardware bugs +and faulty applications to everything in between the many layers. Some errors +require resetting the device in order to make the device usable again. This +section describes the expectations for DRM and usermode drivers when a +device resets and how to propagate the reset status. + +Device resets cannot be disabled without tainting the kernel, which can lead to +hanging the entire kernel through shrinkers/mmu_notifiers. Userspace's role in +device resets is to propagate the message to the application and apply any +special policy for blocking guilty applications, if any. A corollary is that +debugging a hung GPU context requires hardware support to be able to preempt such +a GPU context while it's stopped. + +Kernel Mode Driver +------------------ + +The KMD is responsible for checking if the device needs a reset, and for performing +it as needed. Usually a hang is detected when a job gets stuck executing. KMD +should keep track of resets, because userspace can query at any time about the +reset status for a specific context. This is needed to propagate to the rest of +the stack that a reset has happened. Currently, this is implemented by each +driver separately, with no common DRM interface. Ideally this should be properly +integrated into the DRM scheduler to provide a common ground for all drivers. After a +reset, KMD should reject new command submissions for affected contexts. + +User Mode Driver +---------------- + +After command submission, UMD should check if the submission was accepted or +rejected. After a reset, KMD should reject submissions, and UMD can issue an +ioctl to the KMD to check the reset status, and this can be checked more often +if the UMD requires it. After detecting a reset, UMD will then proceed to report +it to the application using the appropriate API error code, as explained in the +section below about robustness. + +Robustness +---------- + +The only way to try to keep a graphical API context working after a reset is if +it complies with the robustness aspects of the graphical API that it is using. + +Graphical APIs provide ways for applications to deal with device resets. However, +there is no guarantee that the app will use such features correctly, and a +userspace that doesn't support robust interfaces (like a non-robust +OpenGL context or API without any robustness support like libva) leaves the +robustness handling entirely to the userspace driver. There is no strong +community consensus on what the userspace driver should do in that case, +since all reasonable approaches have some clear downsides. + +OpenGL +~~~~~~ + +Apps using OpenGL should use the available robust interfaces, like the +extension ``GL_ARB_robustness`` (or ``GL_EXT_robustness`` for OpenGL ES). This +interface tells if a reset has happened, and if so, all the context state is +considered lost and the app proceeds by creating new ones. 
There's no consensus +on what to do if robustness is not in use. + +Vulkan +~~~~~~ + +Apps using Vulkan should check for ``VK_ERROR_DEVICE_LOST`` for submissions. +This error code means, among other things, that a device reset has happened and +it needs to recreate the contexts to keep going. + +Reporting causes of resets +-------------------------- + +Apart from propagating the reset through the stack so apps can recover, it's +really useful for driver developers to learn more about what caused the reset in +the first place. DRM devices should make use of devcoredump to store relevant +information about the reset, so this information can be added to user bug +reports. + .. _drm_driver_ioctl: IOCTL Support on Device Nodes diff --git a/Documentation/gpu/drm-usage-stats.rst b/Documentation/gpu/drm-usage-stats.rst index 044e6b2ed1be..7aca5c7a7b1d 100644 --- a/Documentation/gpu/drm-usage-stats.rst +++ b/Documentation/gpu/drm-usage-stats.rst @@ -169,3 +169,4 @@ Driver specific implementations ------------------------------- :ref:`i915-usage-stats` +:ref:`panfrost-usage-stats` diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst index 378e825754d5..0ca1550fd9dc 100644 --- a/Documentation/gpu/i915.rst +++ b/Documentation/gpu/i915.rst @@ -267,19 +267,22 @@ i915 driver. Intel GPU Basics ---------------- -An Intel GPU has multiple engines. There are several engine types. - -- RCS engine is for rendering 3D and performing compute, this is named - `I915_EXEC_RENDER` in user space. -- BCS is a blitting (copy) engine, this is named `I915_EXEC_BLT` in user - space. -- VCS is a video encode and decode engine, this is named `I915_EXEC_BSD` - in user space -- VECS is video enhancement engine, this is named `I915_EXEC_VEBOX` in user - space. -- The enumeration `I915_EXEC_DEFAULT` does not refer to specific engine; - instead it is to be used by user space to specify a default rendering - engine (for 3D) that may or may not be the same as RCS. +An Intel GPU has multiple engines. There are several engine types: + +- Render Command Streamer (RCS). An engine for rendering 3D and + performing compute. +- Blitting Command Streamer (BCS). An engine for performing blitting and/or + copying operations. +- Video Command Streamer. An engine used for video encoding and decoding. Also + sometimes called 'BSD' in hardware documentation. +- Video Enhancement Command Streamer (VECS). An engine for video enhancement. + Also sometimes called 'VEBOX' in hardware documentation. +- Compute Command Streamer (CCS). An engine that has access to the media and + GPGPU pipelines, but not the 3D pipeline. +- Graphics Security Controller (GSCCS). A dedicated engine for internal + communication with GSC controller on security related tasks like + High-bandwidth Digital Content Protection (HDCP), Protected Xe Path (PXP), + and HuC firmware authentication. The Intel GPU family is a family of integrated GPU's using Unified Memory Access. For having the GPU "do work", user space will feed the diff --git a/Documentation/gpu/panfrost.rst b/Documentation/gpu/panfrost.rst new file mode 100644 index 000000000000..b80e41f4b2c5 --- /dev/null +++ b/Documentation/gpu/panfrost.rst @@ -0,0 +1,40 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +========================= + drm/Panfrost Mali Driver +========================= + +.. 
_panfrost-usage-stats: + +Panfrost DRM client usage stats implementation +============================================== + +The drm/Panfrost driver implements the DRM client usage stats specification as +documented in :ref:`drm-client-usage-stats`. + +Example of the output showing the implemented key value pairs and entirety of +the currently possible format options: + +:: + pos: 0 + flags: 02400002 + mnt_id: 27 + ino: 531 + drm-driver: panfrost + drm-client-id: 14 + drm-engine-fragment: 1846584880 ns + drm-cycles-fragment: 1424359409 + drm-maxfreq-fragment: 799999987 Hz + drm-curfreq-fragment: 799999987 Hz + drm-engine-vertex-tiler: 71932239 ns + drm-cycles-vertex-tiler: 52617357 + drm-maxfreq-vertex-tiler: 799999987 Hz + drm-curfreq-vertex-tiler: 799999987 Hz + drm-total-memory: 290 MiB + drm-shared-memory: 0 MiB + drm-active-memory: 226 MiB + drm-resident-memory: 36496 KiB + drm-purgeable-memory: 128 KiB + +Possible `drm-engine-` key names are: `fragment`, and `vertex-tiler`. +`drm-curfreq-` values convey the current operating frequency for that engine. diff --git a/MAINTAINERS b/MAINTAINERS index c9ff335d2c8c..314088767b4e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1632,6 +1632,7 @@ R: Steven Price <steven.price@arm.com> L: dri-devel@lists.freedesktop.org S: Supported T: git git://anongit.freedesktop.org/drm/drm-misc +F: Documentation/gpu/panfrost.rst F: drivers/gpu/drm/panfrost/ F: include/uapi/drm/panfrost_drm.h @@ -6859,12 +6860,26 @@ M: Thomas Zimmermann <tzimmermann@suse.de> S: Maintained W: https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html T: git git://anongit.freedesktop.org/drm/drm-misc +F: Documentation/devicetree/bindings/display/ +F: Documentation/devicetree/bindings/gpu/ F: Documentation/gpu/ -F: drivers/gpu/drm/* +F: drivers/gpu/drm/ F: drivers/gpu/vga/ -F: include/drm/drm* +F: include/drm/drm F: include/linux/vga* -F: include/uapi/drm/drm* +F: include/uapi/drm/ +X: drivers/gpu/drm/amd/ +X: drivers/gpu/drm/armada/ +X: drivers/gpu/drm/etnaviv/ +X: drivers/gpu/drm/exynos/ +X: drivers/gpu/drm/i915/ +X: drivers/gpu/drm/kmb/ +X: drivers/gpu/drm/mediatek/ +X: drivers/gpu/drm/msm/ +X: drivers/gpu/drm/nouveau/ +X: drivers/gpu/drm/radeon/ +X: drivers/gpu/drm/renesas/ +X: drivers/gpu/drm/tegra/ DRM DRIVERS FOR ALLWINNER A10 M: Maxime Ripard <mripard@kernel.org> @@ -15356,6 +15371,7 @@ M: Laurentiu Palcu <laurentiu.palcu@oss.nxp.com> R: Lucas Stach <l.stach@pengutronix.de> L: dri-devel@lists.freedesktop.org S: Maintained +T: git git://anongit.freedesktop.org/drm/drm-misc F: Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml F: drivers/gpu/drm/imx/dcss/ diff --git a/arch/ia64/include/asm/fb.h b/arch/ia64/include/asm/fb.h index 1717b26fd423..7fce0d542359 100644 --- a/arch/ia64/include/asm/fb.h +++ b/arch/ia64/include/asm/fb.h @@ -8,17 +8,16 @@ #include <asm/page.h> -struct file; - -static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, - unsigned long off) +static inline pgprot_t pgprot_framebuffer(pgprot_t prot, + unsigned long vm_start, unsigned long vm_end, + unsigned long offset) { - if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start)) - vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + if (efi_range_is_wc(vm_start, vm_end - vm_start)) + return pgprot_writecombine(prot); else - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + return pgprot_noncached(prot); } -#define fb_pgprotect fb_pgprotect +#define pgprot_framebuffer pgprot_framebuffer static inline void fb_memcpy_fromio(void *to, 
const volatile void __iomem *from, size_t n) { diff --git a/arch/m68k/include/asm/fb.h b/arch/m68k/include/asm/fb.h index 24273fc7ad91..9941b7434b69 100644 --- a/arch/m68k/include/asm/fb.h +++ b/arch/m68k/include/asm/fb.h @@ -5,26 +5,27 @@ #include <asm/page.h> #include <asm/setup.h> -struct file; - -static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, - unsigned long off) +static inline pgprot_t pgprot_framebuffer(pgprot_t prot, + unsigned long vm_start, unsigned long vm_end, + unsigned long offset) { #ifdef CONFIG_MMU #ifdef CONFIG_SUN3 - pgprot_val(vma->vm_page_prot) |= SUN3_PAGE_NOCACHE; + pgprot_val(prot) |= SUN3_PAGE_NOCACHE; #else if (CPU_IS_020_OR_030) - pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE030; + pgprot_val(prot) |= _PAGE_NOCACHE030; if (CPU_IS_040_OR_060) { - pgprot_val(vma->vm_page_prot) &= _CACHEMASK040; + pgprot_val(prot) &= _CACHEMASK040; /* Use no-cache mode, serialized */ - pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE_S; + pgprot_val(prot) |= _PAGE_NOCACHE_S; } #endif /* CONFIG_SUN3 */ #endif /* CONFIG_MMU */ + + return prot; } -#define fb_pgprotect fb_pgprotect +#define pgprot_framebuffer pgprot_framebuffer #include <asm-generic/fb.h> diff --git a/arch/mips/include/asm/fb.h b/arch/mips/include/asm/fb.h index 18b7226403ba..d98d6681d64e 100644 --- a/arch/mips/include/asm/fb.h +++ b/arch/mips/include/asm/fb.h @@ -3,14 +3,13 @@ #include <asm/page.h> -struct file; - -static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, - unsigned long off) +static inline pgprot_t pgprot_framebuffer(pgprot_t prot, + unsigned long vm_start, unsigned long vm_end, + unsigned long offset) { - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + return pgprot_noncached(prot); } -#define fb_pgprotect fb_pgprotect +#define pgprot_framebuffer pgprot_framebuffer /* * MIPS doesn't define __raw_ I/O macros, so the helpers diff --git a/arch/powerpc/include/asm/fb.h b/arch/powerpc/include/asm/fb.h index 5f1a2e5f7654..3cecf14d51de 100644 --- a/arch/powerpc/include/asm/fb.h +++ b/arch/powerpc/include/asm/fb.h @@ -2,18 +2,20 @@ #ifndef _ASM_FB_H_ #define _ASM_FB_H_ -#include <linux/fs.h> - #include <asm/page.h> -static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, - unsigned long off) +static inline pgprot_t pgprot_framebuffer(pgprot_t prot, + unsigned long vm_start, unsigned long vm_end, + unsigned long offset) { - vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT, - vma->vm_end - vma->vm_start, - vma->vm_page_prot); + /* + * PowerPC's implementation of phys_mem_access_prot() does + * not use the file argument. Set it to NULL in preparation + * of later updates to the interface. 
+ */ + return phys_mem_access_prot(NULL, PHYS_PFN(offset), vm_end - vm_start, prot); } -#define fb_pgprotect fb_pgprotect +#define pgprot_framebuffer pgprot_framebuffer #include <asm-generic/fb.h> diff --git a/arch/sparc/include/asm/fb.h b/arch/sparc/include/asm/fb.h index 572ecd3e1cc4..24440c0fda49 100644 --- a/arch/sparc/include/asm/fb.h +++ b/arch/sparc/include/asm/fb.h @@ -4,15 +4,18 @@ #include <linux/io.h> +#include <asm/page.h> + struct fb_info; -struct file; -struct vm_area_struct; #ifdef CONFIG_SPARC32 -static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, - unsigned long off) -{ } -#define fb_pgprotect fb_pgprotect +static inline pgprot_t pgprot_framebuffer(pgprot_t prot, + unsigned long vm_start, unsigned long vm_end, + unsigned long offset) +{ + return prot; +} +#define pgprot_framebuffer pgprot_framebuffer #endif int fb_is_primary_device(struct fb_info *info); diff --git a/arch/x86/include/asm/fb.h b/arch/x86/include/asm/fb.h index 23873da8fb77..c3b9582de7ef 100644 --- a/arch/x86/include/asm/fb.h +++ b/arch/x86/include/asm/fb.h @@ -2,12 +2,14 @@ #ifndef _ASM_X86_FB_H #define _ASM_X86_FB_H +#include <asm/page.h> + struct fb_info; -struct file; -struct vm_area_struct; -void fb_pgprotect(struct file *file, struct vm_area_struct *vma, unsigned long off); -#define fb_pgprotect fb_pgprotect +pgprot_t pgprot_framebuffer(pgprot_t prot, + unsigned long vm_start, unsigned long vm_end, + unsigned long offset); +#define pgprot_framebuffer pgprot_framebuffer int fb_is_primary_device(struct fb_info *info); #define fb_is_primary_device fb_is_primary_device diff --git a/arch/x86/video/fbdev.c b/arch/x86/video/fbdev.c index 49a0452402e9..1dd6528cc947 100644 --- a/arch/x86/video/fbdev.c +++ b/arch/x86/video/fbdev.c @@ -13,16 +13,17 @@ #include <linux/vgaarb.h> #include <asm/fb.h> -void fb_pgprotect(struct file *file, struct vm_area_struct *vma, unsigned long off) +pgprot_t pgprot_framebuffer(pgprot_t prot, + unsigned long vm_start, unsigned long vm_end, + unsigned long offset) { - unsigned long prot; - - prot = pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK; + pgprot_val(prot) &= ~_PAGE_CACHE_MASK; if (boot_cpu_data.x86 > 3) - pgprot_val(vma->vm_page_prot) = - prot | cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS); + pgprot_val(prot) |= cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS); + + return prot; } -EXPORT_SYMBOL(fb_pgprotect); +EXPORT_SYMBOL(pgprot_framebuffer); int fb_is_primary_device(struct fb_info *info) { diff --git a/drivers/accel/ivpu/Makefile b/drivers/accel/ivpu/Makefile index e4328b430564..95ff7ad16338 100644 --- a/drivers/accel/ivpu/Makefile +++ b/drivers/accel/ivpu/Makefile @@ -2,7 +2,6 @@ # Copyright (C) 2023 Intel Corporation intel_vpu-y := \ - ivpu_debugfs.o \ ivpu_drv.o \ ivpu_fw.o \ ivpu_fw_log.o \ @@ -16,4 +15,6 @@ intel_vpu-y := \ ivpu_mmu_context.o \ ivpu_pm.o +intel_vpu-$(CONFIG_DEBUG_FS) += ivpu_debugfs.o + obj-$(CONFIG_DRM_ACCEL_IVPU) += intel_vpu.o diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c index 5e5996fd4f9f..ea453b985b49 100644 --- a/drivers/accel/ivpu/ivpu_debugfs.c +++ b/drivers/accel/ivpu/ivpu_debugfs.c @@ -17,20 +17,26 @@ #include "ivpu_jsm_msg.h" #include "ivpu_pm.h" +static inline struct ivpu_device *seq_to_ivpu(struct seq_file *s) +{ + struct drm_debugfs_entry *entry = s->private; + + return to_ivpu_device(entry->dev); +} + static int bo_list_show(struct seq_file *s, void *v) { - struct drm_info_node *node = (struct drm_info_node *)s->private; struct drm_printer p = drm_seq_file_printer(s); + 
struct ivpu_device *vdev = seq_to_ivpu(s); - ivpu_bo_list(node->minor->dev, &p); + ivpu_bo_list(&vdev->drm, &p); return 0; } static int fw_name_show(struct seq_file *s, void *v) { - struct drm_info_node *node = (struct drm_info_node *)s->private; - struct ivpu_device *vdev = to_ivpu_device(node->minor->dev); + struct ivpu_device *vdev = seq_to_ivpu(s); seq_printf(s, "%s\n", vdev->fw->name); return 0; @@ -38,8 +44,7 @@ static int fw_name_show(struct seq_file *s, void *v) static int fw_trace_capability_show(struct seq_file *s, void *v) { - struct drm_info_node *node = (struct drm_info_node *)s->private; - struct ivpu_device *vdev = to_ivpu_device(node->minor->dev); + struct ivpu_device *vdev = seq_to_ivpu(s); u64 trace_hw_component_mask; u32 trace_destination_mask; int ret; @@ -57,8 +62,7 @@ static int fw_trace_capability_show(struct seq_file *s, void *v) static int fw_trace_config_show(struct seq_file *s, void *v) { - struct drm_info_node *node = (struct drm_info_node *)s->private; - struct ivpu_device *vdev = to_ivpu_device(node->minor->dev); + struct ivpu_device *vdev = seq_to_ivpu(s); /** * WA: VPU_JSM_MSG_TRACE_GET_CONFIG command is not working yet, * so we use values from vdev->fw instead of calling ivpu_jsm_trace_get_config() @@ -78,8 +82,7 @@ static int fw_trace_config_show(struct seq_file *s, void *v) static int last_bootmode_show(struct seq_file *s, void *v) { - struct drm_info_node *node = (struct drm_info_node *)s->private; - struct ivpu_device *vdev = to_ivpu_device(node->minor->dev); + struct ivpu_device *vdev = seq_to_ivpu(s); seq_printf(s, "%s\n", (vdev->pm->is_warmboot) ? "warmboot" : "coldboot"); @@ -88,8 +91,7 @@ static int last_bootmode_show(struct seq_file *s, void *v) static int reset_counter_show(struct seq_file *s, void *v) { - struct drm_info_node *node = (struct drm_info_node *)s->private; - struct ivpu_device *vdev = to_ivpu_device(node->minor->dev); + struct ivpu_device *vdev = seq_to_ivpu(s); seq_printf(s, "%d\n", atomic_read(&vdev->pm->reset_counter)); return 0; @@ -97,14 +99,13 @@ static int reset_counter_show(struct seq_file *s, void *v) static int reset_pending_show(struct seq_file *s, void *v) { - struct drm_info_node *node = (struct drm_info_node *)s->private; - struct ivpu_device *vdev = to_ivpu_device(node->minor->dev); + struct ivpu_device *vdev = seq_to_ivpu(s); seq_printf(s, "%d\n", atomic_read(&vdev->pm->in_reset)); return 0; } -static const struct drm_info_list vdev_debugfs_list[] = { +static const struct drm_debugfs_info vdev_debugfs_list[] = { {"bo_list", bo_list_show, 0}, {"fw_name", fw_name_show, 0}, {"fw_trace_capability", fw_trace_capability_show, 0}, @@ -270,25 +271,24 @@ static const struct file_operations ivpu_reset_engine_fops = { .write = ivpu_reset_engine_fn, }; -void ivpu_debugfs_init(struct drm_minor *minor) +void ivpu_debugfs_init(struct ivpu_device *vdev) { - struct ivpu_device *vdev = to_ivpu_device(minor->dev); + struct dentry *debugfs_root = vdev->drm.debugfs_root; - drm_debugfs_create_files(vdev_debugfs_list, ARRAY_SIZE(vdev_debugfs_list), - minor->debugfs_root, minor); + drm_debugfs_add_files(&vdev->drm, vdev_debugfs_list, ARRAY_SIZE(vdev_debugfs_list)); - debugfs_create_file("force_recovery", 0200, minor->debugfs_root, vdev, + debugfs_create_file("force_recovery", 0200, debugfs_root, vdev, &ivpu_force_recovery_fops); - debugfs_create_file("fw_log", 0644, minor->debugfs_root, vdev, + debugfs_create_file("fw_log", 0644, debugfs_root, vdev, &fw_log_fops); - debugfs_create_file("fw_trace_destination_mask", 0200, minor->debugfs_root, 
vdev, + debugfs_create_file("fw_trace_destination_mask", 0200, debugfs_root, vdev, &fw_trace_destination_mask_fops); - debugfs_create_file("fw_trace_hw_comp_mask", 0200, minor->debugfs_root, vdev, + debugfs_create_file("fw_trace_hw_comp_mask", 0200, debugfs_root, vdev, &fw_trace_hw_comp_mask_fops); - debugfs_create_file("fw_trace_level", 0200, minor->debugfs_root, vdev, + debugfs_create_file("fw_trace_level", 0200, debugfs_root, vdev, &fw_trace_level_fops); - debugfs_create_file("reset_engine", 0200, minor->debugfs_root, vdev, + debugfs_create_file("reset_engine", 0200, debugfs_root, vdev, &ivpu_reset_engine_fops); } diff --git a/drivers/accel/ivpu/ivpu_debugfs.h b/drivers/accel/ivpu/ivpu_debugfs.h index 78f80c1e00e4..49ae9ea78287 100644 --- a/drivers/accel/ivpu/ivpu_debugfs.h +++ b/drivers/accel/ivpu/ivpu_debugfs.h @@ -6,8 +6,12 @@ #ifndef __IVPU_DEBUGFS_H__ #define __IVPU_DEBUGFS_H__ -struct drm_minor; +struct ivpu_device; -void ivpu_debugfs_init(struct drm_minor *minor); +#if defined(CONFIG_DEBUG_FS) +void ivpu_debugfs_init(struct ivpu_device *vdev); +#else +static inline void ivpu_debugfs_init(struct ivpu_device *vdev) { } +#endif #endif /* __IVPU_DEBUGFS_H__ */ diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c index fa0680ba9340..7851ff7773ca 100644 --- a/drivers/accel/ivpu/ivpu_drv.c +++ b/drivers/accel/ivpu/ivpu_drv.c @@ -395,10 +395,6 @@ static const struct drm_driver driver = { .postclose = ivpu_postclose, .gem_prime_import = ivpu_gem_prime_import, -#if defined(CONFIG_DEBUG_FS) - .debugfs_init = ivpu_debugfs_init, -#endif - .ioctls = ivpu_drm_ioctls, .num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls), .fops = &ivpu_fops, @@ -631,6 +627,8 @@ static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) return ret; + ivpu_debugfs_init(vdev); + ret = drm_dev_register(&vdev->drm, 0); if (ret) { dev_err(&pdev->dev, "Failed to register DRM device: %d\n", ret); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 7d5e7ad28ba8..c298ff2043b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -655,7 +655,7 @@ struct ip_hw_instance { u8 harvest; int num_base_addresses; - u32 base_addr[]; + u32 base_addr[] __counted_by(num_base_addresses); }; struct ip_hw_id { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index baf7e5254fb3..2f94bcf128c0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -204,15 +204,16 @@ void dm_helpers_dp_update_branch_info( {} static void dm_helpers_construct_old_payload( - struct dc_link *link, - int pbn_per_slot, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state, struct drm_dp_mst_atomic_payload *new_payload, struct drm_dp_mst_atomic_payload *old_payload) { - struct link_mst_stream_allocation_table current_link_table = - link->mst_stream_alloc_table; - struct link_mst_stream_allocation *dc_alloc; - int i; + struct drm_dp_mst_atomic_payload *pos; + int pbn_per_slot = mst_state->pbn_div; + u8 next_payload_vc_start = mgr->next_start_slot; + u8 payload_vc_start = new_payload->vc_start_slot; + u8 allocated_time_slots; *old_payload = *new_payload; @@ -221,20 +222,17 @@ static void dm_helpers_construct_old_payload( * struct drm_dp_mst_atomic_payload are don't care fields * while calling 
drm_dp_remove_payload_part2() */ - for (i = 0; i < current_link_table.stream_count; i++) { - dc_alloc = - &current_link_table.stream_allocations[i]; - - if (dc_alloc->vcp_id == new_payload->vcpi) { - old_payload->time_slots = dc_alloc->slot_count; - old_payload->pbn = dc_alloc->slot_count * pbn_per_slot; - break; - } + list_for_each_entry(pos, &mst_state->payloads, next) { + if (pos != new_payload && + pos->vc_start_slot > payload_vc_start && + pos->vc_start_slot < next_payload_vc_start) + next_payload_vc_start = pos->vc_start_slot; } - /* make sure there is an old payload*/ - ASSERT(i != current_link_table.stream_count); + allocated_time_slots = next_payload_vc_start - payload_vc_start; + old_payload->time_slots = allocated_time_slots; + old_payload->pbn = allocated_time_slots * pbn_per_slot; } /* @@ -272,8 +270,8 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload); } else { /* construct old payload by VCPI*/ - dm_helpers_construct_old_payload(stream->link, mst_state->pbn_div, - new_payload, &old_payload); + dm_helpers_construct_old_payload(mst_mgr, mst_state, + new_payload, &old_payload); target_payload = &old_payload; drm_dp_remove_payload_part1(mst_mgr, mst_state, new_payload); @@ -366,7 +364,7 @@ bool dm_helpers_dp_mst_send_payload_allocation( if (enable) { ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, new_payload); } else { - dm_helpers_construct_old_payload(stream->link, mst_state->pbn_div, + dm_helpers_construct_old_payload(mst_mgr, mst_state, new_payload, &old_payload); drm_dp_remove_payload_part2(mst_mgr, mst_state, &old_payload, new_payload); } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h index 808e0ecbe1f0..42adc2a3dcbc 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h @@ -192,7 +192,7 @@ struct smu10_clock_voltage_dependency_record { struct smu10_voltage_dependency_table { uint32_t count; - struct smu10_clock_voltage_dependency_record entries[]; + struct smu10_clock_voltage_dependency_record entries[] __counted_by(count); }; struct smu10_clock_voltage_information { diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 2611afd2c1c1..d518de88b5c3 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -121,7 +121,7 @@ static const struct regmap_config adv7511_regmap_config = { .val_bits = 8, .max_register = 0xff, - .cache_type = REGCACHE_RBTREE, + .cache_type = REGCACHE_MAPLE, .reg_defaults_raw = adv7511_register_defaults, .num_reg_defaults_raw = ARRAY_SIZE(adv7511_register_defaults), @@ -1068,7 +1068,7 @@ static const struct regmap_config adv7511_cec_regmap_config = { .val_bits = 8, .max_register = 0xff, - .cache_type = REGCACHE_RBTREE, + .cache_type = REGCACHE_MAPLE, .volatile_reg = adv7511_cec_register_volatile, }; diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c index d205e755e524..82d23e4df09e 100644 --- a/drivers/gpu/drm/bridge/chipone-icn6211.c +++ b/drivers/gpu/drm/bridge/chipone-icn6211.c @@ -197,7 +197,7 @@ static const struct regmap_config chipone_regmap_config = { .val_bits = 8, .rd_table = &chipone_dsi_readable_table, .wr_table = &chipone_dsi_writeable_table, - .cache_type = REGCACHE_RBTREE, + .cache_type = REGCACHE_MAPLE, .max_register = MIPI_ATE_STATUS(1), }; diff --git 
a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c index 4d404f5ef87e..c8881796fba4 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9211.c +++ b/drivers/gpu/drm/bridge/lontium-lt9211.c @@ -89,7 +89,7 @@ static const struct regmap_config lt9211_regmap_config = { .volatile_table = &lt9211_rw_table, .ranges = &lt9211_range, .num_ranges = 1, - .cache_type = REGCACHE_RBTREE, + .cache_type = REGCACHE_MAPLE, .max_register = 0xda00, }; diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c index 22c84d29c2bc..7835738a532e 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c @@ -296,7 +296,7 @@ static int lt9611uxc_connector_get_modes(struct drm_connector *connector) unsigned int count; struct edid *edid; - edid = lt9611uxc->bridge.funcs->get_edid(&lt9611uxc->bridge, connector); + edid = drm_bridge_get_edid(&lt9611uxc->bridge, connector); drm_connector_update_edid_property(connector, edid); count = drm_add_edid_modes(connector, edid); kfree(edid); diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c index 19bdb32dbc9a..be5914caa17d 100644 --- a/drivers/gpu/drm/bridge/samsung-dsim.c +++ b/drivers/gpu/drm/bridge/samsung-dsim.c @@ -410,6 +410,8 @@ static const struct samsung_dsim_driver_data exynos3_dsi_driver_data = { .num_bits_resol = 11, .pll_p_offset = 13, .reg_values = reg_values, + .pll_fin_min = 6, + .pll_fin_max = 12, .m_min = 41, .m_max = 125, .min_freq = 500, @@ -427,6 +429,8 @@ static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = { .num_bits_resol = 11, .pll_p_offset = 13, .reg_values = reg_values, + .pll_fin_min = 6, + .pll_fin_max = 12, .m_min = 41, .m_max = 125, .min_freq = 500, @@ -442,6 +446,8 @@ static const struct samsung_dsim_driver_data exynos5_dsi_driver_data = { .num_bits_resol = 11, .pll_p_offset = 13, .reg_values = reg_values, + .pll_fin_min = 6, + .pll_fin_max = 12, .m_min = 41, .m_max = 125, .min_freq = 500, @@ -457,6 +463,8 @@ static const struct samsung_dsim_driver_data exynos5433_dsi_driver_data = { .num_bits_resol = 12, .pll_p_offset = 13, .reg_values = exynos5433_reg_values, + .pll_fin_min = 6, + .pll_fin_max = 12, .m_min = 41, .m_max = 125, .min_freq = 500, @@ -472,6 +480,8 @@ static const struct samsung_dsim_driver_data exynos5422_dsi_driver_data = { .num_bits_resol = 12, .pll_p_offset = 13, .reg_values = exynos5422_reg_values, + .pll_fin_min = 6, + .pll_fin_max = 12, .m_min = 41, .m_max = 125, .min_freq = 500, @@ -491,6 +501,8 @@ static const struct samsung_dsim_driver_data imx8mm_dsi_driver_data = { */ .pll_p_offset = 14, .reg_values = imx8mm_dsim_reg_values, + .pll_fin_min = 2, + .pll_fin_max = 30, .m_min = 64, .m_max = 1023, .min_freq = 1050, @@ -614,7 +626,23 @@ static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi, u16 m; u32 reg; - fin = dsi->pll_clk_rate; + if (dsi->pll_clk) { + /* + * Ensure that the reference clock is generated with a power of + * two divider from its parent, but close to the PLLs upper + * limit. 
+ */ + fin = clk_get_rate(clk_get_parent(dsi->pll_clk)); + while (fin > driver_data->pll_fin_max * MHZ) + fin /= 2; + clk_set_rate(dsi->pll_clk, fin); + + fin = clk_get_rate(dsi->pll_clk); + } else { + fin = dsi->pll_clk_rate; + } + dev_dbg(dsi->dev, "PLL ref clock freq %lu\n", fin); + fout = samsung_dsim_pll_find_pms(dsi, fin, freq, &p, &m, &s); if (!fout) { dev_err(dsi->dev, @@ -960,10 +988,12 @@ static void samsung_dsim_set_display_mode(struct samsung_dsim *dsi) u32 reg; if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) { - int byte_clk_khz = dsi->hs_clock / 1000 / 8; - int hfp = (m->hsync_start - m->hdisplay) * byte_clk_khz / m->clock; - int hbp = (m->htotal - m->hsync_end) * byte_clk_khz / m->clock; - int hsa = (m->hsync_end - m->hsync_start) * byte_clk_khz / m->clock; + u64 byte_clk = dsi->hs_clock / 8; + u64 pix_clk = m->clock * 1000; + + int hfp = DIV64_U64_ROUND_UP((m->hsync_start - m->hdisplay) * byte_clk, pix_clk); + int hbp = DIV64_U64_ROUND_UP((m->htotal - m->hsync_end) * byte_clk, pix_clk); + int hsa = DIV64_U64_ROUND_UP((m->hsync_end - m->hsync_start) * byte_clk, pix_clk); /* remove packet overhead when possible */ hfp = max(hfp - 6, 0); @@ -1726,7 +1756,10 @@ of_find_panel_or_bridge: return ret; } - DRM_DEV_INFO(dev, "Attached %s device\n", device->name); + DRM_DEV_INFO(dev, "Attached %s device (lanes:%d bpp:%d mode-flags:0x%lx)\n", + device->name, device->lanes, + mipi_dsi_pixel_format_to_bpp(device->format), + device->mode_flags); drm_bridge_add(&dsi->bridge); @@ -1833,18 +1866,15 @@ static int samsung_dsim_parse_dt(struct samsung_dsim *dsi) u32 lane_polarities[5] = { 0 }; struct device_node *endpoint; int i, nr_lanes, ret; - struct clk *pll_clk; ret = samsung_dsim_of_read_u32(node, "samsung,pll-clock-frequency", &dsi->pll_clk_rate, 1); /* If it doesn't exist, read it from the clock instead of failing */ if (ret < 0) { dev_dbg(dev, "Using sclk_mipi for pll clock frequency\n"); - pll_clk = devm_clk_get(dev, "sclk_mipi"); - if (!IS_ERR(pll_clk)) - dsi->pll_clk_rate = clk_get_rate(pll_clk); - else - return PTR_ERR(pll_clk); + dsi->pll_clk = devm_clk_get(dev, "sclk_mipi"); + if (IS_ERR(dsi->pll_clk)) + return PTR_ERR(dsi->pll_clk); } /* If it doesn't exist, use pixel clock instead of failing */ @@ -2005,7 +2035,7 @@ err_disable_runtime: } EXPORT_SYMBOL_GPL(samsung_dsim_probe); -int samsung_dsim_remove(struct platform_device *pdev) +void samsung_dsim_remove(struct platform_device *pdev) { struct samsung_dsim *dsi = platform_get_drvdata(pdev); @@ -2013,8 +2043,6 @@ int samsung_dsim_remove(struct platform_device *pdev) if (dsi->plat_data->host_ops && dsi->plat_data->host_ops->unregister_host) dsi->plat_data->host_ops->unregister_host(dsi); - - return 0; } EXPORT_SYMBOL_GPL(samsung_dsim_remove); @@ -2114,7 +2142,7 @@ MODULE_DEVICE_TABLE(of, samsung_dsim_of_match); static struct platform_driver samsung_dsim_driver = { .probe = samsung_dsim_probe, - .remove = samsung_dsim_remove, + .remove_new = samsung_dsim_remove, .driver = { .name = "samsung-dsim", .pm = &samsung_dsim_pm_ops, diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index b45bffab7c81..ef2e373606ba 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -2005,7 +2005,7 @@ static const struct regmap_config tc_regmap_config = { .val_bits = 32, .reg_stride = 4, .max_register = PLL_DBG, - .cache_type = REGCACHE_RBTREE, + .cache_type = REGCACHE_MAPLE, .readable_reg = tc_readable_reg, .volatile_table = &tc_volatile_table, .writeable_reg = tc_writeable_reg, diff --git 
a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c index b65632ec7e7d..ca3348109bcd 100644 --- a/drivers/gpu/drm/bridge/ti-dlpc3433.c +++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c @@ -100,7 +100,7 @@ static struct regmap_config dlpc_regmap_config = { .max_register = WR_DSI_PORT_EN, .writeable_noinc_reg = dlpc_writeable_noinc_reg, .volatile_table = &dlpc_volatile_table, - .cache_type = REGCACHE_RBTREE, + .cache_type = REGCACHE_MAPLE, .name = "dlpc3433", }; diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c index 061e8bd5915d..4814b7b6d1fd 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c @@ -233,7 +233,7 @@ static const struct regmap_config sn65dsi83_regmap_config = { .rd_table = &sn65dsi83_readable_table, .wr_table = &sn65dsi83_writeable_table, .volatile_table = &sn65dsi83_volatile_table, - .cache_type = REGCACHE_RBTREE, + .cache_type = REGCACHE_MAPLE, .max_register = REG_IRQ_STAT, }; diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c index 8a1b64c57dfd..f3680f4e6970 100644 --- a/drivers/gpu/drm/display/drm_dp_helper.c +++ b/drivers/gpu/drm/display/drm_dp_helper.c @@ -746,8 +746,11 @@ int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux, } EXPORT_SYMBOL(drm_dp_dpcd_read_phy_link_status); -static bool is_edid_digital_input_dp(const struct edid *edid) +static bool is_edid_digital_input_dp(const struct drm_edid *drm_edid) { + /* FIXME: get rid of drm_edid_raw() */ + const struct edid *edid = drm_edid_raw(drm_edid); + return edid && edid->revision >= 4 && edid->input & DRM_EDID_INPUT_DIGITAL && (edid->input & DRM_EDID_DIGITAL_TYPE_MASK) == DRM_EDID_DIGITAL_TYPE_DP; @@ -779,13 +782,13 @@ EXPORT_SYMBOL(drm_dp_downstream_is_type); * drm_dp_downstream_is_tmds() - is the downstream facing port TMDS? * @dpcd: DisplayPort configuration data * @port_cap: port capabilities - * @edid: EDID + * @drm_edid: EDID * * Returns: whether the downstream facing port is TMDS (HDMI/DVI). 
*/ bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], - const struct edid *edid) + const struct drm_edid *drm_edid) { if (dpcd[DP_DPCD_REV] < 0x11) { switch (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) { @@ -798,7 +801,7 @@ bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE], switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) { case DP_DS_PORT_TYPE_DP_DUALMODE: - if (is_edid_digital_input_dp(edid)) + if (is_edid_digital_input_dp(drm_edid)) return false; fallthrough; case DP_DS_PORT_TYPE_DVI: @@ -1036,14 +1039,14 @@ EXPORT_SYMBOL(drm_dp_downstream_max_dotclock); * drm_dp_downstream_max_tmds_clock() - extract downstream facing port max TMDS clock * @dpcd: DisplayPort configuration data * @port_cap: port capabilities - * @edid: EDID + * @drm_edid: EDID * * Returns: HDMI/DVI downstream facing port max TMDS clock in kHz on success, * or 0 if max TMDS clock not defined */ int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], - const struct edid *edid) + const struct drm_edid *drm_edid) { if (!drm_dp_is_branch(dpcd)) return 0; @@ -1059,7 +1062,7 @@ int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) { case DP_DS_PORT_TYPE_DP_DUALMODE: - if (is_edid_digital_input_dp(edid)) + if (is_edid_digital_input_dp(drm_edid)) return 0; /* * It's left up to the driver to check the @@ -1101,14 +1104,14 @@ EXPORT_SYMBOL(drm_dp_downstream_max_tmds_clock); * drm_dp_downstream_min_tmds_clock() - extract downstream facing port min TMDS clock * @dpcd: DisplayPort configuration data * @port_cap: port capabilities - * @edid: EDID + * @drm_edid: EDID * * Returns: HDMI/DVI downstream facing port min TMDS clock in kHz on success, * or 0 if max TMDS clock not defined */ int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], - const struct edid *edid) + const struct drm_edid *drm_edid) { if (!drm_dp_is_branch(dpcd)) return 0; @@ -1124,7 +1127,7 @@ int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) { case DP_DS_PORT_TYPE_DP_DUALMODE: - if (is_edid_digital_input_dp(edid)) + if (is_edid_digital_input_dp(drm_edid)) return 0; fallthrough; case DP_DS_PORT_TYPE_DVI: @@ -1145,13 +1148,13 @@ EXPORT_SYMBOL(drm_dp_downstream_min_tmds_clock); * bits per component * @dpcd: DisplayPort configuration data * @port_cap: downstream facing port capabilities - * @edid: EDID + * @drm_edid: EDID * * Returns: Max bpc on success or 0 if max bpc not defined */ int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], - const struct edid *edid) + const struct drm_edid *drm_edid) { if (!drm_dp_is_branch(dpcd)) return 0; @@ -1169,7 +1172,7 @@ int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], case DP_DS_PORT_TYPE_DP: return 0; case DP_DS_PORT_TYPE_DP_DUALMODE: - if (is_edid_digital_input_dp(edid)) + if (is_edid_digital_input_dp(drm_edid)) return 0; fallthrough; case DP_DS_PORT_TYPE_HDMI: @@ -1362,14 +1365,14 @@ EXPORT_SYMBOL(drm_dp_downstream_id); * @m: pointer for debugfs file * @dpcd: DisplayPort configuration data * @port_cap: port capabilities - * @edid: EDID + * @drm_edid: EDID * @aux: DisplayPort AUX channel * */ void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], - const struct edid *edid, + const struct drm_edid *drm_edid, struct drm_dp_aux *aux) { bool 
detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] & @@ -1432,15 +1435,15 @@ void drm_dp_downstream_debug(struct seq_file *m, if (clk > 0) seq_printf(m, "\t\tMax dot clock: %d kHz\n", clk); - clk = drm_dp_downstream_max_tmds_clock(dpcd, port_cap, edid); + clk = drm_dp_downstream_max_tmds_clock(dpcd, port_cap, drm_edid); if (clk > 0) seq_printf(m, "\t\tMax TMDS clock: %d kHz\n", clk); - clk = drm_dp_downstream_min_tmds_clock(dpcd, port_cap, edid); + clk = drm_dp_downstream_min_tmds_clock(dpcd, port_cap, drm_edid); if (clk > 0) seq_printf(m, "\t\tMin TMDS clock: %d kHz\n", clk); - bpc = drm_dp_downstream_max_bpc(dpcd, port_cap, edid); + bpc = drm_dp_downstream_max_bpc(dpcd, port_cap, drm_edid); if (bpc > 0) seq_printf(m, "\t\tMax bpc: %d\n", bpc); diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c index 1da93d5a1f61..8239ad43aed5 100644 --- a/drivers/gpu/drm/drm_bridge_connector.c +++ b/drivers/gpu/drm/drm_bridge_connector.c @@ -5,6 +5,8 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/of.h> +#include <linux/property.h> #include <linux/slab.h> #include <drm/drm_atomic_state_helper.h> @@ -107,27 +109,36 @@ static void drm_bridge_connector_hpd_notify(struct drm_connector *connector, } } -static void drm_bridge_connector_hpd_cb(void *cb_data, - enum drm_connector_status status) +static void drm_bridge_connector_handle_hpd(struct drm_bridge_connector *drm_bridge_connector, + enum drm_connector_status status) { - struct drm_bridge_connector *drm_bridge_connector = cb_data; struct drm_connector *connector = &drm_bridge_connector->base; struct drm_device *dev = connector->dev; - enum drm_connector_status old_status; mutex_lock(&dev->mode_config.mutex); - old_status = connector->status; connector->status = status; mutex_unlock(&dev->mode_config.mutex); - if (old_status == status) - return; - drm_bridge_connector_hpd_notify(connector, status); drm_kms_helper_connector_hotplug_event(connector); } +static void drm_bridge_connector_hpd_cb(void *cb_data, + enum drm_connector_status status) +{ + drm_bridge_connector_handle_hpd(cb_data, status); +} + +static void drm_bridge_connector_oob_hotplug_event(struct drm_connector *connector, + enum drm_connector_status status) +{ + struct drm_bridge_connector *bridge_connector = + to_drm_bridge_connector(connector); + + drm_bridge_connector_handle_hpd(bridge_connector, status); +} + static void drm_bridge_connector_enable_hpd(struct drm_connector *connector) { struct drm_bridge_connector *bridge_connector = @@ -196,6 +207,8 @@ static void drm_bridge_connector_destroy(struct drm_connector *connector) drm_connector_unregister(connector); drm_connector_cleanup(connector); + fwnode_handle_put(connector->fwnode); + kfree(bridge_connector); } @@ -221,6 +234,7 @@ static const struct drm_connector_funcs drm_bridge_connector_funcs = { .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .debugfs_init = drm_bridge_connector_debugfs_init, + .oob_hotplug_event = drm_bridge_connector_oob_hotplug_event, }; /* ----------------------------------------------------------------------------- @@ -238,7 +252,7 @@ static int drm_bridge_connector_get_modes_edid(struct drm_connector *connector, if (status != connector_status_connected) goto no_edid; - edid = bridge->funcs->get_edid(bridge, connector); + edid = drm_bridge_get_edid(bridge, connector); if (!drm_edid_is_valid(edid)) { kfree(edid); goto no_edid; @@ -357,6 +371,12 @@ struct 
drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (!drm_bridge_get_next_bridge(bridge)) connector_type = bridge->type; +#ifdef CONFIG_OF + if (!drm_bridge_get_next_bridge(bridge) && + bridge->of_node) + connector->fwnode = fwnode_handle_get(of_fwnode_handle(bridge->of_node)); +#endif + if (bridge->ddc) ddc = bridge->ddc; diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 9d4c7b0c5c05..c3725086f413 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -3060,6 +3060,7 @@ struct drm_connector *drm_connector_find_by_fwnode(struct fwnode_handle *fwnode) /** * drm_connector_oob_hotplug_event - Report out-of-band hotplug event to connector * @connector_fwnode: fwnode_handle to report the event on + * @status: hot plug detect logical state * * On some hardware a hotplug event notification may come from outside the display * driver / device. An example of this is some USB Type-C setups where the hardware @@ -3069,7 +3070,8 @@ struct drm_connector *drm_connector_find_by_fwnode(struct fwnode_handle *fwnode) * This function can be used to report these out-of-band events after obtaining * a drm_connector reference through calling drm_connector_find_by_fwnode(). */ -void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode) +void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode, + enum drm_connector_status status) { struct drm_connector *connector; @@ -3078,7 +3080,7 @@ void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode) return; if (connector->funcs->oob_hotplug_event) - connector->funcs->oob_hotplug_event(connector); + connector->funcs->oob_hotplug_event(connector, status); drm_connector_put(connector); } diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index e692770ef6d3..446458aca8e9 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -964,6 +964,8 @@ void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file) spin_lock(&file->table_lock); idr_for_each_entry (&file->object_idr, obj, id) { enum drm_gem_object_status s = 0; + size_t add_size = (obj->funcs && obj->funcs->rss) ? 
+ obj->funcs->rss(obj) : obj->size; if (obj->funcs && obj->funcs->status) { s = obj->funcs->status(obj); @@ -978,7 +980,7 @@ void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file) } if (s & DRM_GEM_OBJECT_RESIDENT) { - status.resident += obj->size; + status.resident += add_size; } else { /* If already purged or not yet backed by pages, don't * count it as purgeable: @@ -987,14 +989,14 @@ void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file) } if (!dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true))) { - status.active += obj->size; + status.active += add_size; /* If still active, don't count as purgeable: */ s &= ~DRM_GEM_OBJECT_PURGEABLE; } if (s & DRM_GEM_OBJECT_PURGEABLE) - status.purgeable += obj->size; + status.purgeable += add_size; } spin_unlock(&file->table_lock); diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index ba51deb6d042..d3ba0698b84b 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -151,24 +151,6 @@ int drm_mode_addfb_ioctl(struct drm_device *dev, return drm_mode_addfb(dev, data, file_priv); } -static int fb_plane_width(int width, - const struct drm_format_info *format, int plane) -{ - if (plane == 0) - return width; - - return DIV_ROUND_UP(width, format->hsub); -} - -static int fb_plane_height(int height, - const struct drm_format_info *format, int plane) -{ - if (plane == 0) - return height; - - return DIV_ROUND_UP(height, format->vsub); -} - static int framebuffer_check(struct drm_device *dev, const struct drm_mode_fb_cmd2 *r) { @@ -196,8 +178,8 @@ static int framebuffer_check(struct drm_device *dev, info = drm_get_format_info(dev, r); for (i = 0; i < info->num_planes; i++) { - unsigned int width = fb_plane_width(r->width, info, i); - unsigned int height = fb_plane_height(r->height, info, i); + unsigned int width = drm_format_info_plane_width(info, r->width, i); + unsigned int height = drm_format_info_plane_height(info, r->height, i); unsigned int block_size = info->char_per_block[i]; u64 min_pitch = drm_format_info_min_pitch(info, i, width); @@ -1136,44 +1118,6 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb) } EXPORT_SYMBOL(drm_framebuffer_remove); -/** - * drm_framebuffer_plane_width - width of the plane given the first plane - * @width: width of the first plane - * @fb: the framebuffer - * @plane: plane index - * - * Returns: - * The width of @plane, given that the width of the first plane is @width. - */ -int drm_framebuffer_plane_width(int width, - const struct drm_framebuffer *fb, int plane) -{ - if (plane >= fb->format->num_planes) - return 0; - - return fb_plane_width(width, fb->format, plane); -} -EXPORT_SYMBOL(drm_framebuffer_plane_width); - -/** - * drm_framebuffer_plane_height - height of the plane given the first plane - * @height: height of the first plane - * @fb: the framebuffer - * @plane: plane index - * - * Returns: - * The height of @plane, given that the height of the first plane is @height. 
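The two framebuffer plane helpers being removed here are superseded by the drm_fourcc.h equivalents used in the surrounding hunks. A minimal sketch of the replacement idiom, assuming a caller that only has the drm_device and the drm_mode_fb_cmd2 (the function name is illustrative):

#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>

static void example_print_plane_sizes(struct drm_device *dev,
                                      const struct drm_mode_fb_cmd2 *cmd)
{
        const struct drm_format_info *info = drm_get_format_info(dev, cmd);
        int i;

        /* Chroma planes of subsampled formats (e.g. NV12) shrink by hsub/vsub. */
        for (i = 0; i < info->num_planes; i++)
                drm_dbg_kms(dev, "plane %d: %dx%d\n", i,
                            drm_format_info_plane_width(info, cmd->width, i),
                            drm_format_info_plane_height(info, cmd->height, i));
}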
- */ -int drm_framebuffer_plane_height(int height, - const struct drm_framebuffer *fb, int plane) -{ - if (plane >= fb->format->num_planes) - return 0; - - return fb_plane_height(height, fb->format, plane); -} -EXPORT_SYMBOL(drm_framebuffer_plane_height); - void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent, const struct drm_framebuffer *fb) { @@ -1189,8 +1133,8 @@ void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent, for (i = 0; i < fb->format->num_planes; i++) { drm_printf_indent(p, indent + 1, "size[%u]=%dx%d\n", i, - drm_framebuffer_plane_width(fb->width, fb, i), - drm_framebuffer_plane_height(fb->height, fb, i)); + drm_format_info_plane_width(fb->format, fb->width, i), + drm_format_info_plane_height(fb->format, fb->height, i)); drm_printf_indent(p, indent + 1, "pitch[%u]=%u\n", i, fb->pitches[i]); drm_printf_indent(p, indent + 1, "offset[%u]=%u\n", i, fb->offsets[i]); drm_printf_indent(p, indent + 1, "obj[%u]:%s\n", i, diff --git a/drivers/gpu/drm/drm_vblank_work.c b/drivers/gpu/drm/drm_vblank_work.c index bd481fdd6b87..43cd5c0f4f6f 100644 --- a/drivers/gpu/drm/drm_vblank_work.c +++ b/drivers/gpu/drm/drm_vblank_work.c @@ -73,6 +73,9 @@ void drm_vblank_cancel_pending_works(struct drm_vblank_crtc *vblank) assert_spin_locked(&vblank->dev->event_lock); + drm_WARN_ONCE(vblank->dev, !list_empty(&vblank->pending_work), + "Cancelling pending vblank works!\n"); + list_for_each_entry_safe(work, next, &vblank->pending_work, node) { list_del_init(&work->node); drm_vblank_put(vblank->dev, vblank->pipe); diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 69ea33cae651..2fe0e5f3f638 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -181,7 +181,7 @@ MODULE_DEVICE_TABLE(of, exynos_dsi_of_match); struct platform_driver dsi_driver = { .probe = samsung_dsim_probe, - .remove = samsung_dsim_remove, + .remove_new = samsung_dsim_remove, .driver = { .name = "exynos-dsi", .owner = THIS_MODULE, diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c index 06b5b2d70d48..939c53fd09e8 100644 --- a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c +++ b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c @@ -141,7 +141,7 @@ struct gma_i2c_chan *oaktrail_lvds_i2c_init(struct drm_device *dev) chan->drm_dev = dev; chan->reg = dev_priv->lpc_gpio_base; - strncpy(chan->base.name, "gma500 LPC", I2C_NAME_SIZE - 1); + strscpy(chan->base.name, "gma500 LPC", sizeof(chan->base.name)); chan->base.owner = THIS_MODULE; chan->base.algo_data = &chan->algo; chan->base.dev.parent = dev->dev; diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index de4967c141f0..88b2bb005014 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -3,24 +3,34 @@ # Makefile for the drm device driver. This driver provides support for the # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. -# Add a set of useful warning flags and enable -Werror for CI to prevent -# trivial mistakes from creeping in. We have to do this piecemeal as we reject -# any patch that isn't warning clean, so turning on -Wall -Wextra (or W=1) we -# need to filter out dubious warnings. Still it is our interest -# to keep running locally with W=1 C=1 until we are completely clean. -# -# Note the danger in using -Wall -Wextra is that when CI updates gcc we -# will most likely get a sudden build breakage... 
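On the gma500 change above: unlike strncpy(), strscpy() always NUL-terminates and returns -E2BIG on truncation, which is why sizing by the full destination buffer is safe. A small sketch along the same lines (the warning message is purely illustrative):

#include <linux/i2c.h>
#include <linux/printk.h>
#include <linux/string.h>

static void example_set_adapter_name(struct i2c_adapter *adap)
{
        /* Truncation is reported instead of silently dropping the NUL. */
        if (strscpy(adap->name, "gma500 LPC", sizeof(adap->name)) < 0)
                pr_warn("i2c adapter name truncated\n");
}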
Hopefully we will fix -# new warnings before CI updates! -subdir-ccflags-y := -Wall -Wextra -subdir-ccflags-y += -Wno-format-security -subdir-ccflags-y += -Wno-unused-parameter -subdir-ccflags-y += -Wno-type-limits +# Unconditionally enable W=1 warnings locally +# --- begin copy-paste W=1 warnings from scripts/Makefile.extrawarn +subdir-ccflags-y += -Wextra -Wunused -Wno-unused-parameter +subdir-ccflags-y += -Wmissing-declarations +subdir-ccflags-y += $(call cc-option, -Wrestrict) +subdir-ccflags-y += -Wmissing-format-attribute +subdir-ccflags-y += -Wmissing-prototypes +subdir-ccflags-y += -Wold-style-definition +subdir-ccflags-y += -Wmissing-include-dirs +subdir-ccflags-y += $(call cc-option, -Wunused-but-set-variable) +subdir-ccflags-y += $(call cc-option, -Wunused-const-variable) +subdir-ccflags-y += $(call cc-option, -Wpacked-not-aligned) +subdir-ccflags-y += $(call cc-option, -Wformat-overflow) +subdir-ccflags-y += $(call cc-option, -Wformat-truncation) +subdir-ccflags-y += $(call cc-option, -Wstringop-overflow) +subdir-ccflags-y += $(call cc-option, -Wstringop-truncation) +# The following turn off the warnings enabled by -Wextra +ifeq ($(findstring 2, $(KBUILD_EXTRA_WARN)),) subdir-ccflags-y += -Wno-missing-field-initializers -subdir-ccflags-y += -Wno-sign-compare +subdir-ccflags-y += -Wno-type-limits subdir-ccflags-y += -Wno-shift-negative-value -subdir-ccflags-y += $(call cc-option, -Wunused-but-set-variable) -subdir-ccflags-y += $(call cc-disable-warning, frame-address) +endif +ifeq ($(findstring 3, $(KBUILD_EXTRA_WARN)),) +subdir-ccflags-y += -Wno-sign-compare +endif +# --- end copy-paste + +# Enable -Werror in CI and development subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror # Fine grained warnings disable @@ -28,6 +38,10 @@ CFLAGS_i915_pci.o = $(call cc-disable-warning, override-init) CFLAGS_display/intel_display_device.o = $(call cc-disable-warning, override-init) CFLAGS_display/intel_fbdev.o = $(call cc-disable-warning, override-init) +# Support compiling the display code separately for both i915 and xe +# drivers. Define I915 when building i915. +subdir-ccflags-y += -DI915 + subdir-ccflags-y += -I$(srctree)/$(src) # Please keep these build lists sorted! 
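The -DI915 define above is what the display header changes below key off: each header keeps its real prototypes for the i915 build and supplies static inline no-ops otherwise, so code shared with the xe driver can call the hooks unconditionally. The shape of the pattern, with a made-up hook name:

#ifdef I915
bool example_platform_hook(struct drm_i915_private *i915);
#else
static inline bool example_platform_hook(struct drm_i915_private *i915)
{
        /* Hook not built outside i915; report that nothing was done. */
        return false;
}
#endif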
@@ -265,6 +279,7 @@ i915-y += \ display/intel_global_state.o \ display/intel_hdcp.o \ display/intel_hdcp_gsc.o \ + display/intel_hdcp_gsc_message.o \ display/intel_hotplug.o \ display/intel_hotplug_irq.o \ display/intel_hti.o \ diff --git a/drivers/gpu/drm/i915/display/g4x_dp.h b/drivers/gpu/drm/i915/display/g4x_dp.h index a38b3e1e01d3..a10638ab749c 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.h +++ b/drivers/gpu/drm/i915/display/g4x_dp.h @@ -17,6 +17,7 @@ struct intel_crtc_state; struct intel_dp; struct intel_encoder; +#ifdef I915 const struct dpll *vlv_get_dpll(struct drm_i915_private *i915); enum pipe vlv_active_pipe(struct intel_dp *intel_dp); void g4x_dp_set_clock(struct intel_encoder *encoder, @@ -26,5 +27,30 @@ bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv, enum pipe *pipe); bool g4x_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg, enum port port); +#else +static inline const struct dpll *vlv_get_dpll(struct drm_i915_private *i915) +{ + return NULL; +} +static inline int vlv_active_pipe(struct intel_dp *intel_dp) +{ + return 0; +} +static inline void g4x_dp_set_clock(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) +{ +} +static inline bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv, + i915_reg_t dp_reg, int port, + enum pipe *pipe) +{ + return false; +} +static inline bool g4x_dp_init(struct drm_i915_private *dev_priv, + i915_reg_t output_reg, int port) +{ + return false; +} +#endif #endif diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.h b/drivers/gpu/drm/i915/display/g4x_hdmi.h index 1e3ea7f3c846..817f55c7a3a1 100644 --- a/drivers/gpu/drm/i915/display/g4x_hdmi.h +++ b/drivers/gpu/drm/i915/display/g4x_hdmi.h @@ -15,9 +15,21 @@ struct drm_atomic_state; struct drm_connector; struct drm_i915_private; +#ifdef I915 void g4x_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg, enum port port); int g4x_hdmi_connector_atomic_check(struct drm_connector *connector, struct drm_atomic_state *state); +#else +static inline void g4x_hdmi_init(struct drm_i915_private *dev_priv, + i915_reg_t hdmi_reg, int port) +{ +} +static inline int g4x_hdmi_connector_atomic_check(struct drm_connector *connector, + struct drm_atomic_state *state) +{ + return 0; +} +#endif #endif diff --git a/drivers/gpu/drm/i915/display/hsw_ips.h b/drivers/gpu/drm/i915/display/hsw_ips.h index 4eb83b350791..35364228e1c1 100644 --- a/drivers/gpu/drm/i915/display/hsw_ips.h +++ b/drivers/gpu/drm/i915/display/hsw_ips.h @@ -12,6 +12,7 @@ struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; +#ifdef I915 bool hsw_ips_disable(const struct intel_crtc_state *crtc_state); bool hsw_ips_pre_update(struct intel_atomic_state *state, struct intel_crtc *crtc); @@ -23,5 +24,39 @@ int hsw_ips_compute_config(struct intel_atomic_state *state, struct intel_crtc *crtc); void hsw_ips_get_config(struct intel_crtc_state *crtc_state); void hsw_ips_crtc_debugfs_add(struct intel_crtc *crtc); +#else +static inline bool hsw_ips_disable(const struct intel_crtc_state *crtc_state) +{ + return false; +} +static inline bool hsw_ips_pre_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + return false; +} +static inline void hsw_ips_post_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ +} +static inline bool hsw_crtc_supports_ips(struct intel_crtc *crtc) +{ + return false; +} +static inline bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state) +{ + return false; +} +static inline int 
hsw_ips_compute_config(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + return 0; +} +static inline void hsw_ips_get_config(struct intel_crtc_state *crtc_state) +{ +} +static inline void hsw_ips_crtc_debugfs_add(struct intel_crtc *crtc) +{ +} +#endif #endif /* __HSW_IPS_H__ */ diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.h b/drivers/gpu/drm/i915/display/i9xx_plane.h index 027b66053984..b3d724a144cb 100644 --- a/drivers/gpu/drm/i915/display/i9xx_plane.h +++ b/drivers/gpu/drm/i915/display/i9xx_plane.h @@ -15,6 +15,7 @@ struct intel_initial_plane_config; struct intel_plane; struct intel_plane_state; +#ifdef I915 unsigned int i965_plane_max_stride(struct intel_plane *plane, u32 pixel_format, u64 modifier, unsigned int rotation); @@ -25,4 +26,26 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe); void i9xx_get_initial_plane_config(struct intel_crtc *crtc, struct intel_initial_plane_config *plane_config); +#else +static inline unsigned int i965_plane_max_stride(struct intel_plane *plane, + u32 pixel_format, u64 modifier, + unsigned int rotation) +{ + return 0; +} +static inline int i9xx_check_plane_surface(struct intel_plane_state *plane_state) +{ + return 0; +} +static inline struct intel_plane * +intel_primary_plane_create(struct drm_i915_private *dev_priv, int pipe) +{ + return NULL; +} +static inline void i9xx_get_initial_plane_config(struct intel_crtc *crtc, + struct intel_initial_plane_config *plane_config) +{ +} +#endif + #endif diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.h b/drivers/gpu/drm/i915/display/i9xx_wm.h index b87ae369685a..de0920730ab2 100644 --- a/drivers/gpu/drm/i915/display/i9xx_wm.h +++ b/drivers/gpu/drm/i915/display/i9xx_wm.h @@ -12,9 +12,26 @@ struct drm_i915_private; struct intel_crtc_state; struct intel_plane_state; +#ifdef I915 bool ilk_disable_lp_wm(struct drm_i915_private *i915); void ilk_wm_sanitize(struct drm_i915_private *i915); bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable); void i9xx_wm_init(struct drm_i915_private *i915); +#else +static inline bool ilk_disable_lp_wm(struct drm_i915_private *i915) +{ + return false; +} +static inline void ilk_wm_sanitize(struct drm_i915_private *i915) +{ +} +static inline bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable) +{ + return false; +} +static inline void i9xx_wm_init(struct drm_i915_private *i915) +{ +} +#endif #endif /* __I9XX_WM_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 4f92fc31059f..2a2a163ea652 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -3702,6 +3702,7 @@ static const struct intel_color_funcs vlv_color_funcs = { .read_luts = i965_read_luts, .lut_equal = i965_lut_equal, .read_csc = vlv_read_csc, + .get_config = i9xx_get_config, }; static const struct intel_color_funcs i965_color_funcs = { diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index d4bad0ddff41..913e5d230a4d 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -838,7 +838,7 @@ intel_crt_detect(struct drm_connector *connector, connector->base.id, connector->name, force); - if (!INTEL_DISPLAY_ENABLED(dev_priv)) + if (!intel_display_device_enabled(dev_priv)) return connector_status_disconnected; if (dev_priv->params.load_detect_test) { diff --git a/drivers/gpu/drm/i915/display/intel_crt.h 
b/drivers/gpu/drm/i915/display/intel_crt.h index c6071efd93ce..fe7690c2b948 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.h +++ b/drivers/gpu/drm/i915/display/intel_crt.h @@ -12,9 +12,23 @@ enum pipe; struct drm_encoder; struct drm_i915_private; +#ifdef I915 bool intel_crt_port_enabled(struct drm_i915_private *dev_priv, i915_reg_t adpa_reg, enum pipe *pipe); void intel_crt_init(struct drm_i915_private *dev_priv); void intel_crt_reset(struct drm_encoder *encoder); +#else +static inline bool intel_crt_port_enabled(struct drm_i915_private *dev_priv, + i915_reg_t adpa_reg, enum pipe *pipe) +{ + return false; +} +static inline void intel_crt_init(struct drm_i915_private *dev_priv) +{ +} +static inline void intel_crt_reset(struct drm_encoder *encoder) +{ +} +#endif #endif /* __INTEL_CRT_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c index abd607b564f1..6e6a1818071e 100644 --- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c @@ -2596,8 +2596,7 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915, drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n", phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US); - intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), - XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1), + intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), lane_pipe_reset, lane_pipe_reset); if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(port), @@ -3005,12 +3004,13 @@ intel_mtl_port_pll_type(struct intel_encoder *encoder, } void intel_c10pll_state_verify(struct intel_atomic_state *state, - struct intel_crtc_state *new_crtc_state) + struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct intel_c10pll_state mpllb_hw_state = { 0 }; - struct intel_c10pll_state *mpllb_sw_state = &new_crtc_state->cx0pll_state.c10; - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); + const struct intel_c10pll_state *mpllb_sw_state = &new_crtc_state->cx0pll_state.c10; struct intel_encoder *encoder; enum phy phy; int i; diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h index 912e0eeb0be3..0e0a38dac8cd 100644 --- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h @@ -16,6 +16,7 @@ struct drm_i915_private; struct intel_atomic_state; struct intel_c10pll_state; struct intel_c20pll_state; +struct intel_crtc; struct intel_crtc_state; struct intel_encoder; struct intel_hdmi; @@ -34,7 +35,7 @@ void intel_c10pll_dump_hw_state(struct drm_i915_private *dev_priv, int intel_c10pll_calc_port_clock(struct intel_encoder *encoder, const struct intel_c10pll_state *pll_state); void intel_c10pll_state_verify(struct intel_atomic_state *state, - struct intel_crtc_state *new_crtc_state); + struct intel_crtc *crtc); void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, struct intel_c20pll_state *pll_state); void intel_c20pll_dump_hw_state(struct drm_i915_private *i915, diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 4668de45d6fe..9151d5add960 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -4333,7 +4333,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder, u8 config; int ret; - if (!connector 
|| connector->base.status != connector_status_connected) + if (connector->base.status != connector_status_connected) return 0; ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index b3ae81a6ab16..28d85e1e858e 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -956,6 +956,8 @@ static void intel_post_plane_update(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); enum pipe pipe = crtc->pipe; + intel_psr_post_plane_update(state, crtc); + intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits); if (new_crtc_state->update_wm_post && new_crtc_state->hw.active) @@ -3997,7 +3999,7 @@ intel_encoder_current_mode(struct intel_encoder *encoder) } if (!intel_crtc_get_pipe_config(crtc_state)) { - kfree(crtc_state); + intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi); kfree(mode); return NULL; } @@ -4006,7 +4008,7 @@ intel_encoder_current_mode(struct intel_encoder *encoder) intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode); - kfree(crtc_state); + intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi); return mode; } @@ -4986,9 +4988,6 @@ pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc, static bool fastboot_enabled(struct drm_i915_private *dev_priv) { - if (dev_priv->params.fastboot != -1) - return dev_priv->params.fastboot; - /* Enable fastboot by default on Skylake and newer */ if (DISPLAY_VER(dev_priv) >= 9) return true; @@ -7216,7 +7215,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) intel_set_cdclk_pre_plane_update(state); - intel_modeset_verify_disabled(dev_priv, state); + intel_modeset_verify_disabled(state); } intel_sagv_pre_plane_update(state); @@ -7296,14 +7295,13 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) } intel_dbuf_post_plane_update(state); - intel_psr_post_plane_update(state); for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { intel_post_plane_update(state, crtc); intel_modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]); - intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state); + intel_modeset_verify_crtc(state, crtc); /* Must be done after gamma readout due to HSW split gamma vs. 
IPS w/a */ hsw_ips_post_update(state, crtc); diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h index 53e5c33e08c3..ccfe27630fb6 100644 --- a/drivers/gpu/drm/i915/display/intel_display_core.h +++ b/drivers/gpu/drm/i915/display/intel_display_core.h @@ -175,6 +175,9 @@ struct intel_hotplug { /* Whether or not to count short HPD IRQs in HPD storms */ u8 hpd_short_storm_enabled; + /* Last state reported by oob_hotplug_event for each encoder */ + unsigned long oob_hotplug_last_state; + /* * if we get a HPD irq from DP and a HPD irq from non-DP * the non-DP HPD could block the workqueue on a mode config diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index f05b52381a83..fbe75d47a165 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -237,14 +237,13 @@ static void intel_dp_info(struct seq_file *m, struct intel_connector *connector) { struct intel_encoder *intel_encoder = intel_attached_encoder(connector); struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); - const struct edid *edid = drm_edid_raw(connector->detect_edid); seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); seq_printf(m, "\taudio support: %s\n", str_yes_no(connector->base.display_info.has_audio)); drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports, - edid, &intel_dp->aux); + connector->detect_edid, &intel_dp->aux); } static void intel_dp_mst_info(struct seq_file *m, @@ -645,6 +644,7 @@ static int i915_display_info(struct seq_file *m, void *unused) static int i915_shared_dplls_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct intel_shared_dpll *pll; int i; drm_modeset_lock_all(&dev_priv->drm); @@ -653,11 +653,9 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused) dev_priv->display.dpll.ref_clks.nssc, dev_priv->display.dpll.ref_clks.ssc); - for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) { - struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i]; - - seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name, - pll->info->id); + for_each_shared_dpll(dev_priv, pll, i) { + seq_printf(m, "DPLL%i: %s, id: %i\n", pll->index, + pll->info->name, pll->info->id); seq_printf(m, " pipe_mask: 0x%x, active: 0x%x, on: %s\n", pll->state.pipe_mask, pll->active_mask, str_yes_no(pll->on)); diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c index a6a18eae7ae8..2b1ec23ba9c3 100644 --- a/drivers/gpu/drm/i915/display/intel_display_device.c +++ b/drivers/gpu/drm/i915/display/intel_display_device.c @@ -926,7 +926,7 @@ void intel_display_device_probe(struct drm_i915_private *i915) else info = probe_display(i915); - i915->display.info.__device_info = info; + DISPLAY_INFO(i915) = info; memcpy(DISPLAY_RUNTIME_INFO(i915), &DISPLAY_INFO(i915)->__runtime_defaults, @@ -939,7 +939,7 @@ void intel_display_device_probe(struct drm_i915_private *i915) } } -void intel_display_device_info_runtime_init(struct drm_i915_private *i915) +static void __intel_display_device_info_runtime_init(struct drm_i915_private *i915) { struct intel_display_runtime_info *display_runtime = DISPLAY_RUNTIME_INFO(i915); enum pipe pipe; @@ -948,6 +948,13 @@ void intel_display_device_info_runtime_init(struct drm_i915_private *i915) 
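The debugfs hunk above is the first user in this section of the for_each_shared_dpll() iterator; the intel_dpll_mgr.c changes further down convert the remaining sites. A sketch of the resulting idiom, assuming the usual intel_dpll_mgr.h / i915_drv.h includes and relying only on fields the patch itself touches (pll->index, pll->info, pll->active_mask):

static void example_dump_plls(struct drm_i915_private *i915,
                              struct drm_printer *p)
{
        struct intel_shared_dpll *pll;
        int i;

        /* Walk the PLL table; pll->index need not equal pll->info->id. */
        for_each_shared_dpll(i915, pll, i)
                drm_printf(p, "DPLL%d: %s (id %d), active 0x%x\n",
                           pll->index, pll->info->name, pll->info->id,
                           pll->active_mask);
}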
BUILD_BUG_ON(BITS_PER_TYPE(display_runtime->cpu_transcoder_mask) < I915_MAX_TRANSCODERS); BUILD_BUG_ON(BITS_PER_TYPE(display_runtime->port_mask) < I915_MAX_PORTS); + /* This covers both ULT and ULX */ + if (IS_HASWELL_ULT(i915) || IS_BROADWELL_ULT(i915)) + display_runtime->port_mask &= ~BIT(PORT_D); + + if (IS_ICL_WITH_PORT_F(i915)) + display_runtime->port_mask |= BIT(PORT_F); + /* Wa_14011765242: adl-s A0,A1 */ if (IS_ALDERLAKE_S(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_A2)) for_each_pipe(i915, pipe) @@ -1065,12 +1072,44 @@ void intel_display_device_info_runtime_init(struct drm_i915_private *i915) display_runtime->has_dsc = 0; } + if (DISPLAY_VER(i915) >= 20) { + u32 cap = intel_de_read(i915, XE2LPD_DE_CAP); + + if (REG_FIELD_GET(XE2LPD_DE_CAP_DSC_MASK, cap) == + XE2LPD_DE_CAP_DSC_REMOVED) + display_runtime->has_dsc = 0; + + if (REG_FIELD_GET(XE2LPD_DE_CAP_SCALER_MASK, cap) == + XE2LPD_DE_CAP_SCALER_SINGLE) { + for_each_pipe(i915, pipe) + if (display_runtime->num_scalers[pipe]) + display_runtime->num_scalers[pipe] = 1; + } + } + return; display_fused_off: memset(display_runtime, 0, sizeof(*display_runtime)); } +void intel_display_device_info_runtime_init(struct drm_i915_private *i915) +{ + if (HAS_DISPLAY(i915)) + __intel_display_device_info_runtime_init(i915); + + /* Display may have been disabled by runtime init */ + if (!HAS_DISPLAY(i915)) { + i915->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC); + i915->display.info.__device_info = &no_display; + } + + /* Disable nuclear pageflip by default on pre-g4x */ + if (!i915->params.nuclear_pageflip && + DISPLAY_VER(i915) < 5 && !IS_G4X(i915)) + i915->drm.driver_features &= ~DRIVER_ATOMIC; +} + void intel_display_device_info_print(const struct intel_display_device_info *info, const struct intel_display_runtime_info *runtime, struct drm_printer *p) @@ -1091,3 +1130,20 @@ void intel_display_device_info_print(const struct intel_display_device_info *inf drm_printf(p, "has_dmc: %s\n", str_yes_no(runtime->has_dmc)); drm_printf(p, "has_dsc: %s\n", str_yes_no(runtime->has_dsc)); } + +/* + * Assuming the device has display hardware, should it be enabled? + * + * It's an error to call this function if the device does not have display + * hardware. + * + * Disabling display means taking over the display hardware, putting it to + * sleep, and preventing connectors from being connected via any means. 
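An aside tied to the hotplug rework elsewhere in this patch: drm_connector_oob_hotplug_event() now carries the logical HPD state, and the intel_dp handler further down only queues work when that state actually changes. A sketch of how an out-of-band reporter (for example a Type-C mux driver) might call it after this change, with the wrapper name invented for illustration:

#include <drm/drm_connector.h>

static void example_report_typec_hpd(struct fwnode_handle *connector_fwnode,
                                     bool hpd_asserted)
{
        /* Pass the logical state rather than just "something changed". */
        drm_connector_oob_hotplug_event(connector_fwnode,
                                        hpd_asserted ?
                                        connector_status_connected :
                                        connector_status_disconnected);
}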
+ */ +bool intel_display_device_enabled(struct drm_i915_private *i915) +{ + /* Only valid when HAS_DISPLAY() is true */ + drm_WARN_ON(&i915->drm, !HAS_DISPLAY(i915)); + + return !i915->params.disable_display && !intel_opregion_headless_sku(i915); +} diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h index 44733c9d5812..5b5c0e53307f 100644 --- a/drivers/gpu/drm/i915/display/intel_display_device.h +++ b/drivers/gpu/drm/i915/display/intel_display_device.h @@ -98,6 +98,15 @@ struct drm_printer; (IS_DISPLAY_IP_RANGE((__i915), (ipver), (ipver)) && \ IS_DISPLAY_STEP((__i915), (from), (until))) +#define DISPLAY_INFO(i915) ((i915)->display.info.__device_info) +#define DISPLAY_RUNTIME_INFO(i915) (&(i915)->display.info.__runtime_info) + +#define DISPLAY_VER(i915) (DISPLAY_RUNTIME_INFO(i915)->ip.ver) +#define DISPLAY_VER_FULL(i915) IP_VER(DISPLAY_RUNTIME_INFO(i915)->ip.ver, \ + DISPLAY_RUNTIME_INFO(i915)->ip.rel) +#define IS_DISPLAY_VER(i915, from, until) \ + (DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until)) + struct intel_display_runtime_info { struct { u16 ver; @@ -150,6 +159,7 @@ struct intel_display_device_info { } color; }; +bool intel_display_device_enabled(struct drm_i915_private *i915); void intel_display_device_probe(struct drm_i915_private *i915); void intel_display_device_info_runtime_init(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index 1623c0c5e8a1..63e080e07023 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -309,7 +309,7 @@ static const struct stepping_info * intel_get_stepping_info(struct drm_i915_private *i915, struct stepping_info *si) { - const char *step_name = intel_step_name(RUNTIME_INFO(i915)->step.display_step); + const char *step_name = intel_display_step_name(i915); si->stepping = step_name[0]; si->substepping = step_name[1]; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 11420595c4f9..4f6835a7578e 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -5207,14 +5207,10 @@ intel_dp_update_dfp(struct intel_dp *intel_dp, { struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_connector *connector = intel_dp->attached_connector; - const struct edid *edid; - - /* FIXME: Get rid of drm_edid_raw() */ - edid = drm_edid_raw(drm_edid); intel_dp->dfp.max_bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, - intel_dp->downstream_ports, edid); + intel_dp->downstream_ports, drm_edid); intel_dp->dfp.max_dotclock = drm_dp_downstream_max_dotclock(intel_dp->dpcd, @@ -5223,11 +5219,11 @@ intel_dp_update_dfp(struct intel_dp *intel_dp, intel_dp->dfp.min_tmds_clock = drm_dp_downstream_min_tmds_clock(intel_dp->dpcd, intel_dp->downstream_ports, - edid); + drm_edid); intel_dp->dfp.max_tmds_clock = drm_dp_downstream_max_tmds_clock(intel_dp->dpcd, intel_dp->downstream_ports, - edid); + drm_edid); intel_dp->dfp.pcon_max_frl_bw = drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd, @@ -5358,7 +5354,7 @@ intel_dp_detect(struct drm_connector *connector, drm_WARN_ON(&dev_priv->drm, !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); - if (!INTEL_DISPLAY_ENABLED(dev_priv)) + if (!intel_display_device_enabled(dev_priv)) return connector_status_disconnected; /* Can't disconnect eDP */ @@ -5727,15 +5723,26 @@ static int intel_dp_connector_atomic_check(struct drm_connector 
*conn, return intel_modeset_synced_crtcs(state, conn); } -static void intel_dp_oob_hotplug_event(struct drm_connector *connector) +static void intel_dp_oob_hotplug_event(struct drm_connector *connector, + enum drm_connector_status hpd_state) { struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); struct drm_i915_private *i915 = to_i915(connector->dev); + bool hpd_high = hpd_state == connector_status_connected; + unsigned int hpd_pin = encoder->hpd_pin; + bool need_work = false; spin_lock_irq(&i915->irq_lock); - i915->display.hotplug.event_bits |= BIT(encoder->hpd_pin); + if (hpd_high != test_bit(hpd_pin, &i915->display.hotplug.oob_hotplug_last_state)) { + i915->display.hotplug.event_bits |= BIT(hpd_pin); + + __assign_bit(hpd_pin, &i915->display.hotplug.oob_hotplug_last_state, hpd_high); + need_work = true; + } spin_unlock_irq(&i915->irq_lock); - queue_delayed_work(i915->unordered_wq, &i915->display.hotplug.hotplug_work, 0); + + if (need_work) + queue_delayed_work(i915->unordered_wq, &i915->display.hotplug.hotplug_work, 0); } static const struct drm_connector_funcs intel_dp_connector_funcs = { diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 648cf37e02a8..73e397736463 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -94,12 +94,9 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder, crtc_state->lane_count = limits->max_lane_count; crtc_state->port_clock = limits->max_rate; - // TODO: Handle pbn_div changes by adding a new MST helper - if (!mst_state->pbn_div) { - mst_state->pbn_div = drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr, - crtc_state->port_clock, - crtc_state->lane_count); - } + mst_state->pbn_div = drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr, + crtc_state->port_clock, + crtc_state->lane_count); for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) { drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp); @@ -1062,7 +1059,7 @@ intel_dp_mst_detect(struct drm_connector *connector, struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_dp *intel_dp = intel_connector->mst_port; - if (!INTEL_DISPLAY_ENABLED(i915)) + if (!intel_display_device_enabled(i915)) return connector_status_disconnected; if (drm_connector_is_unregistered(connector)) diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.h b/drivers/gpu/drm/i915/display/intel_dpio_phy.h index 9c7725dacb47..4d43dbbdf81c 100644 --- a/drivers/gpu/drm/i915/display/intel_dpio_phy.h +++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.h @@ -26,6 +26,7 @@ enum dpio_phy { DPIO_PHY2, }; +#ifdef I915 void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port, enum dpio_phy *phy, enum dpio_channel *ch); void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder, @@ -70,5 +71,100 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); void vlv_phy_reset_lanes(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state); +#else +static inline void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port, + enum dpio_phy *phy, enum dpio_channel *ch) +{ +} +static inline void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ +} +static inline void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy) +{ +} +static inline void bxt_ddi_phy_uninit(struct 
drm_i915_private *dev_priv, enum dpio_phy phy) +{ +} +static inline bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, + enum dpio_phy phy) +{ + return false; +} +static inline bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, + enum dpio_phy phy) +{ + return true; +} +static inline u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count) +{ + return 0; +} +static inline void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder, + u8 lane_lat_optim_mask) +{ +} +static inline u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder) +{ + return 0; +} +static inline enum dpio_channel vlv_dig_port_to_channel(struct intel_digital_port *dig_port) +{ + return DPIO_CH0; +} +static inline enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_port) +{ + return DPIO_PHY0; +} +static inline enum dpio_channel vlv_pipe_to_channel(enum pipe pipe) +{ + return DPIO_CH0; +} +static inline void chv_set_phy_signal_level(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + u32 deemph_reg_value, u32 margin_reg_value, + bool uniq_trans_scale) +{ +} +static inline void chv_data_lane_soft_reset(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + bool reset) +{ +} +static inline void chv_phy_pre_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ +} +static inline void chv_phy_pre_encoder_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ +} +static inline void chv_phy_release_cl2_override(struct intel_encoder *encoder) +{ +} +static inline void chv_phy_post_pll_disable(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state) +{ +} + +static inline void vlv_set_phy_signal_level(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + u32 demph_reg_value, u32 preemph_reg_value, + u32 uniqtranscale_reg_value, u32 tx3_demph) +{ +} +static inline void vlv_phy_pre_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ +} +static inline void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ +} +static inline void vlv_phy_reset_lanes(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state) +{ +} +#endif #endif /* __INTEL_DPIO_PHY_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c index 2255ad651486..d41c1dc9f66c 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.c +++ b/drivers/gpu/drm/i915/display/intel_dpll.c @@ -7,6 +7,7 @@ #include <linux/string_helpers.h> #include "i915_reg.h" +#include "intel_atomic.h" #include "intel_crtc.h" #include "intel_cx0_phy.h" #include "intel_de.h" @@ -2006,7 +2007,7 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, vlv_enable_pll(crtc_state); } - kfree(crtc_state); + intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi); return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 6d68b36292d3..399653a20f98 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -107,22 +107,20 @@ struct intel_dpll_mgr { struct intel_crtc *crtc, struct intel_encoder *encoder); void (*update_ref_clks)(struct drm_i915_private *i915); - void (*dump_hw_state)(struct drm_i915_private *dev_priv, + void (*dump_hw_state)(struct drm_i915_private *i915, const struct 
intel_dpll_hw_state *hw_state); }; static void -intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv, +intel_atomic_duplicate_dpll_state(struct drm_i915_private *i915, struct intel_shared_dpll_state *shared_dpll) { - enum intel_dpll_id i; + struct intel_shared_dpll *pll; + int i; /* Copy shared dpll state */ - for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) { - struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i]; - - shared_dpll[i] = pll->state; - } + for_each_shared_dpll(i915, pll, i) + shared_dpll[pll->index] = pll->state; } static struct intel_shared_dpll_state * @@ -144,33 +142,42 @@ intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s) /** * intel_get_shared_dpll_by_id - get a DPLL given its id - * @dev_priv: i915 device instance + * @i915: i915 device instance * @id: pll id * * Returns: * A pointer to the DPLL with @id */ struct intel_shared_dpll * -intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv, +intel_get_shared_dpll_by_id(struct drm_i915_private *i915, enum intel_dpll_id id) { - return &dev_priv->display.dpll.shared_dplls[id]; + struct intel_shared_dpll *pll; + int i; + + for_each_shared_dpll(i915, pll, i) { + if (pll->info->id == id) + return pll; + } + + MISSING_CASE(id); + return NULL; } /* For ILK+ */ -void assert_shared_dpll(struct drm_i915_private *dev_priv, +void assert_shared_dpll(struct drm_i915_private *i915, struct intel_shared_dpll *pll, bool state) { bool cur_state; struct intel_dpll_hw_state hw_state; - if (drm_WARN(&dev_priv->drm, !pll, + if (drm_WARN(&i915->drm, !pll, "asserting DPLL %s with no DPLL\n", str_on_off(state))) return; - cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state); - I915_STATE_WARN(dev_priv, cur_state != state, + cur_state = intel_dpll_get_hw_state(i915, pll, &hw_state); + I915_STATE_WARN(i915, cur_state != state, "%s assertion failure (expected %s, current %s)\n", pll->info->name, str_on_off(state), str_on_off(cur_state)); @@ -221,41 +228,41 @@ intel_tc_pll_enable_reg(struct drm_i915_private *i915, void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_shared_dpll *pll = crtc_state->shared_dpll; unsigned int pipe_mask = BIT(crtc->pipe); unsigned int old_mask; - if (drm_WARN_ON(&dev_priv->drm, pll == NULL)) + if (drm_WARN_ON(&i915->drm, pll == NULL)) return; - mutex_lock(&dev_priv->display.dpll.lock); + mutex_lock(&i915->display.dpll.lock); old_mask = pll->active_mask; - if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) || - drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask)) + if (drm_WARN_ON(&i915->drm, !(pll->state.pipe_mask & pipe_mask)) || + drm_WARN_ON(&i915->drm, pll->active_mask & pipe_mask)) goto out; pll->active_mask |= pipe_mask; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "enable %s (active 0x%x, on? 
%d) for [CRTC:%d:%s]\n", pll->info->name, pll->active_mask, pll->on, crtc->base.base.id, crtc->base.name); if (old_mask) { - drm_WARN_ON(&dev_priv->drm, !pll->on); - assert_shared_dpll_enabled(dev_priv, pll); + drm_WARN_ON(&i915->drm, !pll->on); + assert_shared_dpll_enabled(i915, pll); goto out; } - drm_WARN_ON(&dev_priv->drm, pll->on); + drm_WARN_ON(&i915->drm, pll->on); - drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name); - pll->info->funcs->enable(dev_priv, pll); + drm_dbg_kms(&i915->drm, "enabling %s\n", pll->info->name); + pll->info->funcs->enable(i915, pll); pll->on = true; out: - mutex_unlock(&dev_priv->display.dpll.lock); + mutex_unlock(&i915->display.dpll.lock); } /** @@ -267,41 +274,57 @@ out: void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_shared_dpll *pll = crtc_state->shared_dpll; unsigned int pipe_mask = BIT(crtc->pipe); /* PCH only available on ILK+ */ - if (DISPLAY_VER(dev_priv) < 5) + if (DISPLAY_VER(i915) < 5) return; if (pll == NULL) return; - mutex_lock(&dev_priv->display.dpll.lock); - if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask), + mutex_lock(&i915->display.dpll.lock); + if (drm_WARN(&i915->drm, !(pll->active_mask & pipe_mask), "%s not used by [CRTC:%d:%s]\n", pll->info->name, crtc->base.base.id, crtc->base.name)) goto out; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n", pll->info->name, pll->active_mask, pll->on, crtc->base.base.id, crtc->base.name); - assert_shared_dpll_enabled(dev_priv, pll); - drm_WARN_ON(&dev_priv->drm, !pll->on); + assert_shared_dpll_enabled(i915, pll); + drm_WARN_ON(&i915->drm, !pll->on); pll->active_mask &= ~pipe_mask; if (pll->active_mask) goto out; - drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name); - pll->info->funcs->disable(dev_priv, pll); + drm_dbg_kms(&i915->drm, "disabling %s\n", pll->info->name); + pll->info->funcs->disable(i915, pll); pll->on = false; out: - mutex_unlock(&dev_priv->display.dpll.lock); + mutex_unlock(&i915->display.dpll.lock); +} + +static unsigned long +intel_dpll_mask_all(struct drm_i915_private *i915) +{ + struct intel_shared_dpll *pll; + unsigned long dpll_mask = 0; + int i; + + for_each_shared_dpll(i915, pll, i) { + drm_WARN_ON(&i915->drm, dpll_mask & BIT(pll->info->id)); + + dpll_mask |= BIT(pll->info->id); + } + + return dpll_mask; } static struct intel_shared_dpll * @@ -310,33 +333,38 @@ intel_find_shared_dpll(struct intel_atomic_state *state, const struct intel_dpll_hw_state *pll_state, unsigned long dpll_mask) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_shared_dpll *pll, *unused_pll = NULL; + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + unsigned long dpll_mask_all = intel_dpll_mask_all(i915); struct intel_shared_dpll_state *shared_dpll; - enum intel_dpll_id i; + struct intel_shared_dpll *unused_pll = NULL; + enum intel_dpll_id id; shared_dpll = intel_atomic_get_shared_dpll_state(&state->base); - drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1)); + drm_WARN_ON(&i915->drm, dpll_mask & ~dpll_mask_all); + + for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) { + struct intel_shared_dpll *pll; - for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) { - pll = &dev_priv->display.dpll.shared_dplls[i]; + pll = 
intel_get_shared_dpll_by_id(i915, id); + if (!pll) + continue; /* Only want to check enabled timings first */ - if (shared_dpll[i].pipe_mask == 0) { + if (shared_dpll[pll->index].pipe_mask == 0) { if (!unused_pll) unused_pll = pll; continue; } if (memcmp(pll_state, - &shared_dpll[i].hw_state, + &shared_dpll[pll->index].hw_state, sizeof(*pll_state)) == 0) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n", crtc->base.base.id, crtc->base.name, pll->info->name, - shared_dpll[i].pipe_mask, + shared_dpll[pll->index].pipe_mask, pll->active_mask); return pll; } @@ -344,7 +372,7 @@ intel_find_shared_dpll(struct intel_atomic_state *state, /* Ok no matching timings, maybe there's a free one? */ if (unused_pll) { - drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n", + drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] allocated %s\n", crtc->base.base.id, crtc->base.name, unused_pll->info->name); return unused_pll; @@ -383,14 +411,13 @@ intel_reference_shared_dpll(struct intel_atomic_state *state, const struct intel_dpll_hw_state *pll_state) { struct intel_shared_dpll_state *shared_dpll; - const enum intel_dpll_id id = pll->info->id; shared_dpll = intel_atomic_get_shared_dpll_state(&state->base); - if (shared_dpll[id].pipe_mask == 0) - shared_dpll[id].hw_state = *pll_state; + if (shared_dpll[pll->index].pipe_mask == 0) + shared_dpll[pll->index].hw_state = *pll_state; - intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[id]); + intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]); } /** @@ -421,11 +448,10 @@ static void intel_unreference_shared_dpll(struct intel_atomic_state *state, const struct intel_shared_dpll *pll) { struct intel_shared_dpll_state *shared_dpll; - const enum intel_dpll_id id = pll->info->id; shared_dpll = intel_atomic_get_shared_dpll_state(&state->base); - intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[id]); + intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]); } static void intel_put_dpll(struct intel_atomic_state *state, @@ -457,22 +483,19 @@ static void intel_put_dpll(struct intel_atomic_state *state, */ void intel_shared_dpll_swap_state(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_shared_dpll_state *shared_dpll = state->shared_dpll; - enum intel_dpll_id i; + struct intel_shared_dpll *pll; + int i; if (!state->dpll_set) return; - for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) { - struct intel_shared_dpll *pll = - &dev_priv->display.dpll.shared_dplls[i]; - - swap(pll->state, shared_dpll[i]); - } + for_each_shared_dpll(i915, pll, i) + swap(pll->state, shared_dpll[pll->index]); } -static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, +static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { @@ -480,48 +503,48 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, intel_wakeref_t wakeref; u32 val; - wakeref = intel_display_power_get_if_enabled(dev_priv, + wakeref = intel_display_power_get_if_enabled(i915, POWER_DOMAIN_DISPLAY_CORE); if (!wakeref) return false; - val = intel_de_read(dev_priv, PCH_DPLL(id)); + val = intel_de_read(i915, PCH_DPLL(id)); hw_state->dpll = val; - hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id)); - hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id)); + hw_state->fp0 = 
intel_de_read(i915, PCH_FP0(id)); + hw_state->fp1 = intel_de_read(i915, PCH_FP1(id)); - intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); + intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref); return val & DPLL_VCO_ENABLE; } -static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) +static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915) { u32 val; bool enabled; - val = intel_de_read(dev_priv, PCH_DREF_CONTROL); + val = intel_de_read(i915, PCH_DREF_CONTROL); enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | DREF_SUPERSPREAD_SOURCE_MASK)); - I915_STATE_WARN(dev_priv, !enabled, + I915_STATE_WARN(i915, !enabled, "PCH refclk assertion failure, should be active but is disabled\n"); } -static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, +static void ibx_pch_dpll_enable(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; /* PCH refclock must be enabled first */ - ibx_assert_pch_refclk_enabled(dev_priv); + ibx_assert_pch_refclk_enabled(i915); - intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0); - intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1); + intel_de_write(i915, PCH_FP0(id), pll->state.hw_state.fp0); + intel_de_write(i915, PCH_FP1(id), pll->state.hw_state.fp1); - intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll); + intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll); /* Wait for the clocks to stabilize. */ - intel_de_posting_read(dev_priv, PCH_DPLL(id)); + intel_de_posting_read(i915, PCH_DPLL(id)); udelay(150); /* The pixel multiplier can only be updated once the @@ -529,18 +552,18 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, * * So write it again. */ - intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll); - intel_de_posting_read(dev_priv, PCH_DPLL(id)); + intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll); + intel_de_posting_read(i915, PCH_DPLL(id)); udelay(200); } -static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, +static void ibx_pch_dpll_disable(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; - intel_de_write(dev_priv, PCH_DPLL(id), 0); - intel_de_posting_read(dev_priv, PCH_DPLL(id)); + intel_de_write(i915, PCH_DPLL(id), 0); + intel_de_posting_read(i915, PCH_DPLL(id)); udelay(200); } @@ -557,16 +580,16 @@ static int ibx_get_dpll(struct intel_atomic_state *state, { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_shared_dpll *pll; - enum intel_dpll_id i; + enum intel_dpll_id id; - if (HAS_PCH_IBX(dev_priv)) { + if (HAS_PCH_IBX(i915)) { /* Ironlake PCH has a fixed PLL->PCH pipe mapping. 
*/ - i = (enum intel_dpll_id) crtc->pipe; - pll = &dev_priv->display.dpll.shared_dplls[i]; + id = (enum intel_dpll_id) crtc->pipe; + pll = intel_get_shared_dpll_by_id(i915, id); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] using pre-allocated %s\n", crtc->base.base.id, crtc->base.name, pll->info->name); @@ -589,10 +612,10 @@ static int ibx_get_dpll(struct intel_atomic_state *state, return 0; } -static void ibx_dump_hw_state(struct drm_i915_private *dev_priv, +static void ibx_dump_hw_state(struct drm_i915_private *i915, const struct intel_dpll_hw_state *hw_state) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " "fp0: 0x%x, fp1: 0x%x\n", hw_state->dpll, @@ -621,57 +644,57 @@ static const struct intel_dpll_mgr pch_pll_mgr = { .dump_hw_state = ibx_dump_hw_state, }; -static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv, +static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; - intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll); - intel_de_posting_read(dev_priv, WRPLL_CTL(id)); + intel_de_write(i915, WRPLL_CTL(id), pll->state.hw_state.wrpll); + intel_de_posting_read(i915, WRPLL_CTL(id)); udelay(20); } -static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv, +static void hsw_ddi_spll_enable(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { - intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll); - intel_de_posting_read(dev_priv, SPLL_CTL); + intel_de_write(i915, SPLL_CTL, pll->state.hw_state.spll); + intel_de_posting_read(i915, SPLL_CTL); udelay(20); } -static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv, +static void hsw_ddi_wrpll_disable(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; - intel_de_rmw(dev_priv, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0); - intel_de_posting_read(dev_priv, WRPLL_CTL(id)); + intel_de_rmw(i915, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0); + intel_de_posting_read(i915, WRPLL_CTL(id)); /* * Try to set up the PCH reference clock once all DPLLs * that depend on it have been shut down. */ - if (dev_priv->display.dpll.pch_ssc_use & BIT(id)) - intel_init_pch_refclk(dev_priv); + if (i915->display.dpll.pch_ssc_use & BIT(id)) + intel_init_pch_refclk(i915); } -static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv, +static void hsw_ddi_spll_disable(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { enum intel_dpll_id id = pll->info->id; - intel_de_rmw(dev_priv, SPLL_CTL, SPLL_PLL_ENABLE, 0); - intel_de_posting_read(dev_priv, SPLL_CTL); + intel_de_rmw(i915, SPLL_CTL, SPLL_PLL_ENABLE, 0); + intel_de_posting_read(i915, SPLL_CTL); /* * Try to set up the PCH reference clock once all DPLLs * that depend on it have been shut down. 
*/ - if (dev_priv->display.dpll.pch_ssc_use & BIT(id)) - intel_init_pch_refclk(dev_priv); + if (i915->display.dpll.pch_ssc_use & BIT(id)) + intel_init_pch_refclk(i915); } -static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv, +static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { @@ -679,35 +702,35 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv, intel_wakeref_t wakeref; u32 val; - wakeref = intel_display_power_get_if_enabled(dev_priv, + wakeref = intel_display_power_get_if_enabled(i915, POWER_DOMAIN_DISPLAY_CORE); if (!wakeref) return false; - val = intel_de_read(dev_priv, WRPLL_CTL(id)); + val = intel_de_read(i915, WRPLL_CTL(id)); hw_state->wrpll = val; - intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); + intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref); return val & WRPLL_PLL_ENABLE; } -static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv, +static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { intel_wakeref_t wakeref; u32 val; - wakeref = intel_display_power_get_if_enabled(dev_priv, + wakeref = intel_display_power_get_if_enabled(i915, POWER_DOMAIN_DISPLAY_CORE); if (!wakeref) return false; - val = intel_de_read(dev_priv, SPLL_CTL); + val = intel_de_read(i915, SPLL_CTL); hw_state->spll = val; - intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); + intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref); return val & SPLL_PLL_ENABLE; } @@ -918,7 +941,7 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */, *r2_out = best.r2; } -static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, +static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, const struct intel_dpll_hw_state *pll_state) { @@ -929,8 +952,8 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, switch (wrpll & WRPLL_REF_MASK) { case WRPLL_REF_SPECIAL_HSW: /* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */ - if (IS_HASWELL(dev_priv) && !IS_HASWELL_ULT(dev_priv)) { - refclk = dev_priv->display.dpll.ref_clks.nssc; + if (IS_HASWELL(i915) && !IS_HASWELL_ULT(i915)) { + refclk = i915->display.dpll.ref_clks.nssc; break; } fallthrough; @@ -940,7 +963,7 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, * code only cares about 5% accuracy, and spread is a max of * 0.5% downspread. 
*/ - refclk = dev_priv->display.dpll.ref_clks.ssc; + refclk = i915->display.dpll.ref_clks.ssc; break; case WRPLL_REF_LCPLL: refclk = 2700000; @@ -996,7 +1019,7 @@ hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state, static int hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); int clock = crtc_state->port_clock; switch (clock / 2) { @@ -1005,7 +1028,7 @@ hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state) case 270000: return 0; default: - drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n", + drm_dbg_kms(&i915->drm, "Invalid clock for DP: %d\n", clock); return -EINVAL; } @@ -1014,7 +1037,7 @@ hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state) static struct intel_shared_dpll * hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); struct intel_shared_dpll *pll; enum intel_dpll_id pll_id; int clock = crtc_state->port_clock; @@ -1034,7 +1057,7 @@ hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state) return NULL; } - pll = intel_get_shared_dpll_by_id(dev_priv, pll_id); + pll = intel_get_shared_dpll_by_id(i915, pll_id); if (!pll) return NULL; @@ -1170,10 +1193,10 @@ static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915) i915->display.dpll.ref_clks.nssc = 135000; } -static void hsw_dump_hw_state(struct drm_i915_private *dev_priv, +static void hsw_dump_hw_state(struct drm_i915_private *i915, const struct intel_dpll_hw_state *hw_state) { - drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n", + drm_dbg_kms(&i915->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n", hw_state->wrpll, hw_state->spll); } @@ -1191,17 +1214,17 @@ static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = { .get_freq = hsw_ddi_spll_get_freq, }; -static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv, +static void hsw_ddi_lcpll_enable(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { } -static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv, +static void hsw_ddi_lcpll_disable(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { } -static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv, +static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { @@ -1265,60 +1288,60 @@ static const struct skl_dpll_regs skl_dpll_regs[4] = { }, }; -static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv, +static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; - intel_de_rmw(dev_priv, DPLL_CTRL1, + intel_de_rmw(i915, DPLL_CTRL1, DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id), pll->state.hw_state.ctrl1 << (id * 6)); - intel_de_posting_read(dev_priv, DPLL_CTRL1); + intel_de_posting_read(i915, DPLL_CTRL1); } -static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv, +static void skl_ddi_pll_enable(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { const struct skl_dpll_regs *regs = skl_dpll_regs; const enum intel_dpll_id id = pll->info->id; - skl_ddi_pll_write_ctrl1(dev_priv, pll); + skl_ddi_pll_write_ctrl1(i915, pll); - intel_de_write(dev_priv, regs[id].cfgcr1, 
pll->state.hw_state.cfgcr1); - intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2); - intel_de_posting_read(dev_priv, regs[id].cfgcr1); - intel_de_posting_read(dev_priv, regs[id].cfgcr2); + intel_de_write(i915, regs[id].cfgcr1, pll->state.hw_state.cfgcr1); + intel_de_write(i915, regs[id].cfgcr2, pll->state.hw_state.cfgcr2); + intel_de_posting_read(i915, regs[id].cfgcr1); + intel_de_posting_read(i915, regs[id].cfgcr2); /* the enable bit is always bit 31 */ - intel_de_rmw(dev_priv, regs[id].ctl, 0, LCPLL_PLL_ENABLE); + intel_de_rmw(i915, regs[id].ctl, 0, LCPLL_PLL_ENABLE); - if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5)) - drm_err(&dev_priv->drm, "DPLL %d not locked\n", id); + if (intel_de_wait_for_set(i915, DPLL_STATUS, DPLL_LOCK(id), 5)) + drm_err(&i915->drm, "DPLL %d not locked\n", id); } -static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv, +static void skl_ddi_dpll0_enable(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { - skl_ddi_pll_write_ctrl1(dev_priv, pll); + skl_ddi_pll_write_ctrl1(i915, pll); } -static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv, +static void skl_ddi_pll_disable(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { const struct skl_dpll_regs *regs = skl_dpll_regs; const enum intel_dpll_id id = pll->info->id; /* the enable bit is always bit 31 */ - intel_de_rmw(dev_priv, regs[id].ctl, LCPLL_PLL_ENABLE, 0); - intel_de_posting_read(dev_priv, regs[id].ctl); + intel_de_rmw(i915, regs[id].ctl, LCPLL_PLL_ENABLE, 0); + intel_de_posting_read(i915, regs[id].ctl); } -static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv, +static void skl_ddi_dpll0_disable(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { } -static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, +static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { @@ -1328,34 +1351,34 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, intel_wakeref_t wakeref; bool ret; - wakeref = intel_display_power_get_if_enabled(dev_priv, + wakeref = intel_display_power_get_if_enabled(i915, POWER_DOMAIN_DISPLAY_CORE); if (!wakeref) return false; ret = false; - val = intel_de_read(dev_priv, regs[id].ctl); + val = intel_de_read(i915, regs[id].ctl); if (!(val & LCPLL_PLL_ENABLE)) goto out; - val = intel_de_read(dev_priv, DPLL_CTRL1); + val = intel_de_read(i915, DPLL_CTRL1); hw_state->ctrl1 = (val >> (id * 6)) & 0x3f; /* avoid reading back stale values if HDMI mode is not enabled */ if (val & DPLL_CTRL1_HDMI_MODE(id)) { - hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1); - hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2); + hw_state->cfgcr1 = intel_de_read(i915, regs[id].cfgcr1); + hw_state->cfgcr2 = intel_de_read(i915, regs[id].cfgcr2); } ret = true; out: - intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); + intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref); return ret; } -static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv, +static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { @@ -1365,7 +1388,7 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv, u32 val; bool ret; - wakeref = intel_display_power_get_if_enabled(dev_priv, + wakeref = intel_display_power_get_if_enabled(i915, POWER_DOMAIN_DISPLAY_CORE); if 
(!wakeref) return false; @@ -1373,17 +1396,17 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv, ret = false; /* DPLL0 is always enabled since it drives CDCLK */ - val = intel_de_read(dev_priv, regs[id].ctl); - if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE))) + val = intel_de_read(i915, regs[id].ctl); + if (drm_WARN_ON(&i915->drm, !(val & LCPLL_PLL_ENABLE))) goto out; - val = intel_de_read(dev_priv, DPLL_CTRL1); + val = intel_de_read(i915, DPLL_CTRL1); hw_state->ctrl1 = (val >> (id * 6)) & 0x3f; ret = true; out: - intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); + intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref); return ret; } @@ -1873,10 +1896,10 @@ static void skl_update_dpll_ref_clks(struct drm_i915_private *i915) i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref; } -static void skl_dump_hw_state(struct drm_i915_private *dev_priv, +static void skl_dump_hw_state(struct drm_i915_private *i915, const struct intel_dpll_hw_state *hw_state) { - drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: " + drm_dbg_kms(&i915->drm, "dpll_hw_state: " "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n", hw_state->ctrl1, hw_state->cfgcr1, @@ -1914,129 +1937,129 @@ static const struct intel_dpll_mgr skl_pll_mgr = { .dump_hw_state = skl_dump_hw_state, }; -static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll) +static void bxt_ddi_pll_enable(struct drm_i915_private *i915, + struct intel_shared_dpll *pll) { u32 temp; enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ enum dpio_phy phy; enum dpio_channel ch; - bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); + bxt_port_to_phy_channel(i915, port, &phy, &ch); /* Non-SSC reference */ - intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL); + intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL); - if (IS_GEMINILAKE(dev_priv)) { - intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), + if (IS_GEMINILAKE(i915)) { + intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_POWER_ENABLE); - if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & + if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_POWER_STATE), 200)) - drm_err(&dev_priv->drm, + drm_err(&i915->drm, "Power state not set for PLL:%d\n", port); } /* Disable 10 bit clock */ - intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), + intel_de_rmw(i915, BXT_PORT_PLL_EBB_4(phy, ch), PORT_PLL_10BIT_CLK_ENABLE, 0); /* Write P1 & P2 */ - intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), + intel_de_rmw(i915, BXT_PORT_PLL_EBB_0(phy, ch), PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0); /* Write M2 integer */ - intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 0), + intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 0), PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0); /* Write N */ - intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 1), + intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 1), PORT_PLL_N_MASK, pll->state.hw_state.pll1); /* Write M2 fraction */ - intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 2), + intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 2), PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2); /* Write M2 fraction enable */ - intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 3), + intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 3), PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3); /* Write coeff */ - temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6)); + temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6)); temp &= 
~PORT_PLL_PROP_COEFF_MASK; temp &= ~PORT_PLL_INT_COEFF_MASK; temp &= ~PORT_PLL_GAIN_CTL_MASK; temp |= pll->state.hw_state.pll6; - intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp); + intel_de_write(i915, BXT_PORT_PLL(phy, ch, 6), temp); /* Write calibration val */ - intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 8), + intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 8), PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8); - intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 9), + intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 9), PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9); - temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10)); + temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10)); temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H; temp &= ~PORT_PLL_DCO_AMP_MASK; temp |= pll->state.hw_state.pll10; - intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp); + intel_de_write(i915, BXT_PORT_PLL(phy, ch, 10), temp); /* Recalibrate with new settings */ - temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch)); + temp = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch)); temp |= PORT_PLL_RECALIBRATE; - intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp); + intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp); temp &= ~PORT_PLL_10BIT_CLK_ENABLE; temp |= pll->state.hw_state.ebb4; - intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp); + intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp); /* Enable PLL */ - intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE); - intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); + intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE); + intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port)); - if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK), + if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK), 200)) - drm_err(&dev_priv->drm, "PLL %d not locked\n", port); + drm_err(&i915->drm, "PLL %d not locked\n", port); - if (IS_GEMINILAKE(dev_priv)) { - temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch)); + if (IS_GEMINILAKE(i915)) { + temp = intel_de_read(i915, BXT_PORT_TX_DW5_LN0(phy, ch)); temp |= DCC_DELAY_RANGE_2; - intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp); + intel_de_write(i915, BXT_PORT_TX_DW5_GRP(phy, ch), temp); } /* * While we write to the group register to program all lanes at once we * can read only lane registers and we pick lanes 0/1 for that. 
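The BXT PLL programming above repeats one shape over and over: read a register, clear a field mask, OR in the new field value, write it back (either open-coded or via intel_de_rmw()). A tiny standalone sketch of that masked read-modify-write step, with a hypothetical masked_update() helper standing in for the intel_de_read()/intel_de_write() pair, might look like this:

#include <stdint.h>
#include <stdio.h>

/* Clear the bits covered by @mask and install @bits in their place,
 * mirroring the temp &= ~MASK; temp |= value; sequence used above. */
static uint32_t masked_update(uint32_t old, uint32_t mask, uint32_t bits)
{
        return (old & ~mask) | (bits & mask);
}

int main(void)
{
        uint32_t reg = 0xdeadbeef;

        /* e.g. replace the 8-bit field at bits 8..15 with 0x42 */
        reg = masked_update(reg, 0x0000ff00, 0x42 << 8);
        printf("%#x\n", reg);   /* prints 0xdead42ef */
        return 0;
}

The same pattern is what intel_de_rmw(i915, reg, clear, set) packages into a single call, which is why most of the hunks above shrink to one line per field.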
*/ - temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch)); + temp = intel_de_read(i915, BXT_PORT_PCS_DW12_LN01(phy, ch)); temp &= ~LANE_STAGGER_MASK; temp &= ~LANESTAGGER_STRAP_OVRD; temp |= pll->state.hw_state.pcsdw12; - intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp); + intel_de_write(i915, BXT_PORT_PCS_DW12_GRP(phy, ch), temp); } -static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll) +static void bxt_ddi_pll_disable(struct drm_i915_private *i915, + struct intel_shared_dpll *pll) { enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ - intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0); - intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); + intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0); + intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port)); - if (IS_GEMINILAKE(dev_priv)) { - intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), + if (IS_GEMINILAKE(i915)) { + intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), PORT_PLL_POWER_ENABLE, 0); - if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & + if (wait_for_us(!(intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_POWER_STATE), 200)) - drm_err(&dev_priv->drm, + drm_err(&i915->drm, "Power state not reset for PLL:%d\n", port); } } -static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state) +static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state) { enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ intel_wakeref_t wakeref; @@ -2045,49 +2068,49 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, u32 val; bool ret; - bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); + bxt_port_to_phy_channel(i915, port, &phy, &ch); - wakeref = intel_display_power_get_if_enabled(dev_priv, + wakeref = intel_display_power_get_if_enabled(i915, POWER_DOMAIN_DISPLAY_CORE); if (!wakeref) return false; ret = false; - val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); + val = intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)); if (!(val & PORT_PLL_ENABLE)) goto out; - hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch)); + hw_state->ebb0 = intel_de_read(i915, BXT_PORT_PLL_EBB_0(phy, ch)); hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK; - hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch)); + hw_state->ebb4 = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch)); hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE; - hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0)); + hw_state->pll0 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 0)); hw_state->pll0 &= PORT_PLL_M2_INT_MASK; - hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1)); + hw_state->pll1 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 1)); hw_state->pll1 &= PORT_PLL_N_MASK; - hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2)); + hw_state->pll2 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 2)); hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK; - hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3)); + hw_state->pll3 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 3)); hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE; - hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6)); + hw_state->pll6 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6)); hw_state->pll6 &= 
PORT_PLL_PROP_COEFF_MASK | PORT_PLL_INT_COEFF_MASK | PORT_PLL_GAIN_CTL_MASK; - hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8)); + hw_state->pll8 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 8)); hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK; - hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9)); + hw_state->pll9 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 9)); hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK; - hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10)); + hw_state->pll10 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10)); hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H | PORT_PLL_DCO_AMP_MASK; @@ -2096,20 +2119,20 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, * can read only lane registers. We configure all lanes the same way, so * here just read out lanes 0/1 and output a note if lanes 2/3 differ. */ - hw_state->pcsdw12 = intel_de_read(dev_priv, + hw_state->pcsdw12 = intel_de_read(i915, BXT_PORT_PCS_DW12_LN01(phy, ch)); - if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12) - drm_dbg(&dev_priv->drm, + if (intel_de_read(i915, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12) + drm_dbg(&i915->drm, "lane stagger config different for lane 01 (%08x) and 23 (%08x)\n", hw_state->pcsdw12, - intel_de_read(dev_priv, + intel_de_read(i915, BXT_PORT_PCS_DW12_LN23(phy, ch))); hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD; ret = true; out: - intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); + intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref); return ret; } @@ -2300,15 +2323,15 @@ static int bxt_get_dpll(struct intel_atomic_state *state, { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_shared_dpll *pll; enum intel_dpll_id id; /* 1:1 mapping between ports and PLLs */ id = (enum intel_dpll_id) encoder->port; - pll = intel_get_shared_dpll_by_id(dev_priv, id); + pll = intel_get_shared_dpll_by_id(i915, id); - drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n", + drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] using pre-allocated %s\n", crtc->base.base.id, crtc->base.name, pll->info->name); intel_reference_shared_dpll(state, crtc, @@ -2326,10 +2349,10 @@ static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915) /* DSI non-SSC ref 19.2MHz */ } -static void bxt_dump_hw_state(struct drm_i915_private *dev_priv, +static void bxt_dump_hw_state(struct drm_i915_private *i915, const struct intel_dpll_hw_state *hw_state) { - drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x," + drm_dbg_kms(&i915->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x," "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, " "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n", hw_state->ebb0, @@ -2557,9 +2580,9 @@ static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = { static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state, struct skl_wrpll_params *pll_params) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); const struct icl_combo_pll_params *params = - dev_priv->display.dpll.ref_clks.nssc == 24000 ? + i915->display.dpll.ref_clks.nssc == 24000 ? 
icl_dp_combo_pll_24MHz_values : icl_dp_combo_pll_19_2MHz_values; int clock = crtc_state->port_clock; @@ -2579,12 +2602,12 @@ static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state, static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, struct skl_wrpll_params *pll_params) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); - if (DISPLAY_VER(dev_priv) >= 12) { - switch (dev_priv->display.dpll.ref_clks.nssc) { + if (DISPLAY_VER(i915) >= 12) { + switch (i915->display.dpll.ref_clks.nssc) { default: - MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc); + MISSING_CASE(i915->display.dpll.ref_clks.nssc); fallthrough; case 19200: case 38400: @@ -2595,9 +2618,9 @@ static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, break; } } else { - switch (dev_priv->display.dpll.ref_clks.nssc) { + switch (i915->display.dpll.ref_clks.nssc) { default: - MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc); + MISSING_CASE(i915->display.dpll.ref_clks.nssc); fallthrough; case 19200: case 38400: @@ -2853,8 +2876,8 @@ static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, struct intel_dpll_hw_state *pll_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - int refclk_khz = dev_priv->display.dpll.ref_clks.nssc; + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + int refclk_khz = i915->display.dpll.ref_clks.nssc; int clock = crtc_state->port_clock; u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac; u32 iref_ndiv, iref_trim, iref_pulse_w; @@ -2864,7 +2887,7 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, u64 tmp; bool use_ssc = false; bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI); - bool is_dkl = DISPLAY_VER(dev_priv) >= 12; + bool is_dkl = DISPLAY_VER(i915) >= 12; int ret; ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz, @@ -2962,8 +2985,8 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, DKL_PLL_DIV0_PROP_COEFF(prop_coeff) | DKL_PLL_DIV0_FBPREDIV(m1div) | DKL_PLL_DIV0_FBDIV_INT(m2div_int); - if (dev_priv->display.vbt.override_afc_startup) { - u8 val = dev_priv->display.vbt.override_afc_startup_val; + if (i915->display.vbt.override_afc_startup) { + u8 val = i915->display.vbt.override_afc_startup_val; pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val); } @@ -3053,16 +3076,16 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, return 0; } -static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv, +static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, const struct intel_dpll_hw_state *pll_state) { u32 m1, m2_int, m2_frac, div1, div2, ref_clock; u64 tmp; - ref_clock = dev_priv->display.dpll.ref_clks.nssc; + ref_clock = i915->display.dpll.ref_clks.nssc; - if (DISPLAY_VER(dev_priv) >= 12) { + if (DISPLAY_VER(i915) >= 12) { m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK; m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT; m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK; @@ -3167,7 +3190,7 @@ static void icl_update_active_dpll(struct intel_atomic_state *state, static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); 
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct icl_port_dpll *port_dpll = @@ -3184,12 +3207,12 @@ static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state, if (ret) return ret; - icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state); + icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state); /* this is mainly for the fastset check */ icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT); - crtc_state->port_clock = icl_ddi_combo_pll_get_freq(dev_priv, NULL, + crtc_state->port_clock = icl_ddi_combo_pll_get_freq(i915, NULL, &port_dpll->hw_state); return 0; @@ -3199,7 +3222,7 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct icl_port_dpll *port_dpll = @@ -3207,13 +3230,13 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state, enum port port = encoder->port; unsigned long dpll_mask; - if (IS_ALDERLAKE_S(dev_priv)) { + if (IS_ALDERLAKE_S(i915)) { dpll_mask = BIT(DPLL_ID_DG1_DPLL3) | BIT(DPLL_ID_DG1_DPLL2) | BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0); - } else if (IS_DG1(dev_priv)) { + } else if (IS_DG1(i915)) { if (port == PORT_D || port == PORT_E) { dpll_mask = BIT(DPLL_ID_DG1_DPLL2) | @@ -3223,13 +3246,13 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state, BIT(DPLL_ID_DG1_DPLL0) | BIT(DPLL_ID_DG1_DPLL1); } - } else if (IS_ROCKETLAKE(dev_priv)) { + } else if (IS_ROCKETLAKE(i915)) { dpll_mask = BIT(DPLL_ID_EHL_DPLL4) | BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0); - } else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) && - port != PORT_A) { + } else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) && + port != PORT_A) { dpll_mask = BIT(DPLL_ID_EHL_DPLL4) | BIT(DPLL_ID_ICL_DPLL1) | @@ -3239,7 +3262,7 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state, } /* Eliminate DPLLs from consideration if reserved by HTI */ - dpll_mask &= ~intel_hti_dpll_mask(dev_priv); + dpll_mask &= ~intel_hti_dpll_mask(i915); port_dpll->pll = intel_find_shared_dpll(state, crtc, &port_dpll->hw_state, @@ -3258,7 +3281,7 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state, static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct icl_port_dpll *port_dpll = @@ -3271,7 +3294,7 @@ static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state, if (ret) return ret; - icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state); + icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state); port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY]; ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state); @@ -3281,7 +3304,7 @@ static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state, /* this is mainly for the fastset check */ icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY); - crtc_state->port_clock = icl_ddi_mg_pll_get_freq(dev_priv, NULL, + crtc_state->port_clock = icl_ddi_mg_pll_get_freq(i915, NULL, &port_dpll->hw_state); return 0; @@ -3291,7 +3314,7 @@ static int 
icl_get_tc_phy_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct icl_port_dpll *port_dpll = @@ -3310,7 +3333,7 @@ static int icl_get_tc_phy_dplls(struct intel_atomic_state *state, port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY]; - dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, + dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(i915, encoder->port)); port_dpll->pll = intel_find_shared_dpll(state, crtc, &port_dpll->hw_state, @@ -3337,12 +3360,12 @@ static int icl_compute_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + struct drm_i915_private *i915 = to_i915(state->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); - if (intel_phy_is_combo(dev_priv, phy)) + if (intel_phy_is_combo(i915, phy)) return icl_compute_combo_phy_dpll(state, crtc); - else if (intel_phy_is_tc(dev_priv, phy)) + else if (intel_phy_is_tc(i915, phy)) return icl_compute_tc_phy_dplls(state, crtc); MISSING_CASE(phy); @@ -3354,12 +3377,12 @@ static int icl_get_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + struct drm_i915_private *i915 = to_i915(state->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); - if (intel_phy_is_combo(dev_priv, phy)) + if (intel_phy_is_combo(i915, phy)) return icl_get_combo_phy_dpll(state, crtc, encoder); - else if (intel_phy_is_tc(dev_priv, phy)) + else if (intel_phy_is_tc(i915, phy)) return icl_get_tc_phy_dplls(state, crtc, encoder); MISSING_CASE(phy); @@ -3393,7 +3416,7 @@ static void icl_put_dplls(struct intel_atomic_state *state, } } -static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv, +static bool mg_pll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { @@ -3403,46 +3426,46 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv, bool ret = false; u32 val; - i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll); + i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll); - wakeref = intel_display_power_get_if_enabled(dev_priv, + wakeref = intel_display_power_get_if_enabled(i915, POWER_DOMAIN_DISPLAY_CORE); if (!wakeref) return false; - val = intel_de_read(dev_priv, enable_reg); + val = intel_de_read(i915, enable_reg); if (!(val & PLL_ENABLE)) goto out; - hw_state->mg_refclkin_ctl = intel_de_read(dev_priv, + hw_state->mg_refclkin_ctl = intel_de_read(i915, MG_REFCLKIN_CTL(tc_port)); hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK; hw_state->mg_clktop2_coreclkctl1 = - intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port)); + intel_de_read(i915, MG_CLKTOP2_CORECLKCTL1(tc_port)); hw_state->mg_clktop2_coreclkctl1 &= MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; hw_state->mg_clktop2_hsclkctl = - intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port)); + intel_de_read(i915, MG_CLKTOP2_HSCLKCTL(tc_port)); hw_state->mg_clktop2_hsclkctl &= MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | 
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK; - hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port)); - hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port)); - hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port)); - hw_state->mg_pll_frac_lock = intel_de_read(dev_priv, + hw_state->mg_pll_div0 = intel_de_read(i915, MG_PLL_DIV0(tc_port)); + hw_state->mg_pll_div1 = intel_de_read(i915, MG_PLL_DIV1(tc_port)); + hw_state->mg_pll_lf = intel_de_read(i915, MG_PLL_LF(tc_port)); + hw_state->mg_pll_frac_lock = intel_de_read(i915, MG_PLL_FRAC_LOCK(tc_port)); - hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port)); + hw_state->mg_pll_ssc = intel_de_read(i915, MG_PLL_SSC(tc_port)); - hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port)); + hw_state->mg_pll_bias = intel_de_read(i915, MG_PLL_BIAS(tc_port)); hw_state->mg_pll_tdc_coldst_bias = - intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port)); + intel_de_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port)); - if (dev_priv->display.dpll.ref_clks.nssc == 38400) { + if (i915->display.dpll.ref_clks.nssc == 38400) { hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART; hw_state->mg_pll_bias_mask = 0; } else { @@ -3455,11 +3478,11 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv, ret = true; out: - intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); + intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref); return ret; } -static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv, +static bool dkl_pll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { @@ -3469,12 +3492,12 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv, bool ret = false; u32 val; - wakeref = intel_display_power_get_if_enabled(dev_priv, + wakeref = intel_display_power_get_if_enabled(i915, POWER_DOMAIN_DISPLAY_CORE); if (!wakeref) return false; - val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll)); + val = intel_de_read(i915, intel_tc_pll_enable_reg(i915, pll)); if (!(val & PLL_ENABLE)) goto out; @@ -3482,12 +3505,12 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv, * All registers read here have the same HIP_INDEX_REG even though * they are on different building blocks */ - hw_state->mg_refclkin_ctl = intel_dkl_phy_read(dev_priv, + hw_state->mg_refclkin_ctl = intel_dkl_phy_read(i915, DKL_REFCLKIN_CTL(tc_port)); hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK; hw_state->mg_clktop2_hsclkctl = - intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port)); + intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port)); hw_state->mg_clktop2_hsclkctl &= MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | @@ -3495,42 +3518,42 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv, MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK; hw_state->mg_clktop2_coreclkctl1 = - intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port)); + intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port)); hw_state->mg_clktop2_coreclkctl1 &= MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; - hw_state->mg_pll_div0 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV0(tc_port)); + hw_state->mg_pll_div0 = intel_dkl_phy_read(i915, DKL_PLL_DIV0(tc_port)); val = DKL_PLL_DIV0_MASK; - if (dev_priv->display.vbt.override_afc_startup) + if (i915->display.vbt.override_afc_startup) val |= 
DKL_PLL_DIV0_AFC_STARTUP_MASK; hw_state->mg_pll_div0 &= val; - hw_state->mg_pll_div1 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port)); + hw_state->mg_pll_div1 = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port)); hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK | DKL_PLL_DIV1_TDC_TARGET_CNT_MASK); - hw_state->mg_pll_ssc = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port)); + hw_state->mg_pll_ssc = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port)); hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK | DKL_PLL_SSC_STEP_LEN_MASK | DKL_PLL_SSC_STEP_NUM_MASK | DKL_PLL_SSC_EN); - hw_state->mg_pll_bias = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port)); + hw_state->mg_pll_bias = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port)); hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H | DKL_PLL_BIAS_FBDIV_FRAC_MASK); hw_state->mg_pll_tdc_coldst_bias = - intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port)); + intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port)); hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK | DKL_PLL_TDC_FEED_FWD_GAIN_MASK); ret = true; out: - intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); + intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref); return ret; } -static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, +static bool icl_pll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state, i915_reg_t enable_reg) @@ -3540,94 +3563,94 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, bool ret = false; u32 val; - wakeref = intel_display_power_get_if_enabled(dev_priv, + wakeref = intel_display_power_get_if_enabled(i915, POWER_DOMAIN_DISPLAY_CORE); if (!wakeref) return false; - val = intel_de_read(dev_priv, enable_reg); + val = intel_de_read(i915, enable_reg); if (!(val & PLL_ENABLE)) goto out; - if (IS_ALDERLAKE_S(dev_priv)) { - hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id)); - hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id)); - } else if (IS_DG1(dev_priv)) { - hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id)); - hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id)); - } else if (IS_ROCKETLAKE(dev_priv)) { - hw_state->cfgcr0 = intel_de_read(dev_priv, + if (IS_ALDERLAKE_S(i915)) { + hw_state->cfgcr0 = intel_de_read(i915, ADLS_DPLL_CFGCR0(id)); + hw_state->cfgcr1 = intel_de_read(i915, ADLS_DPLL_CFGCR1(id)); + } else if (IS_DG1(i915)) { + hw_state->cfgcr0 = intel_de_read(i915, DG1_DPLL_CFGCR0(id)); + hw_state->cfgcr1 = intel_de_read(i915, DG1_DPLL_CFGCR1(id)); + } else if (IS_ROCKETLAKE(i915)) { + hw_state->cfgcr0 = intel_de_read(i915, RKL_DPLL_CFGCR0(id)); - hw_state->cfgcr1 = intel_de_read(dev_priv, + hw_state->cfgcr1 = intel_de_read(i915, RKL_DPLL_CFGCR1(id)); - } else if (DISPLAY_VER(dev_priv) >= 12) { - hw_state->cfgcr0 = intel_de_read(dev_priv, + } else if (DISPLAY_VER(i915) >= 12) { + hw_state->cfgcr0 = intel_de_read(i915, TGL_DPLL_CFGCR0(id)); - hw_state->cfgcr1 = intel_de_read(dev_priv, + hw_state->cfgcr1 = intel_de_read(i915, TGL_DPLL_CFGCR1(id)); - if (dev_priv->display.vbt.override_afc_startup) { - hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id)); + if (i915->display.vbt.override_afc_startup) { + hw_state->div0 = intel_de_read(i915, TGL_DPLL0_DIV0(id)); hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK; } } else { - if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) && + if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) && id == 
DPLL_ID_EHL_DPLL4) { - hw_state->cfgcr0 = intel_de_read(dev_priv, + hw_state->cfgcr0 = intel_de_read(i915, ICL_DPLL_CFGCR0(4)); - hw_state->cfgcr1 = intel_de_read(dev_priv, + hw_state->cfgcr1 = intel_de_read(i915, ICL_DPLL_CFGCR1(4)); } else { - hw_state->cfgcr0 = intel_de_read(dev_priv, + hw_state->cfgcr0 = intel_de_read(i915, ICL_DPLL_CFGCR0(id)); - hw_state->cfgcr1 = intel_de_read(dev_priv, + hw_state->cfgcr1 = intel_de_read(i915, ICL_DPLL_CFGCR1(id)); } } ret = true; out: - intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); + intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref); return ret; } -static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv, +static bool combo_pll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { - i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll); + i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll); - return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg); + return icl_pll_get_hw_state(i915, pll, hw_state, enable_reg); } -static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv, +static bool tbt_pll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { - return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE); + return icl_pll_get_hw_state(i915, pll, hw_state, TBT_PLL_ENABLE); } -static void icl_dpll_write(struct drm_i915_private *dev_priv, +static void icl_dpll_write(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { struct intel_dpll_hw_state *hw_state = &pll->state.hw_state; const enum intel_dpll_id id = pll->info->id; i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG; - if (IS_ALDERLAKE_S(dev_priv)) { + if (IS_ALDERLAKE_S(i915)) { cfgcr0_reg = ADLS_DPLL_CFGCR0(id); cfgcr1_reg = ADLS_DPLL_CFGCR1(id); - } else if (IS_DG1(dev_priv)) { + } else if (IS_DG1(i915)) { cfgcr0_reg = DG1_DPLL_CFGCR0(id); cfgcr1_reg = DG1_DPLL_CFGCR1(id); - } else if (IS_ROCKETLAKE(dev_priv)) { + } else if (IS_ROCKETLAKE(i915)) { cfgcr0_reg = RKL_DPLL_CFGCR0(id); cfgcr1_reg = RKL_DPLL_CFGCR1(id); - } else if (DISPLAY_VER(dev_priv) >= 12) { + } else if (DISPLAY_VER(i915) >= 12) { cfgcr0_reg = TGL_DPLL_CFGCR0(id); cfgcr1_reg = TGL_DPLL_CFGCR1(id); div0_reg = TGL_DPLL0_DIV0(id); } else { - if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) && + if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) && id == DPLL_ID_EHL_DPLL4) { cfgcr0_reg = ICL_DPLL_CFGCR0(4); cfgcr1_reg = ICL_DPLL_CFGCR1(4); @@ -3637,18 +3660,18 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv, } } - intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0); - intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1); - drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->display.vbt.override_afc_startup && + intel_de_write(i915, cfgcr0_reg, hw_state->cfgcr0); + intel_de_write(i915, cfgcr1_reg, hw_state->cfgcr1); + drm_WARN_ON_ONCE(&i915->drm, i915->display.vbt.override_afc_startup && !i915_mmio_reg_valid(div0_reg)); - if (dev_priv->display.vbt.override_afc_startup && + if (i915->display.vbt.override_afc_startup && i915_mmio_reg_valid(div0_reg)) - intel_de_rmw(dev_priv, div0_reg, + intel_de_rmw(i915, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0); - intel_de_posting_read(dev_priv, cfgcr1_reg); + intel_de_posting_read(i915, cfgcr1_reg); } -static void icl_mg_pll_write(struct drm_i915_private *dev_priv, +static void icl_mg_pll_write(struct 
drm_i915_private *i915, struct intel_shared_dpll *pll) { struct intel_dpll_hw_state *hw_state = &pll->state.hw_state; @@ -3660,38 +3683,38 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv, * during the calc/readout phase if the mask depends on some other HW * state like refclk, see icl_calc_mg_pll_state(). */ - intel_de_rmw(dev_priv, MG_REFCLKIN_CTL(tc_port), + intel_de_rmw(i915, MG_REFCLKIN_CTL(tc_port), MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl); - intel_de_rmw(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), + intel_de_rmw(i915, MG_CLKTOP2_CORECLKCTL1(tc_port), MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK, hw_state->mg_clktop2_coreclkctl1); - intel_de_rmw(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), + intel_de_rmw(i915, MG_CLKTOP2_HSCLKCTL(tc_port), MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK, hw_state->mg_clktop2_hsclkctl); - intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0); - intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1); - intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf); - intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port), + intel_de_write(i915, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0); + intel_de_write(i915, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1); + intel_de_write(i915, MG_PLL_LF(tc_port), hw_state->mg_pll_lf); + intel_de_write(i915, MG_PLL_FRAC_LOCK(tc_port), hw_state->mg_pll_frac_lock); - intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc); + intel_de_write(i915, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc); - intel_de_rmw(dev_priv, MG_PLL_BIAS(tc_port), + intel_de_rmw(i915, MG_PLL_BIAS(tc_port), hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias); - intel_de_rmw(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), + intel_de_rmw(i915, MG_PLL_TDC_COLDST_BIAS(tc_port), hw_state->mg_pll_tdc_coldst_bias_mask, hw_state->mg_pll_tdc_coldst_bias); - intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port)); + intel_de_posting_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port)); } -static void dkl_pll_write(struct drm_i915_private *dev_priv, +static void dkl_pll_write(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { struct intel_dpll_hw_state *hw_state = &pll->state.hw_state; @@ -3703,83 +3726,83 @@ static void dkl_pll_write(struct drm_i915_private *dev_priv, * though on different building block */ /* All the registers are RMW */ - val = intel_dkl_phy_read(dev_priv, DKL_REFCLKIN_CTL(tc_port)); + val = intel_dkl_phy_read(i915, DKL_REFCLKIN_CTL(tc_port)); val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK; val |= hw_state->mg_refclkin_ctl; - intel_dkl_phy_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val); + intel_dkl_phy_write(i915, DKL_REFCLKIN_CTL(tc_port), val); - val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port)); + val = intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port)); val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; val |= hw_state->mg_clktop2_coreclkctl1; - intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val); + intel_dkl_phy_write(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port), val); - val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port)); + val = intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port)); val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK); val |= hw_state->mg_clktop2_hsclkctl; - 
intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val); + intel_dkl_phy_write(i915, DKL_CLKTOP2_HSCLKCTL(tc_port), val); val = DKL_PLL_DIV0_MASK; - if (dev_priv->display.vbt.override_afc_startup) + if (i915->display.vbt.override_afc_startup) val |= DKL_PLL_DIV0_AFC_STARTUP_MASK; - intel_dkl_phy_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val, + intel_dkl_phy_rmw(i915, DKL_PLL_DIV0(tc_port), val, hw_state->mg_pll_div0); - val = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port)); + val = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port)); val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK | DKL_PLL_DIV1_TDC_TARGET_CNT_MASK); val |= hw_state->mg_pll_div1; - intel_dkl_phy_write(dev_priv, DKL_PLL_DIV1(tc_port), val); + intel_dkl_phy_write(i915, DKL_PLL_DIV1(tc_port), val); - val = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port)); + val = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port)); val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK | DKL_PLL_SSC_STEP_LEN_MASK | DKL_PLL_SSC_STEP_NUM_MASK | DKL_PLL_SSC_EN); val |= hw_state->mg_pll_ssc; - intel_dkl_phy_write(dev_priv, DKL_PLL_SSC(tc_port), val); + intel_dkl_phy_write(i915, DKL_PLL_SSC(tc_port), val); - val = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port)); + val = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port)); val &= ~(DKL_PLL_BIAS_FRAC_EN_H | DKL_PLL_BIAS_FBDIV_FRAC_MASK); val |= hw_state->mg_pll_bias; - intel_dkl_phy_write(dev_priv, DKL_PLL_BIAS(tc_port), val); + intel_dkl_phy_write(i915, DKL_PLL_BIAS(tc_port), val); - val = intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port)); + val = intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port)); val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK | DKL_PLL_TDC_FEED_FWD_GAIN_MASK); val |= hw_state->mg_pll_tdc_coldst_bias; - intel_dkl_phy_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val); + intel_dkl_phy_write(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port), val); - intel_dkl_phy_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port)); + intel_dkl_phy_posting_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port)); } -static void icl_pll_power_enable(struct drm_i915_private *dev_priv, +static void icl_pll_power_enable(struct drm_i915_private *i915, struct intel_shared_dpll *pll, i915_reg_t enable_reg) { - intel_de_rmw(dev_priv, enable_reg, 0, PLL_POWER_ENABLE); + intel_de_rmw(i915, enable_reg, 0, PLL_POWER_ENABLE); /* * The spec says we need to "wait" but it also says it should be * immediate. */ - if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1)) - drm_err(&dev_priv->drm, "PLL %d Power not enabled\n", + if (intel_de_wait_for_set(i915, enable_reg, PLL_POWER_STATE, 1)) + drm_err(&i915->drm, "PLL %d Power not enabled\n", pll->info->id); } -static void icl_pll_enable(struct drm_i915_private *dev_priv, +static void icl_pll_enable(struct drm_i915_private *i915, struct intel_shared_dpll *pll, i915_reg_t enable_reg) { - intel_de_rmw(dev_priv, enable_reg, 0, PLL_ENABLE); + intel_de_rmw(i915, enable_reg, 0, PLL_ENABLE); /* Timeout is actually 600us. 
*/ - if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1)) - drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id); + if (intel_de_wait_for_set(i915, enable_reg, PLL_LOCK, 1)) + drm_err(&i915->drm, "PLL %d not locked\n", pll->info->id); } static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll) @@ -3806,12 +3829,12 @@ static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct inte drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val); } -static void combo_pll_enable(struct drm_i915_private *dev_priv, +static void combo_pll_enable(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { - i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll); + i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll); - if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) && + if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) && pll->info->id == DPLL_ID_EHL_DPLL4) { /* @@ -3819,13 +3842,13 @@ static void combo_pll_enable(struct drm_i915_private *dev_priv, * This can be done by taking a reference on DPLL4 power * domain. */ - pll->wakeref = intel_display_power_get(dev_priv, + pll->wakeref = intel_display_power_get(i915, POWER_DOMAIN_DC_OFF); } - icl_pll_power_enable(dev_priv, pll, enable_reg); + icl_pll_power_enable(i915, pll, enable_reg); - icl_dpll_write(dev_priv, pll); + icl_dpll_write(i915, pll); /* * DVFS pre sequence would be here, but in our driver the cdclk code @@ -3833,19 +3856,19 @@ static void combo_pll_enable(struct drm_i915_private *dev_priv, * nothing here. */ - icl_pll_enable(dev_priv, pll, enable_reg); + icl_pll_enable(i915, pll, enable_reg); - adlp_cmtg_clock_gating_wa(dev_priv, pll); + adlp_cmtg_clock_gating_wa(i915, pll); /* DVFS post sequence would be here. See the comment above. */ } -static void tbt_pll_enable(struct drm_i915_private *dev_priv, +static void tbt_pll_enable(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { - icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE); + icl_pll_power_enable(i915, pll, TBT_PLL_ENABLE); - icl_dpll_write(dev_priv, pll); + icl_dpll_write(i915, pll); /* * DVFS pre sequence would be here, but in our driver the cdclk code @@ -3853,22 +3876,22 @@ static void tbt_pll_enable(struct drm_i915_private *dev_priv, * nothing here. */ - icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE); + icl_pll_enable(i915, pll, TBT_PLL_ENABLE); /* DVFS post sequence would be here. See the comment above. */ } -static void mg_pll_enable(struct drm_i915_private *dev_priv, +static void mg_pll_enable(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { - i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll); + i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll); - icl_pll_power_enable(dev_priv, pll, enable_reg); + icl_pll_power_enable(i915, pll, enable_reg); - if (DISPLAY_VER(dev_priv) >= 12) - dkl_pll_write(dev_priv, pll); + if (DISPLAY_VER(i915) >= 12) + dkl_pll_write(i915, pll); else - icl_mg_pll_write(dev_priv, pll); + icl_mg_pll_write(i915, pll); /* * DVFS pre sequence would be here, but in our driver the cdclk code @@ -3876,12 +3899,12 @@ static void mg_pll_enable(struct drm_i915_private *dev_priv, * nothing here. */ - icl_pll_enable(dev_priv, pll, enable_reg); + icl_pll_enable(i915, pll, enable_reg); /* DVFS post sequence would be here. See the comment above. 
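Every enable path above ends the same way: set the enable (or power-enable) bit, then poll a status bit for a short timeout and log an error if it never appears; the real driver does this through intel_de_wait_for_set()/intel_de_wait_for_clear(), with an in-code note that the hardware timeout is really 600us. A rough standalone sketch of that poll step, using a fake status register in place of the MMIO read, is shown below; it is an illustration of the shape, not the driver helper itself.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_status;            /* stand-in for the PLL enable/status register */

static uint32_t read_status(void) { return fake_status; }

/* Poll until @bit is set, giving up after roughly @timeout_us iterations;
 * in kernel context each iteration would udelay(1) before retrying. */
static bool wait_for_bit(uint32_t bit, unsigned int timeout_us)
{
        while (timeout_us--) {
                if (read_status() & bit)
                        return true;
        }
        return false;
}

int main(void)
{
        fake_status = 0x80000000;       /* pretend the lock bit came up immediately */
        printf("locked: %d\n", wait_for_bit(0x80000000, 600));
        return 0;
}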
*/ } -static void icl_pll_disable(struct drm_i915_private *dev_priv, +static void icl_pll_disable(struct drm_i915_private *i915, struct intel_shared_dpll *pll, i915_reg_t enable_reg) { @@ -3893,50 +3916,50 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv, * nothing here. */ - intel_de_rmw(dev_priv, enable_reg, PLL_ENABLE, 0); + intel_de_rmw(i915, enable_reg, PLL_ENABLE, 0); /* Timeout is actually 1us. */ - if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1)) - drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id); + if (intel_de_wait_for_clear(i915, enable_reg, PLL_LOCK, 1)) + drm_err(&i915->drm, "PLL %d locked\n", pll->info->id); /* DVFS post sequence would be here. See the comment above. */ - intel_de_rmw(dev_priv, enable_reg, PLL_POWER_ENABLE, 0); + intel_de_rmw(i915, enable_reg, PLL_POWER_ENABLE, 0); /* * The spec says we need to "wait" but it also says it should be * immediate. */ - if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1)) - drm_err(&dev_priv->drm, "PLL %d Power not disabled\n", + if (intel_de_wait_for_clear(i915, enable_reg, PLL_POWER_STATE, 1)) + drm_err(&i915->drm, "PLL %d Power not disabled\n", pll->info->id); } -static void combo_pll_disable(struct drm_i915_private *dev_priv, +static void combo_pll_disable(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { - i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll); + i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll); - icl_pll_disable(dev_priv, pll, enable_reg); + icl_pll_disable(i915, pll, enable_reg); - if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) && + if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) && pll->info->id == DPLL_ID_EHL_DPLL4) - intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, + intel_display_power_put(i915, POWER_DOMAIN_DC_OFF, pll->wakeref); } -static void tbt_pll_disable(struct drm_i915_private *dev_priv, +static void tbt_pll_disable(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { - icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE); + icl_pll_disable(i915, pll, TBT_PLL_ENABLE); } -static void mg_pll_disable(struct drm_i915_private *dev_priv, +static void mg_pll_disable(struct drm_i915_private *i915, struct intel_shared_dpll *pll) { - i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll); + i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll); - icl_pll_disable(dev_priv, pll, enable_reg); + icl_pll_disable(i915, pll, enable_reg); } static void icl_update_dpll_ref_clks(struct drm_i915_private *i915) @@ -3945,10 +3968,10 @@ static void icl_update_dpll_ref_clks(struct drm_i915_private *i915) i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref; } -static void icl_dump_hw_state(struct drm_i915_private *dev_priv, +static void icl_dump_hw_state(struct drm_i915_private *i915, const struct intel_dpll_hw_state *hw_state) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, " "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, " "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, " @@ -4130,62 +4153,64 @@ static const struct intel_dpll_mgr adlp_pll_mgr = { /** * intel_shared_dpll_init - Initialize shared DPLLs - * @dev_priv: i915 device + * @i915: i915 device * - * Initialize shared DPLLs for @dev_priv. + * Initialize shared DPLLs for @i915. 
*/ -void intel_shared_dpll_init(struct drm_i915_private *dev_priv) +void intel_shared_dpll_init(struct drm_i915_private *i915) { const struct intel_dpll_mgr *dpll_mgr = NULL; const struct dpll_info *dpll_info; int i; - mutex_init(&dev_priv->display.dpll.lock); + mutex_init(&i915->display.dpll.lock); - if (DISPLAY_VER(dev_priv) >= 14 || IS_DG2(dev_priv)) + if (DISPLAY_VER(i915) >= 14 || IS_DG2(i915)) /* No shared DPLLs on DG2; port PLLs are part of the PHY */ dpll_mgr = NULL; - else if (IS_ALDERLAKE_P(dev_priv)) + else if (IS_ALDERLAKE_P(i915)) dpll_mgr = &adlp_pll_mgr; - else if (IS_ALDERLAKE_S(dev_priv)) + else if (IS_ALDERLAKE_S(i915)) dpll_mgr = &adls_pll_mgr; - else if (IS_DG1(dev_priv)) + else if (IS_DG1(i915)) dpll_mgr = &dg1_pll_mgr; - else if (IS_ROCKETLAKE(dev_priv)) + else if (IS_ROCKETLAKE(i915)) dpll_mgr = &rkl_pll_mgr; - else if (DISPLAY_VER(dev_priv) >= 12) + else if (DISPLAY_VER(i915) >= 12) dpll_mgr = &tgl_pll_mgr; - else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) + else if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) dpll_mgr = &ehl_pll_mgr; - else if (DISPLAY_VER(dev_priv) >= 11) + else if (DISPLAY_VER(i915) >= 11) dpll_mgr = &icl_pll_mgr; - else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) dpll_mgr = &bxt_pll_mgr; - else if (DISPLAY_VER(dev_priv) == 9) + else if (DISPLAY_VER(i915) == 9) dpll_mgr = &skl_pll_mgr; - else if (HAS_DDI(dev_priv)) + else if (HAS_DDI(i915)) dpll_mgr = &hsw_pll_mgr; - else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) + else if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915)) dpll_mgr = &pch_pll_mgr; - if (!dpll_mgr) { - dev_priv->display.dpll.num_shared_dpll = 0; + if (!dpll_mgr) return; - } dpll_info = dpll_mgr->dpll_info; for (i = 0; dpll_info[i].name; i++) { - if (drm_WARN_ON(&dev_priv->drm, - i >= ARRAY_SIZE(dev_priv->display.dpll.shared_dplls))) + if (drm_WARN_ON(&i915->drm, + i >= ARRAY_SIZE(i915->display.dpll.shared_dplls))) + break; + + /* must fit into unsigned long bitmask on 32bit */ + if (drm_WARN_ON(&i915->drm, dpll_info[i].id >= 32)) break; - drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id); - dev_priv->display.dpll.shared_dplls[i].info = &dpll_info[i]; + i915->display.dpll.shared_dplls[i].info = &dpll_info[i]; + i915->display.dpll.shared_dplls[i].index = i; } - dev_priv->display.dpll.mgr = dpll_mgr; - dev_priv->display.dpll.num_shared_dpll = i; + i915->display.dpll.mgr = dpll_mgr; + i915->display.dpll.num_shared_dpll = i; } /** @@ -4206,10 +4231,10 @@ int intel_compute_shared_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr; + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr; - if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr)) + if (drm_WARN_ON(&i915->drm, !dpll_mgr)) return -EINVAL; return dpll_mgr->compute_dplls(state, crtc, encoder); @@ -4239,10 +4264,10 @@ int intel_reserve_shared_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr; + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr; - if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr)) + if (drm_WARN_ON(&i915->drm, 
!dpll_mgr)) return -EINVAL; return dpll_mgr->get_dplls(state, crtc, encoder); @@ -4262,8 +4287,8 @@ int intel_reserve_shared_dplls(struct intel_atomic_state *state, void intel_release_shared_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr; + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr; /* * FIXME: this function is called for every platform having a @@ -4291,10 +4316,10 @@ void intel_update_active_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr; - if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr)) + if (drm_WARN_ON(&i915->drm, !dpll_mgr)) return; dpll_mgr->update_active_dpll(state, crtc, encoder); @@ -4370,10 +4395,11 @@ void intel_dpll_update_ref_clks(struct drm_i915_private *i915) void intel_dpll_readout_hw_state(struct drm_i915_private *i915) { + struct intel_shared_dpll *pll; int i; - for (i = 0; i < i915->display.dpll.num_shared_dpll; i++) - readout_dpll_hw_state(i915, &i915->display.dpll.shared_dplls[i]); + for_each_shared_dpll(i915, pll, i) + readout_dpll_hw_state(i915, pll); } static void sanitize_dpll_state(struct drm_i915_private *i915, @@ -4397,29 +4423,30 @@ static void sanitize_dpll_state(struct drm_i915_private *i915, void intel_dpll_sanitize_state(struct drm_i915_private *i915) { + struct intel_shared_dpll *pll; int i; - for (i = 0; i < i915->display.dpll.num_shared_dpll; i++) - sanitize_dpll_state(i915, &i915->display.dpll.shared_dplls[i]); + for_each_shared_dpll(i915, pll, i) + sanitize_dpll_state(i915, pll); } /** * intel_dpll_dump_hw_state - write hw_state to dmesg - * @dev_priv: i915 drm device + * @i915: i915 drm device * @hw_state: hw state to be written to the log * * Write the relevant values in @hw_state to dmesg using drm_dbg_kms. 
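The reworked intel_shared_dpll_init() above now warns and bails out for DPLL ids of 32 or more, with the in-diff comment "must fit into unsigned long bitmask on 32bit": the ids end up as bit positions in BIT()-built masks (pch_ssc_use, dpll_mask, and similar), and shifting 1UL by 32 or more is undefined on a 32-bit unsigned long. A minimal standalone illustration of that constraint, using a BIT() macro of the same shape as the kernel's:

#include <stdio.h>

#define BIT(nr) (1UL << (nr))   /* same shape as the kernel's BIT() macro */

int main(void)
{
        unsigned long mask = 0;
        int id;

        /* ids 0..31 are always representable, even when unsigned long is 32 bits */
        for (id = 0; id < 32; id++)
                mask |= BIT(id);

        /* BIT(32) would shift past the top bit on such a build, which is
         * undefined behaviour in C - hence the new WARN + break in init. */
        printf("mask with ids 0..31 set: %#lx\n", mask);
        return 0;
}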
*/ -void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, +void intel_dpll_dump_hw_state(struct drm_i915_private *i915, const struct intel_dpll_hw_state *hw_state) { - if (dev_priv->display.dpll.mgr) { - dev_priv->display.dpll.mgr->dump_hw_state(dev_priv, hw_state); + if (i915->display.dpll.mgr) { + i915->display.dpll.mgr->dump_hw_state(i915, hw_state); } else { /* fallback for platforms that don't use the shared dpll * infrastructure */ - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " "fp0: 0x%x, fp1: 0x%x\n", hw_state->dpll, @@ -4430,10 +4457,10 @@ void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, } static void -verify_single_dpll_state(struct drm_i915_private *dev_priv, +verify_single_dpll_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, struct intel_crtc *crtc, - struct intel_crtc_state *new_crtc_state) + const struct intel_crtc_state *new_crtc_state) { struct intel_dpll_hw_state dpll_hw_state; u8 pipe_mask; @@ -4441,22 +4468,22 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv, memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); - drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name); + drm_dbg_kms(&i915->drm, "%s\n", pll->info->name); - active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state); + active = intel_dpll_get_hw_state(i915, pll, &dpll_hw_state); if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) { - I915_STATE_WARN(dev_priv, !pll->on && pll->active_mask, + I915_STATE_WARN(i915, !pll->on && pll->active_mask, "pll in active use but not on in sw tracking\n"); - I915_STATE_WARN(dev_priv, pll->on && !pll->active_mask, + I915_STATE_WARN(i915, pll->on && !pll->active_mask, "pll is on but not used by any active pipe\n"); - I915_STATE_WARN(dev_priv, pll->on != active, + I915_STATE_WARN(i915, pll->on != active, "pll on state mismatch (expected %i, found %i)\n", pll->on, active); } if (!crtc) { - I915_STATE_WARN(dev_priv, + I915_STATE_WARN(i915, pll->active_mask & ~pll->state.pipe_mask, "more active pll users than references: 0x%x vs 0x%x\n", pll->active_mask, pll->state.pipe_mask); @@ -4467,32 +4494,35 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv, pipe_mask = BIT(crtc->pipe); if (new_crtc_state->hw.active) - I915_STATE_WARN(dev_priv, !(pll->active_mask & pipe_mask), + I915_STATE_WARN(i915, !(pll->active_mask & pipe_mask), "pll active mismatch (expected pipe %c in active mask 0x%x)\n", pipe_name(crtc->pipe), pll->active_mask); else - I915_STATE_WARN(dev_priv, pll->active_mask & pipe_mask, + I915_STATE_WARN(i915, pll->active_mask & pipe_mask, "pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n", pipe_name(crtc->pipe), pll->active_mask); - I915_STATE_WARN(dev_priv, !(pll->state.pipe_mask & pipe_mask), + I915_STATE_WARN(i915, !(pll->state.pipe_mask & pipe_mask), "pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n", pipe_mask, pll->state.pipe_mask); - I915_STATE_WARN(dev_priv, + I915_STATE_WARN(i915, pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state, sizeof(dpll_hw_state)), "pll hw state mismatch\n"); } -void intel_shared_dpll_state_verify(struct intel_crtc *crtc, - struct intel_crtc_state *old_crtc_state, - struct intel_crtc_state *new_crtc_state) +void intel_shared_dpll_state_verify(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_crtc_state *old_crtc_state = + 
intel_atomic_get_old_crtc_state(state, crtc); + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); if (new_crtc_state->shared_dpll) - verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, + verify_single_dpll_state(i915, new_crtc_state->shared_dpll, crtc, new_crtc_state); if (old_crtc_state->shared_dpll && @@ -4500,20 +4530,21 @@ void intel_shared_dpll_state_verify(struct intel_crtc *crtc, u8 pipe_mask = BIT(crtc->pipe); struct intel_shared_dpll *pll = old_crtc_state->shared_dpll; - I915_STATE_WARN(dev_priv, pll->active_mask & pipe_mask, + I915_STATE_WARN(i915, pll->active_mask & pipe_mask, "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n", pipe_name(crtc->pipe), pll->active_mask); - I915_STATE_WARN(dev_priv, pll->state.pipe_mask & pipe_mask, + I915_STATE_WARN(i915, pll->state.pipe_mask & pipe_mask, "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n", pipe_name(crtc->pipe), pll->state.pipe_mask); } } -void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915) +void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state) { + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_shared_dpll *pll; int i; - for (i = 0; i < i915->display.dpll.num_shared_dpll; i++) - verify_single_dpll_state(i915, &i915->display.dpll.shared_dplls[i], - NULL, NULL); + for_each_shared_dpll(i915, pll, i) + verify_single_dpll_state(i915, pll, NULL, NULL); } diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index 04e6810954b2..dd4796a61751 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -29,6 +29,10 @@ #include "intel_wakeref.h" +#define for_each_shared_dpll(__i915, __pll, __i) \ + for ((__i) = 0; (__i) < (__i915)->display.dpll.num_shared_dpll && \ + ((__pll) = &(__i915)->display.dpll.shared_dplls[(__i)]) ; (__i)++) + enum tc_port; struct drm_i915_private; struct intel_atomic_state; @@ -262,8 +266,7 @@ struct dpll_info { const struct intel_shared_dpll_funcs *funcs; /** - * @id: unique indentifier for this DPLL; should match the index in the - * dev_priv->shared_dplls array + * @id: unique indentifier for this DPLL */ enum intel_dpll_id id; @@ -291,6 +294,11 @@ struct intel_shared_dpll { struct intel_shared_dpll_state state; /** + * @index: index for atomic state + */ + u8 index; + + /** * @active_mask: mask of active pipes (i.e. 
DPMS on) using this DPLL */ u8 active_mask; @@ -319,9 +327,9 @@ struct intel_shared_dpll { /* shared dpll functions */ struct intel_shared_dpll * -intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv, +intel_get_shared_dpll_by_id(struct drm_i915_private *i915, enum intel_dpll_id id); -void assert_shared_dpll(struct drm_i915_private *dev_priv, +void assert_shared_dpll(struct drm_i915_private *i915, struct intel_shared_dpll *pll, bool state); #define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true) @@ -351,19 +359,18 @@ bool intel_dpll_get_hw_state(struct drm_i915_private *i915, void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_shared_dpll_swap_state(struct intel_atomic_state *state); -void intel_shared_dpll_init(struct drm_i915_private *dev_priv); -void intel_dpll_update_ref_clks(struct drm_i915_private *dev_priv); -void intel_dpll_readout_hw_state(struct drm_i915_private *dev_priv); -void intel_dpll_sanitize_state(struct drm_i915_private *dev_priv); +void intel_shared_dpll_init(struct drm_i915_private *i915); +void intel_dpll_update_ref_clks(struct drm_i915_private *i915); +void intel_dpll_readout_hw_state(struct drm_i915_private *i915); +void intel_dpll_sanitize_state(struct drm_i915_private *i915); -void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, +void intel_dpll_dump_hw_state(struct drm_i915_private *i915, const struct intel_dpll_hw_state *hw_state); enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port); bool intel_dpll_is_combophy(enum intel_dpll_id id); -void intel_shared_dpll_state_verify(struct intel_crtc *crtc, - struct intel_crtc_state *old_crtc_state, - struct intel_crtc_state *new_crtc_state); -void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915); +void intel_shared_dpll_state_verify(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state); #endif /* _INTEL_DPLL_MGR_H_ */ diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index e56ec3f2d84a..24b2cbcfc1ef 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -565,6 +565,9 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data) u8 payload_size = *(data + 6); u8 *payload_data; + drm_dbg_kms(&i915->drm, "bus %d client-addr 0x%02x reg 0x%02x data %*ph\n", + vbt_i2c_bus_num, slave_addr, reg_offset, payload_size, data + 7); + if (intel_dsi->i2c_bus_num < 0) { intel_dsi->i2c_bus_num = vbt_i2c_bus_num; i2c_acpi_find_adapter(intel_dsi, slave_addr); diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c index d9f427856fb8..55d6743374bd 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.c +++ b/drivers/gpu/drm/i915/display/intel_dvo.c @@ -319,7 +319,7 @@ intel_dvo_detect(struct drm_connector *_connector, bool force) drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n", connector->base.base.id, connector->base.name); - if (!INTEL_DISPLAY_ENABLED(i915)) + if (!intel_display_device_enabled(i915)) return connector_status_disconnected; return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev); diff --git a/drivers/gpu/drm/i915/display/intel_dvo.h b/drivers/gpu/drm/i915/display/intel_dvo.h index 3ed0fdf8efff..bf7a356422ab 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.h +++ b/drivers/gpu/drm/i915/display/intel_dvo.h @@ -8,6 
+8,12 @@ struct drm_i915_private; +#ifdef I915 void intel_dvo_init(struct drm_i915_private *dev_priv); +#else +static inline void intel_dvo_init(struct drm_i915_private *dev_priv) +{ +} +#endif #endif /* __INTEL_DVO_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index e7678571b0d7..19b35ece31f1 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -1117,7 +1117,7 @@ static int intel_fb_offset_to_xy(int *x, int *y, return -EINVAL; } - height = drm_framebuffer_plane_height(fb->height, fb, color_plane); + height = drm_format_info_plane_height(fb->format, fb->height, color_plane); height = ALIGN(height, intel_tile_height(fb, color_plane)); /* Catch potential overflows early */ diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 1cb9eec29640..4820d21cc942 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -593,6 +593,9 @@ static u32 ivb_dpfc_ctl(struct intel_fbc *fbc) if (IS_IVYBRIDGE(i915)) dpfc_ctl |= DPFC_CTL_PLANE_IVB(fbc_state->plane->i9xx_plane); + if (DISPLAY_VER(i915) >= 20) + dpfc_ctl |= DPFC_CTL_PLANE_BINDING(fbc_state->plane->id); + if (fbc_state->fence_id >= 0) dpfc_ctl |= DPFC_CTL_FENCE_EN_IVB; @@ -850,39 +853,64 @@ void intel_fbc_cleanup(struct drm_i915_private *i915) } } -static bool stride_is_valid(const struct intel_plane_state *plane_state) +static bool i8xx_fbc_stride_is_valid(const struct intel_plane_state *plane_state) { - struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int stride = intel_fbc_plane_stride(plane_state) * fb->format->cpp[0]; - /* This should have been caught earlier. */ - if (drm_WARN_ON_ONCE(&i915->drm, (stride & (64 - 1)) != 0)) - return false; + return stride == 4096 || stride == 8192; +} - /* Below are the additional FBC restrictions. 
*/ - if (stride < 512) - return false; +static bool i965_fbc_stride_is_valid(const struct intel_plane_state *plane_state) +{ + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int stride = intel_fbc_plane_stride(plane_state) * + fb->format->cpp[0]; - if (DISPLAY_VER(i915) == 2 || DISPLAY_VER(i915) == 3) - return stride == 4096 || stride == 8192; + return stride >= 2048 && stride <= 16384; +} - if (DISPLAY_VER(i915) == 4 && !IS_G4X(i915) && stride < 2048) - return false; +static bool g4x_fbc_stride_is_valid(const struct intel_plane_state *plane_state) +{ + return true; +} + +static bool skl_fbc_stride_is_valid(const struct intel_plane_state *plane_state) +{ + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int stride = intel_fbc_plane_stride(plane_state) * + fb->format->cpp[0]; /* Display WA #1105: skl,bxt,kbl,cfl,glk */ - if ((DISPLAY_VER(i915) == 9 || IS_GEMINILAKE(i915)) && - fb->modifier == DRM_FORMAT_MOD_LINEAR && stride & 511) + if (fb->modifier == DRM_FORMAT_MOD_LINEAR && stride & 511) return false; - if (stride > 16384) - return false; + return true; +} +static bool icl_fbc_stride_is_valid(const struct intel_plane_state *plane_state) +{ return true; } -static bool pixel_format_is_valid(const struct intel_plane_state *plane_state) +static bool stride_is_valid(const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + + if (DISPLAY_VER(i915) >= 11) + return icl_fbc_stride_is_valid(plane_state); + else if (DISPLAY_VER(i915) >= 9) + return skl_fbc_stride_is_valid(plane_state); + else if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915)) + return g4x_fbc_stride_is_valid(plane_state); + else if (DISPLAY_VER(i915) == 4) + return i965_fbc_stride_is_valid(plane_state); + else + return i8xx_fbc_stride_is_valid(plane_state); +} + +static bool i8xx_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state) { struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; @@ -896,6 +924,22 @@ static bool pixel_format_is_valid(const struct intel_plane_state *plane_state) /* 16bpp not supported on gen2 */ if (DISPLAY_VER(i915) == 2) return false; + return true; + default: + return false; + } +} + +static bool g4x_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + + switch (fb->format->format) { + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + return true; + case DRM_FORMAT_RGB565: /* WaFbcOnly1to1Ratio:ctg */ if (IS_G4X(i915)) return false; @@ -905,22 +949,68 @@ static bool pixel_format_is_valid(const struct intel_plane_state *plane_state) } } -static bool rotation_is_valid(const struct intel_plane_state *plane_state) +static bool lnl_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state) +{ + const struct drm_framebuffer *fb = plane_state->hw.fb; + + switch (fb->format->format) { + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGB565: + return true; + default: + return false; + } +} + +static bool pixel_format_is_valid(const struct intel_plane_state *plane_state) { struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + + if (DISPLAY_VER(i915) >= 20) + return lnl_fbc_pixel_format_is_valid(plane_state); + else if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915)) + return 
g4x_fbc_pixel_format_is_valid(plane_state); + else + return i8xx_fbc_pixel_format_is_valid(plane_state); +} + +static bool i8xx_fbc_rotation_is_valid(const struct intel_plane_state *plane_state) +{ + return plane_state->hw.rotation == DRM_MODE_ROTATE_0; +} + +static bool g4x_fbc_rotation_is_valid(const struct intel_plane_state *plane_state) +{ + return true; +} + +static bool skl_fbc_rotation_is_valid(const struct intel_plane_state *plane_state) +{ const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int rotation = plane_state->hw.rotation; - if (DISPLAY_VER(i915) >= 9 && fb->format->format == DRM_FORMAT_RGB565 && + if (fb->format->format == DRM_FORMAT_RGB565 && drm_rotation_90_or_270(rotation)) return false; - else if (DISPLAY_VER(i915) <= 4 && !IS_G4X(i915) && - rotation != DRM_MODE_ROTATE_0) - return false; return true; } +static bool rotation_is_valid(const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + + if (DISPLAY_VER(i915) >= 9) + return skl_fbc_rotation_is_valid(plane_state); + else if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915)) + return g4x_fbc_rotation_is_valid(plane_state); + else + return i8xx_fbc_rotation_is_valid(plane_state); +} + /* * For some reason, the hardware tracking starts looking at whatever we * programmed as the display plane base address register. It does not look at @@ -954,16 +1044,21 @@ static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state * return effective_w <= max_w && effective_h <= max_h; } -static bool tiling_is_valid(const struct intel_plane_state *plane_state) +static bool i8xx_fbc_tiling_valid(const struct intel_plane_state *plane_state) +{ + const struct drm_framebuffer *fb = plane_state->hw.fb; + + return fb->modifier == I915_FORMAT_MOD_X_TILED; +} + +static bool skl_fbc_tiling_valid(const struct intel_plane_state *plane_state) { - struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; switch (fb->modifier) { case DRM_FORMAT_MOD_LINEAR: case I915_FORMAT_MOD_Y_TILED: case I915_FORMAT_MOD_Yf_TILED: - return DISPLAY_VER(i915) >= 9; case I915_FORMAT_MOD_4_TILED: case I915_FORMAT_MOD_X_TILED: return true; @@ -972,6 +1067,16 @@ static bool tiling_is_valid(const struct intel_plane_state *plane_state) } } +static bool tiling_is_valid(const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + + if (DISPLAY_VER(i915) >= 9) + return skl_fbc_tiling_valid(plane_state); + else + return i8xx_fbc_tiling_valid(plane_state); +} + static void intel_fbc_update_state(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_plane *plane) @@ -1129,7 +1234,8 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, return 0; } - if (plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE && + if (DISPLAY_VER(i915) < 20 && + plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE && fb->format->has_alpha) { plane_state->no_fbc_reason = "per-pixel alpha not supported"; return 0; diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c index 412a19a888a2..ec46716b2f49 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c @@ -56,6 +56,7 @@ */ #include "gem/i915_gem_object_frontbuffer.h" +#include "i915_active.h" #include "i915_drv.h" #include "intel_display_trace.h" #include 
"intel_display_types.h" diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 8cca4793cf92..c89da3568ebd 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -173,14 +173,8 @@ bool intel_hdcp2_capable(struct intel_connector *connector) /* If MTL+ make sure gsc is loaded and proxy is setup */ if (intel_hdcp_gsc_cs_required(i915)) { - struct intel_gt *gt = i915->media_gt; - struct intel_gsc_uc *gsc = gt ? >->uc.gsc : NULL; - - if (!gsc || !intel_uc_fw_is_running(&gsc->fw)) { - drm_dbg_kms(&i915->drm, - "GSC components required for HDCP2.2 are not ready\n"); + if (!intel_hdcp_gsc_check_status(i915)) return false; - } } /* MEI/GSC interface is solid depending on which is used */ diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c index d753db3eef15..18117b789b16 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c @@ -11,610 +11,27 @@ #include "i915_drv.h" #include "i915_utils.h" #include "intel_hdcp_gsc.h" +#include "intel_hdcp_gsc_message.h" bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915) { return DISPLAY_VER(i915) >= 14; } -static int -gsc_hdcp_initiate_session(struct device *dev, struct hdcp_port_data *data, - struct hdcp2_ake_init *ake_data) +bool intel_hdcp_gsc_check_status(struct drm_i915_private *i915) { - struct wired_cmd_initiate_hdcp2_session_in session_init_in = { { 0 } }; - struct wired_cmd_initiate_hdcp2_session_out - session_init_out = { { 0 } }; - struct drm_i915_private *i915; - ssize_t byte; - - if (!dev || !data || !ake_data) - return -EINVAL; - - i915 = kdev_to_i915(dev); - if (!i915) { - dev_err(dev, "DRM not initialized, aborting HDCP.\n"); - return -ENODEV; - } - - session_init_in.header.api_version = HDCP_API_VERSION; - session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION; - session_init_in.header.status = FW_HDCP_STATUS_SUCCESS; - session_init_in.header.buffer_len = - WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN; - - session_init_in.port.integrated_port_type = data->port_type; - session_init_in.port.physical_port = (u8)data->hdcp_ddi; - session_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder; - session_init_in.protocol = data->protocol; - - byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_init_in, - sizeof(session_init_in), - (u8 *)&session_init_out, - sizeof(session_init_out)); - if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); - return byte; - } - - if (session_init_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. 
Status: 0x%X\n", - WIRED_INITIATE_HDCP2_SESSION, - session_init_out.header.status); - return -EIO; - } - - ake_data->msg_id = HDCP_2_2_AKE_INIT; - ake_data->tx_caps = session_init_out.tx_caps; - memcpy(ake_data->r_tx, session_init_out.r_tx, HDCP_2_2_RTX_LEN); - - return 0; -} - -static int -gsc_hdcp_verify_receiver_cert_prepare_km(struct device *dev, - struct hdcp_port_data *data, - struct hdcp2_ake_send_cert *rx_cert, - bool *km_stored, - struct hdcp2_ake_no_stored_km - *ek_pub_km, - size_t *msg_sz) -{ - struct wired_cmd_verify_receiver_cert_in verify_rxcert_in = { { 0 } }; - struct wired_cmd_verify_receiver_cert_out verify_rxcert_out = { { 0 } }; - struct drm_i915_private *i915; - ssize_t byte; - - if (!dev || !data || !rx_cert || !km_stored || !ek_pub_km || !msg_sz) - return -EINVAL; - - i915 = kdev_to_i915(dev); - if (!i915) { - dev_err(dev, "DRM not initialized, aborting HDCP.\n"); - return -ENODEV; - } - - verify_rxcert_in.header.api_version = HDCP_API_VERSION; - verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT; - verify_rxcert_in.header.status = FW_HDCP_STATUS_SUCCESS; - verify_rxcert_in.header.buffer_len = - WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN; - - verify_rxcert_in.port.integrated_port_type = data->port_type; - verify_rxcert_in.port.physical_port = (u8)data->hdcp_ddi; - verify_rxcert_in.port.attached_transcoder = (u8)data->hdcp_transcoder; - - verify_rxcert_in.cert_rx = rx_cert->cert_rx; - memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN); - memcpy(verify_rxcert_in.rx_caps, rx_cert->rx_caps, HDCP_2_2_RXCAPS_LEN); - - byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_rxcert_in, - sizeof(verify_rxcert_in), - (u8 *)&verify_rxcert_out, - sizeof(verify_rxcert_out)); - if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed: %zd\n", byte); - return byte; - } - - if (verify_rxcert_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. 
Status: 0x%X\n", - WIRED_VERIFY_RECEIVER_CERT, - verify_rxcert_out.header.status); - return -EIO; - } - - *km_stored = !!verify_rxcert_out.km_stored; - if (verify_rxcert_out.km_stored) { - ek_pub_km->msg_id = HDCP_2_2_AKE_STORED_KM; - *msg_sz = sizeof(struct hdcp2_ake_stored_km); - } else { - ek_pub_km->msg_id = HDCP_2_2_AKE_NO_STORED_KM; - *msg_sz = sizeof(struct hdcp2_ake_no_stored_km); - } - - memcpy(ek_pub_km->e_kpub_km, &verify_rxcert_out.ekm_buff, - sizeof(verify_rxcert_out.ekm_buff)); - - return 0; -} - -static int -gsc_hdcp_verify_hprime(struct device *dev, struct hdcp_port_data *data, - struct hdcp2_ake_send_hprime *rx_hprime) -{ - struct wired_cmd_ake_send_hprime_in send_hprime_in = { { 0 } }; - struct wired_cmd_ake_send_hprime_out send_hprime_out = { { 0 } }; - struct drm_i915_private *i915; - ssize_t byte; - - if (!dev || !data || !rx_hprime) - return -EINVAL; - - i915 = kdev_to_i915(dev); - if (!i915) { - dev_err(dev, "DRM not initialized, aborting HDCP.\n"); - return -ENODEV; - } - - send_hprime_in.header.api_version = HDCP_API_VERSION; - send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME; - send_hprime_in.header.status = FW_HDCP_STATUS_SUCCESS; - send_hprime_in.header.buffer_len = WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN; - - send_hprime_in.port.integrated_port_type = data->port_type; - send_hprime_in.port.physical_port = (u8)data->hdcp_ddi; - send_hprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder; - - memcpy(send_hprime_in.h_prime, rx_hprime->h_prime, - HDCP_2_2_H_PRIME_LEN); - - byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&send_hprime_in, - sizeof(send_hprime_in), - (u8 *)&send_hprime_out, - sizeof(send_hprime_out)); - if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); - return byte; - } - - if (send_hprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n", - WIRED_AKE_SEND_HPRIME, send_hprime_out.header.status); - return -EIO; - } - - return 0; -} - -static int -gsc_hdcp_store_pairing_info(struct device *dev, struct hdcp_port_data *data, - struct hdcp2_ake_send_pairing_info *pairing_info) -{ - struct wired_cmd_ake_send_pairing_info_in pairing_info_in = { { 0 } }; - struct wired_cmd_ake_send_pairing_info_out pairing_info_out = { { 0 } }; - struct drm_i915_private *i915; - ssize_t byte; - - if (!dev || !data || !pairing_info) - return -EINVAL; - - i915 = kdev_to_i915(dev); - if (!i915) { - dev_err(dev, "DRM not initialized, aborting HDCP.\n"); - return -ENODEV; - } - - pairing_info_in.header.api_version = HDCP_API_VERSION; - pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO; - pairing_info_in.header.status = FW_HDCP_STATUS_SUCCESS; - pairing_info_in.header.buffer_len = - WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN; - - pairing_info_in.port.integrated_port_type = data->port_type; - pairing_info_in.port.physical_port = (u8)data->hdcp_ddi; - pairing_info_in.port.attached_transcoder = (u8)data->hdcp_transcoder; - - memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km, - HDCP_2_2_E_KH_KM_LEN); - - byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&pairing_info_in, - sizeof(pairing_info_in), - (u8 *)&pairing_info_out, - sizeof(pairing_info_out)); - if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); - return byte; - } - - if (pairing_info_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. 
Status: 0x%X\n", - WIRED_AKE_SEND_PAIRING_INFO, - pairing_info_out.header.status); - return -EIO; - } - - return 0; -} - -static int -gsc_hdcp_initiate_locality_check(struct device *dev, - struct hdcp_port_data *data, - struct hdcp2_lc_init *lc_init_data) -{ - struct wired_cmd_init_locality_check_in lc_init_in = { { 0 } }; - struct wired_cmd_init_locality_check_out lc_init_out = { { 0 } }; - struct drm_i915_private *i915; - ssize_t byte; - - if (!dev || !data || !lc_init_data) - return -EINVAL; - - i915 = kdev_to_i915(dev); - if (!i915) { - dev_err(dev, "DRM not initialized, aborting HDCP.\n"); - return -ENODEV; - } - - lc_init_in.header.api_version = HDCP_API_VERSION; - lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK; - lc_init_in.header.status = FW_HDCP_STATUS_SUCCESS; - lc_init_in.header.buffer_len = WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN; - - lc_init_in.port.integrated_port_type = data->port_type; - lc_init_in.port.physical_port = (u8)data->hdcp_ddi; - lc_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder; - - byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&lc_init_in, sizeof(lc_init_in), - (u8 *)&lc_init_out, sizeof(lc_init_out)); - if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); - return byte; - } - - if (lc_init_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. status: 0x%X\n", - WIRED_INIT_LOCALITY_CHECK, lc_init_out.header.status); - return -EIO; - } - - lc_init_data->msg_id = HDCP_2_2_LC_INIT; - memcpy(lc_init_data->r_n, lc_init_out.r_n, HDCP_2_2_RN_LEN); - - return 0; -} - -static int -gsc_hdcp_verify_lprime(struct device *dev, struct hdcp_port_data *data, - struct hdcp2_lc_send_lprime *rx_lprime) -{ - struct wired_cmd_validate_locality_in verify_lprime_in = { { 0 } }; - struct wired_cmd_validate_locality_out verify_lprime_out = { { 0 } }; - struct drm_i915_private *i915; - ssize_t byte; - - if (!dev || !data || !rx_lprime) - return -EINVAL; - - i915 = kdev_to_i915(dev); - if (!i915) { - dev_err(dev, "DRM not initialized, aborting HDCP.\n"); - return -ENODEV; - } - - verify_lprime_in.header.api_version = HDCP_API_VERSION; - verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY; - verify_lprime_in.header.status = FW_HDCP_STATUS_SUCCESS; - verify_lprime_in.header.buffer_len = - WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN; - - verify_lprime_in.port.integrated_port_type = data->port_type; - verify_lprime_in.port.physical_port = (u8)data->hdcp_ddi; - verify_lprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder; - - memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime, - HDCP_2_2_L_PRIME_LEN); - - byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_lprime_in, - sizeof(verify_lprime_in), - (u8 *)&verify_lprime_out, - sizeof(verify_lprime_out)); - if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); - return byte; - } - - if (verify_lprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. 
status: 0x%X\n", - WIRED_VALIDATE_LOCALITY, - verify_lprime_out.header.status); - return -EIO; - } - - return 0; -} - -static int gsc_hdcp_get_session_key(struct device *dev, - struct hdcp_port_data *data, - struct hdcp2_ske_send_eks *ske_data) -{ - struct wired_cmd_get_session_key_in get_skey_in = { { 0 } }; - struct wired_cmd_get_session_key_out get_skey_out = { { 0 } }; - struct drm_i915_private *i915; - ssize_t byte; - - if (!dev || !data || !ske_data) - return -EINVAL; - - i915 = kdev_to_i915(dev); - if (!i915) { - dev_err(dev, "DRM not initialized, aborting HDCP.\n"); - return -ENODEV; - } - - get_skey_in.header.api_version = HDCP_API_VERSION; - get_skey_in.header.command_id = WIRED_GET_SESSION_KEY; - get_skey_in.header.status = FW_HDCP_STATUS_SUCCESS; - get_skey_in.header.buffer_len = WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN; - - get_skey_in.port.integrated_port_type = data->port_type; - get_skey_in.port.physical_port = (u8)data->hdcp_ddi; - get_skey_in.port.attached_transcoder = (u8)data->hdcp_transcoder; - - byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&get_skey_in, sizeof(get_skey_in), - (u8 *)&get_skey_out, sizeof(get_skey_out)); - if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); - return byte; - } - - if (get_skey_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n", - WIRED_GET_SESSION_KEY, get_skey_out.header.status); - return -EIO; - } - - ske_data->msg_id = HDCP_2_2_SKE_SEND_EKS; - memcpy(ske_data->e_dkey_ks, get_skey_out.e_dkey_ks, - HDCP_2_2_E_DKEY_KS_LEN); - memcpy(ske_data->riv, get_skey_out.r_iv, HDCP_2_2_RIV_LEN); - - return 0; -} - -static int -gsc_hdcp_repeater_check_flow_prepare_ack(struct device *dev, - struct hdcp_port_data *data, - struct hdcp2_rep_send_receiverid_list - *rep_topology, - struct hdcp2_rep_send_ack - *rep_send_ack) -{ - struct wired_cmd_verify_repeater_in verify_repeater_in = { { 0 } }; - struct wired_cmd_verify_repeater_out verify_repeater_out = { { 0 } }; - struct drm_i915_private *i915; - ssize_t byte; - - if (!dev || !rep_topology || !rep_send_ack || !data) - return -EINVAL; - - i915 = kdev_to_i915(dev); - if (!i915) { - dev_err(dev, "DRM not initialized, aborting HDCP.\n"); - return -ENODEV; - } - - verify_repeater_in.header.api_version = HDCP_API_VERSION; - verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER; - verify_repeater_in.header.status = FW_HDCP_STATUS_SUCCESS; - verify_repeater_in.header.buffer_len = - WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN; - - verify_repeater_in.port.integrated_port_type = data->port_type; - verify_repeater_in.port.physical_port = (u8)data->hdcp_ddi; - verify_repeater_in.port.attached_transcoder = (u8)data->hdcp_transcoder; - - memcpy(verify_repeater_in.rx_info, rep_topology->rx_info, - HDCP_2_2_RXINFO_LEN); - memcpy(verify_repeater_in.seq_num_v, rep_topology->seq_num_v, - HDCP_2_2_SEQ_NUM_LEN); - memcpy(verify_repeater_in.v_prime, rep_topology->v_prime, - HDCP_2_2_V_PRIME_HALF_LEN); - memcpy(verify_repeater_in.receiver_ids, rep_topology->receiver_ids, - HDCP_2_2_RECEIVER_IDS_MAX_LEN); - - byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_repeater_in, - sizeof(verify_repeater_in), - (u8 *)&verify_repeater_out, - sizeof(verify_repeater_out)); - if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); - return byte; - } - - if (verify_repeater_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. 
status: 0x%X\n", - WIRED_VERIFY_REPEATER, - verify_repeater_out.header.status); - return -EIO; - } - - memcpy(rep_send_ack->v, verify_repeater_out.v, - HDCP_2_2_V_PRIME_HALF_LEN); - rep_send_ack->msg_id = HDCP_2_2_REP_SEND_ACK; - - return 0; -} - -static int gsc_hdcp_verify_mprime(struct device *dev, - struct hdcp_port_data *data, - struct hdcp2_rep_stream_ready *stream_ready) -{ - struct wired_cmd_repeater_auth_stream_req_in *verify_mprime_in; - struct wired_cmd_repeater_auth_stream_req_out - verify_mprime_out = { { 0 } }; - struct drm_i915_private *i915; - ssize_t byte; - size_t cmd_size; - - if (!dev || !stream_ready || !data) - return -EINVAL; - - i915 = kdev_to_i915(dev); - if (!i915) { - dev_err(dev, "DRM not initialized, aborting HDCP.\n"); - return -ENODEV; - } - - cmd_size = struct_size(verify_mprime_in, streams, data->k); - if (cmd_size == SIZE_MAX) - return -EINVAL; - - verify_mprime_in = kzalloc(cmd_size, GFP_KERNEL); - if (!verify_mprime_in) - return -ENOMEM; - - verify_mprime_in->header.api_version = HDCP_API_VERSION; - verify_mprime_in->header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ; - verify_mprime_in->header.status = FW_HDCP_STATUS_SUCCESS; - verify_mprime_in->header.buffer_len = cmd_size - sizeof(verify_mprime_in->header); - - verify_mprime_in->port.integrated_port_type = data->port_type; - verify_mprime_in->port.physical_port = (u8)data->hdcp_ddi; - verify_mprime_in->port.attached_transcoder = (u8)data->hdcp_transcoder; - - memcpy(verify_mprime_in->m_prime, stream_ready->m_prime, HDCP_2_2_MPRIME_LEN); - drm_hdcp_cpu_to_be24(verify_mprime_in->seq_num_m, data->seq_num_m); - - memcpy(verify_mprime_in->streams, data->streams, - array_size(data->k, sizeof(*data->streams))); - - verify_mprime_in->k = cpu_to_be16(data->k); - - byte = intel_hdcp_gsc_msg_send(i915, (u8 *)verify_mprime_in, cmd_size, - (u8 *)&verify_mprime_out, - sizeof(verify_mprime_out)); - kfree(verify_mprime_in); - if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); - return byte; - } - - if (verify_mprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n", - WIRED_REPEATER_AUTH_STREAM_REQ, - verify_mprime_out.header.status); - return -EIO; - } - - return 0; -} - -static int gsc_hdcp_enable_authentication(struct device *dev, - struct hdcp_port_data *data) -{ - struct wired_cmd_enable_auth_in enable_auth_in = { { 0 } }; - struct wired_cmd_enable_auth_out enable_auth_out = { { 0 } }; - struct drm_i915_private *i915; - ssize_t byte; - - if (!dev || !data) - return -EINVAL; - - i915 = kdev_to_i915(dev); - if (!i915) { - dev_err(dev, "DRM not initialized, aborting HDCP.\n"); - return -ENODEV; - } - - enable_auth_in.header.api_version = HDCP_API_VERSION; - enable_auth_in.header.command_id = WIRED_ENABLE_AUTH; - enable_auth_in.header.status = FW_HDCP_STATUS_SUCCESS; - enable_auth_in.header.buffer_len = WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN; - - enable_auth_in.port.integrated_port_type = data->port_type; - enable_auth_in.port.physical_port = (u8)data->hdcp_ddi; - enable_auth_in.port.attached_transcoder = (u8)data->hdcp_transcoder; - enable_auth_in.stream_type = data->streams[0].stream_type; - - byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&enable_auth_in, - sizeof(enable_auth_in), - (u8 *)&enable_auth_out, - sizeof(enable_auth_out)); - if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. 
%zd\n", byte); - return byte; - } - - if (enable_auth_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n", - WIRED_ENABLE_AUTH, enable_auth_out.header.status); - return -EIO; - } - - return 0; -} - -static int -gsc_hdcp_close_session(struct device *dev, struct hdcp_port_data *data) -{ - struct wired_cmd_close_session_in session_close_in = { { 0 } }; - struct wired_cmd_close_session_out session_close_out = { { 0 } }; - struct drm_i915_private *i915; - ssize_t byte; - - if (!dev || !data) - return -EINVAL; - - i915 = kdev_to_i915(dev); - if (!i915) { - dev_err(dev, "DRM not initialized, aborting HDCP.\n"); - return -ENODEV; - } - - session_close_in.header.api_version = HDCP_API_VERSION; - session_close_in.header.command_id = WIRED_CLOSE_SESSION; - session_close_in.header.status = FW_HDCP_STATUS_SUCCESS; - session_close_in.header.buffer_len = - WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN; - - session_close_in.port.integrated_port_type = data->port_type; - session_close_in.port.physical_port = (u8)data->hdcp_ddi; - session_close_in.port.attached_transcoder = (u8)data->hdcp_transcoder; - - byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_close_in, - sizeof(session_close_in), - (u8 *)&session_close_out, - sizeof(session_close_out)); - if (byte < 0) { - drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); - return byte; - } + struct intel_gt *gt = i915->media_gt; + struct intel_gsc_uc *gsc = gt ? &gt->uc.gsc : NULL; - if (session_close_out.header.status != FW_HDCP_STATUS_SUCCESS) { - drm_dbg_kms(&i915->drm, "Session Close Failed. status: 0x%X\n", - session_close_out.header.status); - return -EIO; + if (!gsc || !intel_uc_fw_is_running(&gsc->fw)) { + drm_dbg_kms(&i915->drm, + "GSC components required for HDCP2.2 are not ready\n"); + return false; } - return 0; + return true; } -static const struct i915_hdcp_ops gsc_hdcp_ops = { - .initiate_hdcp2_session = gsc_hdcp_initiate_session, - .verify_receiver_cert_prepare_km = - gsc_hdcp_verify_receiver_cert_prepare_km, - .verify_hprime = gsc_hdcp_verify_hprime, - .store_pairing_info = gsc_hdcp_store_pairing_info, - .initiate_locality_check = gsc_hdcp_initiate_locality_check, - .verify_lprime = gsc_hdcp_verify_lprime, - .get_session_key = gsc_hdcp_get_session_key, - .repeater_check_flow_prepare_ack = - gsc_hdcp_repeater_check_flow_prepare_ack, - .verify_mprime = gsc_hdcp_verify_mprime, - .enable_hdcp_authentication = gsc_hdcp_enable_authentication, - .close_hdcp_session = gsc_hdcp_close_session, -}; - /*This function helps allocate memory for the command that we will send to gsc cs */ static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915, struct intel_hdcp_gsc_message *hdcp_message) @@ -667,6 +84,22 @@ out_unpin: return err; } +static const struct i915_hdcp_ops gsc_hdcp_ops = { + .initiate_hdcp2_session = intel_hdcp_gsc_initiate_session, + .verify_receiver_cert_prepare_km = + intel_hdcp_gsc_verify_receiver_cert_prepare_km, + .verify_hprime = intel_hdcp_gsc_verify_hprime, + .store_pairing_info = intel_hdcp_gsc_store_pairing_info, + .initiate_locality_check = intel_hdcp_gsc_initiate_locality_check, + .verify_lprime = intel_hdcp_gsc_verify_lprime, + .get_session_key = intel_hdcp_gsc_get_session_key, + .repeater_check_flow_prepare_ack = + intel_hdcp_gsc_repeater_check_flow_prepare_ack, + .verify_mprime = intel_hdcp_gsc_verify_mprime, + .enable_hdcp_authentication = intel_hdcp_gsc_enable_authentication, + .close_hdcp_session = intel_hdcp_gsc_close_session, +}; + static
int intel_hdcp_gsc_hdcp2_init(struct drm_i915_private *i915) { struct intel_hdcp_gsc_message *hdcp_message; diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h index cbf96551e534..eba2057c5a9e 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h +++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h @@ -23,5 +23,6 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in, size_t msg_out_len); int intel_hdcp_gsc_init(struct drm_i915_private *i915); void intel_hdcp_gsc_fini(struct drm_i915_private *i915); +bool intel_hdcp_gsc_check_status(struct drm_i915_private *i915); #endif /* __INTEL_HDCP_GCS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c new file mode 100644 index 000000000000..caa9f0b25729 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c @@ -0,0 +1,592 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2023, Intel Corporation. + */ + +#include <linux/err.h> +#include <drm/i915_hdcp_interface.h> + +#include "i915_drv.h" +#include "intel_hdcp_gsc_message.h" + +int +intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data, + struct hdcp2_ake_init *ake_data) +{ + struct wired_cmd_initiate_hdcp2_session_in session_init_in = { { 0 } }; + struct wired_cmd_initiate_hdcp2_session_out + session_init_out = { { 0 } }; + struct drm_i915_private *i915; + ssize_t byte; + + if (!dev || !data || !ake_data) + return -EINVAL; + + i915 = kdev_to_i915(dev); + if (!i915) { + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); + return -ENODEV; + } + + session_init_in.header.api_version = HDCP_API_VERSION; + session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION; + session_init_in.header.status = FW_HDCP_STATUS_SUCCESS; + session_init_in.header.buffer_len = + WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN; + + session_init_in.port.integrated_port_type = data->port_type; + session_init_in.port.physical_port = (u8)data->hdcp_ddi; + session_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder; + session_init_in.protocol = data->protocol; + + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_init_in, + sizeof(session_init_in), + (u8 *)&session_init_out, + sizeof(session_init_out)); + if (byte < 0) { + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + return byte; + } + + if (session_init_out.header.status != FW_HDCP_STATUS_SUCCESS) { + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. 
Status: 0x%X\n", + WIRED_INITIATE_HDCP2_SESSION, + session_init_out.header.status); + return -EIO; + } + + ake_data->msg_id = HDCP_2_2_AKE_INIT; + ake_data->tx_caps = session_init_out.tx_caps; + memcpy(ake_data->r_tx, session_init_out.r_tx, HDCP_2_2_RTX_LEN); + + return 0; +} + +int +intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ake_send_cert *rx_cert, + bool *km_stored, + struct hdcp2_ake_no_stored_km + *ek_pub_km, + size_t *msg_sz) +{ + struct wired_cmd_verify_receiver_cert_in verify_rxcert_in = { { 0 } }; + struct wired_cmd_verify_receiver_cert_out verify_rxcert_out = { { 0 } }; + struct drm_i915_private *i915; + ssize_t byte; + + if (!dev || !data || !rx_cert || !km_stored || !ek_pub_km || !msg_sz) + return -EINVAL; + + i915 = kdev_to_i915(dev); + if (!i915) { + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); + return -ENODEV; + } + + verify_rxcert_in.header.api_version = HDCP_API_VERSION; + verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT; + verify_rxcert_in.header.status = FW_HDCP_STATUS_SUCCESS; + verify_rxcert_in.header.buffer_len = + WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN; + + verify_rxcert_in.port.integrated_port_type = data->port_type; + verify_rxcert_in.port.physical_port = (u8)data->hdcp_ddi; + verify_rxcert_in.port.attached_transcoder = (u8)data->hdcp_transcoder; + + verify_rxcert_in.cert_rx = rx_cert->cert_rx; + memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN); + memcpy(verify_rxcert_in.rx_caps, rx_cert->rx_caps, HDCP_2_2_RXCAPS_LEN); + + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_rxcert_in, + sizeof(verify_rxcert_in), + (u8 *)&verify_rxcert_out, + sizeof(verify_rxcert_out)); + if (byte < 0) { + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed: %zd\n", byte); + return byte; + } + + if (verify_rxcert_out.header.status != FW_HDCP_STATUS_SUCCESS) { + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. 
Status: 0x%X\n", + WIRED_VERIFY_RECEIVER_CERT, + verify_rxcert_out.header.status); + return -EIO; + } + + *km_stored = !!verify_rxcert_out.km_stored; + if (verify_rxcert_out.km_stored) { + ek_pub_km->msg_id = HDCP_2_2_AKE_STORED_KM; + *msg_sz = sizeof(struct hdcp2_ake_stored_km); + } else { + ek_pub_km->msg_id = HDCP_2_2_AKE_NO_STORED_KM; + *msg_sz = sizeof(struct hdcp2_ake_no_stored_km); + } + + memcpy(ek_pub_km->e_kpub_km, &verify_rxcert_out.ekm_buff, + sizeof(verify_rxcert_out.ekm_buff)); + + return 0; +} + +int +intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data, + struct hdcp2_ake_send_hprime *rx_hprime) +{ + struct wired_cmd_ake_send_hprime_in send_hprime_in = { { 0 } }; + struct wired_cmd_ake_send_hprime_out send_hprime_out = { { 0 } }; + struct drm_i915_private *i915; + ssize_t byte; + + if (!dev || !data || !rx_hprime) + return -EINVAL; + + i915 = kdev_to_i915(dev); + if (!i915) { + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); + return -ENODEV; + } + + send_hprime_in.header.api_version = HDCP_API_VERSION; + send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME; + send_hprime_in.header.status = FW_HDCP_STATUS_SUCCESS; + send_hprime_in.header.buffer_len = WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN; + + send_hprime_in.port.integrated_port_type = data->port_type; + send_hprime_in.port.physical_port = (u8)data->hdcp_ddi; + send_hprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder; + + memcpy(send_hprime_in.h_prime, rx_hprime->h_prime, + HDCP_2_2_H_PRIME_LEN); + + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&send_hprime_in, + sizeof(send_hprime_in), + (u8 *)&send_hprime_out, + sizeof(send_hprime_out)); + if (byte < 0) { + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + return byte; + } + + if (send_hprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n", + WIRED_AKE_SEND_HPRIME, send_hprime_out.header.status); + return -EIO; + } + + return 0; +} + +int +intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *data, + struct hdcp2_ake_send_pairing_info *pairing_info) +{ + struct wired_cmd_ake_send_pairing_info_in pairing_info_in = { { 0 } }; + struct wired_cmd_ake_send_pairing_info_out pairing_info_out = { { 0 } }; + struct drm_i915_private *i915; + ssize_t byte; + + if (!dev || !data || !pairing_info) + return -EINVAL; + + i915 = kdev_to_i915(dev); + if (!i915) { + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); + return -ENODEV; + } + + pairing_info_in.header.api_version = HDCP_API_VERSION; + pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO; + pairing_info_in.header.status = FW_HDCP_STATUS_SUCCESS; + pairing_info_in.header.buffer_len = + WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN; + + pairing_info_in.port.integrated_port_type = data->port_type; + pairing_info_in.port.physical_port = (u8)data->hdcp_ddi; + pairing_info_in.port.attached_transcoder = (u8)data->hdcp_transcoder; + + memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km, + HDCP_2_2_E_KH_KM_LEN); + + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&pairing_info_in, + sizeof(pairing_info_in), + (u8 *)&pairing_info_out, + sizeof(pairing_info_out)); + if (byte < 0) { + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + return byte; + } + + if (pairing_info_out.header.status != FW_HDCP_STATUS_SUCCESS) { + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. 
Status: 0x%X\n", + WIRED_AKE_SEND_PAIRING_INFO, + pairing_info_out.header.status); + return -EIO; + } + + return 0; +} + +int +intel_hdcp_gsc_initiate_locality_check(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_lc_init *lc_init_data) +{ + struct wired_cmd_init_locality_check_in lc_init_in = { { 0 } }; + struct wired_cmd_init_locality_check_out lc_init_out = { { 0 } }; + struct drm_i915_private *i915; + ssize_t byte; + + if (!dev || !data || !lc_init_data) + return -EINVAL; + + i915 = kdev_to_i915(dev); + if (!i915) { + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); + return -ENODEV; + } + + lc_init_in.header.api_version = HDCP_API_VERSION; + lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK; + lc_init_in.header.status = FW_HDCP_STATUS_SUCCESS; + lc_init_in.header.buffer_len = WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN; + + lc_init_in.port.integrated_port_type = data->port_type; + lc_init_in.port.physical_port = (u8)data->hdcp_ddi; + lc_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder; + + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&lc_init_in, sizeof(lc_init_in), + (u8 *)&lc_init_out, sizeof(lc_init_out)); + if (byte < 0) { + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + return byte; + } + + if (lc_init_out.header.status != FW_HDCP_STATUS_SUCCESS) { + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. status: 0x%X\n", + WIRED_INIT_LOCALITY_CHECK, lc_init_out.header.status); + return -EIO; + } + + lc_init_data->msg_id = HDCP_2_2_LC_INIT; + memcpy(lc_init_data->r_n, lc_init_out.r_n, HDCP_2_2_RN_LEN); + + return 0; +} + +int +intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data, + struct hdcp2_lc_send_lprime *rx_lprime) +{ + struct wired_cmd_validate_locality_in verify_lprime_in = { { 0 } }; + struct wired_cmd_validate_locality_out verify_lprime_out = { { 0 } }; + struct drm_i915_private *i915; + ssize_t byte; + + if (!dev || !data || !rx_lprime) + return -EINVAL; + + i915 = kdev_to_i915(dev); + if (!i915) { + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); + return -ENODEV; + } + + verify_lprime_in.header.api_version = HDCP_API_VERSION; + verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY; + verify_lprime_in.header.status = FW_HDCP_STATUS_SUCCESS; + verify_lprime_in.header.buffer_len = + WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN; + + verify_lprime_in.port.integrated_port_type = data->port_type; + verify_lprime_in.port.physical_port = (u8)data->hdcp_ddi; + verify_lprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder; + + memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime, + HDCP_2_2_L_PRIME_LEN); + + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_lprime_in, + sizeof(verify_lprime_in), + (u8 *)&verify_lprime_out, + sizeof(verify_lprime_out)); + if (byte < 0) { + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + return byte; + } + + if (verify_lprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. 
status: 0x%X\n", + WIRED_VALIDATE_LOCALITY, + verify_lprime_out.header.status); + return -EIO; + } + + return 0; +} + +int intel_hdcp_gsc_get_session_key(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ske_send_eks *ske_data) +{ + struct wired_cmd_get_session_key_in get_skey_in = { { 0 } }; + struct wired_cmd_get_session_key_out get_skey_out = { { 0 } }; + struct drm_i915_private *i915; + ssize_t byte; + + if (!dev || !data || !ske_data) + return -EINVAL; + + i915 = kdev_to_i915(dev); + if (!i915) { + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); + return -ENODEV; + } + + get_skey_in.header.api_version = HDCP_API_VERSION; + get_skey_in.header.command_id = WIRED_GET_SESSION_KEY; + get_skey_in.header.status = FW_HDCP_STATUS_SUCCESS; + get_skey_in.header.buffer_len = WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN; + + get_skey_in.port.integrated_port_type = data->port_type; + get_skey_in.port.physical_port = (u8)data->hdcp_ddi; + get_skey_in.port.attached_transcoder = (u8)data->hdcp_transcoder; + + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&get_skey_in, sizeof(get_skey_in), + (u8 *)&get_skey_out, sizeof(get_skey_out)); + if (byte < 0) { + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + return byte; + } + + if (get_skey_out.header.status != FW_HDCP_STATUS_SUCCESS) { + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n", + WIRED_GET_SESSION_KEY, get_skey_out.header.status); + return -EIO; + } + + ske_data->msg_id = HDCP_2_2_SKE_SEND_EKS; + memcpy(ske_data->e_dkey_ks, get_skey_out.e_dkey_ks, + HDCP_2_2_E_DKEY_KS_LEN); + memcpy(ske_data->riv, get_skey_out.r_iv, HDCP_2_2_RIV_LEN); + + return 0; +} + +int +intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_rep_send_receiverid_list + *rep_topology, + struct hdcp2_rep_send_ack + *rep_send_ack) +{ + struct wired_cmd_verify_repeater_in verify_repeater_in = { { 0 } }; + struct wired_cmd_verify_repeater_out verify_repeater_out = { { 0 } }; + struct drm_i915_private *i915; + ssize_t byte; + + if (!dev || !rep_topology || !rep_send_ack || !data) + return -EINVAL; + + i915 = kdev_to_i915(dev); + if (!i915) { + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); + return -ENODEV; + } + + verify_repeater_in.header.api_version = HDCP_API_VERSION; + verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER; + verify_repeater_in.header.status = FW_HDCP_STATUS_SUCCESS; + verify_repeater_in.header.buffer_len = + WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN; + + verify_repeater_in.port.integrated_port_type = data->port_type; + verify_repeater_in.port.physical_port = (u8)data->hdcp_ddi; + verify_repeater_in.port.attached_transcoder = (u8)data->hdcp_transcoder; + + memcpy(verify_repeater_in.rx_info, rep_topology->rx_info, + HDCP_2_2_RXINFO_LEN); + memcpy(verify_repeater_in.seq_num_v, rep_topology->seq_num_v, + HDCP_2_2_SEQ_NUM_LEN); + memcpy(verify_repeater_in.v_prime, rep_topology->v_prime, + HDCP_2_2_V_PRIME_HALF_LEN); + memcpy(verify_repeater_in.receiver_ids, rep_topology->receiver_ids, + HDCP_2_2_RECEIVER_IDS_MAX_LEN); + + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_repeater_in, + sizeof(verify_repeater_in), + (u8 *)&verify_repeater_out, + sizeof(verify_repeater_out)); + if (byte < 0) { + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + return byte; + } + + if (verify_repeater_out.header.status != FW_HDCP_STATUS_SUCCESS) { + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. 
status: 0x%X\n", + WIRED_VERIFY_REPEATER, + verify_repeater_out.header.status); + return -EIO; + } + + memcpy(rep_send_ack->v, verify_repeater_out.v, + HDCP_2_2_V_PRIME_HALF_LEN); + rep_send_ack->msg_id = HDCP_2_2_REP_SEND_ACK; + + return 0; +} + +int intel_hdcp_gsc_verify_mprime(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_rep_stream_ready *stream_ready) +{ + struct wired_cmd_repeater_auth_stream_req_in *verify_mprime_in; + struct wired_cmd_repeater_auth_stream_req_out + verify_mprime_out = { { 0 } }; + struct drm_i915_private *i915; + ssize_t byte; + size_t cmd_size; + + if (!dev || !stream_ready || !data) + return -EINVAL; + + i915 = kdev_to_i915(dev); + if (!i915) { + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); + return -ENODEV; + } + + cmd_size = struct_size(verify_mprime_in, streams, data->k); + if (cmd_size == SIZE_MAX) + return -EINVAL; + + verify_mprime_in = kzalloc(cmd_size, GFP_KERNEL); + if (!verify_mprime_in) + return -ENOMEM; + + verify_mprime_in->header.api_version = HDCP_API_VERSION; + verify_mprime_in->header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ; + verify_mprime_in->header.status = FW_HDCP_STATUS_SUCCESS; + verify_mprime_in->header.buffer_len = cmd_size - sizeof(verify_mprime_in->header); + + verify_mprime_in->port.integrated_port_type = data->port_type; + verify_mprime_in->port.physical_port = (u8)data->hdcp_ddi; + verify_mprime_in->port.attached_transcoder = (u8)data->hdcp_transcoder; + + memcpy(verify_mprime_in->m_prime, stream_ready->m_prime, HDCP_2_2_MPRIME_LEN); + drm_hdcp_cpu_to_be24(verify_mprime_in->seq_num_m, data->seq_num_m); + + memcpy(verify_mprime_in->streams, data->streams, + array_size(data->k, sizeof(*data->streams))); + + verify_mprime_in->k = cpu_to_be16(data->k); + + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)verify_mprime_in, cmd_size, + (u8 *)&verify_mprime_out, + sizeof(verify_mprime_out)); + kfree(verify_mprime_in); + if (byte < 0) { + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + return byte; + } + + if (verify_mprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n", + WIRED_REPEATER_AUTH_STREAM_REQ, + verify_mprime_out.header.status); + return -EIO; + } + + return 0; +} + +int intel_hdcp_gsc_enable_authentication(struct device *dev, + struct hdcp_port_data *data) +{ + struct wired_cmd_enable_auth_in enable_auth_in = { { 0 } }; + struct wired_cmd_enable_auth_out enable_auth_out = { { 0 } }; + struct drm_i915_private *i915; + ssize_t byte; + + if (!dev || !data) + return -EINVAL; + + i915 = kdev_to_i915(dev); + if (!i915) { + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); + return -ENODEV; + } + + enable_auth_in.header.api_version = HDCP_API_VERSION; + enable_auth_in.header.command_id = WIRED_ENABLE_AUTH; + enable_auth_in.header.status = FW_HDCP_STATUS_SUCCESS; + enable_auth_in.header.buffer_len = WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN; + + enable_auth_in.port.integrated_port_type = data->port_type; + enable_auth_in.port.physical_port = (u8)data->hdcp_ddi; + enable_auth_in.port.attached_transcoder = (u8)data->hdcp_transcoder; + enable_auth_in.stream_type = data->streams[0].stream_type; + + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&enable_auth_in, + sizeof(enable_auth_in), + (u8 *)&enable_auth_out, + sizeof(enable_auth_out)); + if (byte < 0) { + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. 
%zd\n", byte); + return byte; + } + + if (enable_auth_out.header.status != FW_HDCP_STATUS_SUCCESS) { + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n", + WIRED_ENABLE_AUTH, enable_auth_out.header.status); + return -EIO; + } + + return 0; +} + +int +intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data) +{ + struct wired_cmd_close_session_in session_close_in = { { 0 } }; + struct wired_cmd_close_session_out session_close_out = { { 0 } }; + struct drm_i915_private *i915; + ssize_t byte; + + if (!dev || !data) + return -EINVAL; + + i915 = kdev_to_i915(dev); + if (!i915) { + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); + return -ENODEV; + } + + session_close_in.header.api_version = HDCP_API_VERSION; + session_close_in.header.command_id = WIRED_CLOSE_SESSION; + session_close_in.header.status = FW_HDCP_STATUS_SUCCESS; + session_close_in.header.buffer_len = + WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN; + + session_close_in.port.integrated_port_type = data->port_type; + session_close_in.port.physical_port = (u8)data->hdcp_ddi; + session_close_in.port.attached_transcoder = (u8)data->hdcp_transcoder; + + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_close_in, + sizeof(session_close_in), + (u8 *)&session_close_out, + sizeof(session_close_out)); + if (byte < 0) { + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + return byte; + } + + if (session_close_out.header.status != FW_HDCP_STATUS_SUCCESS) { + drm_dbg_kms(&i915->drm, "Session Close Failed. status: 0x%X\n", + session_close_out.header.status); + return -EIO; + } + + return 0; +} diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h new file mode 100644 index 000000000000..ce199d6f6232 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __INTEL_HDCP_GSC_MESSAGE_H__ +#define __INTEL_HDCP_GSC_MESSAGE_H__ + +#include <linux/types.h> + +struct device; +struct drm_i915_private; +struct hdcp_port_data; +struct hdcp2_ake_init; +struct hdcp2_ake_send_cert; +struct hdcp2_ake_no_stored_km; +struct hdcp2_ake_send_hprime; +struct hdcp2_ake_send_pairing_info; +struct hdcp2_lc_init; +struct hdcp2_lc_send_lprime; +struct hdcp2_ske_send_eks; +struct hdcp2_rep_send_receiverid_list; +struct hdcp2_rep_send_ack; +struct hdcp2_rep_stream_ready; + +ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in, + size_t msg_in_len, u8 *msg_out, + size_t msg_out_len); +bool intel_hdcp_gsc_check_status(struct drm_i915_private *i915); +int +intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data, + struct hdcp2_ake_init *ake_data); +int +intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ake_send_cert *rx_cert, + bool *km_stored, + struct hdcp2_ake_no_stored_km + *ek_pub_km, + size_t *msg_sz); +int +intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data, + struct hdcp2_ake_send_hprime *rx_hprime); +int +intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *data, + struct hdcp2_ake_send_pairing_info *pairing_info); +int +intel_hdcp_gsc_initiate_locality_check(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_lc_init *lc_init_data); +int +intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data, + struct hdcp2_lc_send_lprime 
*rx_lprime); +int intel_hdcp_gsc_get_session_key(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ske_send_eks *ske_data); +int +intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_rep_send_receiverid_list + *rep_topology, + struct hdcp2_rep_send_ack + *rep_send_ack); +int intel_hdcp_gsc_verify_mprime(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_rep_stream_ready *stream_ready); +int intel_hdcp_gsc_enable_authentication(struct device *dev, + struct hdcp_port_data *data); +int +intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data); + +#endif /* __INTEL_HDCP_GSC_MESSAGE_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index af4102e91769..ac315f8e7820 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -2496,7 +2496,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); - if (!INTEL_DISPLAY_ENABLED(dev_priv)) + if (!intel_display_device_enabled(dev_priv)) return connector_status_disconnected; wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index e8562f6f8bb4..0c0700c6ec66 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -763,7 +763,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work) void intel_hpd_poll_enable(struct drm_i915_private *dev_priv) { if (!HAS_DISPLAY(dev_priv) || - !INTEL_DISPLAY_ENABLED(dev_priv)) + !intel_display_device_enabled(dev_priv)) return; WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true); diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.h b/drivers/gpu/drm/i915/display/intel_lpe_audio.h index 0beecac267ae..2c5fcb6e1fd0 100644 --- a/drivers/gpu/drm/i915/display/intel_lpe_audio.h +++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.h @@ -12,11 +12,29 @@ enum port; enum transcoder; struct drm_i915_private; +#ifdef I915 int intel_lpe_audio_init(struct drm_i915_private *dev_priv); void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv); void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv); void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder, enum port port, const void *eld, int ls_clock, bool dp_output); +#else +static inline int intel_lpe_audio_init(struct drm_i915_private *dev_priv) +{ + return -ENODEV; +} +static inline void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv) +{ +} +static inline void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv) +{ +} +static inline void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder, enum port port, + const void *eld, int ls_clock, bool dp_output) +{ +} +#endif #endif /* __INTEL_LPE_AUDIO_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c index 152a22a8ffd2..1d048fa98561 100644 --- a/drivers/gpu/drm/i915/display/intel_lspcon.c +++ b/drivers/gpu/drm/i915/display/intel_lspcon.c @@ -153,6 +153,18 @@ static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon) return current_mode; } +static int lspcon_get_mode_settle_timeout(struct intel_lspcon *lspcon) +{ + /* + * On some CometLake-based 
device designs the Parade PS175 takes more + * than 400ms to settle in PCON mode. 100 reboot trials on one device + * resulted in a median settle time of 440ms and a maximum of 444ms. + * Even after increasing the timeout to 500ms, 2% of devices still had + * this error. So this sets the timeout to 800ms. + */ + return lspcon->vendor == LSPCON_VENDOR_PARADE ? 800 : 400; +} + static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon, enum drm_lspcon_mode mode) { @@ -167,7 +179,8 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon, drm_dbg_kms(&i915->drm, "Waiting for LSPCON mode %s to settle\n", lspcon_mode_name(mode)); - wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400); + wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, + lspcon_get_mode_settle_timeout(lspcon)); if (current_mode != mode) drm_err(&i915->drm, "LSPCON mode hasn't settled\n"); diff --git a/drivers/gpu/drm/i915/display/intel_lvds.h b/drivers/gpu/drm/i915/display/intel_lvds.h index 9d3372dc503f..7ad5fa9c0434 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.h +++ b/drivers/gpu/drm/i915/display/intel_lvds.h @@ -13,10 +13,29 @@ enum pipe; struct drm_i915_private; +#ifdef I915 bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv, i915_reg_t lvds_reg, enum pipe *pipe); void intel_lvds_init(struct drm_i915_private *dev_priv); struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv); bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv); +#else +static inline bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv, + i915_reg_t lvds_reg, enum pipe *pipe) +{ + return false; +} +static inline void intel_lvds_init(struct drm_i915_private *dev_priv) +{ +} +static inline struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv) +{ + return NULL; +} +static inline bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv) +{ + return false; +} +#endif #endif /* __INTEL_LVDS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c index 138144a65a45..5e1c2c780412 100644 --- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c +++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c @@ -23,8 +23,8 @@ * Cross check the actual hw state with our own modeset state tracking (and its * internal consistency). 
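* verify_crtc_state() below reads the hardware state back into a freshly allocated crtc state and compares it against the software state with intel_pipe_config_compare(), warning on any mismatch.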
*/ -static void intel_connector_verify_state(struct intel_crtc_state *crtc_state, - struct drm_connector_state *conn_state) +static void intel_connector_verify_state(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); @@ -66,12 +66,12 @@ verify_connector_state(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_connector *connector; - struct drm_connector_state *new_conn_state; + const struct drm_connector_state *new_conn_state; int i; for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) { struct drm_encoder *encoder = connector->encoder; - struct intel_crtc_state *crtc_state = NULL; + const struct intel_crtc_state *crtc_state = NULL; if (new_conn_state->crtc != &crtc->base) continue; @@ -86,38 +86,40 @@ verify_connector_state(struct intel_atomic_state *state, } } -static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, - const struct intel_crtc_state *pipe_config) +static void intel_pipe_config_sanity_check(const struct intel_crtc_state *crtc_state) { - if (pipe_config->has_pch_encoder) { - int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), - &pipe_config->fdi_m_n); - int dotclock = pipe_config->hw.adjusted_mode.crtc_clock; + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + + if (crtc_state->has_pch_encoder) { + int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(i915, crtc_state), + &crtc_state->fdi_m_n); + int dotclock = crtc_state->hw.adjusted_mode.crtc_clock; /* * FDI already provided one idea for the dotclock. * Yell if the encoder disagrees. Allow for slight * rounding differences. 
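* The tolerance used below is 1 kHz (both clocks are in kHz), so e.g. an FDI-derived 270000 against an encoder-reported 270001 passes, while any larger disagreement triggers the WARN.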
*/ - drm_WARN(&dev_priv->drm, abs(fdi_dotclock - dotclock) > 1, + drm_WARN(&i915->drm, abs(fdi_dotclock - dotclock) > 1, "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", fdi_dotclock, dotclock); } } static void -verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state) +verify_encoder_state(struct intel_atomic_state *state) { + struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_encoder *encoder; struct drm_connector *connector; - struct drm_connector_state *old_conn_state, *new_conn_state; + const struct drm_connector_state *old_conn_state, *new_conn_state; int i; - for_each_intel_encoder(&dev_priv->drm, encoder) { + for_each_intel_encoder(&i915->drm, encoder) { bool enabled = false, found = false; enum pipe pipe; - drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n", + drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s]\n", encoder->base.base.id, encoder->base.name); @@ -132,7 +134,7 @@ verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_stat found = true; enabled = true; - I915_STATE_WARN(dev_priv, + I915_STATE_WARN(i915, new_conn_state->crtc != encoder->base.crtc, "connector's crtc doesn't match encoder crtc\n"); } @@ -140,7 +142,7 @@ verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_stat if (!found) continue; - I915_STATE_WARN(dev_priv, !!encoder->base.crtc != enabled, + I915_STATE_WARN(i915, !!encoder->base.crtc != enabled, "encoder's enabled state mismatch (expected %i, found %i)\n", !!encoder->base.crtc, enabled); @@ -148,7 +150,7 @@ verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_stat bool active; active = encoder->get_hw_state(encoder, &pipe); - I915_STATE_WARN(dev_priv, active, + I915_STATE_WARN(i915, active, "encoder detached but still enabled on pipe %c.\n", pipe_name(pipe)); } @@ -156,96 +158,98 @@ verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_stat } static void -verify_crtc_state(struct intel_crtc *crtc, - struct intel_crtc_state *old_crtc_state, - struct intel_crtc_state *new_crtc_state) +verify_crtc_state(struct intel_atomic_state *state, + struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_encoder *encoder; - struct intel_crtc_state *pipe_config = old_crtc_state; - struct drm_atomic_state *state = old_crtc_state->uapi.state; + struct drm_i915_private *i915 = to_i915(dev); + const struct intel_crtc_state *sw_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + struct intel_crtc_state *hw_crtc_state; struct intel_crtc *master_crtc; + struct intel_encoder *encoder; - __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi); - intel_crtc_free_hw_state(old_crtc_state); - intel_crtc_state_reset(old_crtc_state, crtc); - old_crtc_state->uapi.state = state; + hw_crtc_state = intel_crtc_state_alloc(crtc); + if (!hw_crtc_state) + return; - drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id, + drm_dbg_kms(&i915->drm, "[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name); - pipe_config->hw.enable = new_crtc_state->hw.enable; + hw_crtc_state->hw.enable = sw_crtc_state->hw.enable; - intel_crtc_get_pipe_config(pipe_config); + intel_crtc_get_pipe_config(hw_crtc_state); /* we keep both pipes enabled on 830 */ - if (IS_I830(dev_priv) && pipe_config->hw.active) - pipe_config->hw.active = new_crtc_state->hw.active; + if (IS_I830(i915) && hw_crtc_state->hw.active) + hw_crtc_state->hw.active = sw_crtc_state->hw.active; - 
I915_STATE_WARN(dev_priv, - new_crtc_state->hw.active != pipe_config->hw.active, + I915_STATE_WARN(i915, + sw_crtc_state->hw.active != hw_crtc_state->hw.active, "crtc active state doesn't match with hw state (expected %i, found %i)\n", - new_crtc_state->hw.active, pipe_config->hw.active); + sw_crtc_state->hw.active, hw_crtc_state->hw.active); - I915_STATE_WARN(dev_priv, crtc->active != new_crtc_state->hw.active, + I915_STATE_WARN(i915, crtc->active != sw_crtc_state->hw.active, "transitional active state does not match atomic hw state (expected %i, found %i)\n", - new_crtc_state->hw.active, crtc->active); + sw_crtc_state->hw.active, crtc->active); - master_crtc = intel_master_crtc(new_crtc_state); + master_crtc = intel_master_crtc(sw_crtc_state); for_each_encoder_on_crtc(dev, &master_crtc->base, encoder) { enum pipe pipe; bool active; active = encoder->get_hw_state(encoder, &pipe); - I915_STATE_WARN(dev_priv, active != new_crtc_state->hw.active, + I915_STATE_WARN(i915, active != sw_crtc_state->hw.active, "[ENCODER:%i] active %i with crtc active %i\n", encoder->base.base.id, active, - new_crtc_state->hw.active); + sw_crtc_state->hw.active); - I915_STATE_WARN(dev_priv, active && master_crtc->pipe != pipe, + I915_STATE_WARN(i915, active && master_crtc->pipe != pipe, "Encoder connected to wrong pipe %c\n", pipe_name(pipe)); if (active) - intel_encoder_get_config(encoder, pipe_config); + intel_encoder_get_config(encoder, hw_crtc_state); } - if (!new_crtc_state->hw.active) - return; + if (!sw_crtc_state->hw.active) + goto destroy_state; - intel_pipe_config_sanity_check(dev_priv, pipe_config); + intel_pipe_config_sanity_check(hw_crtc_state); - if (!intel_pipe_config_compare(new_crtc_state, - pipe_config, false)) { - I915_STATE_WARN(dev_priv, 1, "pipe state doesn't match!\n"); - intel_crtc_state_dump(pipe_config, NULL, "hw state"); - intel_crtc_state_dump(new_crtc_state, NULL, "sw state"); + if (!intel_pipe_config_compare(sw_crtc_state, + hw_crtc_state, false)) { + I915_STATE_WARN(i915, 1, "pipe state doesn't match!\n"); + intel_crtc_state_dump(hw_crtc_state, NULL, "hw state"); + intel_crtc_state_dump(sw_crtc_state, NULL, "sw state"); } + +destroy_state: + intel_crtc_destroy_state(&crtc->base, &hw_crtc_state->uapi); } -void intel_modeset_verify_crtc(struct intel_crtc *crtc, - struct intel_atomic_state *state, - struct intel_crtc_state *old_crtc_state, - struct intel_crtc_state *new_crtc_state) +void intel_modeset_verify_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc) { + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + if (!intel_crtc_needs_modeset(new_crtc_state) && !intel_crtc_needs_fastset(new_crtc_state)) return; - intel_wm_state_verify(crtc, new_crtc_state); + intel_wm_state_verify(state, crtc); verify_connector_state(state, crtc); - verify_crtc_state(crtc, old_crtc_state, new_crtc_state); - intel_shared_dpll_state_verify(crtc, old_crtc_state, new_crtc_state); - intel_mpllb_state_verify(state, new_crtc_state); - intel_c10pll_state_verify(state, new_crtc_state); + verify_crtc_state(state, crtc); + intel_shared_dpll_state_verify(state, crtc); + intel_mpllb_state_verify(state, crtc); + intel_c10pll_state_verify(state, crtc); } -void intel_modeset_verify_disabled(struct drm_i915_private *dev_priv, - struct intel_atomic_state *state) +void intel_modeset_verify_disabled(struct intel_atomic_state *state) { - verify_encoder_state(dev_priv, state); + verify_encoder_state(state); verify_connector_state(state, NULL); - 
intel_shared_dpll_verify_disabled(dev_priv); + intel_shared_dpll_verify_disabled(state); } diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.h b/drivers/gpu/drm/i915/display/intel_modeset_verify.h index 2d6fbe4f7846..3bef8735cb4b 100644 --- a/drivers/gpu/drm/i915/display/intel_modeset_verify.h +++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.h @@ -6,16 +6,11 @@ #ifndef __INTEL_MODESET_VERIFY_H__ #define __INTEL_MODESET_VERIFY_H__ -struct drm_i915_private; struct intel_atomic_state; struct intel_crtc; -struct intel_crtc_state; -void intel_modeset_verify_crtc(struct intel_crtc *crtc, - struct intel_atomic_state *state, - struct intel_crtc_state *old_crtc_state, - struct intel_crtc_state *new_crtc_state); -void intel_modeset_verify_disabled(struct drm_i915_private *dev_priv, - struct intel_atomic_state *state); +void intel_modeset_verify_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void intel_modeset_verify_disabled(struct intel_atomic_state *state); #endif /* __INTEL_MODESET_VERIFY_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_overlay.h b/drivers/gpu/drm/i915/display/intel_overlay.h index a167c28acd27..c3f68fce6f08 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.h +++ b/drivers/gpu/drm/i915/display/intel_overlay.h @@ -13,6 +13,7 @@ struct drm_i915_private; struct intel_overlay; struct intel_overlay_error_state; +#ifdef I915 void intel_overlay_setup(struct drm_i915_private *dev_priv); void intel_overlay_cleanup(struct drm_i915_private *dev_priv); int intel_overlay_switch_off(struct intel_overlay *overlay); @@ -25,5 +26,39 @@ struct intel_overlay_error_state * intel_overlay_capture_error_state(struct drm_i915_private *dev_priv); void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, struct intel_overlay_error_state *error); +#else +static inline void intel_overlay_setup(struct drm_i915_private *dev_priv) +{ +} +static inline void intel_overlay_cleanup(struct drm_i915_private *dev_priv) +{ +} +static inline int intel_overlay_switch_off(struct intel_overlay *overlay) +{ + return 0; +} +static inline int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + return 0; +} +static inline int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + return 0; +} +static inline void intel_overlay_reset(struct drm_i915_private *dev_priv) +{ +} +static inline struct intel_overlay_error_state * +intel_overlay_capture_error_state(struct drm_i915_private *dev_priv) +{ + return NULL; +} +static inline void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, + struct intel_overlay_error_state *error) +{ +} +#endif #endif /* __INTEL_OVERLAY_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index 086cb8dbe22c..483beedac5b8 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -680,7 +680,7 @@ intel_panel_detect(struct drm_connector *connector, bool force) { struct drm_i915_private *i915 = to_i915(connector->dev); - if (!INTEL_DISPLAY_ENABLED(i915)) + if (!intel_display_device_enabled(i915)) return connector_status_disconnected; return connector_status_connected; diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.h b/drivers/gpu/drm/i915/display/intel_pch_display.h index 41a63413cb3d..35f8288af3d1 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_display.h +++ b/drivers/gpu/drm/i915/display/intel_pch_display.h @@ 
-15,6 +15,7 @@ struct intel_crtc; struct intel_crtc_state; struct intel_link_m_n; +#ifdef I915 bool intel_has_pch_trancoder(struct drm_i915_private *i915, enum pipe pch_transcoder); enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc); @@ -41,5 +42,57 @@ void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc, struct intel_link_m_n *m_n); void intel_pch_sanitize(struct drm_i915_private *i915); +#else +static inline bool intel_has_pch_trancoder(struct drm_i915_private *i915, + enum pipe pch_transcoder) +{ + return false; +} +static inline int intel_crtc_pch_transcoder(struct intel_crtc *crtc) +{ + return 0; +} +static inline void ilk_pch_pre_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ +} +static inline void ilk_pch_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ +} +static inline void ilk_pch_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ +} +static inline void ilk_pch_post_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ +} +static inline void ilk_pch_get_config(struct intel_crtc_state *crtc_state) +{ +} +static inline void lpt_pch_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ +} +static inline void lpt_pch_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ +} +static inline void lpt_pch_get_config(struct intel_crtc_state *crtc_state) +{ +} +static inline void intel_pch_transcoder_get_m1_n1(struct intel_crtc *crtc, + struct intel_link_m_n *m_n) +{ +} +static inline void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc, + struct intel_link_m_n *m_n) +{ +} +static inline void intel_pch_sanitize(struct drm_i915_private *i915) +{ +} +#endif #endif diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c index 9583e86b602a..713cfba71475 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c +++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c @@ -492,6 +492,7 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) { struct intel_encoder *encoder; + struct intel_shared_dpll *pll; int i; u32 val, final; bool has_lvds = false; @@ -527,8 +528,10 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) } /* Check if any DPLLs are using the SSC source */ - for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) { - u32 temp = intel_de_read(dev_priv, PCH_DPLL(i)); + for_each_shared_dpll(dev_priv, pll, i) { + u32 temp; + + temp = intel_de_read(dev_priv, PCH_DPLL(pll->info->id)); if (!(temp & DPLL_VCO_ENABLE)) continue; diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.h b/drivers/gpu/drm/i915/display/intel_pch_refclk.h index 9bcf56629f24..ae3403c0ced8 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_refclk.h +++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.h @@ -11,6 +11,7 @@ struct drm_i915_private; struct intel_crtc_state; +#ifdef I915 void lpt_program_iclkip(const struct intel_crtc_state *crtc_state); void lpt_disable_iclkip(struct drm_i915_private *dev_priv); int lpt_get_iclkip(struct drm_i915_private *dev_priv); @@ -18,5 +19,27 @@ int lpt_iclkip(const struct intel_crtc_state *crtc_state); void intel_init_pch_refclk(struct drm_i915_private *dev_priv); void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv); +#else +static inline void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) +{ +} +static inline void lpt_disable_iclkip(struct drm_i915_private 
*dev_priv) +{ +} +static inline int lpt_get_iclkip(struct drm_i915_private *dev_priv) +{ + return 0; +} +static inline int lpt_iclkip(const struct intel_crtc_state *crtc_state) +{ + return 0; +} +static inline void intel_init_pch_refclk(struct drm_i915_private *dev_priv) +{ +} +static inline void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) +{ +} +#endif #endif diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 850b11f20285..bb65881e87cc 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -2195,10 +2195,12 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state, } } -static void _intel_psr_post_plane_update(const struct intel_atomic_state *state, - const struct intel_crtc_state *crtc_state) +void intel_psr_post_plane_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder; if (!crtc_state->has_psr) @@ -2241,20 +2243,6 @@ static void _intel_psr_post_plane_update(const struct intel_atomic_state *state, } } -void intel_psr_post_plane_update(const struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_crtc_state *crtc_state; - struct intel_crtc *crtc; - int i; - - if (!HAS_PSR(dev_priv)) - return; - - for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) - _intel_psr_post_plane_update(state, crtc_state); -} - static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index 0b95e8aa615f..bf35f42df6bc 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h @@ -24,7 +24,8 @@ struct intel_plane_state; void intel_psr_init_dpcd(struct intel_dp *intel_dp); void intel_psr_pre_plane_update(struct intel_atomic_state *state, struct intel_crtc *crtc); -void intel_psr_post_plane_update(const struct intel_atomic_state *state); +void intel_psr_post_plane_update(struct intel_atomic_state *state, + struct intel_crtc *crtc); void intel_psr_disable(struct intel_dp *intel_dp, const struct intel_crtc_state *old_crtc_state); int intel_psr_debug_set(struct intel_dp *intel_dp, u64 value); diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 950ba0431f5f..35137e978591 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -2121,7 +2121,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); - if (!INTEL_DISPLAY_ENABLED(i915)) + if (!intel_display_device_enabled(i915)) return connector_status_disconnected; if (!intel_sdvo_set_target_output(intel_sdvo, diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.h b/drivers/gpu/drm/i915/display/intel_sdvo.h index 2868852c85f2..d1815b4103d4 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.h +++ b/drivers/gpu/drm/i915/display/intel_sdvo.h @@ -14,9 +14,22 @@ struct drm_i915_private; enum pipe; enum port; +#ifdef I915 bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv, i915_reg_t sdvo_reg, enum pipe *pipe); bool intel_sdvo_init(struct drm_i915_private *dev_priv, i915_reg_t reg, 
enum port port); +#else +static inline bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv, + i915_reg_t sdvo_reg, enum pipe *pipe) +{ + return false; +} +static inline bool intel_sdvo_init(struct drm_i915_private *dev_priv, + i915_reg_t reg, enum port port) +{ + return false; +} +#endif #endif /* __INTEL_SDVO_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c index 88ef56b6e0fd..c0285365efae 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.c +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c @@ -1993,12 +1993,13 @@ int intel_snps_phy_check_hdmi_link_rate(int clock) } void intel_mpllb_state_verify(struct intel_atomic_state *state, - struct intel_crtc_state *new_crtc_state) + struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct intel_mpllb_state mpllb_hw_state = { 0 }; - struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state; - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); + const struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state; struct intel_encoder *encoder; if (!IS_DG2(i915)) diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.h b/drivers/gpu/drm/i915/display/intel_snps_phy.h index 557ef820bc0b..515abf7c5902 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.h +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.h @@ -10,6 +10,7 @@ struct drm_i915_private; struct intel_atomic_state; +struct intel_crtc; struct intel_crtc_state; struct intel_encoder; struct intel_mpllb_state; @@ -33,6 +34,6 @@ int intel_snps_phy_check_hdmi_link_rate(int clock); void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); void intel_mpllb_state_verify(struct intel_atomic_state *state, - struct intel_crtc_state *new_crtc_state); + struct intel_crtc *crtc); #endif /* __INTEL_SNPS_PHY_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h index 91c6dca342b2..044a032e41b9 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.h +++ b/drivers/gpu/drm/i915/display/intel_sprite.h @@ -16,6 +16,7 @@ struct intel_crtc_state; struct intel_plane_state; enum pipe; +#ifdef I915 struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe, int plane); int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, @@ -29,5 +30,12 @@ int hsw_plane_min_cdclk(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); int vlv_plane_min_cdclk(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); +#else +static inline struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv, + int pipe, int plane) +{ + return NULL; +} +#endif #endif /* __INTEL_SPRITE_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index 36b479b46b60..31a79fdfc812 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -1720,7 +1720,7 @@ intel_tv_detect(struct drm_connector *connector, drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] force=%d\n", connector->base.id, connector->name, force); - if (!INTEL_DISPLAY_ENABLED(i915)) + if (!intel_display_device_enabled(i915)) return connector_status_disconnected; if (force) { diff --git 
a/drivers/gpu/drm/i915/display/intel_tv.h b/drivers/gpu/drm/i915/display/intel_tv.h index 44518575ec5c..f08827b8bf2b 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.h +++ b/drivers/gpu/drm/i915/display/intel_tv.h @@ -8,6 +8,12 @@ struct drm_i915_private; +#ifdef I915 void intel_tv_init(struct drm_i915_private *dev_priv); +#else +static inline void intel_tv_init(struct drm_i915_private *dev_priv) +{ +} +#endif #endif /* __INTEL_TV_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c index 286a0bdd28c6..4b98833bfa8c 100644 --- a/drivers/gpu/drm/i915/display/intel_vga.c +++ b/drivers/gpu/drm/i915/display/intel_vga.c @@ -3,11 +3,9 @@ * Copyright © 2019 Intel Corporation */ -#include <linux/pci.h> #include <linux/vgaarb.h> #include <video/vga.h> - #include "soc/intel_gmch.h" #include "i915_drv.h" @@ -99,20 +97,6 @@ void intel_vga_reset_io_mem(struct drm_i915_private *i915) vga_put(pdev, VGA_RSRC_LEGACY_IO); } -static unsigned int -intel_vga_set_decode(struct pci_dev *pdev, bool enable_decode) -{ - struct drm_i915_private *i915 = pdev_to_i915(pdev); - - intel_gmch_vga_set_state(i915, enable_decode); - - if (enable_decode) - return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | - VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; - else - return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; -} - int intel_vga_register(struct drm_i915_private *i915) { @@ -127,7 +111,7 @@ int intel_vga_register(struct drm_i915_private *i915) * then we do not take part in VGA arbitration and the * vga_client_register() fails with -ENODEV. */ - ret = vga_client_register(pdev, intel_vga_set_decode); + ret = vga_client_register(pdev, intel_gmch_vga_set_decode); if (ret && ret != -ENODEV) return ret; diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index 2a30b8aa2994..245a64332cc7 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -1943,13 +1943,16 @@ static enum intel_fbc_id skl_fbc_id_for_pipe(enum pipe pipe) return pipe - PIPE_A + INTEL_FBC_A; } -static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv, +static bool skl_plane_has_fbc(struct drm_i915_private *i915, enum intel_fbc_id fbc_id, enum plane_id plane_id) { - if ((DISPLAY_RUNTIME_INFO(dev_priv)->fbc_mask & BIT(fbc_id)) == 0) + if ((DISPLAY_RUNTIME_INFO(i915)->fbc_mask & BIT(fbc_id)) == 0) return false; - return plane_id == PLANE_PRIMARY; + if (DISPLAY_VER(i915) >= 20) + return icl_is_hdr_plane(i915, plane_id); + else + return plane_id == PLANE_PRIMARY; } static struct intel_fbc *skl_plane_fbc(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c index 846e9a3e94dc..99b8ccdc3dfa 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.c +++ b/drivers/gpu/drm/i915/display/skl_watermark.c @@ -3134,10 +3134,12 @@ static void skl_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915) skl_wm_sanitize(i915); } -void intel_wm_state_verify(struct intel_crtc *crtc, - struct intel_crtc_state *new_crtc_state) +void intel_wm_state_verify(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct skl_hw_state { struct skl_ddb_entry ddb[I915_MAX_PLANES]; struct skl_ddb_entry 
ddb_y[I915_MAX_PLANES]; diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h index edb61e33df83..fb0da36fd3ec 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.h +++ b/drivers/gpu/drm/i915/display/skl_watermark.h @@ -38,8 +38,8 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, const struct skl_ddb_entry *entries, int num_entries, int ignore_idx); -void intel_wm_state_verify(struct intel_crtc *crtc, - struct intel_crtc_state *new_crtc_state); +void intel_wm_state_verify(struct intel_atomic_state *state, + struct intel_crtc *crtc); void skl_watermark_ipc_init(struct drm_i915_private *i915); void skl_watermark_ipc_update(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index a96e7d028c5c..55da627a8b8d 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -23,6 +23,7 @@ * Author: Jani Nikula <jani.nikula@intel.com> */ +#include <linux/dmi.h> #include <linux/slab.h> #include <drm/drm_atomic_helper.h> @@ -1744,6 +1745,126 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) intel_dsi_log_params(intel_dsi); } +typedef void (*vlv_dsi_dmi_quirk_func)(struct intel_dsi *intel_dsi); + +/* + * Vtotal is wrong on the Asus TF103C, leading to the last line of the display + * being shown as the first line. The factory-installed Android has a hardcoded + * modeline, causing it to not suffer from this BIOS bug. + * + * Original mode: "1280x800": 60 67700 1280 1312 1328 1376 800 808 812 820 0x8 0xa + * Fixed mode: "1280x800": 60 67700 1280 1312 1328 1376 800 808 812 816 0x8 0xa + * + * https://gitlab.freedesktop.org/drm/intel/-/issues/9381 + */ +static void vlv_dsi_asus_tf103c_mode_fixup(struct intel_dsi *intel_dsi) +{ + /* Cast away the const as we want to fix up the mode */ + struct drm_display_mode *fixed_mode = (struct drm_display_mode *) + intel_panel_preferred_fixed_mode(intel_dsi->attached_connector); + + if (fixed_mode->vtotal == 820) + fixed_mode->vtotal -= 4; +} + +/* + * On the Lenovo Yoga Tablet 2 830 / 1050 there are 2 problems: + * 1. The I2C MIPI sequence elements reference bus 3. ACPI has I2C1 - I2C7 + * which under Linux become bus 0 - 6. And the MIPI sequence reference + * to bus 3 is intended for I2C3, which is bus 2 under Linux. + * + * Note mipi_exec_i2c() cannot just subtract 1 from the bus + * given in the I2C MIPI sequence element, since on other + * devices the I2C bus-numbers used in the MIPI sequences do + * actually start at 0. + * + * 2. width_/height_mm contain a bogus 192mm x 120mm size. This is + * especially a problem on the 8" 830 version which uses a 10:16 + * portrait screen whereas the bogus size is 16:10. + * + * https://gitlab.freedesktop.org/drm/intel/-/issues/9379 + */ +static void vlv_dsi_lenovo_yoga_tab2_size_fixup(struct intel_dsi *intel_dsi) +{ + const struct drm_display_mode *fixed_mode = + intel_panel_preferred_fixed_mode(intel_dsi->attached_connector); + struct drm_display_info *info = &intel_dsi->attached_connector->base.display_info; + + intel_dsi->i2c_bus_num = 2; + + /* + * The 10" 1050 uses a 1920x1200 landscape screen, whereas the 8" 830 + * uses a 1200x1920 portrait screen. + */ + if (fixed_mode->hdisplay == 1920) { + info->width_mm = 216; + info->height_mm = 135; + } else { + info->width_mm = 107; + info->height_mm = 171; + } +} + +/* + * On the Lenovo Yoga Tab 3 Pro YT3-X90F there are 2 problems: + 1. 
i2c_acpi_find_adapter() picks the wrong adapter causing mipi_exec_i2c() + * to not work. Fix this by setting i2c_bus_num. + * 2. There is no backlight off MIPI sequence, causing the backlight to stay on. + * Add a backlight off sequence mirroring the existing backlight on sequence. + * + * https://gitlab.freedesktop.org/drm/intel/-/issues/9380 + */ +static void vlv_dsi_lenovo_yoga_tab3_backlight_fixup(struct intel_dsi *intel_dsi) +{ + static const u8 backlight_off_sequence[16] = { + /* Header Seq-id 7, length after header 11 bytes */ + 0x07, 0x0b, 0x00, 0x00, 0x00, + /* MIPI_SEQ_ELEM_I2C bus 0 addr 0x2c reg 0x00 data-len 1 data 0x00 */ + 0x04, 0x08, 0x00, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x01, 0x00, + /* MIPI_SEQ_ELEM_END */ + 0x00 + }; + struct intel_connector *connector = intel_dsi->attached_connector; + + intel_dsi->i2c_bus_num = 0; + connector->panel.vbt.dsi.sequence[MIPI_SEQ_BACKLIGHT_OFF] = backlight_off_sequence; +} + +static const struct dmi_system_id vlv_dsi_dmi_quirk_table[] = { + { + /* Asus Transformer Pad TF103C */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"), + }, + .driver_data = (void *)vlv_dsi_asus_tf103c_mode_fixup, + }, + { + /* + * Lenovo Yoga Tablet 2 830F/L or 1050F/L (The 8" and 10" + * Lenovo Yoga Tablet 2 use the same mainboard) + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."), + DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"), + DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"), + /* Partial match on beginning of BIOS version */ + DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"), + }, + .driver_data = (void *)vlv_dsi_lenovo_yoga_tab2_size_fixup, + }, + { + /* Lenovo Yoga Tab 3 Pro YT3-X90F */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"), + }, + .driver_data = (void *)vlv_dsi_lenovo_yoga_tab3_backlight_fixup, + }, + { } +}; + void vlv_dsi_init(struct drm_i915_private *dev_priv) { struct intel_dsi *intel_dsi; @@ -1752,6 +1873,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) struct intel_connector *intel_connector; struct drm_connector *connector; struct drm_display_mode *current_mode; + const struct dmi_system_id *dmi_id; enum port port; enum pipe pipe; @@ -1883,6 +2005,14 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) goto err_cleanup_connector; } + dmi_id = dmi_first_match(vlv_dsi_dmi_quirk_table); + if (dmi_id) { + vlv_dsi_dmi_quirk_func quirk_func = + (vlv_dsi_dmi_quirk_func)dmi_id->driver_data; + + quirk_func(intel_dsi); + } + intel_panel_init(intel_connector, NULL); intel_backlight_setup(intel_connector, INVALID_PIPE); diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.h b/drivers/gpu/drm/i915/display/vlv_dsi.h index 0c2b279df9d4..cf9d7b82f288 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.h +++ b/drivers/gpu/drm/i915/display/vlv_dsi.h @@ -12,8 +12,21 @@ enum port; struct drm_i915_private; struct intel_dsi; +#ifdef I915 void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port); enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt); void vlv_dsi_init(struct drm_i915_private *dev_priv); +#else +static inline void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port) +{ +} +static inline enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt) +{ + return 0; +} +static inline void vlv_dsi_init(struct drm_i915_private *dev_priv) +{ +} +#endif #endif /* __VLV_DSI_H__ */ diff --git 
a/drivers/gpu/drm/i915/display/vlv_dsi_pll.h b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h index ab9291ad1e79..fbe5113dbeb9 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.h +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h @@ -32,7 +32,16 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, struct intel_crtc_state *config); void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); +#ifdef I915 void assert_dsi_pll_enabled(struct drm_i915_private *i915); void assert_dsi_pll_disabled(struct drm_i915_private *i915); +#else +static inline void assert_dsi_pll_enabled(struct drm_i915_private *i915) +{ +} +static inline void assert_dsi_pll_disabled(struct drm_i915_private *i915) +{ +} +#endif #endif /* __VLV_DSI_PLL_H__ */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 6b6d22c19411..0ba955611dfb 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -198,7 +198,7 @@ static void flush_tlb_invalidate(struct drm_i915_gem_object *obj) for_each_gt(gt, i915, id) { if (!obj->mm.tlb[id]) - return; + continue; intel_gt_invalidate_tlb_full(gt, obj->mm.tlb[id]); obj->mm.tlb[id] = 0; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index 214763942aa2..9cb7bbfb4278 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -14,6 +14,7 @@ #include <linux/vmalloc.h> #include "gt/intel_gt_requests.h" +#include "gt/intel_gt.h" #include "i915_trace.h" @@ -119,7 +120,8 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww, intel_wakeref_t wakeref = 0; unsigned long count = 0; unsigned long scanned = 0; - int err = 0; + int err = 0, i = 0; + struct intel_gt *gt; /* CHV + VTD workaround use stop_machine(); need to trylock vm->mutex */ bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915); @@ -147,9 +149,11 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww, * what we can do is give them a kick so that we do not keep idle * contexts around longer than is necessary. 
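* With multi-GT (multi-tile) devices that kick has to reach every GT, hence the for_each_gt() loop below instead of a single to_gt(i915).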
*/ - if (shrink & I915_SHRINK_ACTIVE) - /* Retire requests to unpin all idle contexts */ - intel_gt_retire_requests(to_gt(i915)); + if (shrink & I915_SHRINK_ACTIVE) { + for_each_gt(gt, i915, i) + /* Retire requests to unpin all idle contexts */ + intel_gt_retire_requests(gt); + } /* * As we may completely rewrite the (un)bound list whilst unbinding @@ -389,6 +393,8 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr struct i915_vma *vma, *next; unsigned long freed_pages = 0; intel_wakeref_t wakeref; + struct intel_gt *gt; + int i; with_intel_runtime_pm(&i915->runtime_pm, wakeref) freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL, @@ -397,24 +403,26 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr I915_SHRINK_VMAPS); /* We also want to clear any cached iomaps as they wrap vmap */ - mutex_lock(&to_gt(i915)->ggtt->vm.mutex); - list_for_each_entry_safe(vma, next, - &to_gt(i915)->ggtt->vm.bound_list, vm_link) { - unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT; - struct drm_i915_gem_object *obj = vma->obj; - - if (!vma->iomap || i915_vma_is_active(vma)) - continue; + for_each_gt(gt, i915, i) { + mutex_lock(>->ggtt->vm.mutex); + list_for_each_entry_safe(vma, next, + >->ggtt->vm.bound_list, vm_link) { + unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT; + struct drm_i915_gem_object *obj = vma->obj; + + if (!vma->iomap || i915_vma_is_active(vma)) + continue; - if (!i915_gem_object_trylock(obj, NULL)) - continue; + if (!i915_gem_object_trylock(obj, NULL)) + continue; - if (__i915_vma_unbind(vma) == 0) - freed_pages += count; + if (__i915_vma_unbind(vma) == 0) + freed_pages += count; - i915_gem_object_unlock(obj); + i915_gem_object_unlock(obj); + } + mutex_unlock(>->ggtt->vm.mutex); } - mutex_unlock(&to_gt(i915)->ggtt->vm.mutex); *(unsigned long *)ptr += freed_pages; return NOTIFY_DONE; diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index b58c30ac8ef0..40269e4c1e31 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -170,6 +170,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) #define I915_GEM_HWS_SEQNO 0x40 #define I915_GEM_HWS_SEQNO_ADDR (I915_GEM_HWS_SEQNO * sizeof(u32)) #define I915_GEM_HWS_MIGRATE (0x42 * sizeof(u32)) +#define I915_GEM_HWS_GGTT_BIND 0x46 +#define I915_GEM_HWS_GGTT_BIND_ADDR (I915_GEM_HWS_GGTT_BIND * sizeof(u32)) #define I915_GEM_HWS_PXP 0x60 #define I915_GEM_HWS_PXP_ADDR (I915_GEM_HWS_PXP * sizeof(u32)) #define I915_GEM_HWS_GSC 0x62 diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 84a75c95f3f7..179d9546865b 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -316,10 +316,9 @@ u32 intel_engine_context_size(struct intel_gt *gt, u8 class) * out in the wash. 
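* CXT_SIZE is expressed in 64 byte units, so the context size used below is (CXT_SIZE + 1) * 64 bytes, rounded up to a whole page.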
*/ cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1; - drm_dbg(&gt->i915->drm, - "graphics_ver = %d CXT_SIZE = %d bytes [0x%08x]\n", - GRAPHICS_VER(gt->i915), cxt_size * 64, - cxt_size - 1); + gt_dbg(gt, "graphics_ver = %d CXT_SIZE = %d bytes [0x%08x]\n", + GRAPHICS_VER(gt->i915), cxt_size * 64, + cxt_size - 1); return round_up(cxt_size * 64, PAGE_SIZE); case 3: case 2: @@ -788,7 +787,7 @@ static void engine_mask_apply_media_fuses(struct intel_gt *gt) if (!(BIT(i) & vdbox_mask)) { gt->info.engine_mask &= ~BIT(_VCS(i)); - drm_dbg(&i915->drm, "vcs%u fused off\n", i); + gt_dbg(gt, "vcs%u fused off\n", i); continue; } @@ -796,8 +795,7 @@ static void engine_mask_apply_media_fuses(struct intel_gt *gt) gt->info.vdbox_sfc_access |= BIT(i); logical_vdbox++; } - drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n", - vdbox_mask, VDBOX_MASK(gt)); + gt_dbg(gt, "vdbox enable: %04x, instances: %04lx\n", vdbox_mask, VDBOX_MASK(gt)); GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt)); for (i = 0; i < I915_MAX_VECS; i++) { @@ -808,11 +806,10 @@ static void engine_mask_apply_media_fuses(struct intel_gt *gt) if (!(BIT(i) & vebox_mask)) { gt->info.engine_mask &= ~BIT(_VECS(i)); - drm_dbg(&i915->drm, "vecs%u fused off\n", i); + gt_dbg(gt, "vecs%u fused off\n", i); } } - drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n", - vebox_mask, VEBOX_MASK(gt)); + gt_dbg(gt, "vebox enable: %04x, instances: %04lx\n", vebox_mask, VEBOX_MASK(gt)); GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt)); } @@ -838,7 +835,7 @@ static void engine_mask_apply_compute_fuses(struct intel_gt *gt) */ for_each_clear_bit(i, &ccs_mask, I915_MAX_CCS) { info->engine_mask &= ~BIT(_CCS(i)); - drm_dbg(&i915->drm, "ccs%u fused off\n", i); + gt_dbg(gt, "ccs%u fused off\n", i); } } @@ -866,8 +863,8 @@ static void engine_mask_apply_copy_fuses(struct intel_gt *gt) _BCS(instance)); if (mask & info->engine_mask) { - drm_dbg(&i915->drm, "bcs%u fused off\n", instance); - drm_dbg(&i915->drm, "bcs%u fused off\n", instance + 1); + gt_dbg(gt, "bcs%u fused off\n", instance); + gt_dbg(gt, "bcs%u fused off\n", instance + 1); info->engine_mask &= ~mask; } @@ -907,8 +904,7 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt) * submission, which will wake up the GSC power well. */ if (__HAS_ENGINE(info->engine_mask, GSC0) && !intel_uc_wants_gsc_uc(&gt->uc)) { - drm_notice(&gt->i915->drm, - "No GSC FW selected, disabling GSC CS and media C6\n"); + gt_notice(gt, "No GSC FW selected, disabling GSC CS and media C6\n"); info->engine_mask &= ~BIT(GSC0); } @@ -1097,8 +1093,7 @@ static int init_status_page(struct intel_engine_cs *engine) */ obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); if (IS_ERR(obj)) { - drm_err(&engine->i915->drm, - "Failed to allocate status page\n"); + gt_err(engine->gt, "Failed to allocate status page\n"); return PTR_ERR(obj); } @@ -1419,6 +1414,20 @@ void intel_engine_destroy_pinned_context(struct intel_context *ce) } static struct intel_context * +create_ggtt_bind_context(struct intel_engine_cs *engine) +{ + static struct lock_class_key kernel; + + /* + * MI_UPDATE_GTT can insert up to 511 PTE entries and there could be multiple + * bind requests at a time so get a bigger ring. + */
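+ /* + * Rough sizing, based on the command layout used by gen8_ggtt_bind_ptes(): a full 511-entry MI_UPDATE_GTT batch takes 2 + 2 * 511 = 1024 dwords (4 KiB) of ring space plus per-request overhead, so a SZ_512K ring leaves room for on the order of a hundred outstanding bind batches. + */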
+ return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_512K, + I915_GEM_HWS_GGTT_BIND_ADDR, + &kernel, "ggtt_bind_context"); +} + +static struct intel_context * create_kernel_context(struct intel_engine_cs *engine) { static struct lock_class_key kernel; @@ -1441,7 +1450,7 @@ create_kernel_context(struct intel_engine_cs *engine) */ static int engine_init_common(struct intel_engine_cs *engine) { - struct intel_context *ce; + struct intel_context *ce, *bce = NULL; int ret; engine->set_default_submission(engine); @@ -1457,17 +1466,33 @@ static int engine_init_common(struct intel_engine_cs *engine) ce = create_kernel_context(engine); if (IS_ERR(ce)) return PTR_ERR(ce); + /* + * Create a separate pinned context for GGTT updates with the blitter engine + * if a platform requires such a service. MI_UPDATE_GTT works on other + * engines as well but BCS should be a less busy engine so pick that for + * GGTT updates. + */ + if (i915_ggtt_require_binder(engine->i915) && engine->id == BCS0) { + bce = create_ggtt_bind_context(engine); + if (IS_ERR(bce)) { + ret = PTR_ERR(bce); + goto err_ce_context; + } + } ret = measure_breadcrumb_dw(ce); if (ret < 0) - goto err_context; + goto err_bce_context; engine->emit_fini_breadcrumb_dw = ret; engine->kernel_context = ce; + engine->bind_context = bce; return 0; -err_context: +err_bce_context: + intel_engine_destroy_pinned_context(bce); +err_ce_context: intel_engine_destroy_pinned_context(ce); return ret; } @@ -1537,6 +1562,10 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) if (engine->kernel_context) intel_engine_destroy_pinned_context(engine->kernel_context); + if (engine->bind_context) + intel_engine_destroy_pinned_context(engine->bind_context); + + GEM_BUG_ON(!llist_empty(&engine->barrier_tasks)); cleanup_status_page(engine); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index a7e677598004..8769760257fd 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -402,7 +402,15 @@ struct intel_engine_cs { unsigned long context_tag; - struct rb_node uabi_node; + /* + * The type evolves during initialization, see related comment for + * struct drm_i915_private's uabi_engines member. + */ + union { + struct llist_node uabi_llist; + struct list_head uabi_list; + struct rb_node uabi_node; + }; struct intel_sseu sseu; @@ -416,6 +424,9 @@ struct intel_engine_cs { struct llist_head barrier_tasks; struct intel_context *kernel_context; /* pinned */ + struct intel_context *bind_context; /* pinned, only for BCS0 */ + /* mark the bind context's availability status */ + bool bind_context_ready; /** * pinned_contexts_list: List of pinned contexts. 
This list is only diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c index dcedff41a825..118164ddbb2e 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_user.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c @@ -38,8 +38,7 @@ intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance) void intel_engine_add_user(struct intel_engine_cs *engine) { - llist_add((struct llist_node *)&engine->uabi_node, - (struct llist_head *)&engine->i915->uabi_engines); + llist_add(&engine->uabi_llist, &engine->i915->uabi_engines_llist); } static const u8 uabi_classes[] = { @@ -54,9 +53,9 @@ static int engine_cmp(void *priv, const struct list_head *A, const struct list_head *B) { const struct intel_engine_cs *a = - container_of((struct rb_node *)A, typeof(*a), uabi_node); + container_of(A, typeof(*a), uabi_list); const struct intel_engine_cs *b = - container_of((struct rb_node *)B, typeof(*b), uabi_node); + container_of(B, typeof(*b), uabi_list); if (uabi_classes[a->class] < uabi_classes[b->class]) return -1; @@ -73,7 +72,7 @@ static int engine_cmp(void *priv, const struct list_head *A, static struct llist_node *get_engines(struct drm_i915_private *i915) { - return llist_del_all((struct llist_head *)&i915->uabi_engines); + return llist_del_all(&i915->uabi_engines_llist); } static void sort_engines(struct drm_i915_private *i915, @@ -83,9 +82,8 @@ static void sort_engines(struct drm_i915_private *i915, llist_for_each_safe(pos, next, get_engines(i915)) { struct intel_engine_cs *engine = - container_of((struct rb_node *)pos, typeof(*engine), - uabi_node); - list_add((struct list_head *)&engine->uabi_node, engines); + container_of(pos, typeof(*engine), uabi_llist); + list_add(&engine->uabi_list, engines); } list_sort(NULL, engines, engine_cmp); } @@ -213,8 +211,7 @@ void intel_engines_driver_register(struct drm_i915_private *i915) p = &i915->uabi_engines.rb_node; list_for_each_safe(it, next, &engines) { struct intel_engine_cs *engine = - container_of((struct rb_node *)it, typeof(*engine), - uabi_node); + container_of(it, typeof(*engine), uabi_list); if (intel_gt_has_unrecoverable_error(engine->gt)) continue; /* ignore incomplete engines */ diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index da21f2786b5d..4d7d88b92632 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -15,18 +15,23 @@ #include "display/intel_display.h" #include "gem/i915_gem_lmem.h" +#include "intel_context.h" #include "intel_ggtt_gmch.h" +#include "intel_gpu_commands.h" #include "intel_gt.h" #include "intel_gt_regs.h" #include "intel_pci_config.h" +#include "intel_ring.h" #include "i915_drv.h" #include "i915_pci.h" +#include "i915_request.h" #include "i915_scatterlist.h" #include "i915_utils.h" #include "i915_vgpu.h" #include "intel_gtt.h" #include "gen8_ppgtt.h" +#include "intel_engine_pm.h" static void i915_ggtt_color_adjust(const struct drm_mm_node *node, unsigned long color, @@ -252,6 +257,145 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr, return pte; } +static bool should_update_ggtt_with_bind(struct i915_ggtt *ggtt) +{ + struct intel_gt *gt = ggtt->vm.gt; + + return intel_gt_is_bind_context_ready(gt); +} + +static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt) +{ + struct intel_context *ce; + struct intel_gt *gt = ggtt->vm.gt; + + if (intel_gt_is_wedged(gt)) + return NULL; + + ce = gt->engine[BCS0]->bind_context; + GEM_BUG_ON(!ce); + + /* + * If the GT is not awake 
already at this stage then fall back + * to the PCI-based GGTT update, otherwise __intel_wakeref_get_first() + * would conflict with fs_reclaim trying to allocate memory while + * doing rpm_resume(). + */ + if (!intel_gt_pm_get_if_awake(gt)) + return NULL; + + intel_engine_pm_get(ce->engine); + + return ce; +} + +static void gen8_ggtt_bind_put_ce(struct intel_context *ce) +{ + intel_engine_pm_put(ce->engine); + intel_gt_pm_put(ce->engine->gt); +} + +static bool gen8_ggtt_bind_ptes(struct i915_ggtt *ggtt, u32 offset, + struct sg_table *pages, u32 num_entries, + const gen8_pte_t pte) +{ + struct i915_sched_attr attr = {}; + struct intel_gt *gt = ggtt->vm.gt; + const gen8_pte_t scratch_pte = ggtt->vm.scratch[0]->encode; + struct sgt_iter iter; + struct i915_request *rq; + struct intel_context *ce; + u32 *cs; + + if (!num_entries) + return true; + + ce = gen8_ggtt_bind_get_ce(ggtt); + if (!ce) + return false; + + if (pages) + iter = __sgt_iter(pages->sgl, true); + + while (num_entries) { + int count = 0; + dma_addr_t addr; + /* + * MI_UPDATE_GTT can update 512 entries in a single command, but + * that ends up causing an engine reset; 511 works. + */ + u32 n_ptes = min_t(u32, 511, num_entries); + + if (mutex_lock_interruptible(&ce->timeline->mutex)) + goto put_ce; + + intel_context_enter(ce); + rq = __i915_request_create(ce, GFP_NOWAIT | GFP_ATOMIC); + intel_context_exit(ce); + if (IS_ERR(rq)) { + GT_TRACE(gt, "Failed to get bind request\n"); + mutex_unlock(&ce->timeline->mutex); + goto put_ce; + } + + cs = intel_ring_begin(rq, 2 * n_ptes + 2); + if (IS_ERR(cs)) { + GT_TRACE(gt, "Failed to get ring space for GGTT bind\n"); + i915_request_set_error_once(rq, PTR_ERR(cs)); + /* once a request is created, it must be queued */ + goto queue_err_rq; + } + + *cs++ = MI_UPDATE_GTT | (2 * n_ptes); + *cs++ = offset << 12; + + if (pages) { + for_each_sgt_daddr_next(addr, iter) { + if (count == n_ptes) + break; + *cs++ = lower_32_bits(pte | addr); + *cs++ = upper_32_bits(pte | addr); + count++; + } + /* fill remaining with scratch pte, if any */ + if (count < n_ptes) { + memset64((u64 *)cs, scratch_pte, + n_ptes - count); + cs += (n_ptes - count) * 2; + } + } else { + memset64((u64 *)cs, pte, n_ptes); + cs += n_ptes * 2; + } + + intel_ring_advance(rq, cs); +queue_err_rq: + i915_request_get(rq); + __i915_request_commit(rq); + __i915_request_queue(rq, &attr); + + mutex_unlock(&ce->timeline->mutex); + /* This will return once the request is complete or after an engine reset */ + i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); + if (rq->fence.error) + goto err_rq; + + i915_request_put(rq); + + num_entries -= n_ptes; + offset += n_ptes; + } + + gen8_ggtt_bind_put_ce(ce); + return true; + +err_rq: + i915_request_put(rq); +put_ce: + gen8_ggtt_bind_put_ce(ce); + return false; +} + static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) { writeq(pte, addr); @@ -272,6 +416,21 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm, dma_addr_t addr, u64 offset, unsigned int pat_index, u32 flags) ggtt->invalidate(ggtt); } +static void gen8_ggtt_insert_page_bind(struct i915_address_space *vm, + dma_addr_t addr, u64 offset, + unsigned int pat_index, u32 flags) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + gen8_pte_t pte; + + pte = ggtt->vm.pte_encode(addr, pat_index, flags); + if (should_update_ggtt_with_bind(i915_vm_to_ggtt(vm)) && + gen8_ggtt_bind_ptes(ggtt, offset, NULL, 1, pte)) + return ggtt->invalidate(ggtt); + + gen8_ggtt_insert_page(vm, addr, offset, pat_index, flags); +} + static void gen8_ggtt_insert_entries(struct i915_address_space *vm, struct i915_vma_resource *vma_res, 
unsigned int pat_index, @@ -311,6 +470,50 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, ggtt->invalidate(ggtt); } +static bool __gen8_ggtt_insert_entries_bind(struct i915_address_space *vm, + struct i915_vma_resource *vma_res, + unsigned int pat_index, u32 flags) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + gen8_pte_t scratch_pte = vm->scratch[0]->encode; + gen8_pte_t pte_encode; + u64 start, end; + + pte_encode = ggtt->vm.pte_encode(0, pat_index, flags); + start = (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE; + end = start + vma_res->guard / I915_GTT_PAGE_SIZE; + if (!gen8_ggtt_bind_ptes(ggtt, start, NULL, end - start, scratch_pte)) + goto err; + + start = end; + end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE; + if (!gen8_ggtt_bind_ptes(ggtt, start, vma_res->bi.pages, + vma_res->node_size / I915_GTT_PAGE_SIZE, pte_encode)) + goto err; + + start += vma_res->node_size / I915_GTT_PAGE_SIZE; + if (!gen8_ggtt_bind_ptes(ggtt, start, NULL, end - start, scratch_pte)) + goto err; + + return true; + +err: + return false; +} + +static void gen8_ggtt_insert_entries_bind(struct i915_address_space *vm, + struct i915_vma_resource *vma_res, + unsigned int pat_index, u32 flags) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + + if (should_update_ggtt_with_bind(i915_vm_to_ggtt(vm)) && + __gen8_ggtt_insert_entries_bind(vm, vma_res, pat_index, flags)) + return ggtt->invalidate(ggtt); + + gen8_ggtt_insert_entries(vm, vma_res, pat_index, flags); +} + static void gen8_ggtt_clear_range(struct i915_address_space *vm, u64 start, u64 length) { @@ -332,6 +535,27 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, gen8_set_pte(>t_base[i], scratch_pte); } +static void gen8_ggtt_scratch_range_bind(struct i915_address_space *vm, + u64 start, u64 length) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + unsigned int first_entry = start / I915_GTT_PAGE_SIZE; + unsigned int num_entries = length / I915_GTT_PAGE_SIZE; + const gen8_pte_t scratch_pte = vm->scratch[0]->encode; + const int max_entries = ggtt_total_entries(ggtt) - first_entry; + + if (WARN(num_entries > max_entries, + "First entry = %d; Num entries = %d (max=%d)\n", + first_entry, num_entries, max_entries)) + num_entries = max_entries; + + if (should_update_ggtt_with_bind(ggtt) && gen8_ggtt_bind_ptes(ggtt, first_entry, + NULL, num_entries, scratch_pte)) + return ggtt->invalidate(ggtt); + + gen8_ggtt_clear_range(vm, start, length); +} + static void gen6_ggtt_insert_page(struct i915_address_space *vm, dma_addr_t addr, u64 offset, @@ -1008,6 +1232,17 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; } + if (i915_ggtt_require_binder(i915)) { + ggtt->vm.scratch_range = gen8_ggtt_scratch_range_bind; + ggtt->vm.insert_page = gen8_ggtt_insert_page_bind; + ggtt->vm.insert_entries = gen8_ggtt_insert_entries_bind; + /* + * On GPU is hung, we might bind VMAs for error capture. + * Fallback to CPU GGTT updates in that case. 
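The 511-entry cap noted in gen8_ggtt_bind_ptes() above, together with the 2 * n_ptes + 2 dwords reserved per batch, means a large range is split into ceil(num_entries / 511) MI_UPDATE_GTT requests. A standalone sketch of just that chunking arithmetic (hypothetical user-space illustration, not part of the patch):

#include <stdio.h>

int main(void)
{
        unsigned int num_entries = 2048;        /* hypothetical range size in PTEs */
        unsigned int offset = 0, batch = 0;

        while (num_entries) {
                /* mirror the per-batch cap used by gen8_ggtt_bind_ptes() */
                unsigned int n_ptes = num_entries < 511 ? num_entries : 511;

                printf("batch %u: offset %u, %u PTEs, %u ring dwords\n",
                       batch++, offset, n_ptes, 2 * n_ptes + 2);
                offset += n_ptes;
                num_entries -= n_ptes;
        }
        return 0;
}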
+ */ + ggtt->vm.raw_insert_page = gen8_ggtt_insert_page; + } + if (intel_uc_wants_guc(&ggtt->vm.gt->uc)) ggtt->invalidate = guc_ggtt_invalidate; else diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.c b/drivers/gpu/drm/i915/gt/intel_gsc.c index bcc3605158db..6d440de8ba01 100644 --- a/drivers/gpu/drm/i915/gt/intel_gsc.c +++ b/drivers/gpu/drm/i915/gt/intel_gsc.c @@ -11,6 +11,7 @@ #include "gem/i915_gem_region.h" #include "gt/intel_gsc.h" #include "gt/intel_gt.h" +#include "gt/intel_gt_print.h" #define GSC_BAR_LENGTH 0x00000FFC @@ -49,13 +50,13 @@ gsc_ext_om_alloc(struct intel_gsc *gsc, struct intel_gsc_intf *intf, size_t size I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_CPU_CLEAR); if (IS_ERR(obj)) { - drm_err(>->i915->drm, "Failed to allocate gsc memory\n"); + gt_err(gt, "Failed to allocate gsc memory\n"); return PTR_ERR(obj); } err = i915_gem_object_pin_pages_unlocked(obj); if (err) { - drm_err(>->i915->drm, "Failed to pin pages for gsc memory\n"); + gt_err(gt, "Failed to pin pages for gsc memory\n"); goto out_put; } @@ -286,12 +287,12 @@ static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id) int ret; if (intf_id >= INTEL_GSC_NUM_INTERFACES) { - drm_warn_once(>->i915->drm, "GSC irq: intf_id %d is out of range", intf_id); + gt_warn_once(gt, "GSC irq: intf_id %d is out of range", intf_id); return; } if (!HAS_HECI_GSC(gt->i915)) { - drm_warn_once(>->i915->drm, "GSC irq: not supported"); + gt_warn_once(gt, "GSC irq: not supported"); return; } @@ -300,7 +301,7 @@ static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id) ret = generic_handle_irq(gt->gsc.intf[intf_id].irq); if (ret) - drm_err_ratelimited(>->i915->drm, "error handling GSC irq: %d\n", ret); + gt_err_ratelimited(gt, "error handling GSC irq: %d\n", ret); } void intel_gsc_irq_handler(struct intel_gt *gt, u32 iir) diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 93062c35e072..ed32bf5b1546 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -62,7 +62,13 @@ void intel_gt_common_init_early(struct intel_gt *gt) /* Preliminary initialization of Tile 0 */ int intel_root_gt_init_early(struct drm_i915_private *i915) { - struct intel_gt *gt = to_gt(i915); + struct intel_gt *gt; + + gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL); + if (!gt) + return -ENOMEM; + + i915->gt[0] = gt; gt->i915 = i915; gt->uncore = &i915->uncore; @@ -262,10 +268,21 @@ intel_gt_clear_error_registers(struct intel_gt *gt, I915_MASTER_ERROR_INTERRUPT); } - if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { + /* + * For the media GT, this ring fault register is not replicated, + * so don't do multicast/replicated register read/write operation on it. 
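The same three-way register choice reappears in the i915_gpu_error.c hunk further down: the standalone media GT uses the non-replicated XELPMP_RING_FAULT_REG, Xe_HP-class graphics keep the multicast XEHP_RING_FAULT_REG, and the remaining gen12 parts keep GEN12_RING_FAULT_REG. A condensed, self-contained restatement of that decision (hypothetical types and plain integers, not i915 code):

#include <stdio.h>

enum gt_kind { GT_GRAPHICS, GT_MEDIA_STANDALONE };

static const char *ring_fault_reg(int media_ver, int gfx_ver, int gfx_rel,
                                  enum gt_kind kind)
{
        if (media_ver >= 13 && kind == GT_MEDIA_STANDALONE)
                return "XELPMP_RING_FAULT_REG (plain MMIO, not replicated)";
        if (gfx_ver > 12 || (gfx_ver == 12 && gfx_rel >= 50))
                return "XEHP_RING_FAULT_REG (multicast/MCR access)";
        if (gfx_ver >= 12)
                return "GEN12_RING_FAULT_REG";
        return "older per-engine fault registers (unchanged by this patch)";
}

int main(void)
{
        printf("%s\n", ring_fault_reg(13, 12, 70, GT_MEDIA_STANDALONE));
        printf("%s\n", ring_fault_reg(0, 12, 55, GT_GRAPHICS));
        return 0;
}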
+ */ + if (MEDIA_VER(i915) >= 13 && gt->type == GT_MEDIA) { + intel_uncore_rmw(uncore, XELPMP_RING_FAULT_REG, + RING_FAULT_VALID, 0); + intel_uncore_posting_read(uncore, + XELPMP_RING_FAULT_REG); + + } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { intel_gt_mcr_multicast_rmw(gt, XEHP_RING_FAULT_REG, RING_FAULT_VALID, 0); intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG); + } else if (GRAPHICS_VER(i915) >= 12) { intel_uncore_rmw(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID, 0); intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG); @@ -911,8 +928,6 @@ int intel_gt_probe_all(struct drm_i915_private *i915) if (ret) return ret; - i915->gt[0] = gt; - if (!HAS_EXTRA_GT_LIST(i915)) return 0; @@ -1024,3 +1039,52 @@ bool intel_gt_needs_wa_22016122933(struct intel_gt *gt) { return MEDIA_VER_FULL(gt->i915) == IP_VER(13, 0) && gt->type == GT_MEDIA; } + +static void __intel_gt_bind_context_set_ready(struct intel_gt *gt, bool ready) +{ + struct intel_engine_cs *engine = gt->engine[BCS0]; + + if (engine && engine->bind_context) + engine->bind_context_ready = ready; +} + +/** + * intel_gt_bind_context_set_ready - Set the context binding as ready + * + * @gt: GT structure + * + * This function marks the binder context as ready. + */ +void intel_gt_bind_context_set_ready(struct intel_gt *gt) +{ + __intel_gt_bind_context_set_ready(gt, true); +} + +/** + * intel_gt_bind_context_set_unready - Set the context binding as ready + * @gt: GT structure + * + * This function marks the binder context as not ready. + */ + +void intel_gt_bind_context_set_unready(struct intel_gt *gt) +{ + __intel_gt_bind_context_set_ready(gt, false); +} + +/** + * intel_gt_is_bind_context_ready - Check if context binding is ready + * + * @gt: GT structure + * + * This function returns binder context's ready status. + */ +bool intel_gt_is_bind_context_ready(struct intel_gt *gt) +{ + struct intel_engine_cs *engine = gt->engine[BCS0]; + + if (engine) + return engine->bind_context_ready; + + return false; +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 2cac499d5aa3..970bedf6b78a 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -176,4 +176,7 @@ enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt, struct drm_i915_gem_object *obj, bool always_coherent); +void intel_gt_bind_context_set_ready(struct intel_gt *gt); +void intel_gt_bind_context_set_unready(struct intel_gt *gt); +bool intel_gt_is_bind_context_ready(struct intel_gt *gt); #endif /* __INTEL_GT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c index bf4a933de03a..326c2ed1d99b 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c @@ -420,6 +420,28 @@ void intel_gt_mcr_unlock(struct intel_gt *gt, unsigned long flags) } /** + * intel_gt_mcr_lock_sanitize - Sanitize MCR steering lock + * @gt: GT structure + * + * This will be used to sanitize the initial status of the hardware lock + * during driver load and resume since there won't be any concurrent access + * from other agents at those times, but it's possible that boot firmware + * may have left the lock in a bad state. + * + */ +void intel_gt_mcr_lock_sanitize(struct intel_gt *gt) +{ + /* + * This gets called at load/resume time, so we shouldn't be + * racing with other driver threads grabbing the mcr lock. 
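Taken together with the intel_gt_pm.c hunks below, the three bind-context helpers above form a simple bracket: the flag is dropped before any suspend path and raised again once resume has completed, so the blitter-based GGTT path is only attempted while the bind context is known to be usable. A minimal illustration of that bracketing (hypothetical caller assuming the i915 GT headers, not part of the patch):

#include "intel_gt.h"

/*
 * Hypothetical caller showing the intended bracketing of the new ready
 * flag around a power transition; the real call sites are
 * intel_gt_suspend_prepare()/intel_gt_runtime_suspend() and
 * intel_gt_resume()/intel_gt_runtime_resume() in the hunks below.
 */
static void example_power_cycle(struct intel_gt *gt)
{
        intel_gt_bind_context_set_unready(gt);  /* stop using the blitter path */

        /* ... suspend, power off, power on, resume ... */

        intel_gt_bind_context_set_ready(gt);    /* blitter GGTT updates allowed again */
}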
+ */ + lockdep_assert_not_held(>->mcr_lock); + + if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) + intel_uncore_write_fw(gt->uncore, MTL_STEER_SEMAPHORE, 0x1); +} + +/** * intel_gt_mcr_read - read a specific instance of an MCR register * @gt: GT structure * @reg: the MCR register to read diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.h b/drivers/gpu/drm/i915/gt/intel_gt_mcr.h index 41684495b7da..01ac565a56a4 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.h @@ -11,6 +11,7 @@ void intel_gt_mcr_init(struct intel_gt *gt); void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags); void intel_gt_mcr_unlock(struct intel_gt *gt, unsigned long flags); +void intel_gt_mcr_lock_sanitize(struct intel_gt *gt); u32 intel_gt_mcr_read(struct intel_gt *gt, i915_mcr_reg_t reg, diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 5a942af0a14e..f5899d503e23 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -13,6 +13,7 @@ #include "intel_engine_pm.h" #include "intel_gt.h" #include "intel_gt_clock_utils.h" +#include "intel_gt_mcr.h" #include "intel_gt_pm.h" #include "intel_gt_print.h" #include "intel_gt_requests.h" @@ -216,6 +217,21 @@ void intel_gt_pm_fini(struct intel_gt *gt) intel_rc6_fini(>->rc6); } +void intel_gt_resume_early(struct intel_gt *gt) +{ + /* + * Sanitize steer semaphores during driver resume. This is necessary + * to address observed cases of steer semaphores being + * held after a suspend operation. Confirmation from the hardware team + * assures the safety of this operation, as no lock acquisitions + * by other agents occur during driver load/resume process. + */ + intel_gt_mcr_lock_sanitize(gt); + + intel_uncore_resume_early(gt->uncore); + intel_gt_check_and_clear_faults(gt); +} + int intel_gt_resume(struct intel_gt *gt) { struct intel_engine_cs *engine; @@ -280,6 +296,7 @@ int intel_gt_resume(struct intel_gt *gt) out_fw: intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); intel_gt_pm_put(gt); + intel_gt_bind_context_set_ready(gt); return err; err_wedged: @@ -306,6 +323,7 @@ static void wait_for_suspend(struct intel_gt *gt) void intel_gt_suspend_prepare(struct intel_gt *gt) { + intel_gt_bind_context_set_unready(gt); user_forcewake(gt, true); wait_for_suspend(gt); } @@ -359,6 +377,7 @@ void intel_gt_suspend_late(struct intel_gt *gt) void intel_gt_runtime_suspend(struct intel_gt *gt) { + intel_gt_bind_context_set_unready(gt); intel_uc_runtime_suspend(>->uc); GT_TRACE(gt, "\n"); @@ -376,6 +395,7 @@ int intel_gt_runtime_resume(struct intel_gt *gt) if (ret) return ret; + intel_gt_bind_context_set_ready(gt); return 0; } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h index 6c9a46452364..b1eeb5b33918 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h @@ -78,6 +78,7 @@ void intel_gt_pm_fini(struct intel_gt *gt); void intel_gt_suspend_prepare(struct intel_gt *gt); void intel_gt_suspend_late(struct intel_gt *gt); int intel_gt_resume(struct intel_gt *gt); +void intel_gt_resume_early(struct intel_gt *gt); void intel_gt_runtime_suspend(struct intel_gt *gt); int intel_gt_runtime_resume(struct intel_gt *gt); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_print.h b/drivers/gpu/drm/i915/gt/intel_gt_print.h index 55a336a9ff06..7fdc78c79273 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_print.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_print.h @@ -16,6 +16,9 @@ #define 
gt_warn(_gt, _fmt, ...) \ drm_warn(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__) +#define gt_warn_once(_gt, _fmt, ...) \ + drm_warn_once(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__) + #define gt_notice(_gt, _fmt, ...) \ drm_notice(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h index cca4bac8f8b0..eecd0a87a647 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h @@ -1084,6 +1084,7 @@ #define GEN12_RING_FAULT_REG _MMIO(0xcec4) #define XEHP_RING_FAULT_REG MCR_REG(0xcec4) +#define XELPMP_RING_FAULT_REG _MMIO(0xcec4) #define GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7) #define RING_FAULT_GTTSEL_MASK (1 << 11) #define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff) diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index 13944a14ea2d..4fbed27ef0ec 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -21,6 +21,11 @@ #include "intel_gt_regs.h" #include "intel_gtt.h" +bool i915_ggtt_require_binder(struct drm_i915_private *i915) +{ + /* Wa_13010847436 & Wa_14019519902 */ + return MEDIA_VER_FULL(i915) == IP_VER(13, 0); +} static bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915) { diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h index 346ec8ec2edd..b471edac2699 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.h +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h @@ -171,6 +171,9 @@ struct intel_gt; #define for_each_sgt_daddr(__dp, __iter, __sgt) \ __for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE) +#define for_each_sgt_daddr_next(__dp, __iter) \ + __for_each_daddr_next(__dp, __iter, I915_GTT_PAGE_SIZE) + struct i915_page_table { struct drm_i915_gem_object *base; union { @@ -688,4 +691,6 @@ static inline struct sgt_dma { return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) }; } +bool i915_ggtt_require_binder(struct drm_i915_private *i915); + #endif diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index 07269ff3be13..353f93baaca0 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -487,7 +487,7 @@ static bool has_mocs(const struct drm_i915_private *i915) return !IS_DGFX(i915); } -static unsigned int get_mocs_settings(const struct drm_i915_private *i915, +static unsigned int get_mocs_settings(struct drm_i915_private *i915, struct drm_i915_mocs_table *table) { unsigned int flags; @@ -495,7 +495,7 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915, memset(table, 0, sizeof(struct drm_i915_mocs_table)); table->unused_entries_index = I915_MOCS_PTE; - if (IS_GFX_GT_IP_RANGE(&i915->gt0, IP_VER(12, 70), IP_VER(12, 71))) { + if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 70), IP_VER(12, 71))) { table->size = ARRAY_SIZE(mtl_mocs_table); table->table = mtl_mocs_table; table->n_entries = MTL_NUM_MOCS_ENTRIES; diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index a21e939fdbf6..d5ed904f355d 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -26,6 +26,7 @@ #include "intel_engine_regs.h" #include "intel_gt.h" #include "intel_gt_pm.h" +#include "intel_gt_print.h" #include "intel_gt_requests.h" #include "intel_mchbar_regs.h" #include "intel_pci_config.h" @@ -592,10 +593,10 @@ static int 
gen8_engine_reset_prepare(struct intel_engine_cs *engine) ret = __intel_wait_for_register_fw(uncore, reg, mask, ack, 700, 0, NULL); if (ret) - drm_err(&engine->i915->drm, - "%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n", - engine->name, request, - intel_uncore_read_fw(uncore, reg)); + gt_err(engine->gt, + "%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n", + engine->name, request, + intel_uncore_read_fw(uncore, reg)); return ret; } @@ -1199,17 +1200,16 @@ void intel_gt_reset(struct intel_gt *gt, goto unlock; if (reason) - drm_notice(>->i915->drm, - "Resetting chip for %s\n", reason); + gt_notice(gt, "Resetting chip for %s\n", reason); atomic_inc(>->i915->gpu_error.reset_count); awake = reset_prepare(gt); if (!intel_has_gpu_reset(gt)) { if (gt->i915->params.reset) - drm_err(>->i915->drm, "GPU reset not supported\n"); + gt_err(gt, "GPU reset not supported\n"); else - drm_dbg(>->i915->drm, "GPU reset disabled\n"); + gt_dbg(gt, "GPU reset disabled\n"); goto error; } @@ -1217,7 +1217,7 @@ void intel_gt_reset(struct intel_gt *gt, intel_runtime_pm_disable_interrupts(gt->i915); if (do_reset(gt, stalled_mask)) { - drm_err(>->i915->drm, "Failed to reset chip\n"); + gt_err(gt, "Failed to reset chip\n"); goto taint; } @@ -1236,9 +1236,7 @@ void intel_gt_reset(struct intel_gt *gt, */ ret = intel_gt_init_hw(gt); if (ret) { - drm_err(>->i915->drm, - "Failed to initialise HW following reset (%d)\n", - ret); + gt_err(gt, "Failed to initialise HW following reset (%d)\n", ret); goto taint; } @@ -1605,9 +1603,7 @@ static void intel_wedge_me(struct work_struct *work) { struct intel_wedge_me *w = container_of(work, typeof(*w), work.work); - drm_err(&w->gt->i915->drm, - "%s timed out, cancelling all in-flight rendering.\n", - w->name); + gt_err(w->gt, "%s timed out, cancelling all in-flight rendering.\n", w->name); intel_gt_set_wedged(w->gt); } diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index b86a10b1f534..192ac0e59afa 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -11,6 +11,7 @@ #include "intel_gpu_commands.h" #include "intel_gt.h" #include "intel_gt_mcr.h" +#include "intel_gt_print.h" #include "intel_gt_regs.h" #include "intel_ring.h" #include "intel_workarounds.h" @@ -119,8 +120,8 @@ static void wa_init_finish(struct i915_wa_list *wal) if (!wal->count) return; - drm_dbg(&wal->gt->i915->drm, "Initialized %u %s workarounds on %s\n", - wal->wa_count, wal->name, wal->engine_name); + gt_dbg(wal->gt, "Initialized %u %s workarounds on %s\n", + wal->wa_count, wal->name, wal->engine_name); } static enum forcewake_domains @@ -1780,10 +1781,10 @@ wa_verify(struct intel_gt *gt, const struct i915_wa *wa, u32 cur, const char *name, const char *from) { if ((cur ^ wa->set) & wa->read) { - drm_err(>->i915->drm, - "%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n", - name, from, i915_mmio_reg_offset(wa->reg), - cur, cur & wa->read, wa->set & wa->read); + gt_err(gt, + "%s workaround lost on %s! 
(reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n", + name, from, i915_mmio_reg_offset(wa->reg), + cur, cur & wa->read, wa->set & wa->read); return false; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c index 0d3b22a74365..453d855dd1de 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c @@ -68,8 +68,7 @@ static void gsc_work(struct work_struct *work) * A proxy failure right after firmware load means the proxy-init * step has failed so mark GSC as not usable after this */ - drm_err(>->i915->drm, - "GSC proxy handler failed to init\n"); + gt_err(gt, "GSC proxy handler failed to init\n"); intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL); } goto out_put; @@ -83,11 +82,10 @@ static void gsc_work(struct work_struct *work) * status register to check if the proxy init was actually successful */ if (intel_gsc_uc_fw_proxy_init_done(gsc, false)) { - drm_dbg(>->i915->drm, "GSC Proxy initialized\n"); + gt_dbg(gt, "GSC Proxy initialized\n"); intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_RUNNING); } else { - drm_err(>->i915->drm, - "GSC status reports proxy init not complete\n"); + gt_err(gt, "GSC status reports proxy init not complete\n"); intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL); } } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 27df41c53b89..3f3df1166b86 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -319,6 +319,12 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc) if (!RCS_MASK(gt)) flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST; + /* Wa_14018913170 */ + if (GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 7, 0)) { + if (IS_DG2(gt->i915) || IS_METEORLAKE(gt->i915) || IS_PONTEVECCHIO(gt->i915)) + flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6; + } + return flags; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 6c392bad29c1..818c8c146fd4 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -295,6 +295,7 @@ struct intel_guc { #define MAKE_GUC_VER(maj, min, pat) (((maj) << 16) | ((min) << 8) | (pat)) #define MAKE_GUC_VER_STRUCT(ver) MAKE_GUC_VER((ver).major, (ver).minor, (ver).patch) #define GUC_SUBMIT_VER(guc) MAKE_GUC_VER_STRUCT((guc)->submission_version) +#define GUC_FIRMWARE_VER(guc) MAKE_GUC_VER_STRUCT((guc)->fw.file_selected.ver) static inline struct intel_guc *log_to_guc(struct intel_guc_log *log) { diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c index 6e22af31513a..c33210ead1ef 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c @@ -96,7 +96,7 @@ struct ct_request { struct ct_incoming_msg { struct list_head link; u32 size; - u32 msg[]; + u32 msg[] __counted_by(size); }; enum { CTB_SEND = 0, CTB_RECV = 1 }; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h index b4d56eccfb1f..123ad75d2eb2 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -100,6 +100,7 @@ #define GUC_WA_HOLD_CCS_SWITCHOUT BIT(17) #define GUC_WA_POLLCS BIT(18) #define GUC_WA_RCS_REGS_IN_CCS_REGS_LIST BIT(21) +#define GUC_WA_ENABLE_TSC_CHECK_ON_RC6 BIT(22) #define GUC_CTL_FEATURE 2 #define GUC_CTL_ENABLE_SLPC BIT(2) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 
b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index ae3495a9c814..2cce5ec1ff00 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -4802,19 +4802,19 @@ static void guc_context_replay(struct intel_context *ce) static void guc_handle_context_reset(struct intel_guc *guc, struct intel_context *ce) { + bool capture = intel_context_is_schedulable(ce); + trace_intel_context_reset(ce); - guc_dbg(guc, "Got context reset notification: 0x%04X on %s, exiting = %s, banned = %s\n", + guc_dbg(guc, "%s context reset notification: 0x%04X on %s, exiting = %s, banned = %s\n", + capture ? "Got" : "Ignoring", ce->guc_id.id, ce->engine->name, str_yes_no(intel_context_is_exiting(ce)), str_yes_no(intel_context_is_banned(ce))); - if (likely(intel_context_is_schedulable(ce))) { + if (capture) { capture_error_state(guc, ce); guc_context_replay(ce); - } else { - guc_info(guc, "Ignoring context reset notification of exiting context 0x%04X on %s", - ce->guc_id.id, ce->engine->name); } } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 32e27e9a2490..362639162ed6 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -88,12 +88,12 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, * security fixes, etc. to be enabled. */ #define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_maj, guc_mmp) \ - fw_def(METEORLAKE, 0, guc_maj(mtl, 70, 6, 6)) \ - fw_def(DG2, 0, guc_maj(dg2, 70, 5, 1)) \ - fw_def(ALDERLAKE_P, 0, guc_maj(adlp, 70, 5, 1)) \ + fw_def(METEORLAKE, 0, guc_maj(mtl, 70, 12, 1)) \ + fw_def(DG2, 0, guc_maj(dg2, 70, 12, 1)) \ + fw_def(ALDERLAKE_P, 0, guc_maj(adlp, 70, 12, 1)) \ fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 70, 1, 1)) \ fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 69, 0, 3)) \ - fw_def(ALDERLAKE_S, 0, guc_maj(tgl, 70, 5, 1)) \ + fw_def(ALDERLAKE_S, 0, guc_maj(tgl, 70, 12, 1)) \ fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 70, 1, 1)) \ fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 69, 0, 3)) \ fw_def(DG1, 0, guc_maj(dg1, 70, 5, 1)) \ diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 53a0a42a50db..c57aba09091f 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -39,7 +39,7 @@ #include <asm/kvm_page_track.h> -#include "i915_drv.h" +#include "gt/intel_gt.h" #include "intel_gvt.h" #include "debug.h" @@ -60,6 +60,8 @@ #define GVT_MAX_VGPU 8 +struct engine_mmio; + /* Describe per-platform limitations. 
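The "struct engine_mmio;" forward declaration added to gvt.h above, and the matching removals from interrupt.h and mmio_context.h below, follow the usual header-decoupling rule: a declaration is enough wherever only pointers are passed around, so the full definition can move into the one .c file that uses it. A generic, self-contained illustration of the pattern (not GVT code):

#include <stdio.h>

/* A forward declaration is all the "header" side needs... */
struct engine_state;
static void engine_state_dump(const struct engine_state *st);

/* ...while the full definition stays private to the implementation. */
struct engine_state {
        int id;
        unsigned int flags;
};

static void engine_state_dump(const struct engine_state *st)
{
        printf("engine %d, flags %#x\n", st->id, st->flags);
}

int main(void)
{
        struct engine_state st = { .id = 0, .flags = 0x1 };

        engine_state_dump(&st);
        return 0;
}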
*/ struct intel_gvt_device_info { u32 max_support_vgpus; @@ -368,11 +370,6 @@ struct intel_gvt { struct dentry *debugfs_root; }; -static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915) -{ - return i915->gvt; -} - enum { /* Scheduling trigger by timer */ INTEL_GVT_REQUEST_SCHED = 0, diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index 68eca023bbc6..de3f5903d1a7 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -36,6 +36,23 @@ #include "gvt.h" #include "trace.h" +struct intel_gvt_irq_info { + char *name; + i915_reg_t reg_base; + enum intel_gvt_event_type bit_to_event[INTEL_GVT_IRQ_BITWIDTH]; + unsigned long warned; + int group; + DECLARE_BITMAP(downstream_irq_bitmap, INTEL_GVT_IRQ_BITWIDTH); + bool has_upstream_irq; +}; + +struct intel_gvt_irq_map { + int up_irq_group; + int up_irq_bit; + int down_irq_group; + u32 down_irq_bitmask; +}; + /* common offset among interrupt control registers */ #define regbase_to_isr(base) (base) #define regbase_to_imr(base) (base + 0x4) diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h index b62f04ab47cb..e60ad476fe60 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.h +++ b/drivers/gpu/drm/i915/gvt/interrupt.h @@ -32,10 +32,13 @@ #ifndef _GVT_INTERRUPT_H_ #define _GVT_INTERRUPT_H_ -#include <linux/hrtimer.h> -#include <linux/kernel.h> +#include <linux/bitops.h> -#include "i915_reg_defs.h" +struct intel_gvt; +struct intel_gvt_irq; +struct intel_gvt_irq_info; +struct intel_gvt_irq_map; +struct intel_vgpu; enum intel_gvt_event_type { RCS_MI_USER_INTERRUPT = 0, @@ -138,10 +141,6 @@ enum intel_gvt_event_type { INTEL_GVT_EVENT_MAX, }; -struct intel_gvt_irq; -struct intel_gvt; -struct intel_vgpu; - typedef void (*gvt_event_virt_handler_t)(struct intel_gvt_irq *irq, enum intel_gvt_event_type event, struct intel_vgpu *vgpu); @@ -175,17 +174,6 @@ enum intel_gvt_irq_type { #define INTEL_GVT_IRQ_BITWIDTH 32 -/* device specific interrupt bit definitions */ -struct intel_gvt_irq_info { - char *name; - i915_reg_t reg_base; - enum intel_gvt_event_type bit_to_event[INTEL_GVT_IRQ_BITWIDTH]; - unsigned long warned; - int group; - DECLARE_BITMAP(downstream_irq_bitmap, INTEL_GVT_IRQ_BITWIDTH); - bool has_upstream_irq; -}; - /* per-event information */ struct intel_gvt_event_info { int bit; /* map to register bit */ @@ -194,13 +182,6 @@ struct intel_gvt_event_info { gvt_event_virt_handler_t v_handler; /* for v_event */ }; -struct intel_gvt_irq_map { - int up_irq_group; - int up_irq_bit; - int down_irq_group; - u32 down_irq_bitmask; -}; - /* structure containing device specific IRQ state */ struct intel_gvt_irq { const struct intel_gvt_irq_ops *ops; diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 490e8ae51228..273db14fd5fc 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -45,6 +45,14 @@ #define GEN9_MOCS_SIZE 64 +struct engine_mmio { + enum intel_engine_id id; + i915_reg_t reg; + u32 mask; + bool in_context; + u32 value; +}; + /* Raw offset is appened to each line for convenience. 
*/ static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = { {RCS0, RING_MODE_GEN7(RENDER_RING_BASE), 0xffff, false}, /* 0x229c */ diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h index 9540813b88e5..a821edf574dd 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.h +++ b/drivers/gpu/drm/i915/gvt/mmio_context.h @@ -39,8 +39,6 @@ #include <linux/types.h> #include "gt/intel_engine_regs.h" -#include "gt/intel_engine_types.h" -#include "gt/intel_lrc_reg.h" struct i915_request; struct intel_context; @@ -48,14 +46,6 @@ struct intel_engine_cs; struct intel_gvt; struct intel_vgpu; -struct engine_mmio { - enum intel_engine_id id; - i915_reg_t reg; - u32 mask; - bool in_context; - u32 value; -}; - void intel_gvt_switch_mmio(struct intel_vgpu *pre, struct intel_vgpu *next, const struct intel_engine_cs *engine); diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index d50347e5773a..8a0e2c745e1f 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -71,6 +71,7 @@ #include "gem/i915_gem_pm.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" +#include "gt/intel_gt_print.h" #include "gt/intel_rc6.h" #include "pxp/intel_pxp.h" @@ -338,6 +339,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) /* Try to make sure MCHBAR is enabled before poking at it */ intel_gmch_bar_setup(dev_priv); intel_device_info_runtime_init(dev_priv); + intel_display_device_info_runtime_init(dev_priv); for_each_gt(gt, dev_priv, i) { ret = intel_gt_init_mmio(gt); @@ -428,7 +430,7 @@ static int i915_pcode_init(struct drm_i915_private *i915) for_each_gt(gt, i915, id) { ret = intel_pcode_init(gt->uncore); if (ret) { - drm_err(>->i915->drm, "gt%d: intel_pcode_init failed %d\n", id, ret); + gt_err(gt, "intel_pcode_init failed %d\n", ret); return ret; } } @@ -1327,10 +1329,8 @@ static int i915_drm_resume_early(struct drm_device *dev) drm_err(&dev_priv->drm, "Resume prepare failed: %d, continuing anyway\n", ret); - for_each_gt(gt, dev_priv, i) { - intel_uncore_resume_early(gt->uncore); - intel_gt_check_and_clear_faults(gt); - } + for_each_gt(gt, dev_priv, i) + intel_gt_resume_early(gt); intel_display_power_resume_early(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 511eba3bbdba..cb60fc9cf873 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -222,7 +222,22 @@ struct drm_i915_private { bool mchbar_need_disable; } gmch; - struct rb_root uabi_engines; + /* + * Chaining user engines happens in multiple stages, starting with a + * simple lock-less linked list created by intel_engine_add_user(), + * which later gets sorted and converted to an intermediate regular + * list, just to be converted once again to its final rb tree structure + * in intel_engines_driver_register(). + * + * Make sure to use the right iterator helper, depending on if the code + * in question runs before or after intel_engines_driver_register() -- + * for_each_uabi_engine() can only be used afterwards! 
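The comment above describes a collect-then-sort lifecycle, and the union that follows simply overlays the head structure used at each stage. A standalone sketch of the underlying pattern with generic items (kernel llist/list APIs, but hypothetical types rather than i915's; the driver additionally converts the sorted list into its final rb tree in intel_engines_driver_register()):

#include <linux/container_of.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/llist.h>

struct item {
        struct llist_node llist;        /* stage 1: lock-less publication */
        struct list_head link;          /* stage 2: sortable regular list */
        int key;
};

static LLIST_HEAD(pending_items);

/* Producers may publish from any context without taking a lock. */
static void item_publish(struct item *it)
{
        llist_add(&it->llist, &pending_items);
}

static int item_cmp(void *priv, const struct list_head *a,
                    const struct list_head *b)
{
        const struct item *ia = container_of(a, struct item, link);
        const struct item *ib = container_of(b, struct item, link);

        return ia->key - ib->key;
}

/* A single consumer drains the llist and builds a sorted list from it. */
static void items_finalize(struct list_head *out)
{
        struct llist_node *pos, *next;

        llist_for_each_safe(pos, next, llist_del_all(&pending_items))
                list_add(&container_of(pos, struct item, llist)->link, out);

        list_sort(NULL, out, item_cmp);
}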
+ */ + union { + struct llist_head uabi_engines_llist; + struct list_head uabi_engines_list; + struct rb_root uabi_engines; + }; unsigned int engine_uabi_class_count[I915_LAST_UABI_ENGINE_CLASS + 1]; /* protects the irq masks */ @@ -317,12 +332,6 @@ struct drm_i915_private { struct i915_hwmon *hwmon; - /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ - struct intel_gt gt0; - - /* - * i915->gt[0] == &i915->gt0 - */ struct intel_gt *gt[I915_MAX_GT]; struct kobject *sysfs_gt; @@ -382,9 +391,9 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev) return pci_get_drvdata(pdev); } -static inline struct intel_gt *to_gt(struct drm_i915_private *i915) +static inline struct intel_gt *to_gt(const struct drm_i915_private *i915) { - return &i915->gt0; + return i915->gt[0]; } /* Simple iterator over all initialised engines */ @@ -416,8 +425,6 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915) #define INTEL_INFO(i915) ((i915)->__info) #define RUNTIME_INFO(i915) (&(i915)->__runtime) -#define DISPLAY_INFO(i915) ((i915)->display.info.__device_info) -#define DISPLAY_RUNTIME_INFO(i915) (&(i915)->display.info.__runtime_info) #define DRIVER_CAPS(i915) (&(i915)->caps) #define INTEL_DEVID(i915) (RUNTIME_INFO(i915)->device_id) @@ -436,12 +443,6 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915) #define IS_MEDIA_VER(i915, from, until) \ (MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until)) -#define DISPLAY_VER(i915) (DISPLAY_RUNTIME_INFO(i915)->ip.ver) -#define DISPLAY_VER_FULL(i915) IP_VER(DISPLAY_RUNTIME_INFO(i915)->ip.ver, \ - DISPLAY_RUNTIME_INFO(i915)->ip.rel) -#define IS_DISPLAY_VER(i915, from, until) \ - (DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until)) - #define INTEL_REVID(i915) (to_pci_dev((i915)->drm.dev)->revision) #define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step) @@ -790,12 +791,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define NUM_L3_SLICES(i915) (IS_HASWELL_GT3(i915) ? \ 2 : HAS_L3_DPF(i915)) -/* Only valid when HAS_DISPLAY() is true */ -#define INTEL_DISPLAY_ENABLED(i915) \ - (drm_WARN_ON(&(i915)->drm, !HAS_DISPLAY(i915)), \ - !(i915)->params.disable_display && \ - !intel_opregion_headless_sku(i915)) - #define HAS_GUC_DEPRIVILEGE(i915) \ (INTEL_INFO(i915)->has_guc_deprivilege) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 147d5b95b9ac..c166ad5e187a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1199,6 +1199,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv) goto err_unlock; } + /* + * Register engines early to ensure the engine list is in its final + * rb-tree form, lowering the amount of code that has to deal with + * the intermediate llist state. 
+ */ + intel_engines_driver_register(dev_priv); + return 0; /* @@ -1246,8 +1253,6 @@ err_unlock: void i915_gem_driver_register(struct drm_i915_private *i915) { i915_gem_driver_register__shrinker(i915); - - intel_engines_driver_register(i915); } void i915_gem_driver_unregister(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index f4ebcfb70289..b4e31e59c799 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1234,7 +1234,16 @@ static void engine_record_registers(struct intel_engine_coredump *ee) if (GRAPHICS_VER(i915) >= 6) { ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL); - if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) + /* + * For the media GT, this ring fault register is not replicated, + * so don't do multicast/replicated register read/write + * operation on it. + */ + if (MEDIA_VER(i915) >= 13 && engine->gt->type == GT_MEDIA) + ee->fault_reg = intel_uncore_read(engine->uncore, + XELPMP_RING_FAULT_REG); + + else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) ee->fault_reg = intel_gt_mcr_read_any(engine->gt, XEHP_RING_FAULT_REG); else if (GRAPHICS_VER(i915) >= 12) diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 0a171b57fd8f..036c4c3ed6ed 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -137,11 +137,6 @@ i915_param_named_unsafe(enable_ips, int, 0400, "Enable IPS (default: true)"); i915_param_named_unsafe(enable_dpt, bool, 0400, "Enable display page table (DPT) (default: true)"); -i915_param_named(fastboot, int, 0400, - "Try to skip unnecessary mode sets at boot time " - "(0=disabled, 1=enabled) " - "Default: -1 (use per-chip default)"); - i915_param_named_unsafe(load_detect_test, bool, 0400, "Force-enable the VGA load detect code for testing (default:false). " "For developers only."); diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 68abf0ad6c00..d5194b039aab 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -72,7 +72,6 @@ struct drm_printer; param(int, edp_vswing, 0, 0400) \ param(unsigned int, reset, 3, 0600) \ param(unsigned int, inject_probe_failure, 0, 0) \ - param(int, fastboot, -1, 0600) \ param(int, enable_dpcd_backlight, -1, 0600) \ param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE, 0400) \ param(unsigned int, request_timeout_ms, CONFIG_DRM_I915_REQUEST_TIMEOUT, CONFIG_DRM_I915_REQUEST_TIMEOUT ? 
0600 : 0) \ diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 1347e4ec9dd5..8f7ab64feec0 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -206,6 +206,7 @@ #include "gt/intel_gt.h" #include "gt/intel_gt_clock_utils.h" #include "gt/intel_gt_mcr.h" +#include "gt/intel_gt_print.h" #include "gt/intel_gt_regs.h" #include "gt/intel_lrc.h" #include "gt/intel_lrc_reg.h" @@ -1659,9 +1660,8 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream) free_noa_wait(stream); if (perf->spurious_report_rs.missed) { - drm_notice(>->i915->drm, - "%d spurious OA report notices suppressed due to ratelimiting\n", - perf->spurious_report_rs.missed); + gt_notice(gt, "%d spurious OA report notices suppressed due to ratelimiting\n", + perf->spurious_report_rs.missed); } } @@ -1852,7 +1852,7 @@ static int alloc_oa_buffer(struct i915_perf_stream *stream) */ ret = i915_vma_pin(vma, 0, SZ_16M, PIN_GLOBAL | PIN_HIGH); if (ret) { - drm_err(>->i915->drm, "Failed to pin OA buffer %d\n", ret); + gt_err(gt, "Failed to pin OA buffer %d\n", ret); goto err_unref; } diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index d35973b41186..108b675088ba 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -696,12 +696,11 @@ static void i915_pmu_event_read(struct perf_event *event) event->hw.state = PERF_HES_STOPPED; return; } -again: - prev = local64_read(&hwc->prev_count); - new = __i915_pmu_event_read(event); - if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev) - goto again; + prev = local64_read(&hwc->prev_count); + do { + new = __i915_pmu_event_read(event); + } while (!local64_try_cmpxchg(&hwc->prev_count, &prev, new)); local64_add(new - prev, &event->count); } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index e0ea2dc13556..135e8d8dbdf0 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1327,6 +1327,8 @@ #define DPFC_CTL_PLANE_IVB(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_IVB, (i9xx_plane)) #define DPFC_CTL_FENCE_EN_IVB REG_BIT(28) /* ivb+ */ #define DPFC_CTL_PERSISTENT_MODE REG_BIT(25) /* g4x-snb */ +#define DPFC_CTL_PLANE_BINDING_MASK REG_GENMASK(12, 11) /* lnl+ */ +#define DPFC_CTL_PLANE_BINDING(plane_id) REG_FIELD_PREP(DPFC_CTL_PLANE_BINDING_MASK, (plane_id)) #define DPFC_CTL_FALSE_COLOR REG_BIT(10) /* ivb+ */ #define DPFC_CTL_SR_EN REG_BIT(10) /* g4x only */ #define DPFC_CTL_SR_EXIT_DIS REG_BIT(9) /* g4x only */ @@ -4678,6 +4680,13 @@ #define TGL_DFSM_PIPE_D_DISABLE (1 << 22) #define GLK_DFSM_DISPLAY_DSC_DISABLE (1 << 7) +#define XE2LPD_DE_CAP _MMIO(0x41100) +#define XE2LPD_DE_CAP_3DLUT_MASK REG_GENMASK(31, 30) +#define XE2LPD_DE_CAP_DSC_MASK REG_GENMASK(29, 28) +#define XE2LPD_DE_CAP_DSC_REMOVED 1 +#define XE2LPD_DE_CAP_SCALER_MASK REG_GENMASK(27, 26) +#define XE2LPD_DE_CAP_SCALER_SINGLE 1 + #define SKL_DSSM _MMIO(0x51004) #define ICL_DSSM_CDCLK_PLL_REFCLK_MASK (7 << 29) #define ICL_DSSM_CDCLK_PLL_REFCLK_24MHz (0 << 29) diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h index 5a10c1a31183..6cf8a298849f 100644 --- a/drivers/gpu/drm/i915/i915_scatterlist.h +++ b/drivers/gpu/drm/i915/i915_scatterlist.h @@ -91,6 +91,16 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg) ((__dp) = (__iter).dma + (__iter).curr), (__iter).sgp; \ (((__iter).curr += (__step)) >= (__iter).max) ? 
\ (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0) +/** + * __for_each_daddr_next - iterates over the device addresses with pre-initialized iterator. + * @__dp: Device address (output) + * @__iter: 'struct sgt_iter' (iterator state, external) + * @__step: step size + */ +#define __for_each_daddr_next(__dp, __iter, __step) \ + for (; ((__dp) = (__iter).dma + (__iter).curr), (__iter).sgp; \ + (((__iter).curr += (__step)) >= (__iter).max) ? \ + (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0) /** * for_each_sgt_page - iterate over the pages of the given sg_table diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index d2ed0f057cb2..59bea1398c91 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -27,7 +27,6 @@ #include <drm/drm_print.h> #include <drm/i915_pciids.h> -#include "display/intel_display_device.h" #include "gt/intel_gt_regs.h" #include "i915_drv.h" #include "i915_reg.h" @@ -232,19 +231,15 @@ static void intel_device_info_subplatform_init(struct drm_i915_private *i915) if (find_devid(devid, subplatform_ult_ids, ARRAY_SIZE(subplatform_ult_ids))) { mask = BIT(INTEL_SUBPLATFORM_ULT); - if (IS_HASWELL(i915) || IS_BROADWELL(i915)) - DISPLAY_RUNTIME_INFO(i915)->port_mask &= ~BIT(PORT_D); } else if (find_devid(devid, subplatform_ulx_ids, ARRAY_SIZE(subplatform_ulx_ids))) { mask = BIT(INTEL_SUBPLATFORM_ULX); if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { /* ULX machines are also considered ULT. */ mask |= BIT(INTEL_SUBPLATFORM_ULT); - DISPLAY_RUNTIME_INFO(i915)->port_mask &= ~BIT(PORT_D); } } else if (find_devid(devid, subplatform_portf_ids, ARRAY_SIZE(subplatform_portf_ids))) { - DISPLAY_RUNTIME_INFO(i915)->port_mask |= BIT(PORT_F); mask = BIT(INTEL_SUBPLATFORM_PORTF); } else if (find_devid(devid, subplatform_uy_ids, ARRAY_SIZE(subplatform_uy_ids))) { @@ -350,8 +345,6 @@ void intel_device_info_runtime_init_early(struct drm_i915_private *i915) intel_device_info_subplatform_init(i915); } -static const struct intel_display_device_info no_display = {}; - /** * intel_device_info_runtime_init - initialize runtime info * @dev_priv: the i915 device @@ -372,21 +365,6 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) { struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv); - if (HAS_DISPLAY(dev_priv)) - intel_display_device_info_runtime_init(dev_priv); - - /* Display may have been disabled by runtime init */ - if (!HAS_DISPLAY(dev_priv)) { - dev_priv->drm.driver_features &= ~(DRIVER_MODESET | - DRIVER_ATOMIC); - dev_priv->display.info.__device_info = &no_display; - } - - /* Disable nuclear pageflip by default on pre-g4x */ - if (!dev_priv->params.nuclear_pageflip && - DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) - dev_priv->drm.driver_features &= ~DRIVER_ATOMIC; - BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES); if (GRAPHICS_VER(dev_priv) == 6 && i915_vtd_active(dev_priv)) { diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c index b3c036a54529..87ecc5104fd9 100644 --- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c +++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c @@ -15,8 +15,9 @@ #include "display/intel_psr_regs.h" #include "display/skl_watermark_regs.h" #include "display/vlv_dsi_pll_regs.h" +#include "gt/intel_engine_regs.h" #include "gt/intel_gt_regs.h" -#include "gvt/gvt.h" +#include "gvt/reg.h" #include "i915_drv.h" #include "i915_pvinfo.h" diff --git 
a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c index ee4e5a2c0220..b4162f1be765 100644 --- a/drivers/gpu/drm/i915/intel_step.c +++ b/drivers/gpu/drm/i915/intel_step.c @@ -353,3 +353,8 @@ const char *intel_step_name(enum intel_step step) return "**"; } } + +const char *intel_display_step_name(struct drm_i915_private *i915) +{ + return intel_step_name(RUNTIME_INFO(i915)->step.display_step); +} diff --git a/drivers/gpu/drm/i915/intel_step.h b/drivers/gpu/drm/i915/intel_step.h index 96dfca4cba73..b6f43b624774 100644 --- a/drivers/gpu/drm/i915/intel_step.h +++ b/drivers/gpu/drm/i915/intel_step.h @@ -78,5 +78,6 @@ enum intel_step { void intel_step_init(struct drm_i915_private *i915); const char *intel_step_name(enum intel_step step); +const char *intel_display_step_name(struct drm_i915_private *i915); #endif /* __INTEL_STEP_H__ */ diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c index 718f2f1b6174..623a69089386 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.c +++ b/drivers/gpu/drm/i915/intel_wakeref.c @@ -10,21 +10,12 @@ #include "intel_wakeref.h" #include "i915_drv.h" -static void rpm_get(struct intel_wakeref *wf) -{ - wf->wakeref = intel_runtime_pm_get(&wf->i915->runtime_pm); -} - -static void rpm_put(struct intel_wakeref *wf) -{ - intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref); - - intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref); - INTEL_WAKEREF_BUG_ON(!wakeref); -} - int __intel_wakeref_get_first(struct intel_wakeref *wf) { + intel_wakeref_t wakeref; + int ret = 0; + + wakeref = intel_runtime_pm_get(&wf->i915->runtime_pm); /* * Treat get/put as different subclasses, as we may need to run * the put callback from under the shrinker and do not want to @@ -32,41 +23,52 @@ int __intel_wakeref_get_first(struct intel_wakeref *wf) * upon acquiring the wakeref. 
*/ mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING); - if (!atomic_read(&wf->count)) { - int err; - - rpm_get(wf); - err = wf->ops->get(wf); - if (unlikely(err)) { - rpm_put(wf); - mutex_unlock(&wf->mutex); - return err; + if (!atomic_read(&wf->count)) { + INTEL_WAKEREF_BUG_ON(wf->wakeref); + wf->wakeref = wakeref; + wakeref = 0; + + ret = wf->ops->get(wf); + if (ret) { + wakeref = xchg(&wf->wakeref, 0); + wake_up_var(&wf->wakeref); + goto unlock; } smp_mb__before_atomic(); /* release wf->count */ } + atomic_inc(&wf->count); + INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0); + +unlock: mutex_unlock(&wf->mutex); + if (unlikely(wakeref)) + intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref); - INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0); - return 0; + return ret; } static void ____intel_wakeref_put_last(struct intel_wakeref *wf) { + intel_wakeref_t wakeref = 0; + INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0); if (unlikely(!atomic_dec_and_test(&wf->count))) goto unlock; /* ops->put() must reschedule its own release on error/deferral */ if (likely(!wf->ops->put(wf))) { - rpm_put(wf); + INTEL_WAKEREF_BUG_ON(!wf->wakeref); + wakeref = xchg(&wf->wakeref, 0); wake_up_var(&wf->wakeref); } unlock: mutex_unlock(&wf->mutex); + if (wakeref) + intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref); } void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags) diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index a9b79888c193..acae30a04a94 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -1924,7 +1924,7 @@ struct perf_stats { struct perf_series { struct drm_i915_private *i915; unsigned int nengines; - struct intel_context *ce[]; + struct intel_context *ce[] __counted_by(nengines); }; static int cmp_u32(const void *A, const void *B) diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 7de6477803f8..af349fd9abc2 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -114,7 +114,6 @@ static struct dev_pm_domain pm_domain = { static void mock_gt_probe(struct drm_i915_private *i915) { - i915->gt[0] = to_gt(i915); i915->gt[0]->name = "Mock GT"; } diff --git a/drivers/gpu/drm/i915/soc/intel_gmch.c b/drivers/gpu/drm/i915/soc/intel_gmch.c index 49c7fb16e934..f32e9f78770a 100644 --- a/drivers/gpu/drm/i915/soc/intel_gmch.c +++ b/drivers/gpu/drm/i915/soc/intel_gmch.c @@ -5,6 +5,7 @@ #include <linux/pci.h> #include <linux/pnp.h> +#include <linux/vgaarb.h> #include <drm/drm_managed.h> #include <drm/i915_drm.h> @@ -167,3 +168,16 @@ int intel_gmch_vga_set_state(struct drm_i915_private *i915, bool enable_decode) return 0; } + +unsigned int intel_gmch_vga_set_decode(struct pci_dev *pdev, bool enable_decode) +{ + struct drm_i915_private *i915 = pdev_to_i915(pdev); + + intel_gmch_vga_set_state(i915, enable_decode); + + if (enable_decode) + return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | + VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; + else + return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; +} diff --git a/drivers/gpu/drm/i915/soc/intel_gmch.h b/drivers/gpu/drm/i915/soc/intel_gmch.h index d0133eedc720..23be2d113afd 100644 --- a/drivers/gpu/drm/i915/soc/intel_gmch.h +++ b/drivers/gpu/drm/i915/soc/intel_gmch.h @@ -8,11 +8,13 @@ #include <linux/types.h> +struct pci_dev; struct drm_i915_private; int intel_gmch_bridge_setup(struct drm_i915_private 
*i915); void intel_gmch_bar_setup(struct drm_i915_private *i915); void intel_gmch_bar_teardown(struct drm_i915_private *i915); int intel_gmch_vga_set_state(struct drm_i915_private *i915, bool enable_decode); +unsigned int intel_gmch_vga_set_decode(struct pci_dev *pdev, bool enable_decode); #endif /* __INTEL_GMCH_H__ */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h index dab761e54863..50cf9523d367 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h @@ -61,7 +61,7 @@ struct dpu_hw_intr { void (*cb)(void *arg, int irq_idx); void *arg; atomic_t count; - } irq_tbl[]; + } irq_tbl[] __counted_by(total_irqs); }; /** diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 52f1569ee37c..a0ac8c258d9f 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -1560,15 +1560,13 @@ nv50_sor_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *st { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nv50_head *head = nv50_head(nv_encoder->crtc); - struct nouveau_connector *nv_connector = nv50_outp_get_old_connector(state, nv_encoder); #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT + struct nouveau_connector *nv_connector = nv50_outp_get_old_connector(state, nv_encoder); struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); struct nouveau_backlight *backlight = nv_connector->backlight; -#endif struct drm_dp_aux *aux = &nv_connector->aux; int ret; -#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT if (backlight && backlight->uses_dpcd) { ret = drm_edp_backlight_disable(aux, &backlight->edp_info); if (ret < 0) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h index 6ae25d3e7f45..c011227f7052 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h @@ -82,7 +82,7 @@ struct nvkm_perfdom { u8 mode; u32 clk; u16 signal_nr; - struct nvkm_perfsig signal[]; + struct nvkm_perfsig signal[] __counted_by(signal_nr); }; struct nvkm_funcdom { diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index 2d6d96ee3547..ecb22ea326cb 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -516,6 +516,15 @@ config DRM_PANEL_RAYDIUM_RM68200 Say Y here if you want to enable support for Raydium RM68200 720x1280 DSI video mode panel. +config DRM_PANEL_RAYDIUM_RM692E5 + tristate "Raydium RM692E5-based DSI panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y here if you want to enable support for Raydium RM692E5-based + display panels, such as the one found in the Fairphone 5 smartphone. 
+ config DRM_PANEL_RONBO_RB070D30 tristate "Ronbo Electronics RB070D30 panel" depends on OF diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index 157c77ff157f..e14ce55a0875 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -49,6 +49,7 @@ obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen.o obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM67191) += panel-raydium-rm67191.o obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o +obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM692E5) += panel-raydium-rm692e5.o obj-$(CONFIG_DRM_PANEL_RONBO_RB070D30) += panel-ronbo-rb070d30.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20) += panel-samsung-atna33xc20.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_DB7430) += panel-samsung-db7430.o diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c index abb0788843c6..503ecea72c5e 100644 --- a/drivers/gpu/drm/panel/panel-arm-versatile.c +++ b/drivers/gpu/drm/panel/panel-arm-versatile.c @@ -267,6 +267,8 @@ static int versatile_panel_get_modes(struct drm_panel *panel, connector->display_info.bus_flags = vpanel->panel_type->bus_flags; mode = drm_mode_duplicate(connector->dev, &vpanel->panel_type->mode); + if (!mode) + return -ENOMEM; drm_mode_set_name(mode); mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c index 61c872f0f7ca..4a6dcfd781e8 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c @@ -325,11 +325,6 @@ static struct regmap_bus ili9322_regmap_bus = { .val_format_endian_default = REGMAP_ENDIAN_BIG, }; -static bool ili9322_volatile_reg(struct device *dev, unsigned int reg) -{ - return false; -} - static bool ili9322_writeable_reg(struct device *dev, unsigned int reg) { /* Just register 0 is read-only */ @@ -342,8 +337,7 @@ static const struct regmap_config ili9322_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = 0x44, - .cache_type = REGCACHE_RBTREE, - .volatile_reg = ili9322_volatile_reg, + .cache_type = REGCACHE_MAPLE, .writeable_reg = ili9322_writeable_reg, }; diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c index d41482d3a34f..6e3670508e3a 100644 --- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c +++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c @@ -24,6 +24,7 @@ struct ltk050h3146w_cmd { struct ltk050h3146w; struct ltk050h3146w_desc { + const unsigned long mode_flags; const struct drm_display_mode *mode; int (*init)(struct ltk050h3146w *ctx); }; @@ -243,6 +244,91 @@ struct ltk050h3146w *panel_to_ltk050h3146w(struct drm_panel *panel) return container_of(panel, struct ltk050h3146w, panel); } +static int ltk050h3148w_init_sequence(struct ltk050h3146w *ctx) +{ + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); + int ret; + + /* + * Init sequence was supplied by the panel vendor without much + * documentation. 
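Further down, the ltk050h3146w changes move the DSI mode_flags into the per-variant descriptor, so the new ltk050h3148w entry can request sync-pulse video mode while the existing variants keep their burst-mode flags. The lookup side of that per-compatible descriptor pattern, reduced to a generic sketch (hypothetical driver code and compatible strings, not the panel driver itself):

#include <drm/drm_mipi_dsi.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/mod_devicetable.h>
#include <linux/of_device.h>

struct panel_desc {
        unsigned long mode_flags;       /* per-compatible DSI mode flags */
};

static const struct panel_desc variant_sync_pulse = {
        .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
};

static const struct panel_desc variant_burst = {
        .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST,
};

/* Each compatible string carries its own descriptor via .data. */
static const struct of_device_id panel_of_match[] = {
        { .compatible = "vendor,panel-a", .data = &variant_sync_pulse },
        { .compatible = "vendor,panel-b", .data = &variant_burst },
        { /* sentinel */ }
};

static int panel_pick_desc(struct device *dev, const struct panel_desc **out)
{
        const struct panel_desc *desc = of_device_get_match_data(dev);

        if (!desc)
                return -ENODEV;

        *out = desc;    /* probe() then copies desc->mode_flags into dsi->mode_flags */
        return 0;
}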
+ */ + mipi_dsi_dcs_write_seq(dsi, 0xb9, 0xff, 0x83, 0x94); + mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x50, 0x15, 0x75, 0x09, 0x32, 0x44, + 0x71, 0x31, 0x55, 0x2f); + mipi_dsi_dcs_write_seq(dsi, 0xba, 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0); + mipi_dsi_dcs_write_seq(dsi, 0xd2, 0x88); + mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0x80, 0x64, 0x10, 0x07); + mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x05, 0x70, 0x05, 0x70, 0x01, 0x70, + 0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f, 0x01, 0x74, + 0x01, 0x74, 0x01, 0x74, 0x01, 0x0c, 0x86); + mipi_dsi_dcs_write_seq(dsi, 0xd3, 0x00, 0x00, 0x07, 0x07, 0x40, 0x1e, + 0x08, 0x00, 0x32, 0x10, 0x08, 0x00, 0x08, 0x54, + 0x15, 0x10, 0x05, 0x04, 0x02, 0x12, 0x10, 0x05, + 0x07, 0x33, 0x34, 0x0c, 0x0c, 0x37, 0x10, 0x07, + 0x17, 0x11, 0x40); + mipi_dsi_dcs_write_seq(dsi, 0xd5, 0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b, + 0x1a, 0x1a, 0x04, 0x05, 0x06, 0x07, 0x00, 0x01, + 0x02, 0x03, 0x20, 0x21, 0x18, 0x18, 0x22, 0x23, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18); + mipi_dsi_dcs_write_seq(dsi, 0xd6, 0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b, + 0x1a, 0x1a, 0x03, 0x02, 0x01, 0x00, 0x07, 0x06, + 0x05, 0x04, 0x23, 0x22, 0x18, 0x18, 0x21, 0x20, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18); + mipi_dsi_dcs_write_seq(dsi, 0xe0, 0x00, 0x03, 0x09, 0x11, 0x11, 0x14, + 0x18, 0x16, 0x2e, 0x3d, 0x4d, 0x4d, 0x58, 0x6c, + 0x72, 0x78, 0x88, 0x8b, 0x86, 0xa4, 0xb2, 0x58, + 0x55, 0x59, 0x5b, 0x5d, 0x60, 0x64, 0x7f, 0x00, + 0x03, 0x09, 0x0f, 0x11, 0x14, 0x18, 0x16, 0x2e, + 0x3d, 0x4d, 0x4d, 0x58, 0x6d, 0x73, 0x78, 0x88, + 0x8b, 0x87, 0xa5, 0xb2, 0x58, 0x55, 0x58, 0x5b, + 0x5d, 0x61, 0x65, 0x7f); + mipi_dsi_dcs_write_seq(dsi, 0xcc, 0x0b); + mipi_dsi_dcs_write_seq(dsi, 0xc0, 0x1f, 0x31); + mipi_dsi_dcs_write_seq(dsi, 0xb6, 0xc4, 0xc4); + mipi_dsi_dcs_write_seq(dsi, 0xbd, 0x01); + mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00); + mipi_dsi_dcs_write_seq(dsi, 0xbd, 0x00); + mipi_dsi_dcs_write_seq(dsi, 0xc6, 0xef); + mipi_dsi_dcs_write_seq(dsi, 0xd4, 0x02); + mipi_dsi_dcs_write_seq(dsi, 0x11); + mipi_dsi_dcs_write_seq(dsi, 0x29); + + ret = mipi_dsi_dcs_set_tear_on(dsi, 1); + if (ret < 0) { + dev_err(ctx->dev, "failed to set tear on: %d\n", ret); + return ret; + } + + msleep(60); + + return 0; +} + +static const struct drm_display_mode ltk050h3148w_mode = { + .hdisplay = 720, + .hsync_start = 720 + 12, + .hsync_end = 720 + 12 + 6, + .htotal = 720 + 12 + 6 + 24, + .vdisplay = 1280, + .vsync_start = 1280 + 9, + .vsync_end = 1280 + 9 + 2, + .vtotal = 1280 + 9 + 2 + 16, + .clock = 59756, + .width_mm = 62, + .height_mm = 110, +}; + +static const struct ltk050h3146w_desc ltk050h3148w_data = { + .mode = <k050h3148w_mode, + .init = ltk050h3148w_init_sequence, + .mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE, +}; + static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx) { struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); @@ -330,6 +416,8 @@ static const struct drm_display_mode ltk050h3146w_mode = { static const struct ltk050h3146w_desc ltk050h3146w_data = { .mode = <k050h3146w_mode, .init = ltk050h3146w_init_sequence, + .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET, }; static int ltk050h3146w_a2_select_page(struct ltk050h3146w *ctx, int page) @@ -424,6 +512,8 @@ static const struct drm_display_mode ltk050h3146w_a2_mode = { static const struct ltk050h3146w_desc ltk050h3146w_a2_data = 
{ .mode = <k050h3146w_a2_mode, .init = ltk050h3146w_a2_init_sequence, + .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET, }; static int ltk050h3146w_unprepare(struct drm_panel *panel) @@ -583,8 +673,7 @@ static int ltk050h3146w_probe(struct mipi_dsi_device *dsi) dsi->lanes = 4; dsi->format = MIPI_DSI_FMT_RGB888; - dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | - MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET; + dsi->mode_flags = ctx->panel_desc->mode_flags; drm_panel_init(&ctx->panel, &dsi->dev, <k050h3146w_funcs, DRM_MODE_CONNECTOR_DSI); @@ -642,6 +731,10 @@ static const struct of_device_id ltk050h3146w_of_match[] = { .compatible = "leadtek,ltk050h3146w-a2", .data = <k050h3146w_a2_data, }, + { + .compatible = "leadtek,ltk050h3148w", + .data = <k050h3148w_data, + }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, ltk050h3146w_of_match); diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c index ad98dd9322b4..79de6c886292 100644 --- a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c +++ b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c @@ -388,6 +388,13 @@ static int panel_nv3051d_probe(struct mipi_dsi_device *dsi) dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET; + /* + * The panel in the RG351V is identical to the 353P, except it + * requires MIPI_DSI_CLOCK_NON_CONTINUOUS to operate correctly. + */ + if (of_device_is_compatible(dev->of_node, "anbernic,rg351v-panel")) + dsi->mode_flags |= MIPI_DSI_CLOCK_NON_CONTINUOUS; + drm_panel_init(&ctx->panel, &dsi->dev, &panel_nv3051d_funcs, DRM_MODE_CONNECTOR_DSI); diff --git a/drivers/gpu/drm/panel/panel-raydium-rm692e5.c b/drivers/gpu/drm/panel/panel-raydium-rm692e5.c new file mode 100644 index 000000000000..a613ba5b816c --- /dev/null +++ b/drivers/gpu/drm/panel/panel-raydium-rm692e5.c @@ -0,0 +1,423 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree. 
+ * Copyright (c) 2023 Linaro Limited + */ + +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/regulator/consumer.h> + +#include <drm/display/drm_dsc.h> +#include <drm/display/drm_dsc_helper.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> + +struct rm692e5_panel { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + struct drm_dsc_config dsc; + struct regulator_bulk_data supplies[3]; + struct gpio_desc *reset_gpio; + bool prepared; +}; + +static inline struct rm692e5_panel *to_rm692e5_panel(struct drm_panel *panel) +{ + return container_of(panel, struct rm692e5_panel, panel); +} + +static void rm692e5_reset(struct rm692e5_panel *ctx) +{ + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(10000, 11000); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + usleep_range(5000, 6000); + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(10000, 11000); +} + +static int rm692e5_on(struct rm692e5_panel *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + mipi_dsi_generic_write_seq(dsi, 0xfe, 0x41); + mipi_dsi_generic_write_seq(dsi, 0xd6, 0x00); + mipi_dsi_generic_write_seq(dsi, 0xfe, 0x16); + mipi_dsi_generic_write_seq(dsi, 0x8a, 0x87); + mipi_dsi_generic_write_seq(dsi, 0xfe, 0x71); + mipi_dsi_generic_write_seq(dsi, 0x82, 0x01); + mipi_dsi_generic_write_seq(dsi, 0xc6, 0x00); + mipi_dsi_generic_write_seq(dsi, 0xc7, 0x2c); + mipi_dsi_generic_write_seq(dsi, 0xc8, 0x64); + mipi_dsi_generic_write_seq(dsi, 0xc9, 0x3c); + mipi_dsi_generic_write_seq(dsi, 0xca, 0x80); + mipi_dsi_generic_write_seq(dsi, 0xcb, 0x02); + mipi_dsi_generic_write_seq(dsi, 0xcc, 0x02); + mipi_dsi_generic_write_seq(dsi, 0xfe, 0x38); + mipi_dsi_generic_write_seq(dsi, 0x18, 0x13); + mipi_dsi_generic_write_seq(dsi, 0xfe, 0xf4); + mipi_dsi_generic_write_seq(dsi, 0x00, 0xff); + mipi_dsi_generic_write_seq(dsi, 0x01, 0xff); + mipi_dsi_generic_write_seq(dsi, 0x02, 0xcf); + mipi_dsi_generic_write_seq(dsi, 0x03, 0xbc); + mipi_dsi_generic_write_seq(dsi, 0x04, 0xb9); + mipi_dsi_generic_write_seq(dsi, 0x05, 0x99); + mipi_dsi_generic_write_seq(dsi, 0x06, 0x02); + mipi_dsi_generic_write_seq(dsi, 0x07, 0x0a); + mipi_dsi_generic_write_seq(dsi, 0x08, 0xe0); + mipi_dsi_generic_write_seq(dsi, 0x09, 0x4c); + mipi_dsi_generic_write_seq(dsi, 0x0a, 0xeb); + mipi_dsi_generic_write_seq(dsi, 0x0b, 0xe8); + mipi_dsi_generic_write_seq(dsi, 0x0c, 0x32); + mipi_dsi_generic_write_seq(dsi, 0x0d, 0x07); + mipi_dsi_generic_write_seq(dsi, 0xfe, 0xf4); + mipi_dsi_generic_write_seq(dsi, 0x0d, 0xc0); + mipi_dsi_generic_write_seq(dsi, 0x0e, 0xff); + mipi_dsi_generic_write_seq(dsi, 0x0f, 0xff); + mipi_dsi_generic_write_seq(dsi, 0x10, 0x33); + mipi_dsi_generic_write_seq(dsi, 0x11, 0x6f); + mipi_dsi_generic_write_seq(dsi, 0x12, 0x6e); + mipi_dsi_generic_write_seq(dsi, 0x13, 0xa6); + mipi_dsi_generic_write_seq(dsi, 0x14, 0x80); + mipi_dsi_generic_write_seq(dsi, 0x15, 0x02); + mipi_dsi_generic_write_seq(dsi, 0x16, 0x38); + mipi_dsi_generic_write_seq(dsi, 0x17, 0xd3); + mipi_dsi_generic_write_seq(dsi, 0x18, 0x3a); + mipi_dsi_generic_write_seq(dsi, 0x19, 0xba); + mipi_dsi_generic_write_seq(dsi, 0x1a, 0xcc); + mipi_dsi_generic_write_seq(dsi, 0x1b, 0x01); + + ret = mipi_dsi_dcs_nop(dsi); + if (ret < 0) { + dev_err(dev, "Failed to nop: %d\n", ret); + return ret; + } + msleep(32); + + mipi_dsi_generic_write_seq(dsi, 0xfe, 
0x38); + mipi_dsi_generic_write_seq(dsi, 0x18, 0x13); + mipi_dsi_generic_write_seq(dsi, 0xfe, 0xd1); + mipi_dsi_generic_write_seq(dsi, 0xd3, 0x00); + mipi_dsi_generic_write_seq(dsi, 0xd0, 0x00); + mipi_dsi_generic_write_seq(dsi, 0xd2, 0x00); + mipi_dsi_generic_write_seq(dsi, 0xd4, 0x00); + mipi_dsi_generic_write_seq(dsi, 0xb4, 0x01); + mipi_dsi_generic_write_seq(dsi, 0xfe, 0xf9); + mipi_dsi_generic_write_seq(dsi, 0x00, 0xaf); + mipi_dsi_generic_write_seq(dsi, 0x1d, 0x37); + mipi_dsi_generic_write_seq(dsi, 0x44, 0x0a, 0x7b); + mipi_dsi_generic_write_seq(dsi, 0xfe, 0x00); + mipi_dsi_generic_write_seq(dsi, 0xfa, 0x01); + mipi_dsi_generic_write_seq(dsi, 0xc2, 0x08); + mipi_dsi_generic_write_seq(dsi, 0x35, 0x00); + mipi_dsi_generic_write_seq(dsi, 0x51, 0x05, 0x42); + + ret = mipi_dsi_dcs_exit_sleep_mode(dsi); + if (ret < 0) { + dev_err(dev, "Failed to exit sleep mode: %d\n", ret); + return ret; + } + msleep(100); + + ret = mipi_dsi_dcs_set_display_on(dsi); + if (ret < 0) { + dev_err(dev, "Failed to set display on: %d\n", ret); + return ret; + } + + return 0; +} + +static int rm692e5_disable(struct drm_panel *panel) +{ + struct rm692e5_panel *ctx = to_rm692e5_panel(panel); + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret; + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + mipi_dsi_generic_write_seq(dsi, 0xfe, 0x00); + + ret = mipi_dsi_dcs_set_display_off(dsi); + if (ret < 0) { + dev_err(dev, "Failed to set display off: %d\n", ret); + return ret; + } + + ret = mipi_dsi_dcs_enter_sleep_mode(dsi); + if (ret < 0) { + dev_err(dev, "Failed to enter sleep mode: %d\n", ret); + return ret; + } + msleep(100); + + return 0; +} + +static int rm692e5_prepare(struct drm_panel *panel) +{ + struct rm692e5_panel *ctx = to_rm692e5_panel(panel); + struct drm_dsc_picture_parameter_set pps; + struct device *dev = &ctx->dsi->dev; + int ret; + + if (ctx->prepared) + return 0; + + ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); + if (ret < 0) { + dev_err(dev, "Failed to enable regulators: %d\n", ret); + return ret; + } + + rm692e5_reset(ctx); + + ret = rm692e5_on(ctx); + if (ret < 0) { + dev_err(dev, "Failed to initialize panel: %d\n", ret); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); + return ret; + } + + drm_dsc_pps_payload_pack(&pps, &ctx->dsc); + + ret = mipi_dsi_picture_parameter_set(ctx->dsi, &pps); + if (ret < 0) { + dev_err(panel->dev, "failed to transmit PPS: %d\n", ret); + return ret; + } + + ret = mipi_dsi_compression_mode(ctx->dsi, true); + if (ret < 0) { + dev_err(dev, "failed to enable compression mode: %d\n", ret); + return ret; + } + + msleep(28); + + mipi_dsi_generic_write_seq(ctx->dsi, 0xfe, 0x40); + + /* 0x05 -> 90Hz, 0x00 -> 60Hz */ + mipi_dsi_generic_write_seq(ctx->dsi, 0xbd, 0x05); + + mipi_dsi_generic_write_seq(ctx->dsi, 0xfe, 0x00); + + ctx->prepared = true; + + return 0; +} + +static int rm692e5_unprepare(struct drm_panel *panel) +{ + struct rm692e5_panel *ctx = to_rm692e5_panel(panel); + + if (!ctx->prepared) + return 0; + + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); + + ctx->prepared = false; + return 0; +} + +static const struct drm_display_mode rm692e5_mode = { + .clock = (1224 + 32 + 8 + 8) * (2700 + 8 + 2 + 8) * 90 / 1000, + .hdisplay = 1224, + .hsync_start = 1224 + 32, + .hsync_end = 1224 + 32 + 8, + .htotal = 1224 + 32 + 8 + 8, + .vdisplay = 2700, + .vsync_start = 2700 + 8, + .vsync_end = 
2700 + 8 + 2, + .vtotal = 2700 + 8 + 2 + 8, + .width_mm = 68, + .height_mm = 150, +}; + +static int rm692e5_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(connector->dev, &rm692e5_mode); + if (!mode) + return -ENOMEM; + + drm_mode_set_name(mode); + + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + connector->display_info.width_mm = mode->width_mm; + connector->display_info.height_mm = mode->height_mm; + drm_mode_probed_add(connector, mode); + + return 1; +} + +static const struct drm_panel_funcs rm692e5_panel_funcs = { + .prepare = rm692e5_prepare, + .unprepare = rm692e5_unprepare, + .disable = rm692e5_disable, + .get_modes = rm692e5_get_modes, +}; + +static int rm692e5_bl_update_status(struct backlight_device *bl) +{ + struct mipi_dsi_device *dsi = bl_get_data(bl); + u16 brightness = backlight_get_brightness(bl); + int ret; + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness); + if (ret < 0) + return ret; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + return 0; +} + +static int rm692e5_bl_get_brightness(struct backlight_device *bl) +{ + struct mipi_dsi_device *dsi = bl_get_data(bl); + u16 brightness; + int ret; + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + ret = mipi_dsi_dcs_get_display_brightness_large(dsi, &brightness); + if (ret < 0) + return ret; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + return brightness; +} + +static const struct backlight_ops rm692e5_bl_ops = { + .update_status = rm692e5_bl_update_status, + .get_brightness = rm692e5_bl_get_brightness, +}; + +static struct backlight_device * +rm692e5_create_backlight(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + const struct backlight_properties props = { + .type = BACKLIGHT_RAW, + .brightness = 4095, + .max_brightness = 4095, + }; + + return devm_backlight_device_register(dev, dev_name(dev), dev, dsi, + &rm692e5_bl_ops, &props); +} + +static int rm692e5_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct rm692e5_panel *ctx; + int ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->supplies[0].supply = "vddio"; + ctx->supplies[1].supply = "dvdd"; + ctx->supplies[2].supply = "vci"; + ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies), + ctx->supplies); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to get regulators\n"); + + ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "Failed to get reset-gpios\n"); + + ctx->dsi = dsi; + mipi_dsi_set_drvdata(dsi, ctx); + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_NO_EOT_PACKET | + MIPI_DSI_CLOCK_NON_CONTINUOUS; + + drm_panel_init(&ctx->panel, dev, &rm692e5_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + ctx->panel.prepare_prev_first = true; + + ctx->panel.backlight = rm692e5_create_backlight(dsi); + if (IS_ERR(ctx->panel.backlight)) + return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight), + "Failed to create backlight\n"); + + drm_panel_add(&ctx->panel); + + /* This panel only supports DSC; unconditionally enable it */ + dsi->dsc = &ctx->dsc; + + /* TODO: Pass slice_per_pkt = 2 */ + ctx->dsc.dsc_version_major = 1; + ctx->dsc.dsc_version_minor = 1; + ctx->dsc.slice_height = 60; + ctx->dsc.slice_width = 1224; + + ctx->dsc.slice_count = 1224 / ctx->dsc.slice_width; + 
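	/*
	 * A short worked note on the DSC parameters in this probe, using only
	 * the values visible here and in rm692e5_mode above: the slice spans
	 * the full 1224-pixel active width, so slice_count evaluates to
	 * 1224 / 1224 = 1, and slice_height = 60 divides the 2700-line frame
	 * into 2700 / 60 = 45 slice rows. The bits_per_pixel field below is
	 * stored with 4 fractional bits, so the 8 bpp target is encoded as
	 * 8 << 4 = 128.
	 */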
ctx->dsc.bits_per_component = 8; + ctx->dsc.bits_per_pixel = 8 << 4; /* 4 fractional bits */ + ctx->dsc.block_pred_enable = true; + + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + dev_err(dev, "Failed to attach to DSI host: %d\n", ret); + drm_panel_remove(&ctx->panel); + return ret; + } + + return 0; +} + +static void rm692e5_remove(struct mipi_dsi_device *dsi) +{ + struct rm692e5_panel *ctx = mipi_dsi_get_drvdata(dsi); + int ret; + + ret = mipi_dsi_detach(dsi); + if (ret < 0) + dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret); + + drm_panel_remove(&ctx->panel); +} + +static const struct of_device_id rm692e5_of_match[] = { + { .compatible = "fairphone,fp5-rm692e5-boe" }, + { } +}; +MODULE_DEVICE_TABLE(of, rm692e5_of_match); + +static struct mipi_dsi_driver rm692e5_driver = { + .probe = rm692e5_probe, + .remove = rm692e5_remove, + .driver = { + .name = "panel-rm692e5-boe-amoled", + .of_match_table = rm692e5_of_match, + }, +}; +module_mipi_dsi_driver(rm692e5_driver); + +MODULE_DESCRIPTION("DRM driver for rm692e5-equipped DSI panels"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 857bc01591db..4195cf54934b 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -40,6 +40,7 @@ #include <drm/drm_edid.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_panel.h> +#include <drm/drm_of.h> /** * struct panel_desc - Describes a simple panel. @@ -549,6 +550,51 @@ static void panel_simple_parse_panel_timing_node(struct device *dev, dev_err(dev, "Reject override mode: No display_timing found\n"); } +static int panel_simple_override_nondefault_lvds_datamapping(struct device *dev, + struct panel_simple *panel) +{ + int ret, bpc; + + ret = drm_of_lvds_get_data_mapping(dev->of_node); + if (ret < 0) { + if (ret == -EINVAL) + dev_warn(dev, "Ignore invalid data-mapping property\n"); + + /* + * Ignore non-existing or malformatted property, fallback to + * default data-mapping, and return 0. + */ + return 0; + } + + switch (ret) { + default: + WARN_ON(1); + fallthrough; + case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG: + fallthrough; + case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA: + bpc = 8; + break; + case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: + bpc = 6; + } + + if (panel->desc->bpc != bpc || panel->desc->bus_format != ret) { + struct panel_desc *override_desc; + + override_desc = devm_kmemdup(dev, panel->desc, sizeof(*panel->desc), GFP_KERNEL); + if (!override_desc) + return -ENOMEM; + + override_desc->bus_format = ret; + override_desc->bpc = bpc; + panel->desc = override_desc; + } + + return 0; +} + static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) { struct panel_simple *panel; @@ -601,6 +647,13 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) panel_simple_parse_panel_timing_node(dev, panel, &dt); } + if (desc->connector_type == DRM_MODE_CONNECTOR_LVDS) { + /* Optional data-mapping property for overriding bus format */ + err = panel_simple_override_nondefault_lvds_datamapping(dev, panel); + if (err) + goto free_ddc; + } + connector_type = desc->connector_type; /* Catch common mistakes for panels. 
*/ switch (connector_type) { diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c index 845304435e23..f6a212e542cb 100644 --- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c +++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c @@ -379,6 +379,8 @@ static int tpg110_get_modes(struct drm_panel *panel, connector->display_info.bus_flags = tpg->panel_mode->bus_flags; mode = drm_mode_duplicate(connector->dev, &tpg->panel_mode->mode); + if (!mode) + return -ENOMEM; drm_mode_set_name(mode); mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; diff --git a/drivers/gpu/drm/panfrost/Makefile b/drivers/gpu/drm/panfrost/Makefile index 7da2b3f02ed9..2c01c1e7523e 100644 --- a/drivers/gpu/drm/panfrost/Makefile +++ b/drivers/gpu/drm/panfrost/Makefile @@ -12,4 +12,6 @@ panfrost-y := \ panfrost_perfcnt.o \ panfrost_dump.o +panfrost-$(CONFIG_DEBUG_FS) += panfrost_debugfs.o + obj-$(CONFIG_DRM_PANFROST) += panfrost.o diff --git a/drivers/gpu/drm/panfrost/panfrost_debugfs.c b/drivers/gpu/drm/panfrost/panfrost_debugfs.c new file mode 100644 index 000000000000..72d4286a6bf7 --- /dev/null +++ b/drivers/gpu/drm/panfrost/panfrost_debugfs.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright 2023 Collabora ltd. */ +/* Copyright 2023 Amazon.com, Inc. or its affiliates. */ + +#include <linux/debugfs.h> +#include <linux/platform_device.h> +#include <drm/drm_debugfs.h> +#include <drm/drm_file.h> +#include <drm/panfrost_drm.h> + +#include "panfrost_device.h" +#include "panfrost_gpu.h" +#include "panfrost_debugfs.h" + +void panfrost_debugfs_init(struct drm_minor *minor) +{ + struct drm_device *dev = minor->dev; + struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev->dev)); + + debugfs_create_atomic_t("profile", 0600, minor->debugfs_root, &pfdev->profile_mode); +} diff --git a/drivers/gpu/drm/panfrost/panfrost_debugfs.h b/drivers/gpu/drm/panfrost/panfrost_debugfs.h new file mode 100644 index 000000000000..c5af5f35877f --- /dev/null +++ b/drivers/gpu/drm/panfrost/panfrost_debugfs.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2023 Collabora ltd. + * Copyright 2023 Amazon.com, Inc. or its affiliates. 
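 *
 * Note on what this header covers: panfrost_debugfs_init() creates a single
 * "profile" knob (an atomic_t) in the DRM minor's debugfs directory,
 * typically /sys/kernel/debug/dri/<minor>/profile when debugfs is mounted in
 * its usual place. Writing a non-zero value there makes
 * panfrost_job_hw_submit() take a cycle-counter reference and timestamp each
 * job, which in turn feeds the drm-cycles-* and drm-engine-* lines reported
 * through fdinfo.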
+ */ + +#ifndef PANFROST_DEBUGFS_H +#define PANFROST_DEBUGFS_H + +#ifdef CONFIG_DEBUG_FS +void panfrost_debugfs_init(struct drm_minor *minor); +#endif + +#endif /* PANFROST_DEBUGFS_H */ diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c index e78de99e9933..f59c82ea8870 100644 --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c +++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c @@ -58,6 +58,7 @@ static int panfrost_devfreq_get_dev_status(struct device *dev, spin_lock_irqsave(&pfdevfreq->lock, irqflags); panfrost_devfreq_update_utilization(pfdevfreq); + pfdevfreq->current_frequency = status->current_frequency; status->total_time = ktime_to_ns(ktime_add(pfdevfreq->busy_time, pfdevfreq->idle_time)); @@ -117,6 +118,7 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev) struct devfreq *devfreq; struct thermal_cooling_device *cooling; struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq; + unsigned long freq = ULONG_MAX; if (pfdev->comp->num_supplies > 1) { /* @@ -172,6 +174,12 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev) return ret; } + /* Find the fastest defined rate */ + opp = dev_pm_opp_find_freq_floor(dev, &freq); + if (IS_ERR(opp)) + return PTR_ERR(opp); + pfdevfreq->fast_rate = freq; + dev_pm_opp_put(opp); /* diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.h b/drivers/gpu/drm/panfrost/panfrost_devfreq.h index 1514c1f9d91c..48dbe185f206 100644 --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.h +++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.h @@ -19,6 +19,9 @@ struct panfrost_devfreq { struct devfreq_simple_ondemand_data gov_data; bool opp_of_table_added; + unsigned long current_frequency; + unsigned long fast_rate; + ktime_t busy_time; ktime_t idle_time; ktime_t time_last_update; diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c index fa1a086a862b..28f7046e1b1a 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.c +++ b/drivers/gpu/drm/panfrost/panfrost_device.c @@ -207,6 +207,8 @@ int panfrost_device_init(struct panfrost_device *pfdev) spin_lock_init(&pfdev->as_lock); + spin_lock_init(&pfdev->cycle_counter.lock); + err = panfrost_clk_init(pfdev); if (err) { dev_err(pfdev->dev, "clk init failed %d\n", err); diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h index b0126b9fbadc..1e85656dc2f7 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.h +++ b/drivers/gpu/drm/panfrost/panfrost_device.h @@ -107,6 +107,7 @@ struct panfrost_device { struct list_head scheduled_jobs; struct panfrost_perfcnt *perfcnt; + atomic_t profile_mode; struct mutex sched_lock; @@ -121,6 +122,11 @@ struct panfrost_device { struct shrinker shrinker; struct panfrost_devfreq pfdevfreq; + + struct { + atomic_t use_count; + spinlock_t lock; + } cycle_counter; }; struct panfrost_mmu { @@ -135,12 +141,19 @@ struct panfrost_mmu { struct list_head list; }; +struct panfrost_engine_usage { + unsigned long long elapsed_ns[NUM_JOB_SLOTS]; + unsigned long long cycles[NUM_JOB_SLOTS]; +}; + struct panfrost_file_priv { struct panfrost_device *pfdev; struct drm_sched_entity sched_entity[NUM_JOB_SLOTS]; struct panfrost_mmu *mmu; + + struct panfrost_engine_usage engine_usage; }; static inline struct panfrost_device *to_panfrost_device(struct drm_device *ddev) diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index a2ab99698ca8..b834777b409b 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ 
b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -20,6 +20,7 @@ #include "panfrost_job.h" #include "panfrost_gpu.h" #include "panfrost_perfcnt.h" +#include "panfrost_debugfs.h" static bool unstable_ioctls; module_param_unsafe(unstable_ioctls, bool, 0600); @@ -267,6 +268,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data, job->requirements = args->requirements; job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev); job->mmu = file_priv->mmu; + job->engine_usage = &file_priv->engine_usage; slot = panfrost_job_get_slot(job); @@ -523,7 +525,58 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = { PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW), }; -DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops); +static void panfrost_gpu_show_fdinfo(struct panfrost_device *pfdev, + struct panfrost_file_priv *panfrost_priv, + struct drm_printer *p) +{ + int i; + + /* + * IMPORTANT NOTE: drm-cycles and drm-engine measurements are not + * accurate, as they only provide a rough estimation of the number of + * GPU cycles and CPU time spent in a given context. This is due to two + * different factors: + * - Firstly, we must consider the time the CPU and then the kernel + * takes to process the GPU interrupt, which means additional time and + * GPU cycles will be added in excess to the real figure. + * - Secondly, the pipelining done by the Job Manager (2 job slots per + * engine) implies there is no way to know exactly how much time each + * job spent on the GPU. + */ + + static const char * const engine_names[] = { + "fragment", "vertex-tiler", "compute-only" + }; + + BUILD_BUG_ON(ARRAY_SIZE(engine_names) != NUM_JOB_SLOTS); + + for (i = 0; i < NUM_JOB_SLOTS - 1; i++) { + drm_printf(p, "drm-engine-%s:\t%llu ns\n", + engine_names[i], panfrost_priv->engine_usage.elapsed_ns[i]); + drm_printf(p, "drm-cycles-%s:\t%llu\n", + engine_names[i], panfrost_priv->engine_usage.cycles[i]); + drm_printf(p, "drm-maxfreq-%s:\t%lu Hz\n", + engine_names[i], pfdev->pfdevfreq.fast_rate); + drm_printf(p, "drm-curfreq-%s:\t%lu Hz\n", + engine_names[i], pfdev->pfdevfreq.current_frequency); + } +} + +static void panfrost_show_fdinfo(struct drm_printer *p, struct drm_file *file) +{ + struct drm_device *dev = file->minor->dev; + struct panfrost_device *pfdev = dev->dev_private; + + panfrost_gpu_show_fdinfo(pfdev, file->driver_priv, p); + + drm_show_memory_stats(p, file); +} + +static const struct file_operations panfrost_drm_driver_fops = { + .owner = THIS_MODULE, + DRM_GEM_FOPS, + .show_fdinfo = drm_show_fdinfo, +}; /* * Panfrost driver version: @@ -535,6 +588,7 @@ static const struct drm_driver panfrost_drm_driver = { .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ, .open = panfrost_open, .postclose = panfrost_postclose, + .show_fdinfo = panfrost_show_fdinfo, .ioctls = panfrost_drm_driver_ioctls, .num_ioctls = ARRAY_SIZE(panfrost_drm_driver_ioctls), .fops = &panfrost_drm_driver_fops, @@ -546,6 +600,10 @@ static const struct drm_driver panfrost_drm_driver = { .gem_create_object = panfrost_gem_create_object, .gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table, + +#ifdef CONFIG_DEBUG_FS + .debugfs_init = panfrost_debugfs_init, +#endif }; static int panfrost_probe(struct platform_device *pdev) diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index 3c812fbd126f..0cf64456e29a 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -195,6 +195,34 @@ static int panfrost_gem_pin(struct 
drm_gem_object *obj) return drm_gem_shmem_pin(&bo->base); } +static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj) +{ + struct panfrost_gem_object *bo = to_panfrost_bo(obj); + enum drm_gem_object_status res = 0; + + if (bo->base.pages) + res |= DRM_GEM_OBJECT_RESIDENT; + + if (bo->base.madv == PANFROST_MADV_DONTNEED) + res |= DRM_GEM_OBJECT_PURGEABLE; + + return res; +} + +static size_t panfrost_gem_rss(struct drm_gem_object *obj) +{ + struct panfrost_gem_object *bo = to_panfrost_bo(obj); + + if (bo->is_heap) { + return bo->heap_rss_size; + } else if (bo->base.pages) { + WARN_ON(bo->heap_rss_size); + return bo->base.base.size; + } + + return 0; +} + static const struct drm_gem_object_funcs panfrost_gem_funcs = { .free = panfrost_gem_free_object, .open = panfrost_gem_open, @@ -206,6 +234,8 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = { .vmap = drm_gem_shmem_object_vmap, .vunmap = drm_gem_shmem_object_vunmap, .mmap = drm_gem_shmem_object_mmap, + .status = panfrost_gem_status, + .rss = panfrost_gem_rss, .vm_ops = &drm_gem_shmem_vm_ops, }; diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h index ad2877eeeccd..13c0a8149c3a 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.h +++ b/drivers/gpu/drm/panfrost/panfrost_gem.h @@ -36,6 +36,11 @@ struct panfrost_gem_object { */ atomic_t gpu_usecount; + /* + * Object chunk size currently mapped onto physical memory + */ + size_t heap_rss_size; + bool noexec :1; bool is_heap :1; }; diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index 2faa344d89ee..f0be7e19b13e 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -73,6 +73,13 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev) gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_MASK_ALL); gpu_write(pfdev, GPU_INT_MASK, GPU_IRQ_MASK_ALL); + /* + * All in-flight jobs should have released their cycle + * counter references upon reset, but let us make sure + */ + if (drm_WARN_ON(pfdev->ddev, atomic_read(&pfdev->cycle_counter.use_count) != 0)) + atomic_set(&pfdev->cycle_counter.use_count, 0); + return 0; } @@ -321,6 +328,40 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev) pfdev->features.shader_present, pfdev->features.l2_present); } +void panfrost_cycle_counter_get(struct panfrost_device *pfdev) +{ + if (atomic_inc_not_zero(&pfdev->cycle_counter.use_count)) + return; + + spin_lock(&pfdev->cycle_counter.lock); + if (atomic_inc_return(&pfdev->cycle_counter.use_count) == 1) + gpu_write(pfdev, GPU_CMD, GPU_CMD_CYCLE_COUNT_START); + spin_unlock(&pfdev->cycle_counter.lock); +} + +void panfrost_cycle_counter_put(struct panfrost_device *pfdev) +{ + if (atomic_add_unless(&pfdev->cycle_counter.use_count, -1, 1)) + return; + + spin_lock(&pfdev->cycle_counter.lock); + if (atomic_dec_return(&pfdev->cycle_counter.use_count) == 0) + gpu_write(pfdev, GPU_CMD, GPU_CMD_CYCLE_COUNT_STOP); + spin_unlock(&pfdev->cycle_counter.lock); +} + +unsigned long long panfrost_cycle_counter_read(struct panfrost_device *pfdev) +{ + u32 hi, lo; + + do { + hi = gpu_read(pfdev, GPU_CYCLE_COUNT_HI); + lo = gpu_read(pfdev, GPU_CYCLE_COUNT_LO); + } while (hi != gpu_read(pfdev, GPU_CYCLE_COUNT_HI)); + + return ((u64)hi << 32) | lo; +} + void panfrost_gpu_power_on(struct panfrost_device *pfdev) { int ret; diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h b/drivers/gpu/drm/panfrost/panfrost_gpu.h index 468c51e7e46d..876fdad9f721 100644 --- 
a/drivers/gpu/drm/panfrost/panfrost_gpu.h +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h @@ -16,6 +16,10 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev); void panfrost_gpu_power_on(struct panfrost_device *pfdev); void panfrost_gpu_power_off(struct panfrost_device *pfdev); +void panfrost_cycle_counter_get(struct panfrost_device *pfdev); +void panfrost_cycle_counter_put(struct panfrost_device *pfdev); +unsigned long long panfrost_cycle_counter_read(struct panfrost_device *pfdev); + void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev); #endif diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index 033f5e684707..fb16de2d0420 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -159,6 +159,16 @@ panfrost_dequeue_job(struct panfrost_device *pfdev, int slot) struct panfrost_job *job = pfdev->jobs[slot][0]; WARN_ON(!job); + if (job->is_profiled) { + if (job->engine_usage) { + job->engine_usage->elapsed_ns[slot] += + ktime_to_ns(ktime_sub(ktime_get(), job->start_time)); + job->engine_usage->cycles[slot] += + panfrost_cycle_counter_read(pfdev) - job->start_cycles; + } + panfrost_cycle_counter_put(job->pfdev); + } + pfdev->jobs[slot][0] = pfdev->jobs[slot][1]; pfdev->jobs[slot][1] = NULL; @@ -233,6 +243,13 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js) subslot = panfrost_enqueue_job(pfdev, js, job); /* Don't queue the job if a reset is in progress */ if (!atomic_read(&pfdev->reset.pending)) { + if (atomic_read(&pfdev->profile_mode)) { + panfrost_cycle_counter_get(pfdev); + job->is_profiled = true; + job->start_time = ktime_get(); + job->start_cycles = panfrost_cycle_counter_read(pfdev); + } + job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START); dev_dbg(pfdev->dev, "JS: Submitting atom %p to js[%d][%d] with head=0x%llx AS %d", @@ -660,10 +677,14 @@ panfrost_reset(struct panfrost_device *pfdev, * stuck jobs. Let's make sure the PM counters stay balanced by * manually calling pm_runtime_put_noidle() and * panfrost_devfreq_record_idle() for each stuck job. 
+ * Let's also make sure the cycle counting register's refcnt is + * kept balanced to prevent it from running forever */ spin_lock(&pfdev->js->job_lock); for (i = 0; i < NUM_JOB_SLOTS; i++) { for (j = 0; j < ARRAY_SIZE(pfdev->jobs[0]) && pfdev->jobs[i][j]; j++) { + if (pfdev->jobs[i][j]->is_profiled) + panfrost_cycle_counter_put(pfdev->jobs[i][j]->pfdev); pm_runtime_put_noidle(pfdev->dev); panfrost_devfreq_record_idle(&pfdev->pfdevfreq); } @@ -926,6 +947,9 @@ void panfrost_job_close(struct panfrost_file_priv *panfrost_priv) } job_write(pfdev, JS_COMMAND(i), cmd); + + /* Jobs can outlive their file context */ + job->engine_usage = NULL; } } spin_unlock(&pfdev->js->job_lock); diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h index 8becc1ba0eb9..17ff808dba07 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.h +++ b/drivers/gpu/drm/panfrost/panfrost_job.h @@ -32,6 +32,11 @@ struct panfrost_job { /* Fence to be signaled by drm-sched once its done with the job */ struct dma_fence *render_done_fence; + + struct panfrost_engine_usage *engine_usage; + bool is_profiled; + ktime_t start_time; + u64 start_cycles; }; int panfrost_job_init(struct panfrost_device *pfdev); diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index d54d4e7b2195..846dd697c410 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -522,6 +522,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt); bomapping->active = true; + bo->heap_rss_size += SZ_2M; dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr); diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h index 919f44ac853d..55ec807550b3 100644 --- a/drivers/gpu/drm/panfrost/panfrost_regs.h +++ b/drivers/gpu/drm/panfrost/panfrost_regs.h @@ -46,6 +46,8 @@ #define GPU_CMD_SOFT_RESET 0x01 #define GPU_CMD_PERFCNT_CLEAR 0x03 #define GPU_CMD_PERFCNT_SAMPLE 0x04 +#define GPU_CMD_CYCLE_COUNT_START 0x05 +#define GPU_CMD_CYCLE_COUNT_STOP 0x06 #define GPU_CMD_CLEAN_CACHES 0x07 #define GPU_CMD_CLEAN_INV_CACHES 0x08 #define GPU_STATUS 0x34 @@ -73,6 +75,9 @@ #define GPU_PRFCNT_TILER_EN 0x74 #define GPU_PRFCNT_MMU_L2_EN 0x7c +#define GPU_CYCLE_COUNT_LO 0x90 +#define GPU_CYCLE_COUNT_HI 0x94 + #define GPU_THREAD_MAX_THREADS 0x0A0 /* (RO) Maximum number of threads per core */ #define GPU_THREAD_MAX_WORKGROUP_SIZE 0x0A4 /* (RO) Maximum workgroup size */ #define GPU_THREAD_MAX_BARRIER_SIZE 0x0A8 /* (RO) Maximum threads waiting at a barrier */ diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index a29fbafce393..21254e4e107a 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -1177,6 +1177,7 @@ static int cdn_dp_probe(struct platform_device *pdev) struct cdn_dp_device *dp; struct extcon_dev *extcon; struct phy *phy; + int ret; int i; dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL); @@ -1217,9 +1218,19 @@ static int cdn_dp_probe(struct platform_device *pdev) mutex_init(&dp->lock); dev_set_drvdata(dev, dp); - cdn_dp_audio_codec_init(dp, dev); + ret = cdn_dp_audio_codec_init(dp, dev); + if (ret) + return ret; + + ret = component_add(dev, &cdn_dp_component_ops); + if (ret) + goto err_audio_deinit; - return component_add(dev, &cdn_dp_component_ops); + return 0; + +err_audio_deinit: + platform_device_unregister(dp->audio_pdev); + return ret; } static 
void cdn_dp_remove(struct platform_device *pdev) @@ -1250,7 +1261,7 @@ struct platform_driver cdn_dp_driver = { .driver = { .name = "cdn-dp", .owner = THIS_MODULE, - .of_match_table = of_match_ptr(cdn_dp_dt_ids), + .of_match_table = cdn_dp_dt_ids, .pm = &cdn_dp_pm_ops, }, }; diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c index 8bafb2a2747f..6396f9324dab 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c @@ -1358,8 +1358,7 @@ static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev) if (!dsi) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - dsi->base = devm_ioremap_resource(dev, res); + dsi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(dsi->base)) { DRM_DEV_ERROR(dev, "Unable to get dsi registers\n"); return PTR_ERR(dsi->base); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index c306806aa3de..ec7e0e6149af 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -469,8 +469,8 @@ static bool rockchip_vop2_mod_supported(struct drm_plane *plane, u32 format, return true; if (!rockchip_afbc(plane, modifier)) { - drm_err(vop2->drm, "Unsupported format modifier 0x%llx\n", - modifier); + drm_dbg_kms(vop2->drm, "Unsupported format modifier 0x%llx\n", + modifier); return false; } @@ -2640,7 +2640,7 @@ static const struct regmap_config vop2_regmap_config = { .max_register = 0x3000, .name = "vop2", .volatile_table = &vop2_volatile_table, - .cache_type = REGCACHE_RBTREE, + .cache_type = REGCACHE_MAPLE, }; static int vop2_bind(struct device *dev, struct device *master, void *data) diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c index 582859387792..f0f47e9abf5a 100644 --- a/drivers/gpu/drm/rockchip/rockchip_lvds.c +++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c @@ -752,6 +752,6 @@ struct platform_driver rockchip_lvds_driver = { .remove_new = rockchip_lvds_remove, .driver = { .name = "rockchip-lvds", - .of_match_table = of_match_ptr(rockchip_lvds_dt_ids), + .of_match_table = rockchip_lvds_dt_ids, }, }; diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c index 62b573f282a7..fcd4cf3072cd 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c @@ -274,6 +274,6 @@ struct platform_driver vop2_platform_driver = { .remove_new = vop2_remove, .driver = { .name = "rockchip-vop2", - .of_match_table = of_match_ptr(vop2_dt_match), + .of_match_table = vop2_dt_match, }, }; diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index f9d18e8cf6ab..ccb5d74fa227 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -120,9 +120,6 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra, int host1x_client_iommu_attach(struct host1x_client *client); void host1x_client_iommu_detach(struct host1x_client *client); -int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm); -int tegra_drm_exit(struct tegra_drm *tegra); - void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *iova); void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt, dma_addr_t iova); diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index a4023163493d..b4eb030ea961 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ 
b/drivers/gpu/drm/tegra/gem.c @@ -177,18 +177,27 @@ static void tegra_bo_unpin(struct host1x_bo_mapping *map) static void *tegra_bo_mmap(struct host1x_bo *bo) { struct tegra_bo *obj = host1x_to_tegra_bo(bo); - struct iosys_map map; + struct iosys_map map = { 0 }; + void *vaddr; int ret; - if (obj->vaddr) { + if (obj->vaddr) return obj->vaddr; - } else if (obj->gem.import_attach) { + + if (obj->gem.import_attach) { ret = dma_buf_vmap_unlocked(obj->gem.import_attach->dmabuf, &map); - return ret ? NULL : map.vaddr; - } else { - return vmap(obj->pages, obj->num_pages, VM_MAP, - pgprot_writecombine(PAGE_KERNEL)); + if (ret < 0) + return ERR_PTR(ret); + + return map.vaddr; } + + vaddr = vmap(obj->pages, obj->num_pages, VM_MAP, + pgprot_writecombine(PAGE_KERNEL)); + if (!vaddr) + return ERR_PTR(-ENOMEM); + + return vaddr; } static void tegra_bo_munmap(struct host1x_bo *bo, void *addr) @@ -198,10 +207,11 @@ static void tegra_bo_munmap(struct host1x_bo *bo, void *addr) if (obj->vaddr) return; - else if (obj->gem.import_attach) - dma_buf_vunmap_unlocked(obj->gem.import_attach->dmabuf, &map); - else - vunmap(addr); + + if (obj->gem.import_attach) + return dma_buf_vunmap_unlocked(obj->gem.import_attach->dmabuf, &map); + + vunmap(addr); } static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo) diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c index 1af5f8318d91..f21e57e8599e 100644 --- a/drivers/gpu/drm/tegra/hub.c +++ b/drivers/gpu/drm/tegra/hub.c @@ -1101,7 +1101,7 @@ static int tegra_display_hub_probe(struct platform_device *pdev) for (i = 0; i < hub->soc->num_wgrps; i++) { struct tegra_windowgroup *wgrp = &hub->wgrps[i]; - char id[8]; + char id[16]; snprintf(id, sizeof(id), "wgrp%u", i); mutex_init(&wgrp->lock); diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c index 1a6bd291345d..f6408e56f786 100644 --- a/drivers/gpu/drm/tests/drm_format_helper_test.c +++ b/drivers/gpu/drm/tests/drm_format_helper_test.c @@ -81,6 +81,16 @@ struct fb_swab_result { const u32 expected[TEST_BUF_SIZE]; }; +struct convert_to_xbgr8888_result { + unsigned int dst_pitch; + const u32 expected[TEST_BUF_SIZE]; +}; + +struct convert_to_abgr8888_result { + unsigned int dst_pitch; + const u32 expected[TEST_BUF_SIZE]; +}; + struct convert_xrgb8888_case { const char *name; unsigned int pitch; @@ -98,6 +108,8 @@ struct convert_xrgb8888_case { struct convert_to_argb2101010_result argb2101010_result; struct convert_to_mono_result mono_result; struct fb_swab_result swab_result; + struct convert_to_xbgr8888_result xbgr8888_result; + struct convert_to_abgr8888_result abgr8888_result; }; static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { @@ -155,6 +167,14 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { .dst_pitch = TEST_USE_DEFAULT_PITCH, .expected = { 0x0000FF01 }, }, + .xbgr8888_result = { + .dst_pitch = TEST_USE_DEFAULT_PITCH, + .expected = { 0x010000FF }, + }, + .abgr8888_result = { + .dst_pitch = TEST_USE_DEFAULT_PITCH, + .expected = { 0xFF0000FF }, + }, }, { .name = "single_pixel_clip_rectangle", @@ -213,6 +233,14 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { .dst_pitch = TEST_USE_DEFAULT_PITCH, .expected = { 0x0000FF10 }, }, + .xbgr8888_result = { + .dst_pitch = TEST_USE_DEFAULT_PITCH, + .expected = { 0x100000FF }, + }, + .abgr8888_result = { + .dst_pitch = TEST_USE_DEFAULT_PITCH, + .expected = { 0xFF0000FF }, + }, }, { /* Well known colors: White, black, red, green, blue, magenta, @@ -343,6 
+371,24 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { 0x00FFFF77, 0xFFFF0088, }, }, + .xbgr8888_result = { + .dst_pitch = TEST_USE_DEFAULT_PITCH, + .expected = { + 0x11FFFFFF, 0x22000000, + 0x330000FF, 0x4400FF00, + 0x55FF0000, 0x66FF00FF, + 0x7700FFFF, 0x88FFFF00, + }, + }, + .abgr8888_result = { + .dst_pitch = TEST_USE_DEFAULT_PITCH, + .expected = { + 0xFFFFFFFF, 0xFF000000, + 0xFF0000FF, 0xFF00FF00, + 0xFFFF0000, 0xFFFF00FF, + 0xFF00FFFF, 0xFFFFFF00, + }, + }, }, { /* Randomly picked colors. Full buffer within the clip area. */ @@ -458,6 +504,22 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { 0x0303A8C2, 0x73F06CD2, 0x9C440EA3, 0x00000000, 0x00000000, }, }, + .xbgr8888_result = { + .dst_pitch = 20, + .expected = { + 0xA19C440E, 0xB1054D11, 0xC103F3A8, 0x00000000, 0x00000000, + 0xD173F06C, 0xA29C440E, 0xB2054D11, 0x00000000, 0x00000000, + 0xC20303A8, 0xD273F06C, 0xA39C440E, 0x00000000, 0x00000000, + }, + }, + .abgr8888_result = { + .dst_pitch = 20, + .expected = { + 0xFF9C440E, 0xFF054D11, 0xFF03F3A8, 0x00000000, 0x00000000, + 0xFF73F06C, 0xFF9C440E, 0xFF054D11, 0x00000000, 0x00000000, + 0xFF0303A8, 0xFF73F06C, 0xFF9C440E, 0x00000000, 0x00000000, + }, + }, }, }; @@ -643,6 +705,18 @@ static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test) drm_fb_xrgb8888_to_rgb565(&dst, &result->dst_pitch, &src, &fb, ¶ms->clip, true); buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected_swab, dst_size); + + buf = dst.vaddr; + memset(buf, 0, dst_size); + + int blit_result = 0; + + blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB565, &src, &fb, ¶ms->clip); + + buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16)); + + KUNIT_EXPECT_FALSE(test, blit_result); + KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); } static void drm_test_fb_xrgb8888_to_xrgb1555(struct kunit *test) @@ -677,6 +751,18 @@ static void drm_test_fb_xrgb8888_to_xrgb1555(struct kunit *test) drm_fb_xrgb8888_to_xrgb1555(&dst, dst_pitch, &src, &fb, ¶ms->clip); buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); + + buf = dst.vaddr; /* restore original value of buf */ + memset(buf, 0, dst_size); + + int blit_result = 0; + + blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB1555, &src, &fb, ¶ms->clip); + + buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16)); + + KUNIT_EXPECT_FALSE(test, blit_result); + KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); } static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test) @@ -711,6 +797,18 @@ static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test) drm_fb_xrgb8888_to_argb1555(&dst, dst_pitch, &src, &fb, ¶ms->clip); buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); + + buf = dst.vaddr; /* restore original value of buf */ + memset(buf, 0, dst_size); + + int blit_result = 0; + + blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB1555, &src, &fb, ¶ms->clip); + + buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16)); + + KUNIT_EXPECT_FALSE(test, blit_result); + KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); } static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit *test) @@ -745,6 +843,18 @@ static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit 
*test) drm_fb_xrgb8888_to_rgba5551(&dst, dst_pitch, &src, &fb, ¶ms->clip); buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); + + buf = dst.vaddr; /* restore original value of buf */ + memset(buf, 0, dst_size); + + int blit_result = 0; + + blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGBA5551, &src, &fb, ¶ms->clip); + + buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16)); + + KUNIT_EXPECT_FALSE(test, blit_result); + KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); } static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test) @@ -782,6 +892,16 @@ static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test) drm_fb_xrgb8888_to_rgb888(&dst, dst_pitch, &src, &fb, ¶ms->clip); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); + + buf = dst.vaddr; /* restore original value of buf */ + memset(buf, 0, dst_size); + + int blit_result = 0; + + blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB888, &src, &fb, ¶ms->clip); + + KUNIT_EXPECT_FALSE(test, blit_result); + KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); } static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test) @@ -816,6 +936,18 @@ static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test) drm_fb_xrgb8888_to_argb8888(&dst, dst_pitch, &src, &fb, ¶ms->clip); buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); + + buf = dst.vaddr; /* restore original value of buf */ + memset(buf, 0, dst_size); + + int blit_result = 0; + + blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB8888, &src, &fb, ¶ms->clip); + + buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); + + KUNIT_EXPECT_FALSE(test, blit_result); + KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); } static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test) @@ -850,6 +982,17 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test) drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, ¶ms->clip); buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); + + buf = dst.vaddr; /* restore original value of buf */ + memset(buf, 0, dst_size); + + int blit_result = 0; + + blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB2101010, &src, &fb, + ¶ms->clip); + + KUNIT_EXPECT_FALSE(test, blit_result); + KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); } static void drm_test_fb_xrgb8888_to_argb2101010(struct kunit *test) @@ -884,6 +1027,19 @@ static void drm_test_fb_xrgb8888_to_argb2101010(struct kunit *test) drm_fb_xrgb8888_to_argb2101010(&dst, dst_pitch, &src, &fb, ¶ms->clip); buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); + + buf = dst.vaddr; /* restore original value of buf */ + memset(buf, 0, dst_size); + + int blit_result = 0; + + blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB2101010, &src, &fb, + ¶ms->clip); + + buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); + + KUNIT_EXPECT_FALSE(test, blit_result); + KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); } static void drm_test_fb_xrgb8888_to_mono(struct kunit *test) @@ -951,6 +1107,119 @@ static void drm_test_fb_swab(struct kunit *test) drm_fb_swab(&dst, dst_pitch, &src, &fb, ¶ms->clip, false); 
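	/*
	 * After the direct drm_fb_swab() expectation just below, the new
	 * assertions re-run the same conversion through the generic
	 * drm_fb_blit() entry point and compare against the identical expected
	 * buffer three times: with XRGB8888 marked DRM_FORMAT_BIG_ENDIAN as
	 * the destination format, with a BGRX8888 destination, and with the
	 * source framebuffer's format mocked as big-endian, so every route
	 * into the byte-swapping path is exercised.
	 */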
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); + + buf = dst.vaddr; /* restore original value of buf */ + memset(buf, 0, dst_size); + + int blit_result; + + blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB8888 | DRM_FORMAT_BIG_ENDIAN, + &src, &fb, ¶ms->clip); + buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); + + KUNIT_EXPECT_FALSE(test, blit_result); + KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); + + buf = dst.vaddr; + memset(buf, 0, dst_size); + + blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_BGRX8888, &src, &fb, ¶ms->clip); + buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); + + KUNIT_EXPECT_FALSE(test, blit_result); + KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); + + buf = dst.vaddr; + memset(buf, 0, dst_size); + + struct drm_format_info mock_format = *fb.format; + + mock_format.format |= DRM_FORMAT_BIG_ENDIAN; + fb.format = &mock_format; + + blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB8888, &src, &fb, ¶ms->clip); + buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); + + KUNIT_EXPECT_FALSE(test, blit_result); + KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); +} + +static void drm_test_fb_xrgb8888_to_abgr8888(struct kunit *test) +{ + const struct convert_xrgb8888_case *params = test->param_value; + const struct convert_to_abgr8888_result *result = ¶ms->abgr8888_result; + size_t dst_size; + u32 *buf = NULL; + __le32 *xrgb8888 = NULL; + struct iosys_map dst, src; + + struct drm_framebuffer fb = { + .format = drm_format_info(DRM_FORMAT_XRGB8888), + .pitches = { params->pitch, 0, 0 }, + }; + + dst_size = conversion_buf_size(DRM_FORMAT_XBGR8888, result->dst_pitch, ¶ms->clip, 0); + + KUNIT_ASSERT_GT(test, dst_size, 0); + + buf = kunit_kzalloc(test, dst_size, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf); + iosys_map_set_vaddr(&dst, buf); + + xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888); + iosys_map_set_vaddr(&src, xrgb8888); + + const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ? + NULL : &result->dst_pitch; + + int blit_result = 0; + + blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ABGR8888, &src, &fb, ¶ms->clip); + + buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); + + KUNIT_EXPECT_FALSE(test, blit_result); + KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); +} + +static void drm_test_fb_xrgb8888_to_xbgr8888(struct kunit *test) +{ + const struct convert_xrgb8888_case *params = test->param_value; + const struct convert_to_xbgr8888_result *result = ¶ms->xbgr8888_result; + size_t dst_size; + u32 *buf = NULL; + __le32 *xrgb8888 = NULL; + struct iosys_map dst, src; + + struct drm_framebuffer fb = { + .format = drm_format_info(DRM_FORMAT_XRGB8888), + .pitches = { params->pitch, 0, 0 }, + }; + + dst_size = conversion_buf_size(DRM_FORMAT_XBGR8888, result->dst_pitch, ¶ms->clip, 0); + + KUNIT_ASSERT_GT(test, dst_size, 0); + + buf = kunit_kzalloc(test, dst_size, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf); + iosys_map_set_vaddr(&dst, buf); + + xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888); + iosys_map_set_vaddr(&src, xrgb8888); + + const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ? 
+ NULL : &result->dst_pitch; + + int blit_result = 0; + + blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XBGR8888, &src, &fb, ¶ms->clip); + + buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); + + KUNIT_EXPECT_FALSE(test, blit_result); + KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); } struct clip_offset_case { @@ -1542,6 +1811,19 @@ static void drm_test_fb_memcpy(struct kunit *test) expected[i] = cpubuf_to_le32(test, params->expected[i], TEST_BUF_SIZE); KUNIT_EXPECT_MEMEQ_MSG(test, buf[i], expected[i], dst_size[i], "Failed expectation on plane %zu", i); + + memset(buf[i], 0, dst_size[i]); + } + + int blit_result; + + blit_result = drm_fb_blit(dst, dst_pitches, params->format, src, &fb, ¶ms->clip); + + KUNIT_EXPECT_FALSE(test, blit_result); + for (size_t i = 0; i < fb.format->num_planes; i++) { + expected[i] = cpubuf_to_le32(test, params->expected[i], TEST_BUF_SIZE); + KUNIT_EXPECT_MEMEQ_MSG(test, buf[i], expected[i], dst_size[i], + "Failed expectation on plane %zu", i); } } @@ -1558,6 +1840,8 @@ static struct kunit_case drm_format_helper_test_cases[] = { KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb2101010, convert_xrgb8888_gen_params), KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_mono, convert_xrgb8888_gen_params), KUNIT_CASE_PARAM(drm_test_fb_swab, convert_xrgb8888_gen_params), + KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_xbgr8888, convert_xrgb8888_gen_params), + KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_abgr8888, convert_xrgb8888_gen_params), KUNIT_CASE_PARAM(drm_test_fb_clip_offset, clip_offset_gen_params), KUNIT_CASE_PARAM(drm_test_fb_build_fourcc_list, fb_build_fourcc_list_gen_params), KUNIT_CASE_PARAM(drm_test_fb_memcpy, fb_memcpy_gen_params), diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c index 9c597461d1e2..8bdaf66044fc 100644 --- a/drivers/gpu/drm/tiny/simpledrm.c +++ b/drivers/gpu/drm/tiny/simpledrm.c @@ -506,7 +506,7 @@ static void simpledrm_device_detach_genpd(void *res) return; for (i = sdev->pwr_dom_count - 1; i >= 0; i--) { - if (!sdev->pwr_dom_links[i]) + if (sdev->pwr_dom_links[i]) device_link_del(sdev->pwr_dom_links[i]); if (!IS_ERR_OR_NULL(sdev->pwr_dom_devs[i])) dev_pm_domain_detach(sdev->pwr_dom_devs[i], true); diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 7f664a4b2a75..106454f28956 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -59,7 +59,7 @@ struct v3d_perfmon { * values can't be reset, but you can fake a reset by * destroying the perfmon and creating a new one. */ - u64 values[]; + u64 values[] __counted_by(ncounters); }; struct v3d_dev { diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index bf66499765fb..ab61e96e7e14 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -76,7 +76,7 @@ struct vc4_perfmon { * Note that counter values can't be reset, but you can fake a reset by * destroying the perfmon and creating a new one. 
*/ - u64 counters[]; + u64 counters[] __counted_by(ncounters); }; struct vc4_dev { diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 8513b671f871..96365a772f77 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -119,7 +119,7 @@ struct virtio_gpu_object_array { struct ww_acquire_ctx ticket; struct list_head next; u32 nents, total; - struct drm_gem_object *objs[]; + struct drm_gem_object *objs[] __counted_by(total); }; struct virtio_gpu_vbuffer; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 5db403ee8261..2d1d857f99ae 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -77,7 +77,7 @@ struct vmw_surface_offset { struct vmw_surface_dirty { struct vmw_surface_cache cache; u32 num_subres; - SVGA3dBox boxes[]; + SVGA3dBox boxes[] __counted_by(num_subres); }; static void vmw_user_surface_free(struct vmw_resource *res); diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c index 2d0051d6314c..08077afe4cde 100644 --- a/drivers/gpu/host1x/channel.c +++ b/drivers/gpu/host1x/channel.c @@ -27,6 +27,8 @@ int host1x_channel_list_init(struct host1x_channel_list *chlist, return -ENOMEM; } + mutex_init(&chlist->lock); + return 0; } @@ -79,6 +81,25 @@ void host1x_channel_stop(struct host1x_channel *channel) } EXPORT_SYMBOL(host1x_channel_stop); +/** + * host1x_channel_stop_all() - disable CDMA on allocated channels + * @host: host1x instance + * + * Stop CDMA on allocated channels + */ +void host1x_channel_stop_all(struct host1x *host) +{ + struct host1x_channel_list *chlist = &host->channel_list; + int bit; + + mutex_lock(&chlist->lock); + + for_each_set_bit(bit, chlist->allocated_channels, host->info->nb_channels) + host1x_channel_stop(&chlist->channels[bit]); + + mutex_unlock(&chlist->lock); +} + static void release_channel(struct kref *kref) { struct host1x_channel *channel = @@ -104,8 +125,11 @@ static struct host1x_channel *acquire_unused_channel(struct host1x *host) unsigned int max_channels = host->info->nb_channels; unsigned int index; + mutex_lock(&chlist->lock); + index = find_first_zero_bit(chlist->allocated_channels, max_channels); if (index >= max_channels) { + mutex_unlock(&chlist->lock); dev_err(host->dev, "failed to find free channel\n"); return NULL; } @@ -114,6 +138,8 @@ static struct host1x_channel *acquire_unused_channel(struct host1x *host) set_bit(index, chlist->allocated_channels); + mutex_unlock(&chlist->lock); + return &chlist->channels[index]; } diff --git a/drivers/gpu/host1x/channel.h b/drivers/gpu/host1x/channel.h index 39044ff6c3aa..d7aede204d83 100644 --- a/drivers/gpu/host1x/channel.h +++ b/drivers/gpu/host1x/channel.h @@ -10,6 +10,7 @@ #include <linux/io.h> #include <linux/kref.h> +#include <linux/mutex.h> #include "cdma.h" @@ -18,6 +19,8 @@ struct host1x_channel; struct host1x_channel_list { struct host1x_channel *channels; + + struct mutex lock; unsigned long *allocated_channels; }; @@ -37,5 +40,6 @@ int host1x_channel_list_init(struct host1x_channel_list *chlist, void host1x_channel_list_free(struct host1x_channel_list *chlist); struct host1x_channel *host1x_channel_get_index(struct host1x *host, unsigned int index); +void host1x_channel_stop_all(struct host1x *host); #endif diff --git a/drivers/gpu/host1x/context.c b/drivers/gpu/host1x/context.c index a3f336edd991..955c971c528d 100644 --- a/drivers/gpu/host1x/context.c +++ b/drivers/gpu/host1x/context.c @@ -34,10 
+34,10 @@ int host1x_memory_context_list_init(struct host1x *host1x) if (err < 0) return 0; - cdl->devs = kcalloc(err, sizeof(*cdl->devs), GFP_KERNEL); + cdl->len = err / 4; + cdl->devs = kcalloc(cdl->len, sizeof(*cdl->devs), GFP_KERNEL); if (!cdl->devs) return -ENOMEM; - cdl->len = err / 4; for (i = 0; i < cdl->len; i++) { ctx = &cdl->devs[i]; diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 7c6699aed7d2..42fd504abbcd 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -488,7 +488,7 @@ static int host1x_get_resets(struct host1x *host) static int host1x_probe(struct platform_device *pdev) { struct host1x *host; - int err; + int err, i; host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); if (!host) @@ -516,9 +516,30 @@ static int host1x_probe(struct platform_device *pdev) return PTR_ERR(host->regs); } - host->syncpt_irq = platform_get_irq(pdev, 0); - if (host->syncpt_irq < 0) - return host->syncpt_irq; + for (i = 0; i < ARRAY_SIZE(host->syncpt_irqs); i++) { + char irq_name[] = "syncptX"; + + sprintf(irq_name, "syncpt%d", i); + + err = platform_get_irq_byname_optional(pdev, irq_name); + if (err == -ENXIO) + break; + if (err < 0) + return err; + + host->syncpt_irqs[i] = err; + } + + host->num_syncpt_irqs = i; + + /* Device tree without irq names */ + if (i == 0) { + host->syncpt_irqs[0] = platform_get_irq(pdev, 0); + if (host->syncpt_irqs[0] < 0) + return host->syncpt_irqs[0]; + + host->num_syncpt_irqs = 1; + } mutex_init(&host->devices_lock); INIT_LIST_HEAD(&host->devices); @@ -655,6 +676,7 @@ static int __maybe_unused host1x_runtime_suspend(struct device *dev) struct host1x *host = dev_get_drvdata(dev); int err; + host1x_channel_stop_all(host); host1x_intr_stop(host); host1x_syncpt_save(host); @@ -719,7 +741,7 @@ release_reset: static const struct dev_pm_ops host1x_pm_ops = { SET_RUNTIME_PM_OPS(host1x_runtime_suspend, host1x_runtime_resume, NULL) - /* TODO: add system suspend-resume once driver will be ready for that */ + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; static struct platform_driver tegra_host1x_driver = { diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h index 75de50fe03d0..c8e302de7625 100644 --- a/drivers/gpu/host1x/dev.h +++ b/drivers/gpu/host1x/dev.h @@ -124,7 +124,8 @@ struct host1x { void __iomem *regs; void __iomem *hv_regs; /* hypervisor region */ void __iomem *common_regs; - int syncpt_irq; + int syncpt_irqs[8]; + int num_syncpt_irqs; struct host1x_syncpt *syncpt; struct host1x_syncpt_base *bases; struct device *dev; diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c index b915ef7d0348..9880e0c47235 100644 --- a/drivers/gpu/host1x/hw/intr_hw.c +++ b/drivers/gpu/host1x/hw/intr_hw.c @@ -13,13 +13,20 @@ #include "../intr.h" #include "../dev.h" +struct host1x_intr_irq_data { + struct host1x *host; + u32 offset; +}; + static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id) { - struct host1x *host = dev_id; + struct host1x_intr_irq_data *irq_data = dev_id; + struct host1x *host = irq_data->host; unsigned long reg; unsigned int i, id; - for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); i++) { + for (i = irq_data->offset; i < DIV_ROUND_UP(host->info->nb_pts, 32); + i += host->num_syncpt_irqs) { reg = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i)); @@ -67,26 +74,41 @@ static void intr_hw_init(struct host1x *host, u32 cpm) /* * Program threshold interrupt destination among 8 lines per VM, - * per syncpoint. 
For now, just direct all to the first interrupt - * line. + * per syncpoint. For each group of 32 syncpoints (corresponding to one + * interrupt status register), direct to one interrupt line, going + * around in a round robin fashion. */ - for (id = 0; id < host->info->nb_pts; id++) - host1x_sync_writel(host, 0, HOST1X_SYNC_SYNCPT_INTR_DEST(id)); + for (id = 0; id < host->info->nb_pts; id++) { + u32 reg_offset = id / 32; + u32 irq_index = reg_offset % host->num_syncpt_irqs; + + host1x_sync_writel(host, irq_index, HOST1X_SYNC_SYNCPT_INTR_DEST(id)); + } #endif } static int host1x_intr_init_host_sync(struct host1x *host, u32 cpm) { - int err; + int err, i; + struct host1x_intr_irq_data *irq_data; + + irq_data = devm_kcalloc(host->dev, host->num_syncpt_irqs, sizeof(irq_data[0]), GFP_KERNEL); + if (!irq_data) + return -ENOMEM; host1x_hw_intr_disable_all_syncpt_intrs(host); - err = devm_request_irq(host->dev, host->syncpt_irq, - syncpt_thresh_isr, IRQF_SHARED, - "host1x_syncpt", host); - if (err < 0) - return err; + for (i = 0; i < host->num_syncpt_irqs; i++) { + irq_data[i].host = host; + irq_data[i].offset = i; + + err = devm_request_irq(host->dev, host->syncpt_irqs[i], + syncpt_thresh_isr, IRQF_SHARED, + "host1x_syncpt", &irq_data[i]); + if (err < 0) + return err; + } intr_hw_init(host, cpm); diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c index 426c88a516e5..29f40e658772 100644 --- a/drivers/usb/typec/altmodes/displayport.c +++ b/drivers/usb/typec/altmodes/displayport.c @@ -153,11 +153,11 @@ static int dp_altmode_status_update(struct dp_altmode *dp) } } } else { - if (dp->hpd != hpd) { - drm_connector_oob_hotplug_event(dp->connector_fwnode); - dp->hpd = hpd; - sysfs_notify(&dp->alt->dev.kobj, "displayport", "hpd"); - } + drm_connector_oob_hotplug_event(dp->connector_fwnode, + hpd ? connector_status_connected : + connector_status_disconnected); + dp->hpd = hpd; + sysfs_notify(&dp->alt->dev.kobj, "displayport", "hpd"); } return ret; @@ -173,7 +173,8 @@ static int dp_altmode_configured(struct dp_altmode *dp) * configuration is complete to signal HPD. 
*/ if (dp->pending_hpd) { - drm_connector_oob_hotplug_event(dp->connector_fwnode); + drm_connector_oob_hotplug_event(dp->connector_fwnode, + connector_status_connected); sysfs_notify(&dp->alt->dev.kobj, "displayport", "hpd"); dp->pending_hpd = false; } @@ -618,8 +619,8 @@ void dp_altmode_remove(struct typec_altmode *alt) cancel_work_sync(&dp->work); if (dp->connector_fwnode) { - if (dp->hpd) - drm_connector_oob_hotplug_event(dp->connector_fwnode); + drm_connector_oob_hotplug_event(dp->connector_fwnode, + connector_status_disconnected); fwnode_handle_put(dp->connector_fwnode); } diff --git a/drivers/video/fbdev/core/fb_chrdev.c b/drivers/video/fbdev/core/fb_chrdev.c index eadb81f53a82..32a7315b4b6d 100644 --- a/drivers/video/fbdev/core/fb_chrdev.c +++ b/drivers/video/fbdev/core/fb_chrdev.c @@ -365,7 +365,8 @@ static int fb_mmap(struct file *file, struct vm_area_struct *vma) mutex_unlock(&info->mm_lock); vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); - fb_pgprotect(file, vma, start); + vma->vm_page_prot = pgprot_framebuffer(vma->vm_page_prot, vma->vm_start, + vma->vm_end, start); return vm_iomap_memory(vma, start, len); } diff --git a/include/asm-generic/fb.h b/include/asm-generic/fb.h index bb7ee9c70e60..6ccabb400aa6 100644 --- a/include/asm-generic/fb.h +++ b/include/asm-generic/fb.h @@ -12,14 +12,14 @@ #include <linux/pgtable.h> struct fb_info; -struct file; -#ifndef fb_pgprotect -#define fb_pgprotect fb_pgprotect -static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, - unsigned long off) +#ifndef pgprot_framebuffer +#define pgprot_framebuffer pgprot_framebuffer +static inline pgprot_t pgprot_framebuffer(pgprot_t prot, + unsigned long vm_start, unsigned long vm_end, + unsigned long offset) { - vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + return pgprot_writecombine(prot); } #endif diff --git a/include/drm/bridge/samsung-dsim.h b/include/drm/bridge/samsung-dsim.h index 6fc9bb2979e4..e0c105051246 100644 --- a/include/drm/bridge/samsung-dsim.h +++ b/include/drm/bridge/samsung-dsim.h @@ -61,6 +61,8 @@ struct samsung_dsim_driver_data { unsigned int num_bits_resol; unsigned int pll_p_offset; const unsigned int *reg_values; + unsigned int pll_fin_min; + unsigned int pll_fin_max; u16 m_min; u16 m_max; }; @@ -88,6 +90,7 @@ struct samsung_dsim { void __iomem *reg_base; struct phy *phy; struct clk **clks; + struct clk *pll_clk; struct regulator_bulk_data supplies[2]; int irq; struct gpio_desc *te_gpio; @@ -116,7 +119,7 @@ struct samsung_dsim { }; extern int samsung_dsim_probe(struct platform_device *pdev); -extern int samsung_dsim_remove(struct platform_device *pdev); +extern void samsung_dsim_remove(struct platform_device *pdev); extern const struct dev_pm_ops samsung_dsim_pm_ops; #endif /* __SAMSUNG_DSIM__ */ diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h index 3369104e2d25..3d74b2cec72f 100644 --- a/include/drm/display/drm_dp_helper.h +++ b/include/drm/display/drm_dp_helper.h @@ -272,8 +272,8 @@ struct drm_dp_aux_msg { }; struct cec_adapter; -struct edid; struct drm_connector; +struct drm_edid; /** * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX @@ -507,18 +507,18 @@ bool drm_dp_downstream_is_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], u8 type); bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], - const struct edid *edid); + const struct drm_edid *drm_edid); int drm_dp_downstream_max_dotclock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 
port_cap[4]); int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], - const struct edid *edid); + const struct drm_edid *drm_edid); int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], - const struct edid *edid); + const struct drm_edid *drm_edid); int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], - const struct edid *edid); + const struct drm_edid *drm_edid); bool drm_dp_downstream_420_passthrough(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4]); bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE], @@ -530,7 +530,7 @@ int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]); void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4], - const struct edid *edid, + const struct drm_edid *drm_edid, struct drm_dp_aux *aux); enum drm_mode_subconnector drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 6302caa0bd51..fe88d7fc6b8f 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -1336,7 +1336,8 @@ struct drm_connector_funcs { * This will get called when a hotplug-event for a drm-connector * has been received from a source outside the display driver / device. */ - void (*oob_hotplug_event)(struct drm_connector *connector); + void (*oob_hotplug_event)(struct drm_connector *connector, + enum drm_connector_status status); /** * @debugfs_init: @@ -1980,7 +1981,8 @@ drm_connector_is_unregistered(struct drm_connector *connector) DRM_CONNECTOR_UNREGISTERED; } -void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode); +void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode, + enum drm_connector_status status); const char *drm_get_connector_type_name(unsigned int connector_type); const char *drm_get_connector_status_name(enum drm_connector_status status); const char *drm_get_subpixel_order_name(enum subpixel_order order); diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h index 532ae78ca747..ccf91daa4307 100644 --- a/include/drm/drm_fourcc.h +++ b/include/drm/drm_fourcc.h @@ -22,6 +22,7 @@ #ifndef __DRM_FOURCC_H__ #define __DRM_FOURCC_H__ +#include <linux/math.h> #include <linux/types.h> #include <uapi/drm/drm_fourcc.h> @@ -279,7 +280,7 @@ int drm_format_info_plane_width(const struct drm_format_info *info, int width, if (plane == 0) return width; - return width / info->hsub; + return DIV_ROUND_UP(width, info->hsub); } /** @@ -301,7 +302,7 @@ int drm_format_info_plane_height(const struct drm_format_info *info, int height, if (plane == 0) return height; - return height / info->vsub; + return DIV_ROUND_UP(height, info->vsub); } const struct drm_format_info *__drm_format_info(u32 format); diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h index 0dcc07b68654..80ece7b6dd9b 100644 --- a/include/drm/drm_framebuffer.h +++ b/include/drm/drm_framebuffer.h @@ -292,11 +292,6 @@ static inline void drm_framebuffer_assign(struct drm_framebuffer **p, &fb->head != (&(dev)->mode_config.fb_list); \ fb = list_next_entry(fb, head)) -int drm_framebuffer_plane_width(int width, - const struct drm_framebuffer *fb, int plane); -int drm_framebuffer_plane_height(int height, - const struct drm_framebuffer *fb, int plane); - /** * struct drm_afbc_framebuffer - a special afbc frame buffer object * diff --git a/include/drm/drm_gem.h 
b/include/drm/drm_gem.h index bc9f6aa2f3fe..16364487fde9 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -209,6 +209,15 @@ struct drm_gem_object_funcs { enum drm_gem_object_status (*status)(struct drm_gem_object *obj); /** + * @rss: + * + * Return resident size of the object in physical memory. + * + * Called by drm_show_memory_stats(). + */ + size_t (*rss)(struct drm_gem_object *obj); + + /** * @vm_ops: * * Virtual memory operations used with mmap. diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h index cb71aa616bd3..1b06d074ade0 100644 --- a/include/linux/iosys-map.h +++ b/include/linux/iosys-map.h @@ -426,7 +426,7 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset, * iosys_map_rd_field - Read a member from a struct in the iosys_map * * @map__: The iosys_map structure - * @struct_offset__: Offset from the beggining of the map, where the struct + * @struct_offset__: Offset from the beginning of the map, where the struct * is located * @struct_type__: The struct describing the layout of the mapping * @field__: Member of the struct to read @@ -494,7 +494,7 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset, * iosys_map_wr_field - Write to a member of a struct in the iosys_map * * @map__: The iosys_map structure - * @struct_offset__: Offset from the beggining of the map, where the struct + * @struct_offset__: Offset from the beginning of the map, where the struct * is located * @struct_type__: The struct describing the layout of the mapping * @field__: Member of the struct to read diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 7000e5910a1d..218edb0a96f8 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -38,13 +38,13 @@ extern "C" { */ /** - * DOC: uevents generated by i915 on it's device node + * DOC: uevents generated by i915 on its device node * * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch - * event from the gpu l3 cache. Additional information supplied is ROW, + * event from the GPU L3 cache. Additional information supplied is ROW, * BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep - * track of these events and if a specific cache-line seems to have a - * persistent error remap it with the l3 remapping tool supplied in + * track of these events, and if a specific cache-line seems to have a + * persistent error, remap it with the L3 remapping tool supplied in * intel-gpu-tools. The value supplied with the event is always 1. * * I915_ERROR_UEVENT - Generated upon error detection, currently only via |
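Editor's note on the __counted_by() hunks (vc4, virtio-gpu, vmwgfx) above: the annotation ties a flexible array member to the struct field that holds its element count, so FORTIFY_SOURCE and UBSAN bounds checking can validate accesses at run time. Below is a minimal, self-contained sketch of the allocation pattern these annotations expect; the struct and function names are illustrative (loosely modeled on the vc4 perfmon hunk), not taken from the patch.

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct perfmon_example {
	unsigned int ncounters;
	u64 counters[] __counted_by(ncounters);
};

static struct perfmon_example *perfmon_example_alloc(unsigned int ncounters)
{
	struct perfmon_example *pm;

	/* struct_size() computes sizeof(*pm) + ncounters * sizeof(pm->counters[0])
	 * with overflow checking. */
	pm = kzalloc(struct_size(pm, counters, ncounters), GFP_KERNEL);
	if (!pm)
		return NULL;

	/*
	 * The field named in __counted_by() must be assigned before the
	 * flexible array is indexed, so fortified accesses compare against
	 * the correct length.
	 */
	pm->ncounters = ncounters;

	return pm;
}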
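The host1x interrupt rework above spreads syncpoint threshold interrupts across up to eight IRQ lines: each group of 32 syncpoints (one THRESH_CPU0_INT_STATUS register) is directed to one line in round-robin order, and each ISR instance then walks the status registers starting at its own offset in steps of num_syncpt_irqs. The standalone sketch below only reproduces that mapping arithmetic with a hypothetical helper, to make the distribution visible; it is not driver code.

#include <stdio.h>

/*
 * Each group of 32 syncpoints shares one interrupt status register; the
 * patch spreads those register groups across the available interrupt
 * lines round-robin. Hypothetical helper mirroring that mapping.
 */
static unsigned int syncpt_irq_line(unsigned int syncpt_id, unsigned int num_irqs)
{
	unsigned int reg_offset = syncpt_id / 32;

	return reg_offset % num_irqs;
}

int main(void)
{
	unsigned int num_irqs = 8;
	unsigned int id;

	/* With 8 lines, syncpoints 0-31 map to line 0, 32-63 to line 1, and
	 * 256-287 wrap back to line 0. */
	for (id = 0; id < 288; id += 32)
		printf("syncpt %3u -> irq line %u\n", id, syncpt_irq_line(id, num_irqs));

	return 0;
}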
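drm_connector_oob_hotplug_event() now takes an enum drm_connector_status, so out-of-band sources such as the Type-C DisplayPort altmode report whether the sink appeared or went away instead of merely signalling "something changed". A small sketch of a caller under the new signature follows; the wrapper function is hypothetical, only the DRM call and the enum values come from the patch.

#include <drm/drm_connector.h>
#include <linux/fwnode.h>

/*
 * Report an out-of-band hotplug change for the connector that the given
 * fwnode refers to. With the updated API the caller states explicitly
 * whether the sink is connected or gone, instead of the DRM core having
 * to re-probe to find out.
 */
static void example_report_oob_hpd(struct fwnode_handle *connector_fwnode,
				   bool connected)
{
	drm_connector_oob_hotplug_event(connector_fwnode,
					connected ? connector_status_connected :
						    connector_status_disconnected);
}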
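The fbdev change above replaces the void fb_pgprotect() hook with pgprot_framebuffer(), which takes the current protection bits plus the mapping range and offset and returns the protection to use, rather than mutating the VMA behind the caller's back. As a rough illustration (not the patched fb_chrdev.c code), a driver-style mmap path using the new helper might look like the sketch below; the function name and the use of info->fix are assumptions.

#include <linux/fb.h>
#include <linux/mm.h>

/* Illustrative mmap implementation for a simple framebuffer device. */
static int example_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	unsigned long start = info->fix.smem_start;	/* physical base of the framebuffer */
	u32 len = info->fix.smem_len;

	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	/* New helper: returns a pgprot_t instead of modifying the VMA itself. */
	vma->vm_page_prot = pgprot_framebuffer(vma->vm_page_prot, vma->vm_start,
					       vma->vm_end, start);

	return vm_iomap_memory(vma, start, len);
}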
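The drm_dp_helper.h prototypes above move from the raw struct edid to the opaque struct drm_edid container. Below is a hedged sketch of a caller reading the sink's EDID through the drm_edid API and handing it to one of the converted helpers; the wrapper function and its parameters are invented for illustration.

#include <drm/display/drm_dp_helper.h>
#include <drm/drm_edid.h>

static int example_downstream_tmds_clock(struct drm_connector *connector,
					 struct drm_dp_aux *aux,
					 const u8 dpcd[DP_RECEIVER_CAP_SIZE],
					 const u8 port_cap[4])
{
	const struct drm_edid *drm_edid;
	int max_tmds_clock;

	/* drm_edid_read_ddc() returns the opaque drm_edid container that the
	 * converted helpers now expect. */
	drm_edid = drm_edid_read_ddc(connector, &aux->ddc);
	max_tmds_clock = drm_dp_downstream_max_tmds_clock(dpcd, port_cap, drm_edid);
	drm_edid_free(drm_edid);

	return max_tmds_clock;
}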
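The drm_fourcc.h hunks switch drm_format_info_plane_width()/_height() from truncating division to DIV_ROUND_UP(), so buffers with odd dimensions and subsampled planes still account for the final partial chroma sample. A tiny standalone example of the difference for a 2x2-subsampled format; the numbers are arbitrary.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* e.g. NV12: the chroma plane is subsampled by 2 in both directions. */
	int width = 101, height = 57;
	int hsub = 2, vsub = 2;

	/* Old behaviour: integer division drops the odd last column/row. */
	printf("truncating:  %dx%d\n", width / hsub, height / vsub);		/* 50x28 */
	/* New behaviour: round up so the partial chroma sample is covered. */
	printf("rounding up: %dx%d\n", DIV_ROUND_UP(width, hsub),
	       DIV_ROUND_UP(height, vsub));					/* 51x29 */

	return 0;
}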
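The new rss hook in struct drm_gem_object_funcs lets drm_show_memory_stats() report how much of an object is actually resident rather than only its full size. A sketch of how a driver might wire it up; the example_bo layout and its resident_size bookkeeping are hypothetical.

#include <linux/container_of.h>
#include <drm/drm_gem.h>

struct example_bo {
	struct drm_gem_object base;
	/* Bytes of backing storage currently resident; hypothetical bookkeeping. */
	size_t resident_size;
};

static size_t example_gem_rss(struct drm_gem_object *obj)
{
	struct example_bo *bo = container_of(obj, struct example_bo, base);

	/* drm_show_memory_stats() sums this per DRM client when the hook is set. */
	return bo->resident_size;
}

static const struct drm_gem_object_funcs example_gem_funcs = {
	.rss = example_gem_rss,
	/* .free, .vmap, .mmap, ... omitted from this sketch */
};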