344 files changed, 14884 insertions, 12108 deletions
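Much of the churn in this diff is the replacement of amdgpu's open-coded sub-allocator with the new shared DRM suballocation helper (CONFIG_DRM_SUBALLOC_HELPER, drm_suballoc.o). The following is a rough sketch of the consumer side, inferred purely from the amdgpu call sites visible further down in this diff; the my_* wrapper names and pool struct are illustrative, not kernel code:

#include <linux/err.h>
#include <linux/dma-fence.h>
#include <drm/drm_suballoc.h>

/* Illustrative pool wrapper; mirrors struct amdgpu_sa_manager below. */
struct my_sa_pool {
	struct drm_suballoc_manager base;
	u64 gpu_addr;	/* base GPU address of the backing BO */
	void *cpu_ptr;	/* CPU mapping of the backing BO */
};

static void my_sa_pool_init(struct my_sa_pool *pool, u32 size, u32 align)
{
	/* The backing buffer itself is allocated separately (a kernel BO
	 * in amdgpu); the manager only tracks offsets within it. */
	drm_suballoc_manager_init(&pool->base, size, align);
}

static int my_sa_alloc(struct my_sa_pool *pool, unsigned int size,
		       struct drm_suballoc **out)
{
	/* Judging by the amdgpu conversion, this waits (interruptibly,
	 * given the "true") for fenced ranges to free up when the pool is
	 * full; the trailing 0 appears to select the manager's default
	 * alignment. */
	struct drm_suballoc *sa = drm_suballoc_new(&pool->base, size,
						   GFP_KERNEL, true, 0);

	if (IS_ERR(sa))
		return PTR_ERR(sa);
	*out = sa;
	return 0;
}

static u64 my_sa_gpu_addr(struct my_sa_pool *pool, struct drm_suballoc *sa)
{
	/* GPU address = backing BO base + offset within the pool. */
	return pool->gpu_addr + drm_suballoc_soffset(sa);
}

static void my_sa_free(struct drm_suballoc **sa, struct dma_fence *fence)
{
	/* The range is only recycled once @fence signals. */
	drm_suballoc_free(*sa, fence);
	*sa = NULL;
}

Teardown would be drm_suballoc_manager_fini(&pool->base) once all allocations are fenced off, matching amdgpu_sa_bo_manager_fini() in the amdgpu_sa.c hunk below.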
diff --git a/Documentation/devicetree/bindings/display/bridge/analogix,dp.yaml b/Documentation/devicetree/bindings/display/bridge/analogix,dp.yaml new file mode 100644 index 000000000000..c9b06885cc63 --- /dev/null +++ b/Documentation/devicetree/bindings/display/bridge/analogix,dp.yaml @@ -0,0 +1,63 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/bridge/analogix,dp.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Analogix Display Port bridge + +maintainers: + - Rob Herring <[email protected]> + +properties: + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: true + + clock-names: true + + phys: true + + phy-names: + const: dp + + force-hpd: + description: + Indicate driver need force hpd when hpd detect failed, this + is used for some eDP screen which don't have a hpd signal. + + hpd-gpios: + description: + Hotplug detect GPIO. + Indicates which GPIO should be used for hotplug detection + + ports: + $ref: /schemas/graph.yaml#/properties/ports + + properties: + port@0: + $ref: /schemas/graph.yaml#/properties/port + description: + Input node to receive pixel data. + + port@1: + $ref: /schemas/graph.yaml#/properties/port + description: + Port node with one endpoint connected to a dp-connector node. + + required: + - port@0 + - port@1 + +required: + - reg + - interrupts + - clock-names + - clocks + - ports + +additionalProperties: true diff --git a/Documentation/devicetree/bindings/display/bridge/analogix_dp.txt b/Documentation/devicetree/bindings/display/bridge/analogix_dp.txt deleted file mode 100644 index 027d76c27a41..000000000000 --- a/Documentation/devicetree/bindings/display/bridge/analogix_dp.txt +++ /dev/null @@ -1,51 +0,0 @@ -Analogix Display Port bridge bindings - -Required properties for dp-controller: - -compatible: - platform specific such as: - * "samsung,exynos5-dp" - * "rockchip,rk3288-dp" - * "rockchip,rk3399-edp" - -reg: - physical base address of the controller and length - of memory mapped region. - -interrupts: - interrupt combiner values. - -clocks: - from common clock binding: handle to dp clock. - -clock-names: - from common clock binding: Shall be "dp". - -phys: - from general PHY binding: the phandle for the PHY device. - -phy-names: - from general PHY binding: Should be "dp". - -Optional properties for dp-controller: - -force-hpd: - Indicate driver need force hpd when hpd detect failed, this - is used for some eDP screen which don't have hpd signal. - -hpd-gpios: - Hotplug detect GPIO.
- Indicates which GPIO should be used for hotplug detection - -port@[X]: SoC specific port nodes with endpoint definitions as defined - in Documentation/devicetree/bindings/media/video-interfaces.txt, - please refer to the SoC specific binding document: - * Documentation/devicetree/bindings/display/exynos/exynos_dp.txt - * Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt - -[1]: Documentation/devicetree/bindings/media/video-interfaces.txt -------------------------------------------------------------------------------- - -Example: - - dp-controller { - compatible = "samsung,exynos5-dp"; - reg = <0x145b0000 0x10000>; - interrupts = <10 3>; - interrupt-parent = <&combiner>; - clocks = <&clock 342>; - clock-names = "dp"; - - phys = <&dp_phy>; - phy-names = "dp"; - }; diff --git a/Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml index 11fd68a70dca..0b51c64f141a 100644 --- a/Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml +++ b/Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml @@ -26,19 +26,9 @@ properties: reg: maxItems: 1 - clocks: - items: - - description: Module clock - - description: DSI bus clock for either AHB and APB - - description: Pixel clock for the DPI/RGB input - minItems: 2 - - clock-names: - items: - - const: ref - - const: pclk - - const: px_clk - minItems: 2 + clocks: true + + clock-names: true resets: maxItems: 1 diff --git a/Documentation/devicetree/bindings/display/dsi-controller.yaml b/Documentation/devicetree/bindings/display/dsi-controller.yaml index ca21671f6bdd..67ce10307ee0 100644 --- a/Documentation/devicetree/bindings/display/dsi-controller.yaml +++ b/Documentation/devicetree/bindings/display/dsi-controller.yaml @@ -30,6 +30,15 @@ properties: $nodename: pattern: "^dsi(@.*)?$" + clock-master: + type: boolean + description: + Should be enabled if the host is being used in conjunction with + another DSI host to drive the same peripheral. Hardware supporting + such a configuration generally requires the data on both the busses + to be driven by the same clock. Only the DSI host instance + controlling this clock should contain this property. + "#address-cells": const: 1 @@ -52,15 +61,6 @@ patternProperties: case the reg property can take multiple entries, one for each virtual channel that the peripheral responds to. - clock-master: - type: boolean - description: - Should be enabled if the host is being used in conjunction with - another DSI host to drive the same peripheral. Hardware supporting - such a configuration generally requires the data on both the busses - to be driven by the same clock. Only the DSI host instance - controlling this clock should contain this property. 
- enforce-video-mode: type: boolean description: diff --git a/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt b/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt index 9b6cba3f82af..3a401590320f 100644 --- a/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt +++ b/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt @@ -50,7 +50,7 @@ Optional properties for dp-controller: Documentation/devicetree/bindings/display/panel/display-timing.txt For the below properties, please refer to Analogix DP binding document: - * Documentation/devicetree/bindings/display/bridge/analogix_dp.txt + * Documentation/devicetree/bindings/display/bridge/analogix,dp.yaml -phys (required) -phy-names (required) -hpd-gpios (optional) diff --git a/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml b/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml index c06902e4fe70..41eb7fbf7715 100644 --- a/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml +++ b/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml @@ -17,6 +17,8 @@ properties: items: - enum: - chongzhou,cz101b4001 + - radxa,display-10hd-ad001 + - radxa,display-8hd-ad002 - const: jadard,jd9365da-h3 reg: true diff --git a/Documentation/devicetree/bindings/display/panel/samsung,ams495qa01.yaml b/Documentation/devicetree/bindings/display/panel/samsung,ams495qa01.yaml new file mode 100644 index 000000000000..58fa073ce258 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/samsung,ams495qa01.yaml @@ -0,0 +1,57 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/samsung,ams495qa01.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Samsung AMS495QA01 panel with Magnachip D53E6EA8966 controller + +maintainers: + - Chris Morgan <[email protected]> + +allOf: + - $ref: panel-common.yaml# + +properties: + compatible: + const: samsung,ams495qa01 + + reg: true + reset-gpios: + description: reset gpio, must be GPIO_ACTIVE_LOW + elvdd-supply: + description: regulator that supplies voltage to the panel display + enable-gpios: true + port: true + vdd-supply: + description: regulator that supplies voltage to panel logic + +required: + - compatible + - reg + - reset-gpios + - vdd-supply + +additionalProperties: false + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + spi { + #address-cells = <1>; + #size-cells = <0>; + panel@0 { + compatible = "samsung,ams495qa01"; + reg = <0>; + reset-gpios = <&gpio4 0 GPIO_ACTIVE_LOW>; + vdd-supply = <&vcc_3v3>; + + port { + mipi_in_panel: endpoint { + remote-endpoint = <&mipi_out_panel>; + }; + }; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt deleted file mode 100644 index 43561584c13a..000000000000 --- a/Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt +++ /dev/null @@ -1,98 +0,0 @@ -Rockchip RK3288 specific extensions to the Analogix Display Port -================================ - -Required properties: -- compatible: "rockchip,rk3288-dp", - "rockchip,rk3399-edp"; - -- reg: physical base address of the controller and length - -- clocks: from common clock binding: handle to dp clock. - of memory mapped region. 
- -- clock-names: from common clock binding: - Required elements: "dp" "pclk" - -- resets: Must contain an entry for each entry in reset-names. - See ../reset/reset.txt for details. - -- pinctrl-names: Names corresponding to the chip hotplug pinctrl states. -- pinctrl-0: pin-control mode. should be <&edp_hpd> - -- reset-names: Must include the name "dp" - -- rockchip,grf: this soc should set GRF regs, so need get grf here. - -- ports: there are 2 port nodes with endpoint definitions as defined in - Documentation/devicetree/bindings/media/video-interfaces.txt. - Port 0: contained 2 endpoints, connecting to the output of vop. - Port 1: contained 1 endpoint, connecting to the input of panel. - -Optional property for different chips: -- clocks: from common clock binding: handle to grf_vio clock. - -- clock-names: from common clock binding: - Required elements: "grf" - -For the below properties, please refer to Analogix DP binding document: - * Documentation/devicetree/bindings/display/bridge/analogix_dp.txt -- phys (required) -- phy-names (required) -- hpd-gpios (optional) -- force-hpd (optional) -------------------------------------------------------------------------------- - -Example: - dp-controller: dp@ff970000 { - compatible = "rockchip,rk3288-dp"; - reg = <0xff970000 0x4000>; - interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&cru SCLK_EDP>, <&cru PCLK_EDP_CTRL>; - clock-names = "dp", "pclk"; - phys = <&dp_phy>; - phy-names = "dp"; - - rockchip,grf = <&grf>; - resets = <&cru 111>; - reset-names = "dp"; - - pinctrl-names = "default"; - pinctrl-0 = <&edp_hpd>; - - - ports { - #address-cells = <1>; - #size-cells = <0>; - edp_in: port@0 { - reg = <0>; - #address-cells = <1>; - #size-cells = <0>; - edp_in_vopb: endpoint@0 { - reg = <0>; - remote-endpoint = <&vopb_out_edp>; - }; - edp_in_vopl: endpoint@1 { - reg = <1>; - remote-endpoint = <&vopl_out_edp>; - }; - }; - - edp_out: port@1 { - reg = <1>; - #address-cells = <1>; - #size-cells = <0>; - edp_out_panel: endpoint { - reg = <0>; - remote-endpoint = <&panel_in_edp> - }; - }; - }; - }; - - pinctrl { - edp { - edp_hpd: edp-hpd { - rockchip,pins = <7 11 RK_FUNC_2 &pcfg_pull_none>; - }; - }; - }; diff --git a/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt deleted file mode 100644 index 9a223df8530c..000000000000 --- a/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt +++ /dev/null @@ -1,94 +0,0 @@ -Rockchip specific extensions to the Synopsys Designware MIPI DSI -================================ - -Required properties: -- #address-cells: Should be <1>. -- #size-cells: Should be <0>. -- compatible: one of - "rockchip,px30-mipi-dsi", "snps,dw-mipi-dsi" - "rockchip,rk3288-mipi-dsi", "snps,dw-mipi-dsi" - "rockchip,rk3399-mipi-dsi", "snps,dw-mipi-dsi" - "rockchip,rk3568-mipi-dsi", "snps,dw-mipi-dsi" -- reg: Represent the physical address range of the controller. -- interrupts: Represent the controller's interrupt to the CPU(s). -- clocks, clock-names: Phandles to the controller's pll reference - clock(ref) when using an internal dphy and APB clock(pclk). - For RK3399, a phy config clock (phy_cfg) and a grf clock(grf) - are required. As described in [1]. -- rockchip,grf: this soc should set GRF regs to mux vopl/vopb. -- ports: contain a port node with endpoint definitions as defined in [2]. - For vopb,set the reg = <0> and set the reg = <1> for vopl. 
-- video port 0 for the VOP input, the remote endpoint maybe vopb or vopl -- video port 1 for either a panel or subsequent encoder - -Optional properties: -- phys: from general PHY binding: the phandle for the PHY device. -- phy-names: Should be "dphy" if phys references an external phy. -- #phy-cells: Defined when used as ISP phy, should be 0. -- power-domains: a phandle to mipi dsi power domain node. -- resets: list of phandle + reset specifier pairs, as described in [3]. -- reset-names: string reset name, must be "apb". - -[1] Documentation/devicetree/bindings/clock/clock-bindings.txt -[2] Documentation/devicetree/bindings/media/video-interfaces.txt -[3] Documentation/devicetree/bindings/reset/reset.txt - -Example: - mipi_dsi: mipi@ff960000 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "rockchip,rk3288-mipi-dsi", "snps,dw-mipi-dsi"; - reg = <0xff960000 0x4000>; - interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&cru SCLK_MIPI_24M>, <&cru PCLK_MIPI_DSI0>; - clock-names = "ref", "pclk"; - resets = <&cru SRST_MIPIDSI0>; - reset-names = "apb"; - rockchip,grf = <&grf>; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - mipi_in: port@0 { - reg = <0>; - #address-cells = <1>; - #size-cells = <0>; - - mipi_in_vopb: endpoint@0 { - reg = <0>; - remote-endpoint = <&vopb_out_mipi>; - }; - mipi_in_vopl: endpoint@1 { - reg = <1>; - remote-endpoint = <&vopl_out_mipi>; - }; - }; - - mipi_out: port@1 { - reg = <1>; - #address-cells = <1>; - #size-cells = <0>; - - mipi_out_panel: endpoint { - remote-endpoint = <&panel_in_mipi>; - }; - }; - }; - - panel { - compatible ="boe,tv080wum-nl0"; - reg = <0>; - - enable-gpios = <&gpio7 3 GPIO_ACTIVE_HIGH>; - pinctrl-names = "default"; - pinctrl-0 = <&lcd_en>; - backlight = <&backlight>; - - port { - panel_in_mipi: endpoint { - remote-endpoint = <&mipi_out_panel>; - }; - }; - }; - }; diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,analogix-dp.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,analogix-dp.yaml new file mode 100644 index 000000000000..60dedf9b2be7 --- /dev/null +++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,analogix-dp.yaml @@ -0,0 +1,103 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/rockchip/rockchip,analogix-dp.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Rockchip specific extensions to the Analogix Display Port + +maintainers: + - Sandy Huang <[email protected]> + - Heiko Stuebner <[email protected]> + +properties: + compatible: + enum: + - rockchip,rk3288-dp + - rockchip,rk3399-edp + + clocks: + minItems: 2 + maxItems: 3 + + clock-names: + minItems: 2 + items: + - const: dp + - const: pclk + - const: grf + + power-domains: + maxItems: 1 + + resets: + maxItems: 1 + + reset-names: + const: dp + + rockchip,grf: + $ref: /schemas/types.yaml#/definitions/phandle + description: + This SoC makes use of GRF regs. 
+ +required: + - compatible + - clocks + - clock-names + - resets + - reset-names + - rockchip,grf + +allOf: + - $ref: /schemas/display/bridge/analogix,dp.yaml# + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/clock/rk3288-cru.h> + #include <dt-bindings/interrupt-controller/arm-gic.h> + #include <dt-bindings/interrupt-controller/irq.h> + dp@ff970000 { + compatible = "rockchip,rk3288-dp"; + reg = <0xff970000 0x4000>; + interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cru SCLK_EDP>, <&cru PCLK_EDP_CTRL>; + clock-names = "dp", "pclk"; + phys = <&dp_phy>; + phy-names = "dp"; + resets = <&cru 111>; + reset-names = "dp"; + rockchip,grf = <&grf>; + pinctrl-0 = <&edp_hpd>; + pinctrl-names = "default"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + edp_in: port@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + + edp_in_vopb: endpoint@0 { + reg = <0>; + remote-endpoint = <&vopb_out_edp>; + }; + edp_in_vopl: endpoint@1 { + reg = <1>; + remote-endpoint = <&vopl_out_edp>; + }; + }; + + edp_out: port@1 { + reg = <1>; + + edp_out_panel: endpoint { + remote-endpoint = <&panel_in_edp>; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-mipi-dsi.yaml new file mode 100644 index 000000000000..8e8a40879140 --- /dev/null +++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-mipi-dsi.yaml @@ -0,0 +1,166 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/rockchip/rockchip,dw-mipi-dsi.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Rockchip specific extensions to the Synopsys Designware MIPI DSI + +maintainers: + - Sandy Huang <[email protected]> + - Heiko Stuebner <[email protected]> + +properties: + compatible: + items: + - enum: + - rockchip,px30-mipi-dsi + - rockchip,rk3288-mipi-dsi + - rockchip,rk3399-mipi-dsi + - rockchip,rk3568-mipi-dsi + - const: snps,dw-mipi-dsi + + interrupts: + maxItems: 1 + + clocks: + minItems: 1 + maxItems: 4 + + clock-names: + oneOf: + - minItems: 2 + items: + - const: ref + - const: pclk + - const: phy_cfg + - const: grf + - const: pclk + + rockchip,grf: + $ref: /schemas/types.yaml#/definitions/phandle + description: + This SoC uses GRF regs to switch between vopl/vopb. + + phys: + maxItems: 1 + + phy-names: + const: dphy + + "#phy-cells": + const: 0 + description: + Defined when in use as ISP phy. 
+ + power-domains: + maxItems: 1 + + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + +required: + - compatible + - clocks + - clock-names + - rockchip,grf + +allOf: + - $ref: /schemas/display/bridge/snps,dw-mipi-dsi.yaml# + - if: + properties: + compatible: + contains: + enum: + - rockchip,px30-mipi-dsi + - rockchip,rk3568-mipi-dsi + + then: + properties: + clocks: + maxItems: 1 + + clock-names: + maxItems: 1 + + required: + - phys + - phy-names + + - if: + properties: + compatible: + contains: + const: rockchip,rk3288-mipi-dsi + + then: + properties: + clocks: + maxItems: 2 + + clock-names: + maxItems: 2 + + - if: + properties: + compatible: + contains: + const: rockchip,rk3399-mipi-dsi + + then: + properties: + clocks: + minItems: 4 + + clock-names: + minItems: 4 + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/clock/rk3288-cru.h> + #include <dt-bindings/interrupt-controller/arm-gic.h> + #include <dt-bindings/interrupt-controller/irq.h> + + mipi_dsi: dsi@ff960000 { + compatible = "rockchip,rk3288-mipi-dsi", "snps,dw-mipi-dsi"; + reg = <0xff960000 0x4000>; + interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cru SCLK_MIPIDSI_24M>, <&cru PCLK_MIPI_DSI0>; + clock-names = "ref", "pclk"; + resets = <&cru SRST_MIPIDSI0>; + reset-names = "apb"; + rockchip,grf = <&grf>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + mipi_in: port@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + + mipi_in_vopb: endpoint@0 { + reg = <0>; + remote-endpoint = <&vopb_out_mipi>; + }; + mipi_in_vopl: endpoint@1 { + reg = <1>; + remote-endpoint = <&vopl_out_mipi>; + }; + }; + + mipi_out: port@1 { + reg = <1>; + + mipi_out_panel: endpoint { + remote-endpoint = <&panel_in_mipi>; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,lvds.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,lvds.yaml new file mode 100644 index 000000000000..03b002a05c47 --- /dev/null +++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,lvds.yaml @@ -0,0 +1,170 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/rockchip/rockchip,lvds.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Rockchip low-voltage differential signal (LVDS) transmitter + +maintainers: + - Sandy Huang <[email protected]> + - Heiko Stuebner <[email protected]> + +properties: + compatible: + enum: + - rockchip,px30-lvds + - rockchip,rk3288-lvds + + reg: + maxItems: 1 + + clocks: + maxItems: 1 + + clock-names: + const: pclk_lvds + + avdd1v0-supply: + description: 1.0V analog power. + + avdd1v8-supply: + description: 1.8V analog power. + + avdd3v3-supply: + description: 3.3V analog power. + + rockchip,grf: + $ref: /schemas/types.yaml#/definitions/phandle + description: Phandle to the general register files syscon. + + rockchip,output: + $ref: /schemas/types.yaml#/definitions/string + enum: [rgb, lvds, duallvds] + description: This describes the output interface. + + phys: + maxItems: 1 + + phy-names: + const: dphy + + pinctrl-names: + const: lcdc + + pinctrl-0: true + + power-domains: + maxItems: 1 + + ports: + $ref: /schemas/graph.yaml#/properties/ports + + properties: + port@0: + $ref: /schemas/graph.yaml#/properties/port + description: + Video port 0 for the VOP input. + The remote endpoint maybe vopb or vopl. + + port@1: + $ref: /schemas/graph.yaml#/properties/port + description: + Video port 1 for either a panel or subsequent encoder. 
+ + required: + - port@0 + - port@1 + +required: + - compatible + - rockchip,grf + - rockchip,output + - ports + +allOf: + - if: + properties: + compatible: + contains: + const: rockchip,px30-lvds + + then: + properties: + reg: false + clocks: false + clock-names: false + avdd1v0-supply: false + avdd1v8-supply: false + avdd3v3-supply: false + + required: + - phys + - phy-names + + - if: + properties: + compatible: + contains: + const: rockchip,rk3288-lvds + + then: + properties: + phys: false + phy-names: false + + required: + - reg + - clocks + - clock-names + - avdd1v0-supply + - avdd1v8-supply + - avdd3v3-supply + +additionalProperties: false + +examples: + - | + #include <dt-bindings/clock/rk3288-cru.h> + + lvds: lvds@ff96c000 { + compatible = "rockchip,rk3288-lvds"; + reg = <0xff96c000 0x4000>; + clocks = <&cru PCLK_LVDS_PHY>; + clock-names = "pclk_lvds"; + avdd1v0-supply = <&vdd10_lcd>; + avdd1v8-supply = <&vcc18_lcd>; + avdd3v3-supply = <&vcca_33>; + pinctrl-names = "lcdc"; + pinctrl-0 = <&lcdc_ctl>; + rockchip,grf = <&grf>; + rockchip,output = "rgb"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + lvds_in: port@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + + lvds_in_vopb: endpoint@0 { + reg = <0>; + remote-endpoint = <&vopb_out_lvds>; + }; + lvds_in_vopl: endpoint@1 { + reg = <1>; + remote-endpoint = <&vopl_out_lvds>; + }; + }; + + lvds_out: port@1 { + reg = <1>; + + lvds_out_panel: endpoint { + remote-endpoint = <&panel_in_lvds>; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-lvds.txt b/Documentation/devicetree/bindings/display/rockchip/rockchip-lvds.txt deleted file mode 100644 index aaf8c44cf90f..000000000000 --- a/Documentation/devicetree/bindings/display/rockchip/rockchip-lvds.txt +++ /dev/null @@ -1,92 +0,0 @@ -Rockchip RK3288 LVDS interface -================================ - -Required properties: -- compatible: matching the soc type, one of - - "rockchip,rk3288-lvds"; - - "rockchip,px30-lvds"; - -- reg: physical base address of the controller and length - of memory mapped region. -- clocks: must include clock specifiers corresponding to entries in the - clock-names property. -- clock-names: must contain "pclk_lvds" - -- avdd1v0-supply: regulator phandle for 1.0V analog power -- avdd1v8-supply: regulator phandle for 1.8V analog power -- avdd3v3-supply: regulator phandle for 3.3V analog power - -- rockchip,grf: phandle to the general register files syscon -- rockchip,output: "rgb", "lvds" or "duallvds", This describes the output interface - -- phys: LVDS/DSI DPHY (px30 only) -- phy-names: name of the PHY, must be "dphy" (px30 only) - -Optional properties: -- pinctrl-names: must contain a "lcdc" entry. -- pinctrl-0: pin control group to be used for this controller. - -Required nodes: - -The lvds has two video ports as described by - Documentation/devicetree/bindings/media/video-interfaces.txt -Their connections are modeled using the OF graph bindings specified in - Documentation/devicetree/bindings/graph.txt. 
- -- video port 0 for the VOP input, the remote endpoint maybe vopb or vopl -- video port 1 for either a panel or subsequent encoder - -Example: - -lvds_panel: lvds-panel { - compatible = "auo,b101ean01"; - enable-gpios = <&gpio7 21 GPIO_ACTIVE_HIGH>; - data-mapping = "jeida-24"; - - ports { - panel_in_lvds: endpoint { - remote-endpoint = <&lvds_out_panel>; - }; - }; -}; - -For Rockchip RK3288: - - lvds: lvds@ff96c000 { - compatible = "rockchip,rk3288-lvds"; - rockchip,grf = <&grf>; - reg = <0xff96c000 0x4000>; - clocks = <&cru PCLK_LVDS_PHY>; - clock-names = "pclk_lvds"; - pinctrl-names = "lcdc"; - pinctrl-0 = <&lcdc_ctl>; - avdd1v0-supply = <&vdd10_lcd>; - avdd1v8-supply = <&vcc18_lcd>; - avdd3v3-supply = <&vcca_33>; - rockchip,output = "rgb"; - ports { - #address-cells = <1>; - #size-cells = <0>; - - lvds_in: port@0 { - reg = <0>; - - lvds_in_vopb: endpoint@0 { - reg = <0>; - remote-endpoint = <&vopb_out_lvds>; - }; - lvds_in_vopl: endpoint@1 { - reg = <1>; - remote-endpoint = <&vopl_out_lvds>; - }; - }; - - lvds_out: port@1 { - reg = <1>; - - lvds_out_panel: endpoint { - remote-endpoint = <&panel_in_lvds>; - }; - }; - }; - }; diff --git a/Documentation/devicetree/bindings/display/simple-framebuffer.yaml b/Documentation/devicetree/bindings/display/simple-framebuffer.yaml index 3c9f29e428a4..296500f9da05 100644 --- a/Documentation/devicetree/bindings/display/simple-framebuffer.yaml +++ b/Documentation/devicetree/bindings/display/simple-framebuffer.yaml @@ -26,6 +26,11 @@ description: |+ over control to a driver for the real hardware. The bindings for the hw nodes must specify which node is considered the primary node. + If a panel node is given, then the driver uses this to configure the + physical width and height of the display. If no panel node is given, + then the driver uses the width and height properties of the simplefb + node to estimate it. + It is advised to add display# aliases to help the OS determine how to number things. 
If display# aliases are used, then if the simplefb node contains a display property then the /aliases/display# path @@ -117,6 +122,10 @@ properties: $ref: /schemas/types.yaml#/definitions/phandle description: Primary display hardware node + panel: + $ref: /schemas/types.yaml#/definitions/phandle + description: Display panel node + allwinner,pipeline: description: Pipeline used by the framebuffer on Allwinner SoCs enum: diff --git a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml index e697c928900d..65a2d5a4f28d 100644 --- a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml +++ b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml @@ -80,13 +80,17 @@ allOf: properties: compatible: contains: - const: rockchip,px30-grf + enum: + - rockchip,px30-grf then: properties: lvds: - description: - Documentation/devicetree/bindings/display/rockchip/rockchip-lvds.txt + type: object + + $ref: /schemas/display/rockchip/rockchip,lvds.yaml# + + unevaluatedProperties: false - if: properties: diff --git a/MAINTAINERS b/MAINTAINERS index ec57c42ed544..dd389b88c512 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6518,6 +6518,7 @@ L: [email protected] S: Maintained +B: https://gitlab.freedesktop.org/drm/msm/-/issues T: git https://gitlab.freedesktop.org/drm/msm.git F: Documentation/devicetree/bindings/display/msm/ F: drivers/gpu/drm/msm/ @@ -7044,7 +7045,7 @@ F: Documentation/devicetree/bindings/display/xlnx/ F: drivers/gpu/drm/xlnx/ DRM PANEL DRIVERS -M: Thierry Reding <[email protected]> +M: Neil Armstrong <[email protected]> R: Sam Ravnborg <[email protected]> S: Maintained diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c index 553bcbd787b3..a880f1dd857e 100644 --- a/drivers/accel/ivpu/ivpu_pm.c +++ b/drivers/accel/ivpu/ivpu_pm.c @@ -237,8 +237,6 @@ int ivpu_rpm_get(struct ivpu_device *vdev) { int ret; - ivpu_dbg(vdev, RPM, "rpm_get count %d\n", atomic_read(&vdev->drm.dev->power.usage_count)); - ret = pm_runtime_resume_and_get(vdev->drm.dev); if (!drm_WARN_ON(&vdev->drm, ret < 0)) vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT; @@ -248,8 +246,6 @@ int ivpu_rpm_get(struct ivpu_device *vdev) void ivpu_rpm_put(struct ivpu_device *vdev) { - ivpu_dbg(vdev, RPM, "rpm_put count %d\n", atomic_read(&vdev->drm.dev->power.usage_count)); - pm_runtime_mark_last_busy(vdev->drm.dev); pm_runtime_put_autosuspend(vdev->drm.dev); } @@ -314,16 +310,10 @@ void ivpu_pm_enable(struct ivpu_device *vdev) pm_runtime_allow(dev); pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); - - ivpu_dbg(vdev, RPM, "Enable RPM count %d\n", atomic_read(&dev->power.usage_count)); } void ivpu_pm_disable(struct ivpu_device *vdev) { - struct device *dev = vdev->drm.dev; - - ivpu_dbg(vdev, RPM, "Disable RPM count %d\n", atomic_read(&dev->power.usage_count)); - pm_runtime_get_noresume(vdev->drm.dev); pm_runtime_forbid(vdev->drm.dev); } diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 757c0fb77a6c..aa4ea8530cb3 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -828,7 +828,7 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach, * - dma_buf_attach() * - dma_buf_dynamic_attach() * - dma_buf_detach() - * - dma_buf_export( + * - dma_buf_export() * - dma_buf_fd() * - dma_buf_get() * - dma_buf_put() diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index dc0f94f02a82..ba3fb04bb691 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -10,13 
+10,13 @@ menuconfig DRM depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && HAS_DMA select DRM_PANEL_ORIENTATION_QUIRKS select HDMI - select FB_CMDLINE select I2C select DMA_SHARED_BUFFER select SYNC_FILE # gallium uses SYS_kcmp for os_same_file_description() to de-duplicate # device and dmabuf fd. Let's make sure that is available for our userspace. select KCMP + select VIDEO_CMDLINE select VIDEO_NOMODESET help Kernel-level support for the Direct Rendering Infrastructure (DRI) @@ -232,6 +232,10 @@ config DRM_GEM_SHMEM_HELPER help Choose this if you need the GEM shmem helper functions +config DRM_SUBALLOC_HELPER + tristate + depends on DRM + config DRM_SCHED tristate depends on DRM diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index ab4460fcd63f..1e04d135e866 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -88,6 +88,9 @@ obj-$(CONFIG_DRM_GEM_DMA_HELPER) += drm_dma_helper.o drm_shmem_helper-y := drm_gem_shmem_helper.o obj-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_shmem_helper.o +drm_suballoc_helper-y := drm_suballoc.o +obj-$(CONFIG_DRM_SUBALLOC_HELPER) += drm_suballoc_helper.o + drm_vram_helper-y := drm_gem_vram_helper.o obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig index a82d36ea88e2..5d1e28218020 100644 --- a/drivers/gpu/drm/amd/amdgpu/Kconfig +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig @@ -19,6 +19,7 @@ config DRM_AMDGPU select BACKLIGHT_CLASS_DEVICE select INTERVAL_TREE select DRM_BUDDY + select DRM_SUBALLOC_HELPER # amdgpu depends on ACPI_VIDEO when ACPI is enabled, for select to work # ACPI_VIDEO's dependencies must also be selected. select INPUT if ACPI diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 164141bc8b4a..dda88090f044 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -424,29 +424,11 @@ struct amdgpu_clock { * alignment). 
*/ -#define AMDGPU_SA_NUM_FENCE_LISTS 32 - struct amdgpu_sa_manager { - wait_queue_head_t wq; - struct amdgpu_bo *bo; - struct list_head *hole; - struct list_head flist[AMDGPU_SA_NUM_FENCE_LISTS]; - struct list_head olist; - unsigned size; - uint64_t gpu_addr; - void *cpu_ptr; - uint32_t domain; - uint32_t align; -}; - -/* sub-allocation buffer */ -struct amdgpu_sa_bo { - struct list_head olist; - struct list_head flist; - struct amdgpu_sa_manager *manager; - unsigned soffset; - unsigned eoffset; - struct dma_fence *fence; + struct drm_suballoc_manager base; + struct amdgpu_bo *bo; + uint64_t gpu_addr; + void *cpu_ptr; }; int amdgpu_fence_slab_init(void); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index bcccc348dbe2..df7eb0b7c4b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -69,7 +69,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (size) { r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type], - &ib->sa_bo, size, 256); + &ib->sa_bo, size); if (r) { dev_err(adev->dev, "failed to get a new IB (%d)\n", r); return r; @@ -309,8 +309,7 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev) for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) { r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i], - AMDGPU_IB_POOL_SIZE, - AMDGPU_GPU_PAGE_SIZE, + AMDGPU_IB_POOL_SIZE, 256, AMDGPU_GEM_DOMAIN_GTT); if (r) goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 6c7d672412b2..c842ce635a88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -600,7 +600,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && bo->tbo.resource->mem_type == TTM_PL_VRAM && - bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT) + amdgpu_bo_in_cpu_visible_vram(bo)) amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, ctx.bytes_moved); else @@ -1346,7 +1346,6 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct ttm_operation_ctx ctx = { false, false }; struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); - unsigned long offset; int r; /* Remember that this BO was accessed by the CPU */ @@ -1355,8 +1354,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) if (bo->resource->mem_type != TTM_PL_VRAM) return 0; - offset = bo->resource->start << PAGE_SHIFT; - if ((offset + bo->base.size) <= adev->gmc.visible_vram_size) + if (amdgpu_bo_in_cpu_visible_vram(abo)) return 0; /* Can't move a pinned BO to visible VRAM */ @@ -1378,10 +1376,9 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) else if (unlikely(r)) return VM_FAULT_SIGBUS; - offset = bo->resource->start << PAGE_SHIFT; /* this should never happen */ if (bo->resource->mem_type == TTM_PL_VRAM && - (offset + bo->base.size) > adev->gmc.visible_vram_size) + !amdgpu_bo_in_cpu_visible_vram(abo)) return VM_FAULT_SIGBUS; ttm_bo_move_to_lru_tail_unlocked(bo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 93207badf83f..5a85726ce853 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -336,15 +336,22 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev, /* * sub allocation */ +static inline struct amdgpu_sa_manager * 
+to_amdgpu_sa_manager(struct drm_suballoc_manager *manager) +{ + return container_of(manager, struct amdgpu_sa_manager, base); +} -static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo) +static inline uint64_t amdgpu_sa_bo_gpu_addr(struct drm_suballoc *sa_bo) { - return sa_bo->manager->gpu_addr + sa_bo->soffset; + return to_amdgpu_sa_manager(sa_bo->manager)->gpu_addr + + drm_suballoc_soffset(sa_bo); } -static inline void * amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo) +static inline void *amdgpu_sa_bo_cpu_addr(struct drm_suballoc *sa_bo) { - return sa_bo->manager->cpu_ptr + sa_bo->soffset; + return to_amdgpu_sa_manager(sa_bo->manager)->cpu_ptr + + drm_suballoc_soffset(sa_bo); } int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev, @@ -355,11 +362,11 @@ void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev, int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev, struct amdgpu_sa_manager *sa_manager); int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, - struct amdgpu_sa_bo **sa_bo, - unsigned size, unsigned align); + struct drm_suballoc **sa_bo, + unsigned int size); void amdgpu_sa_bo_free(struct amdgpu_device *adev, - struct amdgpu_sa_bo **sa_bo, - struct dma_fence *fence); + struct drm_suballoc **sa_bo, + struct dma_fence *fence); #if defined(CONFIG_DEBUG_FS) void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, struct seq_file *m); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 3989e755a5b4..018f36b10de8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -27,6 +27,7 @@ #include <drm/amdgpu_drm.h> #include <drm/gpu_scheduler.h> #include <drm/drm_print.h> +#include <drm/drm_suballoc.h> struct amdgpu_device; struct amdgpu_ring; @@ -92,7 +93,7 @@ enum amdgpu_ib_pool_type { }; struct amdgpu_ib { - struct amdgpu_sa_bo *sa_bo; + struct drm_suballoc *sa_bo; uint32_t length_dw; uint64_t gpu_addr; uint32_t *ptr; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 524d10b21041..c6b4337eb20c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -44,327 +44,63 @@ #include "amdgpu.h" -static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo); -static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager); - int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev, struct amdgpu_sa_manager *sa_manager, - unsigned size, u32 align, u32 domain) + unsigned int size, u32 suballoc_align, u32 domain) { - int i, r; - - init_waitqueue_head(&sa_manager->wq); - sa_manager->bo = NULL; - sa_manager->size = size; - sa_manager->domain = domain; - sa_manager->align = align; - sa_manager->hole = &sa_manager->olist; - INIT_LIST_HEAD(&sa_manager->olist); - for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) - INIT_LIST_HEAD(&sa_manager->flist[i]); + int r; - r = amdgpu_bo_create_kernel(adev, size, align, domain, &sa_manager->bo, - &sa_manager->gpu_addr, &sa_manager->cpu_ptr); + r = amdgpu_bo_create_kernel(adev, size, AMDGPU_GPU_PAGE_SIZE, domain, + &sa_manager->bo, &sa_manager->gpu_addr, + &sa_manager->cpu_ptr); if (r) { dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r); return r; } - memset(sa_manager->cpu_ptr, 0, sa_manager->size); + memset(sa_manager->cpu_ptr, 0, size); + drm_suballoc_manager_init(&sa_manager->base, size, suballoc_align); return r; } void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev, struct amdgpu_sa_manager 
*sa_manager) { - struct amdgpu_sa_bo *sa_bo, *tmp; - if (sa_manager->bo == NULL) { dev_err(adev->dev, "no bo for sa manager\n"); return; } - if (!list_empty(&sa_manager->olist)) { - sa_manager->hole = &sa_manager->olist, - amdgpu_sa_bo_try_free(sa_manager); - if (!list_empty(&sa_manager->olist)) { - dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n"); - } - } - list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) { - amdgpu_sa_bo_remove_locked(sa_bo); - } + drm_suballoc_manager_fini(&sa_manager->base); amdgpu_bo_free_kernel(&sa_manager->bo, &sa_manager->gpu_addr, &sa_manager->cpu_ptr); - sa_manager->size = 0; } -static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo) -{ - struct amdgpu_sa_manager *sa_manager = sa_bo->manager; - if (sa_manager->hole == &sa_bo->olist) { - sa_manager->hole = sa_bo->olist.prev; - } - list_del_init(&sa_bo->olist); - list_del_init(&sa_bo->flist); - dma_fence_put(sa_bo->fence); - kfree(sa_bo); -} - -static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager) +int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, + struct drm_suballoc **sa_bo, + unsigned int size) { - struct amdgpu_sa_bo *sa_bo, *tmp; + struct drm_suballoc *sa = drm_suballoc_new(&sa_manager->base, size, + GFP_KERNEL, true, 0); - if (sa_manager->hole->next == &sa_manager->olist) - return; + if (IS_ERR(sa)) { + *sa_bo = NULL; - sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist); - list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) { - if (sa_bo->fence == NULL || - !dma_fence_is_signaled(sa_bo->fence)) { - return; - } - amdgpu_sa_bo_remove_locked(sa_bo); + return PTR_ERR(sa); } -} -static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager) -{ - struct list_head *hole = sa_manager->hole; - - if (hole != &sa_manager->olist) { - return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset; - } + *sa_bo = sa; return 0; } -static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager) -{ - struct list_head *hole = sa_manager->hole; - - if (hole->next != &sa_manager->olist) { - return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset; - } - return sa_manager->size; -} - -static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager, - struct amdgpu_sa_bo *sa_bo, - unsigned size, unsigned align) -{ - unsigned soffset, eoffset, wasted; - - soffset = amdgpu_sa_bo_hole_soffset(sa_manager); - eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager); - wasted = (align - (soffset % align)) % align; - - if ((eoffset - soffset) >= (size + wasted)) { - soffset += wasted; - - sa_bo->manager = sa_manager; - sa_bo->soffset = soffset; - sa_bo->eoffset = soffset + size; - list_add(&sa_bo->olist, sa_manager->hole); - INIT_LIST_HEAD(&sa_bo->flist); - sa_manager->hole = &sa_bo->olist; - return true; - } - return false; -} - -/** - * amdgpu_sa_event - Check if we can stop waiting - * - * @sa_manager: pointer to the sa_manager - * @size: number of bytes we want to allocate - * @align: alignment we need to match - * - * Check if either there is a fence we can wait for or - * enough free memory to satisfy the allocation directly - */ -static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager, - unsigned size, unsigned align) -{ - unsigned soffset, eoffset, wasted; - int i; - - for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) - if (!list_empty(&sa_manager->flist[i])) - return true; - - soffset = amdgpu_sa_bo_hole_soffset(sa_manager); - eoffset = 
amdgpu_sa_bo_hole_eoffset(sa_manager); - wasted = (align - (soffset % align)) % align; - - if ((eoffset - soffset) >= (size + wasted)) { - return true; - } - - return false; -} - -static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager, - struct dma_fence **fences, - unsigned *tries) -{ - struct amdgpu_sa_bo *best_bo = NULL; - unsigned i, soffset, best, tmp; - - /* if hole points to the end of the buffer */ - if (sa_manager->hole->next == &sa_manager->olist) { - /* try again with its beginning */ - sa_manager->hole = &sa_manager->olist; - return true; - } - - soffset = amdgpu_sa_bo_hole_soffset(sa_manager); - /* to handle wrap around we add sa_manager->size */ - best = sa_manager->size * 2; - /* go over all fence list and try to find the closest sa_bo - * of the current last - */ - for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) { - struct amdgpu_sa_bo *sa_bo; - - fences[i] = NULL; - - if (list_empty(&sa_manager->flist[i])) - continue; - - sa_bo = list_first_entry(&sa_manager->flist[i], - struct amdgpu_sa_bo, flist); - - if (!dma_fence_is_signaled(sa_bo->fence)) { - fences[i] = sa_bo->fence; - continue; - } - - /* limit the number of tries each ring gets */ - if (tries[i] > 2) { - continue; - } - - tmp = sa_bo->soffset; - if (tmp < soffset) { - /* wrap around, pretend it's after */ - tmp += sa_manager->size; - } - tmp -= soffset; - if (tmp < best) { - /* this sa bo is the closest one */ - best = tmp; - best_bo = sa_bo; - } - } - - if (best_bo) { - uint32_t idx = best_bo->fence->context; - - idx %= AMDGPU_SA_NUM_FENCE_LISTS; - ++tries[idx]; - sa_manager->hole = best_bo->olist.prev; - - /* we knew that this one is signaled, - so it's save to remote it */ - amdgpu_sa_bo_remove_locked(best_bo); - return true; - } - return false; -} - -int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, - struct amdgpu_sa_bo **sa_bo, - unsigned size, unsigned align) -{ - struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS]; - unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS]; - unsigned count; - int i, r; - signed long t; - - if (WARN_ON_ONCE(align > sa_manager->align)) - return -EINVAL; - - if (WARN_ON_ONCE(size > sa_manager->size)) - return -EINVAL; - - *sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL); - if (!(*sa_bo)) - return -ENOMEM; - (*sa_bo)->manager = sa_manager; - (*sa_bo)->fence = NULL; - INIT_LIST_HEAD(&(*sa_bo)->olist); - INIT_LIST_HEAD(&(*sa_bo)->flist); - - spin_lock(&sa_manager->wq.lock); - do { - for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) - tries[i] = 0; - - do { - amdgpu_sa_bo_try_free(sa_manager); - - if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo, - size, align)) { - spin_unlock(&sa_manager->wq.lock); - return 0; - } - - /* see if we can skip over some allocations */ - } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries)); - - for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) - if (fences[i]) - fences[count++] = dma_fence_get(fences[i]); - - if (count) { - spin_unlock(&sa_manager->wq.lock); - t = dma_fence_wait_any_timeout(fences, count, false, - MAX_SCHEDULE_TIMEOUT, - NULL); - for (i = 0; i < count; ++i) - dma_fence_put(fences[i]); - - r = (t > 0) ? 
0 : t; - spin_lock(&sa_manager->wq.lock); - } else { - /* if we have nothing to wait for block */ - r = wait_event_interruptible_locked( - sa_manager->wq, - amdgpu_sa_event(sa_manager, size, align) - ); - } - - } while (!r); - - spin_unlock(&sa_manager->wq.lock); - kfree(*sa_bo); - *sa_bo = NULL; - return r; -} - -void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, +void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct drm_suballoc **sa_bo, struct dma_fence *fence) { - struct amdgpu_sa_manager *sa_manager; - if (sa_bo == NULL || *sa_bo == NULL) { return; } - sa_manager = (*sa_bo)->manager; - spin_lock(&sa_manager->wq.lock); - if (fence && !dma_fence_is_signaled(fence)) { - uint32_t idx; - - (*sa_bo)->fence = dma_fence_get(fence); - idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS; - list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]); - } else { - amdgpu_sa_bo_remove_locked(*sa_bo); - } - wake_up_all_locked(&sa_manager->wq); - spin_unlock(&sa_manager->wq.lock); + drm_suballoc_free(*sa_bo, fence); *sa_bo = NULL; } @@ -373,26 +109,8 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, struct seq_file *m) { - struct amdgpu_sa_bo *i; - - spin_lock(&sa_manager->wq.lock); - list_for_each_entry(i, &sa_manager->olist, olist) { - uint64_t soffset = i->soffset + sa_manager->gpu_addr; - uint64_t eoffset = i->eoffset + sa_manager->gpu_addr; - if (&i->olist == sa_manager->hole) { - seq_printf(m, ">"); - } else { - seq_printf(m, " "); - } - seq_printf(m, "[0x%010llx 0x%010llx] size %8lld", - soffset, eoffset, eoffset - soffset); + struct drm_printer p = drm_seq_file_printer(m); - if (i->fence) - seq_printf(m, " protected by 0x%016llx on context %llu", - i->fence->seqno, i->fence->context); - - seq_printf(m, "\n"); - } - spin_unlock(&sa_manager->wq.lock); + drm_suballoc_dump_debug_info(&sa_manager->base, &p, sa_manager->gpu_addr); } #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index c5ef7f7bdc15..2cd081cbf706 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -466,11 +466,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, return r; } - /* Can't move a pinned BO */ abo = ttm_to_amdgpu_bo(bo); - if (WARN_ON_ONCE(abo->tbo.pin_count > 0)) - return -EINVAL; - adev = amdgpu_ttm_adev(bo->bdev); if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM && diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 589c1c66a6dc..cf040e2e9efe 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -649,7 +649,7 @@ static ssize_t core_id_show(struct device *dev, struct device_attribute *attr, struct drm_device *drm = dev_get_drvdata(dev); struct malidp_drm *malidp = drm_to_malidp(drm); - return snprintf(buf, PAGE_SIZE, "%08x\n", malidp->core_id); + return sysfs_emit(buf, "%08x\n", malidp->core_id); } static DEVICE_ATTR_RO(core_id); diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c index 56483860306b..fbb070f63e36 100644 --- a/drivers/gpu/drm/ast/ast_dp.c +++ b/drivers/gpu/drm/ast/ast_dp.c @@ -9,7 +9,7 @@ int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata) { - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); u8 i = 0, j = 0; /* @@ -125,7 +125,7 @@ void ast_dp_launch(struct drm_device *dev, u8 bPower) u8 bDPTX = 0; u8 
bDPExecute = 1; - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); // S3 come back, need more time to wait BMC ready. if (bPower) WaitCount = 300; @@ -172,7 +172,7 @@ void ast_dp_launch(struct drm_device *dev, u8 bPower) void ast_dp_power_on_off(struct drm_device *dev, bool on) { - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); // Read and Turn off DP PHY sleep u8 bE3 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE3, AST_DP_VIDEO_ENABLE); @@ -188,7 +188,7 @@ void ast_dp_power_on_off(struct drm_device *dev, bool on) void ast_dp_set_on_off(struct drm_device *dev, bool on) { - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); u8 video_on_off = on; // Video On/Off @@ -208,7 +208,7 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on) void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode) { - struct ast_private *ast = to_ast_private(crtc->dev); + struct ast_device *ast = to_ast_device(crtc->dev); u32 ulRefreshRateIndex; u8 ModeIdx; diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c index 4f75a9efb610..1bc35a992369 100644 --- a/drivers/gpu/drm/ast/ast_dp501.c +++ b/drivers/gpu/drm/ast/ast_dp501.c @@ -10,7 +10,7 @@ MODULE_FIRMWARE("ast_dp501_fw.bin"); static void ast_release_firmware(void *data) { - struct ast_private *ast = data; + struct ast_device *ast = data; release_firmware(ast->dp501_fw); ast->dp501_fw = NULL; @@ -18,7 +18,7 @@ static void ast_release_firmware(void *data) static int ast_load_dp501_microcode(struct drm_device *dev) { - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); int ret; ret = request_firmware(&ast->dp501_fw, "ast_dp501_fw.bin", dev->dev); @@ -28,7 +28,7 @@ static int ast_load_dp501_microcode(struct drm_device *dev) return devm_add_action_or_reset(dev->dev, ast_release_firmware, ast); } -static void send_ack(struct ast_private *ast) +static void send_ack(struct ast_device *ast) { u8 sendack; sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff); @@ -36,7 +36,7 @@ static void send_ack(struct ast_private *ast) ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack); } -static void send_nack(struct ast_private *ast) +static void send_nack(struct ast_device *ast) { u8 sendack; sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff); @@ -44,7 +44,7 @@ static void send_nack(struct ast_private *ast) ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack); } -static bool wait_ack(struct ast_private *ast) +static bool wait_ack(struct ast_device *ast) { u8 waitack; u32 retry = 0; @@ -60,7 +60,7 @@ static bool wait_ack(struct ast_private *ast) return false; } -static bool wait_nack(struct ast_private *ast) +static bool wait_nack(struct ast_device *ast) { u8 waitack; u32 retry = 0; @@ -76,18 +76,18 @@ static bool wait_nack(struct ast_private *ast) return false; } -static void set_cmd_trigger(struct ast_private *ast) +static void set_cmd_trigger(struct ast_device *ast) { ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x40); } -static void clear_cmd_trigger(struct ast_private *ast) +static void clear_cmd_trigger(struct ast_device *ast) { ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x00); } #if 0 -static bool wait_fw_ready(struct ast_private *ast) +static bool wait_fw_ready(struct ast_device *ast) { u8 waitready; u32 retry = 0; @@ -106,7 +106,7 @@ static bool 
wait_fw_ready(struct ast_private *ast) static bool ast_write_cmd(struct drm_device *dev, u8 data) { - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); int retry = 0; if (wait_nack(ast)) { send_nack(ast); @@ -128,7 +128,7 @@ static bool ast_write_cmd(struct drm_device *dev, u8 data) static bool ast_write_data(struct drm_device *dev, u8 data) { - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); if (wait_nack(ast)) { send_nack(ast); @@ -146,7 +146,7 @@ static bool ast_write_data(struct drm_device *dev, u8 data) #if 0 static bool ast_read_data(struct drm_device *dev, u8 *data) { - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); u8 tmp; *data = 0; @@ -163,7 +163,7 @@ static bool ast_read_data(struct drm_device *dev, u8 *data) return true; } -static void clear_cmd(struct ast_private *ast) +static void clear_cmd(struct ast_device *ast) { send_nack(ast); ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, 0x00); @@ -178,14 +178,14 @@ void ast_set_dp501_video_output(struct drm_device *dev, u8 mode) msleep(10); } -static u32 get_fw_base(struct ast_private *ast) +static u32 get_fw_base(struct ast_device *ast) { return ast_mindwm(ast, 0x1e6e2104) & 0x7fffffff; } bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size) { - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); u32 i, data; u32 boot_address; @@ -204,7 +204,7 @@ bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size) static bool ast_launch_m68k(struct drm_device *dev) { - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); u32 i, data, len = 0; u32 boot_address; u8 *fw_addr = NULL; @@ -274,7 +274,7 @@ static bool ast_launch_m68k(struct drm_device *dev) bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata) { - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); u32 i, boot_address, offset, data; u32 *pEDIDidx; @@ -334,7 +334,7 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata) static bool ast_init_dvo(struct drm_device *dev) { - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); u8 jreg; u32 data; ast_write32(ast, 0xf004, 0x1e6e0000); @@ -407,7 +407,7 @@ static bool ast_init_dvo(struct drm_device *dev) static void ast_init_analog(struct drm_device *dev) { - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); u32 data; /* @@ -434,7 +434,7 @@ static void ast_init_analog(struct drm_device *dev) void ast_init_3rdtx(struct drm_device *dev) { - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); u8 jreg; if (ast->chip == AST2300 || ast->chip == AST2400) { diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index d78852c7cf5b..3a7af6d5aa79 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -105,7 +105,7 @@ static int ast_remove_conflicting_framebuffers(struct pci_dev *pdev) static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - struct ast_private *ast; + struct ast_device *ast; struct drm_device *dev; int ret; diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index d51b81fea9c8..a501169cddad 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -157,7 +157,7 @@ to_ast_sil164_connector(struct 
drm_connector *connector) * Device */ -struct ast_private { +struct ast_device { struct drm_device base; struct mutex ioregs_lock; /* Protects access to I/O registers in ioregs */ @@ -210,14 +210,14 @@ struct ast_private { const struct firmware *dp501_fw; /* dp501 fw */ }; -static inline struct ast_private *to_ast_private(struct drm_device *dev) +static inline struct ast_device *to_ast_device(struct drm_device *dev) { - return container_of(dev, struct ast_private, base); + return container_of(dev, struct ast_device, base); } -struct ast_private *ast_device_create(const struct drm_driver *drv, - struct pci_dev *pdev, - unsigned long flags); +struct ast_device *ast_device_create(const struct drm_driver *drv, + struct pci_dev *pdev, + unsigned long flags); #define AST_IO_AR_PORT_WRITE (0x40) #define AST_IO_MISC_PORT_WRITE (0x42) @@ -238,62 +238,44 @@ struct ast_private *ast_device_create(const struct drm_driver *drv, #define AST_IO_VGACRCB_HWC_ENABLED BIT(1) #define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */ -#define __ast_read(x) \ -static inline u##x ast_read##x(struct ast_private *ast, u32 reg) { \ -u##x val = 0;\ -val = ioread##x(ast->regs + reg); \ -return val;\ +static inline u32 ast_read32(struct ast_device *ast, u32 reg) +{ + return ioread32(ast->regs + reg); } -__ast_read(8); -__ast_read(16); -__ast_read(32) - -#define __ast_io_read(x) \ -static inline u##x ast_io_read##x(struct ast_private *ast, u32 reg) { \ -u##x val = 0;\ -val = ioread##x(ast->ioregs + reg); \ -return val;\ +static inline void ast_write32(struct ast_device *ast, u32 reg, u32 val) +{ + iowrite32(val, ast->regs + reg); } -__ast_io_read(8); -__ast_io_read(16); -__ast_io_read(32); - -#define __ast_write(x) \ -static inline void ast_write##x(struct ast_private *ast, u32 reg, u##x val) {\ - iowrite##x(val, ast->regs + reg);\ - } - -__ast_write(8); -__ast_write(16); -__ast_write(32); - -#define __ast_io_write(x) \ -static inline void ast_io_write##x(struct ast_private *ast, u32 reg, u##x val) {\ - iowrite##x(val, ast->ioregs + reg);\ - } +static inline u8 ast_io_read8(struct ast_device *ast, u32 reg) +{ + return ioread8(ast->ioregs + reg); +} -__ast_io_write(8); -__ast_io_write(16); -#undef __ast_io_write +static inline void ast_io_write8(struct ast_device *ast, u32 reg, u8 val) +{ + iowrite8(val, ast->ioregs + reg); +} -static inline void ast_set_index_reg(struct ast_private *ast, +static inline void ast_set_index_reg(struct ast_device *ast, uint32_t base, uint8_t index, uint8_t val) { - ast_io_write16(ast, base, ((u16)val << 8) | index); + ast_io_write8(ast, base, index); + ++base; + ast_io_write8(ast, base, val); } -void ast_set_index_reg_mask(struct ast_private *ast, +void ast_set_index_reg_mask(struct ast_device *ast, uint32_t base, uint8_t index, uint8_t mask, uint8_t val); -uint8_t ast_get_index_reg(struct ast_private *ast, +uint8_t ast_get_index_reg(struct ast_device *ast, uint32_t base, uint8_t index); -uint8_t ast_get_index_reg_mask(struct ast_private *ast, +uint8_t ast_get_index_reg_mask(struct ast_device *ast, uint32_t base, uint8_t index, uint8_t mask); -static inline void ast_open_key(struct ast_private *ast) +static inline void ast_open_key(struct ast_device *ast) { ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8); } @@ -352,7 +334,7 @@ struct ast_crtc_state { #define to_ast_crtc_state(state) container_of(state, struct ast_crtc_state, base) -int ast_mode_config_init(struct ast_private *ast); +int ast_mode_config_init(struct ast_device *ast); #define AST_MM_ALIGN_SHIFT 4 
#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1) @@ -476,16 +458,16 @@ int ast_mode_config_init(struct ast_private *ast); #define ASTDP_1366x768_60 0x1E #define ASTDP_1152x864_75 0x1F -int ast_mm_init(struct ast_private *ast); +int ast_mm_init(struct ast_device *ast); /* ast post */ void ast_enable_vga(struct drm_device *dev); void ast_enable_mmio(struct drm_device *dev); bool ast_is_vga_enabled(struct drm_device *dev); void ast_post_gpu(struct drm_device *dev); -u32 ast_mindwm(struct ast_private *ast, u32 r); -void ast_moutdwm(struct ast_private *ast, u32 r, u32 v); -void ast_patch_ahb_2500(struct ast_private *ast); +u32 ast_mindwm(struct ast_device *ast, u32 r); +void ast_moutdwm(struct ast_device *ast, u32 r, u32 v); +void ast_patch_ahb_2500(struct ast_device *ast); /* ast dp501 */ void ast_set_dp501_video_output(struct drm_device *dev, u8 mode); bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size); diff --git a/drivers/gpu/drm/ast/ast_i2c.c b/drivers/gpu/drm/ast/ast_i2c.c index 93e91c36d649..d64045c0b849 100644 --- a/drivers/gpu/drm/ast/ast_i2c.c +++ b/drivers/gpu/drm/ast/ast_i2c.c @@ -29,7 +29,7 @@ static void ast_i2c_setsda(void *i2c_priv, int data) { struct ast_i2c_chan *i2c = i2c_priv; - struct ast_private *ast = to_ast_private(i2c->dev); + struct ast_device *ast = to_ast_device(i2c->dev); int i; u8 ujcrb7, jtemp; @@ -45,7 +45,7 @@ static void ast_i2c_setsda(void *i2c_priv, int data) static void ast_i2c_setscl(void *i2c_priv, int clock) { struct ast_i2c_chan *i2c = i2c_priv; - struct ast_private *ast = to_ast_private(i2c->dev); + struct ast_device *ast = to_ast_device(i2c->dev); int i; u8 ujcrb7, jtemp; @@ -61,7 +61,7 @@ static void ast_i2c_setscl(void *i2c_priv, int clock) static int ast_i2c_getsda(void *i2c_priv) { struct ast_i2c_chan *i2c = i2c_priv; - struct ast_private *ast = to_ast_private(i2c->dev); + struct ast_device *ast = to_ast_device(i2c->dev); uint32_t val, val2, count, pass; count = 0; @@ -83,7 +83,7 @@ static int ast_i2c_getsda(void *i2c_priv) static int ast_i2c_getscl(void *i2c_priv) { struct ast_i2c_chan *i2c = i2c_priv; - struct ast_private *ast = to_ast_private(i2c->dev); + struct ast_device *ast = to_ast_device(i2c->dev); uint32_t val, val2, count, pass; count = 0; diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index f83ce77127cb..794ffd4a29c5 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -35,7 +35,7 @@ #include "ast_drv.h" -void ast_set_index_reg_mask(struct ast_private *ast, +void ast_set_index_reg_mask(struct ast_device *ast, uint32_t base, uint8_t index, uint8_t mask, uint8_t val) { @@ -45,7 +45,7 @@ void ast_set_index_reg_mask(struct ast_private *ast, ast_set_index_reg(ast, base, index, tmp); } -uint8_t ast_get_index_reg(struct ast_private *ast, +uint8_t ast_get_index_reg(struct ast_device *ast, uint32_t base, uint8_t index) { uint8_t ret; @@ -54,7 +54,7 @@ uint8_t ast_get_index_reg(struct ast_private *ast, return ret; } -uint8_t ast_get_index_reg_mask(struct ast_private *ast, +uint8_t ast_get_index_reg_mask(struct ast_device *ast, uint32_t base, uint8_t index, uint8_t mask) { uint8_t ret; @@ -66,7 +66,7 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast, static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev) { struct device_node *np = dev->dev->of_node; - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); struct pci_dev *pdev = to_pci_dev(dev->dev); uint32_t data, jregd0, jregd1; @@ -122,7 +122,7 @@ 
static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev) static int ast_detect_chip(struct drm_device *dev, bool *need_post) { - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); struct pci_dev *pdev = to_pci_dev(dev->dev); uint32_t jreg, scu_rev; @@ -271,7 +271,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) static int ast_get_dram_info(struct drm_device *dev) { struct device_node *np = dev->dev->of_node; - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap; uint32_t denum, num, div, ref_pll, dsel; @@ -394,22 +394,22 @@ static int ast_get_dram_info(struct drm_device *dev) */ static void ast_device_release(void *data) { - struct ast_private *ast = data; + struct ast_device *ast = data; /* enable standard VGA decode */ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04); } -struct ast_private *ast_device_create(const struct drm_driver *drv, - struct pci_dev *pdev, - unsigned long flags) +struct ast_device *ast_device_create(const struct drm_driver *drv, + struct pci_dev *pdev, + unsigned long flags) { struct drm_device *dev; - struct ast_private *ast; + struct ast_device *ast; bool need_post; int ret = 0; - ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_private, base); + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); if (IS_ERR(ast)) return ast; dev = &ast->base; diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c index 248284a4b3ff..e16af60deef9 100644 --- a/drivers/gpu/drm/ast/ast_mm.c +++ b/drivers/gpu/drm/ast/ast_mm.c @@ -33,7 +33,7 @@ #include "ast_drv.h" -static u32 ast_get_vram_size(struct ast_private *ast) +static u32 ast_get_vram_size(struct ast_device *ast) { u8 jreg; u32 vram_size; @@ -73,7 +73,7 @@ static u32 ast_get_vram_size(struct ast_private *ast) return vram_size; } -int ast_mm_init(struct ast_private *ast) +int ast_mm_init(struct ast_device *ast) { struct drm_device *dev = &ast->base; struct pci_dev *pdev = to_pci_dev(dev->dev); diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 984ec590a7e7..36374828f6c8 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -51,7 +51,7 @@ #define AST_LUT_SIZE 256 -static inline void ast_load_palette_index(struct ast_private *ast, +static inline void ast_load_palette_index(struct ast_device *ast, u8 index, u8 red, u8 green, u8 blue) { @@ -65,7 +65,7 @@ static inline void ast_load_palette_index(struct ast_private *ast, ast_io_read8(ast, AST_IO_SEQ_PORT); } -static void ast_crtc_set_gamma_linear(struct ast_private *ast, +static void ast_crtc_set_gamma_linear(struct ast_device *ast, const struct drm_format_info *format) { int i; @@ -84,7 +84,7 @@ static void ast_crtc_set_gamma_linear(struct ast_private *ast, } } -static void ast_crtc_set_gamma(struct ast_private *ast, +static void ast_crtc_set_gamma(struct ast_device *ast, const struct drm_format_info *format, struct drm_color_lut *lut) { @@ -232,7 +232,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format, return true; } -static void ast_set_vbios_color_reg(struct ast_private *ast, +static void ast_set_vbios_color_reg(struct ast_device *ast, const struct drm_format_info *format, const struct ast_vbios_mode_info *vbios_mode) { @@ -263,7 +263,7 @@ static void ast_set_vbios_color_reg(struct ast_private *ast, } } -static void ast_set_vbios_mode_reg(struct ast_private *ast, +static void 
ast_set_vbios_mode_reg(struct ast_device *ast, const struct drm_display_mode *adjusted_mode, const struct ast_vbios_mode_info *vbios_mode) { @@ -287,7 +287,7 @@ static void ast_set_vbios_mode_reg(struct ast_private *ast, } } -static void ast_set_std_reg(struct ast_private *ast, +static void ast_set_std_reg(struct ast_device *ast, struct drm_display_mode *mode, struct ast_vbios_mode_info *vbios_mode) { @@ -335,7 +335,7 @@ static void ast_set_std_reg(struct ast_private *ast, ast_set_index_reg(ast, AST_IO_GR_PORT, i, stdtable->gr[i]); } -static void ast_set_crtc_reg(struct ast_private *ast, +static void ast_set_crtc_reg(struct ast_device *ast, struct drm_display_mode *mode, struct ast_vbios_mode_info *vbios_mode) { @@ -450,7 +450,7 @@ static void ast_set_crtc_reg(struct ast_private *ast, ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x80); } -static void ast_set_offset_reg(struct ast_private *ast, +static void ast_set_offset_reg(struct ast_device *ast, struct drm_framebuffer *fb) { u16 offset; @@ -460,7 +460,7 @@ static void ast_set_offset_reg(struct ast_private *ast, ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xb0, (offset >> 8) & 0x3f); } -static void ast_set_dclk_reg(struct ast_private *ast, +static void ast_set_dclk_reg(struct ast_device *ast, struct drm_display_mode *mode, struct ast_vbios_mode_info *vbios_mode) { @@ -478,7 +478,7 @@ static void ast_set_dclk_reg(struct ast_private *ast, ((clk_info->param3 & 0x3) << 4)); } -static void ast_set_color_reg(struct ast_private *ast, +static void ast_set_color_reg(struct ast_device *ast, const struct drm_format_info *format) { u8 jregA0 = 0, jregA3 = 0, jregA8 = 0; @@ -507,7 +507,7 @@ static void ast_set_color_reg(struct ast_private *ast, ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8); } -static void ast_set_crtthd_reg(struct ast_private *ast) +static void ast_set_crtthd_reg(struct ast_device *ast) { /* Set Threshold */ if (ast->chip == AST2600) { @@ -529,7 +529,7 @@ static void ast_set_crtthd_reg(struct ast_private *ast) } } -static void ast_set_sync_reg(struct ast_private *ast, +static void ast_set_sync_reg(struct ast_device *ast, struct drm_display_mode *mode, struct ast_vbios_mode_info *vbios_mode) { @@ -544,7 +544,7 @@ static void ast_set_sync_reg(struct ast_private *ast, ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg); } -static void ast_set_start_address_crt1(struct ast_private *ast, +static void ast_set_start_address_crt1(struct ast_device *ast, unsigned int offset) { u32 addr; @@ -556,7 +556,7 @@ static void ast_set_start_address_crt1(struct ast_private *ast, } -static void ast_wait_for_vretrace(struct ast_private *ast) +static void ast_wait_for_vretrace(struct ast_device *ast) { unsigned long timeout = jiffies + HZ; u8 vgair1; @@ -645,7 +645,7 @@ static void ast_primary_plane_helper_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state) { struct drm_device *dev = plane->dev; - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane); struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); struct drm_framebuffer *fb = plane_state->fb; @@ -672,23 +672,34 @@ static void ast_primary_plane_helper_atomic_update(struct drm_plane *plane, /* * Some BMCs stop scanning out the video signal after the driver - * reprogrammed the offset or scanout address. This stalls display - * output for several seconds and makes the display unusable. 
- * Therefore only update the offset if it changes and reprogram the - * address after enabling the plane. + * reprogrammed the offset. This stalls display output for several + * seconds and makes the display unusable. Therefore only update + * the offset if it changes. */ if (!old_fb || old_fb->pitches[0] != fb->pitches[0]) ast_set_offset_reg(ast, fb); - if (!old_fb) { - ast_set_start_address_crt1(ast, (u32)ast_plane->offset); - ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x00); - } +} + +static void ast_primary_plane_helper_atomic_enable(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct ast_device *ast = to_ast_device(plane->dev); + struct ast_plane *ast_plane = to_ast_plane(plane); + + /* + * Some BMCs stop scanning out the video signal after the driver + * reprogrammed the scanout address. This stalls display + * output for several seconds and makes the display unusable. + * Therefore only reprogram the address after enabling the plane. + */ + ast_set_start_address_crt1(ast, (u32)ast_plane->offset); + ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x00); } static void ast_primary_plane_helper_atomic_disable(struct drm_plane *plane, struct drm_atomic_state *state) { - struct ast_private *ast = to_ast_private(plane->dev); + struct ast_device *ast = to_ast_device(plane->dev); ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20); } @@ -697,6 +708,7 @@ static const struct drm_plane_helper_funcs ast_primary_plane_helper_funcs = { DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, .atomic_check = ast_primary_plane_helper_atomic_check, .atomic_update = ast_primary_plane_helper_atomic_update, + .atomic_enable = ast_primary_plane_helper_atomic_enable, .atomic_disable = ast_primary_plane_helper_atomic_disable, }; @@ -707,7 +719,7 @@ static const struct drm_plane_funcs ast_primary_plane_funcs = { DRM_GEM_SHADOW_PLANE_FUNCS, }; -static int ast_primary_plane_init(struct ast_private *ast) +static int ast_primary_plane_init(struct ast_device *ast) { struct drm_device *dev = &ast->base; struct ast_plane *ast_primary_plane = &ast->primary_plane; @@ -800,7 +812,7 @@ static void ast_update_cursor_image(u8 __iomem *dst, const u8 *src, int width, i writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY); } -static void ast_set_cursor_base(struct ast_private *ast, u64 address) +static void ast_set_cursor_base(struct ast_device *ast, u64 address) { u8 addr0 = (address >> 3) & 0xff; u8 addr1 = (address >> 11) & 0xff; @@ -811,7 +823,7 @@ static void ast_set_cursor_base(struct ast_private *ast, u64 address) ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, addr2); } -static void ast_set_cursor_location(struct ast_private *ast, u16 x, u16 y, +static void ast_set_cursor_location(struct ast_device *ast, u16 x, u16 y, u8 x_offset, u8 y_offset) { u8 x0 = (x & 0x00ff); @@ -827,7 +839,7 @@ static void ast_set_cursor_location(struct ast_private *ast, u16 x, u16 y, ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, y1); } -static void ast_set_cursor_enabled(struct ast_private *ast, bool enabled) +static void ast_set_cursor_enabled(struct ast_device *ast, bool enabled) { static const u8 mask = (u8)~(AST_IO_VGACRCB_HWC_16BPP | AST_IO_VGACRCB_HWC_ENABLED); @@ -876,7 +888,7 @@ static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane, struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); struct drm_framebuffer *fb = plane_state->fb; struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane); - struct ast_private *ast = 
to_ast_private(plane->dev); + struct ast_device *ast = to_ast_device(plane->dev); struct iosys_map src_map = shadow_plane_state->data[0]; struct drm_rect damage; const u8 *src = src_map.vaddr; /* TODO: Use mapping abstraction properly */ @@ -931,7 +943,7 @@ static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane, static void ast_cursor_plane_helper_atomic_disable(struct drm_plane *plane, struct drm_atomic_state *state) { - struct ast_private *ast = to_ast_private(plane->dev); + struct ast_device *ast = to_ast_device(plane->dev); ast_set_cursor_enabled(ast, false); } @@ -950,7 +962,7 @@ static const struct drm_plane_funcs ast_cursor_plane_funcs = { DRM_GEM_SHADOW_PLANE_FUNCS, }; -static int ast_cursor_plane_init(struct ast_private *ast) +static int ast_cursor_plane_init(struct ast_device *ast) { struct drm_device *dev = &ast->base; struct ast_plane *ast_cursor_plane = &ast->cursor_plane; @@ -995,7 +1007,7 @@ static int ast_cursor_plane_init(struct ast_private *ast) static void ast_crtc_dpms(struct drm_crtc *crtc, int mode) { - struct ast_private *ast = to_ast_private(crtc->dev); + struct ast_device *ast = to_ast_device(crtc->dev); u8 ch = AST_DPMS_VSYNC_OFF | AST_DPMS_HSYNC_OFF; struct ast_crtc_state *ast_state; const struct drm_format_info *format; @@ -1052,7 +1064,7 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode) static enum drm_mode_status ast_crtc_helper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode) { - struct ast_private *ast = to_ast_private(crtc->dev); + struct ast_device *ast = to_ast_device(crtc->dev); enum drm_mode_status status; uint32_t jtemp; @@ -1177,7 +1189,7 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); struct drm_device *dev = crtc->dev; - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state); struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info; @@ -1202,7 +1214,7 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, static void ast_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct drm_device *dev = crtc->dev; - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state); struct ast_vbios_mode_info *vbios_mode_info = @@ -1224,7 +1236,7 @@ static void ast_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_ato { struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc); struct drm_device *dev = crtc->dev; - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); ast_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); @@ -1312,7 +1324,7 @@ static const struct drm_crtc_funcs ast_crtc_funcs = { static int ast_crtc_init(struct drm_device *dev) { - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); struct drm_crtc *crtc = &ast->crtc; int ret; @@ -1338,7 +1350,7 @@ static int ast_vga_connector_helper_get_modes(struct drm_connector *connector) { struct ast_vga_connector *ast_vga_connector = to_ast_vga_connector(connector); struct drm_device *dev = connector->dev; - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); struct edid *edid; int count; 
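Nearly every hunk above and below performs the same mechanical rename from struct ast_private to struct ast_device while keeping the container_of() upcast: struct drm_device is embedded as the first member, and to_ast_device() recovers the enclosing structure from a pointer to that base. The following stand-alone sketch, with hypothetical base_device/wrapper_device types standing in for drm_device/ast_device, shows the pointer arithmetic behind the cast.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for struct drm_device / struct ast_device. */
struct base_device {
	const char *name;
};

struct wrapper_device {
	struct base_device base;	/* embedded base object */
	int vram_mb;
};

/* The same pointer arithmetic the kernel's container_of() performs. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct wrapper_device *to_wrapper_device(struct base_device *dev)
{
	return container_of(dev, struct wrapper_device, base);
}

int main(void)
{
	struct wrapper_device w = { .base.name = "ast", .vram_mb = 32 };
	struct base_device *dev = &w.base;	/* what DRM callbacks receive */

	/* Upcast back to the driver structure, as to_ast_device() does. */
	printf("%s: %d MiB of VRAM\n", dev->name,
	       to_wrapper_device(dev)->vram_mb);
	return 0;
}

Because the base object is embedded rather than pointed to, both structures share one allocation and lifetime, which is what allows devm_drm_dev_alloc() to hand back the wrapper directly.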
@@ -1411,7 +1423,7 @@ static int ast_vga_connector_init(struct drm_device *dev, return 0; } -static int ast_vga_output_init(struct ast_private *ast) +static int ast_vga_output_init(struct ast_device *ast) { struct drm_device *dev = &ast->base; struct drm_crtc *crtc = &ast->crtc; @@ -1444,7 +1456,7 @@ static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector { struct ast_sil164_connector *ast_sil164_connector = to_ast_sil164_connector(connector); struct drm_device *dev = connector->dev; - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); struct edid *edid; int count; @@ -1517,7 +1529,7 @@ static int ast_sil164_connector_init(struct drm_device *dev, return 0; } -static int ast_sil164_output_init(struct ast_private *ast) +static int ast_sil164_output_init(struct ast_device *ast) { struct drm_device *dev = &ast->base; struct drm_crtc *crtc = &ast->crtc; @@ -1604,7 +1616,7 @@ static int ast_dp501_connector_init(struct drm_device *dev, struct drm_connector return 0; } -static int ast_dp501_output_init(struct ast_private *ast) +static int ast_dp501_output_init(struct ast_device *ast) { struct drm_device *dev = &ast->base; struct drm_crtc *crtc = &ast->crtc; @@ -1691,7 +1703,7 @@ static int ast_astdp_connector_init(struct drm_device *dev, struct drm_connector return 0; } -static int ast_astdp_output_init(struct ast_private *ast) +static int ast_astdp_output_init(struct ast_device *ast) { struct drm_device *dev = &ast->base; struct drm_crtc *crtc = &ast->crtc; @@ -1721,7 +1733,7 @@ static int ast_astdp_output_init(struct ast_private *ast) static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *state) { - struct ast_private *ast = to_ast_private(state->dev); + struct ast_device *ast = to_ast_device(state->dev); /* * Concurrent operations could possibly trigger a call to @@ -1742,7 +1754,7 @@ static enum drm_mode_status ast_mode_config_mode_valid(struct drm_device *dev, const struct drm_display_mode *mode) { static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */ - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); unsigned long fbsize, fbpages, max_fbpages; max_fbpages = (ast->vram_fb_available) >> PAGE_SHIFT; @@ -1763,7 +1775,7 @@ static const struct drm_mode_config_funcs ast_mode_config_funcs = { .atomic_commit = drm_atomic_helper_commit, }; -int ast_mode_config_init(struct ast_private *ast) +int ast_mode_config_init(struct ast_device *ast) { struct drm_device *dev = &ast->base; int ret; diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 82fd3c8adee1..71bb36b865fd 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -39,7 +39,7 @@ static void ast_post_chip_2500(struct drm_device *dev); void ast_enable_vga(struct drm_device *dev) { - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); ast_io_write8(ast, AST_IO_VGA_ENABLE_PORT, 0x01); ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, 0x01); @@ -47,7 +47,7 @@ void ast_enable_vga(struct drm_device *dev) void ast_enable_mmio(struct drm_device *dev) { - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06); } @@ -55,7 +55,7 @@ void ast_enable_mmio(struct drm_device *dev) bool ast_is_vga_enabled(struct drm_device *dev) { - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); u8 ch; ch = 
ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT); @@ -70,7 +70,7 @@ static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff }; static void ast_set_def_ext_reg(struct drm_device *dev) { - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); struct pci_dev *pdev = to_pci_dev(dev->dev); u8 i, index, reg; const u8 *ext_reg_info; @@ -110,7 +110,7 @@ ast_set_def_ext_reg(struct drm_device *dev) ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg); } -u32 ast_mindwm(struct ast_private *ast, u32 r) +u32 ast_mindwm(struct ast_device *ast, u32 r) { uint32_t data; @@ -123,7 +123,7 @@ u32 ast_mindwm(struct ast_private *ast, u32 r) return ast_read32(ast, 0x10000 + (r & 0x0000ffff)); } -void ast_moutdwm(struct ast_private *ast, u32 r, u32 v) +void ast_moutdwm(struct ast_device *ast, u32 r, u32 v) { uint32_t data; ast_write32(ast, 0xf004, r & 0xffff0000); @@ -162,7 +162,7 @@ static const u32 pattern_AST2150[14] = { 0x20F050E0 }; -static u32 mmctestburst2_ast2150(struct ast_private *ast, u32 datagen) +static u32 mmctestburst2_ast2150(struct ast_device *ast, u32 datagen) { u32 data, timeout; @@ -192,7 +192,7 @@ static u32 mmctestburst2_ast2150(struct ast_private *ast, u32 datagen) } #if 0 /* unused in DDX driver - here for completeness */ -static u32 mmctestsingle2_ast2150(struct ast_private *ast, u32 datagen) +static u32 mmctestsingle2_ast2150(struct ast_device *ast, u32 datagen) { u32 data, timeout; @@ -212,7 +212,7 @@ static u32 mmctestsingle2_ast2150(struct ast_private *ast, u32 datagen) } #endif -static int cbrtest_ast2150(struct ast_private *ast) +static int cbrtest_ast2150(struct ast_device *ast) { int i; @@ -222,7 +222,7 @@ static int cbrtest_ast2150(struct ast_private *ast) return 1; } -static int cbrscan_ast2150(struct ast_private *ast, int busw) +static int cbrscan_ast2150(struct ast_device *ast, int busw) { u32 patcnt, loop; @@ -239,7 +239,7 @@ static int cbrscan_ast2150(struct ast_private *ast, int busw) } -static void cbrdlli_ast2150(struct ast_private *ast, int busw) +static void cbrdlli_ast2150(struct ast_device *ast, int busw) { u32 dll_min[4], dll_max[4], dlli, data, passcnt; @@ -273,7 +273,7 @@ cbr_start: static void ast_init_dram_reg(struct drm_device *dev) { - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); u8 j; u32 data, temp, i; const struct ast_dramstruct *dram_reg_info; @@ -366,7 +366,7 @@ static void ast_init_dram_reg(struct drm_device *dev) void ast_post_gpu(struct drm_device *dev) { - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); struct pci_dev *pdev = to_pci_dev(dev->dev); u32 reg; @@ -449,7 +449,7 @@ static const u32 pattern[8] = { 0x7C61D253 }; -static bool mmc_test(struct ast_private *ast, u32 datagen, u8 test_ctl) +static bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl) { u32 data, timeout; @@ -469,7 +469,7 @@ static bool mmc_test(struct ast_private *ast, u32 datagen, u8 test_ctl) return true; } -static u32 mmc_test2(struct ast_private *ast, u32 datagen, u8 test_ctl) +static u32 mmc_test2(struct ast_device *ast, u32 datagen, u8 test_ctl) { u32 data, timeout; @@ -490,32 +490,32 @@ static u32 mmc_test2(struct ast_private *ast, u32 datagen, u8 test_ctl) } -static bool mmc_test_burst(struct ast_private *ast, u32 datagen) +static bool mmc_test_burst(struct ast_device *ast, u32 datagen) { return mmc_test(ast, datagen, 0xc1); } -static u32 mmc_test_burst2(struct ast_private *ast, u32 datagen) +static u32 
mmc_test_burst2(struct ast_device *ast, u32 datagen) { return mmc_test2(ast, datagen, 0x41); } -static bool mmc_test_single(struct ast_private *ast, u32 datagen) +static bool mmc_test_single(struct ast_device *ast, u32 datagen) { return mmc_test(ast, datagen, 0xc5); } -static u32 mmc_test_single2(struct ast_private *ast, u32 datagen) +static u32 mmc_test_single2(struct ast_device *ast, u32 datagen) { return mmc_test2(ast, datagen, 0x05); } -static bool mmc_test_single_2500(struct ast_private *ast, u32 datagen) +static bool mmc_test_single_2500(struct ast_device *ast, u32 datagen) { return mmc_test(ast, datagen, 0x85); } -static int cbr_test(struct ast_private *ast) +static int cbr_test(struct ast_device *ast) { u32 data; int i; @@ -534,7 +534,7 @@ static int cbr_test(struct ast_private *ast) return 1; } -static int cbr_scan(struct ast_private *ast) +static int cbr_scan(struct ast_device *ast) { u32 data, data2, patcnt, loop; @@ -555,7 +555,7 @@ static int cbr_scan(struct ast_private *ast) return data2; } -static u32 cbr_test2(struct ast_private *ast) +static u32 cbr_test2(struct ast_device *ast) { u32 data; @@ -569,7 +569,7 @@ static u32 cbr_test2(struct ast_private *ast) return ~data & 0xffff; } -static u32 cbr_scan2(struct ast_private *ast) +static u32 cbr_scan2(struct ast_device *ast) { u32 data, data2, patcnt, loop; @@ -590,7 +590,7 @@ static u32 cbr_scan2(struct ast_private *ast) return data2; } -static bool cbr_test3(struct ast_private *ast) +static bool cbr_test3(struct ast_device *ast) { if (!mmc_test_burst(ast, 0)) return false; @@ -599,7 +599,7 @@ static bool cbr_test3(struct ast_private *ast) return true; } -static bool cbr_scan3(struct ast_private *ast) +static bool cbr_scan3(struct ast_device *ast) { u32 patcnt, loop; @@ -615,7 +615,7 @@ static bool cbr_scan3(struct ast_private *ast) return true; } -static bool finetuneDQI_L(struct ast_private *ast, struct ast2300_dram_param *param) +static bool finetuneDQI_L(struct ast_device *ast, struct ast2300_dram_param *param) { u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, retry = 0; bool status = false; @@ -714,7 +714,7 @@ FINETUNE_DONE: return status; } /* finetuneDQI_L */ -static void finetuneDQSI(struct ast_private *ast) +static void finetuneDQSI(struct ast_device *ast) { u32 dlli, dqsip, dqidly; u32 reg_mcr18, reg_mcr0c, passcnt[2], diff; @@ -804,7 +804,7 @@ static void finetuneDQSI(struct ast_private *ast) ast_moutdwm(ast, 0x1E6E0018, reg_mcr18); } -static bool cbr_dll2(struct ast_private *ast, struct ast2300_dram_param *param) +static bool cbr_dll2(struct ast_device *ast, struct ast2300_dram_param *param) { u32 dllmin[2], dllmax[2], dlli, data, passcnt, retry = 0; bool status = false; @@ -860,7 +860,7 @@ CBR_DONE2: return status; } /* CBRDLL2 */ -static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *param) +static void get_ddr3_info(struct ast_device *ast, struct ast2300_dram_param *param) { u32 trap, trap_AC2, trap_MRS; @@ -1102,7 +1102,7 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa } -static void ddr3_init(struct ast_private *ast, struct ast2300_dram_param *param) +static void ddr3_init(struct ast_device *ast, struct ast2300_dram_param *param) { u32 data, data2, retry = 0; @@ -1225,7 +1225,7 @@ ddr3_init_start: } -static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *param) +static void get_ddr2_info(struct ast_device *ast, struct ast2300_dram_param *param) { u32 trap, trap_AC2, trap_MRS; @@ -1472,7 +1472,7 @@ 
static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa } } -static void ddr2_init(struct ast_private *ast, struct ast2300_dram_param *param) +static void ddr2_init(struct ast_device *ast, struct ast2300_dram_param *param) { u32 data, data2, retry = 0; @@ -1600,7 +1600,7 @@ ddr2_init_start: static void ast_post_chip_2300(struct drm_device *dev) { - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); struct ast2300_dram_param param; u32 temp; u8 reg; @@ -1681,7 +1681,7 @@ static void ast_post_chip_2300(struct drm_device *dev) } while ((reg & 0x40) == 0); } -static bool cbr_test_2500(struct ast_private *ast) +static bool cbr_test_2500(struct ast_device *ast) { ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF); ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00); @@ -1692,7 +1692,7 @@ static bool cbr_test_2500(struct ast_private *ast) return true; } -static bool ddr_test_2500(struct ast_private *ast) +static bool ddr_test_2500(struct ast_device *ast) { ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF); ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00); @@ -1709,7 +1709,7 @@ static bool ddr_test_2500(struct ast_private *ast) return true; } -static void ddr_init_common_2500(struct ast_private *ast) +static void ddr_init_common_2500(struct ast_device *ast) { ast_moutdwm(ast, 0x1E6E0034, 0x00020080); ast_moutdwm(ast, 0x1E6E0008, 0x2003000F); @@ -1732,7 +1732,7 @@ static void ddr_init_common_2500(struct ast_private *ast) ast_moutdwm(ast, 0x1E6E024C, 0x80808080); } -static void ddr_phy_init_2500(struct ast_private *ast) +static void ddr_phy_init_2500(struct ast_device *ast) { u32 data, pass, timecnt; @@ -1766,7 +1766,7 @@ static void ddr_phy_init_2500(struct ast_private *ast) * 4Gb : 0x80000000 ~ 0x9FFFFFFF * 8Gb : 0x80000000 ~ 0xBFFFFFFF */ -static void check_dram_size_2500(struct ast_private *ast, u32 tRFC) +static void check_dram_size_2500(struct ast_device *ast, u32 tRFC) { u32 reg_04, reg_14; @@ -1797,7 +1797,7 @@ static void check_dram_size_2500(struct ast_private *ast, u32 tRFC) ast_moutdwm(ast, 0x1E6E0014, reg_14); } -static void enable_cache_2500(struct ast_private *ast) +static void enable_cache_2500(struct ast_device *ast) { u32 reg_04, data; @@ -1810,7 +1810,7 @@ static void enable_cache_2500(struct ast_private *ast) ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x400); } -static void set_mpll_2500(struct ast_private *ast) +static void set_mpll_2500(struct ast_device *ast) { u32 addr, data, param; @@ -1837,7 +1837,7 @@ static void set_mpll_2500(struct ast_private *ast) udelay(100); } -static void reset_mmc_2500(struct ast_private *ast) +static void reset_mmc_2500(struct ast_device *ast) { ast_moutdwm(ast, 0x1E78505C, 0x00000004); ast_moutdwm(ast, 0x1E785044, 0x00000001); @@ -1848,7 +1848,7 @@ static void reset_mmc_2500(struct ast_private *ast) ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); } -static void ddr3_init_2500(struct ast_private *ast, const u32 *ddr_table) +static void ddr3_init_2500(struct ast_device *ast, const u32 *ddr_table) { ast_moutdwm(ast, 0x1E6E0004, 0x00000303); @@ -1892,7 +1892,7 @@ static void ddr3_init_2500(struct ast_private *ast, const u32 *ddr_table) ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00); } -static void ddr4_init_2500(struct ast_private *ast, const u32 *ddr_table) +static void ddr4_init_2500(struct ast_device *ast, const u32 *ddr_table) { u32 data, data2, pass, retrycnt; u32 ddr_vref, phy_vref; @@ -2002,7 +2002,7 @@ static void ddr4_init_2500(struct ast_private *ast, const u32 *ddr_table) ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00); } -static 
bool ast_dram_init_2500(struct ast_private *ast) +static bool ast_dram_init_2500(struct ast_device *ast) { u32 data; u32 max_tries = 5; @@ -2030,7 +2030,7 @@ static bool ast_dram_init_2500(struct ast_private *ast) return true; } -void ast_patch_ahb_2500(struct ast_private *ast) +void ast_patch_ahb_2500(struct ast_device *ast) { u32 data; @@ -2066,7 +2066,7 @@ void ast_patch_ahb_2500(struct ast_private *ast) void ast_post_chip_2500(struct drm_device *dev) { - struct ast_private *ast = to_ast_private(dev); + struct ast_device *ast = to_ast_device(dev); u32 temp; u8 reg; diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 8b2226f72b24..12e8f30c65f7 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -326,7 +326,7 @@ config DRM_TI_DLPC3433 input that produces a DMD output in RGB565, RGB666, RGB888 formats. - It supports upto 720p resolution with 60 and 120 Hz refresh + It supports up to 720p resolution with 60 and 120 Hz refresh rates. config DRM_TI_TFP410 diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c index e8aae3cdc73d..9316384b4474 100644 --- a/drivers/gpu/drm/bridge/panel.c +++ b/drivers/gpu/drm/bridge/panel.c @@ -81,6 +81,8 @@ static int panel_bridge_attach(struct drm_bridge *bridge, return ret; } + drm_panel_bridge_set_orientation(connector, bridge); + drm_connector_attach_encoder(&panel_bridge->connector, bridge->encoder); @@ -109,30 +111,82 @@ static void panel_bridge_detach(struct drm_bridge *bridge) drm_connector_cleanup(connector); } -static void panel_bridge_pre_enable(struct drm_bridge *bridge) +static void panel_bridge_atomic_pre_enable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) { struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge); + struct drm_atomic_state *atomic_state = old_bridge_state->base.state; + struct drm_encoder *encoder = bridge->encoder; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state; + + crtc = drm_atomic_get_new_crtc_for_encoder(atomic_state, encoder); + if (!crtc) + return; + + old_crtc_state = drm_atomic_get_old_crtc_state(atomic_state, crtc); + if (old_crtc_state && old_crtc_state->self_refresh_active) + return; drm_panel_prepare(panel_bridge->panel); } -static void panel_bridge_enable(struct drm_bridge *bridge) +static void panel_bridge_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) { struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge); + struct drm_atomic_state *atomic_state = old_bridge_state->base.state; + struct drm_encoder *encoder = bridge->encoder; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state; + + crtc = drm_atomic_get_new_crtc_for_encoder(atomic_state, encoder); + if (!crtc) + return; + + old_crtc_state = drm_atomic_get_old_crtc_state(atomic_state, crtc); + if (old_crtc_state && old_crtc_state->self_refresh_active) + return; drm_panel_enable(panel_bridge->panel); } -static void panel_bridge_disable(struct drm_bridge *bridge) +static void panel_bridge_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) { struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge); + struct drm_atomic_state *atomic_state = old_bridge_state->base.state; + struct drm_encoder *encoder = bridge->encoder; + struct drm_crtc *crtc; + struct drm_crtc_state *new_crtc_state; + + crtc = drm_atomic_get_old_crtc_for_encoder(atomic_state, encoder); + if (!crtc) + return; + + new_crtc_state = 
drm_atomic_get_new_crtc_state(atomic_state, crtc); + if (new_crtc_state && new_crtc_state->self_refresh_active) + return; drm_panel_disable(panel_bridge->panel); } -static void panel_bridge_post_disable(struct drm_bridge *bridge) +static void panel_bridge_atomic_post_disable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) { struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge); + struct drm_atomic_state *atomic_state = old_bridge_state->base.state; + struct drm_encoder *encoder = bridge->encoder; + struct drm_crtc *crtc; + struct drm_crtc_state *new_crtc_state; + + crtc = drm_atomic_get_old_crtc_for_encoder(atomic_state, encoder); + if (!crtc) + return; + + new_crtc_state = drm_atomic_get_new_crtc_state(atomic_state, crtc); + if (new_crtc_state && new_crtc_state->self_refresh_active) + return; drm_panel_unprepare(panel_bridge->panel); } @@ -159,10 +213,10 @@ static void panel_bridge_debugfs_init(struct drm_bridge *bridge, static const struct drm_bridge_funcs panel_bridge_bridge_funcs = { .attach = panel_bridge_attach, .detach = panel_bridge_detach, - .pre_enable = panel_bridge_pre_enable, - .enable = panel_bridge_enable, - .disable = panel_bridge_disable, - .post_disable = panel_bridge_post_disable, + .atomic_pre_enable = panel_bridge_atomic_pre_enable, + .atomic_enable = panel_bridge_atomic_enable, + .atomic_disable = panel_bridge_atomic_disable, + .atomic_post_disable = panel_bridge_atomic_post_disable, .get_modes = panel_bridge_get_modes, .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c index 0b6a28436885..77f7f7f54757 100644 --- a/drivers/gpu/drm/bridge/tc358762.c +++ b/drivers/gpu/drm/bridge/tc358762.c @@ -229,6 +229,7 @@ static int tc358762_probe(struct mipi_dsi_device *dsi) ctx->bridge.funcs = &tc358762_bridge_funcs; ctx->bridge.type = DRM_MODE_CONNECTOR_DPI; ctx->bridge.of_node = dev->of_node; + ctx->bridge.pre_enable_prev_first = true; drm_bridge_add(&ctx->bridge); diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index fed41800fea7..b4c6ffc438da 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -985,6 +985,66 @@ drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state, EXPORT_SYMBOL(drm_atomic_get_new_connector_for_encoder); /** + * drm_atomic_get_old_crtc_for_encoder - Get old crtc for an encoder + * @state: Atomic state + * @encoder: The encoder to fetch the crtc state for + * + * This function finds and returns the crtc that was connected to @encoder + * as specified by the @state. + * + * Returns: The old crtc connected to @encoder, or NULL if the encoder is + * not connected. 
+ */ +struct drm_crtc * +drm_atomic_get_old_crtc_for_encoder(struct drm_atomic_state *state, + struct drm_encoder *encoder) +{ + struct drm_connector *connector; + struct drm_connector_state *conn_state; + + connector = drm_atomic_get_old_connector_for_encoder(state, encoder); + if (!connector) + return NULL; + + conn_state = drm_atomic_get_old_connector_state(state, connector); + if (!conn_state) + return NULL; + + return conn_state->crtc; +} +EXPORT_SYMBOL(drm_atomic_get_old_crtc_for_encoder); + +/** + * drm_atomic_get_new_crtc_for_encoder - Get new crtc for an encoder + * @state: Atomic state + * @encoder: The encoder to fetch the crtc state for + * + * This function finds and returns the crtc that will be connected to @encoder + * as specified by the @state. + * + * Returns: The new crtc connected to @encoder, or NULL if the encoder is + * not connected. + */ +struct drm_crtc * +drm_atomic_get_new_crtc_for_encoder(struct drm_atomic_state *state, + struct drm_encoder *encoder) +{ + struct drm_connector *connector; + struct drm_connector_state *conn_state; + + connector = drm_atomic_get_new_connector_for_encoder(state, encoder); + if (!connector) + return NULL; + + conn_state = drm_atomic_get_new_connector_state(state, connector); + if (!conn_state) + return NULL; + + return conn_state->crtc; +} +EXPORT_SYMBOL(drm_atomic_get_new_crtc_for_encoder); + +/** * drm_atomic_get_connector_state - get connector state * @state: global atomic state object * @connector: connector to get state object for diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 28e3f2c8917e..d4d2a2ce40f8 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -2739,6 +2739,11 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev, funcs->atomic_disable(plane, old_state); } else if (new_plane_state->crtc || disabling) { funcs->atomic_update(plane, old_state); + + if (!disabling && funcs->atomic_enable) { + if (drm_atomic_plane_enabling(old_plane_state, new_plane_state)) + funcs->atomic_enable(plane, old_state); + } } } @@ -2799,6 +2804,7 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state) struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(old_state, plane); const struct drm_plane_helper_funcs *plane_funcs; + bool disabling; plane_funcs = plane->helper_private; @@ -2808,12 +2814,18 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state) WARN_ON(new_plane_state->crtc && new_plane_state->crtc != crtc); - if (drm_atomic_plane_disabling(old_plane_state, new_plane_state) && - plane_funcs->atomic_disable) + disabling = drm_atomic_plane_disabling(old_plane_state, new_plane_state); + + if (disabling && plane_funcs->atomic_disable) { plane_funcs->atomic_disable(plane, old_state); - else if (new_plane_state->crtc || - drm_atomic_plane_disabling(old_plane_state, new_plane_state)) + } else if (new_plane_state->crtc || disabling) { plane_funcs->atomic_update(plane, old_state); + + if (!disabling && plane_funcs->atomic_enable) { + if (drm_atomic_plane_enabling(old_plane_state, new_plane_state)) + plane_funcs->atomic_enable(plane, old_state); + } + } } if (crtc_funcs && crtc_funcs->atomic_flush) diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 9d0250c28e9b..4b12c7a39ee3 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -33,9 +33,11 @@ #include <drm/drm_sysfs.h> #include <drm/drm_utils.h> -#include 
<linux/fb.h> +#include <linux/property.h> #include <linux/uaccess.h> +#include <video/cmdline.h> + #include "drm_crtc_internal.h" #include "drm_internal.h" @@ -154,9 +156,10 @@ EXPORT_SYMBOL(drm_get_connector_type_name); static void drm_connector_get_cmdline_mode(struct drm_connector *connector) { struct drm_cmdline_mode *mode = &connector->cmdline_mode; - char *option = NULL; + const char *option; - if (fb_get_options(connector->name, &option)) + option = video_get_options(connector->name); + if (!option) return; if (!drm_mode_parse_command_line_for_connector(option, @@ -1446,6 +1449,20 @@ static const struct drm_prop_enum_list dp_colorspaces[] = { * a firmware handled hotkey. Therefor userspace must not include the * privacy-screen sw-state in an atomic commit unless it wants to change * its value. + * + * left margin, right margin, top margin, bottom margin: + * Add margins to the connector's viewport. This is typically used to + * mitigate underscan on TVs. + * + * The value is the size in pixels of the black border which will be + * added. The attached CRTC's content will be scaled to fill the whole + * area inside the margin. + * + * The margins configuration might be sent to the sink, e.g. via HDMI AVI + * InfoFrames. + * + * Drivers can set up these properties by calling + * drm_mode_create_tv_margin_properties(). */ int drm_connector_create_standard_properties(struct drm_device *dev) @@ -1590,10 +1607,6 @@ EXPORT_SYMBOL(drm_connector_attach_dp_subconnector_property); /* * TODO: Document the properties: - * - left margin - * - right margin - * - top margin - * - bottom margin * - brightness * - contrast * - flicker reduction @@ -1602,7 +1615,6 @@ EXPORT_SYMBOL(drm_connector_attach_dp_subconnector_property); * - overscan * - saturation * - select subconnector - * - subconnector */ /** * DOC: Analog TV Connector Properties diff --git a/drivers/gpu/drm/drm_displayid.c b/drivers/gpu/drm/drm_displayid.c index 38ea8203df45..9edc111be7ee 100644 --- a/drivers/gpu/drm/drm_displayid.c +++ b/drivers/gpu/drm/drm_displayid.c @@ -7,13 +7,29 @@ #include <drm/drm_edid.h> #include <drm/drm_print.h> -static int validate_displayid(const u8 *displayid, int length, int idx) +static const struct displayid_header * +displayid_get_header(const u8 *displayid, int length, int index) +{ + const struct displayid_header *base; + + if (sizeof(*base) > length - index) + return ERR_PTR(-EINVAL); + + base = (const struct displayid_header *)&displayid[index]; + + return base; +} + +static const struct displayid_header * +validate_displayid(const u8 *displayid, int length, int idx) { int i, dispid_length; u8 csum = 0; const struct displayid_header *base; - base = (const struct displayid_header *)&displayid[idx]; + base = displayid_get_header(displayid, length, idx); + if (IS_ERR(base)) + return base; DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n", base->rev, base->bytes, base->prod_id, base->ext_count); @@ -21,16 +37,16 @@ static int validate_displayid(const u8 *displayid, int length, int idx) /* +1 for DispID checksum */ dispid_length = sizeof(*base) + base->bytes + 1; if (dispid_length > length - idx) - return -EINVAL; + return ERR_PTR(-EINVAL); for (i = 0; i < dispid_length; i++) csum += displayid[idx + i]; if (csum) { DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum); - return -EINVAL; + return ERR_PTR(-EINVAL); } - return 0; + return base; } static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid, @@ -39,7 +55,6 @@ static const u8 *drm_find_displayid_extension(const 
struct drm_edid *drm_edid, { const u8 *displayid = drm_find_edid_extension(drm_edid, DISPLAYID_EXT, ext_index); const struct displayid_header *base; - int ret; if (!displayid) return NULL; @@ -48,11 +63,10 @@ static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid, *length = EDID_LENGTH - 1; *idx = 1; - ret = validate_displayid(displayid, *length, *idx); - if (ret) + base = validate_displayid(displayid, *length, *idx); + if (IS_ERR(base)) return NULL; - base = (const struct displayid_header *)&displayid[*idx]; *length = *idx + sizeof(*base) + base->bytes; return displayid; @@ -109,6 +123,9 @@ __displayid_iter_next(struct displayid_iter *iter) } for (;;) { + /* The first section we encounter is the base section */ + bool base_section = !iter->section; + iter->section = drm_find_displayid_extension(iter->drm_edid, &iter->length, &iter->idx, @@ -118,6 +135,18 @@ __displayid_iter_next(struct displayid_iter *iter) return NULL; } + /* Save the structure version and primary use case. */ + if (base_section) { + const struct displayid_header *base; + + base = displayid_get_header(iter->section, iter->length, + iter->idx); + if (!IS_ERR(base)) { + iter->version = base->rev; + iter->primary_use = base->prod_id; + } + } + iter->idx += sizeof(struct displayid_header); block = displayid_iter_block(iter); @@ -130,3 +159,18 @@ void displayid_iter_end(struct displayid_iter *iter) { memset(iter, 0, sizeof(*iter)); } + +/* DisplayID Structure Version/Revision from the Base Section. */ +u8 displayid_version(const struct displayid_iter *iter) +{ + return iter->version; +} + +/* + * DisplayID Primary Use Case (2.0+) or Product Type Identifier (1.0-1.3) from + * the Base Section. + */ +u8 displayid_primary_use(const struct displayid_iter *iter) +{ + return iter->primary_use; +} diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c index ad17fa21cebb..70032bba1c97 100644 --- a/drivers/gpu/drm/drm_dumb_buffers.c +++ b/drivers/gpu/drm/drm_dumb_buffers.c @@ -139,10 +139,7 @@ int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle, if (!dev->driver->dumb_create) return -ENOSYS; - if (dev->driver->dumb_destroy) - return dev->driver->dumb_destroy(file_priv, dev, handle); - else - return drm_gem_dumb_destroy(file_priv, dev, handle); + return drm_gem_handle_delete(file_priv, handle); } int drm_mode_destroy_dumb_ioctl(struct drm_device *dev, diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 3d0a4da661bc..c18ec866678d 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -3424,10 +3424,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto connector->base.id, connector->name); return NULL; } - if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) { - drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Composite sync not supported\n", - connector->base.id, connector->name); - } /* it is incorrect if hsync/vsync width is zero */ if (!hsync_pulse_width || !vsync_pulse_width) { @@ -3474,10 +3470,27 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto if (info->quirks & EDID_QUIRK_DETAILED_SYNC_PP) { mode->flags |= DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC; } else { - mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ? - DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC; - mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ? 
- DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC; + switch (pt->misc & DRM_EDID_PT_SYNC_MASK) { + case DRM_EDID_PT_ANALOG_CSYNC: + case DRM_EDID_PT_BIPOLAR_ANALOG_CSYNC: + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Analog composite sync!\n", + connector->base.id, connector->name); + mode->flags |= DRM_MODE_FLAG_CSYNC | DRM_MODE_FLAG_NCSYNC; + break; + case DRM_EDID_PT_DIGITAL_CSYNC: + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Digital composite sync!\n", + connector->base.id, connector->name); + mode->flags |= DRM_MODE_FLAG_CSYNC; + mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ? + DRM_MODE_FLAG_PCSYNC : DRM_MODE_FLAG_NCSYNC; + break; + case DRM_EDID_PT_DIGITAL_SEPARATE_SYNC: + mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ? + DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC; + mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ? + DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC; + break; + } } set_size: @@ -6433,6 +6446,29 @@ static void drm_reset_display_info(struct drm_connector *connector) info->quirks = 0; } +static void update_displayid_info(struct drm_connector *connector, + const struct drm_edid *drm_edid) +{ + struct drm_display_info *info = &connector->display_info; + const struct displayid_block *block; + struct displayid_iter iter; + + displayid_iter_edid_begin(drm_edid, &iter); + displayid_iter_for_each(block, &iter) { + if (displayid_version(&iter) == DISPLAY_ID_STRUCTURE_VER_20 && + (displayid_primary_use(&iter) == PRIMARY_USE_HEAD_MOUNTED_VR || + displayid_primary_use(&iter) == PRIMARY_USE_HEAD_MOUNTED_AR)) + info->non_desktop = true; + + /* + * We're only interested in the base section here, no need to + * iterate further. + */ + break; + } + displayid_iter_end(&iter); +} + static void update_display_info(struct drm_connector *connector, const struct drm_edid *drm_edid) { @@ -6463,6 +6499,8 @@ static void update_display_info(struct drm_connector *connector, info->color_formats |= DRM_COLOR_FORMAT_RGB444; drm_parse_cea_ext(connector, drm_edid); + update_displayid_info(connector, drm_edid); + /* * Digital sink with "DFP 1.x compliant TMDS" according to EDID 1.3? 
* @@ -7242,6 +7280,15 @@ static void drm_parse_tiled_block(struct drm_connector *connector, } } +static bool displayid_is_tiled_block(const struct displayid_iter *iter, + const struct displayid_block *block) +{ + return (displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_12 && + block->tag == DATA_BLOCK_TILED_DISPLAY) || + (displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_20 && + block->tag == DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY); +} + static void _drm_update_tile_info(struct drm_connector *connector, const struct drm_edid *drm_edid) { @@ -7252,7 +7299,7 @@ static void _drm_update_tile_info(struct drm_connector *connector, displayid_iter_edid_begin(drm_edid, &iter); displayid_iter_for_each(block, &iter) { - if (block->tag == DATA_BLOCK_TILED_DISPLAY) + if (displayid_is_tiled_block(&iter, block)) drm_parse_tiled_block(connector, block); } displayid_iter_end(&iter); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 7a3cb08dc942..0f7f928f21da 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -336,13 +336,6 @@ out: } EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset); -int drm_gem_dumb_destroy(struct drm_file *file, - struct drm_device *dev, - u32 handle) -{ - return drm_gem_handle_delete(file, handle); -} - /** * drm_gem_handle_create_tail - internal functions to create a handle * @file_priv: drm file-private structure to register the handle for @@ -1344,7 +1337,15 @@ drm_gem_lru_remove(struct drm_gem_object *obj) } EXPORT_SYMBOL(drm_gem_lru_remove); -static void +/** + * drm_gem_lru_move_tail_locked - move the object to the tail of the LRU + * + * Like &drm_gem_lru_move_tail but lru lock must be held + * + * @lru: The LRU to move the object into. + * @obj: The GEM object to move into this LRU + */ +void drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj) { lockdep_assert_held_once(lru->lock); @@ -1356,6 +1357,7 @@ drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj list_add_tail(&obj->lru_node, &lru->list); obj->lru = lru; } +EXPORT_SYMBOL(drm_gem_lru_move_tail_locked); /** * drm_gem_lru_move_tail - move the object to the tail of the LRU @@ -1466,3 +1468,21 @@ tail: return freed; } EXPORT_SYMBOL(drm_gem_lru_scan); + +/** + * drm_gem_evict - helper to evict backing pages for a GEM object + * @obj: obj in question + */ +int drm_gem_evict(struct drm_gem_object *obj) +{ + dma_resv_assert_held(obj->resv); + + if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ)) + return -EBUSY; + + if (obj->funcs->evict) + return obj->funcs->evict(obj); + + return 0; +} +EXPORT_SYMBOL(drm_gem_evict); diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 75185a960fc4..9b0d540ff4a8 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -141,7 +141,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; - WARN_ON(shmem->vmap_use_count); + drm_WARN_ON(obj->dev, shmem->vmap_use_count); if (obj->import_attach) { drm_prime_gem_destroy(obj, shmem->sgt); @@ -156,7 +156,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) drm_gem_shmem_put_pages(shmem); } - WARN_ON(shmem->pages_use_count); + drm_WARN_ON(obj->dev, shmem->pages_use_count); drm_gem_object_release(obj); mutex_destroy(&shmem->pages_lock); @@ -175,7 +175,8 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem) pages = drm_gem_get_pages(obj); if (IS_ERR(pages)) { - 
DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages)); + drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n", + PTR_ERR(pages)); shmem->pages_use_count = 0; return PTR_ERR(pages); } @@ -207,9 +208,10 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem) */ int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem) { + struct drm_gem_object *obj = &shmem->base; int ret; - WARN_ON(shmem->base.import_attach); + drm_WARN_ON(obj->dev, obj->import_attach); ret = mutex_lock_interruptible(&shmem->pages_lock); if (ret) @@ -225,7 +227,7 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; - if (WARN_ON_ONCE(!shmem->pages_use_count)) + if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count)) return; if (--shmem->pages_use_count > 0) @@ -268,7 +270,9 @@ EXPORT_SYMBOL(drm_gem_shmem_put_pages); */ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem) { - WARN_ON(shmem->base.import_attach); + struct drm_gem_object *obj = &shmem->base; + + drm_WARN_ON(obj->dev, obj->import_attach); return drm_gem_shmem_get_pages(shmem); } @@ -283,7 +287,9 @@ EXPORT_SYMBOL(drm_gem_shmem_pin); */ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem) { - WARN_ON(shmem->base.import_attach); + struct drm_gem_object *obj = &shmem->base; + + drm_WARN_ON(obj->dev, obj->import_attach); drm_gem_shmem_put_pages(shmem); } @@ -295,24 +301,22 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct drm_gem_object *obj = &shmem->base; int ret = 0; - if (shmem->vmap_use_count++ > 0) { - iosys_map_set_vaddr(map, shmem->vaddr); - return 0; - } - if (obj->import_attach) { ret = dma_buf_vmap(obj->import_attach->dmabuf, map); if (!ret) { - if (WARN_ON(map->is_iomem)) { + if (drm_WARN_ON(obj->dev, map->is_iomem)) { dma_buf_vunmap(obj->import_attach->dmabuf, map); - ret = -EIO; - goto err_put_pages; + return -EIO; } - shmem->vaddr = map->vaddr; } } else { pgprot_t prot = PAGE_KERNEL; + if (shmem->vmap_use_count++ > 0) { + iosys_map_set_vaddr(map, shmem->vaddr); + return 0; + } + ret = drm_gem_shmem_get_pages(shmem); if (ret) goto err_zero_use; @@ -328,7 +332,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, } if (ret) { - DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret); + drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret); goto err_put_pages; } @@ -378,15 +382,15 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, { struct drm_gem_object *obj = &shmem->base; - if (WARN_ON_ONCE(!shmem->vmap_use_count)) - return; - - if (--shmem->vmap_use_count > 0) - return; - if (obj->import_attach) { dma_buf_vunmap(obj->import_attach->dmabuf, map); } else { + if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count)) + return; + + if (--shmem->vmap_use_count > 0) + return; + vunmap(shmem->vaddr); drm_gem_shmem_put_pages(shmem); } @@ -461,7 +465,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem) struct drm_gem_object *obj = &shmem->base; struct drm_device *dev = obj->dev; - WARN_ON(!drm_gem_shmem_is_purgeable(shmem)); + drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem)); dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0); sg_free_table(shmem->sgt); @@ -550,7 +554,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf) mutex_lock(&shmem->pages_lock); if (page_offset >= num_pages || - WARN_ON_ONCE(!shmem->pages) || + drm_WARN_ON_ONCE(obj->dev, !shmem->pages) || shmem->madv < 0) { ret = 
VM_FAULT_SIGBUS; } else { @@ -569,7 +573,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma) struct drm_gem_object *obj = vma->vm_private_data; struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - WARN_ON(shmem->base.import_attach); + drm_WARN_ON(obj->dev, obj->import_attach); mutex_lock(&shmem->pages_lock); @@ -578,7 +582,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma) * mmap'd, vm_open() just grabs an additional reference for the new * mm the vma is getting copied into (ie. on fork()). */ - if (!WARN_ON_ONCE(!shmem->pages_use_count)) + if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count)) shmem->pages_use_count++; mutex_unlock(&shmem->pages_lock); @@ -648,6 +652,9 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap); void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem, struct drm_printer *p, unsigned int indent) { + if (shmem->base.import_attach) + return; + drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count); drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count); drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr); @@ -672,7 +679,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; - WARN_ON(shmem->base.import_attach); + drm_WARN_ON(obj->dev, obj->import_attach); return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT); } @@ -687,7 +694,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_ if (shmem->sgt) return shmem->sgt; - WARN_ON(obj->import_attach); + drm_WARN_ON(obj->dev, obj->import_attach); ret = drm_gem_shmem_get_pages_locked(shmem); if (ret) diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index d40b3edb52d0..0bea3df2a16d 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -916,6 +916,17 @@ static int bo_driver_move(struct ttm_buffer_object *bo, { struct drm_gem_vram_object *gbo; + if (!bo->resource) { + if (new_mem->mem_type != TTM_PL_SYSTEM) { + hop->mem_type = TTM_PL_SYSTEM; + hop->flags = TTM_PL_FLAG_TEMPORARY; + return -EMULTIHOP; + } + + ttm_bo_move_null(bo, new_mem); + return 0; + } + gbo = drm_gem_vram_of_bo(bo); return drm_gem_vram_bo_driver_move(gbo, evict, ctx, new_mem); diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index ed2103ee272c..d7e023bbb0d5 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -178,9 +178,6 @@ void drm_gem_unpin(struct drm_gem_object *obj); int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map); void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map); -int drm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, - u32 handle); - /* drm_debugfs.c drm_debugfs_crc.c */ #if defined(CONFIG_DEBUG_FS) int drm_debugfs_init(struct drm_minor *minor, int minor_id, diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 40d482a01178..ac9a406250c5 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -2339,8 +2339,7 @@ static int drm_mode_parse_cmdline_named_mode(const char *name, * @mode: preallocated drm_cmdline_mode structure to fill out * * This parses @mode_option command line modeline for modes and options to - * configure the connector. If @mode_option is NULL the default command line - * modeline in fb_mode_option will be parsed instead. + * configure the connector. 
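+ *
+ * For example (connector name and mode are illustrative), booting with
+ * video=HDMI-A-1:1920x1080@60e on the kernel command line would force-enable
+ * the HDMI-A-1 connector with a 1920x1080 60 Hz mode; the trailing "e" is
+ * the force-enable bit described below.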
 *
 * This uses the same parameters as the fb modedb.c, except for an extra
 * force-enable, force-enable-digital and force-disable bit at the end::
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 7bbcb999bb75..177b600895d3 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -10,6 +10,7 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_device.h>
 #include <drm/drm_encoder.h>
+#include <drm/drm_mipi_dsi.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
@@ -493,3 +494,53 @@ int drm_of_get_data_lanes_count_ep(const struct device_node *port,
 	return ret;
 }
 EXPORT_SYMBOL_GPL(drm_of_get_data_lanes_count_ep);
+
+#if IS_ENABLED(CONFIG_DRM_MIPI_DSI)
+
+/**
+ * drm_of_get_dsi_bus - find the DSI bus for a given device
+ * @dev: parent device of display (SPI, I2C)
+ *
+ * Gets parent DSI bus for a DSI device controlled through a bus other
+ * than MIPI-DCS (SPI, I2C, etc.) using the Device Tree.
+ *
+ * Returns a pointer to the mipi_dsi_host if successful, -ENODEV if the
+ * device tree graph does not point at a DSI host node, or -EPROBE_DEFER
+ * if the DSI host is found but not yet registered.
+ */
+struct mipi_dsi_host *drm_of_get_dsi_bus(struct device *dev)
+{
+	struct mipi_dsi_host *dsi_host;
+	struct device_node *endpoint, *dsi_host_node;
+
+	/*
+	 * Get first endpoint child from device.
+	 */
+	endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+	if (!endpoint)
+		return ERR_PTR(-ENODEV);
+
+	/*
+	 * Follow the first endpoint to get the DSI host node and then
+	 * release the endpoint since we no longer need it.
+	 */
+	dsi_host_node = of_graph_get_remote_port_parent(endpoint);
+	of_node_put(endpoint);
+	if (!dsi_host_node)
+		return ERR_PTR(-ENODEV);
+
+	/*
+	 * Get the DSI host from the DSI host node. If we get an error
+	 * or the return is null, assume we're not ready to probe just
+	 * yet. Release the DSI host node since we're done with it.
+	 */
+	dsi_host = of_find_mipi_dsi_host_by_node(dsi_host_node);
+	of_node_put(dsi_host_node);
+	if (IS_ERR_OR_NULL(dsi_host))
+		return ERR_PTR(-EPROBE_DEFER);
+
+	return dsi_host;
+}
+EXPORT_SYMBOL_GPL(drm_of_get_dsi_bus);
+
+#endif /* CONFIG_DRM_MIPI_DSI */
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 8127be134c39..2fb9bf901a2c 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -590,8 +590,9 @@ retry:
 	 */
 	dev->mode_config.delayed_event = true;
 	if (dev->mode_config.poll_enabled)
-		schedule_delayed_work(&dev->mode_config.output_poll_work,
-				      0);
+		mod_delayed_work(system_wq,
+				 &dev->mode_config.output_poll_work,
+				 0);
 	}
 
 	/* Re-enable polling in case the global poll config changed. */
diff --git a/drivers/gpu/drm/drm_suballoc.c b/drivers/gpu/drm/drm_suballoc.c
new file mode 100644
index 000000000000..38cc7a123819
--- /dev/null
+++ b/drivers/gpu/drm/drm_suballoc.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2011 Red Hat Inc.
+ * Copyright 2023 Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/* Algorithm:
+ *
+ * We store the last allocated bo in "hole"; we always try to allocate
+ * after the last allocated bo. The principle is that in a linear GPU ring
+ * progression, what is after last is the oldest bo we allocated and thus
+ * the first one that should no longer be in use by the GPU.
+ *
+ * If that is not the case, we skip over the bo after last to the closest
+ * done bo, if one exists. If none exists and we are not asked to
+ * block, we report failure to allocate.
+ *
+ * If we are asked to block, we wait on the oldest fence of each
+ * ring and proceed as soon as any one of those fences completes.
+ */
+
+#include <drm/drm_suballoc.h>
+#include <drm/drm_print.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/dma-fence.h>
+
+static void drm_suballoc_remove_locked(struct drm_suballoc *sa);
+static void drm_suballoc_try_free(struct drm_suballoc_manager *sa_manager);
+
+/**
+ * drm_suballoc_manager_init() - Initialise the drm_suballoc_manager
+ * @sa_manager: pointer to the sa_manager
+ * @size: number of bytes we want to suballocate
+ * @align: alignment for each suballocated chunk
+ *
+ * Prepares the suballocation manager for suballocations.
+ */
+void drm_suballoc_manager_init(struct drm_suballoc_manager *sa_manager,
+			       size_t size, size_t align)
+{
+	unsigned int i;
+
+	BUILD_BUG_ON(!is_power_of_2(DRM_SUBALLOC_MAX_QUEUES));
+
+	if (!align)
+		align = 1;
+
+	/* alignment must be a power of 2 */
+	if (WARN_ON_ONCE(align & (align - 1)))
+		align = roundup_pow_of_two(align);
+
+	init_waitqueue_head(&sa_manager->wq);
+	sa_manager->size = size;
+	sa_manager->align = align;
+	sa_manager->hole = &sa_manager->olist;
+	INIT_LIST_HEAD(&sa_manager->olist);
+	for (i = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i)
+		INIT_LIST_HEAD(&sa_manager->flist[i]);
+}
+EXPORT_SYMBOL(drm_suballoc_manager_init);
+
+/**
+ * drm_suballoc_manager_fini() - Destroy the drm_suballoc_manager
+ * @sa_manager: pointer to the sa_manager
+ *
+ * Cleans up the suballocation manager after use. All fences added
+ * with drm_suballoc_free() must be signaled, or we cannot clean up
+ * the entire manager.
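+ *
+ * A minimal usage sketch (the SZ_16K ring size and 64 byte alignment are
+ * illustrative, not requirements):
+ *
+ *	struct drm_suballoc_manager mgr;
+ *
+ *	drm_suballoc_manager_init(&mgr, SZ_16K, 64);
+ *	... suballocate with drm_suballoc_new(), retire with drm_suballoc_free() ...
+ *	drm_suballoc_manager_fini(&mgr);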
+ */ +void drm_suballoc_manager_fini(struct drm_suballoc_manager *sa_manager) +{ + struct drm_suballoc *sa, *tmp; + + if (!sa_manager->size) + return; + + if (!list_empty(&sa_manager->olist)) { + sa_manager->hole = &sa_manager->olist; + drm_suballoc_try_free(sa_manager); + if (!list_empty(&sa_manager->olist)) + DRM_ERROR("sa_manager is not empty, clearing anyway\n"); + } + list_for_each_entry_safe(sa, tmp, &sa_manager->olist, olist) { + drm_suballoc_remove_locked(sa); + } + + sa_manager->size = 0; +} +EXPORT_SYMBOL(drm_suballoc_manager_fini); + +static void drm_suballoc_remove_locked(struct drm_suballoc *sa) +{ + struct drm_suballoc_manager *sa_manager = sa->manager; + + if (sa_manager->hole == &sa->olist) + sa_manager->hole = sa->olist.prev; + + list_del_init(&sa->olist); + list_del_init(&sa->flist); + dma_fence_put(sa->fence); + kfree(sa); +} + +static void drm_suballoc_try_free(struct drm_suballoc_manager *sa_manager) +{ + struct drm_suballoc *sa, *tmp; + + if (sa_manager->hole->next == &sa_manager->olist) + return; + + sa = list_entry(sa_manager->hole->next, struct drm_suballoc, olist); + list_for_each_entry_safe_from(sa, tmp, &sa_manager->olist, olist) { + if (!sa->fence || !dma_fence_is_signaled(sa->fence)) + return; + + drm_suballoc_remove_locked(sa); + } +} + +static size_t drm_suballoc_hole_soffset(struct drm_suballoc_manager *sa_manager) +{ + struct list_head *hole = sa_manager->hole; + + if (hole != &sa_manager->olist) + return list_entry(hole, struct drm_suballoc, olist)->eoffset; + + return 0; +} + +static size_t drm_suballoc_hole_eoffset(struct drm_suballoc_manager *sa_manager) +{ + struct list_head *hole = sa_manager->hole; + + if (hole->next != &sa_manager->olist) + return list_entry(hole->next, struct drm_suballoc, olist)->soffset; + return sa_manager->size; +} + +static bool drm_suballoc_try_alloc(struct drm_suballoc_manager *sa_manager, + struct drm_suballoc *sa, + size_t size, size_t align) +{ + size_t soffset, eoffset, wasted; + + soffset = drm_suballoc_hole_soffset(sa_manager); + eoffset = drm_suballoc_hole_eoffset(sa_manager); + wasted = round_up(soffset, align) - soffset; + + if ((eoffset - soffset) >= (size + wasted)) { + soffset += wasted; + + sa->manager = sa_manager; + sa->soffset = soffset; + sa->eoffset = soffset + size; + list_add(&sa->olist, sa_manager->hole); + INIT_LIST_HEAD(&sa->flist); + sa_manager->hole = &sa->olist; + return true; + } + return false; +} + +static bool __drm_suballoc_event(struct drm_suballoc_manager *sa_manager, + size_t size, size_t align) +{ + size_t soffset, eoffset, wasted; + unsigned int i; + + for (i = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i) + if (!list_empty(&sa_manager->flist[i])) + return true; + + soffset = drm_suballoc_hole_soffset(sa_manager); + eoffset = drm_suballoc_hole_eoffset(sa_manager); + wasted = round_up(soffset, align) - soffset; + + return ((eoffset - soffset) >= (size + wasted)); +} + +/** + * drm_suballoc_event() - Check if we can stop waiting + * @sa_manager: pointer to the sa_manager + * @size: number of bytes we want to allocate + * @align: alignment we need to match + * + * Return: true if either there is a fence we can wait for or + * enough free memory to satisfy the allocation directly. + * false otherwise. 
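+ *
+ * This locking variant is the wait_event() predicate used by
+ * drm_suballoc_new() on the non-interruptible path, where the wq lock
+ * is dropped before sleeping.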
+ */
+static bool drm_suballoc_event(struct drm_suballoc_manager *sa_manager,
+			       size_t size, size_t align)
+{
+	bool ret;
+
+	spin_lock(&sa_manager->wq.lock);
+	ret = __drm_suballoc_event(sa_manager, size, align);
+	spin_unlock(&sa_manager->wq.lock);
+	return ret;
+}
+
+static bool drm_suballoc_next_hole(struct drm_suballoc_manager *sa_manager,
+				   struct dma_fence **fences,
+				   unsigned int *tries)
+{
+	struct drm_suballoc *best_bo = NULL;
+	unsigned int i, best_idx;
+	size_t soffset, best, tmp;
+
+	/* if hole points to the end of the buffer */
+	if (sa_manager->hole->next == &sa_manager->olist) {
+		/* try again with its beginning */
+		sa_manager->hole = &sa_manager->olist;
+		return true;
+	}
+
+	soffset = drm_suballoc_hole_soffset(sa_manager);
+	/* to handle wrap around we add sa_manager->size */
+	best = sa_manager->size * 2;
+	/* go over all fence lists and try to find the closest sa
+	 * of the current last
+	 */
+	for (i = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i) {
+		struct drm_suballoc *sa;
+
+		fences[i] = NULL;
+
+		if (list_empty(&sa_manager->flist[i]))
+			continue;
+
+		sa = list_first_entry(&sa_manager->flist[i],
+				      struct drm_suballoc, flist);
+
+		if (!dma_fence_is_signaled(sa->fence)) {
+			fences[i] = sa->fence;
+			continue;
+		}
+
+		/* limit the number of tries each freelist gets */
+		if (tries[i] > 2)
+			continue;
+
+		tmp = sa->soffset;
+		if (tmp < soffset) {
+			/* wrap around, pretend it's after */
+			tmp += sa_manager->size;
+		}
+		tmp -= soffset;
+		if (tmp < best) {
+			/* this sa bo is the closest one */
+			best = tmp;
+			best_idx = i;
+			best_bo = sa;
+		}
+	}
+
+	if (best_bo) {
+		++tries[best_idx];
+		sa_manager->hole = best_bo->olist.prev;
+
+		/*
+		 * We know that this one is signaled,
+		 * so it's safe to remove it.
+		 */
+		drm_suballoc_remove_locked(best_bo);
+		return true;
+	}
+	return false;
+}
+
+/**
+ * drm_suballoc_new() - Make a suballocation.
+ * @sa_manager: pointer to the sa_manager
+ * @size: number of bytes we want to suballocate.
+ * @gfp: gfp flags used for memory allocation. Typically GFP_KERNEL but
+ * the argument is provided for suballocations from reclaim context or
+ * where the caller wants to avoid pipelining rather than wait for
+ * reclaim.
+ * @intr: Whether to perform waits interruptibly. This should typically
+ * always be true, unless the caller needs to propagate a
+ * non-interruptible context from above layers.
+ * @align: Alignment. Must not exceed the default manager alignment.
+ * If @align is zero, then the manager alignment is used.
+ *
+ * Try to make a suballocation of size @size, which will be rounded
+ * up to the alignment specified in drm_suballoc_manager_init().
+ *
+ * Return: a new suballocated bo, or an ERR_PTR.
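+ *
+ * A minimal call sequence (the manager, the SZ_4K size and the fence are
+ * illustrative):
+ *
+ *	struct drm_suballoc *sa;
+ *
+ *	sa = drm_suballoc_new(&mgr, SZ_4K, GFP_KERNEL, true, 0);
+ *	if (IS_ERR(sa))
+ *		return PTR_ERR(sa);
+ *	... emit commands into [sa->soffset, sa->eoffset) ...
+ *	drm_suballoc_free(sa, fence);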
+ */ +struct drm_suballoc * +drm_suballoc_new(struct drm_suballoc_manager *sa_manager, size_t size, + gfp_t gfp, bool intr, size_t align) +{ + struct dma_fence *fences[DRM_SUBALLOC_MAX_QUEUES]; + unsigned int tries[DRM_SUBALLOC_MAX_QUEUES]; + unsigned int count; + int i, r; + struct drm_suballoc *sa; + + if (WARN_ON_ONCE(align > sa_manager->align)) + return ERR_PTR(-EINVAL); + if (WARN_ON_ONCE(size > sa_manager->size || !size)) + return ERR_PTR(-EINVAL); + + if (!align) + align = sa_manager->align; + + sa = kmalloc(sizeof(*sa), gfp); + if (!sa) + return ERR_PTR(-ENOMEM); + sa->manager = sa_manager; + sa->fence = NULL; + INIT_LIST_HEAD(&sa->olist); + INIT_LIST_HEAD(&sa->flist); + + spin_lock(&sa_manager->wq.lock); + do { + for (i = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i) + tries[i] = 0; + + do { + drm_suballoc_try_free(sa_manager); + + if (drm_suballoc_try_alloc(sa_manager, sa, + size, align)) { + spin_unlock(&sa_manager->wq.lock); + return sa; + } + + /* see if we can skip over some allocations */ + } while (drm_suballoc_next_hole(sa_manager, fences, tries)); + + for (i = 0, count = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i) + if (fences[i]) + fences[count++] = dma_fence_get(fences[i]); + + if (count) { + long t; + + spin_unlock(&sa_manager->wq.lock); + t = dma_fence_wait_any_timeout(fences, count, intr, + MAX_SCHEDULE_TIMEOUT, + NULL); + for (i = 0; i < count; ++i) + dma_fence_put(fences[i]); + + r = (t > 0) ? 0 : t; + spin_lock(&sa_manager->wq.lock); + } else if (intr) { + /* if we have nothing to wait for block */ + r = wait_event_interruptible_locked + (sa_manager->wq, + __drm_suballoc_event(sa_manager, size, align)); + } else { + spin_unlock(&sa_manager->wq.lock); + wait_event(sa_manager->wq, + drm_suballoc_event(sa_manager, size, align)); + r = 0; + spin_lock(&sa_manager->wq.lock); + } + } while (!r); + + spin_unlock(&sa_manager->wq.lock); + kfree(sa); + return ERR_PTR(r); +} +EXPORT_SYMBOL(drm_suballoc_new); + +/** + * drm_suballoc_free - Free a suballocation + * @suballoc: pointer to the suballocation + * @fence: fence that signals when suballocation is idle + * + * Free the suballocation. The suballocation can be re-used after @fence signals. 
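+ *
+ * The fence's context number selects one of the DRM_SUBALLOC_MAX_QUEUES
+ * per-ring freelists (idx = fence->context & (DRM_SUBALLOC_MAX_QUEUES - 1)),
+ * so completions on different rings age out independently.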
+ */ +void drm_suballoc_free(struct drm_suballoc *suballoc, + struct dma_fence *fence) +{ + struct drm_suballoc_manager *sa_manager; + + if (!suballoc) + return; + + sa_manager = suballoc->manager; + + spin_lock(&sa_manager->wq.lock); + if (fence && !dma_fence_is_signaled(fence)) { + u32 idx; + + suballoc->fence = dma_fence_get(fence); + idx = fence->context & (DRM_SUBALLOC_MAX_QUEUES - 1); + list_add_tail(&suballoc->flist, &sa_manager->flist[idx]); + } else { + drm_suballoc_remove_locked(suballoc); + } + wake_up_all_locked(&sa_manager->wq); + spin_unlock(&sa_manager->wq.lock); +} +EXPORT_SYMBOL(drm_suballoc_free); + +#ifdef CONFIG_DEBUG_FS +void drm_suballoc_dump_debug_info(struct drm_suballoc_manager *sa_manager, + struct drm_printer *p, + unsigned long long suballoc_base) +{ + struct drm_suballoc *i; + + spin_lock(&sa_manager->wq.lock); + list_for_each_entry(i, &sa_manager->olist, olist) { + unsigned long long soffset = i->soffset; + unsigned long long eoffset = i->eoffset; + + if (&i->olist == sa_manager->hole) + drm_puts(p, ">"); + else + drm_puts(p, " "); + + drm_printf(p, "[0x%010llx 0x%010llx] size %8lld", + suballoc_base + soffset, suballoc_base + eoffset, + eoffset - soffset); + + if (i->fence) + drm_printf(p, " protected by 0x%016llx on context %llu", + (unsigned long long)i->fence->seqno, + (unsigned long long)i->fence->context); + + drm_puts(p, "\n"); + } + spin_unlock(&sa_manager->wq.lock); +} +EXPORT_SYMBOL(drm_suballoc_dump_debug_info); +#endif +MODULE_AUTHOR("Multiple"); +MODULE_DESCRIPTION("Range suballocator helper"); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 918470a04591..8e46f57e4569 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -239,6 +239,7 @@ i915-y += \ display/intel_display_power.o \ display/intel_display_power_map.o \ display/intel_display_power_well.o \ + display/intel_display_rps.o \ display/intel_dmc.o \ display/intel_dpio_phy.o \ display/intel_dpll.o \ @@ -269,7 +270,9 @@ i915-y += \ display/intel_tc.o \ display/intel_vblank.o \ display/intel_vga.o \ + display/intel_wm.o \ display/i9xx_plane.o \ + display/i9xx_wm.o \ display/skl_scaler.o \ display/skl_universal_plane.o \ display/skl_watermark.o diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c index fa754038d669..920d570f7594 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.c +++ b/drivers/gpu/drm/i915/display/g4x_dp.c @@ -17,6 +17,7 @@ #include "intel_display_power.h" #include "intel_display_types.h" #include "intel_dp.h" +#include "intel_dp_aux.h" #include "intel_dp_link_training.h" #include "intel_dpio_phy.h" #include "intel_fifo_underrun.h" @@ -136,16 +137,12 @@ static void intel_dp_prepare(struct intel_encoder *encoder, intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe); } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { - u32 trans_dp; - intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; - trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe)); - if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) - trans_dp |= TRANS_DP_ENH_FRAMING; - else - trans_dp &= ~TRANS_DP_ENH_FRAMING; - intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp); + intel_de_rmw(dev_priv, TRANS_DP_CTL(crtc->pipe), + TRANS_DP_ENH_FRAMING, + drm_dp_enhanced_frame_cap(intel_dp->dpcd) ? 
+ TRANS_DP_ENH_FRAMING : 0); } else { if (IS_G4X(dev_priv) && pipe_config->limited_color_range) intel_dp->DP |= DP_COLOR_RANGE_16_235; @@ -1200,29 +1197,6 @@ static bool g4x_digital_port_connected(struct intel_encoder *encoder) return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; } -static bool gm45_digital_port_connected(struct intel_encoder *encoder) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 bit; - - switch (encoder->hpd_pin) { - case HPD_PORT_B: - bit = PORTB_HOTPLUG_LIVE_STATUS_GM45; - break; - case HPD_PORT_C: - bit = PORTC_HOTPLUG_LIVE_STATUS_GM45; - break; - case HPD_PORT_D: - bit = PORTD_HOTPLUG_LIVE_STATUS_GM45; - break; - default: - MISSING_CASE(encoder->hpd_pin); - return false; - } - - return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; -} - static bool ilk_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -1279,11 +1253,19 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = { bool g4x_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg, enum port port) { + const struct intel_bios_encoder_data *devdata; struct intel_digital_port *dig_port; struct intel_encoder *intel_encoder; struct drm_encoder *encoder; struct intel_connector *intel_connector; + devdata = intel_bios_encoder_data_lookup(dev_priv, port); + + /* FIXME bail? */ + if (!devdata) + drm_dbg_kms(&dev_priv->drm, "No VBT child device for DP-%c\n", + port_name(port)); + dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL); if (!dig_port) return false; @@ -1295,6 +1277,8 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv, intel_encoder = &dig_port->base; encoder = &intel_encoder->base; + intel_encoder->devdata = devdata; + mutex_init(&dig_port->hdcp_mutex); if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base, @@ -1377,10 +1361,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv, dig_port->hpd_pulse = intel_dp_hpd_pulse; if (HAS_GMCH(dev_priv)) { - if (IS_GM45(dev_priv)) - dig_port->connected = gm45_digital_port_connected; - else - dig_port->connected = g4x_digital_port_connected; + dig_port->connected = g4x_digital_port_connected; } else { if (port == PORT_A) dig_port->connected = ilk_digital_port_connected; @@ -1391,7 +1372,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv, if (port != PORT_A) intel_infoframe_init(dig_port); - dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); + dig_port->aux_ch = intel_dp_aux_ch(intel_encoder); if (!intel_dp_init_connector(dig_port, intel_connector)) goto err_init_connector; diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c index 64c3b3990702..448ea26786e0 100644 --- a/drivers/gpu/drm/i915/display/g4x_hdmi.c +++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c @@ -13,6 +13,7 @@ #include "intel_de.h" #include "intel_display_power.h" #include "intel_display_types.h" +#include "intel_dp_aux.h" #include "intel_dpio_phy.h" #include "intel_fifo_underrun.h" #include "intel_hdmi.h" @@ -273,8 +274,8 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state, */ if (pipe_config->pipe_bpp > 24) { - intel_de_write(dev_priv, TRANS_CHICKEN1(pipe), - intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) | TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE); + intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe), + 0, TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE); temp &= ~SDVO_COLOR_FORMAT_MASK; temp |= SDVO_COLOR_FORMAT_8bpc; @@ -290,8 +291,8 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state, intel_de_write(dev_priv, 
intel_hdmi->hdmi_reg, temp); intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); - intel_de_write(dev_priv, TRANS_CHICKEN1(pipe), - intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) & ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE); + intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe), + TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE, 0); } drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio && @@ -548,10 +549,18 @@ intel_hdmi_hotplug(struct intel_encoder *encoder, void g4x_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg, enum port port) { + const struct intel_bios_encoder_data *devdata; struct intel_digital_port *dig_port; struct intel_encoder *intel_encoder; struct intel_connector *intel_connector; + devdata = intel_bios_encoder_data_lookup(dev_priv, port); + + /* FIXME bail? */ + if (!devdata) + drm_dbg_kms(&dev_priv->drm, "No VBT child device for HDMI-%c\n", + port_name(port)); + dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL); if (!dig_port) return; @@ -564,6 +573,8 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv, intel_encoder = &dig_port->base; + intel_encoder->devdata = devdata; + mutex_init(&dig_port->hdcp_mutex); drm_encoder_init(&dev_priv->drm, &intel_encoder->base, @@ -629,6 +640,6 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv, intel_infoframe_init(dig_port); - dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); + dig_port->aux_ch = intel_dp_aux_ch(intel_encoder); intel_hdmi_init_connector(dig_port, intel_connector); } diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c new file mode 100644 index 000000000000..caef72d38798 --- /dev/null +++ b/drivers/gpu/drm/i915/display/i9xx_wm.c @@ -0,0 +1,4047 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include "i915_drv.h" +#include "i9xx_wm.h" +#include "intel_atomic.h" +#include "intel_display.h" +#include "intel_display_trace.h" +#include "intel_mchbar_regs.h" +#include "intel_wm.h" +#include "skl_watermark.h" +#include "vlv_sideband.h" + +/* used in computing the new watermarks state */ +struct intel_wm_config { + unsigned int num_pipes_active; + bool sprites_enabled; + bool sprites_scaled; +}; + +struct cxsr_latency { + bool is_desktop : 1; + bool is_ddr3 : 1; + u16 fsb_freq; + u16 mem_freq; + u16 display_sr; + u16 display_hpll_disable; + u16 cursor_sr; + u16 cursor_hpll_disable; +}; + +static const struct cxsr_latency cxsr_latency_table[] = { + {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ + {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ + {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ + {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */ + {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */ + + {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ + {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ + {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ + {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */ + {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */ + + {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ + {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ + {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ + {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */ + {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */ + + {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ + {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ + {0, 0, 800, 800, 3403, 
33403, 3845, 33845}, /* DDR2-800 SC */ + {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */ + {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */ + + {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ + {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ + {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ + {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */ + {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */ + + {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ + {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ + {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ + {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */ + {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */ +}; + +static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop, + bool is_ddr3, + int fsb, + int mem) +{ + const struct cxsr_latency *latency; + int i; + + if (fsb == 0 || mem == 0) + return NULL; + + for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { + latency = &cxsr_latency_table[i]; + if (is_desktop == latency->is_desktop && + is_ddr3 == latency->is_ddr3 && + fsb == latency->fsb_freq && mem == latency->mem_freq) + return latency; + } + + DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); + + return NULL; +} + +static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable) +{ + u32 val; + + vlv_punit_get(dev_priv); + + val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); + if (enable) + val &= ~FORCE_DDR_HIGH_FREQ; + else + val |= FORCE_DDR_HIGH_FREQ; + val &= ~FORCE_DDR_LOW_FREQ; + val |= FORCE_DDR_FREQ_REQ_ACK; + vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); + + if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & + FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) + drm_err(&dev_priv->drm, + "timed out waiting for Punit DDR DVFS request\n"); + + vlv_punit_put(dev_priv); +} + +static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) +{ + u32 val; + + vlv_punit_get(dev_priv); + + val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); + if (enable) + val |= DSP_MAXFIFO_PM5_ENABLE; + else + val &= ~DSP_MAXFIFO_PM5_ENABLE; + vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val); + + vlv_punit_put(dev_priv); +} + +#define FW_WM(value, plane) \ + (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK) + +static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) +{ + bool was_enabled; + u32 val; + + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; + intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); + intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV); + } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) { + was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; + intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? 
FW_BLC_SELF_EN : 0); + intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF); + } else if (IS_PINEVIEW(dev_priv)) { + val = intel_uncore_read(&dev_priv->uncore, DSPFW3); + was_enabled = val & PINEVIEW_SELF_REFRESH_EN; + if (enable) + val |= PINEVIEW_SELF_REFRESH_EN; + else + val &= ~PINEVIEW_SELF_REFRESH_EN; + intel_uncore_write(&dev_priv->uncore, DSPFW3, val); + intel_uncore_posting_read(&dev_priv->uncore, DSPFW3); + } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) { + was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; + val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) : + _MASKED_BIT_DISABLE(FW_BLC_SELF_EN); + intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val); + intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF); + } else if (IS_I915GM(dev_priv)) { + /* + * FIXME can't find a bit like this for 915G, and + * yet it does have the related watermark in + * FW_BLC_SELF. What's going on? + */ + was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN; + val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) : + _MASKED_BIT_DISABLE(INSTPM_SELF_EN); + intel_uncore_write(&dev_priv->uncore, INSTPM, val); + intel_uncore_posting_read(&dev_priv->uncore, INSTPM); + } else { + return false; + } + + trace_intel_memory_cxsr(dev_priv, was_enabled, enable); + + drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n", + str_enabled_disabled(enable), + str_enabled_disabled(was_enabled)); + + return was_enabled; +} + +/** + * intel_set_memory_cxsr - Configure CxSR state + * @dev_priv: i915 device + * @enable: Allow vs. disallow CxSR + * + * Allow or disallow the system to enter a special CxSR + * (C-state self refresh) state. What typically happens in CxSR mode + * is that several display FIFOs may get combined into a single larger + * FIFO for a particular plane (so called max FIFO mode) to allow the + * system to defer memory fetches longer, and the memory will enter + * self refresh. + * + * Note that enabling CxSR does not guarantee that the system enter + * this special mode, nor does it guarantee that the system stays + * in that mode once entered. So this just allows/disallows the system + * to autonomously utilize the CxSR mode. Other factors such as core + * C-states will affect when/if the system actually enters/exits the + * CxSR mode. + * + * Note that on VLV/CHV this actually only controls the max FIFO mode, + * and the system is free to enter/exit memory self refresh at any time + * even when the use of CxSR has been disallowed. + * + * While the system is actually in the CxSR/max FIFO mode, some plane + * control registers will not get latched on vblank. Thus in order to + * guarantee the system will respond to changes in the plane registers + * we must always disallow CxSR prior to making changes to those registers. + * Unfortunately the system will re-evaluate the CxSR conditions at + * frame start which happens after vblank start (which is when the plane + * registers would get latched), so we can't proceed with the plane update + * during the same frame where we disallowed CxSR. + * + * Certain platforms also have a deeper HPLL SR mode. Fortunately the + * HPLL SR mode depends on CxSR itself, so we don't have to hand hold + * the hardware w.r.t. HPLL SR when writing to plane registers. + * Disallowing just CxSR is sufficient. 
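+ *
+ * An illustrative sequence following the rules above (the vblank wait
+ * helper named here is just an example of waiting out the current frame):
+ *
+ *	was_enabled = intel_set_memory_cxsr(dev_priv, false);
+ *	if (was_enabled)
+ *		intel_crtc_wait_for_next_vblank(crtc);
+ *	... write the plane registers ...
+ *	intel_set_memory_cxsr(dev_priv, true);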
+ */ +bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) +{ + bool ret; + + mutex_lock(&dev_priv->display.wm.wm_mutex); + ret = _intel_set_memory_cxsr(dev_priv, enable); + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + dev_priv->display.wm.vlv.cxsr = enable; + else if (IS_G4X(dev_priv)) + dev_priv->display.wm.g4x.cxsr = enable; + mutex_unlock(&dev_priv->display.wm.wm_mutex); + + return ret; +} + +/* + * Latency for FIFO fetches is dependent on several factors: + * - memory configuration (speed, channels) + * - chipset + * - current MCH state + * It can be fairly high in some situations, so here we assume a fairly + * pessimal value. It's a tradeoff between extra memory fetches (if we + * set this value too high, the FIFO will fetch frequently to stay full) + * and power consumption (set it too low to save power and we might see + * FIFO underruns and display "flicker"). + * + * A value of 5us seems to be a good balance; safe for very low end + * platforms but not overly aggressive on lower latency configs. + */ +static const int pessimal_latency_ns = 5000; + +#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \ + ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8)) + +static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; + enum pipe pipe = crtc->pipe; + int sprite0_start, sprite1_start; + u32 dsparb, dsparb2, dsparb3; + + switch (pipe) { + case PIPE_A: + dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); + dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); + sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0); + sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4); + break; + case PIPE_B: + dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); + dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); + sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8); + sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12); + break; + case PIPE_C: + dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); + dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3); + sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16); + sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20); + break; + default: + MISSING_CASE(pipe); + return; + } + + fifo_state->plane[PLANE_PRIMARY] = sprite0_start; + fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start; + fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start; + fifo_state->plane[PLANE_CURSOR] = 63; +} + +static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, + enum i9xx_plane_id i9xx_plane) +{ + u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); + int size; + + size = dsparb & 0x7f; + if (i9xx_plane == PLANE_B) + size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; + + drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", + dsparb, plane_name(i9xx_plane), size); + + return size; +} + +static int i830_get_fifo_size(struct drm_i915_private *dev_priv, + enum i9xx_plane_id i9xx_plane) +{ + u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); + int size; + + size = dsparb & 0x1ff; + if (i9xx_plane == PLANE_B) + size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; + size >>= 1; /* Convert to cachelines */ + + drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", + dsparb, plane_name(i9xx_plane), size); + + return 
size;
+}
+
+static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
+			      enum i9xx_plane_id i9xx_plane)
+{
+	u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
+	int size;
+
+	size = dsparb & 0x7f;
+	size >>= 2; /* Convert to cachelines */
+
+	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+		    dsparb, plane_name(i9xx_plane), size);
+
+	return size;
+}
+
+/* Pineview has different values for various configs */
+static const struct intel_watermark_params pnv_display_wm = {
+	.fifo_size = PINEVIEW_DISPLAY_FIFO,
+	.max_wm = PINEVIEW_MAX_WM,
+	.default_wm = PINEVIEW_DFT_WM,
+	.guard_size = PINEVIEW_GUARD_WM,
+	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params pnv_display_hplloff_wm = {
+	.fifo_size = PINEVIEW_DISPLAY_FIFO,
+	.max_wm = PINEVIEW_MAX_WM,
+	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
+	.guard_size = PINEVIEW_GUARD_WM,
+	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params pnv_cursor_wm = {
+	.fifo_size = PINEVIEW_CURSOR_FIFO,
+	.max_wm = PINEVIEW_CURSOR_MAX_WM,
+	.default_wm = PINEVIEW_CURSOR_DFT_WM,
+	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
+	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
+	.fifo_size = PINEVIEW_CURSOR_FIFO,
+	.max_wm = PINEVIEW_CURSOR_MAX_WM,
+	.default_wm = PINEVIEW_CURSOR_DFT_WM,
+	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
+	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params i965_cursor_wm_info = {
+	.fifo_size = I965_CURSOR_FIFO,
+	.max_wm = I965_CURSOR_MAX_WM,
+	.default_wm = I965_CURSOR_DFT_WM,
+	.guard_size = 2,
+	.cacheline_size = I915_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params i945_wm_info = {
+	.fifo_size = I945_FIFO_SIZE,
+	.max_wm = I915_MAX_WM,
+	.default_wm = 1,
+	.guard_size = 2,
+	.cacheline_size = I915_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params i915_wm_info = {
+	.fifo_size = I915_FIFO_SIZE,
+	.max_wm = I915_MAX_WM,
+	.default_wm = 1,
+	.guard_size = 2,
+	.cacheline_size = I915_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params i830_a_wm_info = {
+	.fifo_size = I855GM_FIFO_SIZE,
+	.max_wm = I915_MAX_WM,
+	.default_wm = 1,
+	.guard_size = 2,
+	.cacheline_size = I830_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params i830_bc_wm_info = {
+	.fifo_size = I855GM_FIFO_SIZE,
+	.max_wm = I915_MAX_WM / 2,
+	.default_wm = 1,
+	.guard_size = 2,
+	.cacheline_size = I830_FIFO_LINE_SIZE,
+};
+
+static const struct intel_watermark_params i845_wm_info = {
+	.fifo_size = I830_FIFO_SIZE,
+	.max_wm = I915_MAX_WM,
+	.default_wm = 1,
+	.guard_size = 2,
+	.cacheline_size = I830_FIFO_LINE_SIZE,
+};
+
+/**
+ * intel_wm_method1 - Method 1 / "small buffer" watermark formula
+ * @pixel_rate: Pipe pixel rate in kHz
+ * @cpp: Plane bytes per pixel
+ * @latency: Memory wakeup latency in 0.1us units
+ *
+ * Compute the watermark using the method 1 or "small buffer"
+ * formula. The caller may additionally add extra cachelines
+ * to account for TLB misses and clock crossings.
+ *
+ * This method is concerned with the short term drain rate
+ * of the FIFO, i.e. it does not account for blanking periods
+ * which would effectively reduce the average drain rate across
+ * a longer period. The name "small" refers to the fact that the
+ * FIFO is relatively small compared to the amount of data
+ * fetched.
+ *
+ * The FIFO level vs. time graph might look something like:
+ *
+ *   |\   |\
+ *   | \  | \
+ * __---__---__ (- plane active, _ blanking)
+ * -> time
+ *
+ * or perhaps like this:
+ *
+ *   |\|\  |\|\
+ * __----__----__ (- plane active, _ blanking)
+ * -> time
+ *
+ * Returns:
+ * The watermark in bytes
+ */
+static unsigned int intel_wm_method1(unsigned int pixel_rate,
+				     unsigned int cpp,
+				     unsigned int latency)
+{
+	u64 ret;
+
+	ret = mul_u32_u32(pixel_rate, cpp * latency);
+	ret = DIV_ROUND_UP_ULL(ret, 10000);
+
+	return ret;
+}
+
+/**
+ * intel_wm_method2 - Method 2 / "large buffer" watermark formula
+ * @pixel_rate: Pipe pixel rate in kHz
+ * @htotal: Pipe horizontal total
+ * @width: Plane width in pixels
+ * @cpp: Plane bytes per pixel
+ * @latency: Memory wakeup latency in 0.1us units
+ *
+ * Compute the watermark using the method 2 or "large buffer"
+ * formula. The caller may additionally add extra cachelines
+ * to account for TLB misses and clock crossings.
+ *
+ * This method is concerned with the long term drain rate
+ * of the FIFO, i.e. it does account for blanking periods
+ * which effectively reduce the average drain rate across
+ * a longer period. The name "large" refers to the fact that the
+ * FIFO is relatively large compared to the amount of data
+ * fetched.
+ *
+ * The FIFO level vs. time graph might look something like:
+ *
+ *    |\___       |\___
+ *    |    \___   |    \___
+ *    |        \  |        \
+ * __ --__--__--__--__--__--__ (- plane active, _ blanking)
+ * -> time
+ *
+ * Returns:
+ * The watermark in bytes
+ */
+static unsigned int intel_wm_method2(unsigned int pixel_rate,
+				     unsigned int htotal,
+				     unsigned int width,
+				     unsigned int cpp,
+				     unsigned int latency)
+{
+	unsigned int ret;
+
+	/*
+	 * FIXME remove once all users are computing
+	 * watermarks in the correct place.
+	 */
+	if (WARN_ON_ONCE(htotal == 0))
+		htotal = 1;
+
+	ret = (latency * pixel_rate) / (htotal * 10000);
+	ret = (ret + 1) * width * cpp;
+
+	return ret;
+}
+
+/**
+ * intel_calculate_wm - calculate watermark level
+ * @pixel_rate: pixel clock
+ * @wm: chip FIFO params
+ * @fifo_size: size of the FIFO buffer
+ * @cpp: bytes per pixel
+ * @latency_ns: memory latency for the platform
+ *
+ * Calculate the watermark level (the level at which the display plane will
+ * start fetching from memory again). Each chip has a different display
+ * FIFO size and allocation, so the caller needs to figure that out and pass
+ * in the correct intel_watermark_params structure.
+ *
+ * As the pixel clock runs, the FIFO will be drained at a rate that depends
+ * on the pixel size. When it reaches the watermark level, it'll start
+ * fetching FIFO line-sized chunks from memory until the FIFO fills
+ * past the watermark point. If the FIFO drains completely, a FIFO underrun
+ * will occur, and a display engine hang could result.
+ */
+static unsigned int intel_calculate_wm(int pixel_rate,
+				       const struct intel_watermark_params *wm,
+				       int fifo_size, int cpp,
+				       unsigned int latency_ns)
+{
+	int entries, wm_size;
+
+	/*
+	 * Note: we need to make sure we don't overflow for various clock &
+	 * latency values.
+	 * clocks go from a few thousand to several hundred thousand.
+	 * latency is usually a few thousand
+	 */
+	entries = intel_wm_method1(pixel_rate, cpp,
+				   latency_ns / 100);
+	entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
+		wm->guard_size;
+	DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);
+
+	wm_size = fifo_size - entries;
+	DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
+
+	/* Don't promote wm_size to unsigned...
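+	 * fifo_size - entries can go negative here, and an unsigned
+	 * compare against wm->max_wm would turn that negative value
+	 * into a huge one, bypassing the <= 0 clamp below.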
 */
+	if (wm_size > wm->max_wm)
+		wm_size = wm->max_wm;
+	if (wm_size <= 0)
+		wm_size = wm->default_wm;
+
+	/*
+	 * Bspec seems to indicate that the value shouldn't be lower than
+	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
+	 * Let's go for 8, which is the burst size, since certain platforms
+	 * already use a hardcoded 8 (which is what the spec says should be
+	 * done).
+	 */
+	if (wm_size <= 8)
+		wm_size = 8;
+
+	return wm_size;
+}
+
+static bool is_disabling(int old, int new, int threshold)
+{
+	return old >= threshold && new < threshold;
+}
+
+static bool is_enabling(int old, int new, int threshold)
+{
+	return old < threshold && new >= threshold;
+}
+
+static bool intel_crtc_active(struct intel_crtc *crtc)
+{
+	/* Be paranoid as we can arrive here with only partial
+	 * state retrieved from the hardware during setup.
+	 *
+	 * We can ditch the adjusted_mode.crtc_clock check as soon
+	 * as Haswell has gained clock readout/fastboot support.
+	 *
+	 * We can ditch the crtc->primary->state->fb check as soon as we can
+	 * properly reconstruct framebuffers.
+	 *
+	 * FIXME: The intel_crtc->active here should be switched to
+	 * crtc->state->active once we have proper CRTC states wired up
+	 * for atomic.
+	 */
+	return crtc && crtc->active && crtc->base.primary->state->fb &&
+		crtc->config->hw.adjusted_mode.crtc_clock;
+}
+
+static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
+{
+	struct intel_crtc *crtc, *enabled = NULL;
+
+	for_each_intel_crtc(&dev_priv->drm, crtc) {
+		if (intel_crtc_active(crtc)) {
+			if (enabled)
+				return NULL;
+			enabled = crtc;
+		}
+	}
+
+	return enabled;
+}
+
+static void pnv_update_wm(struct drm_i915_private *dev_priv)
+{
+	struct intel_crtc *crtc;
+	const struct cxsr_latency *latency;
+	u32 reg;
+	unsigned int wm;
+
+	latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
+					 dev_priv->is_ddr3,
+					 dev_priv->fsb_freq,
+					 dev_priv->mem_freq);
+	if (!latency) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "Unknown FSB/MEM found, disable CxSR\n");
+		intel_set_memory_cxsr(dev_priv, false);
+		return;
+	}
+
+	crtc = single_enabled_crtc(dev_priv);
+	if (crtc) {
+		const struct drm_framebuffer *fb =
+			crtc->base.primary->state->fb;
+		int pixel_rate = crtc->config->pixel_rate;
+		int cpp = fb->format->cpp[0];
+
+		/* Display SR */
+		wm = intel_calculate_wm(pixel_rate, &pnv_display_wm,
+					pnv_display_wm.fifo_size,
+					cpp, latency->display_sr);
+		reg = intel_uncore_read(&dev_priv->uncore, DSPFW1);
+		reg &= ~DSPFW_SR_MASK;
+		reg |= FW_WM(wm, SR);
+		intel_uncore_write(&dev_priv->uncore, DSPFW1, reg);
+		drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
+
+		/* cursor SR */
+		wm = intel_calculate_wm(pixel_rate, &pnv_cursor_wm,
+					pnv_display_wm.fifo_size,
+					4, latency->cursor_sr);
+		intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_CURSOR_SR_MASK,
+				 FW_WM(wm, CURSOR_SR));
+
+		/* Display HPLL off SR */
+		wm = intel_calculate_wm(pixel_rate, &pnv_display_hplloff_wm,
+					pnv_display_hplloff_wm.fifo_size,
+					cpp, latency->display_hpll_disable);
+		intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));
+
+		/* cursor HPLL off SR */
+		wm = intel_calculate_wm(pixel_rate, &pnv_cursor_hplloff_wm,
+					pnv_display_hplloff_wm.fifo_size,
+					4, latency->cursor_hpll_disable);
+		reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
+		reg &= ~DSPFW_HPLL_CURSOR_MASK;
+		reg |= FW_WM(wm, HPLL_CURSOR);
+		intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
+		drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
+
+		intel_set_memory_cxsr(dev_priv, true);
+
} else { + intel_set_memory_cxsr(dev_priv, false); + } +} + +/* + * Documentation says: + * "If the line size is small, the TLB fetches can get in the way of the + * data fetches, causing some lag in the pixel data return which is not + * accounted for in the above formulas. The following adjustment only + * needs to be applied if eight whole lines fit in the buffer at once. + * The WM is adjusted upwards by the difference between the FIFO size + * and the size of 8 whole lines. This adjustment is always performed + * in the actual pixel depth regardless of whether FBC is enabled or not." + */ +static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp) +{ + int tlb_miss = fifo_size * 64 - width * cpp * 8; + + return max(0, tlb_miss); +} + +static void g4x_write_wm_values(struct drm_i915_private *dev_priv, + const struct g4x_wm_values *wm) +{ + enum pipe pipe; + + for_each_pipe(dev_priv, pipe) + trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm); + + intel_uncore_write(&dev_priv->uncore, DSPFW1, + FW_WM(wm->sr.plane, SR) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA)); + intel_uncore_write(&dev_priv->uncore, DSPFW2, + (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) | + FW_WM(wm->sr.fbc, FBC_SR) | + FW_WM(wm->hpll.fbc, FBC_HPLL_SR) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA)); + intel_uncore_write(&dev_priv->uncore, DSPFW3, + (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) | + FW_WM(wm->sr.cursor, CURSOR_SR) | + FW_WM(wm->hpll.cursor, HPLL_CURSOR) | + FW_WM(wm->hpll.plane, HPLL_SR)); + + intel_uncore_posting_read(&dev_priv->uncore, DSPFW1); +} + +#define FW_WM_VLV(value, plane) \ + (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV) + +static void vlv_write_wm_values(struct drm_i915_private *dev_priv, + const struct vlv_wm_values *wm) +{ + enum pipe pipe; + + for_each_pipe(dev_priv, pipe) { + trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm); + + intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe), + (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) | + (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) | + (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) | + (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT)); + } + + /* + * Zero the (unused) WM1 watermarks, and also clear all the + * high order bits so that there are no out of bounds values + * present in the registers during the reprogramming. 
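+	 *
+	 * (DSPFW1-3 and DSPFW7-9 hold the low 8 bits of each watermark;
+	 * DSPHOWM holds bit 8 of the plane/sprite values and bits 9+ of
+	 * the SR value, hence the '>> 8' and '>> 9' in the writes below.)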
+ */ + intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0); + intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0); + intel_uncore_write(&dev_priv->uncore, DSPFW4, 0); + intel_uncore_write(&dev_priv->uncore, DSPFW5, 0); + intel_uncore_write(&dev_priv->uncore, DSPFW6, 0); + + intel_uncore_write(&dev_priv->uncore, DSPFW1, + FW_WM(wm->sr.plane, SR) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) | + FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) | + FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA)); + intel_uncore_write(&dev_priv->uncore, DSPFW2, + FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) | + FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA)); + intel_uncore_write(&dev_priv->uncore, DSPFW3, + FW_WM(wm->sr.cursor, CURSOR_SR)); + + if (IS_CHERRYVIEW(dev_priv)) { + intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV, + FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | + FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); + intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV, + FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) | + FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE)); + intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV, + FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) | + FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC)); + intel_uncore_write(&dev_priv->uncore, DSPHOWM, + FW_WM(wm->sr.plane >> 9, SR_HI) | + FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) | + FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) | + FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); + } else { + intel_uncore_write(&dev_priv->uncore, DSPFW7, + FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | + FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); + intel_uncore_write(&dev_priv->uncore, DSPHOWM, + FW_WM(wm->sr.plane >> 9, SR_HI) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); + } + + intel_uncore_posting_read(&dev_priv->uncore, DSPFW1); +} + +#undef FW_WM_VLV + +static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv) +{ + /* all latencies in usec */ + dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5; + dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12; + dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35; + + dev_priv->display.wm.num_levels = G4X_WM_LEVEL_HPLL + 1; +} + +static int g4x_plane_fifo_size(enum plane_id plane_id, int level) +{ + /* + * DSPCNTR[13] supposedly controls whether the + * primary plane can use the FIFO space otherwise + * reserved for the sprite plane. It's not 100% clear + * what the actual FIFO size is, but it looks like we + * can happily set both primary and sprite watermarks + * up to 127 cachelines. 
So that would seem to mean + * that either DSPCNTR[13] doesn't do anything, or that + * the total FIFO is >= 256 cachelines in size. Either + * way, we don't seem to have to worry about this + * repartitioning as the maximum watermark value the + * register can hold for each plane is lower than the + * minimum FIFO size. + */ + switch (plane_id) { + case PLANE_CURSOR: + return 63; + case PLANE_PRIMARY: + return level == G4X_WM_LEVEL_NORMAL ? 127 : 511; + case PLANE_SPRITE0: + return level == G4X_WM_LEVEL_NORMAL ? 127 : 0; + default: + MISSING_CASE(plane_id); + return 0; + } +} + +static int g4x_fbc_fifo_size(int level) +{ + switch (level) { + case G4X_WM_LEVEL_SR: + return 7; + case G4X_WM_LEVEL_HPLL: + return 15; + default: + MISSING_CASE(level); + return 0; + } +} + +static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + int level) +{ + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + const struct drm_display_mode *pipe_mode = + &crtc_state->hw.pipe_mode; + unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10; + unsigned int pixel_rate, htotal, cpp, width, wm; + + if (latency == 0) + return USHRT_MAX; + + if (!intel_wm_plane_visible(crtc_state, plane_state)) + return 0; + + cpp = plane_state->hw.fb->format->cpp[0]; + + /* + * WaUse32BppForSRWM:ctg,elk + * + * The spec fails to list this restriction for the + * HPLL watermark, which seems a little strange. + * Let's use 32bpp for the HPLL watermark as well. + */ + if (plane->id == PLANE_PRIMARY && + level != G4X_WM_LEVEL_NORMAL) + cpp = max(cpp, 4u); + + pixel_rate = crtc_state->pixel_rate; + htotal = pipe_mode->crtc_htotal; + width = drm_rect_width(&plane_state->uapi.src) >> 16; + + if (plane->id == PLANE_CURSOR) { + wm = intel_wm_method2(pixel_rate, htotal, width, cpp, latency); + } else if (plane->id == PLANE_PRIMARY && + level == G4X_WM_LEVEL_NORMAL) { + wm = intel_wm_method1(pixel_rate, cpp, latency); + } else { + unsigned int small, large; + + small = intel_wm_method1(pixel_rate, cpp, latency); + large = intel_wm_method2(pixel_rate, htotal, width, cpp, latency); + + wm = min(small, large); + } + + wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level), + width, cpp); + + wm = DIV_ROUND_UP(wm, 64) + 2; + + return min_t(unsigned int, wm, USHRT_MAX); +} + +static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state, + int level, enum plane_id plane_id, u16 value) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + bool dirty = false; + + for (; level < dev_priv->display.wm.num_levels; level++) { + struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; + + dirty |= raw->plane[plane_id] != value; + raw->plane[plane_id] = value; + } + + return dirty; +} + +static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state, + int level, u16 value) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + bool dirty = false; + + /* NORMAL level doesn't have an FBC watermark */ + level = max(level, G4X_WM_LEVEL_SR); + + for (; level < dev_priv->display.wm.num_levels; level++) { + struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; + + dirty |= raw->fbc != value; + raw->fbc = value; + } + + return dirty; +} + +static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + u32 pri_val); + +static bool g4x_raw_plane_wm_compute(struct intel_crtc_state 
*crtc_state, + const struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + enum plane_id plane_id = plane->id; + bool dirty = false; + int level; + + if (!intel_wm_plane_visible(crtc_state, plane_state)) { + dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0); + if (plane_id == PLANE_PRIMARY) + dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0); + goto out; + } + + for (level = 0; level < dev_priv->display.wm.num_levels; level++) { + struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; + int wm, max_wm; + + wm = g4x_compute_wm(crtc_state, plane_state, level); + max_wm = g4x_plane_fifo_size(plane_id, level); + + if (wm > max_wm) + break; + + dirty |= raw->plane[plane_id] != wm; + raw->plane[plane_id] = wm; + + if (plane_id != PLANE_PRIMARY || + level == G4X_WM_LEVEL_NORMAL) + continue; + + wm = ilk_compute_fbc_wm(crtc_state, plane_state, + raw->plane[plane_id]); + max_wm = g4x_fbc_fifo_size(level); + + /* + * FBC wm is not mandatory as we + * can always just disable its use. + */ + if (wm > max_wm) + wm = USHRT_MAX; + + dirty |= raw->fbc != wm; + raw->fbc = wm; + } + + /* mark watermarks as invalid */ + dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX); + + if (plane_id == PLANE_PRIMARY) + dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX); + + out: + if (dirty) { + drm_dbg_kms(&dev_priv->drm, + "%s watermarks: normal=%d, SR=%d, HPLL=%d\n", + plane->base.name, + crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id], + crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id], + crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]); + + if (plane_id == PLANE_PRIMARY) + drm_dbg_kms(&dev_priv->drm, + "FBC watermarks: SR=%d, HPLL=%d\n", + crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc, + crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc); + } + + return dirty; +} + +static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state, + enum plane_id plane_id, int level) +{ + const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; + + return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level); +} + +static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, + int level) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + + if (level >= dev_priv->display.wm.num_levels) + return false; + + return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) && + g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) && + g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level); +} + +/* mark all levels starting from 'level' as invalid */ +static void g4x_invalidate_wms(struct intel_crtc *crtc, + struct g4x_wm_state *wm_state, int level) +{ + if (level <= G4X_WM_LEVEL_NORMAL) { + enum plane_id plane_id; + + for_each_plane_id_on_crtc(crtc, plane_id) + wm_state->wm.plane[plane_id] = USHRT_MAX; + } + + if (level <= G4X_WM_LEVEL_SR) { + wm_state->cxsr = false; + wm_state->sr.cursor = USHRT_MAX; + wm_state->sr.plane = USHRT_MAX; + wm_state->sr.fbc = USHRT_MAX; + } + + if (level <= G4X_WM_LEVEL_HPLL) { + wm_state->hpll_en = false; + wm_state->hpll.cursor = USHRT_MAX; + wm_state->hpll.plane = USHRT_MAX; + wm_state->hpll.fbc = USHRT_MAX; + } +} + +static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state, + int level) +{ + if (level < G4X_WM_LEVEL_SR) + return false; + + if (level >= G4X_WM_LEVEL_SR && + wm_state->sr.fbc > 
g4x_fbc_fifo_size(G4X_WM_LEVEL_SR)) + return false; + + if (level >= G4X_WM_LEVEL_HPLL && + wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL)) + return false; + + return true; +} + +static int _g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal; + u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); + const struct g4x_pipe_wm *raw; + enum plane_id plane_id; + int level; + + level = G4X_WM_LEVEL_NORMAL; + if (!g4x_raw_crtc_wm_is_valid(crtc_state, level)) + goto out; + + raw = &crtc_state->wm.g4x.raw[level]; + for_each_plane_id_on_crtc(crtc, plane_id) + wm_state->wm.plane[plane_id] = raw->plane[plane_id]; + + level = G4X_WM_LEVEL_SR; + if (!g4x_raw_crtc_wm_is_valid(crtc_state, level)) + goto out; + + raw = &crtc_state->wm.g4x.raw[level]; + wm_state->sr.plane = raw->plane[PLANE_PRIMARY]; + wm_state->sr.cursor = raw->plane[PLANE_CURSOR]; + wm_state->sr.fbc = raw->fbc; + + wm_state->cxsr = active_planes == BIT(PLANE_PRIMARY); + + level = G4X_WM_LEVEL_HPLL; + if (!g4x_raw_crtc_wm_is_valid(crtc_state, level)) + goto out; + + raw = &crtc_state->wm.g4x.raw[level]; + wm_state->hpll.plane = raw->plane[PLANE_PRIMARY]; + wm_state->hpll.cursor = raw->plane[PLANE_CURSOR]; + wm_state->hpll.fbc = raw->fbc; + + wm_state->hpll_en = wm_state->cxsr; + + level++; + + out: + if (level == G4X_WM_LEVEL_NORMAL) + return -EINVAL; + + /* invalidate the higher levels */ + g4x_invalidate_wms(crtc, wm_state, level); + + /* + * Determine if the FBC watermark(s) can be used. If + * this isn't the case we prefer to disable the FBC + * watermark(s) rather than disable the SR/HPLL + * level(s) entirely. 'level-1' is the highest valid + * level here.
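+ * For example, if only the HPLL level failed to validate, + * 'level - 1' is G4X_WM_LEVEL_SR here and just the SR FBC + * watermark gets checked.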
+ */ + wm_state->fbc_en = g4x_compute_fbc_en(wm_state, level - 1); + + return 0; +} + +static int g4x_compute_pipe_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *old_plane_state; + const struct intel_plane_state *new_plane_state; + struct intel_plane *plane; + unsigned int dirty = 0; + int i; + + for_each_oldnew_intel_plane_in_state(state, plane, + old_plane_state, + new_plane_state, i) { + if (new_plane_state->hw.crtc != &crtc->base && + old_plane_state->hw.crtc != &crtc->base) + continue; + + if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state)) + dirty |= BIT(plane->id); + } + + if (!dirty) + return 0; + + return _g4x_compute_pipe_wm(crtc_state); +} + +static int g4x_compute_intermediate_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate; + const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal; + const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal; + enum plane_id plane_id; + + if (!new_crtc_state->hw.active || + intel_crtc_needs_modeset(new_crtc_state)) { + *intermediate = *optimal; + + intermediate->cxsr = false; + intermediate->hpll_en = false; + goto out; + } + + intermediate->cxsr = optimal->cxsr && active->cxsr && + !new_crtc_state->disable_cxsr; + intermediate->hpll_en = optimal->hpll_en && active->hpll_en && + !new_crtc_state->disable_cxsr; + intermediate->fbc_en = optimal->fbc_en && active->fbc_en; + + for_each_plane_id_on_crtc(crtc, plane_id) { + intermediate->wm.plane[plane_id] = + max(optimal->wm.plane[plane_id], + active->wm.plane[plane_id]); + + drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] > + g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL)); + } + + intermediate->sr.plane = max(optimal->sr.plane, + active->sr.plane); + intermediate->sr.cursor = max(optimal->sr.cursor, + active->sr.cursor); + intermediate->sr.fbc = max(optimal->sr.fbc, + active->sr.fbc); + + intermediate->hpll.plane = max(optimal->hpll.plane, + active->hpll.plane); + intermediate->hpll.cursor = max(optimal->hpll.cursor, + active->hpll.cursor); + intermediate->hpll.fbc = max(optimal->hpll.fbc, + active->hpll.fbc); + + drm_WARN_ON(&dev_priv->drm, + (intermediate->sr.plane > + g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) || + intermediate->sr.cursor > + g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) && + intermediate->cxsr); + drm_WARN_ON(&dev_priv->drm, + (intermediate->sr.plane > + g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) || + intermediate->sr.cursor > + g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) && + intermediate->hpll_en); + + drm_WARN_ON(&dev_priv->drm, + intermediate->sr.fbc > g4x_fbc_fifo_size(1) && + intermediate->fbc_en && intermediate->cxsr); + drm_WARN_ON(&dev_priv->drm, + intermediate->hpll.fbc > g4x_fbc_fifo_size(2) && + intermediate->fbc_en && intermediate->hpll_en); + +out: + /* + * If our intermediate WM are identical to the final WM, then we can + * omit the post-vblank programming; only update if it's different. 
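+ * The need_postvbl_update flag set below is what makes + * g4x_optimize_watermarks() program the optimal values after + * the vblank.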
+ */ + if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0) + new_crtc_state->wm.need_postvbl_update = true; + + return 0; +} + +static void g4x_merge_wm(struct drm_i915_private *dev_priv, + struct g4x_wm_values *wm) +{ + struct intel_crtc *crtc; + int num_active_pipes = 0; + + wm->cxsr = true; + wm->hpll_en = true; + wm->fbc_en = true; + + for_each_intel_crtc(&dev_priv->drm, crtc) { + const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x; + + if (!crtc->active) + continue; + + if (!wm_state->cxsr) + wm->cxsr = false; + if (!wm_state->hpll_en) + wm->hpll_en = false; + if (!wm_state->fbc_en) + wm->fbc_en = false; + + num_active_pipes++; + } + + if (num_active_pipes != 1) { + wm->cxsr = false; + wm->hpll_en = false; + wm->fbc_en = false; + } + + for_each_intel_crtc(&dev_priv->drm, crtc) { + const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x; + enum pipe pipe = crtc->pipe; + + wm->pipe[pipe] = wm_state->wm; + if (crtc->active && wm->cxsr) + wm->sr = wm_state->sr; + if (crtc->active && wm->hpll_en) + wm->hpll = wm_state->hpll; + } +} + +static void g4x_program_watermarks(struct drm_i915_private *dev_priv) +{ + struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x; + struct g4x_wm_values new_wm = {}; + + g4x_merge_wm(dev_priv, &new_wm); + + if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0) + return; + + if (is_disabling(old_wm->cxsr, new_wm.cxsr, true)) + _intel_set_memory_cxsr(dev_priv, false); + + g4x_write_wm_values(dev_priv, &new_wm); + + if (is_enabling(old_wm->cxsr, new_wm.cxsr, true)) + _intel_set_memory_cxsr(dev_priv, true); + + *old_wm = new_wm; +} + +static void g4x_initial_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + mutex_lock(&dev_priv->display.wm.wm_mutex); + crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate; + g4x_program_watermarks(dev_priv); + mutex_unlock(&dev_priv->display.wm.wm_mutex); +} + +static void g4x_optimize_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + if (!crtc_state->wm.need_postvbl_update) + return; + + mutex_lock(&dev_priv->display.wm.wm_mutex); + crtc->wm.active.g4x = crtc_state->wm.g4x.optimal; + g4x_program_watermarks(dev_priv); + mutex_unlock(&dev_priv->display.wm.wm_mutex); +} + +/* latency must be in 0.1us units. 
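+ * The caller converts the usec values stored in + * dev_priv->display.wm.pri_latency[] by multiplying by 10, + * e.g. the 3 usec PM2 latency is passed in as 30.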
*/ +static unsigned int vlv_wm_method2(unsigned int pixel_rate, + unsigned int htotal, + unsigned int width, + unsigned int cpp, + unsigned int latency) +{ + unsigned int ret; + + ret = intel_wm_method2(pixel_rate, htotal, + width, cpp, latency); + ret = DIV_ROUND_UP(ret, 64); + + return ret; +} + +static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv) +{ + /* all latencies in usec */ + dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3; + + dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM2 + 1; + + if (IS_CHERRYVIEW(dev_priv)) { + dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12; + dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33; + + dev_priv->display.wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1; + } +} + +static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + int level) +{ + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + const struct drm_display_mode *pipe_mode = + &crtc_state->hw.pipe_mode; + unsigned int pixel_rate, htotal, cpp, width, wm; + + if (dev_priv->display.wm.pri_latency[level] == 0) + return USHRT_MAX; + + if (!intel_wm_plane_visible(crtc_state, plane_state)) + return 0; + + cpp = plane_state->hw.fb->format->cpp[0]; + pixel_rate = crtc_state->pixel_rate; + htotal = pipe_mode->crtc_htotal; + width = drm_rect_width(&plane_state->uapi.src) >> 16; + + if (plane->id == PLANE_CURSOR) { + /* + * FIXME the formula gives values that are + * too big for the cursor FIFO, and hence we + * would never be able to use cursors. For + * now just hardcode the watermark. + */ + wm = 63; + } else { + wm = vlv_wm_method2(pixel_rate, htotal, width, cpp, + dev_priv->display.wm.pri_latency[level] * 10); + } + + return min_t(unsigned int, wm, USHRT_MAX); +} + +static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes) +{ + return (active_planes & (BIT(PLANE_SPRITE0) | + BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1); +} + +static int vlv_compute_fifo(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct g4x_pipe_wm *raw = + &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2]; + struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; + u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); + int num_active_planes = hweight8(active_planes); + const int fifo_size = 511; + int fifo_extra, fifo_left = fifo_size; + int sprite0_fifo_extra = 0; + unsigned int total_rate; + enum plane_id plane_id; + + /* + * When enabling sprite0 after sprite1 has already been enabled + * we tend to get an underrun unless sprite0 already has some + * FIFO space allocated. Hence we always allocate at least one + * cacheline for sprite0 whenever sprite1 is enabled. + * + * All other plane enable sequences appear immune to this problem.
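+ * (FIFO sizes here are in 64 byte cachelines, matching the + * DIV_ROUND_UP(ret, 64) conversion in vlv_wm_method2() above.)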
+ */ + if (vlv_need_sprite0_fifo_workaround(active_planes)) + sprite0_fifo_extra = 1; + + total_rate = raw->plane[PLANE_PRIMARY] + + raw->plane[PLANE_SPRITE0] + + raw->plane[PLANE_SPRITE1] + + sprite0_fifo_extra; + + if (total_rate > fifo_size) + return -EINVAL; + + if (total_rate == 0) + total_rate = 1; + + for_each_plane_id_on_crtc(crtc, plane_id) { + unsigned int rate; + + if ((active_planes & BIT(plane_id)) == 0) { + fifo_state->plane[plane_id] = 0; + continue; + } + + rate = raw->plane[plane_id]; + fifo_state->plane[plane_id] = fifo_size * rate / total_rate; + fifo_left -= fifo_state->plane[plane_id]; + } + + fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra; + fifo_left -= sprite0_fifo_extra; + + fifo_state->plane[PLANE_CURSOR] = 63; + + fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1); + + /* spread the remainder evenly */ + for_each_plane_id_on_crtc(crtc, plane_id) { + int plane_extra; + + if (fifo_left == 0) + break; + + if ((active_planes & BIT(plane_id)) == 0) + continue; + + plane_extra = min(fifo_extra, fifo_left); + fifo_state->plane[plane_id] += plane_extra; + fifo_left -= plane_extra; + } + + drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0); + + /* give it all to the first plane if none are active */ + if (active_planes == 0) { + drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size); + fifo_state->plane[PLANE_PRIMARY] = fifo_left; + } + + return 0; +} + +/* mark all levels starting from 'level' as invalid */ +static void vlv_invalidate_wms(struct intel_crtc *crtc, + struct vlv_wm_state *wm_state, int level) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + + for (; level < dev_priv->display.wm.num_levels; level++) { + enum plane_id plane_id; + + for_each_plane_id_on_crtc(crtc, plane_id) + wm_state->wm[level].plane[plane_id] = USHRT_MAX; + + wm_state->sr[level].cursor = USHRT_MAX; + wm_state->sr[level].plane = USHRT_MAX; + } +} + +static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size) +{ + if (wm > fifo_size) + return USHRT_MAX; + else + return fifo_size - wm; +} + +/* + * Starting from 'level' set all higher + * levels to 'value' in the "raw" watermarks. + */ +static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state, + int level, enum plane_id plane_id, u16 value) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + bool dirty = false; + + for (; level < dev_priv->display.wm.num_levels; level++) { + struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; + + dirty |= raw->plane[plane_id] != value; + raw->plane[plane_id] = value; + } + + return dirty; +} + +static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + enum plane_id plane_id = plane->id; + int level; + bool dirty = false; + + if (!intel_wm_plane_visible(crtc_state, plane_state)) { + dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0); + goto out; + } + + for (level = 0; level < dev_priv->display.wm.num_levels; level++) { + struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; + int wm = vlv_compute_wm_level(crtc_state, plane_state, level); + int max_wm = plane_id == PLANE_CURSOR ? 
63 : 511; + + if (wm > max_wm) + break; + + dirty |= raw->plane[plane_id] != wm; + raw->plane[plane_id] = wm; + } + + /* mark all higher levels as invalid */ + dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX); + +out: + if (dirty) + drm_dbg_kms(&dev_priv->drm, + "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n", + plane->base.name, + crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id], + crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id], + crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]); + + return dirty; +} + +static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state, + enum plane_id plane_id, int level) +{ + const struct g4x_pipe_wm *raw = + &crtc_state->wm.vlv.raw[level]; + const struct vlv_fifo_state *fifo_state = + &crtc_state->wm.vlv.fifo_state; + + return raw->plane[plane_id] <= fifo_state->plane[plane_id]; +} + +static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level) +{ + return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) && + vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) && + vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) && + vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level); +} + +static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal; + const struct vlv_fifo_state *fifo_state = + &crtc_state->wm.vlv.fifo_state; + u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); + int num_active_planes = hweight8(active_planes); + enum plane_id plane_id; + int level; + + /* initially allow all levels */ + wm_state->num_levels = dev_priv->display.wm.num_levels; + /* + * Note that enabling cxsr with no primary/sprite planes + * enabled can wedge the pipe. Hence we only allow cxsr + * with exactly one enabled primary/sprite plane. 
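+ * Note that num_active_planes counts only primary/sprite + * planes, as the cursor is masked out of active_planes above, + * so e.g. primary + cursor still qualifies for cxsr.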
+ */ + wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1; + + for (level = 0; level < wm_state->num_levels; level++) { + const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; + const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1; + + if (!vlv_raw_crtc_wm_is_valid(crtc_state, level)) + break; + + for_each_plane_id_on_crtc(crtc, plane_id) { + wm_state->wm[level].plane[plane_id] = + vlv_invert_wm_value(raw->plane[plane_id], + fifo_state->plane[plane_id]); + } + + wm_state->sr[level].plane = + vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY], + raw->plane[PLANE_SPRITE0], + raw->plane[PLANE_SPRITE1]), + sr_fifo_size); + + wm_state->sr[level].cursor = + vlv_invert_wm_value(raw->plane[PLANE_CURSOR], + 63); + } + + if (level == 0) + return -EINVAL; + + /* limit to only levels we can actually handle */ + wm_state->num_levels = level; + + /* invalidate the higher levels */ + vlv_invalidate_wms(crtc, wm_state, level); + + return 0; +} + +static int vlv_compute_pipe_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *old_plane_state; + const struct intel_plane_state *new_plane_state; + struct intel_plane *plane; + unsigned int dirty = 0; + int i; + + for_each_oldnew_intel_plane_in_state(state, plane, + old_plane_state, + new_plane_state, i) { + if (new_plane_state->hw.crtc != &crtc->base && + old_plane_state->hw.crtc != &crtc->base) + continue; + + if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state)) + dirty |= BIT(plane->id); + } + + /* + * DSPARB registers may have been reset due to the + * power well being turned off. Make sure we restore + * them to a consistent state even if no primary/sprite + * planes are initially active. We also force a FIFO + * recomputation so that we are sure to sanitize the + * FIFO setting we took over from the BIOS even if there + * are no active planes on the crtc. 
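+ * To that end a full modeset marks every plane dirty below, + * forcing both the FIFO split and the watermarks to be + * recomputed.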
+ */ + if (intel_crtc_needs_modeset(crtc_state)) + dirty = ~0; + + if (!dirty) + return 0; + + /* cursor changes don't warrant a FIFO recompute */ + if (dirty & ~BIT(PLANE_CURSOR)) { + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + const struct vlv_fifo_state *old_fifo_state = + &old_crtc_state->wm.vlv.fifo_state; + const struct vlv_fifo_state *new_fifo_state = + &crtc_state->wm.vlv.fifo_state; + int ret; + + ret = vlv_compute_fifo(crtc_state); + if (ret) + return ret; + + if (intel_crtc_needs_modeset(crtc_state) || + memcmp(old_fifo_state, new_fifo_state, + sizeof(*new_fifo_state)) != 0) + crtc_state->fifo_changed = true; + } + + return _vlv_compute_pipe_wm(crtc_state); +} + +#define VLV_FIFO(plane, value) \ + (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV) + +static void vlv_atomic_update_fifo(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_uncore *uncore = &dev_priv->uncore; + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct vlv_fifo_state *fifo_state = + &crtc_state->wm.vlv.fifo_state; + int sprite0_start, sprite1_start, fifo_size; + u32 dsparb, dsparb2, dsparb3; + + if (!crtc_state->fifo_changed) + return; + + sprite0_start = fifo_state->plane[PLANE_PRIMARY]; + sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start; + fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start; + + drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63); + drm_WARN_ON(&dev_priv->drm, fifo_size != 511); + + trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size); + + /* + * uncore.lock serves a double purpose here. It allows us to + * use the less expensive I915_{READ,WRITE}_FW() functions, and + * it protects the DSPARB registers from getting clobbered by + * parallel updates from multiple pipes. + * + * intel_pipe_update_start() has already disabled interrupts + * for us, so a plain spin_lock() is sufficient here. 
+ */ + spin_lock(&uncore->lock); + + switch (crtc->pipe) { + case PIPE_A: + dsparb = intel_uncore_read_fw(uncore, DSPARB); + dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); + + dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) | + VLV_FIFO(SPRITEB, 0xff)); + dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) | + VLV_FIFO(SPRITEB, sprite1_start)); + + dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) | + VLV_FIFO(SPRITEB_HI, 0x1)); + dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) | + VLV_FIFO(SPRITEB_HI, sprite1_start >> 8)); + + intel_uncore_write_fw(uncore, DSPARB, dsparb); + intel_uncore_write_fw(uncore, DSPARB2, dsparb2); + break; + case PIPE_B: + dsparb = intel_uncore_read_fw(uncore, DSPARB); + dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); + + dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) | + VLV_FIFO(SPRITED, 0xff)); + dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) | + VLV_FIFO(SPRITED, sprite1_start)); + + dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) | + VLV_FIFO(SPRITED_HI, 0xff)); + dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) | + VLV_FIFO(SPRITED_HI, sprite1_start >> 8)); + + intel_uncore_write_fw(uncore, DSPARB, dsparb); + intel_uncore_write_fw(uncore, DSPARB2, dsparb2); + break; + case PIPE_C: + dsparb3 = intel_uncore_read_fw(uncore, DSPARB3); + dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); + + dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) | + VLV_FIFO(SPRITEF, 0xff)); + dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) | + VLV_FIFO(SPRITEF, sprite1_start)); + + dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) | + VLV_FIFO(SPRITEF_HI, 0xff)); + dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) | + VLV_FIFO(SPRITEF_HI, sprite1_start >> 8)); + + intel_uncore_write_fw(uncore, DSPARB3, dsparb3); + intel_uncore_write_fw(uncore, DSPARB2, dsparb2); + break; + default: + break; + } + + intel_uncore_posting_read_fw(uncore, DSPARB); + + spin_unlock(&uncore->lock); +} + +#undef VLV_FIFO + +static int vlv_compute_intermediate_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate; + const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal; + const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal; + int level; + + if (!new_crtc_state->hw.active || + intel_crtc_needs_modeset(new_crtc_state)) { + *intermediate = *optimal; + + intermediate->cxsr = false; + goto out; + } + + intermediate->num_levels = min(optimal->num_levels, active->num_levels); + intermediate->cxsr = optimal->cxsr && active->cxsr && + !new_crtc_state->disable_cxsr; + + for (level = 0; level < intermediate->num_levels; level++) { + enum plane_id plane_id; + + for_each_plane_id_on_crtc(crtc, plane_id) { + intermediate->wm[level].plane[plane_id] = + min(optimal->wm[level].plane[plane_id], + active->wm[level].plane[plane_id]); + } + + intermediate->sr[level].plane = min(optimal->sr[level].plane, + active->sr[level].plane); + intermediate->sr[level].cursor = min(optimal->sr[level].cursor, + active->sr[level].cursor); + } + + vlv_invalidate_wms(crtc, intermediate, level); + +out: + /* + * If our intermediate WM are identical to the final WM, then we can + * omit the post-vblank programming; only update if it's different. 
+ */ + if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0) + new_crtc_state->wm.need_postvbl_update = true; + + return 0; +} + +static void vlv_merge_wm(struct drm_i915_private *dev_priv, + struct vlv_wm_values *wm) +{ + struct intel_crtc *crtc; + int num_active_pipes = 0; + + wm->level = dev_priv->display.wm.num_levels - 1; + wm->cxsr = true; + + for_each_intel_crtc(&dev_priv->drm, crtc) { + const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv; + + if (!crtc->active) + continue; + + if (!wm_state->cxsr) + wm->cxsr = false; + + num_active_pipes++; + wm->level = min_t(int, wm->level, wm_state->num_levels - 1); + } + + if (num_active_pipes != 1) + wm->cxsr = false; + + if (num_active_pipes > 1) + wm->level = VLV_WM_LEVEL_PM2; + + for_each_intel_crtc(&dev_priv->drm, crtc) { + const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv; + enum pipe pipe = crtc->pipe; + + wm->pipe[pipe] = wm_state->wm[wm->level]; + if (crtc->active && wm->cxsr) + wm->sr = wm_state->sr[wm->level]; + + wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2; + wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2; + wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2; + wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2; + } +} + +static void vlv_program_watermarks(struct drm_i915_private *dev_priv) +{ + struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv; + struct vlv_wm_values new_wm = {}; + + vlv_merge_wm(dev_priv, &new_wm); + + if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0) + return; + + if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS)) + chv_set_memory_dvfs(dev_priv, false); + + if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5)) + chv_set_memory_pm5(dev_priv, false); + + if (is_disabling(old_wm->cxsr, new_wm.cxsr, true)) + _intel_set_memory_cxsr(dev_priv, false); + + vlv_write_wm_values(dev_priv, &new_wm); + + if (is_enabling(old_wm->cxsr, new_wm.cxsr, true)) + _intel_set_memory_cxsr(dev_priv, true); + + if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5)) + chv_set_memory_pm5(dev_priv, true); + + if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS)) + chv_set_memory_dvfs(dev_priv, true); + + *old_wm = new_wm; +} + +static void vlv_initial_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + mutex_lock(&dev_priv->display.wm.wm_mutex); + crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate; + vlv_program_watermarks(dev_priv); + mutex_unlock(&dev_priv->display.wm.wm_mutex); +} + +static void vlv_optimize_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + if (!crtc_state->wm.need_postvbl_update) + return; + + mutex_lock(&dev_priv->display.wm.wm_mutex); + crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; + vlv_program_watermarks(dev_priv); + mutex_unlock(&dev_priv->display.wm.wm_mutex); +} + +static void i965_update_wm(struct drm_i915_private *dev_priv) +{ + struct intel_crtc *crtc; + int srwm = 1; + int cursor_sr = 16; + bool cxsr_enabled; + + /* Calc sr entries for one plane configs */ + crtc = single_enabled_crtc(dev_priv); + if (crtc) { + /* self-refresh has much higher latency */ + static const int sr_latency_ns = 12000; + const struct 
drm_display_mode *pipe_mode = + &crtc->config->hw.pipe_mode; + const struct drm_framebuffer *fb = + crtc->base.primary->state->fb; + int pixel_rate = crtc->config->pixel_rate; + int htotal = pipe_mode->crtc_htotal; + int width = drm_rect_width(&crtc->base.primary->state->src) >> 16; + int cpp = fb->format->cpp[0]; + int entries; + + entries = intel_wm_method2(pixel_rate, htotal, + width, cpp, sr_latency_ns / 100); + entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); + srwm = I965_FIFO_SIZE - entries; + if (srwm < 0) + srwm = 1; + srwm &= 0x1ff; + drm_dbg_kms(&dev_priv->drm, + "self-refresh entries: %d, wm: %d\n", + entries, srwm); + + entries = intel_wm_method2(pixel_rate, htotal, + crtc->base.cursor->state->crtc_w, 4, + sr_latency_ns / 100); + entries = DIV_ROUND_UP(entries, + i965_cursor_wm_info.cacheline_size) + + i965_cursor_wm_info.guard_size; + + cursor_sr = i965_cursor_wm_info.fifo_size - entries; + if (cursor_sr > i965_cursor_wm_info.max_wm) + cursor_sr = i965_cursor_wm_info.max_wm; + + drm_dbg_kms(&dev_priv->drm, + "self-refresh watermark: display plane %d " + "cursor %d\n", srwm, cursor_sr); + + cxsr_enabled = true; + } else { + cxsr_enabled = false; + /* Turn off self refresh if both pipes are enabled */ + intel_set_memory_cxsr(dev_priv, false); + } + + drm_dbg_kms(&dev_priv->drm, + "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", + srwm); + + /* 965 has limitations... */ + intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) | + FW_WM(8, CURSORB) | + FW_WM(8, PLANEB) | + FW_WM(8, PLANEA)); + intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) | + FW_WM(8, PLANEC_OLD)); + /* update cursor SR watermark */ + intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR)); + + if (cxsr_enabled) + intel_set_memory_cxsr(dev_priv, true); +} + +#undef FW_WM + +static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915, + enum i9xx_plane_id i9xx_plane) +{ + struct intel_plane *plane; + + for_each_intel_plane(&i915->drm, plane) { + if (plane->id == PLANE_PRIMARY && + plane->i9xx_plane == i9xx_plane) + return intel_crtc_for_pipe(i915, plane->pipe); + } + + return NULL; +} + +static void i9xx_update_wm(struct drm_i915_private *dev_priv) +{ + const struct intel_watermark_params *wm_info; + u32 fwater_lo; + u32 fwater_hi; + int cwm, srwm = 1; + int fifo_size; + int planea_wm, planeb_wm; + struct intel_crtc *crtc; + + if (IS_I945GM(dev_priv)) + wm_info = &i945_wm_info; + else if (DISPLAY_VER(dev_priv) != 2) + wm_info = &i915_wm_info; + else + wm_info = &i830_a_wm_info; + + if (DISPLAY_VER(dev_priv) == 2) + fifo_size = i830_get_fifo_size(dev_priv, PLANE_A); + else + fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A); + crtc = intel_crtc_for_plane(dev_priv, PLANE_A); + if (intel_crtc_active(crtc)) { + const struct drm_framebuffer *fb = + crtc->base.primary->state->fb; + int cpp; + + if (DISPLAY_VER(dev_priv) == 2) + cpp = 4; + else + cpp = fb->format->cpp[0]; + + planea_wm = intel_calculate_wm(crtc->config->pixel_rate, + wm_info, fifo_size, cpp, + pessimal_latency_ns); + } else { + planea_wm = fifo_size - wm_info->guard_size; + if (planea_wm > (long)wm_info->max_wm) + planea_wm = wm_info->max_wm; + } + + if (DISPLAY_VER(dev_priv) == 2) + wm_info = &i830_bc_wm_info; + + if (DISPLAY_VER(dev_priv) == 2) + fifo_size = i830_get_fifo_size(dev_priv, PLANE_B); + else + fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B); + crtc = intel_crtc_for_plane(dev_priv, PLANE_B); + if (intel_crtc_active(crtc)) { + const struct drm_framebuffer *fb = + 
crtc->base.primary->state->fb; + int cpp; + + if (DISPLAY_VER(dev_priv) == 2) + cpp = 4; + else + cpp = fb->format->cpp[0]; + + planeb_wm = intel_calculate_wm(crtc->config->pixel_rate, + wm_info, fifo_size, cpp, + pessimal_latency_ns); + } else { + planeb_wm = fifo_size - wm_info->guard_size; + if (planeb_wm > (long)wm_info->max_wm) + planeb_wm = wm_info->max_wm; + } + + drm_dbg_kms(&dev_priv->drm, + "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); + + crtc = single_enabled_crtc(dev_priv); + if (IS_I915GM(dev_priv) && crtc) { + struct drm_i915_gem_object *obj; + + obj = intel_fb_obj(crtc->base.primary->state->fb); + + /* self-refresh seems busted with untiled */ + if (!i915_gem_object_is_tiled(obj)) + crtc = NULL; + } + + /* + * Overlay gets an aggressive default since video jitter is bad. + */ + cwm = 2; + + /* Play safe and disable self-refresh before adjusting watermarks. */ + intel_set_memory_cxsr(dev_priv, false); + + /* Calc sr entries for one plane configs */ + if (HAS_FW_BLC(dev_priv) && crtc) { + /* self-refresh has much higher latency */ + static const int sr_latency_ns = 6000; + const struct drm_display_mode *pipe_mode = + &crtc->config->hw.pipe_mode; + const struct drm_framebuffer *fb = + crtc->base.primary->state->fb; + int pixel_rate = crtc->config->pixel_rate; + int htotal = pipe_mode->crtc_htotal; + int width = drm_rect_width(&crtc->base.primary->state->src) >> 16; + int cpp; + int entries; + + if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv)) + cpp = 4; + else + cpp = fb->format->cpp[0]; + + entries = intel_wm_method2(pixel_rate, htotal, width, cpp, + sr_latency_ns / 100); + entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); + drm_dbg_kms(&dev_priv->drm, + "self-refresh entries: %d\n", entries); + srwm = wm_info->fifo_size - entries; + if (srwm < 0) + srwm = 1; + + if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) + intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, + FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); + else + intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f); + } + + drm_dbg_kms(&dev_priv->drm, + "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", + planea_wm, planeb_wm, cwm, srwm); + + fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); + fwater_hi = (cwm & 0x1f); + + /* Set request length to 8 cachelines per fetch */ + fwater_lo = fwater_lo | (1 << 24) | (1 << 8); + fwater_hi = fwater_hi | (1 << 8); + + intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo); + intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi); + + if (crtc) + intel_set_memory_cxsr(dev_priv, true); +} + +static void i845_update_wm(struct drm_i915_private *dev_priv) +{ + struct intel_crtc *crtc; + u32 fwater_lo; + int planea_wm; + + crtc = single_enabled_crtc(dev_priv); + if (crtc == NULL) + return; + + planea_wm = intel_calculate_wm(crtc->config->pixel_rate, + &i845_wm_info, + i845_get_fifo_size(dev_priv, PLANE_A), + 4, pessimal_latency_ns); + fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff; + fwater_lo |= (3<<8) | planea_wm; + + drm_dbg_kms(&dev_priv->drm, + "Setting FIFO watermarks - A: %d\n", planea_wm); + + intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo); +} + +/* latency must be in 0.1us units. */ +static unsigned int ilk_wm_method1(unsigned int pixel_rate, + unsigned int cpp, + unsigned int latency) +{ + unsigned int ret; + + ret = intel_wm_method1(pixel_rate, cpp, latency); + ret = DIV_ROUND_UP(ret, 64) + 2; + + return ret; +} + +/* latency must be in 0.1us units. 
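+ * (ilk_compute_wm_level() converts the WM1+ latencies, which + * are stored in 0.5us units, by multiplying them by 5.)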
*/ +static unsigned int ilk_wm_method2(unsigned int pixel_rate, + unsigned int htotal, + unsigned int width, + unsigned int cpp, + unsigned int latency) +{ + unsigned int ret; + + ret = intel_wm_method2(pixel_rate, htotal, + width, cpp, latency); + ret = DIV_ROUND_UP(ret, 64) + 2; + + return ret; +} + +static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp) +{ + /* + * Neither of these should be possible since this function shouldn't be + * called if the CRTC is off or the plane is invisible. But let's be + * extra paranoid to avoid a potential divide-by-zero if we screw up + * elsewhere in the driver. + */ + if (WARN_ON(!cpp)) + return 0; + if (WARN_ON(!horiz_pixels)) + return 0; + + return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2; +} + +struct ilk_wm_maximums { + u16 pri; + u16 spr; + u16 cur; + u16 fbc; +}; + +/* + * For both WM_PIPE and WM_LP. + * mem_value must be in 0.1us units. + */ +static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + u32 mem_value, bool is_lp) +{ + u32 method1, method2; + int cpp; + + if (mem_value == 0) + return U32_MAX; + + if (!intel_wm_plane_visible(crtc_state, plane_state)) + return 0; + + cpp = plane_state->hw.fb->format->cpp[0]; + + method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value); + + if (!is_lp) + return method1; + + method2 = ilk_wm_method2(crtc_state->pixel_rate, + crtc_state->hw.pipe_mode.crtc_htotal, + drm_rect_width(&plane_state->uapi.src) >> 16, + cpp, mem_value); + + return min(method1, method2); +} + +/* + * For both WM_PIPE and WM_LP. + * mem_value must be in 0.1us units. + */ +static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + u32 mem_value) +{ + u32 method1, method2; + int cpp; + + if (mem_value == 0) + return U32_MAX; + + if (!intel_wm_plane_visible(crtc_state, plane_state)) + return 0; + + cpp = plane_state->hw.fb->format->cpp[0]; + + method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value); + method2 = ilk_wm_method2(crtc_state->pixel_rate, + crtc_state->hw.pipe_mode.crtc_htotal, + drm_rect_width(&plane_state->uapi.src) >> 16, + cpp, mem_value); + return min(method1, method2); +} + +/* + * For both WM_PIPE and WM_LP. + * mem_value must be in 0.1us units. + */ +static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + u32 mem_value) +{ + int cpp; + + if (mem_value == 0) + return U32_MAX; + + if (!intel_wm_plane_visible(crtc_state, plane_state)) + return 0; + + cpp = plane_state->hw.fb->format->cpp[0]; + + return ilk_wm_method2(crtc_state->pixel_rate, + crtc_state->hw.pipe_mode.crtc_htotal, + drm_rect_width(&plane_state->uapi.src) >> 16, + cpp, mem_value); +} + +/* Only for WM_LP. 
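+ * The FBC watermark is derived from the already computed + * primary watermark value (pri_val) via ilk_wm_fbc().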
*/ +static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + u32 pri_val) +{ + int cpp; + + if (!intel_wm_plane_visible(crtc_state, plane_state)) + return 0; + + cpp = plane_state->hw.fb->format->cpp[0]; + + return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.src) >> 16, + cpp); +} + +static unsigned int +ilk_display_fifo_size(const struct drm_i915_private *dev_priv) +{ + if (DISPLAY_VER(dev_priv) >= 8) + return 3072; + else if (DISPLAY_VER(dev_priv) >= 7) + return 768; + else + return 512; +} + +static unsigned int +ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv, + int level, bool is_sprite) +{ + if (DISPLAY_VER(dev_priv) >= 8) + /* BDW primary/sprite plane watermarks */ + return level == 0 ? 255 : 2047; + else if (DISPLAY_VER(dev_priv) >= 7) + /* IVB/HSW primary/sprite plane watermarks */ + return level == 0 ? 127 : 1023; + else if (!is_sprite) + /* ILK/SNB primary plane watermarks */ + return level == 0 ? 127 : 511; + else + /* ILK/SNB sprite plane watermarks */ + return level == 0 ? 63 : 255; +} + +static unsigned int +ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level) +{ + if (DISPLAY_VER(dev_priv) >= 7) + return level == 0 ? 63 : 255; + else + return level == 0 ? 31 : 63; +} + +static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv) +{ + if (DISPLAY_VER(dev_priv) >= 8) + return 31; + else + return 15; +} + +/* Calculate the maximum primary/sprite plane watermark */ +static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv, + int level, + const struct intel_wm_config *config, + enum intel_ddb_partitioning ddb_partitioning, + bool is_sprite) +{ + unsigned int fifo_size = ilk_display_fifo_size(dev_priv); + + /* if sprites aren't enabled, sprites get nothing */ + if (is_sprite && !config->sprites_enabled) + return 0; + + /* HSW allows LP1+ watermarks even with multiple pipes */ + if (level == 0 || config->num_pipes_active > 1) { + fifo_size /= INTEL_NUM_PIPES(dev_priv); + + /* + * For some reason the non self refresh + * FIFO size is only half of the self + * refresh FIFO size on ILK/SNB. 
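+ * E.g. with two active pipes the 512 entry FIFO yields + * 256 entries per pipe, halved again to 128 on ILK/SNB.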
+ */ + if (DISPLAY_VER(dev_priv) <= 6) + fifo_size /= 2; + } + + if (config->sprites_enabled) { + /* level 0 is always calculated with 1:1 split */ + if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) { + if (is_sprite) + fifo_size *= 5; + fifo_size /= 6; + } else { + fifo_size /= 2; + } + } + + /* clamp to max that the registers can hold */ + return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite)); +} + +/* Calculate the maximum cursor plane watermark */ +static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv, + int level, + const struct intel_wm_config *config) +{ + /* HSW LP1+ watermarks w/ multiple pipes */ + if (level > 0 && config->num_pipes_active > 1) + return 64; + + /* otherwise just report max that registers can hold */ + return ilk_cursor_wm_reg_max(dev_priv, level); +} + +static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv, + int level, + const struct intel_wm_config *config, + enum intel_ddb_partitioning ddb_partitioning, + struct ilk_wm_maximums *max) +{ + max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false); + max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true); + max->cur = ilk_cursor_wm_max(dev_priv, level, config); + max->fbc = ilk_fbc_wm_reg_max(dev_priv); +} + +static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv, + int level, + struct ilk_wm_maximums *max) +{ + max->pri = ilk_plane_wm_reg_max(dev_priv, level, false); + max->spr = ilk_plane_wm_reg_max(dev_priv, level, true); + max->cur = ilk_cursor_wm_reg_max(dev_priv, level); + max->fbc = ilk_fbc_wm_reg_max(dev_priv); +} + +static bool ilk_validate_wm_level(int level, + const struct ilk_wm_maximums *max, + struct intel_wm_level *result) +{ + bool ret; + + /* already determined to be invalid? */ + if (!result->enable) + return false; + + result->enable = result->pri_val <= max->pri && + result->spr_val <= max->spr && + result->cur_val <= max->cur; + + ret = result->enable; + + /* + * HACK until we can pre-compute everything, + * and thus fail gracefully if LP0 watermarks + * are exceeded... 
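+ * For now out of range LP0 values are merely clamped to the + * register maximums below and the level is kept enabled.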
+ */ + if (level == 0 && !result->enable) { + if (result->pri_val > max->pri) + DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n", + level, result->pri_val, max->pri); + if (result->spr_val > max->spr) + DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n", + level, result->spr_val, max->spr); + if (result->cur_val > max->cur) + DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n", + level, result->cur_val, max->cur); + + result->pri_val = min_t(u32, result->pri_val, max->pri); + result->spr_val = min_t(u32, result->spr_val, max->spr); + result->cur_val = min_t(u32, result->cur_val, max->cur); + result->enable = true; + } + + return ret; +} + +static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, + const struct intel_crtc *crtc, + int level, + struct intel_crtc_state *crtc_state, + const struct intel_plane_state *pristate, + const struct intel_plane_state *sprstate, + const struct intel_plane_state *curstate, + struct intel_wm_level *result) +{ + u16 pri_latency = dev_priv->display.wm.pri_latency[level]; + u16 spr_latency = dev_priv->display.wm.spr_latency[level]; + u16 cur_latency = dev_priv->display.wm.cur_latency[level]; + + /* WM1+ latency values stored in 0.5us units */ + if (level > 0) { + pri_latency *= 5; + spr_latency *= 5; + cur_latency *= 5; + } + + if (pristate) { + result->pri_val = ilk_compute_pri_wm(crtc_state, pristate, + pri_latency, level); + result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val); + } + + if (sprstate) + result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency); + + if (curstate) + result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency); + + result->enable = true; +} + +static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) +{ + u64 sskpd; + + i915->display.wm.num_levels = 5; + + sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD); + + wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd); + if (wm[0] == 0) + wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd); + wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd); + wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd); + wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd); + wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd); +} + +static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) +{ + u32 sskpd; + + i915->display.wm.num_levels = 4; + + sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD); + + wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd); + wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd); + wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd); + wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd); +} + +static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) +{ + u32 mltr; + + i915->display.wm.num_levels = 3; + + mltr = intel_uncore_read(&i915->uncore, MLTR_ILK); + + /* ILK primary LP0 latency is 700 ns */ + wm[0] = 7; + wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr); + wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr); +} + +static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv, + u16 wm[5]) +{ + /* ILK sprite LP0 latency is 1300 ns */ + if (DISPLAY_VER(dev_priv) == 5) + wm[0] = 13; +} + +static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv, + u16 wm[5]) +{ + /* ILK cursor LP0 latency is 1300 ns */ + if (DISPLAY_VER(dev_priv) == 5) + wm[0] = 13; +} + +static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, + u16 wm[5], u16 min) +{ + int level; + + if (wm[0] >= min) + return false; + + wm[0] = max(wm[0], min); + for (level = 1; level < 
dev_priv->display.wm.num_levels; level++) + wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5)); + + return true; +} + +static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) +{ + bool changed; + + /* + * The BIOS provided WM memory latency values are often + * inadequate for high resolution displays. Adjust them. + */ + changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12); + changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12); + changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12); + + if (!changed) + return; + + drm_dbg_kms(&dev_priv->drm, + "WM latency values increased to avoid potential underruns\n"); + intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency); + intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency); + intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency); +} + +static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv) +{ + /* + * On some SNB machines (Thinkpad X220 Tablet at least) + * LP3 usage can cause vblank interrupts to be lost. + * The DEIIR bit will go high but it looks like the CPU + * never gets interrupted. + * + * It's not clear whether other interrupt sources could + * be affected or if this is somehow limited to vblank + * interrupts only. To play it safe we disable LP3 + * watermarks entirely. + */ + if (dev_priv->display.wm.pri_latency[3] == 0 && + dev_priv->display.wm.spr_latency[3] == 0 && + dev_priv->display.wm.cur_latency[3] == 0) + return; + + dev_priv->display.wm.pri_latency[3] = 0; + dev_priv->display.wm.spr_latency[3] = 0; + dev_priv->display.wm.cur_latency[3] = 0; + + drm_dbg_kms(&dev_priv->drm, + "LP3 watermarks disabled due to potential for lost interrupts\n"); + intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency); + intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency); + intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency); +} + +static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) +{ + if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) + hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); + else if (DISPLAY_VER(dev_priv) >= 6) + snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); + else + ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); + + memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency, + sizeof(dev_priv->display.wm.pri_latency)); + memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency, + sizeof(dev_priv->display.wm.pri_latency)); + + intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency); + intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency); + + intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency); + intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency); + intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency); + + if (DISPLAY_VER(dev_priv) == 6) { + snb_wm_latency_quirk(dev_priv); + snb_wm_lp3_irq_quirk(dev_priv); + } +} + +static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv, + struct intel_pipe_wm *pipe_wm) +{ + /* LP0 watermark maximums depend on this pipe alone */ + const struct intel_wm_config config = { + .num_pipes_active = 1, + .sprites_enabled = pipe_wm->sprites_enabled, + .sprites_scaled = pipe_wm->sprites_scaled, + }; + struct ilk_wm_maximums max;
+ + /* LP0 watermarks always use 1/2 DDB partitioning */ + ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max); + + /* At least LP0 must be valid */ + if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) { + drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n"); + return false; + } + + return true; +} + +/* Compute new watermarks for the pipe */ +static int ilk_compute_pipe_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + struct intel_pipe_wm *pipe_wm; + struct intel_plane *plane; + const struct intel_plane_state *plane_state; + const struct intel_plane_state *pristate = NULL; + const struct intel_plane_state *sprstate = NULL; + const struct intel_plane_state *curstate = NULL; + struct ilk_wm_maximums max; + int level, usable_level; + + pipe_wm = &crtc_state->wm.ilk.optimal; + + intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { + if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) + pristate = plane_state; + else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY) + sprstate = plane_state; + else if (plane->base.type == DRM_PLANE_TYPE_CURSOR) + curstate = plane_state; + } + + pipe_wm->pipe_enabled = crtc_state->hw.active; + pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0); + pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0); + + usable_level = dev_priv->display.wm.num_levels - 1; + + /* ILK/SNB: LP2+ watermarks only w/o sprites */ + if (DISPLAY_VER(dev_priv) <= 6 && pipe_wm->sprites_enabled) + usable_level = 1; + + /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */ + if (pipe_wm->sprites_scaled) + usable_level = 0; + + memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm)); + ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state, + pristate, sprstate, curstate, &pipe_wm->wm[0]); + + if (!ilk_validate_pipe_wm(dev_priv, pipe_wm)) + return -EINVAL; + + ilk_compute_wm_reg_maximums(dev_priv, 1, &max); + + for (level = 1; level <= usable_level; level++) { + struct intel_wm_level *wm = &pipe_wm->wm[level]; + + ilk_compute_wm_level(dev_priv, crtc, level, crtc_state, + pristate, sprstate, curstate, wm); + + /* + * Disable any watermark level that exceeds the + * register maximums since such watermarks are + * always invalid. + */ + if (!ilk_validate_wm_level(level, &max, wm)) { + memset(wm, 0, sizeof(*wm)); + break; + } + } + + return 0; +} + +/* + * Build a set of 'intermediate' watermark values that satisfy both the old + * state and the new state. These can be programmed to the hardware + * immediately. + */ +static int ilk_compute_intermediate_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate; + const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal; + int level; + + /* + * Start with the final, target watermarks, then combine with the + * currently active watermarks to get values that are safe both before + * and after the vblank. 
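+ * In practice that means taking the max of the old and new + * value for each watermark field and the AND of the enable + * bits.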
+ */ + *a = new_crtc_state->wm.ilk.optimal; + if (!new_crtc_state->hw.active || + intel_crtc_needs_modeset(new_crtc_state) || + state->skip_intermediate_wm) + return 0; + + a->pipe_enabled |= b->pipe_enabled; + a->sprites_enabled |= b->sprites_enabled; + a->sprites_scaled |= b->sprites_scaled; + + for (level = 0; level < dev_priv->display.wm.num_levels; level++) { + struct intel_wm_level *a_wm = &a->wm[level]; + const struct intel_wm_level *b_wm = &b->wm[level]; + + a_wm->enable &= b_wm->enable; + a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val); + a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val); + a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val); + a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val); + } + + /* + * We need to make sure that these merged watermark values are + * actually a valid configuration themselves. If they're not, + * there's no safe way to transition from the old state to + * the new state, so we need to fail the atomic transaction. + */ + if (!ilk_validate_pipe_wm(dev_priv, a)) + return -EINVAL; + + /* + * If our intermediate WM are identical to the final WM, then we can + * omit the post-vblank programming; only update if it's different. + */ + if (memcmp(a, &new_crtc_state->wm.ilk.optimal, sizeof(*a)) != 0) + new_crtc_state->wm.need_postvbl_update = true; + + return 0; +} + +/* + * Merge the watermarks from all active pipes for a specific level. + */ +static void ilk_merge_wm_level(struct drm_i915_private *dev_priv, + int level, + struct intel_wm_level *ret_wm) +{ + const struct intel_crtc *crtc; + + ret_wm->enable = true; + + for_each_intel_crtc(&dev_priv->drm, crtc) { + const struct intel_pipe_wm *active = &crtc->wm.active.ilk; + const struct intel_wm_level *wm = &active->wm[level]; + + if (!active->pipe_enabled) + continue; + + /* + * The watermark values may have been used in the past, + * so we must maintain them in the registers for some + * time even if the level is now disabled. + */ + if (!wm->enable) + ret_wm->enable = false; + + ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val); + ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val); + ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val); + ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val); + } +} + +/* + * Merge all low power watermarks for all active pipes. + */ +static void ilk_wm_merge(struct drm_i915_private *dev_priv, + const struct intel_wm_config *config, + const struct ilk_wm_maximums *max, + struct intel_pipe_wm *merged) +{ + int level, num_levels = dev_priv->display.wm.num_levels; + int last_enabled_level = num_levels - 1; + + /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */ + if ((DISPLAY_VER(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) && + config->num_pipes_active > 1) + last_enabled_level = 0; + + /* ILK: FBC WM must be disabled always */ + merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6; + + /* merge each WM1+ level */ + for (level = 1; level < num_levels; level++) { + struct intel_wm_level *wm = &merged->wm[level]; + + ilk_merge_wm_level(dev_priv, level, wm); + + if (level > last_enabled_level) + wm->enable = false; + else if (!ilk_validate_wm_level(level, max, wm)) + /* make sure all following levels get disabled */ + last_enabled_level = level - 1; + + /* + * The spec says it is preferred to disable + * FBC WMs instead of disabling a WM level. 
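+ * Hence an fbc_val exceeding the maximum is simply zeroed + * below rather than clearing wm->enable.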
+ */ + if (wm->fbc_val > max->fbc) { + if (wm->enable) + merged->fbc_wm_enabled = false; + wm->fbc_val = 0; + } + } + + /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */ + if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) && + dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) { + for (level = 2; level < num_levels; level++) { + struct intel_wm_level *wm = &merged->wm[level]; + + wm->enable = false; + } + } +} + +static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm) +{ + /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */ + return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable); +} + +/* The value we need to program into the WM_LPx latency field */ +static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv, + int level) +{ + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + return 2 * level; + else + return dev_priv->display.wm.pri_latency[level]; +} + +static void ilk_compute_wm_results(struct drm_i915_private *dev_priv, + const struct intel_pipe_wm *merged, + enum intel_ddb_partitioning partitioning, + struct ilk_wm_values *results) +{ + struct intel_crtc *crtc; + int level, wm_lp; + + results->enable_fbc_wm = merged->fbc_wm_enabled; + results->partitioning = partitioning; + + /* LP1+ register values */ + for (wm_lp = 1; wm_lp <= 3; wm_lp++) { + const struct intel_wm_level *r; + + level = ilk_wm_lp_to_level(wm_lp, merged); + + r = &merged->wm[level]; + + /* + * Maintain the watermark values even if the level is + * disabled. Doing otherwise could cause underruns. + */ + results->wm_lp[wm_lp - 1] = + WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) | + WM_LP_PRIMARY(r->pri_val) | + WM_LP_CURSOR(r->cur_val); + + if (r->enable) + results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE; + + if (DISPLAY_VER(dev_priv) >= 8) + results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val); + else + results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val); + + results->wm_lp_spr[wm_lp - 1] = WM_LP_SPRITE(r->spr_val); + + /* + * Always set WM_LP_SPRITE_EN when spr_val != 0, even if the + * level is disabled. Doing otherwise could cause underruns. + */ + if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) { + drm_WARN_ON(&dev_priv->drm, wm_lp != 1); + results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE; + } + } + + /* LP0 register values */ + for_each_intel_crtc(&dev_priv->drm, crtc) { + enum pipe pipe = crtc->pipe; + const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk; + const struct intel_wm_level *r = &pipe_wm->wm[0]; + + if (drm_WARN_ON(&dev_priv->drm, !r->enable)) + continue; + + results->wm_pipe[pipe] = + WM0_PIPE_PRIMARY(r->pri_val) | + WM0_PIPE_SPRITE(r->spr_val) | + WM0_PIPE_CURSOR(r->cur_val); + } +} + +/* + * Find the result with the highest level enabled. Check for enable_fbc_wm in + * case both are at the same level. Prefer r1 in case they're the same. 
+ */ +static struct intel_pipe_wm * +ilk_find_best_result(struct drm_i915_private *dev_priv, + struct intel_pipe_wm *r1, + struct intel_pipe_wm *r2) +{ + int level, level1 = 0, level2 = 0; + + for (level = 1; level < dev_priv->display.wm.num_levels; level++) { + if (r1->wm[level].enable) + level1 = level; + if (r2->wm[level].enable) + level2 = level; + } + + if (level1 == level2) { + if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled) + return r2; + else + return r1; + } else if (level1 > level2) { + return r1; + } else { + return r2; + } +} + +/* dirty bits used to track which watermarks need changes */ +#define WM_DIRTY_PIPE(pipe) (1 << (pipe)) +#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp))) +#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3)) +#define WM_DIRTY_FBC (1 << 24) +#define WM_DIRTY_DDB (1 << 25) + +static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv, + const struct ilk_wm_values *old, + const struct ilk_wm_values *new) +{ + unsigned int dirty = 0; + enum pipe pipe; + int wm_lp; + + for_each_pipe(dev_priv, pipe) { + if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) { + dirty |= WM_DIRTY_PIPE(pipe); + /* Must disable LP1+ watermarks too */ + dirty |= WM_DIRTY_LP_ALL; + } + } + + if (old->enable_fbc_wm != new->enable_fbc_wm) { + dirty |= WM_DIRTY_FBC; + /* Must disable LP1+ watermarks too */ + dirty |= WM_DIRTY_LP_ALL; + } + + if (old->partitioning != new->partitioning) { + dirty |= WM_DIRTY_DDB; + /* Must disable LP1+ watermarks too */ + dirty |= WM_DIRTY_LP_ALL; + } + + /* LP1+ watermarks already deemed dirty, no need to continue */ + if (dirty & WM_DIRTY_LP_ALL) + return dirty; + + /* Find the lowest numbered LP1+ watermark in need of an update... */ + for (wm_lp = 1; wm_lp <= 3; wm_lp++) { + if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] || + old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1]) + break; + } + + /* ...and mark it and all higher numbered LP1+ watermarks as dirty */ + for (; wm_lp <= 3; wm_lp++) + dirty |= WM_DIRTY_LP(wm_lp); + + return dirty; +} + +static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv, + unsigned int dirty) +{ + struct ilk_wm_values *previous = &dev_priv->display.wm.hw; + bool changed = false; + + if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) { + previous->wm_lp[2] &= ~WM_LP_ENABLE; + intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]); + changed = true; + } + if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) { + previous->wm_lp[1] &= ~WM_LP_ENABLE; + intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]); + changed = true; + } + if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) { + previous->wm_lp[0] &= ~WM_LP_ENABLE; + intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]); + changed = true; + } + + /* + * Don't touch WM_LP_SPRITE_ENABLE here. + * Doing so could cause underruns. + */ + + return changed; +} + +/* + * The spec says we shouldn't write when we don't need, because every write + * causes WMs to be re-evaluated, expending some power. 
+ */ +static void ilk_write_wm_values(struct drm_i915_private *dev_priv, + struct ilk_wm_values *results) +{ + struct ilk_wm_values *previous = &dev_priv->display.wm.hw; + unsigned int dirty; + + dirty = ilk_compute_wm_dirty(dev_priv, previous, results); + if (!dirty) + return; + + _ilk_disable_lp_wm(dev_priv, dirty); + + if (dirty & WM_DIRTY_PIPE(PIPE_A)) + intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]); + if (dirty & WM_DIRTY_PIPE(PIPE_B)) + intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]); + if (dirty & WM_DIRTY_PIPE(PIPE_C)) + intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]); + + if (dirty & WM_DIRTY_DDB) { + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + intel_uncore_rmw(&dev_priv->uncore, WM_MISC, WM_MISC_DATA_PARTITION_5_6, + results->partitioning == INTEL_DDB_PART_1_2 ? 0 : + WM_MISC_DATA_PARTITION_5_6); + else + intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6, + results->partitioning == INTEL_DDB_PART_1_2 ? 0 : + DISP_DATA_PARTITION_5_6); + } + + if (dirty & WM_DIRTY_FBC) + intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, DISP_FBC_WM_DIS, + results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS); + + if (dirty & WM_DIRTY_LP(1) && + previous->wm_lp_spr[0] != results->wm_lp_spr[0]) + intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]); + + if (DISPLAY_VER(dev_priv) >= 7) { + if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1]) + intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]); + if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2]) + intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]); + } + + if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0]) + intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]); + if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1]) + intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]); + if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2]) + intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]); + + dev_priv->display.wm.hw = *results; +} + +bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv) +{ + return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL); +} + +static void ilk_compute_wm_config(struct drm_i915_private *dev_priv, + struct intel_wm_config *config) +{ + struct intel_crtc *crtc; + + /* Compute the currently _active_ config */ + for_each_intel_crtc(&dev_priv->drm, crtc) { + const struct intel_pipe_wm *wm = &crtc->wm.active.ilk; + + if (!wm->pipe_enabled) + continue; + + config->sprites_enabled |= wm->sprites_enabled; + config->sprites_scaled |= wm->sprites_scaled; + config->num_pipes_active++; + } +} + +static void ilk_program_watermarks(struct drm_i915_private *dev_priv) +{ + struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; + struct ilk_wm_maximums max; + struct intel_wm_config config = {}; + struct ilk_wm_values results = {}; + enum intel_ddb_partitioning partitioning; + + ilk_compute_wm_config(dev_priv, &config); + + ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max); + ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2); + + /* 5/6 split only in single pipe config on IVB+ */ + if (DISPLAY_VER(dev_priv) >= 7 && + config.num_pipes_active == 1 && config.sprites_enabled) { + ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max); + ilk_wm_merge(dev_priv, 
&config, &max, &lp_wm_5_6); + + best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6); + } else { + best_lp_wm = &lp_wm_1_2; + } + + partitioning = (best_lp_wm == &lp_wm_1_2) ? + INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; + + ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results); + + ilk_write_wm_values(dev_priv, &results); +} + +static void ilk_initial_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + mutex_lock(&dev_priv->display.wm.wm_mutex); + crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate; + ilk_program_watermarks(dev_priv); + mutex_unlock(&dev_priv->display.wm.wm_mutex); +} + +static void ilk_optimize_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + if (!crtc_state->wm.need_postvbl_update) + return; + + mutex_lock(&dev_priv->display.wm.wm_mutex); + crtc->wm.active.ilk = crtc_state->wm.ilk.optimal; + ilk_program_watermarks(dev_priv); + mutex_unlock(&dev_priv->display.wm.wm_mutex); +} + +static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct ilk_wm_values *hw = &dev_priv->display.wm.hw; + struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); + struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal; + enum pipe pipe = crtc->pipe; + + hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe)); + + memset(active, 0, sizeof(*active)); + + active->pipe_enabled = crtc->active; + + if (active->pipe_enabled) { + u32 tmp = hw->wm_pipe[pipe]; + + /* + * For active pipes LP0 watermark is marked as + * enabled, and LP1+ watermarks as disabled since + * we can't really reverse compute them in case + * multiple pipes are active. + */ + active->wm[0].enable = true; + active->wm[0].pri_val = REG_FIELD_GET(WM0_PIPE_PRIMARY_MASK, tmp); + active->wm[0].spr_val = REG_FIELD_GET(WM0_PIPE_SPRITE_MASK, tmp); + active->wm[0].cur_val = REG_FIELD_GET(WM0_PIPE_CURSOR_MASK, tmp); + } else { + int level; + + /* + * For inactive pipes, all watermark levels + * should be marked as enabled but zeroed, + * which is what we'd compute them to. + */ + for (level = 0; level < dev_priv->display.wm.num_levels; level++) + active->wm[level].enable = true; + } + + crtc->wm.active.ilk = *active; +} + +static int ilk_sanitize_watermarks_add_affected(struct drm_atomic_state *state) +{ + struct drm_plane *plane; + struct intel_crtc *crtc; + + for_each_intel_crtc(state->dev, crtc) { + struct intel_crtc_state *crtc_state; + + crtc_state = intel_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + if (crtc_state->hw.active) { + /* + * Preserve the inherited flag to avoid + * taking the full modeset path.
+ */ + crtc_state->inherited = true; + } + } + + drm_for_each_plane(plane, state->dev) { + struct drm_plane_state *plane_state; + + plane_state = drm_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) + return PTR_ERR(plane_state); + } + + return 0; +} + +/* + * Calculate what we think the watermarks should be for the state we've read + * out of the hardware and then immediately program those watermarks so that + * we ensure the hardware settings match our internal state. + * + * We can calculate what we think WM's should be by creating a duplicate of the + * current state (which was constructed during hardware readout) and running it + * through the atomic check code to calculate new watermark values in the + * state object. + */ +void ilk_wm_sanitize(struct drm_i915_private *dev_priv) +{ + struct drm_atomic_state *state; + struct intel_atomic_state *intel_state; + struct intel_crtc *crtc; + struct intel_crtc_state *crtc_state; + struct drm_modeset_acquire_ctx ctx; + int ret; + int i; + + /* Only supported on platforms that use atomic watermark design */ + if (!dev_priv->display.funcs.wm->optimize_watermarks) + return; + + if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) >= 9)) + return; + + state = drm_atomic_state_alloc(&dev_priv->drm); + if (drm_WARN_ON(&dev_priv->drm, !state)) + return; + + intel_state = to_intel_atomic_state(state); + + drm_modeset_acquire_init(&ctx, 0); + +retry: + state->acquire_ctx = &ctx; + + /* + * Hardware readout is the only time we don't want to calculate + * intermediate watermarks (since we don't trust the current + * watermarks). + */ + if (!HAS_GMCH(dev_priv)) + intel_state->skip_intermediate_wm = true; + + ret = ilk_sanitize_watermarks_add_affected(state); + if (ret) + goto fail; + + ret = intel_atomic_check(&dev_priv->drm, state); + if (ret) + goto fail; + + /* Write calculated watermark values back */ + for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { + crtc_state->wm.need_postvbl_update = true; + intel_optimize_watermarks(intel_state, crtc); + + to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm; + } + +fail: + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + drm_modeset_backoff(&ctx); + goto retry; + } + + /* + * If we fail here, it means that the hardware appears to be + * programmed in a way that shouldn't be possible, given our + * understanding of watermark requirements. This might mean a + * mistake in the hardware readout code or a mistake in the + * watermark calculations for a given platform. Raise a WARN + * so that this is noticeable. + * + * If this actually happens, we'll have to just leave the + * BIOS-programmed watermarks untouched and hope for the best. 
+ */ + drm_WARN(&dev_priv->drm, ret, + "Could not determine valid watermarks for inherited state\n"); + + drm_atomic_state_put(state); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); +} + +#define _FW_WM(value, plane) \ + (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT) +#define _FW_WM_VLV(value, plane) \ + (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT) + +static void g4x_read_wm_values(struct drm_i915_private *dev_priv, + struct g4x_wm_values *wm) +{ + u32 tmp; + + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1); + wm->sr.plane = _FW_WM(tmp, SR); + wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB); + wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB); + wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA); + + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2); + wm->fbc_en = tmp & DSPFW_FBC_SR_EN; + wm->sr.fbc = _FW_WM(tmp, FBC_SR); + wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR); + wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB); + wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA); + wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA); + + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3); + wm->hpll_en = tmp & DSPFW_HPLL_SR_EN; + wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); + wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR); + wm->hpll.plane = _FW_WM(tmp, HPLL_SR); +} + +static void vlv_read_wm_values(struct drm_i915_private *dev_priv, + struct vlv_wm_values *wm) +{ + enum pipe pipe; + u32 tmp; + + for_each_pipe(dev_priv, pipe) { + tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe)); + + wm->ddl[pipe].plane[PLANE_PRIMARY] = + (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); + wm->ddl[pipe].plane[PLANE_CURSOR] = + (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); + wm->ddl[pipe].plane[PLANE_SPRITE0] = + (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); + wm->ddl[pipe].plane[PLANE_SPRITE1] = + (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); + } + + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1); + wm->sr.plane = _FW_WM(tmp, SR); + wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB); + wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB); + wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA); + + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2); + wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB); + wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA); + wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA); + + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3); + wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); + + if (IS_CHERRYVIEW(dev_priv)) { + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV); + wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); + wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); + + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV); + wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF); + wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE); + + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV); + wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC); + wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC); + + tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM); + wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; + wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8; + wm->pipe[PIPE_C].plane[PLANE_SPRITE0] 
|= _FW_WM(tmp, SPRITEE_HI) << 8; + wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8; + wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; + wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; + wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8; + wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8; + wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; + wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; + } else { + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7); + wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); + wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); + + tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM); + wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; + wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; + wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; + wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8; + wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8; + wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; + wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; + } +} + +#undef _FW_WM +#undef _FW_WM_VLV + +static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv) +{ + struct g4x_wm_values *wm = &dev_priv->display.wm.g4x; + struct intel_crtc *crtc; + + g4x_read_wm_values(dev_priv, wm); + + wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; + + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct g4x_wm_state *active = &crtc->wm.active.g4x; + struct g4x_pipe_wm *raw; + enum pipe pipe = crtc->pipe; + enum plane_id plane_id; + int level, max_level; + + active->cxsr = wm->cxsr; + active->hpll_en = wm->hpll_en; + active->fbc_en = wm->fbc_en; + + active->sr = wm->sr; + active->hpll = wm->hpll; + + for_each_plane_id_on_crtc(crtc, plane_id) { + active->wm.plane[plane_id] = + wm->pipe[pipe].plane[plane_id]; + } + + if (wm->cxsr && wm->hpll_en) + max_level = G4X_WM_LEVEL_HPLL; + else if (wm->cxsr) + max_level = G4X_WM_LEVEL_SR; + else + max_level = G4X_WM_LEVEL_NORMAL; + + level = G4X_WM_LEVEL_NORMAL; + raw = &crtc_state->wm.g4x.raw[level]; + for_each_plane_id_on_crtc(crtc, plane_id) + raw->plane[plane_id] = active->wm.plane[plane_id]; + + level = G4X_WM_LEVEL_SR; + if (level > max_level) + goto out; + + raw = &crtc_state->wm.g4x.raw[level]; + raw->plane[PLANE_PRIMARY] = active->sr.plane; + raw->plane[PLANE_CURSOR] = active->sr.cursor; + raw->plane[PLANE_SPRITE0] = 0; + raw->fbc = active->sr.fbc; + + level = G4X_WM_LEVEL_HPLL; + if (level > max_level) + goto out; + + raw = &crtc_state->wm.g4x.raw[level]; + raw->plane[PLANE_PRIMARY] = active->hpll.plane; + raw->plane[PLANE_CURSOR] = active->hpll.cursor; + raw->plane[PLANE_SPRITE0] = 0; + raw->fbc = active->hpll.fbc; + + level++; + out: + for_each_plane_id_on_crtc(crtc, plane_id) + g4x_raw_plane_wm_set(crtc_state, level, + plane_id, USHRT_MAX); + g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX); + + g4x_invalidate_wms(crtc, active, level); + + crtc_state->wm.g4x.optimal = *active; + crtc_state->wm.g4x.intermediate = *active; + + drm_dbg_kms(&dev_priv->drm, + "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n", + pipe_name(pipe), + wm->pipe[pipe].plane[PLANE_PRIMARY], + wm->pipe[pipe].plane[PLANE_CURSOR], + 
wm->pipe[pipe].plane[PLANE_SPRITE0]); + } + + drm_dbg_kms(&dev_priv->drm, + "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n", + wm->sr.plane, wm->sr.cursor, wm->sr.fbc); + drm_dbg_kms(&dev_priv->drm, + "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n", + wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc); + drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n", + str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en), + str_yes_no(wm->fbc_en)); +} + +static void g4x_wm_sanitize(struct drm_i915_private *dev_priv) +{ + struct intel_plane *plane; + struct intel_crtc *crtc; + + mutex_lock(&dev_priv->display.wm.wm_mutex); + + for_each_intel_plane(&dev_priv->drm, plane) { + struct intel_crtc *crtc = + intel_crtc_for_pipe(dev_priv, plane->pipe); + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + enum plane_id plane_id = plane->id; + int level; + + if (plane_state->uapi.visible) + continue; + + for (level = 0; level < dev_priv->display.wm.num_levels; level++) { + struct g4x_pipe_wm *raw = + &crtc_state->wm.g4x.raw[level]; + + raw->plane[plane_id] = 0; + + if (plane_id == PLANE_PRIMARY) + raw->fbc = 0; + } + } + + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + int ret; + + ret = _g4x_compute_pipe_wm(crtc_state); + drm_WARN_ON(&dev_priv->drm, ret); + + crtc_state->wm.g4x.intermediate = + crtc_state->wm.g4x.optimal; + crtc->wm.active.g4x = crtc_state->wm.g4x.optimal; + } + + g4x_program_watermarks(dev_priv); + + mutex_unlock(&dev_priv->display.wm.wm_mutex); +} + +static void g4x_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915) +{ + g4x_wm_get_hw_state(i915); + g4x_wm_sanitize(i915); +} + +static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv) +{ + struct vlv_wm_values *wm = &dev_priv->display.wm.vlv; + struct intel_crtc *crtc; + u32 val; + + vlv_read_wm_values(dev_priv, wm); + + wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; + wm->level = VLV_WM_LEVEL_PM2; + + if (IS_CHERRYVIEW(dev_priv)) { + vlv_punit_get(dev_priv); + + val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); + if (val & DSP_MAXFIFO_PM5_ENABLE) + wm->level = VLV_WM_LEVEL_PM5; + + /* + * If DDR DVFS is disabled in the BIOS, Punit + * will never ack the request. So if that happens + * assume we don't have to enable/disable DDR DVFS + * dynamically. To test that just set the REQ_ACK + * bit to poke the Punit, but don't change the + * HIGH/LOW bits so that we don't actually change + * the current state. 
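What follows is a write-then-poll handshake: only the REQ_ACK bit is set (the HIGH/LOW frequency-request bits are deliberately left alone), and the Punit signals the ack by clearing it; a timeout is interpreted as "DDR DVFS cannot be toggled at runtime". The generic shape of such a probe (a sketch: wait_for() is i915's poll-until-condition-or-timeout macro, and the toy register/bit names and accessors are stand-ins):

    #define TOY_SETUP_REG   0x138184        /* stand-in register */
    #define TOY_REQ_ACK     (1 << 8)        /* stand-in ack bit */

    u32 toy_fw_read(u32 reg);
    void toy_fw_write(u32 reg, u32 val);

    /* Poke the firmware for an ack without changing the requested state. */
    static bool toy_probe_ack(void)
    {
            u32 val;

            val = toy_fw_read(TOY_SETUP_REG);
            val |= TOY_REQ_ACK;             /* request an ack, nothing else */
            toy_fw_write(TOY_SETUP_REG, val);

            /* the firmware acks by clearing the bit; 3 ms budget */
            if (wait_for((toy_fw_read(TOY_SETUP_REG) & TOY_REQ_ACK) == 0, 3))
                    return false;           /* never acked: assume the feature is off */

            return true;
    }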
+ */ + val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); + val |= FORCE_DDR_FREQ_REQ_ACK; + vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); + + if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & + FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) { + drm_dbg_kms(&dev_priv->drm, + "Punit not acking DDR DVFS request, " + "assuming DDR DVFS is disabled\n"); + dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM5 + 1; + } else { + val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); + if ((val & FORCE_DDR_HIGH_FREQ) == 0) + wm->level = VLV_WM_LEVEL_DDR_DVFS; + } + + vlv_punit_put(dev_priv); + } + + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct vlv_wm_state *active = &crtc->wm.active.vlv; + const struct vlv_fifo_state *fifo_state = + &crtc_state->wm.vlv.fifo_state; + enum pipe pipe = crtc->pipe; + enum plane_id plane_id; + int level; + + vlv_get_fifo_size(crtc_state); + + active->num_levels = wm->level + 1; + active->cxsr = wm->cxsr; + + for (level = 0; level < active->num_levels; level++) { + struct g4x_pipe_wm *raw = + &crtc_state->wm.vlv.raw[level]; + + active->sr[level].plane = wm->sr.plane; + active->sr[level].cursor = wm->sr.cursor; + + for_each_plane_id_on_crtc(crtc, plane_id) { + active->wm[level].plane[plane_id] = + wm->pipe[pipe].plane[plane_id]; + + raw->plane[plane_id] = + vlv_invert_wm_value(active->wm[level].plane[plane_id], + fifo_state->plane[plane_id]); + } + } + + for_each_plane_id_on_crtc(crtc, plane_id) + vlv_raw_plane_wm_set(crtc_state, level, + plane_id, USHRT_MAX); + vlv_invalidate_wms(crtc, active, level); + + crtc_state->wm.vlv.optimal = *active; + crtc_state->wm.vlv.intermediate = *active; + + drm_dbg_kms(&dev_priv->drm, + "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n", + pipe_name(pipe), + wm->pipe[pipe].plane[PLANE_PRIMARY], + wm->pipe[pipe].plane[PLANE_CURSOR], + wm->pipe[pipe].plane[PLANE_SPRITE0], + wm->pipe[pipe].plane[PLANE_SPRITE1]); + } + + drm_dbg_kms(&dev_priv->drm, + "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n", + wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr); +} + +static void vlv_wm_sanitize(struct drm_i915_private *dev_priv) +{ + struct intel_plane *plane; + struct intel_crtc *crtc; + + mutex_lock(&dev_priv->display.wm.wm_mutex); + + for_each_intel_plane(&dev_priv->drm, plane) { + struct intel_crtc *crtc = + intel_crtc_for_pipe(dev_priv, plane->pipe); + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + enum plane_id plane_id = plane->id; + int level; + + if (plane_state->uapi.visible) + continue; + + for (level = 0; level < dev_priv->display.wm.num_levels; level++) { + struct g4x_pipe_wm *raw = + &crtc_state->wm.vlv.raw[level]; + + raw->plane[plane_id] = 0; + } + } + + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + int ret; + + ret = _vlv_compute_pipe_wm(crtc_state); + drm_WARN_ON(&dev_priv->drm, ret); + + crtc_state->wm.vlv.intermediate = + crtc_state->wm.vlv.optimal; + crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; + } + + vlv_program_watermarks(dev_priv); + + mutex_unlock(&dev_priv->display.wm.wm_mutex); +} + +static void vlv_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915) +{ + vlv_wm_get_hw_state(i915); + vlv_wm_sanitize(i915); +} + +/* + * FIXME should probably kill this and improve + * the real 
watermark readout/sanitation instead + */ +static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv) +{ + intel_uncore_rmw(&dev_priv->uncore, WM3_LP_ILK, WM_LP_ENABLE, 0); + intel_uncore_rmw(&dev_priv->uncore, WM2_LP_ILK, WM_LP_ENABLE, 0); + intel_uncore_rmw(&dev_priv->uncore, WM1_LP_ILK, WM_LP_ENABLE, 0); + + /* + * Don't touch WM_LP_SPRITE_ENABLE here. + * Doing so could cause underruns. + */ +} + +static void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv) +{ + struct ilk_wm_values *hw = &dev_priv->display.wm.hw; + struct intel_crtc *crtc; + + ilk_init_lp_watermarks(dev_priv); + + for_each_intel_crtc(&dev_priv->drm, crtc) + ilk_pipe_wm_get_hw_state(crtc); + + hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK); + hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK); + hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK); + + hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK); + if (DISPLAY_VER(dev_priv) >= 7) { + hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB); + hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB); + } + + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) & + WM_MISC_DATA_PARTITION_5_6) ? + INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; + else if (IS_IVYBRIDGE(dev_priv)) + hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) & + DISP_DATA_PARTITION_5_6) ? + INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; + + hw->enable_fbc_wm = + !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS); +} + +static const struct intel_wm_funcs ilk_wm_funcs = { + .compute_pipe_wm = ilk_compute_pipe_wm, + .compute_intermediate_wm = ilk_compute_intermediate_wm, + .initial_watermarks = ilk_initial_watermarks, + .optimize_watermarks = ilk_optimize_watermarks, + .get_hw_state = ilk_wm_get_hw_state, +}; + +static const struct intel_wm_funcs vlv_wm_funcs = { + .compute_pipe_wm = vlv_compute_pipe_wm, + .compute_intermediate_wm = vlv_compute_intermediate_wm, + .initial_watermarks = vlv_initial_watermarks, + .optimize_watermarks = vlv_optimize_watermarks, + .atomic_update_watermarks = vlv_atomic_update_fifo, + .get_hw_state = vlv_wm_get_hw_state_and_sanitize, +}; + +static const struct intel_wm_funcs g4x_wm_funcs = { + .compute_pipe_wm = g4x_compute_pipe_wm, + .compute_intermediate_wm = g4x_compute_intermediate_wm, + .initial_watermarks = g4x_initial_watermarks, + .optimize_watermarks = g4x_optimize_watermarks, + .get_hw_state = g4x_wm_get_hw_state_and_sanitize, +}; + +static const struct intel_wm_funcs pnv_wm_funcs = { + .update_wm = pnv_update_wm, +}; + +static const struct intel_wm_funcs i965_wm_funcs = { + .update_wm = i965_update_wm, +}; + +static const struct intel_wm_funcs i9xx_wm_funcs = { + .update_wm = i9xx_update_wm, +}; + +static const struct intel_wm_funcs i845_wm_funcs = { + .update_wm = i845_update_wm, +}; + +static const struct intel_wm_funcs nop_funcs = { +}; + +void i9xx_wm_init(struct drm_i915_private *dev_priv) +{ + /* For FIFO watermark updates */ + if (HAS_PCH_SPLIT(dev_priv)) { + ilk_setup_wm_latency(dev_priv); + dev_priv->display.funcs.wm = &ilk_wm_funcs; + } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + vlv_setup_wm_latency(dev_priv); + dev_priv->display.funcs.wm = &vlv_wm_funcs; + } else if (IS_G4X(dev_priv)) { + g4x_setup_wm_latency(dev_priv); + dev_priv->display.funcs.wm = &g4x_wm_funcs; + } else if (IS_PINEVIEW(dev_priv)) { + if 
(!intel_get_cxsr_latency(!IS_MOBILE(dev_priv), + dev_priv->is_ddr3, + dev_priv->fsb_freq, + dev_priv->mem_freq)) { + drm_info(&dev_priv->drm, + "failed to find known CxSR latency " + "(found ddr%s fsb freq %d, mem freq %d), " + "disabling CxSR\n", + (dev_priv->is_ddr3 == 1) ? "3" : "2", + dev_priv->fsb_freq, dev_priv->mem_freq); + /* Disable CxSR and never update its watermark again */ + intel_set_memory_cxsr(dev_priv, false); + dev_priv->display.funcs.wm = &nop_funcs; + } else { + dev_priv->display.funcs.wm = &pnv_wm_funcs; + } + } else if (DISPLAY_VER(dev_priv) == 4) { + dev_priv->display.funcs.wm = &i965_wm_funcs; + } else if (DISPLAY_VER(dev_priv) == 3) { + dev_priv->display.funcs.wm = &i9xx_wm_funcs; + } else if (DISPLAY_VER(dev_priv) == 2) { + if (INTEL_NUM_PIPES(dev_priv) == 1) + dev_priv->display.funcs.wm = &i845_wm_funcs; + else + dev_priv->display.funcs.wm = &i9xx_wm_funcs; + } else { + drm_err(&dev_priv->drm, + "unexpected fall-through in %s\n", __func__); + dev_priv->display.funcs.wm = &nop_funcs; + } +} diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.h b/drivers/gpu/drm/i915/display/i9xx_wm.h new file mode 100644 index 000000000000..a7875cbcd05a --- /dev/null +++ b/drivers/gpu/drm/i915/display/i9xx_wm.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __I9XX_WM_H__ +#define __I9XX_WM_H__ + +#include <linux/types.h> + +struct drm_i915_private; +struct intel_crtc_state; +struct intel_plane_state; + +int ilk_wm_max_level(const struct drm_i915_private *i915); +bool ilk_disable_lp_wm(struct drm_i915_private *i915); +void ilk_wm_sanitize(struct drm_i915_private *i915); +bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable); +void i9xx_wm_init(struct drm_i915_private *i915); + +#endif /* __I9XX_WM_H__ */ diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 468a792e6a40..50dcaa895854 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -45,6 +45,7 @@ #include "intel_dsi_vbt.h" #include "intel_panel.h" #include "intel_vdsc.h" +#include "intel_vdsc_regs.h" #include "skl_scaler.h" #include "skl_universal_plane.h" @@ -207,7 +208,7 @@ void icl_dsi_frame_update(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 tmp, mode_flags; + u32 mode_flags; enum port port; mode_flags = crtc_state->mode_flags; @@ -224,9 +225,7 @@ void icl_dsi_frame_update(struct intel_crtc_state *crtc_state) else return; - tmp = intel_de_read(dev_priv, DSI_CMD_FRMCTL(port)); - tmp |= DSI_FRAME_UPDATE_REQUEST; - intel_de_write(dev_priv, DSI_CMD_FRMCTL(port), tmp); + intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port), 0, DSI_FRAME_UPDATE_REQUEST); } static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) @@ -234,7 +233,7 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum phy phy; - u32 tmp; + u32 tmp, mask, val; int lane; for_each_dsi_phy(phy, intel_dsi->phys) { @@ -242,56 +241,35 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) * Program voltage swing and pre-emphasis level values as per * table in BSPEC under DDI buffer programing */ + mask = SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK; + val = SCALING_MODE_SEL(0x2) | TAP2_DISABLE | 
TAP3_DISABLE | + RTERM_SELECT(0x6); tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); - tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK); - tmp |= SCALING_MODE_SEL(0x2); - tmp |= TAP2_DISABLE | TAP3_DISABLE; - tmp |= RTERM_SELECT(0x6); + tmp &= ~mask; + tmp |= val; intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); + intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), mask, val); - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy)); - tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK); - tmp |= SCALING_MODE_SEL(0x2); - tmp |= TAP2_DISABLE | TAP3_DISABLE; - tmp |= RTERM_SELECT(0x6); - intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp); - + mask = SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | + RCOMP_SCALAR_MASK; + val = SWING_SEL_UPPER(0x2) | SWING_SEL_LOWER(0x2) | + RCOMP_SCALAR(0x98); tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy)); - tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | - RCOMP_SCALAR_MASK); - tmp |= SWING_SEL_UPPER(0x2); - tmp |= SWING_SEL_LOWER(0x2); - tmp |= RCOMP_SCALAR(0x98); + tmp &= ~mask; + tmp |= val; intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp); + intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy), mask, val); - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy)); - tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | - RCOMP_SCALAR_MASK); - tmp |= SWING_SEL_UPPER(0x2); - tmp |= SWING_SEL_LOWER(0x2); - tmp |= RCOMP_SCALAR(0x98); - intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp); - - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy)); - tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | - CURSOR_COEFF_MASK); - tmp |= POST_CURSOR_1(0x0); - tmp |= POST_CURSOR_2(0x0); - tmp |= CURSOR_COEFF(0x3f); - intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp); - - for (lane = 0; lane <= 3; lane++) { - /* Bspec: must not use GRP register for write */ - tmp = intel_de_read(dev_priv, - ICL_PORT_TX_DW4_LN(lane, phy)); - tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | - CURSOR_COEFF_MASK); - tmp |= POST_CURSOR_1(0x0); - tmp |= POST_CURSOR_2(0x0); - tmp |= CURSOR_COEFF(0x3f); - intel_de_write(dev_priv, - ICL_PORT_TX_DW4_LN(lane, phy), tmp); - } + mask = POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | + CURSOR_COEFF_MASK; + val = POST_CURSOR_1(0x0) | POST_CURSOR_2(0x0) | + CURSOR_COEFF(0x3f); + intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), mask, val); + + /* Bspec: must not use GRP register for write */ + for (lane = 0; lane <= 3; lane++) + intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy), + mask, val); } } @@ -300,9 +278,21 @@ static void configure_dual_link_mode(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); + i915_reg_t dss_ctl1_reg, dss_ctl2_reg; u32 dss_ctl1; - dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1); + /* FIXME: Move all DSS handling to intel_vdsc.c */ + if (DISPLAY_VER(dev_priv) >= 12) { + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); + + dss_ctl1_reg = ICL_PIPE_DSS_CTL1(crtc->pipe); + dss_ctl2_reg = ICL_PIPE_DSS_CTL2(crtc->pipe); + } else { + dss_ctl1_reg = DSS_CTL1; + dss_ctl2_reg = DSS_CTL2; + } + + dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg); dss_ctl1 |= SPLITTER_ENABLE; dss_ctl1 &= ~OVERLAP_PIXELS_MASK; dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap); @@ -310,7 +300,6 @@ static void configure_dual_link_mode(struct intel_encoder *encoder, if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { const struct drm_display_mode *adjusted_mode = 
&pipe_config->hw.adjusted_mode; - u32 dss_ctl2; u16 hactive = adjusted_mode->crtc_hdisplay; u16 dl_buffer_depth; @@ -323,16 +312,14 @@ static void configure_dual_link_mode(struct intel_encoder *encoder, dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK; dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth); - dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2); - dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK; - dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth); - intel_de_write(dev_priv, DSS_CTL2, dss_ctl2); + intel_de_rmw(dev_priv, dss_ctl2_reg, RIGHT_DL_BUF_TARGET_DEPTH_MASK, + RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth)); } else { /* Interleave */ dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE; } - intel_de_write(dev_priv, DSS_CTL1, dss_ctl1); + intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1); } /* aka DSI 8X clock */ @@ -412,13 +399,10 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - u32 tmp; - for_each_dsi_port(port, intel_dsi->ports) { - tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port)); - tmp |= COMBO_PHY_MODE_DSI; - intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp); - } + for_each_dsi_port(port, intel_dsi->ports) + intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port), + 0, COMBO_PHY_MODE_DSI); get_dsi_io_power_domains(dev_priv, intel_dsi); } @@ -444,26 +428,16 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) /* Step 4b(i) set loadgen select for transmit and aux lanes */ for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy)); - tmp &= ~LOADGEN_SELECT; - intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp); - for (lane = 0; lane <= 3; lane++) { - tmp = intel_de_read(dev_priv, - ICL_PORT_TX_DW4_LN(lane, phy)); - tmp &= ~LOADGEN_SELECT; - if (lane != 2) - tmp |= LOADGEN_SELECT; - intel_de_write(dev_priv, - ICL_PORT_TX_DW4_LN(lane, phy), tmp); - } + intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), LOADGEN_SELECT, 0); + for (lane = 0; lane <= 3; lane++) + intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy), + LOADGEN_SELECT, lane != 2 ? 
LOADGEN_SELECT : 0); } /* Step 4b(ii) set latency optimization for transmit and aux lanes */ for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy)); - tmp &= ~FRC_LATENCY_OPTIM_MASK; - tmp |= FRC_LATENCY_OPTIM_VAL(0x5); - intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp); + intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy), + FRC_LATENCY_OPTIM_MASK, FRC_LATENCY_OPTIM_VAL(0x5)); tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy)); tmp &= ~FRC_LATENCY_OPTIM_MASK; tmp |= FRC_LATENCY_OPTIM_VAL(0x5); @@ -471,12 +445,8 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) /* For EHL, TGL, set latency optimization for PCS_DW1 lanes */ if (IS_JSL_EHL(dev_priv) || (DISPLAY_VER(dev_priv) >= 12)) { - tmp = intel_de_read(dev_priv, - ICL_PORT_PCS_DW1_AUX(phy)); - tmp &= ~LATENCY_OPTIM_MASK; - tmp |= LATENCY_OPTIM_VAL(0); - intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), - tmp); + intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), + LATENCY_OPTIM_MASK, LATENCY_OPTIM_VAL(0)); tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy)); @@ -501,9 +471,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy)); tmp &= ~COMMON_KEEPER_EN; intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), tmp); - tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_AUX(phy)); - tmp &= ~COMMON_KEEPER_EN; - intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), tmp); + intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), COMMON_KEEPER_EN, 0); } /* @@ -511,20 +479,15 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) * Note: loadgen select program is done * as part of lane phy sequence configuration */ - for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy)); - tmp |= SUS_CLOCK_CONFIG; - intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), tmp); - } + for_each_dsi_phy(phy, intel_dsi->phys) + intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy), 0, SUS_CLOCK_CONFIG); /* Clear training enable to change swing values */ for_each_dsi_phy(phy, intel_dsi->phys) { tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); tmp &= ~TX_TRAINING_EN; intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy)); - tmp &= ~TX_TRAINING_EN; - intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp); + intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), TX_TRAINING_EN, 0); } /* Program swing and de-emphasis */ @@ -535,9 +498,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); tmp |= TX_TRAINING_EN; intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy)); - tmp |= TX_TRAINING_EN; - intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp); + intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), 0, TX_TRAINING_EN); } } @@ -545,13 +506,10 @@ static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - u32 tmp; enum port port; for_each_dsi_port(port, intel_dsi->ports) { - tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port)); - tmp |= DDI_BUF_CTL_ENABLE; - intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp); + intel_de_rmw(dev_priv, DDI_BUF_CTL(port), 0, DDI_BUF_CTL_ENABLE); if (wait_for_us(!(intel_de_read(dev_priv, 
DDI_BUF_CTL(port)) & DDI_BUF_IS_IDLE), @@ -567,17 +525,13 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - u32 tmp; enum port port; enum phy phy; /* Program T-INIT master registers */ - for_each_dsi_port(port, intel_dsi->ports) { - tmp = intel_de_read(dev_priv, ICL_DSI_T_INIT_MASTER(port)); - tmp &= ~DSI_T_INIT_MASTER_MASK; - tmp |= intel_dsi->init_count; - intel_de_write(dev_priv, ICL_DSI_T_INIT_MASTER(port), tmp); - } + for_each_dsi_port(port, intel_dsi->ports) + intel_de_rmw(dev_priv, ICL_DSI_T_INIT_MASTER(port), + DSI_T_INIT_MASTER_MASK, intel_dsi->init_count); /* Program DPHY clock lanes timings */ for_each_dsi_port(port, intel_dsi->ports) { @@ -608,31 +562,22 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, if (DISPLAY_VER(dev_priv) == 11) { if (afe_clk(encoder, crtc_state) <= 800000) { for_each_dsi_port(port, intel_dsi->ports) { - tmp = intel_de_read(dev_priv, - DPHY_TA_TIMING_PARAM(port)); - tmp &= ~TA_SURE_MASK; - tmp |= TA_SURE_OVERRIDE | TA_SURE(0); - intel_de_write(dev_priv, - DPHY_TA_TIMING_PARAM(port), - tmp); + intel_de_rmw(dev_priv, DPHY_TA_TIMING_PARAM(port), + TA_SURE_MASK, + TA_SURE_OVERRIDE | TA_SURE(0)); /* shadow register inside display core */ - tmp = intel_de_read(dev_priv, - DSI_TA_TIMING_PARAM(port)); - tmp &= ~TA_SURE_MASK; - tmp |= TA_SURE_OVERRIDE | TA_SURE(0); - intel_de_write(dev_priv, - DSI_TA_TIMING_PARAM(port), tmp); + intel_de_rmw(dev_priv, DSI_TA_TIMING_PARAM(port), + TA_SURE_MASK, + TA_SURE_OVERRIDE | TA_SURE(0)); } } } if (IS_JSL_EHL(dev_priv)) { - for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = intel_de_read(dev_priv, ICL_DPHY_CHKN(phy)); - tmp |= ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP; - intel_de_write(dev_priv, ICL_DPHY_CHKN(phy), tmp); - } + for_each_dsi_phy(phy, intel_dsi->phys) + intel_de_rmw(dev_priv, ICL_DPHY_CHKN(phy), + 0, ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP); } } @@ -824,11 +769,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, if (intel_dsi->dual_link) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - tmp = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL2(dsi_trans)); - tmp |= PORT_SYNC_MODE_ENABLE; - intel_de_write(dev_priv, - TRANS_DDI_FUNC_CTL2(dsi_trans), tmp); + intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL2(dsi_trans), + 0, PORT_SYNC_MODE_ENABLE); } /* configure stream splitting */ @@ -958,8 +900,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, /* program TRANS_HTOTAL register */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, HTOTAL(dsi_trans), - (hactive - 1) | ((htotal - 1) << 16)); + intel_de_write(dev_priv, TRANS_HTOTAL(dsi_trans), + HACTIVE(hactive - 1) | HTOTAL(htotal - 1)); } /* TRANS_HSYNC register to be programmed only for video mode */ @@ -981,8 +923,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, HSYNC(dsi_trans), - (hsync_start - 1) | ((hsync_end - 1) << 16)); + intel_de_write(dev_priv, TRANS_HSYNC(dsi_trans), + HSYNC_START(hsync_start - 1) | HSYNC_END(hsync_end - 1)); } } @@ -995,8 +937,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, * struct drm_display_mode. 
* For interlace mode: program required pixel minus 2 */ - intel_de_write(dev_priv, VTOTAL(dsi_trans), - (vactive - 1) | ((vtotal - 1) << 16)); + intel_de_write(dev_priv, TRANS_VTOTAL(dsi_trans), + VACTIVE(vactive - 1) | VTOTAL(vtotal - 1)); } if (vsync_end < vsync_start || vsync_end > vtotal) @@ -1009,8 +951,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, if (is_vid_mode(intel_dsi)) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, VSYNC(dsi_trans), - (vsync_start - 1) | ((vsync_end - 1) << 16)); + intel_de_write(dev_priv, TRANS_VSYNC(dsi_trans), + VSYNC_START(vsync_start - 1) | VSYNC_END(vsync_end - 1)); } } @@ -1023,17 +965,22 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, if (is_vid_mode(intel_dsi)) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans), + intel_de_write(dev_priv, TRANS_VSYNCSHIFT(dsi_trans), vsync_shift); } } - /* program TRANS_VBLANK register, should be same as vtotal programmed */ + /* + * program TRANS_VBLANK register, should be same as vtotal programmed + * + * FIXME get rid of these local hacks and do it right, + * this will not handle eg. delayed vblank correctly. + */ if (DISPLAY_VER(dev_priv) >= 12) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, VBLANK(dsi_trans), - (vactive - 1) | ((vtotal - 1) << 16)); + intel_de_write(dev_priv, TRANS_VBLANK(dsi_trans), + VBLANK_START(vactive - 1) | VBLANK_END(vtotal - 1)); } } } @@ -1044,17 +991,14 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder) struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; - u32 tmp; for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans)); - tmp |= PIPECONF_ENABLE; - intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp); + intel_de_rmw(dev_priv, TRANSCONF(dsi_trans), 0, TRANSCONF_ENABLE); /* wait for transcoder to be enabled */ - if (intel_de_wait_for_set(dev_priv, PIPECONF(dsi_trans), - PIPECONF_STATE_ENABLE, 10)) + if (intel_de_wait_for_set(dev_priv, TRANSCONF(dsi_trans), + TRANSCONF_STATE_ENABLE, 10)) drm_err(&dev_priv->drm, "DSI transcoder not enabled\n"); } @@ -1067,7 +1011,7 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder, struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; - u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul; + u32 hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul; /* * escape clock count calculation: @@ -1087,26 +1031,23 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder, dsi_trans = dsi_port_to_transcoder(port); /* program hst_tx_timeout */ - tmp = intel_de_read(dev_priv, DSI_HSTX_TO(dsi_trans)); - tmp &= ~HSTX_TIMEOUT_VALUE_MASK; - tmp |= HSTX_TIMEOUT_VALUE(hs_tx_timeout); - intel_de_write(dev_priv, DSI_HSTX_TO(dsi_trans), tmp); + intel_de_rmw(dev_priv, DSI_HSTX_TO(dsi_trans), + HSTX_TIMEOUT_VALUE_MASK, + HSTX_TIMEOUT_VALUE(hs_tx_timeout)); /* FIXME: DSI_CALIB_TO */ /* program lp_rx_host timeout */ - tmp = intel_de_read(dev_priv, DSI_LPRX_HOST_TO(dsi_trans)); - tmp &= ~LPRX_TIMEOUT_VALUE_MASK; - tmp |= LPRX_TIMEOUT_VALUE(lp_rx_timeout); - intel_de_write(dev_priv, DSI_LPRX_HOST_TO(dsi_trans), tmp); + intel_de_rmw(dev_priv, DSI_LPRX_HOST_TO(dsi_trans), + 
LPRX_TIMEOUT_VALUE_MASK, + LPRX_TIMEOUT_VALUE(lp_rx_timeout)); /* FIXME: DSI_PWAIT_TO */ /* program turn around timeout */ - tmp = intel_de_read(dev_priv, DSI_TA_TO(dsi_trans)); - tmp &= ~TA_TIMEOUT_VALUE_MASK; - tmp |= TA_TIMEOUT_VALUE(ta_timeout); - intel_de_write(dev_priv, DSI_TA_TO(dsi_trans), tmp); + intel_de_rmw(dev_priv, DSI_TA_TO(dsi_trans), + TA_TIMEOUT_VALUE_MASK, + TA_TIMEOUT_VALUE(ta_timeout)); } } @@ -1310,19 +1251,16 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder) struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; - u32 tmp; for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); /* disable transcoder */ - tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans)); - tmp &= ~PIPECONF_ENABLE; - intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp); + intel_de_rmw(dev_priv, TRANSCONF(dsi_trans), TRANSCONF_ENABLE, 0); /* wait for transcoder to be disabled */ - if (intel_de_wait_for_clear(dev_priv, PIPECONF(dsi_trans), - PIPECONF_STATE_ENABLE, 50)) + if (intel_de_wait_for_clear(dev_priv, TRANSCONF(dsi_trans), + TRANSCONF_STATE_ENABLE, 50)) drm_err(&dev_priv->drm, "DSI trancoder not disabled\n"); } @@ -1350,11 +1288,9 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) /* disable periodic update mode */ if (is_cmd_mode(intel_dsi)) { - for_each_dsi_port(port, intel_dsi->ports) { - tmp = intel_de_read(dev_priv, DSI_CMD_FRMCTL(port)); - tmp &= ~DSI_PERIODIC_FRAME_UPDATE_ENABLE; - intel_de_write(dev_priv, DSI_CMD_FRMCTL(port), tmp); - } + for_each_dsi_port(port, intel_dsi->ports) + intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port), + DSI_PERIODIC_FRAME_UPDATE_ENABLE, 0); } /* put dsi link in ULPS */ @@ -1374,20 +1310,16 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) /* disable ddi function */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans)); - tmp &= ~TRANS_DDI_FUNC_ENABLE; - intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans), tmp); + intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans), + TRANS_DDI_FUNC_ENABLE, 0); } /* disable port sync mode if dual link */ if (intel_dsi->dual_link) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - tmp = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL2(dsi_trans)); - tmp &= ~PORT_SYNC_MODE_ENABLE; - intel_de_write(dev_priv, - TRANS_DDI_FUNC_CTL2(dsi_trans), tmp); + intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL2(dsi_trans), + PORT_SYNC_MODE_ENABLE, 0); } } } @@ -1396,14 +1328,11 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - u32 tmp; enum port port; gen11_dsi_ungate_clocks(encoder); for_each_dsi_port(port, intel_dsi->ports) { - tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port)); - tmp &= ~DDI_BUF_CTL_ENABLE; - intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp); + intel_de_rmw(dev_priv, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0); if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) & DDI_BUF_IS_IDLE), @@ -1420,7 +1349,6 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - u32 tmp; for_each_dsi_port(port, intel_dsi->ports) { intel_wakeref_t wakeref; @@ -1434,11 +1362,9 @@ 
static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) } /* set mode to DDI */ - for_each_dsi_port(port, intel_dsi->ports) { - tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port)); - tmp &= ~COMBO_PHY_MODE_DSI; - intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp); - } + for_each_dsi_port(port, intel_dsi->ports) + intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port), + COMBO_PHY_MODE_DSI, 0); } static void gen11_dsi_disable(struct intel_atomic_state *state, @@ -1754,8 +1680,8 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, goto out; } - tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans)); - ret = tmp & PIPECONF_ENABLE; + tmp = intel_de_read(dev_priv, TRANSCONF(dsi_trans)); + ret = tmp & TRANSCONF_ENABLE; } out: intel_display_power_put(dev_priv, encoder->power_domain, wakeref); diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index 1409bcfb6fd3..719a60e278f3 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -34,11 +34,10 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_fourcc.h> -#include "gt/intel_rps.h" - #include "i915_config.h" #include "intel_atomic_plane.h" #include "intel_cdclk.h" +#include "intel_display_rps.h" #include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_fb.h" @@ -363,6 +362,7 @@ void intel_plane_set_invisible(struct intel_crtc_state *crtc_state, crtc_state->scaled_planes &= ~BIT(plane->id); crtc_state->nv12_planes &= ~BIT(plane->id); crtc_state->c8_planes &= ~BIT(plane->id); + crtc_state->async_flip_planes &= ~BIT(plane->id); crtc_state->data_rate[plane->id] = 0; crtc_state->data_rate_y[plane->id] = 0; crtc_state->rel_data_rate[plane->id] = 0; @@ -582,8 +582,10 @@ static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_cr intel_plane_is_scaled(new_plane_state)))) new_crtc_state->disable_lp_wm = true; - if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state)) + if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state)) { new_crtc_state->do_async_flip = true; + new_crtc_state->async_flip_planes |= BIT(plane->id); + } return 0; } @@ -938,64 +940,6 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state, return 0; } -struct wait_rps_boost { - struct wait_queue_entry wait; - - struct drm_crtc *crtc; - struct i915_request *request; -}; - -static int do_rps_boost(struct wait_queue_entry *_wait, - unsigned mode, int sync, void *key) -{ - struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait); - struct i915_request *rq = wait->request; - - /* - * If we missed the vblank, but the request is already running it - * is reasonable to assume that it will complete before the next - * vblank without our intervention, so leave RPS alone. 
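The block being deleted here and just below (it reappears in intel_display_rps.c elsewhere in this series) is an instance of a generally useful kernel idiom: rather than sleeping on a waitqueue, queue a wait_queue_entry with a custom wake function, so arbitrary work runs the next time the queue is woken (here, the next vblank). A minimal sketch of the idiom, independent of the RPS specifics:

    #include <linux/slab.h>
    #include <linux/wait.h>

    struct toy_cb {
            struct wait_queue_entry wait;
            void *data;
    };

    static int toy_wake_fn(struct wait_queue_entry *entry,
                           unsigned int mode, int sync, void *key)
    {
            struct toy_cb *cb = container_of(entry, struct toy_cb, wait);

            /* ... act on cb->data here, in wake-up context ... */

            list_del(&entry->entry);
            kfree(cb);
            return 1;
    }

    static void toy_call_on_next_wake(struct wait_queue_head *wq, void *data)
    {
            struct toy_cb *cb = kmalloc(sizeof(*cb), GFP_KERNEL);

            if (!cb)
                    return;

            cb->data = data;
            cb->wait.flags = 0;
            cb->wait.private = NULL;
            cb->wait.func = toy_wake_fn;

            add_wait_queue(wq, &cb->wait);
    }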
- */ - if (!i915_request_started(rq)) - intel_rps_boost(rq); - i915_request_put(rq); - - drm_crtc_vblank_put(wait->crtc); - - list_del(&wait->wait.entry); - kfree(wait); - return 1; -} - -static void add_rps_boost_after_vblank(struct drm_crtc *crtc, - struct dma_fence *fence) -{ - struct wait_rps_boost *wait; - - if (!dma_fence_is_i915(fence)) - return; - - if (DISPLAY_VER(to_i915(crtc->dev)) < 6) - return; - - if (drm_crtc_vblank_get(crtc)) - return; - - wait = kmalloc(sizeof(*wait), GFP_KERNEL); - if (!wait) { - drm_crtc_vblank_put(crtc); - return; - } - - wait->request = to_request(dma_fence_get(fence)); - wait->crtc = crtc; - - wait->wait.func = do_rps_boost; - wait->wait.flags = 0; - - add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait); -} - /** * intel_prepare_plane_fb - Prepare fb for usage on plane * @_plane: drm plane to prepare for @@ -1086,13 +1030,13 @@ intel_prepare_plane_fb(struct drm_plane *_plane, dma_resv_iter_begin(&cursor, obj->base.resv, DMA_RESV_USAGE_WRITE); dma_resv_for_each_fence_unlocked(&cursor, fence) { - add_rps_boost_after_vblank(new_plane_state->hw.crtc, - fence); + intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc, + fence); } dma_resv_iter_end(&cursor); } else { - add_rps_boost_after_vblank(new_plane_state->hw.crtc, - new_plane_state->uapi.fence); + intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc, + new_plane_state->uapi.fence); } /* @@ -1103,10 +1047,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane, * that are not quite steady state without resorting to forcing * maximum clocks following a vblank miss (see do_rps_boost()). */ - if (!state->rps_interactive) { - intel_rps_mark_interactive(&to_gt(dev_priv)->rps, true); - state->rps_interactive = true; - } + intel_display_rps_mark_interactive(dev_priv, state, true); return 0; @@ -1137,10 +1078,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane, if (!obj) return; - if (state->rps_interactive) { - intel_rps_mark_interactive(&to_gt(dev_priv)->rps, false); - state->rps_interactive = false; - } + intel_display_rps_mark_interactive(dev_priv, state, false); /* Should only be called after a successful intel_prepare_plane_fb()! 
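/*
 * The deleted do_rps_boost()/add_rps_boost_after_vblank() machinery and
 * the open-coded rps_interactive tracking move behind the new
 * intel_display_rps_* interface included above. Only the call sites are
 * visible in this diff; a plausible shape for the mark-interactive
 * wrapper, offered as a sketch rather than the actual implementation:
 */
void intel_display_rps_mark_interactive(struct drm_i915_private *i915,
                                        struct intel_atomic_state *state,
                                        bool interactive)
{
        if (state->rps_interactive == interactive)
                return;

        intel_rps_mark_interactive(&to_gt(i915)->rps, interactive);
        state->rps_interactive = interactive;
}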
*/ intel_plane_unpin_fb(old_plane_state); diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index a9335c856644..65151f5dcb15 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -581,8 +581,7 @@ static void enable_audio_dsc_wa(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - enum pipe pipe = crtc->pipe; + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; unsigned int hblank_early_prog, samples_room; unsigned int val; @@ -592,32 +591,32 @@ static void enable_audio_dsc_wa(struct intel_encoder *encoder, val = intel_de_read(i915, AUD_CONFIG_BE); if (DISPLAY_VER(i915) == 11) - val |= HBLANK_EARLY_ENABLE_ICL(pipe); + val |= HBLANK_EARLY_ENABLE_ICL(cpu_transcoder); else if (DISPLAY_VER(i915) >= 12) - val |= HBLANK_EARLY_ENABLE_TGL(pipe); + val |= HBLANK_EARLY_ENABLE_TGL(cpu_transcoder); if (crtc_state->dsc.compression_enable && crtc_state->hw.adjusted_mode.hdisplay >= 3840 && crtc_state->hw.adjusted_mode.vdisplay >= 2160) { /* Get hblank early enable value required */ - val &= ~HBLANK_START_COUNT_MASK(pipe); + val &= ~HBLANK_START_COUNT_MASK(cpu_transcoder); hblank_early_prog = calc_hblank_early_prog(encoder, crtc_state); if (hblank_early_prog < 32) - val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_32); + val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_32); else if (hblank_early_prog < 64) - val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_64); + val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_64); else if (hblank_early_prog < 96) - val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_96); + val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_96); else - val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_128); + val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_128); /* Get samples room value required */ - val &= ~NUMBER_SAMPLES_PER_LINE_MASK(pipe); + val &= ~NUMBER_SAMPLES_PER_LINE_MASK(cpu_transcoder); samples_room = calc_samples_room(crtc_state); if (samples_room < 3) - val |= NUMBER_SAMPLES_PER_LINE(pipe, samples_room); + val |= NUMBER_SAMPLES_PER_LINE(cpu_transcoder, samples_room); else /* Program 0 i.e "All Samples available in buffer" */ - val |= NUMBER_SAMPLES_PER_LINE(pipe, 0x0); + val |= NUMBER_SAMPLES_PER_LINE(cpu_transcoder, 0x0); } intel_de_write(i915, AUD_CONFIG_BE, val); @@ -812,9 +811,9 @@ void intel_audio_codec_enable(struct intel_encoder *encoder, struct i915_audio_component *acomp = i915->display.audio.component; struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_connector *connector = to_intel_connector(conn_state->connector); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; struct intel_audio_state *audio_state; enum port port = encoder->port; - enum pipe pipe = crtc->pipe; if (!crtc_state->has_audio) return; @@ -832,7 +831,7 @@ void intel_audio_codec_enable(struct intel_encoder *encoder, mutex_lock(&i915->display.audio.mutex); - audio_state = &i915->display.audio.state[pipe]; + audio_state = &i915->display.audio.state[cpu_transcoder]; audio_state->encoder = encoder; BUILD_BUG_ON(sizeof(audio_state->eld) != sizeof(crtc_state->eld)); @@ -842,14 +841,14 @@ void intel_audio_codec_enable(struct intel_encoder *encoder, if (acomp && acomp->base.audio_ops && acomp->base.audio_ops->pin_eld_notify) { - /* audio drivers expect pipe 
= -1 to indicate Non-MST cases */ + /* audio drivers expect cpu_transcoder = -1 to indicate Non-MST cases */ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) - pipe = -1; + cpu_transcoder = -1; acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr, - (int)port, (int)pipe); + (int)port, (int)cpu_transcoder); } - intel_lpe_audio_notify(i915, pipe, port, crtc_state->eld, + intel_lpe_audio_notify(i915, cpu_transcoder, port, crtc_state->eld, crtc_state->port_clock, intel_crtc_has_dp_encoder(crtc_state)); } @@ -871,9 +870,9 @@ void intel_audio_codec_disable(struct intel_encoder *encoder, struct i915_audio_component *acomp = i915->display.audio.component; struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct intel_connector *connector = to_intel_connector(old_conn_state->connector); + enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; struct intel_audio_state *audio_state; enum port port = encoder->port; - enum pipe pipe = crtc->pipe; if (!old_crtc_state->has_audio) return; @@ -890,7 +889,7 @@ void intel_audio_codec_disable(struct intel_encoder *encoder, mutex_lock(&i915->display.audio.mutex); - audio_state = &i915->display.audio.state[pipe]; + audio_state = &i915->display.audio.state[cpu_transcoder]; audio_state->encoder = NULL; memset(audio_state->eld, 0, sizeof(audio_state->eld)); @@ -899,27 +898,26 @@ void intel_audio_codec_disable(struct intel_encoder *encoder, if (acomp && acomp->base.audio_ops && acomp->base.audio_ops->pin_eld_notify) { - /* audio drivers expect pipe = -1 to indicate Non-MST cases */ + /* audio drivers expect cpu_transcoder = -1 to indicate Non-MST cases */ if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) - pipe = -1; + cpu_transcoder = -1; acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr, - (int)port, (int)pipe); + (int)port, (int)cpu_transcoder); } - intel_lpe_audio_notify(i915, pipe, port, NULL, 0, false); + intel_lpe_audio_notify(i915, cpu_transcoder, port, NULL, 0, false); } static void intel_acomp_get_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; struct intel_audio_state *audio_state; - enum pipe pipe = crtc->pipe; mutex_lock(&i915->display.audio.mutex); - audio_state = &i915->display.audio.state[pipe]; + audio_state = &i915->display.audio.state[cpu_transcoder]; if (audio_state->encoder) memcpy(crtc_state->eld, audio_state->eld, sizeof(audio_state->eld)); @@ -1147,27 +1145,27 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev) } /* - * get the intel audio state according to the parameter port and pipe - * MST & (pipe >= 0): return the audio.state[pipe].encoder], + * get the intel audio state according to the parameter port and cpu_transcoder + * MST & (cpu_transcoder >= 0): return the audio.state[cpu_transcoder].encoder], * when port is matched - * MST & (pipe < 0): this is invalid - * Non-MST & (pipe >= 0): only pipe = 0 (the first device entry) + * MST & (cpu_transcoder < 0): this is invalid + * Non-MST & (cpu_transcoder >= 0): only cpu_transcoder = 0 (the first device entry) * will get the right intel_encoder with port matched - * Non-MST & (pipe < 0): get the right intel_encoder with port matched + * Non-MST & (cpu_transcoder < 0): get the right intel_encoder with port matched */ static struct intel_audio_state 
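/*
 * With the audio state array now indexed by cpu_transcoder, the second
 * integer in the component ops changes meaning but not shape: -1 still
 * means "non-MST, search by port". A hypothetical consumer-side call;
 * the i915-side handler appears below, but the HDA-side plumbing via
 * acomp->ops and the variable names here are assumptions:
 */
bool enabled;
unsigned char eld[128];         /* an ELD is at most 128 bytes */
int ret = acomp->ops->get_eld(i915_kdev, port, -1 /* non-MST */,
                              &enabled, eld, sizeof(eld));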
*find_audio_state(struct drm_i915_private *i915, - int port, int pipe) + int port, int cpu_transcoder) { /* MST */ - if (pipe >= 0) { + if (cpu_transcoder >= 0) { struct intel_audio_state *audio_state; struct intel_encoder *encoder; if (drm_WARN_ON(&i915->drm, - pipe >= ARRAY_SIZE(i915->display.audio.state))) + cpu_transcoder >= ARRAY_SIZE(i915->display.audio.state))) return NULL; - audio_state = &i915->display.audio.state[pipe]; + audio_state = &i915->display.audio.state[cpu_transcoder]; encoder = audio_state->encoder; if (encoder && encoder->port == port && @@ -1176,14 +1174,14 @@ static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915, } /* Non-MST */ - if (pipe > 0) + if (cpu_transcoder > 0) return NULL; - for_each_pipe(i915, pipe) { + for_each_cpu_transcoder(i915, cpu_transcoder) { struct intel_audio_state *audio_state; struct intel_encoder *encoder; - audio_state = &i915->display.audio.state[pipe]; + audio_state = &i915->display.audio.state[cpu_transcoder]; encoder = audio_state->encoder; if (encoder && encoder->port == port && @@ -1195,7 +1193,7 @@ static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915, } static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, - int pipe, int rate) + int cpu_transcoder, int rate) { struct drm_i915_private *i915 = kdev_to_i915(kdev); struct i915_audio_component *acomp = i915->display.audio.component; @@ -1211,7 +1209,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, cookie = i915_audio_component_get_power(kdev); mutex_lock(&i915->display.audio.mutex); - audio_state = find_audio_state(i915, port, pipe); + audio_state = find_audio_state(i915, port, cpu_transcoder); if (!audio_state) { drm_dbg_kms(&i915->drm, "Not valid for port %c\n", port_name(port)); err = -ENODEV; @@ -1223,7 +1221,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, /* FIXME stop using the legacy crtc pointer */ crtc = to_intel_crtc(encoder->base.crtc); - /* port must be valid now, otherwise the pipe will be invalid */ + /* port must be valid now, otherwise the cpu_transcoder will be invalid */ acomp->aud_sample_rate[port] = rate; /* FIXME get rid of the crtc->config stuff */ @@ -1236,7 +1234,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, } static int i915_audio_component_get_eld(struct device *kdev, int port, - int pipe, bool *enabled, + int cpu_transcoder, bool *enabled, unsigned char *buf, int max_bytes) { struct drm_i915_private *i915 = kdev_to_i915(kdev); @@ -1245,7 +1243,7 @@ static int i915_audio_component_get_eld(struct device *kdev, int port, mutex_lock(&i915->display.audio.mutex); - audio_state = find_audio_state(i915, port, pipe); + audio_state = find_audio_state(i915, port, cpu_transcoder); if (!audio_state) { drm_dbg_kms(&i915->drm, "Not valid for port %c\n", port_name(port)); mutex_unlock(&i915->display.audio.mutex); diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c index a4e4b7f79e4d..2e8f17c04522 100644 --- a/drivers/gpu/drm/i915/display/intel_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_backlight.c @@ -105,7 +105,8 @@ void intel_backlight_set_pwm_level(const struct drm_connector_state *conn_state, struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; - drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", val); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] set backlight PWM = 
%d\n", + connector->base.base.id, connector->base.name, val); panel->backlight.pwm_funcs->set(conn_state, val); } @@ -283,7 +284,8 @@ intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state, struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; - drm_dbg_kms(&i915->drm, "set backlight level = %d\n", level); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] set backlight level = %d\n", + connector->base.base.id, connector->base.name, level); panel->backlight.funcs->set(conn_state, level); } @@ -345,27 +347,24 @@ static void lpt_disable_backlight(const struct drm_connector_state *old_conn_sta */ tmp = intel_de_read(i915, BLC_PWM_CPU_CTL2); if (tmp & BLM_PWM_ENABLE) { - drm_dbg_kms(&i915->drm, "cpu backlight was enabled, disabling\n"); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] CPU backlight was enabled, disabling\n", + connector->base.base.id, connector->base.name); intel_de_write(i915, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE); } - tmp = intel_de_read(i915, BLC_PWM_PCH_CTL1); - intel_de_write(i915, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE); + intel_de_rmw(i915, BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE, 0); } static void pch_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) { struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); - u32 tmp; intel_backlight_set_pwm_level(old_conn_state, val); - tmp = intel_de_read(i915, BLC_PWM_CPU_CTL2); - intel_de_write(i915, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE); + intel_de_rmw(i915, BLC_PWM_CPU_CTL2, BLM_PWM_ENABLE, 0); - tmp = intel_de_read(i915, BLC_PWM_PCH_CTL1); - intel_de_write(i915, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE); + intel_de_rmw(i915, BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE, 0); } static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) @@ -376,12 +375,10 @@ static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_st static void i965_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) { struct drm_i915_private *i915 = to_i915(old_conn_state->connector->dev); - u32 tmp; intel_backlight_set_pwm_level(old_conn_state, val); - tmp = intel_de_read(i915, BLC_PWM_CTL2); - intel_de_write(i915, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE); + intel_de_rmw(i915, BLC_PWM_CTL2, BLM_PWM_ENABLE, 0); } static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) @@ -389,12 +386,10 @@ static void vlv_disable_backlight(const struct drm_connector_state *old_conn_sta struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe; - u32 tmp; intel_backlight_set_pwm_level(old_conn_state, val); - tmp = intel_de_read(i915, VLV_BLC_PWM_CTL2(pipe)); - intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE); + intel_de_rmw(i915, VLV_BLC_PWM_CTL2(pipe), BLM_PWM_ENABLE, 0); } static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) @@ -402,19 +397,14 @@ static void bxt_disable_backlight(const struct drm_connector_state *old_conn_sta struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; - u32 tmp; intel_backlight_set_pwm_level(old_conn_state, val); - 
tmp = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller)); - intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), - tmp & ~BXT_BLC_PWM_ENABLE); + intel_de_rmw(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), + BXT_BLC_PWM_ENABLE, 0); - if (panel->backlight.controller == 1) { - val = intel_de_read(i915, UTIL_PIN_CTL); - val &= ~UTIL_PIN_ENABLE; - intel_de_write(i915, UTIL_PIN_CTL, val); - } + if (panel->backlight.controller == 1) + intel_de_rmw(i915, UTIL_PIN_CTL, UTIL_PIN_ENABLE, 0); } static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) @@ -422,13 +412,11 @@ static void cnp_disable_backlight(const struct drm_connector_state *old_conn_sta struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; - u32 tmp; intel_backlight_set_pwm_level(old_conn_state, val); - tmp = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller)); - intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), - tmp & ~BXT_BLC_PWM_ENABLE); + intel_de_rmw(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), + BXT_BLC_PWM_ENABLE, 0); } static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level) @@ -458,7 +446,8 @@ void intel_backlight_disable(const struct drm_connector_state *old_conn_state) * another client is not activated. */ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) { - drm_dbg_kms(&i915->drm, "Skipping backlight disable on vga switch\n"); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Skipping backlight disable on vga switch\n", + connector->base.base.id, connector->base.name); return; } @@ -478,30 +467,24 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state, struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; - u32 pch_ctl1, pch_ctl2, schicken; + u32 pch_ctl1, pch_ctl2; pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1); if (pch_ctl1 & BLM_PCH_PWM_ENABLE) { - drm_dbg_kms(&i915->drm, "pch backlight already enabled\n"); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PCH backlight already enabled\n", + connector->base.base.id, connector->base.name); pch_ctl1 &= ~BLM_PCH_PWM_ENABLE; intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1); } - if (HAS_PCH_LPT(i915)) { - schicken = intel_de_read(i915, SOUTH_CHICKEN2); - if (panel->backlight.alternate_pwm_increment) - schicken |= LPT_PWM_GRANULARITY; - else - schicken &= ~LPT_PWM_GRANULARITY; - intel_de_write(i915, SOUTH_CHICKEN2, schicken); - } else { - schicken = intel_de_read(i915, SOUTH_CHICKEN1); - if (panel->backlight.alternate_pwm_increment) - schicken |= SPT_PWM_GRANULARITY; - else - schicken &= ~SPT_PWM_GRANULARITY; - intel_de_write(i915, SOUTH_CHICKEN1, schicken); - } + if (HAS_PCH_LPT(i915)) + intel_de_rmw(i915, SOUTH_CHICKEN2, LPT_PWM_GRANULARITY, + panel->backlight.alternate_pwm_increment ? + LPT_PWM_GRANULARITY : 0); + else + intel_de_rmw(i915, SOUTH_CHICKEN1, SPT_PWM_GRANULARITY, + panel->backlight.alternate_pwm_increment ? 
+ SPT_PWM_GRANULARITY : 0); pch_ctl2 = panel->backlight.pwm_level_max << 16; intel_de_write(i915, BLC_PWM_PCH_CTL2, pch_ctl2); @@ -533,14 +516,16 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state, cpu_ctl2 = intel_de_read(i915, BLC_PWM_CPU_CTL2); if (cpu_ctl2 & BLM_PWM_ENABLE) { - drm_dbg_kms(&i915->drm, "cpu backlight already enabled\n"); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] CPU backlight already enabled\n", + connector->base.base.id, connector->base.name); cpu_ctl2 &= ~BLM_PWM_ENABLE; intel_de_write(i915, BLC_PWM_CPU_CTL2, cpu_ctl2); } pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1); if (pch_ctl1 & BLM_PCH_PWM_ENABLE) { - drm_dbg_kms(&i915->drm, "pch backlight already enabled\n"); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PCH backlight already enabled\n", + connector->base.base.id, connector->base.name); pch_ctl1 &= ~BLM_PCH_PWM_ENABLE; intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1); } @@ -578,7 +563,8 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state, ctl = intel_de_read(i915, BLC_PWM_CTL); if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) { - drm_dbg_kms(&i915->drm, "backlight already enabled\n"); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n", + connector->base.base.id, connector->base.name); intel_de_write(i915, BLC_PWM_CTL, 0); } @@ -618,7 +604,8 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state, ctl2 = intel_de_read(i915, BLC_PWM_CTL2); if (ctl2 & BLM_PWM_ENABLE) { - drm_dbg_kms(&i915->drm, "backlight already enabled\n"); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n", + connector->base.base.id, connector->base.name); ctl2 &= ~BLM_PWM_ENABLE; intel_de_write(i915, BLC_PWM_CTL2, ctl2); } @@ -653,7 +640,8 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state, ctl2 = intel_de_read(i915, VLV_BLC_PWM_CTL2(pipe)); if (ctl2 & BLM_PWM_ENABLE) { - drm_dbg_kms(&i915->drm, "backlight already enabled\n"); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n", + connector->base.base.id, connector->base.name); ctl2 &= ~BLM_PWM_ENABLE; intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), ctl2); } @@ -685,7 +673,8 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state, if (panel->backlight.controller == 1) { val = intel_de_read(i915, UTIL_PIN_CTL); if (val & UTIL_PIN_ENABLE) { - drm_dbg_kms(&i915->drm, "util pin already enabled\n"); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] utility pin already enabled\n", + connector->base.base.id, connector->base.name); val &= ~UTIL_PIN_ENABLE; intel_de_write(i915, UTIL_PIN_CTL, val); } @@ -699,7 +688,8 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state, pwm_ctl = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller)); if (pwm_ctl & BXT_BLC_PWM_ENABLE) { - drm_dbg_kms(&i915->drm, "backlight already enabled\n"); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n", + connector->base.base.id, connector->base.name); pwm_ctl &= ~BXT_BLC_PWM_ENABLE; intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl); @@ -1270,6 +1260,10 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus cpu_ctl2 & ~BLM_PWM_ENABLE); } + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] Using native PCH PWM for backlight control\n", + connector->base.base.id, connector->base.name); + return 0; } @@ -1297,6 +1291,10 @@ static int pch_setup_backlight(struct intel_connector *connector, enum 
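/*
 * The LPT/SPT granularity conversion above shows the set-or-clear idiom
 * that intel_de_rmw() enables: pass the bit as the clear mask and,
 * conditionally, as the set value (SOME_BIT and cond are placeholders):
 */
intel_de_rmw(i915, reg, SOME_BIT, cond ? SOME_BIT : 0);

/* equivalent open-coded form removed by this patch: */
val = intel_de_read(i915, reg);
if (cond)
        val |= SOME_BIT;
else
        val &= ~SOME_BIT;
intel_de_write(i915, reg, val);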
pipe unus panel->backlight.pwm_enabled = (cpu_ctl2 & BLM_PWM_ENABLE) && (pch_ctl1 & BLM_PCH_PWM_ENABLE); + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] Using native PCH PWM for backlight control\n", + connector->base.base.id, connector->base.name); + return 0; } @@ -1335,6 +1333,10 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu panel->backlight.pwm_enabled = val != 0; + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] Using native PWM for backlight control\n", + connector->base.base.id, connector->base.name); + return 0; } @@ -1364,6 +1366,10 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE; + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] Using native PWM for backlight control\n", + connector->base.base.id, connector->base.name); + return 0; } @@ -1392,6 +1398,10 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE; + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] Using native PWM for backlight control (on pipe %c)\n", + connector->base.base.id, connector->base.name, pipe_name(pipe)); + return 0; } @@ -1428,6 +1438,11 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused) panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE; + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] Using native PWM for backlight control (controller=%d)\n", + connector->base.base.id, connector->base.name, + panel->backlight.controller); + return 0; } @@ -1468,7 +1483,8 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused) */ panel->backlight.controller = connector->panel.vbt.backlight.controller; if (!cnp_backlight_controller_is_valid(i915, panel->backlight.controller)) { - drm_dbg_kms(&i915->drm, "Invalid backlight controller %d, assuming 0\n", + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Invalid backlight controller %d, assuming 0\n", + connector->base.base.id, connector->base.name, panel->backlight.controller); panel->backlight.controller = 0; } @@ -1490,6 +1506,11 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused) panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE; + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] Using native PCH PWM for backlight control (controller=%d)\n", + connector->base.base.id, connector->base.name, + panel->backlight.controller); + return 0; } @@ -1511,8 +1532,8 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector, } if (IS_ERR(panel->backlight.pwm)) { - drm_err(&i915->drm, "Failed to get the %s PWM chip\n", - desc); + drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to get the %s PWM chip\n", + connector->base.base.id, connector->base.name, desc); panel->backlight.pwm = NULL; return -ENODEV; } @@ -1529,7 +1550,8 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector, level = intel_backlight_invert_pwm_level(connector, level); panel->backlight.pwm_enabled = true; - drm_dbg_kms(&i915->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n", + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PWM already enabled at freq %ld, VBT freq %d, level %d\n", + connector->base.base.id, connector->base.name, NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period, get_vbt_pwm_freq(connector), level); } else { @@ -1538,8 +1560,10 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector, NSEC_PER_SEC / get_vbt_pwm_freq(connector); } - drm_info(&i915->drm, "Using %s PWM 
for LCD backlight control\n", - desc); + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] Using %s PWM for backlight control\n", + connector->base.base.id, connector->base.name, desc); + return 0; } @@ -1582,8 +1606,9 @@ static void intel_pwm_disable_backlight(const struct drm_connector_state *conn_s static int intel_pwm_setup_backlight(struct intel_connector *connector, enum pipe pipe) { struct intel_panel *panel = &connector->panel; - int ret = panel->backlight.pwm_funcs->setup(connector, pipe); + int ret; + ret = panel->backlight.pwm_funcs->setup(connector, pipe); if (ret < 0) return ret; @@ -1623,10 +1648,12 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe) if (!connector->panel.vbt.backlight.present) { if (intel_has_quirk(i915, QUIRK_BACKLIGHT_PRESENT)) { drm_dbg_kms(&i915->drm, - "no backlight present per VBT, but present per quirk\n"); + "[CONNECTOR:%d:%s] no backlight present per VBT, but present per quirk\n", + connector->base.base.id, connector->base.name); } else { drm_dbg_kms(&i915->drm, - "no backlight present per VBT\n"); + "[CONNECTOR:%d:%s] no backlight present per VBT\n", + connector->base.base.id, connector->base.name); return 0; } } @@ -1642,16 +1669,16 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe) if (ret) { drm_dbg_kms(&i915->drm, - "failed to setup backlight for connector %s\n", - connector->base.name); + "[CONNECTOR:%d:%s] failed to setup backlight\n", + connector->base.base.id, connector->base.name); return ret; } panel->backlight.present = true; drm_dbg_kms(&i915->drm, - "Connector %s backlight initialized, %s, brightness %u/%u\n", - connector->base.name, + "[CONNECTOR:%d:%s] backlight initialized, %s, brightness %u/%u\n", + connector->base.base.id, connector->base.name, str_enabled_disabled(panel->backlight.enabled), panel->backlight.level, panel->backlight.max); diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 04b846440de6..e54febd34ca9 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -1084,6 +1084,12 @@ parse_lfp_backlight(struct drm_i915_private *i915, panel->vbt.backlight.min_brightness = entry->min_brightness; } + if (i915->display.vbt.version >= 239) + panel->vbt.backlight.hdr_dpcd_refresh_timeout = + DIV_ROUND_UP(backlight_data->hdr_dpcd_refresh_timeout[panel_type], 100); + else + panel->vbt.backlight.hdr_dpcd_refresh_timeout = 30; + drm_dbg_kms(&i915->drm, "VBT backlight PWM modulation frequency %u Hz, " "active %s, min brightness %u, level %u, controller %u\n", @@ -1202,9 +1208,7 @@ child_device_ptr(const struct bdb_general_definitions *defs, int i) static void parse_sdvo_device_mapping(struct drm_i915_private *i915) { - struct sdvo_device_mapping *mapping; const struct intel_bios_encoder_data *devdata; - const struct child_device_config *child; int count = 0; /* @@ -1217,7 +1221,8 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915) } list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { - child = &devdata->child; + const struct child_device_config *child = &devdata->child; + struct sdvo_device_mapping *mapping; if (child->slave_addr != SLAVE_ADDR1 && child->slave_addr != SLAVE_ADDR2) { @@ -2075,7 +2080,6 @@ parse_compression_parameters(struct drm_i915_private *i915) { const struct bdb_compression_parameters *params; struct intel_bios_encoder_data *devdata; - const struct child_device_config *child; u16 block_size; int index; @@ -2100,7 +2104,7 @@ 
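/*
 * On the hdr_dpcd_refresh_timeout parsing above: DIV_ROUND_UP(x, 100)
 * is (x + 99) / 100, so e.g. a raw VBT value of 2500 becomes 25 and a
 * raw value of 1 becomes 1; pre-239 VBTs get the fixed default of 30.
 * The unit of the raw field is not stated in this diff.
 */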
parse_compression_parameters(struct drm_i915_private *i915) } list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { - child = &devdata->child; + const struct child_device_config *child = &devdata->child; if (!child->compression_enable) continue; @@ -2226,14 +2230,14 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin) static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin) { - const struct intel_bios_encoder_data *devdata; enum port port; if (!ddc_pin) return PORT_NONE; for_each_port(port) { - devdata = i915->display.vbt.ports[port]; + const struct intel_bios_encoder_data *devdata = + i915->display.vbt.ports[port]; if (devdata && ddc_pin == devdata->child.ddc_pin) return port; @@ -2292,14 +2296,14 @@ static void sanitize_ddc_pin(struct intel_bios_encoder_data *devdata, static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch) { - const struct intel_bios_encoder_data *devdata; enum port port; if (!aux_ch) return PORT_NONE; for_each_port(port) { - devdata = i915->display.vbt.ports[port]; + const struct intel_bios_encoder_data *devdata = + i915->display.vbt.ports[port]; if (devdata && aux_ch == devdata->child.aux_channel) return port; @@ -2522,7 +2526,7 @@ static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate) } } -static int _intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata) +int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata) { if (!devdata || devdata->i915->display.vbt.version < 216) return 0; @@ -2533,7 +2537,7 @@ static int _intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *de return parse_bdb_216_dp_max_link_rate(devdata->child.dp_max_link_rate); } -static int _intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata) +int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata) { if (!devdata || devdata->i915->display.vbt.version < 244) return 0; @@ -2587,7 +2591,7 @@ intel_bios_encoder_supports_dp(const struct intel_bios_encoder_data *devdata) return devdata->child.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT; } -static bool +bool intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata) { return intel_bios_encoder_supports_dp(devdata) && @@ -2600,7 +2604,14 @@ intel_bios_encoder_supports_dsi(const struct intel_bios_encoder_data *devdata) return devdata->child.device_type & DEVICE_TYPE_MIPI_OUTPUT; } -static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata) +bool +intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata) +{ + return devdata && HAS_LSPCON(devdata->i915) && devdata->child.lspcon; +} + +/* This is an index in the HDMI/DVI DDI buffer translation table, or -1 */ +int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata) { if (!devdata || devdata->i915->display.vbt.version < 158) return -1; @@ -2608,7 +2619,7 @@ static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *de return devdata->child.hdmi_level_shifter_value; } -static int _intel_bios_max_tmds_clock(const struct intel_bios_encoder_data *devdata) +int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata) { if (!devdata || devdata->i915->display.vbt.version < 204) return 0; @@ -2666,37 +2677,37 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata, drm_dbg_kms(&i915->drm, "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d DSI:%d LSPCON:%d USB-Type-C:%d TBT:%d 
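/*
 * The accessors un-static'ed above share one guard pattern: a child
 * device field is only trusted when the BDB version is at least the one
 * that introduced it, with 0 or -1 meaning "not specified". Condensed
 * from the DP max link rate hunk above (version 216 and the parse
 * helper are shown in this patch; later-version variants are omitted):
 */
int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata)
{
        /* dp_max_link_rate only exists from VBT version 216 onwards */
        if (!devdata || devdata->i915->display.vbt.version < 216)
                return 0;       /* 0: VBT does not specify a limit */

        return parse_bdb_216_dp_max_link_rate(devdata->child.dp_max_link_rate);
}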
DSC:%d\n", port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp, is_dsi, - HAS_LSPCON(i915) && child->lspcon, + intel_bios_encoder_is_lspcon(devdata), supports_typec_usb, supports_tbt, devdata->dsc != NULL); - hdmi_level_shift = _intel_bios_hdmi_level_shift(devdata); + hdmi_level_shift = intel_bios_hdmi_level_shift(devdata); if (hdmi_level_shift >= 0) { drm_dbg_kms(&i915->drm, "Port %c VBT HDMI level shift: %d\n", port_name(port), hdmi_level_shift); } - max_tmds_clock = _intel_bios_max_tmds_clock(devdata); + max_tmds_clock = intel_bios_hdmi_max_tmds_clock(devdata); if (max_tmds_clock) drm_dbg_kms(&i915->drm, "Port %c VBT HDMI max TMDS clock: %d kHz\n", port_name(port), max_tmds_clock); /* I_boost config for SKL and above */ - dp_boost_level = intel_bios_encoder_dp_boost_level(devdata); + dp_boost_level = intel_bios_dp_boost_level(devdata); if (dp_boost_level) drm_dbg_kms(&i915->drm, "Port %c VBT (e)DP boost level: %d\n", port_name(port), dp_boost_level); - hdmi_boost_level = intel_bios_encoder_hdmi_boost_level(devdata); + hdmi_boost_level = intel_bios_hdmi_boost_level(devdata); if (hdmi_boost_level) drm_dbg_kms(&i915->drm, "Port %c VBT HDMI boost level: %d\n", port_name(port), hdmi_boost_level); - dp_max_link_rate = _intel_bios_dp_max_link_rate(devdata); + dp_max_link_rate = intel_bios_dp_max_link_rate(devdata); if (dp_max_link_rate) drm_dbg_kms(&i915->drm, "Port %c VBT DP max link rate: %d\n", @@ -2811,7 +2822,7 @@ parse_general_definitions(struct drm_i915_private *i915) expected_size = 37; } else if (i915->display.vbt.version <= 215) { expected_size = 38; - } else if (i915->display.vbt.version <= 237) { + } else if (i915->display.vbt.version <= 250) { expected_size = 39; } else { expected_size = sizeof(*child); @@ -3306,7 +3317,6 @@ void intel_bios_fini_panel(struct intel_panel *panel) bool intel_bios_is_tv_present(struct drm_i915_private *i915) { const struct intel_bios_encoder_data *devdata; - const struct child_device_config *child; if (!i915->display.vbt.int_tv_support) return false; @@ -3315,7 +3325,7 @@ bool intel_bios_is_tv_present(struct drm_i915_private *i915) return true; list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { - child = &devdata->child; + const struct child_device_config *child = &devdata->child; /* * If the device type is not TV, continue. @@ -3349,13 +3359,12 @@ bool intel_bios_is_tv_present(struct drm_i915_private *i915) bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin) { const struct intel_bios_encoder_data *devdata; - const struct child_device_config *child; if (list_empty(&i915->display.vbt.display_devices)) return true; list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { - child = &devdata->child; + const struct child_device_config *child = &devdata->child; /* If the device type is not LFP, continue. * We have to check both the new identifiers as well as the @@ -3397,25 +3406,22 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin) */ bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port) { + const struct intel_bios_encoder_data *devdata; + if (WARN_ON(!has_ddi_port_info(i915))) return true; - return i915->display.vbt.ports[port]; -} + if (!is_port_valid(i915, port)) + return false; -/** - * intel_bios_is_port_edp - is the device in given port eDP - * @i915: i915 device instance - * @port: port to check - * - * Return true if the device in %port is eDP. 
- */ -bool intel_bios_is_port_edp(struct drm_i915_private *i915, enum port port) -{ - const struct intel_bios_encoder_data *devdata = - intel_bios_encoder_data_lookup(i915, port); + list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { + const struct child_device_config *child = &devdata->child; + + if (dvo_port_to_port(i915, child->dvo_port) == port) + return true; + } - return devdata && intel_bios_encoder_supports_edp(devdata); + return false; } static bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_data *devdata) @@ -3457,17 +3463,14 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *i915, enum port *port) { const struct intel_bios_encoder_data *devdata; - const struct child_device_config *child; - u8 dvo_port; list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { - child = &devdata->child; + const struct child_device_config *child = &devdata->child; + u8 dvo_port = child->dvo_port; if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT)) continue; - dvo_port = child->dvo_port; - if (dsi_dvo_port_to_port(i915, dvo_port) == PORT_NONE) { drm_dbg_kms(&i915->drm, "VBT has unsupported DSI port %c\n", @@ -3554,10 +3557,9 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder, { struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct intel_bios_encoder_data *devdata; - const struct child_device_config *child; list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { - child = &devdata->child; + const struct child_device_config *child = &devdata->child; if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT)) continue; @@ -3576,73 +3578,10 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder, return false; } -/** - * intel_bios_is_port_hpd_inverted - is HPD inverted for %port - * @i915: i915 device instance - * @port: port to check - * - * Return true if HPD should be inverted for %port. 
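/*
 * The intel_bios_is_port_edp() body deleted above doubles as the recipe
 * for devdata-based callers: look the port up once, then use the
 * exported predicate (intel_bios_encoder_supports_edp() is made
 * non-static by this patch; pairing the calls like this at a call site
 * is an assumption):
 */
const struct intel_bios_encoder_data *devdata =
        intel_bios_encoder_data_lookup(i915, port);
bool is_edp = devdata && intel_bios_encoder_supports_edp(devdata);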
- */ -bool -intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915, - enum port port) +static enum aux_ch map_aux_ch(struct drm_i915_private *i915, u8 aux_channel) { - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port]; - - if (drm_WARN_ON_ONCE(&i915->drm, - !IS_GEMINILAKE(i915) && !IS_BROXTON(i915))) - return false; - - return devdata && devdata->child.hpd_invert; -} - -/** - * intel_bios_is_lspcon_present - if LSPCON is attached on %port - * @i915: i915 device instance - * @port: port to check - * - * Return true if LSPCON is present on this port - */ -bool -intel_bios_is_lspcon_present(const struct drm_i915_private *i915, - enum port port) -{ - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port]; - - return HAS_LSPCON(i915) && devdata && devdata->child.lspcon; -} - -/** - * intel_bios_is_lane_reversal_needed - if lane reversal needed on port - * @i915: i915 device instance - * @port: port to check - * - * Return true if port requires lane reversal - */ -bool -intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915, - enum port port) -{ - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port]; - - return devdata && devdata->child.lane_reversal; -} - -enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915, - enum port port) -{ - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port]; enum aux_ch aux_ch; - if (!devdata || !devdata->child.aux_channel) { - aux_ch = (enum aux_ch)port; - - drm_dbg_kms(&i915->drm, - "using AUX %c for port %c (platform default)\n", - aux_ch_name(aux_ch), port_name(port)); - return aux_ch; - } - /* * RKL/DG1 VBT uses PHY based mapping. Combo PHYs A,B,C,D * map to DDI A,B,TC1,TC2 respectively. @@ -3650,7 +3589,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915, * ADL-S VBT uses PHY based mapping. Combo PHYs A,B,C,D,E * map to DDI A,TC1,TC2,TC3,TC4 respectively. 
*/ - switch (devdata->child.aux_channel) { + switch (aux_channel) { case DP_AUX_A: aux_ch = AUX_CH_A; break; @@ -3711,35 +3650,23 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915, aux_ch = AUX_CH_I; break; default: - MISSING_CASE(devdata->child.aux_channel); + MISSING_CASE(aux_channel); aux_ch = AUX_CH_A; break; } - drm_dbg_kms(&i915->drm, "using AUX %c for port %c (VBT)\n", - aux_ch_name(aux_ch), port_name(port)); - return aux_ch; } -int intel_bios_max_tmds_clock(struct intel_encoder *encoder) +enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port]; + if (!devdata || !devdata->child.aux_channel) + return AUX_CH_NONE; - return _intel_bios_max_tmds_clock(devdata); + return map_aux_ch(devdata->i915, devdata->child.aux_channel); } -/* This is an index in the HDMI/DVI DDI buffer translation table, or -1 */ -int intel_bios_hdmi_level_shift(struct intel_encoder *encoder) -{ - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port]; - - return _intel_bios_hdmi_level_shift(devdata); -} - -int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata) +int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata) { if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost) return 0; @@ -3747,7 +3674,7 @@ int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devd return translate_iboost(devdata->child.dp_iboost_level); } -int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *devdata) +int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata) { if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost) return 0; @@ -3755,31 +3682,12 @@ int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *de return translate_iboost(devdata->child.hdmi_iboost_level); } -int intel_bios_dp_max_link_rate(struct intel_encoder *encoder) -{ - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port]; - - return _intel_bios_dp_max_link_rate(devdata); -} - -int intel_bios_dp_max_lane_count(struct intel_encoder *encoder) -{ - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port]; - - return _intel_bios_dp_max_lane_count(devdata); -} - -int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder) +int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port]; - if (!devdata || !devdata->child.ddc_pin) return 0; - return map_ddc_pin(i915, devdata->child.ddc_pin); + return map_ddc_pin(devdata->i915, devdata->child.ddc_pin); } bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata) @@ -3792,6 +3700,16 @@ bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devda return devdata->i915->display.vbt.version >= 209 && devdata->child.tbt; } +bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata) +{ + return devdata && 
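/*
 * intel_bios_port_aux_ch() becomes intel_bios_dp_aux_ch() on a devdata
 * handle, and the "AUX x for port x" platform default moves out of the
 * helper. A hypothetical call site under the new split (the fallback
 * mirrors the deleted default; the lookup helper appears in this patch):
 */
const struct intel_bios_encoder_data *devdata =
        intel_bios_encoder_data_lookup(i915, encoder->port);
enum aux_ch aux_ch = intel_bios_dp_aux_ch(devdata);

if (aux_ch == AUX_CH_NONE)      /* VBT silent: fall back to platform default */
        aux_ch = (enum aux_ch)encoder->port;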
devdata->child.lane_reversal; +} + +bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata) +{ + return devdata && devdata->child.hpd_invert; +} + const struct intel_bios_encoder_data * intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port) { diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h index d221f784aa88..8a0730c9b48c 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.h +++ b/drivers/gpu/drm/i915/display/intel_bios.h @@ -38,6 +38,7 @@ struct intel_bios_encoder_data; struct intel_crtc_state; struct intel_encoder; struct intel_panel; +enum aux_ch; enum port; enum intel_backlight_type { @@ -248,21 +249,9 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port); bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); -bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915, - enum port port); -bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915, - enum port port); -bool intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915, - enum port port); -enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port); bool intel_bios_get_dsc_params(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, int dsc_max_bpc); -int intel_bios_max_tmds_clock(struct intel_encoder *encoder); -int intel_bios_hdmi_level_shift(struct intel_encoder *encoder); -int intel_bios_dp_max_link_rate(struct intel_encoder *encoder); -int intel_bios_dp_max_lane_count(struct intel_encoder *encoder); -int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder); bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915, enum port port); bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port); @@ -272,9 +261,19 @@ intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port); bool intel_bios_encoder_supports_dvi(const struct intel_bios_encoder_data *devdata); bool intel_bios_encoder_supports_hdmi(const struct intel_bios_encoder_data *devdata); bool intel_bios_encoder_supports_dp(const struct intel_bios_encoder_data *devdata); +bool intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata); bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata); bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devdata); -int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata); -int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *devdata); +bool intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata); +bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata); +bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata); +enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata); +int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata); +int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata); +int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata); +int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata); +int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data 
*devdata); +int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata); +int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata); #endif /* _INTEL_BIOS_H_ */ diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 1c236f02b380..202321ffbe2a 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -119,6 +119,32 @@ static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv, return 0; } +static u16 icl_qgv_points_mask(struct drm_i915_private *i915) +{ + unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points; + unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points; + u16 qgv_points = 0, psf_points = 0; + + /* + * We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects + * it with failure if we try masking any unadvertised points. + * So need to operate only with those returned from PCode. + */ + if (num_qgv_points > 0) + qgv_points = GENMASK(num_qgv_points - 1, 0); + + if (num_psf_gv_points > 0) + psf_points = GENMASK(num_psf_gv_points - 1, 0); + + return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points); +} + +static bool is_sagv_enabled(struct drm_i915_private *i915, u16 points_mask) +{ + return !is_power_of_2(~points_mask & icl_qgv_points_mask(i915) & + ICL_PCODE_REQ_QGV_PT_MASK); +} + int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv, u32 points_mask) { @@ -136,6 +162,9 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv, return ret; } + dev_priv->display.sagv.status = is_sagv_enabled(dev_priv, points_mask) ? + I915_SAGV_ENABLED : I915_SAGV_DISABLED; + return 0; } @@ -965,26 +994,6 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state, return 0; } -static u16 icl_qgv_points_mask(struct drm_i915_private *i915) -{ - unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points; - unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points; - u16 qgv_points = 0, psf_points = 0; - - /* - * We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects - * it with failure if we try masking any unadvertised points. - * So need to operate only with those returned from PCode. 
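/*
 * The new is_sagv_enabled() above reads the restriction mask like this:
 * take the QGV bits PCode actually advertised, drop the ones the mask
 * disables, and call SAGV disabled exactly when a single point remains
 * (a power-of-two bit pattern). Worked example, assuming three
 * advertised QGV points (qgv bits 0b111):
 *
 *   ~points_mask & 0b111 == 0b010  ->  one point left,  SAGV disabled
 *   ~points_mask & 0b111 == 0b101  ->  two points left, SAGV enabled
 *
 * i.e. SAGV stays on only while the system can still switch between at
 * least two memory frequency points.
 */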
- */ - if (num_qgv_points > 0) - qgv_points = GENMASK(num_qgv_points - 1, 0); - - if (num_psf_gv_points > 0) - psf_points = GENMASK(num_psf_gv_points - 1, 0); - - return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points); -} - static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed) { struct drm_i915_private *i915 = to_i915(state->base.dev); diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 7e16b655c833..084a483f9776 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1329,6 +1329,30 @@ static const struct intel_cdclk_vals adlp_cdclk_table[] = { {} }; +static const struct intel_cdclk_vals rplu_cdclk_table[] = { + { .refclk = 19200, .cdclk = 172800, .divider = 3, .ratio = 27 }, + { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 }, + { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 }, + { .refclk = 19200, .cdclk = 480000, .divider = 2, .ratio = 50 }, + { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 }, + { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 }, + + { .refclk = 24000, .cdclk = 176000, .divider = 3, .ratio = 22 }, + { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 }, + { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 }, + { .refclk = 24000, .cdclk = 480000, .divider = 2, .ratio = 40 }, + { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 }, + { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 }, + + { .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 }, + { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 }, + { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 }, + { .refclk = 38400, .cdclk = 480000, .divider = 2, .ratio = 25 }, + { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 }, + { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 }, + {} +}; + static const struct intel_cdclk_vals dg2_cdclk_table[] = { { .refclk = 38400, .cdclk = 163200, .divider = 2, .ratio = 34, .waveform = 0x8888 }, { .refclk = 38400, .cdclk = 204000, .divider = 2, .ratio = 34, .waveform = 0x9248 }, @@ -1801,6 +1825,13 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91 return true; } +static bool pll_enable_wa_needed(struct drm_i915_private *dev_priv) +{ + return ((IS_DG2(dev_priv) || IS_METEORLAKE(dev_priv)) && + dev_priv->display.cdclk.hw.vco > 0 && + HAS_CDCLK_SQUASH(dev_priv)); +} + static void _bxt_set_cdclk(struct drm_i915_private *dev_priv, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) @@ -1815,9 +1846,13 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv, !cdclk_pll_is_unknown(dev_priv->display.cdclk.hw.vco)) { if (dev_priv->display.cdclk.hw.vco != vco) adlp_cdclk_pll_crawl(dev_priv, vco); - } else if (DISPLAY_VER(dev_priv) >= 11) + } else if (DISPLAY_VER(dev_priv) >= 11) { + /* wa_15010685871: dg2, mtl */ + if (pll_enable_wa_needed(dev_priv)) + dg2_cdclk_squash_program(dev_priv, 0); + icl_cdclk_pll_update(dev_priv, vco); - else + } else bxt_cdclk_pll_update(dev_priv, vco); waveform = cdclk_squash_waveform(dev_priv, cdclk); @@ -3353,6 +3388,8 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) /* Wa_22011320316:adl-p[a0] */ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) dev_priv->display.cdclk.table = adlp_a_step_cdclk_table; + else if (IS_ADLP_RPLU(dev_priv)) + dev_priv->display.cdclk.table = 
rplu_cdclk_table; else dev_priv->display.cdclk.table = adlp_cdclk_table; } else if (IS_ROCKETLAKE(dev_priv)) { diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 8d97c299e657..a6dd08598233 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -257,7 +257,7 @@ static bool ilk_limited_range(const struct intel_crtc_state *crtc_state) if (DISPLAY_VER(i915) >= 11) return false; - /* pre-hsw have PIPECONF_COLOR_RANGE_SELECT */ + /* pre-hsw have TRANSCONF_COLOR_RANGE_SELECT */ if (DISPLAY_VER(i915) < 7 || IS_IVYBRIDGE(i915)) return false; @@ -624,7 +624,7 @@ static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state) static void i9xx_color_commit_arm(const struct intel_crtc_state *crtc_state) { - /* update PIPECONF GAMMA_MODE */ + /* update TRANSCONF GAMMA_MODE */ i9xx_set_pipeconf(crtc_state); } @@ -633,7 +633,7 @@ static void ilk_color_commit_arm(const struct intel_crtc_state *crtc_state) struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); - /* update PIPECONF GAMMA_MODE */ + /* update TRANSCONF GAMMA_MODE */ ilk_set_pipeconf(crtc_state); intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe), @@ -1256,8 +1256,11 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state) break; } - if (crtc_state->dsb) - intel_dsb_commit(crtc_state->dsb); + if (crtc_state->dsb) { + intel_dsb_finish(crtc_state->dsb); + intel_dsb_commit(crtc_state->dsb, false); + intel_dsb_wait(crtc_state->dsb); + } } static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color) @@ -1380,6 +1383,9 @@ void intel_color_prepare_commit(struct intel_crtc_state *crtc_state) /* FIXME DSB has issues loading LUTs, disable it for now */ return; + if (!crtc_state->pre_csc_lut && !crtc_state->post_csc_lut) + return; + crtc_state->dsb = intel_dsb_prepare(crtc, 1024); } @@ -1500,6 +1506,8 @@ intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state) return PTR_ERR(plane_state); new_crtc_state->update_planes |= BIT(plane->id); + new_crtc_state->async_flip_planes = 0; + new_crtc_state->do_async_flip = false; /* plane control register changes blocked by CxSR */ if (HAS_GMCH(i915)) diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c index 8b870b2dd4f9..922a6d87b553 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy.c +++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c @@ -78,14 +78,11 @@ static void icl_set_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy) { const struct icl_procmon *procmon; - u32 val; procmon = icl_get_procmon_ref_values(dev_priv, phy); - val = intel_de_read(dev_priv, ICL_PORT_COMP_DW1(phy)); - val &= ~((0xff << 16) | 0xff); - val |= procmon->dw1; - intel_de_write(dev_priv, ICL_PORT_COMP_DW1(phy), val); + intel_de_rmw(dev_priv, ICL_PORT_COMP_DW1(phy), + (0xff << 16) | 0xff, procmon->dw1); intel_de_write(dev_priv, ICL_PORT_COMP_DW9(phy), procmon->dw9); intel_de_write(dev_priv, ICL_PORT_COMP_DW10(phy), procmon->dw10); @@ -236,8 +233,7 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv, ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2); ret &= check_phy_reg(dev_priv, phy, ICL_PORT_PCS_DW1_LN(0, phy), - DCC_MODE_SELECT_MASK, - DCC_MODE_SELECT_CONTINUOSLY); + DCC_MODE_SELECT_MASK, RUN_DCC_ONCE); } ret &= icl_verify_procmon_ref_values(dev_priv, phy); @@ -267,7 +263,6 @@ void intel_combo_phy_power_up_lanes(struct 
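/*
 * Two notes on the hunks above. First, each RPL-U cdclk table row
 * satisfies cdclk = refclk * ratio / divider, e.g. 19200 * 27 / 3 =
 * 172800 and 38400 * 34 / 2 = 652800, which is a quick way to audit new
 * rows. Second, the DSB path in icl_load_luts() now spells out the full
 * buffer lifecycle; pieced together from the calls visible in this
 * patch (the trailing comments are interpretation, not authoritative):
 */
struct intel_dsb *dsb = intel_dsb_prepare(crtc, 1024);  /* allocate command buffer */
/* ... emit LUT register writes into the buffer ... */
intel_dsb_finish(dsb);          /* seal the command buffer */
intel_dsb_commit(dsb, false);   /* kick execution; false: don't defer to vblank */
intel_dsb_wait(dsb);            /* block until the hardware is done */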
drm_i915_private *dev_priv, int lane_count, bool lane_reversal) { u8 lane_mask; - u32 val; if (is_dsi) { drm_WARN_ON(&dev_priv->drm, lane_reversal); @@ -308,10 +303,8 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv, } } - val = intel_de_read(dev_priv, ICL_PORT_CL_DW10(phy)); - val &= ~PWR_DOWN_LN_MASK; - val |= lane_mask; - intel_de_write(dev_priv, ICL_PORT_CL_DW10(phy), val); + intel_de_rmw(dev_priv, ICL_PORT_CL_DW10(phy), + PWR_DOWN_LN_MASK, lane_mask); } static void icl_combo_phys_init(struct drm_i915_private *dev_priv) @@ -360,25 +353,19 @@ skip_phy_misc: val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy)); val &= ~DCC_MODE_SELECT_MASK; - val |= DCC_MODE_SELECT_CONTINUOSLY; + val |= RUN_DCC_ONCE; intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val); } icl_set_procmon_ref_values(dev_priv, phy); - if (phy_is_master(dev_priv, phy)) { - val = intel_de_read(dev_priv, ICL_PORT_COMP_DW8(phy)); - val |= IREFGEN; - intel_de_write(dev_priv, ICL_PORT_COMP_DW8(phy), val); - } - - val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)); - val |= COMP_INIT; - intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val); + if (phy_is_master(dev_priv, phy)) + intel_de_rmw(dev_priv, ICL_PORT_COMP_DW8(phy), + 0, IREFGEN); - val = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy)); - val |= CL_POWER_DOWN_ENABLE; - intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val); + intel_de_rmw(dev_priv, ICL_PORT_COMP_DW0(phy), 0, COMP_INIT); + intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy), + 0, CL_POWER_DOWN_ENABLE); } } @@ -387,8 +374,6 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv) enum phy phy; for_each_combo_phy_reverse(dev_priv, phy) { - u32 val; - if (phy == PHY_A && !icl_combo_phy_verify_state(dev_priv, phy)) { if (IS_TIGERLAKE(dev_priv) || IS_DG1(dev_priv)) { @@ -410,14 +395,11 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv) if (!has_phy_misc(dev_priv, phy)) goto skip_phy_misc; - val = intel_de_read(dev_priv, ICL_PHY_MISC(phy)); - val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN; - intel_de_write(dev_priv, ICL_PHY_MISC(phy), val); + intel_de_rmw(dev_priv, ICL_PHY_MISC(phy), 0, + ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN); skip_phy_misc: - val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)); - val &= ~COMP_INIT; - intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val); + intel_de_rmw(dev_priv, ICL_PORT_COMP_DW0(phy), COMP_INIT, 0); } } diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h index 2ed65193ca19..b0983edccf3f 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h +++ b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h @@ -90,8 +90,8 @@ #define ICL_PORT_PCS_DW1_AUX(phy) _MMIO(_ICL_PORT_PCS_DW_AUX(1, phy)) #define ICL_PORT_PCS_DW1_GRP(phy) _MMIO(_ICL_PORT_PCS_DW_GRP(1, phy)) #define ICL_PORT_PCS_DW1_LN(ln, phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, ln, phy)) -#define DCC_MODE_SELECT_MASK (0x3 << 20) -#define DCC_MODE_SELECT_CONTINUOSLY (0x3 << 20) +#define DCC_MODE_SELECT_MASK REG_GENMASK(21, 20) +#define RUN_DCC_ONCE REG_FIELD_PREP(DCC_MODE_SELECT_MASK, 0) #define COMMON_KEEPER_EN (1 << 26) #define LATENCY_OPTIM_MASK (0x3 << 2) #define LATENCY_OPTIM_VAL(x) ((x) << 2) diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index 7267ffc7f539..8f2ebead0826 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -260,7 +260,7 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state, 
ilk_pfit_disable(old_crtc_state); - intel_ddi_disable_pipe_clock(old_crtc_state); + intel_ddi_disable_transcoder_clock(old_crtc_state); pch_post_disable_crt(state, encoder, old_crtc_state, old_conn_state); @@ -300,7 +300,7 @@ static void hsw_pre_enable_crt(struct intel_atomic_state *state, hsw_fdi_link_train(encoder, crtc_state); - intel_ddi_enable_pipe_clock(encoder, crtc_state); + intel_ddi_enable_transcoder_clock(encoder, crtc_state); } static void hsw_enable_crt(struct intel_atomic_state *state, @@ -678,10 +678,11 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) } static enum drm_connector_status -intel_crt_load_detect(struct intel_crt *crt, u32 pipe) +intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe) { struct drm_device *dev = crt->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); + enum transcoder cpu_transcoder = (enum transcoder)pipe; u32 save_bclrpat; u32 save_vtotal; u32 vtotal, vactive; @@ -693,25 +694,25 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe) drm_dbg_kms(&dev_priv->drm, "starting load-detect on CRT\n"); - save_bclrpat = intel_de_read(dev_priv, BCLRPAT(pipe)); - save_vtotal = intel_de_read(dev_priv, VTOTAL(pipe)); - vblank = intel_de_read(dev_priv, VBLANK(pipe)); + save_bclrpat = intel_de_read(dev_priv, BCLRPAT(cpu_transcoder)); + save_vtotal = intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder)); + vblank = intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder)); - vtotal = ((save_vtotal >> 16) & 0xfff) + 1; - vactive = (save_vtotal & 0x7ff) + 1; + vtotal = REG_FIELD_GET(VTOTAL_MASK, save_vtotal) + 1; + vactive = REG_FIELD_GET(VACTIVE_MASK, save_vtotal) + 1; - vblank_start = (vblank & 0xfff) + 1; - vblank_end = ((vblank >> 16) & 0xfff) + 1; + vblank_start = REG_FIELD_GET(VBLANK_START_MASK, vblank) + 1; + vblank_end = REG_FIELD_GET(VBLANK_END_MASK, vblank) + 1; /* Set the border color to purple. 
*/ - intel_de_write(dev_priv, BCLRPAT(pipe), 0x500050); + intel_de_write(dev_priv, BCLRPAT(cpu_transcoder), 0x500050); if (DISPLAY_VER(dev_priv) != 2) { - u32 pipeconf = intel_de_read(dev_priv, PIPECONF(pipe)); + u32 transconf = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)); - intel_de_write(dev_priv, PIPECONF(pipe), - pipeconf | PIPECONF_FORCE_BORDER); - intel_de_posting_read(dev_priv, PIPECONF(pipe)); + intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), + transconf | TRANSCONF_FORCE_BORDER); + intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); /* Wait for next Vblank to substitute * border color for Color info */ intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe)); @@ -720,7 +721,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe) connector_status_connected : connector_status_disconnected; - intel_de_write(dev_priv, PIPECONF(pipe), pipeconf); + intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), transconf); } else { bool restore_vblank = false; int count, detect; @@ -730,12 +731,13 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe) * Yes, this will flicker */ if (vblank_start <= vactive && vblank_end >= vtotal) { - u32 vsync = intel_de_read(dev_priv, VSYNC(pipe)); - u32 vsync_start = (vsync & 0xffff) + 1; + u32 vsync = intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder)); + u32 vsync_start = REG_FIELD_GET(VSYNC_START_MASK, vsync) + 1; vblank_start = vsync_start; - intel_de_write(dev_priv, VBLANK(pipe), - (vblank_start - 1) | ((vblank_end - 1) << 16)); + intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder), + VBLANK_START(vblank_start - 1) | + VBLANK_END(vblank_end - 1)); restore_vblank = true; } /* sample in the vertical border, selecting the larger one */ @@ -766,7 +768,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe) /* restore vblank if necessary */ if (restore_vblank) - intel_de_write(dev_priv, VBLANK(pipe), vblank); + intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder), vblank); /* * If more than 3/4 of the scanline detected a monitor, * then it is assumed to be present. 
This works even on i830, @@ -779,7 +781,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe) } /* Restore previous settings */ - intel_de_write(dev_priv, BCLRPAT(pipe), save_bclrpat); + intel_de_write(dev_priv, BCLRPAT(cpu_transcoder), save_bclrpat); return status; } diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c index 82be0fbe9934..b79a8834559f 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc.c +++ b/drivers/gpu/drm/i915/display/intel_crtc.c @@ -25,6 +25,7 @@ #include "intel_display_types.h" #include "intel_drrs.h" #include "intel_dsi.h" +#include "intel_fifo_underrun.h" #include "intel_pipe_crc.h" #include "intel_psr.h" #include "intel_sprite.h" @@ -314,6 +315,8 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) } crtc->plane_ids_mask |= BIT(primary->id); + intel_init_fifo_underrun_reporting(dev_priv, crtc, false); + for_each_sprite(dev_priv, pipe, sprite) { struct intel_plane *plane; diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c index 2422d6ef5777..766633566fd6 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c +++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c @@ -14,14 +14,16 @@ static void intel_dump_crtc_timings(struct drm_i915_private *i915, const struct drm_display_mode *mode) { - drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, " - "type: 0x%x flags: 0x%x\n", + drm_dbg_kms(&i915->drm, "crtc timings: clock=%d, " + "hd=%d hb=%d-%d hs=%d-%d ht=%d, " + "vd=%d vb=%d-%d vs=%d-%d vt=%d, " + "flags=0x%x\n", mode->crtc_clock, - mode->crtc_hdisplay, mode->crtc_hsync_start, - mode->crtc_hsync_end, mode->crtc_htotal, - mode->crtc_vdisplay, mode->crtc_vsync_start, - mode->crtc_vsync_end, mode->crtc_vtotal, - mode->type, mode->flags); + mode->crtc_hdisplay, mode->crtc_hblank_start, mode->crtc_hblank_end, + mode->crtc_hsync_start, mode->crtc_hsync_end, mode->crtc_htotal, + mode->crtc_vdisplay, mode->crtc_vblank_start, mode->crtc_vblank_end, + mode->crtc_vsync_start, mode->crtc_vsync_end, mode->crtc_vtotal, + mode->flags); } static void diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c index d190fa0d393b..c3173c0c2068 100644 --- a/drivers/gpu/drm/i915/display/intel_cursor.c +++ b/drivers/gpu/drm/i915/display/intel_cursor.c @@ -532,9 +532,10 @@ static void i9xx_cursor_update_arm(struct intel_plane *plane, skl_write_cursor_wm(plane, crtc_state); if (plane_state) - intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0); + intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state, + plane_state); else - intel_psr2_disable_plane_sel_fetch(plane, crtc_state); + intel_psr2_disable_plane_sel_fetch_arm(plane, crtc_state); if (plane->cursor.base != base || plane->cursor.size != fbc_ctl || diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 254559abedfb..0950bcfea4c0 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -47,6 +47,7 @@ #include "intel_dkl_phy.h" #include "intel_dkl_phy_regs.h" #include "intel_dp.h" +#include "intel_dp_aux.h" #include "intel_dp_link_training.h" #include "intel_dp_mst.h" #include "intel_dpio_phy.h" @@ -67,6 +68,7 @@ #include "intel_sprite.h" #include "intel_tc.h" #include "intel_vdsc.h" +#include "intel_vdsc_regs.h" #include "intel_vrr.h" #include "skl_scaler.h" #include "skl_universal_plane.h" @@ -89,7 +91,7 @@ 
static int intel_ddi_hdmi_level(struct intel_encoder *encoder, { int level; - level = intel_bios_hdmi_level_shift(encoder); + level = intel_bios_hdmi_level_shift(encoder->devdata); if (level < 0) level = trans->hdmi_default_entry; @@ -126,7 +128,7 @@ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder, /* If we're boosting the current, set bit 31 of trans1 */ if (has_iboost(dev_priv) && - intel_bios_encoder_dp_boost_level(encoder->devdata)) + intel_bios_dp_boost_level(encoder->devdata)) iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE; for (i = 0; i < n_entries; i++) { @@ -158,7 +160,7 @@ static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder, /* If we're boosting the current, set bit 31 of trans1 */ if (has_iboost(dev_priv) && - intel_bios_encoder_hdmi_boost_level(encoder->devdata)) + intel_bios_hdmi_boost_level(encoder->devdata)) iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE; /* Entry 9 is for HDMI: */ @@ -644,19 +646,14 @@ int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder, struct drm_i915_private *dev_priv = to_i915(dev); intel_wakeref_t wakeref; int ret = 0; - u32 tmp; wakeref = intel_display_power_get_if_enabled(dev_priv, intel_encoder->power_domain); if (drm_WARN_ON(dev, !wakeref)) return -ENXIO; - tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); - if (enable) - tmp |= hdcp_mask; - else - tmp &= ~hdcp_mask; - intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), tmp); + intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), + hdcp_mask, enable ? hdcp_mask : 0); intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref); return ret; } @@ -948,8 +945,8 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder, main_link_aux_power_domain_get(dig_port, crtc_state); } -void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -957,33 +954,34 @@ void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder, enum phy phy = intel_port_to_phy(dev_priv, encoder->port); u32 val; - if (cpu_transcoder != TRANSCODER_EDP) { - if (DISPLAY_VER(dev_priv) >= 13) - val = TGL_TRANS_CLK_SEL_PORT(phy); - else if (DISPLAY_VER(dev_priv) >= 12) - val = TGL_TRANS_CLK_SEL_PORT(encoder->port); - else - val = TRANS_CLK_SEL_PORT(encoder->port); + if (cpu_transcoder == TRANSCODER_EDP) + return; - intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val); - } + if (DISPLAY_VER(dev_priv) >= 13) + val = TGL_TRANS_CLK_SEL_PORT(phy); + else if (DISPLAY_VER(dev_priv) >= 12) + val = TGL_TRANS_CLK_SEL_PORT(encoder->port); + else + val = TRANS_CLK_SEL_PORT(encoder->port); + + intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val); } -void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state) +void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + u32 val; - if (cpu_transcoder != TRANSCODER_EDP) { - if (DISPLAY_VER(dev_priv) >= 12) - intel_de_write(dev_priv, - TRANS_CLK_SEL(cpu_transcoder), - TGL_TRANS_CLK_SEL_DISABLED); - else - intel_de_write(dev_priv, - TRANS_CLK_SEL(cpu_transcoder), - TRANS_CLK_SEL_DISABLED); - } + if (cpu_transcoder == TRANSCODER_EDP) + 
return; + + if (DISPLAY_VER(dev_priv) >= 12) + val = TGL_TRANS_CLK_SEL_DISABLED; + else + val = TRANS_CLK_SEL_DISABLED; + + intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val); } static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv, @@ -1009,9 +1007,9 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, u8 iboost; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - iboost = intel_bios_encoder_hdmi_boost_level(encoder->devdata); + iboost = intel_bios_hdmi_boost_level(encoder->devdata); else - iboost = intel_bios_encoder_dp_boost_level(encoder->devdata); + iboost = intel_bios_dp_boost_level(encoder->devdata); if (iboost == 0) { const struct intel_ddi_buf_trans *trans; @@ -2200,15 +2198,13 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp; - u32 val; if (!crtc_state->fec_enable) return; intel_dp = enc_to_intel_dp(encoder); - val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); - val |= DP_TP_CTL_FEC_ENABLE; - intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val); + intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), + 0, DP_TP_CTL_FEC_ENABLE); } static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, @@ -2216,15 +2212,13 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp; - u32 val; if (!crtc_state->fec_enable) return; intel_dp = enc_to_intel_dp(encoder); - val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); - val &= ~DP_TP_CTL_FEC_ENABLE; - intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val); + intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), + DP_TP_CTL_FEC_ENABLE, 0); intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); } @@ -2387,7 +2381,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, * 7.a Configure Transcoder Clock Select to direct the Port clock to the * Transcoder. 
*/ - intel_ddi_enable_pipe_clock(encoder, crtc_state); + intel_ddi_enable_transcoder_clock(encoder, crtc_state); if (HAS_DP20(dev_priv)) intel_ddi_config_transcoder_dp2(encoder, crtc_state); @@ -2514,7 +2508,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, intel_ddi_enable_fec(encoder, crtc_state); if (!is_mst) - intel_ddi_enable_pipe_clock(encoder, crtc_state); + intel_ddi_enable_transcoder_clock(encoder, crtc_state); intel_dsc_dp_pps_write(encoder, crtc_state); } @@ -2556,7 +2550,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state, icl_program_mg_dp_mode(dig_port, crtc_state); - intel_ddi_enable_pipe_clock(encoder, crtc_state); + intel_ddi_enable_transcoder_clock(encoder, crtc_state); dig_port->set_infoframes(encoder, crtc_state->has_infoframe, @@ -2622,12 +2616,10 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder, wait = true; } - if (intel_crtc_has_dp_encoder(crtc_state)) { - val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); - val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); - val |= DP_TP_CTL_LINK_TRAIN_PAT1; - intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val); - } + if (intel_crtc_has_dp_encoder(crtc_state)) + intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), + DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK, + DP_TP_CTL_LINK_TRAIN_PAT1); /* Disable FEC in DP Sink */ intel_ddi_disable_fec_state(encoder, crtc_state); @@ -2660,19 +2652,14 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state, if (DISPLAY_VER(dev_priv) >= 12) { if (is_mst) { enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; - u32 val; - val = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL(cpu_transcoder)); - val &= ~(TGL_TRANS_DDI_PORT_MASK | - TRANS_DDI_MODE_SELECT_MASK); - intel_de_write(dev_priv, - TRANS_DDI_FUNC_CTL(cpu_transcoder), - val); + intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), + TGL_TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK, + 0); } } else { if (!is_mst) - intel_ddi_disable_pipe_clock(old_crtc_state); + intel_ddi_disable_transcoder_clock(old_crtc_state); } intel_disable_ddi_buf(encoder, old_crtc_state); @@ -2683,7 +2670,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state, * transcoder" */ if (DISPLAY_VER(dev_priv) >= 12) - intel_ddi_disable_pipe_clock(old_crtc_state); + intel_ddi_disable_transcoder_clock(old_crtc_state); intel_pps_vdd_on(intel_dp); intel_pps_off(intel_dp); @@ -2709,12 +2696,12 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state, old_crtc_state, old_conn_state); if (DISPLAY_VER(dev_priv) < 12) - intel_ddi_disable_pipe_clock(old_crtc_state); + intel_ddi_disable_transcoder_clock(old_crtc_state); intel_disable_ddi_buf(encoder, old_crtc_state); if (DISPLAY_VER(dev_priv) >= 12) - intel_ddi_disable_pipe_clock(old_crtc_state); + intel_ddi_disable_transcoder_clock(old_crtc_state); intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain, @@ -3222,12 +3209,9 @@ static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp, struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; - u32 val; - val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); - val &= ~DP_TP_CTL_LINK_TRAIN_MASK; - val |= DP_TP_CTL_LINK_TRAIN_IDLE; - intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val); + intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), + 
DP_TP_CTL_LINK_TRAIN_MASK, DP_TP_CTL_LINK_TRAIN_IDLE); /* * Until TGL on PORT_A we can have only eDP in SST mode. There the only @@ -4305,7 +4289,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) intel_bios_encoder_supports_hdmi(devdata); init_dp = intel_bios_encoder_supports_dp(devdata); - if (intel_bios_is_lspcon_present(dev_priv, port)) { + if (intel_bios_encoder_is_lspcon(devdata)) { /* * Lspcon device needs to be driven with DP connector * with special detection sequence. So make sure DP @@ -4500,12 +4484,12 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) intel_de_read(dev_priv, DDI_BUF_CTL(port)) & (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES); - if (intel_bios_is_lane_reversal_needed(dev_priv, port)) + if (intel_bios_encoder_lane_reversal(devdata)) dig_port->saved_port_bits |= DDI_BUF_PORT_REVERSAL; dig_port->dp.output_reg = INVALID_MMIO_REG; dig_port->max_lanes = intel_ddi_max_lanes(dig_port); - dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); + dig_port->aux_ch = intel_dp_aux_ch(encoder); if (intel_phy_is_tc(dev_priv, phy)) { bool is_legacy = @@ -4521,35 +4505,21 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) drm_WARN_ON(&dev_priv->drm, port > PORT_I); dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(dev_priv, port); - if (init_dp) { - if (!intel_ddi_init_dp_connector(dig_port)) - goto err; - - dig_port->hpd_pulse = intel_dp_hpd_pulse; - - if (dig_port->dp.mso_link_count) - encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv); - } - - /* In theory we don't need the encoder->type check, but leave it just in - * case we have some really bad VBTs... */ - if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) { - if (!intel_ddi_init_hdmi_connector(dig_port)) - goto err; - } - if (DISPLAY_VER(dev_priv) >= 11) { if (intel_phy_is_tc(dev_priv, phy)) dig_port->connected = intel_tc_port_connected; else dig_port->connected = lpt_digital_port_connected; - } else if (DISPLAY_VER(dev_priv) >= 8) { - if (port == PORT_A || IS_GEMINILAKE(dev_priv) || - IS_BROXTON(dev_priv)) + } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + dig_port->connected = bdw_digital_port_connected; + } else if (DISPLAY_VER(dev_priv) == 9) { + dig_port->connected = lpt_digital_port_connected; + } else if (IS_BROADWELL(dev_priv)) { + if (port == PORT_A) dig_port->connected = bdw_digital_port_connected; else dig_port->connected = lpt_digital_port_connected; - } else { + } else if (IS_HASWELL(dev_priv)) { if (port == PORT_A) dig_port->connected = hsw_digital_port_connected; else @@ -4558,6 +4528,25 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) intel_infoframe_init(dig_port); + if (init_dp) { + if (!intel_ddi_init_dp_connector(dig_port)) + goto err; + + dig_port->hpd_pulse = intel_dp_hpd_pulse; + + if (dig_port->dp.mso_link_count) + encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv); + } + + /* + * In theory we don't need the encoder->type check, + * but leave it just in case we have some really bad VBTs... 
+ */ + if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) { + if (!intel_ddi_init_hdmi_connector(dig_port)) + goto err; + } + return; err: diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h index d39076facdce..361f6874dde5 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.h +++ b/drivers/gpu/drm/i915/display/intel_ddi.h @@ -52,9 +52,9 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe); void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state); -void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state); -void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state); +void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state); void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index d3994e2a7d63..edbcb1273ca2 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -55,6 +55,7 @@ #include "i915_reg.h" #include "i915_utils.h" #include "i9xx_plane.h" +#include "i9xx_wm.h" #include "icl_dsi.h" #include "intel_acpi.h" #include "intel_atomic.h" @@ -94,6 +95,7 @@ #include "intel_hotplug.h" #include "intel_hti.h" #include "intel_lvds.h" +#include "intel_lvds_regs.h" #include "intel_modeset_setup.h" #include "intel_modeset_verify.h" #include "intel_overlay.h" @@ -114,8 +116,10 @@ #include "intel_tv.h" #include "intel_vblank.h" #include "intel_vdsc.h" +#include "intel_vdsc_regs.h" #include "intel_vga.h" #include "intel_vrr.h" +#include "intel_wm.h" #include "skl_scaler.h" #include "skl_universal_plane.h" #include "skl_watermark.h" @@ -130,101 +134,6 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state); static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state); static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state); -/** - * intel_update_watermarks - update FIFO watermark values based on current modes - * @dev_priv: i915 device - * - * Calculate watermark values for the various WM regs based on current mode - * and plane configuration. - * - * There are several cases to deal with here: - * - normal (i.e. non-self-refresh) - * - self-refresh (SR) mode - * - lines are large relative to FIFO size (buffer can hold up to 2) - * - lines are small relative to FIFO size (buffer can hold more than 2 - * lines), so need to account for TLB latency - * - * The normal calculation is: - * watermark = dotclock * bytes per pixel * latency - * where latency is platform & configuration dependent (we assume pessimal - * values here). - * - * The SR calculation is: - * watermark = (trunc(latency/line time)+1) * surface width * - * bytes per pixel - * where - * line time = htotal / dotclock - * surface width = hdisplay for normal plane and 64 for cursor - * and latency is assumed to be high, as above. - * - * The final value programmed to the register should always be rounded up, - * and include an extra 2 entries to account for clock crossings. 
- * - * We don't use the sprite, so we can ignore that. And on Crestline we have - * to set the non-SR watermarks to 8. - */ -void intel_update_watermarks(struct drm_i915_private *dev_priv) -{ - if (dev_priv->display.funcs.wm->update_wm) - dev_priv->display.funcs.wm->update_wm(dev_priv); -} - -static int intel_compute_pipe_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - if (dev_priv->display.funcs.wm->compute_pipe_wm) - return dev_priv->display.funcs.wm->compute_pipe_wm(state, crtc); - return 0; -} - -static int intel_compute_intermediate_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - if (!dev_priv->display.funcs.wm->compute_intermediate_wm) - return 0; - if (drm_WARN_ON(&dev_priv->drm, - !dev_priv->display.funcs.wm->compute_pipe_wm)) - return 0; - return dev_priv->display.funcs.wm->compute_intermediate_wm(state, crtc); -} - -static bool intel_initial_watermarks(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - if (dev_priv->display.funcs.wm->initial_watermarks) { - dev_priv->display.funcs.wm->initial_watermarks(state, crtc); - return true; - } - return false; -} - -static void intel_atomic_update_watermarks(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - if (dev_priv->display.funcs.wm->atomic_update_watermarks) - dev_priv->display.funcs.wm->atomic_update_watermarks(state, crtc); -} - -static void intel_optimize_watermarks(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - if (dev_priv->display.funcs.wm->optimize_watermarks) - dev_priv->display.funcs.wm->optimize_watermarks(state, crtc); -} - -static int intel_compute_global_watermarks(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - if (dev_priv->display.funcs.wm->compute_global_watermarks) - return dev_priv->display.funcs.wm->compute_global_watermarks(state); - return 0; -} - /* returns HPLL frequency in kHz */ int vlv_get_hpll_vco(struct drm_i915_private *dev_priv) { @@ -293,11 +202,11 @@ static void skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) { if (enable) - intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), - intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS); + intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), + 0, DUPS1_GATING_DIS | DUPS2_GATING_DIS); else - intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), - intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS)); + intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), + DUPS1_GATING_DIS | DUPS2_GATING_DIS, 0); } /* Wa_2006604312:icl,ehl */ @@ -306,11 +215,9 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) { if (enable) - intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), - intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS); + intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 0, DPFR_GATING_DIS); else - intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), - intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS); + intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), DPFR_GATING_DIS, 0); } /* Wa_1604331009:icl,jsl,ehl */ @@ -395,8 +302,8 @@ intel_wait_for_pipe_off(const struct intel_crtc_state 
*old_crtc_state) enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; /* Wait for the Pipe State to go off */ - if (intel_de_wait_for_clear(dev_priv, PIPECONF(cpu_transcoder), - PIPECONF_STATE_ENABLE, 100)) + if (intel_de_wait_for_clear(dev_priv, TRANSCONF(cpu_transcoder), + TRANSCONF_STATE_ENABLE, 100)) drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n"); } else { intel_wait_for_pipe_scanline_stopped(crtc); @@ -417,8 +324,8 @@ void assert_transcoder(struct drm_i915_private *dev_priv, power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); if (wakeref) { - u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); - cur_state = !!(val & PIPECONF_ENABLE); + u32 val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)); + cur_state = !!(val & TRANSCONF_ENABLE); intel_display_power_put(dev_priv, power_domain, wakeref); } else { @@ -530,15 +437,15 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state) intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe), 0, PIPE_ARB_USE_PROG_SLOTS); - reg = PIPECONF(cpu_transcoder); + reg = TRANSCONF(cpu_transcoder); val = intel_de_read(dev_priv, reg); - if (val & PIPECONF_ENABLE) { + if (val & TRANSCONF_ENABLE) { /* we keep both pipes enabled on 830 */ drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv)); return; } - intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE); + intel_de_write(dev_priv, reg, val | TRANSCONF_ENABLE); intel_de_posting_read(dev_priv, reg); /* @@ -569,9 +476,9 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state) */ assert_planes_disabled(crtc); - reg = PIPECONF(cpu_transcoder); + reg = TRANSCONF(cpu_transcoder); val = intel_de_read(dev_priv, reg); - if ((val & PIPECONF_ENABLE) == 0) + if ((val & TRANSCONF_ENABLE) == 0) return; /* @@ -579,11 +486,11 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state) * so best keep it disabled when not needed. */ if (old_crtc_state->double_wide) - val &= ~PIPECONF_DOUBLE_WIDE; + val &= ~TRANSCONF_DOUBLE_WIDE; /* Don't disable pipe or pipe PLLs if needed */ if (!IS_I830(dev_priv)) - val &= ~PIPECONF_ENABLE; + val &= ~TRANSCONF_ENABLE; if (DISPLAY_VER(dev_priv) >= 14) intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), @@ -593,7 +500,7 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state) FECSTALL_DIS_DPTSTREAM_DPTTG, 0); intel_de_write(dev_priv, reg, val); - if ((val & PIPECONF_ENABLE) == 0) + if ((val & TRANSCONF_ENABLE) == 0) intel_wait_for_pipe_off(old_crtc_state); } @@ -1252,7 +1159,8 @@ static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state, intel_atomic_get_old_crtc_state(state, crtc); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - u8 update_planes = new_crtc_state->update_planes; + u8 disable_async_flip_planes = old_crtc_state->async_flip_planes & + ~new_crtc_state->async_flip_planes; const struct intel_plane_state *old_plane_state; struct intel_plane *plane; bool need_vbl_wait = false; @@ -1261,7 +1169,7 @@ static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state, for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) { if (plane->need_async_flip_disable_wa && plane->pipe == crtc->pipe && - update_planes & BIT(plane->id)) { + disable_async_flip_planes & BIT(plane->id)) { /* * Apart from the async flip bit we want to * preserve the old state for the plane. 
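The hunks on either side of this point convert the async-flip disable workaround from the crtc-wide uapi.async_flip flag to a per-plane async_flip_planes bitmask. A minimal standalone C sketch of the mask arithmetic (hypothetical plane ids and a local BIT() macro, not the driver's real types) shows that only planes dropping out of async-flip mode are selected for the workaround:

#include <assert.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

/* Hypothetical plane ids, standing in for BIT(plane->id) above. */
enum { PLANE_PRIMARY, PLANE_SPRITE0, PLANE_CURSOR };

int main(void)
{
	/* Async flips armed on primary + sprite before the commit... */
	uint8_t old_async_flip_planes = BIT(PLANE_PRIMARY) | BIT(PLANE_SPRITE0);
	/* ...but only on the sprite afterwards. */
	uint8_t new_async_flip_planes = BIT(PLANE_SPRITE0);

	/*
	 * Only planes that had async flips enabled before but not after
	 * need the disable workaround; a plane that keeps async-flipping
	 * must be left untouched.
	 */
	uint8_t disable_async_flip_planes =
		old_async_flip_planes & ~new_async_flip_planes;

	assert(disable_async_flip_planes == BIT(PLANE_PRIMARY));
	return 0;
}

This is why the intel_pre_plane_update hunk that follows keys the workaround on the old mask with the new mask's bits cleared, rather than on the plain async_flip booleans.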
@@ -1378,7 +1286,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, * WA for platforms where async address update enable bit * is double buffered and only latched at start of vblank. */ - if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip) + if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes) intel_crtc_async_flip_disable_wa(state, crtc); } @@ -1801,12 +1709,10 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) enum transcoder transcoder = crtc_state->cpu_transcoder; i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? MTL_CHICKEN_TRANS(transcoder) : CHICKEN_TRANS(transcoder); - u32 val; - val = intel_de_read(dev_priv, reg); - val &= ~HSW_FRAME_START_DELAY_MASK; - val |= HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1); - intel_de_write(dev_priv, reg, val); + intel_de_rmw(dev_priv, reg, + HSW_FRAME_START_DELAY_MASK, + HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1)); } static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state, @@ -1846,7 +1752,7 @@ static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_sta intel_set_transcoder_timings(crtc_state); if (cpu_transcoder != TRANSCODER_EDP) - intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder), + intel_de_write(dev_priv, TRANS_MULT(cpu_transcoder), crtc_state->pixel_multiplier - 1); hsw_set_frame_start_delay(crtc_state); @@ -2819,12 +2725,14 @@ static void intel_set_transcoder_timings(const struct intel_crtc_sta enum pipe pipe = crtc->pipe; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - u32 crtc_vtotal, crtc_vblank_end; + u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end; int vsyncshift = 0; /* We need to be careful not to change the adjusted mode, for otherwise * the hw state checker will get angry at the mismatch. */ + crtc_vdisplay = adjusted_mode->crtc_vdisplay; crtc_vtotal = adjusted_mode->crtc_vtotal; + crtc_vblank_start = adjusted_mode->crtc_vblank_start; crtc_vblank_end = adjusted_mode->crtc_vblank_end; if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { @@ -2841,23 +2749,44 @@ static void intel_set_transcoder_timings(const struct intel_crtc_sta vsyncshift += adjusted_mode->crtc_htotal; } + /* + * VBLANK_START no longer works on ADL+, instead we must use + * TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start. + */ + if (DISPLAY_VER(dev_priv) >= 13) { + intel_de_write(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder), + crtc_vblank_start - crtc_vdisplay); + + /* + * VBLANK_START not used by hw, just clear it + * to make it stand out in register dumps. 
+ */ + crtc_vblank_start = 1; + } + if (DISPLAY_VER(dev_priv) > 3) - intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder), - vsyncshift); - - intel_de_write(dev_priv, HTOTAL(cpu_transcoder), - (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16)); - intel_de_write(dev_priv, HBLANK(cpu_transcoder), - (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16)); - intel_de_write(dev_priv, HSYNC(cpu_transcoder), - (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16)); - - intel_de_write(dev_priv, VTOTAL(cpu_transcoder), - (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16)); - intel_de_write(dev_priv, VBLANK(cpu_transcoder), - (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16)); - intel_de_write(dev_priv, VSYNC(cpu_transcoder), - (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16)); + intel_de_write(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder), + vsyncshift); + + intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder), + HACTIVE(adjusted_mode->crtc_hdisplay - 1) | + HTOTAL(adjusted_mode->crtc_htotal - 1)); + intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder), + HBLANK_START(adjusted_mode->crtc_hblank_start - 1) | + HBLANK_END(adjusted_mode->crtc_hblank_end - 1)); + intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder), + HSYNC_START(adjusted_mode->crtc_hsync_start - 1) | + HSYNC_END(adjusted_mode->crtc_hsync_end - 1)); + + intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder), + VACTIVE(crtc_vdisplay - 1) | + VTOTAL(crtc_vtotal - 1)); + intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder), + VBLANK_START(crtc_vblank_start - 1) | + VBLANK_END(crtc_vblank_end - 1)); + intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder), + VSYNC_START(adjusted_mode->crtc_vsync_start - 1) | + VSYNC_END(adjusted_mode->crtc_vsync_end - 1)); /* Workaround: when the EDP input selection is B, the VTOTAL_B must be * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is @@ -2865,9 +2794,9 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta * bits. 
*/ if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP && (pipe == PIPE_B || pipe == PIPE_C)) - intel_de_write(dev_priv, VTOTAL(pipe), - intel_de_read(dev_priv, VTOTAL(cpu_transcoder))); - + intel_de_write(dev_priv, TRANS_VTOTAL(pipe), + VACTIVE(crtc_vdisplay - 1) | + VTOTAL(crtc_vtotal - 1)); } static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) @@ -2895,9 +2824,9 @@ static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) - return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW; + return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW; else - return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK; + return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK; } static void intel_get_transcoder_timings(struct intel_crtc *crtc, @@ -2906,43 +2835,47 @@ static void intel_get_transcoder_timings(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; + struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; u32 tmp; - tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder)); - pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; - pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1; + tmp = intel_de_read(dev_priv, TRANS_HTOTAL(cpu_transcoder)); + adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1; + adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1; if (!transcoder_is_dsi(cpu_transcoder)) { - tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder)); - pipe_config->hw.adjusted_mode.crtc_hblank_start = - (tmp & 0xffff) + 1; - pipe_config->hw.adjusted_mode.crtc_hblank_end = - ((tmp >> 16) & 0xffff) + 1; + tmp = intel_de_read(dev_priv, TRANS_HBLANK(cpu_transcoder)); + adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1; + adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1; } - tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder)); - pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1; - pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1; - tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder)); - pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1; - pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1; + tmp = intel_de_read(dev_priv, TRANS_HSYNC(cpu_transcoder)); + adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1; + adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1; + + tmp = intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder)); + adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1; + adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1; + /* FIXME TGL+ DSI transcoders have this! 
*/ if (!transcoder_is_dsi(cpu_transcoder)) { - tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder)); - pipe_config->hw.adjusted_mode.crtc_vblank_start = - (tmp & 0xffff) + 1; - pipe_config->hw.adjusted_mode.crtc_vblank_end = - ((tmp >> 16) & 0xffff) + 1; + tmp = intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder)); + adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1; + adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1; } - tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder)); - pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; - pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; + tmp = intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder)); + adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1; + adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1; if (intel_pipe_is_interlaced(pipe_config)) { - pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; - pipe_config->hw.adjusted_mode.crtc_vtotal += 1; - pipe_config->hw.adjusted_mode.crtc_vblank_end += 1; + adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE; + adjusted_mode->crtc_vtotal += 1; + adjusted_mode->crtc_vblank_end += 1; } + + if (DISPLAY_VER(dev_priv) >= 13 && !transcoder_is_dsi(cpu_transcoder)) + adjusted_mode->crtc_vblank_start = + adjusted_mode->crtc_vdisplay + + intel_de_read(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder)); } static void intel_bigjoiner_adjust_pipe_src(struct intel_crtc_state *crtc_state) @@ -2982,7 +2915,8 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 pipeconf = 0; + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + u32 val = 0; /* * - We keep both pipes enabled on 830 @@ -2990,18 +2924,18 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) * - During fastset the pipe is already enabled and must remain so */ if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state)) - pipeconf |= PIPECONF_ENABLE; + val |= TRANSCONF_ENABLE; if (crtc_state->double_wide) - pipeconf |= PIPECONF_DOUBLE_WIDE; + val |= TRANSCONF_DOUBLE_WIDE; /* only g4x and later have fancy bpc/dither controls */ if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { /* Bspec claims that we can't use dithering for 30bpp pipes. 
*/ if (crtc_state->dither && crtc_state->pipe_bpp != 30) - pipeconf |= PIPECONF_DITHER_EN | - PIPECONF_DITHER_TYPE_SP; + val |= TRANSCONF_DITHER_EN | + TRANSCONF_DITHER_TYPE_SP; switch (crtc_state->pipe_bpp) { default: @@ -3009,13 +2943,13 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) MISSING_CASE(crtc_state->pipe_bpp); fallthrough; case 18: - pipeconf |= PIPECONF_BPC_6; + val |= TRANSCONF_BPC_6; break; case 24: - pipeconf |= PIPECONF_BPC_8; + val |= TRANSCONF_BPC_8; break; case 30: - pipeconf |= PIPECONF_BPC_10; + val |= TRANSCONF_BPC_10; break; } } @@ -3023,23 +2957,23 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { if (DISPLAY_VER(dev_priv) < 4 || intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) - pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; + val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION; else - pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; + val |= TRANSCONF_INTERLACE_W_SYNC_SHIFT; } else { - pipeconf |= PIPECONF_INTERLACE_PROGRESSIVE; + val |= TRANSCONF_INTERLACE_PROGRESSIVE; } if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && crtc_state->limited_color_range) - pipeconf |= PIPECONF_COLOR_RANGE_SELECT; + val |= TRANSCONF_COLOR_RANGE_SELECT; - pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); + val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode); - pipeconf |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); + val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); - intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf); - intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe)); + intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val); + intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); } static bool i9xx_has_pfit(struct drm_i915_private *dev_priv) @@ -3198,20 +3132,20 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, ret = false; - tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe)); - if (!(tmp & PIPECONF_ENABLE)) + tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder)); + if (!(tmp & TRANSCONF_ENABLE)) goto out; if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - switch (tmp & PIPECONF_BPC_MASK) { - case PIPECONF_BPC_6: + switch (tmp & TRANSCONF_BPC_MASK) { + case TRANSCONF_BPC_6: pipe_config->pipe_bpp = 18; break; - case PIPECONF_BPC_8: + case TRANSCONF_BPC_8: pipe_config->pipe_bpp = 24; break; - case PIPECONF_BPC_10: + case TRANSCONF_BPC_10: pipe_config->pipe_bpp = 30; break; default: @@ -3221,12 +3155,12 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, } if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && - (tmp & PIPECONF_COLOR_RANGE_SELECT)) + (tmp & TRANSCONF_COLOR_RANGE_SELECT)) pipe_config->limited_color_range = true; - pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_I9XX, tmp); + pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_I9XX, tmp); - pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1; + pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1; if (IS_CHERRYVIEW(dev_priv)) pipe_config->cgm_mode = intel_de_read(dev_priv, @@ -3236,7 +3170,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, intel_color_get_config(pipe_config); if (DISPLAY_VER(dev_priv) < 4) - pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; + pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE; intel_get_transcoder_timings(crtc, 
pipe_config); intel_get_pipe_src_size(crtc, pipe_config); @@ -3306,7 +3240,7 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 val = 0; /* @@ -3314,7 +3248,7 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) * - During fastset the pipe is already enabled and must remain so */ if (!intel_crtc_needs_modeset(crtc_state)) - val |= PIPECONF_ENABLE; + val |= TRANSCONF_ENABLE; switch (crtc_state->pipe_bpp) { default: @@ -3322,26 +3256,26 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) MISSING_CASE(crtc_state->pipe_bpp); fallthrough; case 18: - val |= PIPECONF_BPC_6; + val |= TRANSCONF_BPC_6; break; case 24: - val |= PIPECONF_BPC_8; + val |= TRANSCONF_BPC_8; break; case 30: - val |= PIPECONF_BPC_10; + val |= TRANSCONF_BPC_10; break; case 36: - val |= PIPECONF_BPC_12; + val |= TRANSCONF_BPC_12; break; } if (crtc_state->dither) - val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP; + val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP; if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) - val |= PIPECONF_INTERLACE_IF_ID_ILK; + val |= TRANSCONF_INTERLACE_IF_ID_ILK; else - val |= PIPECONF_INTERLACE_PF_PD_ILK; + val |= TRANSCONF_INTERLACE_PF_PD_ILK; /* * This would end up with an odd purple hue over @@ -3352,18 +3286,18 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) if (crtc_state->limited_color_range && !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) - val |= PIPECONF_COLOR_RANGE_SELECT; + val |= TRANSCONF_COLOR_RANGE_SELECT; if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) - val |= PIPECONF_OUTPUT_COLORSPACE_YUV709; + val |= TRANSCONF_OUTPUT_COLORSPACE_YUV709; - val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); + val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode); - val |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); - val |= PIPECONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay); + val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); + val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay); - intel_de_write(dev_priv, PIPECONF(pipe), val); - intel_de_posting_read(dev_priv, PIPECONF(pipe)); + intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val); + intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); } static void hsw_set_transconf(const struct intel_crtc_state *crtc_state) @@ -3378,22 +3312,22 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state) * - During fastset the pipe is already enabled and must remain so */ if (!intel_crtc_needs_modeset(crtc_state)) - val |= PIPECONF_ENABLE; + val |= TRANSCONF_ENABLE; if (IS_HASWELL(dev_priv) && crtc_state->dither) - val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP; + val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP; if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) - val |= PIPECONF_INTERLACE_IF_ID_ILK; + val |= TRANSCONF_INTERLACE_IF_ID_ILK; else - val |= PIPECONF_INTERLACE_PF_PD_ILK; + val |= TRANSCONF_INTERLACE_PF_PD_ILK; if (IS_HASWELL(dev_priv) && crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) - val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW; + val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW; - intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val); - intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder)); + 
intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val); + intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); } static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) @@ -3618,33 +3552,33 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc, pipe_config->shared_dpll = NULL; ret = false; - tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe)); - if (!(tmp & PIPECONF_ENABLE)) + tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder)); + if (!(tmp & TRANSCONF_ENABLE)) goto out; - switch (tmp & PIPECONF_BPC_MASK) { - case PIPECONF_BPC_6: + switch (tmp & TRANSCONF_BPC_MASK) { + case TRANSCONF_BPC_6: pipe_config->pipe_bpp = 18; break; - case PIPECONF_BPC_8: + case TRANSCONF_BPC_8: pipe_config->pipe_bpp = 24; break; - case PIPECONF_BPC_10: + case TRANSCONF_BPC_10: pipe_config->pipe_bpp = 30; break; - case PIPECONF_BPC_12: + case TRANSCONF_BPC_12: pipe_config->pipe_bpp = 36; break; default: break; } - if (tmp & PIPECONF_COLOR_RANGE_SELECT) + if (tmp & TRANSCONF_COLOR_RANGE_SELECT) pipe_config->limited_color_range = true; - switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) { - case PIPECONF_OUTPUT_COLORSPACE_YUV601: - case PIPECONF_OUTPUT_COLORSPACE_YUV709: + switch (tmp & TRANSCONF_OUTPUT_COLORSPACE_MASK) { + case TRANSCONF_OUTPUT_COLORSPACE_YUV601: + case TRANSCONF_OUTPUT_COLORSPACE_YUV709: pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; break; default: @@ -3652,11 +3586,11 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc, break; } - pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_ILK, tmp); + pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_ILK, tmp); - pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1; + pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1; - pipe_config->msa_timing_delay = REG_FIELD_GET(PIPECONF_MSA_TIMING_DELAY_MASK, tmp); + pipe_config->msa_timing_delay = REG_FIELD_GET(TRANSCONF_MSA_TIMING_DELAY_MASK, tmp); pipe_config->csc_mode = intel_de_read(dev_priv, PIPE_CSC_MODE(crtc->pipe)); @@ -3933,9 +3867,9 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, pipe_config->pch_pfit.force_thru = true; } - tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder)); + tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder)); - return tmp & PIPECONF_ENABLE; + return tmp & TRANSCONF_ENABLE; } static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, @@ -4039,9 +3973,9 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, if (IS_HASWELL(dev_priv)) { u32 tmp = intel_de_read(dev_priv, - PIPECONF(pipe_config->cpu_transcoder)); + TRANSCONF(pipe_config->cpu_transcoder)); - if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW) + if (tmp & TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW) pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; else pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; @@ -4090,7 +4024,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, !transcoder_is_dsi(pipe_config->cpu_transcoder)) { pipe_config->pixel_multiplier = intel_de_read(dev_priv, - PIPE_MULT(pipe_config->cpu_transcoder)) + 1; + TRANS_MULT(pipe_config->cpu_transcoder)) + 1; } else { pipe_config->pixel_multiplier = 1; } @@ -5439,6 +5373,20 @@ pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv, } } +/* Returns the length up to and including the last differing byte */ +static size_t +memcmp_diff_len(const u8 *a, const u8 *b, size_t len) +{ + int i; + + for (i = len - 1; i >= 0; i--) 
{ + if (a[i] != b[i]) + return i + 1; + } + + return 0; +} + static void pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv, bool fastset, const char *name, @@ -5448,6 +5396,9 @@ pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv, if (!drm_debug_enabled(DRM_UT_KMS)) return; + /* only dump up to the last difference */ + len = memcmp_diff_len(a, b, len); + drm_dbg_kms(&dev_priv->drm, "fastset mismatch in %s buffer\n", name); print_hex_dump(KERN_DEBUG, "expected: ", DUMP_PREFIX_NONE, @@ -5455,6 +5406,9 @@ pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv, print_hex_dump(KERN_DEBUG, "found: ", DUMP_PREFIX_NONE, 16, 0, b, len, false); } else { + /* only dump up to the last difference */ + len = memcmp_diff_len(a, b, len); + drm_err(&dev_priv->drm, "mismatch in %s buffer\n", name); print_hex_dump(KERN_ERR, "expected: ", DUMP_PREFIX_NONE, 16, 0, a, len, false); @@ -5943,6 +5897,8 @@ int intel_modeset_all_pipes(struct intel_atomic_state *state, return ret; crtc_state->update_planes |= crtc_state->active_planes; + crtc_state->async_flip_planes = 0; + crtc_state->do_async_flip = false; } return 0; @@ -6695,8 +6651,8 @@ static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state) * @dev: drm device * @_state: state to validate */ -static int intel_atomic_check(struct drm_device *dev, - struct drm_atomic_state *_state) +int intel_atomic_check(struct drm_device *dev, + struct drm_atomic_state *_state) { struct drm_i915_private *dev_priv = to_i915(dev); struct intel_atomic_state *state = to_intel_atomic_state(_state); @@ -8356,124 +8312,6 @@ void intel_modeset_init_hw(struct drm_i915_private *i915) cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw; } -static int sanitize_watermarks_add_affected(struct drm_atomic_state *state) -{ - struct drm_plane *plane; - struct intel_crtc *crtc; - - for_each_intel_crtc(state->dev, crtc) { - struct intel_crtc_state *crtc_state; - - crtc_state = intel_atomic_get_crtc_state(state, crtc); - if (IS_ERR(crtc_state)) - return PTR_ERR(crtc_state); - - if (crtc_state->hw.active) { - /* - * Preserve the inherited flag to avoid - * taking the full modeset path. - */ - crtc_state->inherited = true; - } - } - - drm_for_each_plane(plane, state->dev) { - struct drm_plane_state *plane_state; - - plane_state = drm_atomic_get_plane_state(state, plane); - if (IS_ERR(plane_state)) - return PTR_ERR(plane_state); - } - - return 0; -} - -/* - * Calculate what we think the watermarks should be for the state we've read - * out of the hardware and then immediately program those watermarks so that - * we ensure the hardware settings match our internal state. - * - * We can calculate what we think WM's should be by creating a duplicate of the - * current state (which was constructed during hardware readout) and running it - * through the atomic check code to calculate new watermark values in the - * state object. 
- */ -static void sanitize_watermarks(struct drm_i915_private *dev_priv) -{ - struct drm_atomic_state *state; - struct intel_atomic_state *intel_state; - struct intel_crtc *crtc; - struct intel_crtc_state *crtc_state; - struct drm_modeset_acquire_ctx ctx; - int ret; - int i; - - /* Only supported on platforms that use atomic watermark design */ - if (!dev_priv->display.funcs.wm->optimize_watermarks) - return; - - state = drm_atomic_state_alloc(&dev_priv->drm); - if (drm_WARN_ON(&dev_priv->drm, !state)) - return; - - intel_state = to_intel_atomic_state(state); - - drm_modeset_acquire_init(&ctx, 0); - -retry: - state->acquire_ctx = &ctx; - - /* - * Hardware readout is the only time we don't want to calculate - * intermediate watermarks (since we don't trust the current - * watermarks). - */ - if (!HAS_GMCH(dev_priv)) - intel_state->skip_intermediate_wm = true; - - ret = sanitize_watermarks_add_affected(state); - if (ret) - goto fail; - - ret = intel_atomic_check(&dev_priv->drm, state); - if (ret) - goto fail; - - /* Write calculated watermark values back */ - for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { - crtc_state->wm.need_postvbl_update = true; - intel_optimize_watermarks(intel_state, crtc); - - to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm; - } - -fail: - if (ret == -EDEADLK) { - drm_atomic_state_clear(state); - drm_modeset_backoff(&ctx); - goto retry; - } - - /* - * If we fail here, it means that the hardware appears to be - * programmed in a way that shouldn't be possible, given our - * understanding of watermark requirements. This might mean a - * mistake in the hardware readout code or a mistake in the - * watermark calculations for a given platform. Raise a WARN - * so that this is noticeable. - * - * If this actually happens, we'll have to just leave the - * BIOS-programmed watermarks untouched and hope for the best. - */ - drm_WARN(&dev_priv->drm, ret, - "Could not determine valid watermarks for inherited state\n"); - - drm_atomic_state_put(state); - - drm_modeset_drop_locks(&ctx); - drm_modeset_acquire_fini(&ctx); -} - static int intel_initial_commit(struct drm_device *dev) { struct drm_atomic_state *state = NULL; @@ -8634,12 +8472,16 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915) goto cleanup_bios; /* FIXME: completely on the wrong abstraction layer */ + ret = intel_power_domains_init(i915); + if (ret < 0) + goto cleanup_vga; + intel_power_domains_init_hw(i915, false); if (!HAS_DISPLAY(i915)) return 0; - intel_dmc_ucode_init(i915); + intel_dmc_init(i915); i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0); i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI | @@ -8674,8 +8516,9 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915) return 0; cleanup_vga_client_pw_domain_dmc: - intel_dmc_ucode_fini(i915); + intel_dmc_fini(i915); intel_power_domains_driver_remove(i915); +cleanup_vga: intel_vga_unregister(i915); cleanup_bios: intel_bios_driver_remove(i915); @@ -8694,7 +8537,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915) if (!HAS_DISPLAY(i915)) return 0; - intel_init_pm(i915); + intel_wm_init(i915); intel_panel_sanitize_ssc(i915); @@ -8750,7 +8593,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915) * since the watermark calculation done here will use pstate->fb. 
*/ if (!HAS_GMCH(i915)) - sanitize_watermarks(i915); + ilk_wm_sanitize(i915); return 0; } @@ -8791,6 +8634,7 @@ int intel_modeset_init(struct drm_i915_private *i915) void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) { struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); + enum transcoder cpu_transcoder = (enum transcoder)pipe; /* 640x480@60Hz, ~25175 kHz */ struct dpll clock = { .m1 = 18, @@ -8817,13 +8661,20 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) PLL_REF_INPUT_DREFCLK | DPLL_VCO_ENABLE; - intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16)); - intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16)); - intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16)); - intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16)); - intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16)); - intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16)); - intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1)); + intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder), + HACTIVE(640 - 1) | HTOTAL(800 - 1)); + intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder), + HBLANK_START(640 - 1) | HBLANK_END(800 - 1)); + intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder), + HSYNC_START(656 - 1) | HSYNC_END(752 - 1)); + intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder), + VACTIVE(480 - 1) | VTOTAL(525 - 1)); + intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder), + VBLANK_START(480 - 1) | VBLANK_END(525 - 1)); + intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder), + VSYNC_START(490 - 1) | VSYNC_END(492 - 1)); + intel_de_write(dev_priv, PIPESRC(pipe), + PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1)); intel_de_write(dev_priv, FP0(pipe), fp); intel_de_write(dev_priv, FP1(pipe), fp); @@ -8854,8 +8705,8 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) udelay(150); /* wait for warmup */ } - intel_de_write(dev_priv, PIPECONF(pipe), PIPECONF_ENABLE); - intel_de_posting_read(dev_priv, PIPECONF(pipe)); + intel_de_write(dev_priv, TRANSCONF(pipe), TRANSCONF_ENABLE); + intel_de_posting_read(dev_priv, TRANSCONF(pipe)); intel_wait_for_pipe_scanline_moving(crtc); } @@ -8878,8 +8729,8 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) drm_WARN_ON(&dev_priv->drm, intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK); - intel_de_write(dev_priv, PIPECONF(pipe), 0); - intel_de_posting_read(dev_priv, PIPECONF(pipe)); + intel_de_write(dev_priv, TRANSCONF(pipe), 0); + intel_de_posting_read(dev_priv, TRANSCONF(pipe)); intel_wait_for_pipe_scanline_stopped(crtc); @@ -9000,7 +8851,7 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915) /* part #3: call after gem init */ void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915) { - intel_dmc_ucode_fini(i915); + intel_dmc_fini(i915); intel_power_domains_driver_remove(i915); @@ -9051,7 +8902,7 @@ void intel_display_driver_register(struct drm_i915_private *i915) * enabled. We do it last so that the async config cannot run * before the connectors are registered. 
*/ - intel_fbdev_initial_config_async(&i915->drm); + intel_fbdev_initial_config_async(i915); /* * We need to coordinate the hotplugs with the asynchronous diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index cb6f520cc575..50285fb4fcf5 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -32,6 +32,7 @@ enum drm_scaling_filter; struct dpll; +struct drm_atomic_state; struct drm_connector; struct drm_device; struct drm_display_mode; @@ -171,6 +172,8 @@ enum tc_port_mode { }; enum aux_ch { + AUX_CH_NONE = -1, + AUX_CH_A, AUX_CH_B, AUX_CH_C, @@ -394,6 +397,7 @@ enum phy_fia { ((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \ (new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1)) +int intel_atomic_check(struct drm_device *dev, struct drm_atomic_state *state); int intel_atomic_add_affected_planes(struct intel_atomic_state *state, struct intel_crtc *crtc); u8 intel_calc_active_pipes(struct intel_atomic_state *state, diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h index fb8670aa2932..fdab7bb93a7d 100644 --- a/drivers/gpu/drm/i915/display/intel_display_core.h +++ b/drivers/gpu/drm/i915/display/intel_display_core.h @@ -19,13 +19,12 @@ #include "intel_cdclk.h" #include "intel_display_limits.h" #include "intel_display_power.h" -#include "intel_dmc.h" #include "intel_dpll_mgr.h" #include "intel_fbc.h" #include "intel_global_state.h" #include "intel_gmbus.h" #include "intel_opregion.h" -#include "intel_pm_types.h" +#include "intel_wm_types.h" struct drm_i915_private; struct drm_property; @@ -40,6 +39,7 @@ struct intel_cdclk_vals; struct intel_color_funcs; struct intel_crtc; struct intel_crtc_state; +struct intel_dmc; struct intel_dpll_funcs; struct intel_dpll_mgr; struct intel_fbdev; @@ -85,6 +85,7 @@ struct intel_wm_funcs { void (*optimize_watermarks)(struct intel_atomic_state *state, struct intel_crtc *crtc); int (*compute_global_watermarks)(struct intel_atomic_state *state); + void (*get_hw_state)(struct drm_i915_private *i915); }; struct intel_audio_state { @@ -102,7 +103,7 @@ struct intel_audio { u32 freq_cntrl; /* current audio state for the audio component hooks */ - struct intel_audio_state state[I915_MAX_PIPES]; + struct intel_audio_state state[I915_MAX_TRANSCODERS]; /* necessary resource sharing with HDMI LPE audio driver. */ struct { @@ -243,7 +244,7 @@ struct intel_wm { struct g4x_wm_values g4x; }; - u8 max_level; + u8 num_levels; /* * Should be held around atomic WM register writing; also @@ -340,6 +341,11 @@ struct intel_display { } dkl; struct { + struct intel_dmc *dmc; + intel_wakeref_t wakeref; + } dmc; + + struct { /* VLV/CHV/BXT/GLK DSI MMIO register base address */ u32 mmio_base; } dsi; @@ -466,7 +472,6 @@ struct intel_display { /* Grouping using named structs. Keep sorted. 
*/ struct intel_audio audio; - struct intel_dmc dmc; struct intel_dpll dpll; struct intel_fbc *fbc[I915_MAX_FBCS]; struct intel_frontbuffer_tracking fb_tracking; diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index 7bcd90384a46..1e654ddd0815 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -26,10 +26,9 @@ #include "intel_hdmi.h" #include "intel_hotplug.h" #include "intel_panel.h" -#include "intel_pm.h" #include "intel_psr.h" #include "intel_sprite.h" -#include "skl_watermark.h" +#include "intel_wm.h" static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) { @@ -1282,237 +1281,6 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data) } DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type); -static void wm_latency_show(struct seq_file *m, const u16 wm[8]) -{ - struct drm_i915_private *dev_priv = m->private; - int level; - int num_levels; - - if (IS_CHERRYVIEW(dev_priv)) - num_levels = 3; - else if (IS_VALLEYVIEW(dev_priv)) - num_levels = 1; - else if (IS_G4X(dev_priv)) - num_levels = 3; - else - num_levels = ilk_wm_max_level(dev_priv) + 1; - - drm_modeset_lock_all(&dev_priv->drm); - - for (level = 0; level < num_levels; level++) { - unsigned int latency = wm[level]; - - /* - * - WM1+ latency values in 0.5us units - * - latencies are in us on gen9/vlv/chv - */ - if (DISPLAY_VER(dev_priv) >= 9 || - IS_VALLEYVIEW(dev_priv) || - IS_CHERRYVIEW(dev_priv) || - IS_G4X(dev_priv)) - latency *= 10; - else if (level > 0) - latency *= 5; - - seq_printf(m, "WM%d %u (%u.%u usec)\n", - level, wm[level], latency / 10, latency % 10); - } - - drm_modeset_unlock_all(&dev_priv->drm); -} - -static int pri_wm_latency_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - const u16 *latencies; - - if (DISPLAY_VER(dev_priv) >= 9) - latencies = dev_priv->display.wm.skl_latency; - else - latencies = dev_priv->display.wm.pri_latency; - - wm_latency_show(m, latencies); - - return 0; -} - -static int spr_wm_latency_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - const u16 *latencies; - - if (DISPLAY_VER(dev_priv) >= 9) - latencies = dev_priv->display.wm.skl_latency; - else - latencies = dev_priv->display.wm.spr_latency; - - wm_latency_show(m, latencies); - - return 0; -} - -static int cur_wm_latency_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - const u16 *latencies; - - if (DISPLAY_VER(dev_priv) >= 9) - latencies = dev_priv->display.wm.skl_latency; - else - latencies = dev_priv->display.wm.cur_latency; - - wm_latency_show(m, latencies); - - return 0; -} - -static int pri_wm_latency_open(struct inode *inode, struct file *file) -{ - struct drm_i915_private *dev_priv = inode->i_private; - - if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) - return -ENODEV; - - return single_open(file, pri_wm_latency_show, dev_priv); -} - -static int spr_wm_latency_open(struct inode *inode, struct file *file) -{ - struct drm_i915_private *dev_priv = inode->i_private; - - if (HAS_GMCH(dev_priv)) - return -ENODEV; - - return single_open(file, spr_wm_latency_show, dev_priv); -} - -static int cur_wm_latency_open(struct inode *inode, struct file *file) -{ - struct drm_i915_private *dev_priv = inode->i_private; - - if (HAS_GMCH(dev_priv)) - return -ENODEV; - - return single_open(file, cur_wm_latency_show, dev_priv); -} - -static 
ssize_t wm_latency_write(struct file *file, const char __user *ubuf, - size_t len, loff_t *offp, u16 wm[8]) -{ - struct seq_file *m = file->private_data; - struct drm_i915_private *dev_priv = m->private; - u16 new[8] = { 0 }; - int num_levels; - int level; - int ret; - char tmp[32]; - - if (IS_CHERRYVIEW(dev_priv)) - num_levels = 3; - else if (IS_VALLEYVIEW(dev_priv)) - num_levels = 1; - else if (IS_G4X(dev_priv)) - num_levels = 3; - else - num_levels = ilk_wm_max_level(dev_priv) + 1; - - if (len >= sizeof(tmp)) - return -EINVAL; - - if (copy_from_user(tmp, ubuf, len)) - return -EFAULT; - - tmp[len] = '\0'; - - ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu", - &new[0], &new[1], &new[2], &new[3], - &new[4], &new[5], &new[6], &new[7]); - if (ret != num_levels) - return -EINVAL; - - drm_modeset_lock_all(&dev_priv->drm); - - for (level = 0; level < num_levels; level++) - wm[level] = new[level]; - - drm_modeset_unlock_all(&dev_priv->drm); - - return len; -} - - -static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, - size_t len, loff_t *offp) -{ - struct seq_file *m = file->private_data; - struct drm_i915_private *dev_priv = m->private; - u16 *latencies; - - if (DISPLAY_VER(dev_priv) >= 9) - latencies = dev_priv->display.wm.skl_latency; - else - latencies = dev_priv->display.wm.pri_latency; - - return wm_latency_write(file, ubuf, len, offp, latencies); -} - -static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, - size_t len, loff_t *offp) -{ - struct seq_file *m = file->private_data; - struct drm_i915_private *dev_priv = m->private; - u16 *latencies; - - if (DISPLAY_VER(dev_priv) >= 9) - latencies = dev_priv->display.wm.skl_latency; - else - latencies = dev_priv->display.wm.spr_latency; - - return wm_latency_write(file, ubuf, len, offp, latencies); -} - -static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, - size_t len, loff_t *offp) -{ - struct seq_file *m = file->private_data; - struct drm_i915_private *dev_priv = m->private; - u16 *latencies; - - if (DISPLAY_VER(dev_priv) >= 9) - latencies = dev_priv->display.wm.skl_latency; - else - latencies = dev_priv->display.wm.cur_latency; - - return wm_latency_write(file, ubuf, len, offp, latencies); -} - -static const struct file_operations i915_pri_wm_latency_fops = { - .owner = THIS_MODULE, - .open = pri_wm_latency_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = pri_wm_latency_write -}; - -static const struct file_operations i915_spr_wm_latency_fops = { - .owner = THIS_MODULE, - .open = spr_wm_latency_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = spr_wm_latency_write -}; - -static const struct file_operations i915_cur_wm_latency_fops = { - .owner = THIS_MODULE, - .open = cur_wm_latency_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = cur_wm_latency_write -}; - static ssize_t i915_fifo_underrun_reset_write(struct file *filp, const char __user *ubuf, @@ -1593,9 +1361,6 @@ static const struct { const struct file_operations *fops; } intel_display_debugfs_files[] = { {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops}, - {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, - {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, - {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, {"i915_dp_test_data", &i915_displayport_test_data_fops}, {"i915_dp_test_type", &i915_displayport_test_type_fops}, {"i915_dp_test_active", 
&i915_displayport_test_active_fops}, @@ -1622,7 +1387,7 @@ void intel_display_debugfs_register(struct drm_i915_private *i915) intel_dmc_debugfs_register(i915); intel_fbc_debugfs_register(i915); intel_hpd_debugfs_register(i915); - skl_watermark_ipc_debugfs_register(i915); + intel_wm_debugfs_register(i915); } static int i915_panel_show(struct seq_file *m, void *data) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 1a23ecd4623a..f085ae971150 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -264,9 +264,10 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, } static u32 -sanitize_target_dc_state(struct drm_i915_private *dev_priv, +sanitize_target_dc_state(struct drm_i915_private *i915, u32 target_dc_state) { + struct i915_power_domains *power_domains = &i915->display.power.domains; static const u32 states[] = { DC_STATE_EN_UPTO_DC6, DC_STATE_EN_UPTO_DC5, @@ -279,7 +280,7 @@ sanitize_target_dc_state(struct drm_i915_private *dev_priv, if (target_dc_state != states[i]) continue; - if (dev_priv->display.dmc.allowed_dc_mask & target_dc_state) + if (power_domains->allowed_dc_mask & target_dc_state) break; target_dc_state = states[i + 1]; @@ -312,7 +313,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv, state = sanitize_target_dc_state(dev_priv, state); - if (state == dev_priv->display.dmc.target_dc_state) + if (state == power_domains->target_dc_state) goto unlock; dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well); @@ -323,7 +324,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv, if (!dc_off_enabled) intel_power_well_enable(dev_priv, power_well); - dev_priv->display.dmc.target_dc_state = state; + power_domains->target_dc_state = state; if (!dc_off_enabled) intel_power_well_disable(dev_priv, power_well); @@ -992,10 +993,10 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) dev_priv->params.disable_power_well = sanitize_disable_power_well_option(dev_priv, dev_priv->params.disable_power_well); - dev_priv->display.dmc.allowed_dc_mask = + power_domains->allowed_dc_mask = get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc); - dev_priv->display.dmc.target_dc_state = + power_domains->target_dc_state = sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); mutex_init(&power_domains->lock); @@ -1260,9 +1261,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n"); if (allow_power_down) { - val = intel_de_read(dev_priv, LCPLL_CTL); - val |= LCPLL_POWER_DOWN_ALLOW; - intel_de_write(dev_priv, LCPLL_CTL, val); + intel_de_rmw(dev_priv, LCPLL_CTL, 0, LCPLL_POWER_DOWN_ALLOW); intel_de_posting_read(dev_priv, LCPLL_CTL); } } @@ -1306,9 +1305,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) drm_err(&dev_priv->drm, "LCPLL not locked yet\n"); if (val & LCPLL_CD_SOURCE_FCLK) { - val = intel_de_read(dev_priv, LCPLL_CTL); - val &= ~LCPLL_CD_SOURCE_FCLK; - intel_de_write(dev_priv, LCPLL_CTL, val); + intel_de_rmw(dev_priv, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0); if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) & LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) @@ -1347,15 +1344,11 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) */ static void hsw_enable_pc8(struct drm_i915_private *dev_priv) { - u32 val; - drm_dbg_kms(&dev_priv->drm, "Enabling package 
C8+\n"); - if (HAS_PCH_LPT_LP(dev_priv)) { - val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D); - val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; - intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val); - } + if (HAS_PCH_LPT_LP(dev_priv)) + intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, + PCH_LP_PARTITION_LEVEL_DISABLE, 0); lpt_disable_clkout_dp(dev_priv); hsw_disable_lcpll(dev_priv, true, true); @@ -1363,25 +1356,21 @@ static void hsw_enable_pc8(struct drm_i915_private *dev_priv) static void hsw_disable_pc8(struct drm_i915_private *dev_priv) { - u32 val; - drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n"); hsw_restore_lcpll(dev_priv); intel_init_pch_refclk(dev_priv); - if (HAS_PCH_LPT_LP(dev_priv)) { - val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D); - val |= PCH_LP_PARTITION_LEVEL_DISABLE; - intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val); - } + if (HAS_PCH_LPT_LP(dev_priv)) + intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, + 0, PCH_LP_PARTITION_LEVEL_DISABLE); } static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, bool enable) { i915_reg_t reg; - u32 reset_bits, val; + u32 reset_bits; if (IS_IVYBRIDGE(dev_priv)) { reg = GEN7_MSG_CTL; @@ -1394,14 +1383,7 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, if (DISPLAY_VER(dev_priv) >= 14) reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN; - val = intel_de_read(dev_priv, reg); - - if (enable) - val |= reset_bits; - else - val &= ~reset_bits; - - intel_de_write(dev_priv, reg, val); + intel_de_rmw(dev_priv, reg, reset_bits, enable ? reset_bits : 0); } static void skl_display_core_init(struct drm_i915_private *dev_priv, @@ -1580,10 +1562,8 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) return; if (IS_ALDERLAKE_S(dev_priv) || - IS_DG1_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) || - IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) || - IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) - /* Wa_1409767108:tgl,dg1,adl-s */ + IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) + /* Wa_1409767108 */ table = wa_1409767108_buddy_page_masks; else table = tgl_buddy_page_masks; @@ -1618,7 +1598,6 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, { struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct i915_power_well *well; - u32 val; gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); @@ -1670,11 +1649,10 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, intel_dmc_load_program(dev_priv); /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */ - if (DISPLAY_VER(dev_priv) >= 12) { - val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM | - DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR; - intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0, val); - } + if (DISPLAY_VER(dev_priv) >= 12) + intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0, + DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM | + DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR); /* Wa_14011503030:xelpd */ if (DISPLAY_VER(dev_priv) >= 13) @@ -2055,7 +2033,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, * resources as required and also enable deeper system power states * that would be blocked if the firmware was inactive. 
*/ - if (!(i915->display.dmc.allowed_dc_mask & DC_STATE_EN_DC9) && + if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) && suspend_mode == I915_DRM_SUSPEND_IDLE && intel_dmc_has_payload(i915)) { intel_display_power_flush_work(i915); @@ -2244,22 +2222,22 @@ void intel_display_power_suspend(struct drm_i915_private *i915) void intel_display_power_resume(struct drm_i915_private *i915) { + struct i915_power_domains *power_domains = &i915->display.power.domains; + if (DISPLAY_VER(i915) >= 11) { bxt_disable_dc9(i915); icl_display_core_init(i915, true); if (intel_dmc_has_payload(i915)) { - if (i915->display.dmc.allowed_dc_mask & - DC_STATE_EN_UPTO_DC6) + if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC6) skl_enable_dc6(i915); - else if (i915->display.dmc.allowed_dc_mask & - DC_STATE_EN_UPTO_DC5) + else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5) gen9_enable_dc5(i915); } } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { bxt_disable_dc9(i915); bxt_display_core_init(i915, true); if (intel_dmc_has_payload(i915) && - (i915->display.dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) + (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) gen9_enable_dc5(i915); } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { hsw_disable_pc8(i915); diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index 2154d900b1aa..8e96be8e6330 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -137,6 +137,10 @@ struct i915_power_domains { bool display_core_suspended; int power_well_count; + u32 dc_state; + u32 target_dc_state; + u32 allowed_dc_mask; + intel_wakeref_t init_wakeref; intel_wakeref_t disable_wakeref; diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index 8710dd41ffd4..1676df1dc066 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -333,7 +333,6 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, { const struct i915_power_well_regs *regs = power_well->desc->ops->regs; int pw_idx = i915_power_well_instance(power_well)->hsw.idx; - u32 val; if (power_well->desc->has_fuses) { enum skl_power_gate pg; @@ -356,9 +355,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0); } - val = intel_de_read(dev_priv, regs->driver); - intel_de_write(dev_priv, regs->driver, - val | HSW_PWR_WELL_CTL_REQ(pw_idx)); + intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx)); hsw_wait_for_power_well_enable(dev_priv, power_well, false); @@ -380,17 +377,27 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv, { const struct i915_power_well_regs *regs = power_well->desc->ops->regs; int pw_idx = i915_power_well_instance(power_well)->hsw.idx; - u32 val; hsw_power_well_pre_disable(dev_priv, power_well->desc->irq_pipe_mask); - val = intel_de_read(dev_priv, regs->driver); - intel_de_write(dev_priv, regs->driver, - val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); + intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0); hsw_wait_for_power_well_disable(dev_priv, power_well); } +static bool intel_port_is_edp(struct drm_i915_private *i915, enum port port) +{ + struct intel_encoder *encoder; + + for_each_intel_encoder(&i915->drm, encoder) { + if (encoder->type == INTEL_OUTPUT_EDP && + encoder->port == port) + 
return true; + } + + return false; +} + static void icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) @@ -398,29 +405,22 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, const struct i915_power_well_regs *regs = power_well->desc->ops->regs; int pw_idx = i915_power_well_instance(power_well)->hsw.idx; enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); - u32 val; drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); - val = intel_de_read(dev_priv, regs->driver); - intel_de_write(dev_priv, regs->driver, - val | HSW_PWR_WELL_CTL_REQ(pw_idx)); + intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx)); - if (DISPLAY_VER(dev_priv) < 12) { - val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy)); - intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy), - val | ICL_LANE_ENABLE_AUX); - } + if (DISPLAY_VER(dev_priv) < 12) + intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy), + 0, ICL_LANE_ENABLE_AUX); hsw_wait_for_power_well_enable(dev_priv, power_well, false); /* Display WA #1178: icl */ if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B && - !intel_bios_is_port_edp(dev_priv, (enum port)phy)) { - val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx)); - val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS; - intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val); - } + !intel_port_is_edp(dev_priv, (enum port)phy)) + intel_de_rmw(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), + 0, ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS); } static void @@ -430,17 +430,12 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, const struct i915_power_well_regs *regs = power_well->desc->ops->regs; int pw_idx = i915_power_well_instance(power_well)->hsw.idx; enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); - u32 val; drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); - val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy)); - intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy), - val & ~ICL_LANE_ENABLE_AUX); + intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy), ICL_LANE_ENABLE_AUX, 0); - val = intel_de_read(dev_priv, regs->driver); - intel_de_write(dev_priv, regs->driver, - val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); + intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0); hsw_wait_for_power_well_disable(dev_priv, power_well); } @@ -502,19 +497,15 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, const struct i915_power_well_regs *regs = power_well->desc->ops->regs; bool is_tbt = power_well->desc->is_tc_tbt; bool timeout_expected; - u32 val; icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port); - val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch)); - val &= ~DP_AUX_CH_CTL_TBT_IO; - if (is_tbt) - val |= DP_AUX_CH_CTL_TBT_IO; - intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val); + intel_de_rmw(dev_priv, DP_AUX_CH_CTL(aux_ch), + DP_AUX_CH_CTL_TBT_IO, is_tbt ? 
DP_AUX_CH_CTL_TBT_IO : 0); - val = intel_de_read(dev_priv, regs->driver); - intel_de_write(dev_priv, regs->driver, - val | HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx)); + intel_de_rmw(dev_priv, regs->driver, + 0, + HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx)); /* * An AUX timeout is expected if the TBT DP tunnel is down, @@ -700,19 +691,20 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) return mask; } -void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv) +void gen9_sanitize_dc_state(struct drm_i915_private *i915) { + struct i915_power_domains *power_domains = &i915->display.power.domains; u32 val; - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(i915)) return; - val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv); + val = intel_de_read(i915, DC_STATE_EN) & gen9_dc_mask(i915); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "Resetting DC state tracking from %02x to %02x\n", - dev_priv->display.dmc.dc_state, val); - dev_priv->display.dmc.dc_state = val; + power_domains->dc_state, val); + power_domains->dc_state = val; } /** @@ -740,6 +732,7 @@ void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv) */ void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) { + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; u32 val; u32 mask; @@ -747,8 +740,8 @@ void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) return; if (drm_WARN_ON_ONCE(&dev_priv->drm, - state & ~dev_priv->display.dmc.allowed_dc_mask)) - state &= dev_priv->display.dmc.allowed_dc_mask; + state & ~power_domains->allowed_dc_mask)) + state &= power_domains->allowed_dc_mask; val = intel_de_read(dev_priv, DC_STATE_EN); mask = gen9_dc_mask(dev_priv); @@ -756,16 +749,16 @@ void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) val & mask, state); /* Check if DMC is ignoring our DC state requests */ - if ((val & mask) != dev_priv->display.dmc.dc_state) + if ((val & mask) != power_domains->dc_state) drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n", - dev_priv->display.dmc.dc_state, val & mask); + power_domains->dc_state, val & mask); val &= ~mask; val |= state; gen9_write_dc_state(dev_priv, val); - dev_priv->display.dmc.dc_state = val & mask; + power_domains->dc_state = val & mask; } static void tgl_enable_dc3co(struct drm_i915_private *dev_priv) @@ -776,12 +769,8 @@ static void tgl_enable_dc3co(struct drm_i915_private *dev_priv) static void tgl_disable_dc3co(struct drm_i915_private *dev_priv) { - u32 val; - drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n"); - val = intel_de_read(dev_priv, DC_STATE_EN); - val &= ~DC_STATE_DC3CO_STATUS; - intel_de_write(dev_priv, DC_STATE_EN, val); + intel_de_rmw(dev_priv, DC_STATE_EN, DC_STATE_DC3CO_STATUS, 0); gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); /* * Delay of 200us DC3CO Exit time B.Spec 49196 @@ -820,8 +809,8 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv) /* Wa Display #1183: skl,kbl,cfl */ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) - intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1, - intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT); + intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, + 0, SKL_SELECT_ALTERNATE_DC_EXIT); gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); } @@ -847,8 +836,8 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv) /* Wa Display #1183: skl,kbl,cfl */ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) - intel_de_write(dev_priv, 
GEN8_CHICKEN_DCPR_1, - intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT); + intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, + 0, SKL_SELECT_ALTERNATE_DC_EXIT); gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); } @@ -957,9 +946,10 @@ static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv) void gen9_disable_dc_states(struct drm_i915_private *dev_priv) { + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct intel_cdclk_config cdclk_config = {}; - if (dev_priv->display.dmc.target_dc_state == DC_STATE_EN_DC3CO) { + if (power_domains->target_dc_state == DC_STATE_EN_DC3CO) { tgl_disable_dc3co(dev_priv); return; } @@ -998,10 +988,12 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; + if (!intel_dmc_has_payload(dev_priv)) return; - switch (dev_priv->display.dmc.target_dc_state) { + switch (power_domains->target_dc_state) { case DC_STATE_EN_DC3CO: tgl_enable_dc3co(dev_priv); break; @@ -1033,9 +1025,9 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv, static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0) + if ((intel_de_read(dev_priv, TRANSCONF(PIPE_A)) & TRANSCONF_ENABLE) == 0) i830_enable_pipe(dev_priv, PIPE_A); - if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0) + if ((intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE) == 0) i830_enable_pipe(dev_priv, PIPE_B); } @@ -1049,8 +1041,8 @@ static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv, static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE && - intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE; + return intel_de_read(dev_priv, TRANSCONF(PIPE_A)) & TRANSCONF_ENABLE && + intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE; } static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv, @@ -1149,18 +1141,14 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) { - u32 val; - /* * On driver load, a pipe may be active and driving a DSI display. * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck * (and never recovering) in this case. intel_dsi_post_disable() will * clear it when we turn off the display. */ - val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv)); - val &= DPOUNIT_CLOCK_GATE_DISABLE; - val |= VRHUNIT_CLOCK_GATE_DISABLE; - intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val); + intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv), + ~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE); /* * Disable trickle feed and enable pnd deadline calculation @@ -1276,8 +1264,7 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, * both PLLs disabled, or we risk losing DPIO and PLL * synchronization. 
*/ - intel_de_write(dev_priv, DPIO_CTL, - intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST); + intel_de_rmw(dev_priv, DPIO_CTL, 0, DPIO_CMNRST); } static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, @@ -1289,8 +1276,7 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, assert_pll_disabled(dev_priv, pipe); /* Assert common reset */ - intel_de_write(dev_priv, DPIO_CTL, - intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST); + intel_de_rmw(dev_priv, DPIO_CTL, DPIO_CMNRST, 0); vlv_set_power_well(dev_priv, power_well, false); } diff --git a/drivers/gpu/drm/i915/display/intel_display_reg_defs.h b/drivers/gpu/drm/i915/display/intel_display_reg_defs.h index 02605418ff08..755c1ea8225c 100644 --- a/drivers/gpu/drm/i915/display/intel_display_reg_defs.h +++ b/drivers/gpu/drm/i915/display/intel_display_reg_defs.h @@ -13,7 +13,7 @@ #define VLV_DISPLAY_BASE 0x180000 /* - * Named helper wrappers around _PICK_EVEN() and _PICK(). + * Named helper wrappers around _PICK_EVEN() and _PICK_EVEN_2RANGES(). */ #define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b) #define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b) @@ -29,12 +29,8 @@ #define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b)) #define _MMIO_PHY(phy, a, b) _MMIO(_PHY(phy, a, b)) -#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__) - -#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) -#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) -#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c)) -#define _MMIO_PLL3(pll, ...) _MMIO(_PICK(pll, __VA_ARGS__)) +#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK_EVEN_2RANGES(pipe, 1, a, a, b, c)) +#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK_EVEN_2RANGES(pipe, 1, a, a, b, c)) /* * Device info offset array based helpers for groups of registers with unevenly diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.c b/drivers/gpu/drm/i915/display/intel_display_rps.c new file mode 100644 index 000000000000..918d0327169a --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_rps.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include <drm/drm_crtc.h> +#include <drm/drm_vblank.h> + +#include "gt/intel_rps.h" +#include "i915_drv.h" +#include "intel_display_rps.h" +#include "intel_display_types.h" + +struct wait_rps_boost { + struct wait_queue_entry wait; + + struct drm_crtc *crtc; + struct i915_request *request; +}; + +static int do_rps_boost(struct wait_queue_entry *_wait, + unsigned mode, int sync, void *key) +{ + struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait); + struct i915_request *rq = wait->request; + + /* + * If we missed the vblank, but the request is already running it + * is reasonable to assume that it will complete before the next + * vblank without our intervention, so leave RPS alone. 
+ */ + if (!i915_request_started(rq)) + intel_rps_boost(rq); + i915_request_put(rq); + + drm_crtc_vblank_put(wait->crtc); + + list_del(&wait->wait.entry); + kfree(wait); + return 1; +} + +void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc, + struct dma_fence *fence) +{ + struct wait_rps_boost *wait; + + if (!dma_fence_is_i915(fence)) + return; + + if (DISPLAY_VER(to_i915(crtc->dev)) < 6) + return; + + if (drm_crtc_vblank_get(crtc)) + return; + + wait = kmalloc(sizeof(*wait), GFP_KERNEL); + if (!wait) { + drm_crtc_vblank_put(crtc); + return; + } + + wait->request = to_request(dma_fence_get(fence)); + wait->crtc = crtc; + + wait->wait.func = do_rps_boost; + wait->wait.flags = 0; + + add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait); +} + +void intel_display_rps_mark_interactive(struct drm_i915_private *i915, + struct intel_atomic_state *state, + bool interactive) +{ + if (state->rps_interactive == interactive) + return; + + intel_rps_mark_interactive(&to_gt(i915)->rps, interactive); + state->rps_interactive = interactive; +} diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.h b/drivers/gpu/drm/i915/display/intel_display_rps.h new file mode 100644 index 000000000000..e19009c2371a --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_rps.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __INTEL_DISPLAY_RPS_H__ +#define __INTEL_DISPLAY_RPS_H__ + +#include <linux/types.h> + +struct dma_fence; +struct drm_crtc; +struct drm_i915_private; +struct intel_atomic_state; + +void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc, + struct dma_fence *fence); +void intel_display_rps_mark_interactive(struct drm_i915_private *i915, + struct intel_atomic_state *state, + bool interactive); + +#endif /* __INTEL_DISPLAY_RPS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 54c517ca9632..c32bfba06ca1 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -53,7 +53,7 @@ #include "intel_display_limits.h" #include "intel_display_power.h" #include "intel_dpll_mgr.h" -#include "intel_pm_types.h" +#include "intel_wm_types.h" struct drm_printer; struct __intel_global_objs_state; @@ -326,6 +326,7 @@ struct intel_vbt_panel_data { struct { u16 pwm_freq_hz; u16 brightness_precision_bits; + u16 hdr_dpcd_refresh_timeout; bool present; bool active_low_pwm; u8 min_brightness; /* min_brightness/255 of max */ @@ -1249,6 +1250,9 @@ struct intel_crtc_state { /* bitmask of planes that will be updated during the commit */ u8 update_planes; + /* bitmask of planes with async flip active */ + u8 async_flip_planes; + u8 framestart_delay; /* 1-4 */ u8 msa_timing_delay; /* 0-3 */ @@ -1502,17 +1506,6 @@ struct intel_watermark_params { u8 cacheline_size; }; -struct cxsr_latency { - bool is_desktop : 1; - bool is_ddr3 : 1; - u16 fsb_freq; - u16 mem_freq; - u16 display_sr; - u16 display_hpll_disable; - u16 cursor_sr; - u16 cursor_hpll_disable; -}; - #define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base) #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) #define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, uapi) @@ -1631,6 +1624,8 @@ struct intel_psr { bool psr2_sel_fetch_cff_enabled; bool req_psr2_sdp_prior_scanline; u8 sink_sync_latency; + u8 io_wake_lines; + u8 fast_wake_lines; ktime_t last_entry_attempt; ktime_t 
last_exit; bool sink_not_reliable; diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index 257aa2b7cf20..6b162f77340e 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -38,6 +38,39 @@ * low-power state and comes back to normal. */ +enum intel_dmc_id { + DMC_FW_MAIN = 0, + DMC_FW_PIPEA, + DMC_FW_PIPEB, + DMC_FW_PIPEC, + DMC_FW_PIPED, + DMC_FW_MAX +}; + +struct intel_dmc { + struct drm_i915_private *i915; + struct work_struct work; + const char *fw_path; + u32 max_fw_size; /* bytes */ + u32 version; + struct dmc_fw_info { + u32 mmio_count; + i915_reg_t mmioaddr[20]; + u32 mmiodata[20]; + u32 dmc_offset; + u32 start_mmioaddr; + u32 dmc_fw_size; /*dwords */ + u32 *payload; + bool present; + } dmc_info[DMC_FW_MAX]; +}; + +/* Note: This may be NULL. */ +static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915) +{ + return i915->display.dmc.dmc; +} + #define DMC_VERSION(major, minor) ((major) << 16 | (minor)) #define DMC_VERSION_MAJOR(version) ((version) >> 16) #define DMC_VERSION_MINOR(version) ((version) & 0xffff) @@ -249,9 +282,19 @@ struct stepping_info { char substepping; }; -static bool has_dmc_id_fw(struct drm_i915_private *i915, int dmc_id) +#define for_each_dmc_id(__dmc_id) \ + for ((__dmc_id) = DMC_FW_MAIN; (__dmc_id) < DMC_FW_MAX; (__dmc_id)++) + +static bool is_valid_dmc_id(enum intel_dmc_id dmc_id) { - return i915->display.dmc.dmc_info[dmc_id].payload; + return dmc_id >= DMC_FW_MAIN && dmc_id < DMC_FW_MAX; +} + +static bool has_dmc_id_fw(struct drm_i915_private *i915, enum intel_dmc_id dmc_id) +{ + struct intel_dmc *dmc = i915_to_dmc(i915); + + return dmc && dmc->dmc_info[dmc_id].payload; } bool intel_dmc_has_payload(struct drm_i915_private *i915) @@ -270,12 +313,12 @@ intel_get_stepping_info(struct drm_i915_private *i915, return si; } -static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv) +static void gen9_set_dc_state_debugmask(struct drm_i915_private *i915) { /* The below bit doesn't need to be cleared ever afterwards */ - intel_de_rmw(dev_priv, DC_STATE_DEBUG, 0, + intel_de_rmw(i915, DC_STATE_DEBUG, 0, DC_STATE_DEBUG_MASK_CORES | DC_STATE_DEBUG_MASK_MEMORY_UP); - intel_de_posting_read(dev_priv, DC_STATE_DEBUG); + intel_de_posting_read(i915, DC_STATE_DEBUG); } static void disable_event_handler(struct drm_i915_private *i915, @@ -315,26 +358,23 @@ disable_flip_queue_event(struct drm_i915_private *i915, } static bool -get_flip_queue_event_regs(struct drm_i915_private *i915, int dmc_id, +get_flip_queue_event_regs(struct drm_i915_private *i915, enum intel_dmc_id dmc_id, i915_reg_t *ctl_reg, i915_reg_t *htp_reg) { - switch (dmc_id) { - case DMC_FW_MAIN: + if (dmc_id == DMC_FW_MAIN) { if (DISPLAY_VER(i915) == 12) { *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 3); *htp_reg = DMC_EVT_HTP(i915, dmc_id, 3); return true; } - break; - case DMC_FW_PIPEA ... DMC_FW_PIPED: + } else if (dmc_id >= DMC_FW_PIPEA && dmc_id <= DMC_FW_PIPED) { if (IS_DG2(i915)) { *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 2); *htp_reg = DMC_EVT_HTP(i915, dmc_id, 2); return true; } - break; } return false; @@ -343,13 +383,13 @@ get_flip_queue_event_regs(struct drm_i915_private *i915, int dmc_id, static void disable_all_flip_queue_events(struct drm_i915_private *i915) { - int dmc_id; + enum intel_dmc_id dmc_id; /* TODO: check if the following applies to all D13+ platforms. 
*/ if (!IS_DG2(i915) && !IS_TIGERLAKE(i915)) return; - for (dmc_id = 0; dmc_id < DMC_FW_MAX; dmc_id++) { + for_each_dmc_id(dmc_id) { i915_reg_t ctl_reg; i915_reg_t htp_reg; @@ -365,22 +405,22 @@ disable_all_flip_queue_events(struct drm_i915_private *i915) static void disable_all_event_handlers(struct drm_i915_private *i915) { - int id; + enum intel_dmc_id dmc_id; /* TODO: disable the event handlers on pre-GEN12 platforms as well */ if (DISPLAY_VER(i915) < 12) return; - for (id = DMC_FW_MAIN; id < DMC_FW_MAX; id++) { + for_each_dmc_id(dmc_id) { int handler; - if (!has_dmc_id_fw(i915, id)) + if (!has_dmc_id_fw(i915, dmc_id)) continue; for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++) disable_event_handler(i915, - DMC_EVT_CTL(i915, id, handler), - DMC_EVT_HTP(i915, id, handler)); + DMC_EVT_CTL(i915, dmc_id, handler), + DMC_EVT_HTP(i915, dmc_id, handler)); } } @@ -410,7 +450,9 @@ static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable) void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe) { - if (!has_dmc_id_fw(i915, PIPE_TO_DMC_ID(pipe))) + enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe); + + if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id)) return; if (DISPLAY_VER(i915) >= 14) @@ -421,7 +463,9 @@ void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe) void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe) { - if (!has_dmc_id_fw(i915, PIPE_TO_DMC_ID(pipe))) + enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe); + + if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id)) return; if (DISPLAY_VER(i915) >= 14) @@ -432,57 +476,59 @@ void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe) /** * intel_dmc_load_program() - write the firmware from memory to register. - * @dev_priv: i915 drm device. + * @i915: i915 drm device. * * DMC firmware is read from a .bin file and kept in internal memory one time. * Everytime display comes back from low power state this function is called to * copy the firmware from internal memory to registers. 
*/ -void intel_dmc_load_program(struct drm_i915_private *dev_priv) +void intel_dmc_load_program(struct drm_i915_private *i915) { - struct intel_dmc *dmc = &dev_priv->display.dmc; - u32 id, i; + struct i915_power_domains *power_domains = &i915->display.power.domains; + struct intel_dmc *dmc = i915_to_dmc(i915); + enum intel_dmc_id dmc_id; + u32 i; - if (!intel_dmc_has_payload(dev_priv)) + if (!intel_dmc_has_payload(i915)) return; - pipedmc_clock_gating_wa(dev_priv, true); + pipedmc_clock_gating_wa(i915, true); - disable_all_event_handlers(dev_priv); + disable_all_event_handlers(i915); - assert_rpm_wakelock_held(&dev_priv->runtime_pm); + assert_rpm_wakelock_held(&i915->runtime_pm); preempt_disable(); - for (id = 0; id < DMC_FW_MAX; id++) { - for (i = 0; i < dmc->dmc_info[id].dmc_fw_size; i++) { - intel_de_write_fw(dev_priv, - DMC_PROGRAM(dmc->dmc_info[id].start_mmioaddr, i), - dmc->dmc_info[id].payload[i]); + for_each_dmc_id(dmc_id) { + for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) { + intel_de_write_fw(i915, + DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i), + dmc->dmc_info[dmc_id].payload[i]); } } preempt_enable(); - for (id = 0; id < DMC_FW_MAX; id++) { - for (i = 0; i < dmc->dmc_info[id].mmio_count; i++) { - intel_de_write(dev_priv, dmc->dmc_info[id].mmioaddr[i], - dmc->dmc_info[id].mmiodata[i]); + for_each_dmc_id(dmc_id) { + for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) { + intel_de_write(i915, dmc->dmc_info[dmc_id].mmioaddr[i], + dmc->dmc_info[dmc_id].mmiodata[i]); } } - dev_priv->display.dmc.dc_state = 0; + power_domains->dc_state = 0; - gen9_set_dc_state_debugmask(dev_priv); + gen9_set_dc_state_debugmask(i915); /* * Flip queue events need to be disabled before enabling DC5/6. * i915 doesn't use the flip queue feature, so disable it already * here. */ - disable_all_flip_queue_events(dev_priv); + disable_all_flip_queue_events(i915); - pipedmc_clock_gating_wa(dev_priv, false); + pipedmc_clock_gating_wa(i915, false); } /** @@ -504,8 +550,11 @@ void intel_dmc_disable_program(struct drm_i915_private *i915) void assert_dmc_loaded(struct drm_i915_private *i915) { - drm_WARN_ONCE(&i915->drm, - !intel_de_read(i915, DMC_PROGRAM(i915->display.dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)), + struct intel_dmc *dmc = i915_to_dmc(i915); + + drm_WARN_ONCE(&i915->drm, !dmc, "DMC not initialized\n"); + drm_WARN_ONCE(&i915->drm, dmc && + !intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)), "DMC program storage start is NULL\n"); drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_SSP_BASE), "DMC SSP Base Not fine\n"); @@ -540,15 +589,15 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc, const struct stepping_info *si, u8 package_ver) { - unsigned int i, id; - - struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc); + struct drm_i915_private *i915 = dmc->i915; + enum intel_dmc_id dmc_id; + unsigned int i; for (i = 0; i < num_entries; i++) { - id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id; + dmc_id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id; - if (id >= DMC_FW_MAX) { - drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", id); + if (!is_valid_dmc_id(dmc_id)) { + drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", dmc_id); continue; } @@ -556,29 +605,24 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc, * check for the stepping since we already found a previous FW * for this id. 
*/ - if (dmc->dmc_info[id].present) + if (dmc->dmc_info[dmc_id].present) continue; if (fw_info_matches_stepping(&fw_info[i], si)) { - dmc->dmc_info[id].present = true; - dmc->dmc_info[id].dmc_offset = fw_info[i].offset; + dmc->dmc_info[dmc_id].present = true; + dmc->dmc_info[dmc_id].dmc_offset = fw_info[i].offset; } } } static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc, const u32 *mmioaddr, u32 mmio_count, - int header_ver, u8 dmc_id) + int header_ver, enum intel_dmc_id dmc_id) { - struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc); + struct drm_i915_private *i915 = dmc->i915; u32 start_range, end_range; int i; - if (dmc_id >= DMC_FW_MAX) { - drm_warn(&i915->drm, "Unsupported firmware id %u\n", dmc_id); - return false; - } - if (header_ver == 1) { start_range = DMC_MMIO_START_RANGE; end_range = DMC_MMIO_END_RANGE; @@ -606,9 +650,9 @@ static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc, static u32 parse_dmc_fw_header(struct intel_dmc *dmc, const struct intel_dmc_header_base *dmc_header, - size_t rem_size, u8 dmc_id) + size_t rem_size, enum intel_dmc_id dmc_id) { - struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc); + struct drm_i915_private *i915 = dmc->i915; struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id]; unsigned int header_len_bytes, dmc_header_size, payload_size, i; const u32 *mmioaddr, *mmiodata; @@ -719,7 +763,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc, const struct stepping_info *si, size_t rem_size) { - struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc); + struct drm_i915_private *i915 = dmc->i915; u32 package_size = sizeof(struct intel_package_header); u32 num_entries, max_entries; const struct intel_fw_info *fw_info; @@ -773,7 +817,7 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc, struct intel_css_header *css_header, size_t rem_size) { - struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc); + struct drm_i915_private *i915 = dmc->i915; if (rem_size < sizeof(struct intel_css_header)) { drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n"); @@ -793,18 +837,17 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc, return sizeof(struct intel_css_header); } -static void parse_dmc_fw(struct drm_i915_private *dev_priv, - const struct firmware *fw) +static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw) { + struct drm_i915_private *i915 = dmc->i915; struct intel_css_header *css_header; struct intel_package_header *package_header; struct intel_dmc_header_base *dmc_header; - struct intel_dmc *dmc = &dev_priv->display.dmc; struct stepping_info display_info = { '*', '*'}; - const struct stepping_info *si = intel_get_stepping_info(dev_priv, &display_info); + const struct stepping_info *si = intel_get_stepping_info(i915, &display_info); + enum intel_dmc_id dmc_id; u32 readcount = 0; u32 r, offset; - int id; if (!fw) return; @@ -825,34 +868,33 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv, readcount += r; - for (id = 0; id < DMC_FW_MAX; id++) { - if (!dev_priv->display.dmc.dmc_info[id].present) + for_each_dmc_id(dmc_id) { + if (!dmc->dmc_info[dmc_id].present) continue; - offset = readcount + dmc->dmc_info[id].dmc_offset * 4; + offset = readcount + dmc->dmc_info[dmc_id].dmc_offset * 4; if (offset > fw->size) { - drm_err(&dev_priv->drm, "Reading beyond the fw_size\n"); + drm_err(&i915->drm, "Reading beyond the fw_size\n"); continue; } dmc_header = (struct intel_dmc_header_base *)&fw->data[offset]; - 
parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, id); + parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, dmc_id); } } -static void intel_dmc_runtime_pm_get(struct drm_i915_private *dev_priv) +static void intel_dmc_runtime_pm_get(struct drm_i915_private *i915) { - drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref); - dev_priv->display.dmc.wakeref = - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref); + i915->display.dmc.wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT); } -static void intel_dmc_runtime_pm_put(struct drm_i915_private *dev_priv) +static void intel_dmc_runtime_pm_put(struct drm_i915_private *i915) { intel_wakeref_t wakeref __maybe_unused = - fetch_and_zero(&dev_priv->display.dmc.wakeref); + fetch_and_zero(&i915->display.dmc.wakeref); - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); + intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); } static const char *dmc_fallback_path(struct drm_i915_private *i915) @@ -865,46 +907,40 @@ static const char *dmc_fallback_path(struct drm_i915_private *i915) static void dmc_load_work_fn(struct work_struct *work) { - struct drm_i915_private *dev_priv; - struct intel_dmc *dmc; + struct intel_dmc *dmc = container_of(work, typeof(*dmc), work); + struct drm_i915_private *i915 = dmc->i915; const struct firmware *fw = NULL; const char *fallback_path; int err; - dev_priv = container_of(work, typeof(*dev_priv), display.dmc.work); - dmc = &dev_priv->display.dmc; - - err = request_firmware(&fw, dev_priv->display.dmc.fw_path, dev_priv->drm.dev); + err = request_firmware(&fw, dmc->fw_path, i915->drm.dev); - if (err == -ENOENT && !dev_priv->params.dmc_firmware_path) { - fallback_path = dmc_fallback_path(dev_priv); + if (err == -ENOENT && !i915->params.dmc_firmware_path) { + fallback_path = dmc_fallback_path(i915); if (fallback_path) { - drm_dbg_kms(&dev_priv->drm, - "%s not found, falling back to %s\n", - dmc->fw_path, - fallback_path); - err = request_firmware(&fw, fallback_path, dev_priv->drm.dev); + drm_dbg_kms(&i915->drm, "%s not found, falling back to %s\n", + dmc->fw_path, fallback_path); + err = request_firmware(&fw, fallback_path, i915->drm.dev); if (err == 0) - dev_priv->display.dmc.fw_path = fallback_path; + dmc->fw_path = fallback_path; } } - parse_dmc_fw(dev_priv, fw); + parse_dmc_fw(dmc, fw); - if (intel_dmc_has_payload(dev_priv)) { - intel_dmc_load_program(dev_priv); - intel_dmc_runtime_pm_put(dev_priv); + if (intel_dmc_has_payload(i915)) { + intel_dmc_load_program(i915); + intel_dmc_runtime_pm_put(i915); - drm_info(&dev_priv->drm, - "Finished loading DMC firmware %s (v%u.%u)\n", - dev_priv->display.dmc.fw_path, DMC_VERSION_MAJOR(dmc->version), + drm_info(&i915->drm, "Finished loading DMC firmware %s (v%u.%u)\n", + dmc->fw_path, DMC_VERSION_MAJOR(dmc->version), DMC_VERSION_MINOR(dmc->version)); } else { - drm_notice(&dev_priv->drm, + drm_notice(&i915->drm, "Failed to load DMC firmware %s." " Disabling runtime power management.\n", dmc->fw_path); - drm_notice(&dev_priv->drm, "DMC firmware homepage: %s", + drm_notice(&i915->drm, "DMC firmware homepage: %s", INTEL_UC_FIRMWARE_URL); } @@ -912,19 +948,17 @@ static void dmc_load_work_fn(struct work_struct *work) } /** - * intel_dmc_ucode_init() - initialize the firmware loading. - * @dev_priv: i915 drm device. + * intel_dmc_init() - initialize the firmware loading. + * @i915: i915 drm device. 
 *
 * This function is called at the time of loading the display driver to read
 * firmware from a .bin file and copy it into internal memory.
 */
-void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
+void intel_dmc_init(struct drm_i915_private *i915)
 {
-	struct intel_dmc *dmc = &dev_priv->display.dmc;
-
-	INIT_WORK(&dev_priv->display.dmc.work, dmc_load_work_fn);
+	struct intel_dmc *dmc;
 
-	if (!HAS_DMC(dev_priv))
+	if (!HAS_DMC(i915))
 		return;
 
 	/*
@@ -935,168 +969,192 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
	 * suspend as runtime suspend *requires* a working DMC for whatever
	 * reason.
	 */
-	intel_dmc_runtime_pm_get(dev_priv);
+	intel_dmc_runtime_pm_get(i915);
+
+	dmc = kzalloc(sizeof(*dmc), GFP_KERNEL);
+	if (!dmc)
+		return;
 
-	if (IS_DG2(dev_priv)) {
+	dmc->i915 = i915;
+
+	INIT_WORK(&dmc->work, dmc_load_work_fn);
+
+	if (IS_DG2(i915)) {
 		dmc->fw_path = DG2_DMC_PATH;
 		dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
-	} else if (IS_ALDERLAKE_P(dev_priv)) {
+	} else if (IS_ALDERLAKE_P(i915)) {
 		dmc->fw_path = ADLP_DMC_PATH;
 		dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
-	} else if (IS_ALDERLAKE_S(dev_priv)) {
+	} else if (IS_ALDERLAKE_S(i915)) {
 		dmc->fw_path = ADLS_DMC_PATH;
 		dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
-	} else if (IS_DG1(dev_priv)) {
+	} else if (IS_DG1(i915)) {
 		dmc->fw_path = DG1_DMC_PATH;
 		dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
-	} else if (IS_ROCKETLAKE(dev_priv)) {
+	} else if (IS_ROCKETLAKE(i915)) {
 		dmc->fw_path = RKL_DMC_PATH;
 		dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
-	} else if (IS_TIGERLAKE(dev_priv)) {
+	} else if (IS_TIGERLAKE(i915)) {
 		dmc->fw_path = TGL_DMC_PATH;
 		dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
-	} else if (DISPLAY_VER(dev_priv) == 11) {
+	} else if (DISPLAY_VER(i915) == 11) {
 		dmc->fw_path = ICL_DMC_PATH;
 		dmc->max_fw_size = ICL_DMC_MAX_FW_SIZE;
-	} else if (IS_GEMINILAKE(dev_priv)) {
+	} else if (IS_GEMINILAKE(i915)) {
 		dmc->fw_path = GLK_DMC_PATH;
 		dmc->max_fw_size = GLK_DMC_MAX_FW_SIZE;
-	} else if (IS_KABYLAKE(dev_priv) ||
-		   IS_COFFEELAKE(dev_priv) ||
-		   IS_COMETLAKE(dev_priv)) {
+	} else if (IS_KABYLAKE(i915) ||
+		   IS_COFFEELAKE(i915) ||
+		   IS_COMETLAKE(i915)) {
 		dmc->fw_path = KBL_DMC_PATH;
 		dmc->max_fw_size = KBL_DMC_MAX_FW_SIZE;
-	} else if (IS_SKYLAKE(dev_priv)) {
+	} else if (IS_SKYLAKE(i915)) {
 		dmc->fw_path = SKL_DMC_PATH;
 		dmc->max_fw_size = SKL_DMC_MAX_FW_SIZE;
-	} else if (IS_BROXTON(dev_priv)) {
+	} else if (IS_BROXTON(i915)) {
 		dmc->fw_path = BXT_DMC_PATH;
 		dmc->max_fw_size = BXT_DMC_MAX_FW_SIZE;
 	}
 
-	if (dev_priv->params.dmc_firmware_path) {
-		if (strlen(dev_priv->params.dmc_firmware_path) == 0) {
-			dmc->fw_path = NULL;
-			drm_info(&dev_priv->drm,
+	if (i915->params.dmc_firmware_path) {
+		if (strlen(i915->params.dmc_firmware_path) == 0) {
+			drm_info(&i915->drm,
 				 "Disabling DMC firmware and runtime PM\n");
-			return;
+			goto out;
 		}
 
-		dmc->fw_path = dev_priv->params.dmc_firmware_path;
+		dmc->fw_path = i915->params.dmc_firmware_path;
 	}
 
 	if (!dmc->fw_path) {
-		drm_dbg_kms(&dev_priv->drm,
+		drm_dbg_kms(&i915->drm,
 			    "No known DMC firmware for platform, disabling runtime PM\n");
-		return;
+		goto out;
 	}
 
-	drm_dbg_kms(&dev_priv->drm, "Loading %s\n", dmc->fw_path);
-	schedule_work(&dev_priv->display.dmc.work);
+	i915->display.dmc.dmc = dmc;
+
+	drm_dbg_kms(&i915->drm, "Loading %s\n", dmc->fw_path);
+	schedule_work(&dmc->work);
+
+	return;
+
+out:
+	kfree(dmc);
 }
 
 /**
- * intel_dmc_ucode_suspend() - prepare DMC firmware before system suspend
- * @dev_priv: i915 drm device
+ * intel_dmc_suspend() - prepare DMC firmware before system suspend
+ * @i915: i915 drm device
 *
 * Prepare the DMC firmware before entering system suspend. This includes
 * flushing pending work items and releasing any resources acquired during
 * init.
 */
-void intel_dmc_ucode_suspend(struct drm_i915_private *dev_priv)
+void intel_dmc_suspend(struct drm_i915_private *i915)
 {
-	if (!HAS_DMC(dev_priv))
+	struct intel_dmc *dmc = i915_to_dmc(i915);
+
+	if (!HAS_DMC(i915))
 		return;
 
-	flush_work(&dev_priv->display.dmc.work);
+	if (dmc)
+		flush_work(&dmc->work);
 
 	/* Drop the reference held in case DMC isn't loaded. */
-	if (!intel_dmc_has_payload(dev_priv))
-		intel_dmc_runtime_pm_put(dev_priv);
+	if (!intel_dmc_has_payload(i915))
+		intel_dmc_runtime_pm_put(i915);
 }
 
 /**
- * intel_dmc_ucode_resume() - init DMC firmware during system resume
- * @dev_priv: i915 drm device
+ * intel_dmc_resume() - init DMC firmware during system resume
+ * @i915: i915 drm device
 *
 * Reinitialize the DMC firmware during system resume, reacquiring any
- * resources released in intel_dmc_ucode_suspend().
+ * resources released in intel_dmc_suspend().
 */
-void intel_dmc_ucode_resume(struct drm_i915_private *dev_priv)
+void intel_dmc_resume(struct drm_i915_private *i915)
 {
-	if (!HAS_DMC(dev_priv))
+	if (!HAS_DMC(i915))
 		return;
 
 	/*
	 * Reacquire the reference to keep RPM disabled in case DMC isn't
	 * loaded.
	 */
-	if (!intel_dmc_has_payload(dev_priv))
-		intel_dmc_runtime_pm_get(dev_priv);
+	if (!intel_dmc_has_payload(i915))
+		intel_dmc_runtime_pm_get(i915);
 }
 
 /**
- * intel_dmc_ucode_fini() - unload the DMC firmware.
- * @dev_priv: i915 drm device.
+ * intel_dmc_fini() - unload the DMC firmware.
+ * @i915: i915 drm device.
 *
 * Firmware unloading includes freeing the internal memory and resetting the
 * firmware loading status.
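A note on the shape of this rework: struct intel_dmc is now kzalloc()ed and hung off i915->display.dmc.dmc, so every consumer must tolerate a NULL pointer (suspend only flushes the worker when the struct exists, and the error-state and debugfs code below print an explicit "DMC initialized" flag). The raw int id loops become the new for_each_dmc_id() iterator over enum intel_dmc_id, which is also why dmc_mmio_addr_sanity_check() could drop its runtime dmc_id >= DMC_FW_MAX guard earlier in this hunk. A minimal sketch of what such an iterator amounts to; the enum values come from the old header removed further down, while the macro body itself is an assumption, not quoted from the patch:

	enum intel_dmc_id {
		DMC_FW_MAIN = 0,
		DMC_FW_PIPEA,
		DMC_FW_PIPEB,
		DMC_FW_PIPEC,
		DMC_FW_PIPED,
		DMC_FW_MAX
	};

	/* Illustrative definition: walk every valid firmware id in order. */
	#define for_each_dmc_id(dmc_id) \
		for ((dmc_id) = DMC_FW_MAIN; (dmc_id) < DMC_FW_MAX; (dmc_id)++)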
*/ -void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv) +void intel_dmc_fini(struct drm_i915_private *i915) { - int id; + struct intel_dmc *dmc = i915_to_dmc(i915); + enum intel_dmc_id dmc_id; - if (!HAS_DMC(dev_priv)) + if (!HAS_DMC(i915)) return; - intel_dmc_ucode_suspend(dev_priv); - drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref); + intel_dmc_suspend(i915); + drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref); - for (id = 0; id < DMC_FW_MAX; id++) - kfree(dev_priv->display.dmc.dmc_info[id].payload); + if (dmc) { + for_each_dmc_id(dmc_id) + kfree(dmc->dmc_info[dmc_id].payload); + + kfree(dmc); + i915->display.dmc.dmc = NULL; + } } void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m, struct drm_i915_private *i915) { - struct intel_dmc *dmc = &i915->display.dmc; + struct intel_dmc *dmc = i915_to_dmc(i915); if (!HAS_DMC(i915)) return; + i915_error_printf(m, "DMC initialized: %s\n", str_yes_no(dmc)); i915_error_printf(m, "DMC loaded: %s\n", str_yes_no(intel_dmc_has_payload(i915))); - i915_error_printf(m, "DMC fw version: %d.%d\n", - DMC_VERSION_MAJOR(dmc->version), - DMC_VERSION_MINOR(dmc->version)); + if (dmc) + i915_error_printf(m, "DMC fw version: %d.%d\n", + DMC_VERSION_MAJOR(dmc->version), + DMC_VERSION_MINOR(dmc->version)); } static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused) { struct drm_i915_private *i915 = m->private; + struct intel_dmc *dmc = i915_to_dmc(i915); intel_wakeref_t wakeref; - struct intel_dmc *dmc; i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG; if (!HAS_DMC(i915)) return -ENODEV; - dmc = &i915->display.dmc; - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + seq_printf(m, "DMC initialized: %s\n", str_yes_no(dmc)); seq_printf(m, "fw loaded: %s\n", str_yes_no(intel_dmc_has_payload(i915))); - seq_printf(m, "path: %s\n", dmc->fw_path); + seq_printf(m, "path: %s\n", dmc ? 
dmc->fw_path : "N/A"); seq_printf(m, "Pipe A fw needed: %s\n", str_yes_no(GRAPHICS_VER(i915) >= 12)); seq_printf(m, "Pipe A fw loaded: %s\n", - str_yes_no(dmc->dmc_info[DMC_FW_PIPEA].payload)); + str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEA))); seq_printf(m, "Pipe B fw needed: %s\n", str_yes_no(IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)); seq_printf(m, "Pipe B fw loaded: %s\n", - str_yes_no(dmc->dmc_info[DMC_FW_PIPEB].payload)); + str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEB))); if (!intel_dmc_has_payload(i915)) goto out; @@ -1130,9 +1188,10 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused) seq_printf(m, "DC5 -> DC6 count: %d\n", intel_de_read(i915, dc6_reg)); -out: seq_printf(m, "program base: 0x%08x\n", intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0))); + +out: seq_printf(m, "ssp base: 0x%08x\n", intel_de_read(i915, DMC_SSP_BASE)); seq_printf(m, "htp: 0x%08x\n", intel_de_read(i915, DMC_HTP_SKL)); diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h index fd1725de4289..fd607afff2ef 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.h +++ b/drivers/gpu/drm/i915/display/intel_dmc.h @@ -6,54 +6,20 @@ #ifndef __INTEL_DMC_H__ #define __INTEL_DMC_H__ -#include "i915_reg_defs.h" -#include "intel_wakeref.h" -#include <linux/workqueue.h> +#include <linux/types.h> struct drm_i915_error_state_buf; struct drm_i915_private; - enum pipe; -enum { - DMC_FW_MAIN = 0, - DMC_FW_PIPEA, - DMC_FW_PIPEB, - DMC_FW_PIPEC, - DMC_FW_PIPED, - DMC_FW_MAX -}; - -struct intel_dmc { - struct work_struct work; - const char *fw_path; - u32 max_fw_size; /* bytes */ - u32 version; - struct dmc_fw_info { - u32 mmio_count; - i915_reg_t mmioaddr[20]; - u32 mmiodata[20]; - u32 dmc_offset; - u32 start_mmioaddr; - u32 dmc_fw_size; /*dwords */ - u32 *payload; - bool present; - } dmc_info[DMC_FW_MAX]; - - u32 dc_state; - u32 target_dc_state; - u32 allowed_dc_mask; - intel_wakeref_t wakeref; -}; - -void intel_dmc_ucode_init(struct drm_i915_private *i915); +void intel_dmc_init(struct drm_i915_private *i915); void intel_dmc_load_program(struct drm_i915_private *i915); void intel_dmc_disable_program(struct drm_i915_private *i915); void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe); void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe); -void intel_dmc_ucode_fini(struct drm_i915_private *i915); -void intel_dmc_ucode_suspend(struct drm_i915_private *i915); -void intel_dmc_ucode_resume(struct drm_i915_private *i915); +void intel_dmc_fini(struct drm_i915_private *i915); +void intel_dmc_suspend(struct drm_i915_private *i915); +void intel_dmc_resume(struct drm_i915_private *i915); bool intel_dmc_has_payload(struct drm_i915_private *i915); void intel_dmc_debugfs_register(struct drm_i915_private *i915); void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m, diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 62cbab7402e9..aee93b0d810e 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -288,7 +288,7 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp) static int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port) { - int vbt_max_lanes = intel_bios_dp_max_lane_count(&dig_port->base); + int vbt_max_lanes = intel_bios_dp_max_lane_count(dig_port->base.devdata); int max_lanes = dig_port->max_lanes; if (vbt_max_lanes) @@ -425,7 +425,7 @@ static int 
vbt_max_link_rate(struct intel_dp *intel_dp) struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; int max_rate; - max_rate = intel_bios_dp_max_link_rate(encoder); + max_rate = intel_bios_dp_max_link_rate(encoder->devdata); if (intel_dp_is_edp(intel_dp)) { struct intel_connector *connector = intel_dp->attached_connector; @@ -1415,6 +1415,28 @@ static int intel_dp_sink_dsc_version_minor(struct intel_dp *intel_dp) DP_DSC_MINOR_SHIFT; } +static int intel_dp_get_slice_height(int vactive) +{ + int slice_height; + + /* + * VDSC 1.2a spec in Section 3.8 Options for Slices implies that 108 + * lines is an optimal slice height, but any size can be used as long as + * vertical active integer multiple and maximum vertical slice count + * requirements are met. + */ + for (slice_height = 108; slice_height <= vactive; slice_height += 2) + if (vactive % slice_height == 0) + return slice_height; + + /* + * Highly unlikely we reach here as most of the resolutions will end up + * finding appropriate slice_height in above loop but returning + * slice_height as 2 here as it should work with all resolutions. + */ + return 2; +} + static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { @@ -1433,17 +1455,7 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay; - /* - * Slice Height of 8 works for all currently available panels. So start - * with that if pic_height is an integral multiple of 8. Eventually add - * logic to try multiple slice heights. - */ - if (vdsc_cfg->pic_height % 8 == 0) - vdsc_cfg->slice_height = 8; - else if (vdsc_cfg->pic_height % 4 == 0) - vdsc_cfg->slice_height = 4; - else - vdsc_cfg->slice_height = 2; + vdsc_cfg->slice_height = intel_dp_get_slice_height(vdsc_cfg->pic_height); ret = intel_dsc_compute_params(crtc_state); if (ret) @@ -1727,7 +1739,7 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, * Our YCbCr output is always limited range. * crtc_state->limited_color_range only applies to RGB, * and it must never be set for YCbCr or we risk setting - * some conflicting bits in PIPECONF which will mess up + * some conflicting bits in TRANSCONF which will mess up * the colors on the monitor. 
*/ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) @@ -1991,7 +2003,6 @@ intel_dp_drrs_compute_config(struct intel_connector *connector, } static bool intel_dp_has_audio(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); @@ -2057,7 +2068,7 @@ intel_dp_audio_compute_config(struct intel_encoder *encoder, struct drm_connector *connector = conn_state->connector; pipe_config->sdp_split_enable = - intel_dp_has_audio(encoder, pipe_config, conn_state) && + intel_dp_has_audio(encoder, conn_state) && intel_dp_is_uhbr(pipe_config); drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] SDP split enable: %s\n", @@ -2081,7 +2092,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, pipe_config->has_pch_encoder = true; pipe_config->has_audio = - intel_dp_has_audio(encoder, pipe_config, conn_state) && + intel_dp_has_audio(encoder, conn_state) && intel_audio_compute_config(encoder, pipe_config, conn_state); fixed_mode = intel_panel_fixed_mode(connector, adjusted_mode); @@ -2281,10 +2292,15 @@ intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful) void intel_dp_wait_source_oui(struct intel_dp *intel_dp) { + struct intel_connector *connector = intel_dp->attached_connector; struct drm_i915_private *i915 = dp_to_i915(intel_dp); - drm_dbg_kms(&i915->drm, "Performing OUI wait\n"); - wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, 30); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Performing OUI wait (%u ms)\n", + connector->base.base.id, connector->base.name, + connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout); + + wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, + connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout); } /* If the device supports it, try to set the power state appropriately */ @@ -4851,7 +4867,7 @@ intel_dp_connector_register(struct drm_connector *connector) if (!ret) drm_dp_cec_register_connector(&intel_dp->aux, connector); - if (!intel_bios_is_lspcon_present(i915, dig_port->base.port)) + if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata)) return ret; /* @@ -5129,8 +5145,9 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd) return IRQ_HANDLED; } -/* check the VBT to see whether the eDP is on another port */ -bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port) +static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv, + const struct intel_bios_encoder_data *devdata, + enum port port) { /* * eDP not supported on g4x. 
so bail out early just @@ -5142,13 +5159,24 @@ bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port) if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A) return true; - return intel_bios_is_port_edp(dev_priv, port); + return devdata && intel_bios_encoder_supports_edp(devdata); +} + +bool intel_dp_is_port_edp(struct drm_i915_private *i915, enum port port) +{ + const struct intel_bios_encoder_data *devdata = + intel_bios_encoder_data_lookup(i915, port); + + return _intel_dp_is_port_edp(i915, devdata, port); } static bool -has_gamut_metadata_dip(struct drm_i915_private *i915, enum port port) +has_gamut_metadata_dip(struct intel_encoder *encoder) { - if (intel_bios_is_lspcon_present(i915, port)) + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum port port = encoder->port; + + if (intel_bios_encoder_is_lspcon(encoder->devdata)) return false; if (DISPLAY_VER(i915) >= 11) @@ -5183,14 +5211,14 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect drm_connector_attach_max_bpc_property(connector, 6, 12); /* Register HDMI colorspace for case of lspcon */ - if (intel_bios_is_lspcon_present(dev_priv, port)) { + if (intel_bios_encoder_is_lspcon(dp_to_dig_port(intel_dp)->base.devdata)) { drm_connector_attach_content_type_property(connector); intel_attach_hdmi_colorspace_property(connector); } else { intel_attach_dp_colorspace_property(connector); } - if (has_gamut_metadata_dip(dev_priv, port)) + if (has_gamut_metadata_dip(&dp_to_dig_port(intel_dp)->base)) drm_connector_attach_hdr_output_metadata_property(connector); if (HAS_VRR(dev_priv)) @@ -5232,11 +5260,6 @@ static void intel_edp_backlight_setup(struct intel_dp *intel_dp, if (pipe != PIPE_A && pipe != PIPE_B) pipe = PIPE_A; - - drm_dbg_kms(&i915->drm, - "[CONNECTOR:%d:%s] using pipe %c for initial backlight setup\n", - connector->base.base.id, connector->base.name, - pipe_name(pipe)); } intel_backlight_setup(connector, pipe); @@ -5412,7 +5435,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); intel_dp->attached_connector = intel_connector; - if (intel_dp_is_port_edp(dev_priv, port)) { + if (_intel_dp_is_port_edp(dev_priv, intel_encoder->devdata, port)) { /* * Currently we don't support eDP on TypeC ports, although in * theory it could work on TypeC legacy ports. 
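The intel_dp_get_slice_height() helper added above replaces the fixed 8/4/2 slice-height ladder with a search for a divisor of the active vertical size, following the VDSC 1.2a hint (Section 3.8) that roughly 108-line slices are optimal. The logic is small enough to quote as a standalone sketch; the body matches the hunk above, only the comments are added:

	/* Smallest even slice height >= 108 that evenly divides vactive. */
	static int intel_dp_get_slice_height(int vactive)
	{
		int slice_height;

		for (slice_height = 108; slice_height <= vactive; slice_height += 2)
			if (vactive % slice_height == 0)
				return slice_height;

		/* Spec-safe fallback; expected to be rare in practice. */
		return 2;
	}

For a 1080- or 2160-line mode this returns 108; for 1440 the first even divisor at or above 108 is 120; a vactive with no suitable divisor falls back to 2, which per the patch comment should work with all resolutions.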
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c index 5a176bfb10a2..96967e21c94c 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c @@ -6,6 +6,7 @@ #include "i915_drv.h" #include "i915_reg.h" #include "i915_trace.h" +#include "intel_bios.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dp_aux.h" @@ -737,3 +738,37 @@ void intel_dp_aux_init(struct intel_dp *intel_dp) intel_dp->aux.transfer = intel_dp_aux_transfer; cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE); } + +static enum aux_ch default_aux_ch(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + /* SKL has DDI E but no AUX E */ + if (DISPLAY_VER(i915) == 9 && encoder->port == PORT_E) + return AUX_CH_A; + + return (enum aux_ch)encoder->port; +} + +enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum aux_ch aux_ch; + + aux_ch = intel_bios_dp_aux_ch(encoder->devdata); + if (aux_ch != AUX_CH_NONE) { + drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] using AUX %c (VBT)\n", + encoder->base.base.id, encoder->base.name, + aux_ch_name(aux_ch)); + return aux_ch; + } + + aux_ch = default_aux_ch(encoder); + + drm_dbg_kms(&i915->drm, + "[ENCODER:%d:%s] using AUX %c (platform default)\n", + encoder->base.base.id, encoder->base.name, + aux_ch_name(aux_ch)); + + return aux_ch; +} diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h index 738577537bc7..138e340f94ee 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.h +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h @@ -6,9 +6,13 @@ #ifndef __INTEL_DP_AUX_H__ #define __INTEL_DP_AUX_H__ +enum aux_ch; struct intel_dp; +struct intel_encoder; void intel_dp_aux_fini(struct intel_dp *intel_dp); void intel_dp_aux_init(struct intel_dp *intel_dp); +enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder); + #endif /* __INTEL_DP_AUX_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 83af95bce98d..95cc5251843e 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -105,6 +105,11 @@ enum intel_dp_aux_backlight_modparam { INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3, }; +static bool is_intel_tcon_cap(const u8 tcon_cap[4]) +{ + return tcon_cap[0] >= 1; +} + /* Intel EDP backlight callbacks */ static bool intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector) @@ -125,14 +130,12 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector) if (!(tcon_cap[1] & INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP)) return false; - if (tcon_cap[0] >= 1) { - drm_dbg_kms(&i915->drm, "Detected Intel HDR backlight interface version %d\n", - tcon_cap[0]); - } else { - drm_dbg_kms(&i915->drm, "Detected unsupported HDR backlight interface version %d\n", - tcon_cap[0]); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Detected %s HDR backlight interface version %d\n", + connector->base.base.id, connector->base.name, + is_intel_tcon_cap(tcon_cap) ? 
"Intel" : "unsupported", tcon_cap[0]); + + if (!is_intel_tcon_cap(tcon_cap)) return false; - } /* * If we don't have HDR static metadata there is no way to @@ -147,7 +150,8 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector) !(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type & BIT(HDMI_STATIC_METADATA_TYPE1))) { drm_info(&i915->drm, - "Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n", + "[CONNECTOR:%d:%s] Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n", + connector->base.base.id, connector->base.name, INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL); return false; } @@ -168,7 +172,8 @@ intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe u8 buf[2] = { 0 }; if (drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &tmp) != 1) { - drm_err(&i915->drm, "Failed to read current backlight mode from DPCD\n"); + drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read current backlight mode from DPCD\n", + connector->base.base.id, connector->base.name); return 0; } @@ -185,7 +190,8 @@ intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe if (drm_dp_dpcd_read(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf, sizeof(buf)) != sizeof(buf)) { - drm_err(&i915->drm, "Failed to read brightness from DPCD\n"); + drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read brightness from DPCD\n", + connector->base.base.id, connector->base.name); return 0; } @@ -205,7 +211,8 @@ intel_dp_aux_hdr_set_aux_backlight(const struct drm_connector_state *conn_state, if (drm_dp_dpcd_write(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf, sizeof(buf)) != sizeof(buf)) - drm_err(dev, "Failed to write brightness level to DPCD\n"); + drm_err(dev, "[CONNECTOR:%d:%s] Failed to write brightness level to DPCD\n", + connector->base.base.id, connector->base.name); } static void @@ -238,7 +245,8 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state, ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl); if (ret != 1) { - drm_err(&i915->drm, "Failed to read current backlight control mode: %d\n", ret); + drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read current backlight control mode: %d\n", + connector->base.base.id, connector->base.name, ret); return; } @@ -254,9 +262,10 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state, ctrl &= ~INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE; } - if (ctrl != old_ctrl) - if (drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) != 1) - drm_err(&i915->drm, "Failed to configure DPCD brightness controls\n"); + if (ctrl != old_ctrl && + drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) != 1) + drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to configure DPCD brightness controls\n", + connector->base.base.id, connector->base.name); } static void @@ -273,6 +282,11 @@ intel_dp_aux_hdr_disable_backlight(const struct drm_connector_state *conn_state, panel->backlight.pwm_funcs->disable(conn_state, intel_backlight_invert_pwm_level(connector, 0)); } 
+static const char *dpcd_vs_pwm_str(bool aux) +{ + return aux ? "DPCD" : "PWM"; +} + static int intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pipe) { @@ -282,15 +296,16 @@ intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pi &connector->base.display_info.luminance_range; int ret; - if (panel->backlight.edp.intel.sdr_uses_aux) { - drm_dbg_kms(&i915->drm, "SDR backlight is controlled through DPCD\n"); - } else { - drm_dbg_kms(&i915->drm, "SDR backlight is controlled through PWM\n"); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] SDR backlight is controlled through %s\n", + connector->base.base.id, connector->base.name, + dpcd_vs_pwm_str(panel->backlight.edp.intel.sdr_uses_aux)); + if (!panel->backlight.edp.intel.sdr_uses_aux) { ret = panel->backlight.pwm_funcs->setup(connector, pipe); if (ret < 0) { drm_err(&i915->drm, - "Failed to setup SDR backlight controls through PWM: %d\n", ret); + "[CONNECTOR:%d:%s] Failed to setup SDR backlight controls through PWM: %d\n", + connector->base.base.id, connector->base.name, ret); return ret; } } @@ -303,8 +318,10 @@ intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pi panel->backlight.min = 0; } - drm_dbg_kms(&i915->drm, "Using backlight range %d..%d\n", panel->backlight.min, - panel->backlight.max); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using AUX HDR interface for backlight control (range %d..%d)\n", + connector->base.base.id, connector->base.name, + panel->backlight.min, panel->backlight.max); + panel->backlight.level = intel_dp_aux_hdr_get_backlight(connector, pipe); panel->backlight.enabled = panel->backlight.level != 0; @@ -386,12 +403,19 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, if (ret < 0) return ret; + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n", + connector->base.base.id, connector->base.name, + dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_enable)); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s\n", + connector->base.base.id, connector->base.name, + dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_set)); + if (!panel->backlight.edp.vesa.info.aux_set || !panel->backlight.edp.vesa.info.aux_enable) { ret = panel->backlight.pwm_funcs->setup(connector, pipe); if (ret < 0) { drm_err(&i915->drm, - "Failed to setup PWM backlight controls for eDP backlight: %d\n", - ret); + "[CONNECTOR:%d:%s] Failed to setup PWM backlight controls for eDP backlight: %d\n", + connector->base.base.id, connector->base.name, ret); return ret; } } @@ -418,6 +442,9 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, } } + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using AUX VESA interface for backlight control\n", + connector->base.base.id, connector->base.name); + return 0; } @@ -428,7 +455,8 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector) struct drm_i915_private *i915 = dp_to_i915(intel_dp); if (drm_edp_backlight_supported(intel_dp->edp_dpcd)) { - drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n"); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX Backlight Control Supported!\n", + connector->base.base.id, connector->base.name); return true; } return false; @@ -504,13 +532,15 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector) * interfaces is to probe for Intel's first, and VESA's second. 
*/ if (try_intel_interface && intel_dp_aux_supports_hdr_backlight(connector)) { - drm_dbg_kms(dev, "Using Intel proprietary eDP backlight controls\n"); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Using Intel proprietary eDP backlight controls\n", + connector->base.base.id, connector->base.name); panel->backlight.funcs = &intel_dp_hdr_bl_funcs; return 0; } if (try_vesa_interface && intel_dp_aux_supports_vesa_backlight(connector)) { - drm_dbg_kms(dev, "Using VESA eDP backlight controls\n"); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Using VESA eDP backlight controls\n", + connector->base.base.id, connector->base.name); panel->backlight.funcs = &intel_dp_vesa_bl_funcs; return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 054a009e800d..a860cbc5dbea 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -265,6 +265,19 @@ static int intel_dp_mst_update_slots(struct intel_encoder *encoder, return 0; } +static bool intel_dp_mst_has_audio(const struct drm_connector_state *conn_state) +{ + const struct intel_digital_connector_state *intel_conn_state = + to_intel_digital_connector_state(conn_state); + struct intel_connector *connector = + to_intel_connector(conn_state->connector); + + if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) + return connector->port->has_audio; + else + return intel_conn_state->force_audio == HDMI_AUDIO_ON; +} + static int intel_dp_mst_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) @@ -272,10 +285,6 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_dp *intel_dp = &intel_mst->primary->dp; - struct intel_connector *connector = - to_intel_connector(conn_state->connector); - struct intel_digital_connector_state *intel_conn_state = - to_intel_digital_connector_state(conn_state); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct link_config_limits limits; @@ -287,11 +296,9 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->has_pch_encoder = false; - if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) - pipe_config->has_audio = connector->port->has_audio; - else - pipe_config->has_audio = - intel_conn_state->force_audio == HDMI_AUDIO_ON; + pipe_config->has_audio = + intel_dp_mst_has_audio(conn_state) && + intel_audio_compute_config(encoder, pipe_config, conn_state); /* * for MST we always configure max link bw - the spec doesn't @@ -604,7 +611,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, * no clock to the transcoder" */ if (DISPLAY_VER(dev_priv) < 12 || !last_mst_stream) - intel_ddi_disable_pipe_clock(old_crtc_state); + intel_ddi_disable_transcoder_clock(old_crtc_state); intel_mst->connector = NULL; @@ -684,7 +691,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, * here for the following ones. 
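The MST audio change above is worth spelling out: intel_dp_mst_compute_config() now resolves the connector's tri-state audio property through the new intel_dp_mst_has_audio() helper and, like the SST path earlier, additionally requires intel_audio_compute_config() to succeed. A compact sketch of the tri-state resolution; the enum is a stand-in for the HDMI_AUDIO_* values, with assumed numbering:

	#include <stdbool.h>

	/* Stand-in for the HDMI_AUDIO_* tri-state connector property. */
	enum force_audio { FORCE_AUDIO_OFF = -1, FORCE_AUDIO_AUTO = 0, FORCE_AUDIO_ON = 1 };

	/* Mirrors intel_dp_mst_has_audio(): AUTO defers to the MST port. */
	static bool mst_has_audio(enum force_audio force, bool port_has_audio)
	{
		if (force == FORCE_AUDIO_AUTO)
			return port_has_audio;

		return force == FORCE_AUDIO_ON;
	}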
*/ if (DISPLAY_VER(dev_priv) < 12 || !first_mst_stream) - intel_ddi_enable_pipe_clock(encoder, pipe_config); + intel_ddi_enable_transcoder_clock(encoder, pipe_config); intel_ddi_set_dp_msa(pipe_config, conn_state); } diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c index 565c06de2432..62b93d097e44 100644 --- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c @@ -389,9 +389,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, "force reprogramming it\n", phy); } - val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON); - val |= phy_info->pwron_mask; - intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val); + intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, 0, phy_info->pwron_mask); /* * The PHY registers start out inaccessible and respond to reads with @@ -410,27 +408,19 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, phy); /* Program PLL Rcomp code offset */ - val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW9(phy)); - val &= ~IREF0RC_OFFSET_MASK; - val |= 0xE4 << IREF0RC_OFFSET_SHIFT; - intel_de_write(dev_priv, BXT_PORT_CL1CM_DW9(phy), val); + intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW9(phy), IREF0RC_OFFSET_MASK, + 0xE4 << IREF0RC_OFFSET_SHIFT); - val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW10(phy)); - val &= ~IREF1RC_OFFSET_MASK; - val |= 0xE4 << IREF1RC_OFFSET_SHIFT; - intel_de_write(dev_priv, BXT_PORT_CL1CM_DW10(phy), val); + intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW10(phy), IREF1RC_OFFSET_MASK, + 0xE4 << IREF1RC_OFFSET_SHIFT); /* Program power gating */ - val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW28(phy)); - val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | - SUS_CLK_CONFIG; - intel_de_write(dev_priv, BXT_PORT_CL1CM_DW28(phy), val); - - if (phy_info->dual_channel) { - val = intel_de_read(dev_priv, BXT_PORT_CL2CM_DW6(phy)); - val |= DW6_OLDO_DYN_PWR_DOWN_EN; - intel_de_write(dev_priv, BXT_PORT_CL2CM_DW6(phy), val); - } + intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW28(phy), 0, + OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG); + + if (phy_info->dual_channel) + intel_de_rmw(dev_priv, BXT_PORT_CL2CM_DW6(phy), 0, + DW6_OLDO_DYN_PWR_DOWN_EN); if (phy_info->rcomp_phy != -1) { u32 grc_code; @@ -449,34 +439,25 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, val << GRC_CODE_SLOW_SHIFT | val; intel_de_write(dev_priv, BXT_PORT_REF_DW6(phy), grc_code); - - val = intel_de_read(dev_priv, BXT_PORT_REF_DW8(phy)); - val |= GRC_DIS | GRC_RDY_OVRD; - intel_de_write(dev_priv, BXT_PORT_REF_DW8(phy), val); + intel_de_rmw(dev_priv, BXT_PORT_REF_DW8(phy), + 0, GRC_DIS | GRC_RDY_OVRD); } if (phy_info->reset_delay) udelay(phy_info->reset_delay); - val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy)); - val |= COMMON_RESET_DIS; - intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val); + intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), 0, COMMON_RESET_DIS); } void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy) { const struct bxt_ddi_phy_info *phy_info; - u32 val; phy_info = bxt_get_phy_info(dev_priv, phy); - val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy)); - val &= ~COMMON_RESET_DIS; - intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val); + intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), COMMON_RESET_DIS, 0); - val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON); - val &= ~phy_info->pwron_mask; - intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val); + intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, 
phy_info->pwron_mask, 0); } void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy) diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 380368eff31a..22fc908b7e5d 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -608,10 +608,8 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; - u32 val; - val = intel_de_read(dev_priv, WRPLL_CTL(id)); - intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE); + intel_de_rmw(dev_priv, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0); intel_de_posting_read(dev_priv, WRPLL_CTL(id)); /* @@ -626,10 +624,8 @@ static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { enum intel_dpll_id id = pll->info->id; - u32 val; - val = intel_de_read(dev_priv, SPLL_CTL); - intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE); + intel_de_rmw(dev_priv, SPLL_CTL, SPLL_PLL_ENABLE, 0); intel_de_posting_read(dev_priv, SPLL_CTL); /* @@ -1238,16 +1234,10 @@ static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; - u32 val; - val = intel_de_read(dev_priv, DPLL_CTRL1); - - val &= ~(DPLL_CTRL1_HDMI_MODE(id) | - DPLL_CTRL1_SSC(id) | - DPLL_CTRL1_LINK_RATE_MASK(id)); - val |= pll->state.hw_state.ctrl1 << (id * 6); - - intel_de_write(dev_priv, DPLL_CTRL1, val); + intel_de_rmw(dev_priv, DPLL_CTRL1, + DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id), + pll->state.hw_state.ctrl1 << (id * 6)); intel_de_posting_read(dev_priv, DPLL_CTRL1); } @@ -1265,8 +1255,7 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv, intel_de_posting_read(dev_priv, regs[id].cfgcr2); /* the enable bit is always bit 31 */ - intel_de_write(dev_priv, regs[id].ctl, - intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE); + intel_de_rmw(dev_priv, regs[id].ctl, 0, LCPLL_PLL_ENABLE); if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5)) drm_err(&dev_priv->drm, "DPLL %d not locked\n", id); @@ -1285,8 +1274,7 @@ static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv, const enum intel_dpll_id id = pll->info->id; /* the enable bit is always bit 31 */ - intel_de_write(dev_priv, regs[id].ctl, - intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE); + intel_de_rmw(dev_priv, regs[id].ctl, LCPLL_PLL_ENABLE, 0); intel_de_posting_read(dev_priv, regs[id].ctl); } @@ -1902,14 +1890,11 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); /* Non-SSC reference */ - temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); - temp |= PORT_PLL_REF_SEL; - intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL); if (IS_GEMINILAKE(dev_priv)) { - temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); - temp |= PORT_PLL_POWER_ENABLE; - intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), + 0, PORT_PLL_POWER_ENABLE); if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_POWER_STATE), 200)) @@ -1918,39 +1903,28 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, } /* Disable 10 bit clock */ - temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch)); - 
temp &= ~PORT_PLL_10BIT_CLK_ENABLE; - intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), + PORT_PLL_10BIT_CLK_ENABLE, 0); /* Write P1 & P2 */ - temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch)); - temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK); - temp |= pll->state.hw_state.ebb0; - intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), + PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0); /* Write M2 integer */ - temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0)); - temp &= ~PORT_PLL_M2_INT_MASK; - temp |= pll->state.hw_state.pll0; - intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 0), + PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0); /* Write N */ - temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1)); - temp &= ~PORT_PLL_N_MASK; - temp |= pll->state.hw_state.pll1; - intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 1), + PORT_PLL_N_MASK, pll->state.hw_state.pll1); /* Write M2 fraction */ - temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2)); - temp &= ~PORT_PLL_M2_FRAC_MASK; - temp |= pll->state.hw_state.pll2; - intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 2), + PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2); /* Write M2 fraction enable */ - temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3)); - temp &= ~PORT_PLL_M2_FRAC_ENABLE; - temp |= pll->state.hw_state.pll3; - intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 3), + PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3); /* Write coeff */ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6)); @@ -1961,15 +1935,11 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp); /* Write calibration val */ - temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8)); - temp &= ~PORT_PLL_TARGET_CNT_MASK; - temp |= pll->state.hw_state.pll8; - intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 8), + PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8); - temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9)); - temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK; - temp |= pll->state.hw_state.pll9; - intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 9), + PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9); temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10)); temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H; @@ -1986,9 +1956,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp); /* Enable PLL */ - temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); - temp |= PORT_PLL_ENABLE; - intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE); intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK), @@ -2016,17 +1984,13 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ - u32 temp; - temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); - temp 
&= ~PORT_PLL_ENABLE; - intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0); intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); if (IS_GEMINILAKE(dev_priv)) { - temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); - temp &= ~PORT_PLL_POWER_ENABLE; - intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), + PORT_PLL_POWER_ENABLE, 0); if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_POWER_STATE), 200)) @@ -3641,8 +3605,8 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv, !i915_mmio_reg_valid(div0_reg)); if (dev_priv->display.vbt.override_afc_startup && i915_mmio_reg_valid(div0_reg)) - intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK, - hw_state->div0); + intel_de_rmw(dev_priv, div0_reg, + TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0); intel_de_posting_read(dev_priv, cfgcr1_reg); } @@ -3651,7 +3615,6 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv, { struct intel_dpll_hw_state *hw_state = &pll->state.hw_state; enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id); - u32 val; /* * Some of the following registers have reserved fields, so program @@ -3659,23 +3622,19 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv, * during the calc/readout phase if the mask depends on some other HW * state like refclk, see icl_calc_mg_pll_state(). */ - val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port)); - val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK; - val |= hw_state->mg_refclkin_ctl; - intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val); + intel_de_rmw(dev_priv, MG_REFCLKIN_CTL(tc_port), + MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl); - val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port)); - val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; - val |= hw_state->mg_clktop2_coreclkctl1; - intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val); + intel_de_rmw(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), + MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK, + hw_state->mg_clktop2_coreclkctl1); - val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port)); - val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | - MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | - MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | - MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK); - val |= hw_state->mg_clktop2_hsclkctl; - intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val); + intel_de_rmw(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), + MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | + MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | + MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | + MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK, + hw_state->mg_clktop2_hsclkctl); intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0); intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1); @@ -3684,15 +3643,12 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv, hw_state->mg_pll_frac_lock); intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc); - val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port)); - val &= ~hw_state->mg_pll_bias_mask; - val |= hw_state->mg_pll_bias; - intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val); + intel_de_rmw(dev_priv, MG_PLL_BIAS(tc_port), + hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias); - val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port)); - val &= ~hw_state->mg_pll_tdc_coldst_bias_mask; - val |= hw_state->mg_pll_tdc_coldst_bias; - 
intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val); + intel_de_rmw(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), + hw_state->mg_pll_tdc_coldst_bias_mask, + hw_state->mg_pll_tdc_coldst_bias); intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port)); } @@ -3766,11 +3722,7 @@ static void icl_pll_power_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, i915_reg_t enable_reg) { - u32 val; - - val = intel_de_read(dev_priv, enable_reg); - val |= PLL_POWER_ENABLE; - intel_de_write(dev_priv, enable_reg, val); + intel_de_rmw(dev_priv, enable_reg, 0, PLL_POWER_ENABLE); /* * The spec says we need to "wait" but it also says it should be @@ -3785,11 +3737,7 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, i915_reg_t enable_reg) { - u32 val; - - val = intel_de_read(dev_priv, enable_reg); - val |= PLL_ENABLE; - intel_de_write(dev_priv, enable_reg, val); + intel_de_rmw(dev_priv, enable_reg, 0, PLL_ENABLE); /* Timeout is actually 600us. */ if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1)) @@ -3815,8 +3763,7 @@ static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct inte * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled. */ val = intel_de_read(i915, TRANS_CMTG_CHICKEN); - val = intel_de_read(i915, TRANS_CMTG_CHICKEN); - intel_de_write(i915, TRANS_CMTG_CHICKEN, DISABLE_DPT_CLK_GATING); + val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING); if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING)) drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val); } @@ -3900,8 +3847,6 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, i915_reg_t enable_reg) { - u32 val; - /* The first steps are done by intel_ddi_post_disable(). */ /* @@ -3910,9 +3855,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv, * nothing here. */ - val = intel_de_read(dev_priv, enable_reg); - val &= ~PLL_ENABLE; - intel_de_write(dev_priv, enable_reg, val); + intel_de_rmw(dev_priv, enable_reg, PLL_ENABLE, 0); /* Timeout is actually 1us. */ if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1)) @@ -3920,9 +3863,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv, /* DVFS post sequence would be here. See the comment above. 
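Most of the dpio_phy and dpll_mgr hunks above are mechanical conversions of the load/mask/store triplet to intel_de_rmw(dev_priv, reg, clear, set). Two properties matter when reviewing them: the bits in clear are removed before set is ORed in, and the pre-modification register value is returned, which the TRANS_CMTG_CHICKEN workaround above relies on. A standalone model of those semantics with MMIO stubbed out; the de_rmw() name and the array-backed register file are inventions for illustration only:

	#include <stdint.h>

	typedef uint32_t u32;

	static u32 regs[256];	/* stub register file standing in for MMIO */

	static u32 de_rmw(u32 reg, u32 clear, u32 set)
	{
		u32 old = regs[reg];

		/* Clear first, then set, exactly like the open-coded pattern. */
		regs[reg] = (old & ~clear) | set;

		return old;	/* callers may inspect the pre-modification value */
	}

So the old "val = read(reg); val |= PORT_PLL_REF_SEL; write(reg, val);" collapses to de_rmw(reg, 0, PORT_PLL_REF_SEL), and a masked field update passes the field's mask as clear with the new field value as set.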
*/ - val = intel_de_read(dev_priv, enable_reg); - val &= ~PLL_POWER_ENABLE; - intel_de_write(dev_priv, enable_reg, val); + intel_de_rmw(dev_priv, enable_reg, PLL_POWER_ENABLE, 0); /* * The spec says we need to "wait" but it also says it should be diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c index 29c6421cd666..760e63cdc0c8 100644 --- a/drivers/gpu/drm/i915/display/intel_drrs.c +++ b/drivers/gpu/drm/i915/display/intel_drrs.c @@ -68,21 +68,15 @@ intel_drrs_set_refresh_rate_pipeconf(struct intel_crtc *crtc, { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc->drrs.cpu_transcoder; - u32 val, bit; + u32 bit; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - bit = PIPECONF_REFRESH_RATE_ALT_VLV; + bit = TRANSCONF_REFRESH_RATE_ALT_VLV; else - bit = PIPECONF_REFRESH_RATE_ALT_ILK; + bit = TRANSCONF_REFRESH_RATE_ALT_ILK; - val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); - - if (refresh_rate == DRRS_REFRESH_RATE_LOW) - val |= bit; - else - val &= ~bit; - - intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val); + intel_de_rmw(dev_priv, TRANSCONF(cpu_transcoder), + bit, refresh_rate == DRRS_REFRESH_RATE_LOW ? bit : 0); } static void diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index 96bc117fd6a0..19e422da57dc 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -88,7 +88,8 @@ static bool assert_dsb_has_room(struct intel_dsb *dsb) /* each instruction is 2 dwords */ return !drm_WARN(&i915->drm, dsb->free_pos > dsb->size - 2, - "DSB buffer overflow\n"); + "[CRTC:%d:%s] DSB %d buffer overflow\n", + crtc->base.base.id, crtc->base.name, dsb->id); } static bool is_dsb_busy(struct drm_i915_private *i915, enum pipe pipe, @@ -198,7 +199,7 @@ void intel_dsb_reg_write(struct intel_dsb *dsb, } } -static u32 intel_dsb_align_tail(struct intel_dsb *dsb) +static void intel_dsb_align_tail(struct intel_dsb *dsb) { u32 aligned_tail, tail; @@ -210,49 +211,58 @@ static u32 intel_dsb_align_tail(struct intel_dsb *dsb) aligned_tail - tail); dsb->free_pos = aligned_tail / 4; +} - return aligned_tail; +void intel_dsb_finish(struct intel_dsb *dsb) +{ + intel_dsb_align_tail(dsb); } /** * intel_dsb_commit() - Trigger workload execution of DSB. * @dsb: DSB context + * @wait_for_vblank: wait for vblank before executing * * This function is used to do actual write to hardware using DSB. */ -void intel_dsb_commit(struct intel_dsb *dsb) +void intel_dsb_commit(struct intel_dsb *dsb, bool wait_for_vblank) { struct intel_crtc *crtc = dsb->crtc; struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; u32 tail; - tail = intel_dsb_align_tail(dsb); - if (tail == 0) + tail = dsb->free_pos * 4; + if (drm_WARN_ON(&dev_priv->drm, !IS_ALIGNED(tail, CACHELINE_BYTES))) return; if (is_dsb_busy(dev_priv, pipe, dsb->id)) { - drm_err(&dev_priv->drm, "DSB engine is busy.\n"); - goto reset; + drm_err(&dev_priv->drm, "[CRTC:%d:%s] DSB %d is busy\n", + crtc->base.base.id, crtc->base.name, dsb->id); + return; } intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), + (wait_for_vblank ? 
DSB_WAIT_FOR_VBLANK : 0) | DSB_ENABLE); intel_de_write(dev_priv, DSB_HEAD(pipe, dsb->id), i915_ggtt_offset(dsb->vma)); intel_de_write(dev_priv, DSB_TAIL(pipe, dsb->id), i915_ggtt_offset(dsb->vma) + tail); +} - drm_dbg_kms(&dev_priv->drm, - "DSB execution started - head 0x%x, tail 0x%x\n", - i915_ggtt_offset(dsb->vma), - i915_ggtt_offset(dsb->vma) + tail); +void intel_dsb_wait(struct intel_dsb *dsb) +{ + struct intel_crtc *crtc = dsb->crtc; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; if (wait_for(!is_dsb_busy(dev_priv, pipe, dsb->id), 1)) drm_err(&dev_priv->drm, - "Timed out waiting for DSB workload completion.\n"); + "[CRTC:%d:%s] DSB %d timed out waiting for idle\n", + crtc->base.base.id, crtc->base.name, dsb->id); -reset: + /* Attempt to reset it */ dsb->free_pos = 0; dsb->ins_start_offset = 0; intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), 0); @@ -325,7 +335,8 @@ out_put_rpm: kfree(dsb); out: drm_info_once(&i915->drm, - "DSB queue setup failed, will fallback to MMIO for display HW programming\n"); + "[CRTC:%d:%s] DSB %d queue setup failed, will fallback to MMIO for display HW programming\n", + crtc->base.base.id, crtc->base.name, DSB1); return NULL; } diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h index 05c221b6d0a4..b8148b47022d 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.h +++ b/drivers/gpu/drm/i915/display/intel_dsb.h @@ -15,9 +15,12 @@ struct intel_dsb; struct intel_dsb *intel_dsb_prepare(struct intel_crtc *crtc, unsigned int max_cmds); +void intel_dsb_finish(struct intel_dsb *dsb); void intel_dsb_cleanup(struct intel_dsb *dsb); void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val); -void intel_dsb_commit(struct intel_dsb *dsb); +void intel_dsb_commit(struct intel_dsb *dsb, + bool wait_for_vblank); +void intel_dsb_wait(struct intel_dsb *dsb); #endif diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c index 20e466d843ce..049443245310 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c @@ -162,6 +162,7 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state, static int dcs_setup_backlight(struct intel_connector *connector, enum pipe unused) { + struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; if (panel->vbt.backlight.brightness_precision_bits > 8) @@ -171,6 +172,10 @@ static int dcs_setup_backlight(struct intel_connector *connector, panel->backlight.level = panel->backlight.max; + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] Using DCS for backlight control\n", + connector->base.base.id, connector->base.name); + return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c index 0be8105cb18a..eb2dcd866cc8 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.c +++ b/drivers/gpu/drm/i915/display/intel_dvo.c @@ -444,11 +444,8 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv, * the clock enabled before we attempt to initialize * the device. 
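One more structural change worth noting from the intel_dsb.c hunks above: submission is now split into three phases. intel_dsb_finish() pads the command buffer tail out to a cacheline, intel_dsb_commit() arms DSB_CTRL/DSB_HEAD/DSB_TAIL and can defer execution to the next vblank, and intel_dsb_wait() polls for idle and resets the write positions so the buffer can be reused. A hedged usage sketch of the API as declared in the intel_dsb.h hunk; the buffer size, reg, and val here are placeholders, and error handling is elided:

	struct intel_dsb *dsb = intel_dsb_prepare(crtc, 1024);

	if (dsb) {
		intel_dsb_reg_write(dsb, reg, val);	/* queue MMIO writes */
		intel_dsb_finish(dsb);			/* cacheline-align the tail */
		intel_dsb_commit(dsb, true);		/* execute at next vblank */
		intel_dsb_wait(dsb);			/* block until idle, then reset */
		intel_dsb_cleanup(dsb);
	}

When intel_dsb_prepare() fails, the driver logs once and falls back to plain MMIO for display programming, so callers must keep a non-DSB path working.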
*/ - for_each_pipe(dev_priv, pipe) { - dpll[pipe] = intel_de_read(dev_priv, DPLL(pipe)); - intel_de_write(dev_priv, DPLL(pipe), - dpll[pipe] | DPLL_DVO_2X_MODE); - } + for_each_pipe(dev_priv, pipe) + dpll[pipe] = intel_de_rmw(dev_priv, DPLL(pipe), 0, DPLL_DVO_2X_MODE); ret = dvo->dev_ops->init(&intel_dvo->dev, i2c); diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index 93d0e46e5481..799bdc81a6a9 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -2007,6 +2007,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb, vm = intel_dpt_create(intel_fb); if (IS_ERR(vm)) { + drm_dbg_kms(&dev_priv->drm, "failed to create DPT\n"); ret = PTR_ERR(vm); goto err; } @@ -2017,11 +2018,14 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb, ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs); if (ret) { drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret); - goto err; + goto err_free_dpt; } return 0; +err_free_dpt: + if (intel_fb_uses_dpt(fb)) + intel_dpt_destroy(intel_fb->dpt_vm); err: intel_frontbuffer_put(intel_fb->frontbuffer); return ret; @@ -2046,6 +2050,7 @@ intel_user_framebuffer_create(struct drm_device *dev, if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM_0)) { /* object is "remote", not in local memory */ i915_gem_object_put(obj); + drm_dbg_kms(&i915->drm, "framebuffer must reside in local memory\n"); return ERR_PTR(-EREMOTE); } diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index f76b06293eb9..3659350061a7 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -561,9 +561,9 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie) intel_fbdev_unregister(to_i915(ifbdev->helper.dev)); } -void intel_fbdev_initial_config_async(struct drm_device *dev) +void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv) { - struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev; + struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev; if (!ifbdev) return; @@ -706,9 +706,9 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev) drm_fb_helper_hotplug_event(&ifbdev->helper); } -void intel_fbdev_restore_mode(struct drm_device *dev) +void intel_fbdev_restore_mode(struct drm_i915_private *dev_priv) { - struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev; + struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev; if (!ifbdev) return; diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h index 0e95e9472fa3..04fd523a5023 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.h +++ b/drivers/gpu/drm/i915/display/intel_fbdev.h @@ -15,12 +15,12 @@ struct intel_framebuffer; #ifdef CONFIG_DRM_FBDEV_EMULATION int intel_fbdev_init(struct drm_device *dev); -void intel_fbdev_initial_config_async(struct drm_device *dev); +void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv); void intel_fbdev_unregister(struct drm_i915_private *dev_priv); void intel_fbdev_fini(struct drm_i915_private *dev_priv); void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous); void intel_fbdev_output_poll_changed(struct drm_device *dev); -void intel_fbdev_restore_mode(struct drm_device *dev); +void intel_fbdev_restore_mode(struct drm_i915_private *dev_priv); struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev 
*fbdev); #else static inline int intel_fbdev_init(struct drm_device *dev) @@ -28,7 +28,7 @@ static inline int intel_fbdev_init(struct drm_device *dev) return 0; } -static inline void intel_fbdev_initial_config_async(struct drm_device *dev) +static inline void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv) { } @@ -48,7 +48,7 @@ static inline void intel_fbdev_output_poll_changed(struct drm_device *dev) { } -static inline void intel_fbdev_restore_mode(struct drm_device *dev) +static inline void intel_fbdev_restore_mode(struct drm_i915_private *i915) { } static inline struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev) diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c index 063f1da4f229..f55b4893c00f 100644 --- a/drivers/gpu/drm/i915/display/intel_fdi.c +++ b/drivers/gpu/drm/i915/display/intel_fdi.c @@ -366,8 +366,7 @@ void intel_fdi_normal_train(struct intel_crtc *crtc) /* IVB wants error correction enabled */ if (IS_IVYBRIDGE(dev_priv)) - intel_de_write(dev_priv, reg, - intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE); + intel_de_rmw(dev_priv, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE); } /* The FDI link training functions for ILK/Ibexpeak. */ @@ -439,19 +438,11 @@ static void ilk_fdi_link_train(struct intel_crtc *crtc, drm_err(&dev_priv->drm, "FDI train 1 fail!\n"); /* Train 2 */ - reg = FDI_TX_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_2; - intel_de_write(dev_priv, reg, temp); - - reg = FDI_RX_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_2; - intel_de_write(dev_priv, reg, temp); - - intel_de_posting_read(dev_priv, reg); + intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), + FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2); + intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), + FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2); + intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe)); udelay(150); reg = FDI_RX_IIR(pipe); @@ -538,13 +529,9 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc, udelay(150); for (i = 0; i < 4; i++) { - reg = FDI_TX_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; - temp |= snb_b_fdi_train_param[i]; - intel_de_write(dev_priv, reg, temp); - - intel_de_posting_read(dev_priv, reg); + intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), + FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]); + intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe)); udelay(500); for (retry = 0; retry < 5; retry++) { @@ -593,13 +580,9 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc, udelay(150); for (i = 0; i < 4; i++) { - reg = FDI_TX_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; - temp |= snb_b_fdi_train_param[i]; - intel_de_write(dev_priv, reg, temp); - - intel_de_posting_read(dev_priv, reg); + intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), + FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]); + intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe)); udelay(500); for (retry = 0; retry < 5; retry++) { @@ -719,19 +702,13 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc, } /* Train 2 */ - reg = FDI_TX_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - temp &= ~FDI_LINK_TRAIN_NONE_IVB; - temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; - intel_de_write(dev_priv, reg, temp); - - reg = FDI_RX_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - temp &= 
~FDI_LINK_TRAIN_PATTERN_MASK_CPT; - temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; - intel_de_write(dev_priv, reg, temp); - - intel_de_posting_read(dev_priv, reg); + intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), + FDI_LINK_TRAIN_NONE_IVB, + FDI_LINK_TRAIN_PATTERN_2_IVB); + intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), + FDI_LINK_TRAIN_PATTERN_MASK_CPT, + FDI_LINK_TRAIN_PATTERN_2_CPT); + intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe)); udelay(2); /* should be 1.5us */ for (i = 0; i < 4; i++) { @@ -837,9 +814,8 @@ void hsw_fdi_link_train(struct intel_encoder *encoder, udelay(30); /* Unset FDI_RX_MISC pwrdn lanes */ - temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A)); - temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); - intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp); + intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A), + FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0); intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A)); /* Wait for FDI auto training time */ @@ -865,25 +841,21 @@ void hsw_fdi_link_train(struct intel_encoder *encoder, intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val); intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A)); - temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E)); - temp &= ~DDI_BUF_CTL_ENABLE; - intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), temp); + intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0); intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E)); /* Disable DP_TP_CTL and FDI_RX_CTL and retry */ - temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E)); - temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); - temp |= DP_TP_CTL_LINK_TRAIN_PAT1; - intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp); + intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), + DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK, + DP_TP_CTL_LINK_TRAIN_PAT1); intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E)); intel_wait_ddi_buf_idle(dev_priv, PORT_E); /* Reset FDI_RX_MISC pwrdn lanes */ - temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A)); - temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); - temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); - intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp); + intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A), + FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, + FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2)); intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A)); } @@ -898,7 +870,6 @@ void hsw_fdi_link_train(struct intel_encoder *encoder, void hsw_fdi_disable(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 val; /* * Bspec lists this as both step 13 (before DDI_BUF_CTL disable) @@ -906,30 +877,15 @@ void hsw_fdi_disable(struct intel_encoder *encoder) * step 13 is the correct place for it. Step 18 is where it was * originally before the BUN. 
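/*
 * Illustrative note on the DP_TP_CTL conversion above: one rmw can clear
 * and set overlapping fields in a single register write, because the
 * clear mask is applied before the set bits. DP_TP_CTL_LINK_TRAIN_PAT1
 * lies within DP_TP_CTL_LINK_TRAIN_MASK, so this single call drops the
 * enable bit and rewrites the training pattern field together:
 */
	intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E),
		     DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK,
		     DP_TP_CTL_LINK_TRAIN_PAT1);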
*/ - val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); - val &= ~FDI_RX_ENABLE; - intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); - - val = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E)); - val &= ~DDI_BUF_CTL_ENABLE; - intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), val); - + intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0); + intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0); intel_wait_ddi_buf_idle(dev_priv, PORT_E); - intel_ddi_disable_clock(encoder); - - val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A)); - val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); - val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); - intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val); - - val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); - val &= ~FDI_PCDCLK; - intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); - - val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); - val &= ~FDI_RX_PLL_ENABLE; - intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); + intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A), + FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, + FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2)); + intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0); + intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0); } void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state) @@ -945,16 +901,14 @@ void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state) temp = intel_de_read(dev_priv, reg); temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16)); temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); - temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; + temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11; intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE); intel_de_posting_read(dev_priv, reg); udelay(200); /* Switch from Rawclk to PCDclk */ - temp = intel_de_read(dev_priv, reg); - intel_de_write(dev_priv, reg, temp | FDI_PCDCLK); - + intel_de_rmw(dev_priv, reg, 0, FDI_PCDCLK); intel_de_posting_read(dev_priv, reg); udelay(200); @@ -974,28 +928,18 @@ void ilk_fdi_pll_disable(struct intel_crtc *crtc) struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = crtc->pipe; - i915_reg_t reg; - u32 temp; /* Switch from PCDclk to Rawclk */ - reg = FDI_RX_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK); + intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_PCDCLK, 0); /* Disable CPU FDI TX PLL */ - reg = FDI_TX_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE); - - intel_de_posting_read(dev_priv, reg); + intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0); + intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe)); udelay(100); - reg = FDI_RX_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE); - /* Wait for the clocks to turn off. 
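/*
 * Illustrative note for the FDI hunks above: intel_de_posting_read() is a
 * read-back that flushes the preceding posted MMIO write before a delay,
 * which is why the conversions keep it even though intel_de_rmw() already
 * performed a read of its own. Likewise FDI_PCDCLK and FDI_RX_PLL_ENABLE
 * are still cleared with two separate rmw calls, preserving the original
 * ordering (PCD clock off before the RX PLL goes down). Condensed sketch
 * of that shape:
 */
static void sketch_fdi_rx_pll_off(struct drm_i915_private *i915, enum pipe pipe)
{
	intel_de_rmw(i915, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);
	intel_de_posting_read(i915, FDI_RX_CTL(pipe)); /* flush before delaying */
	udelay(100);
	intel_de_rmw(i915, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
}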
*/ - intel_de_posting_read(dev_priv, reg); + intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0); + intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe)); udelay(100); } @@ -1007,15 +951,13 @@ void ilk_fdi_disable(struct intel_crtc *crtc) u32 temp; /* disable CPU FDI tx and PCH FDI rx */ - reg = FDI_TX_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE); - intel_de_posting_read(dev_priv, reg); + intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0); + intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe)); reg = FDI_RX_CTL(pipe); temp = intel_de_read(dev_priv, reg); temp &= ~(0x7 << 16); - temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; + temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11; intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE); intel_de_posting_read(dev_priv, reg); @@ -1027,11 +969,8 @@ void ilk_fdi_disable(struct intel_crtc *crtc) FDI_RX_PHASE_SYNC_POINTER_OVR); /* still set train pattern 1 */ - reg = FDI_TX_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_1; - intel_de_write(dev_priv, reg, temp); + intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), + FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1); reg = FDI_RX_CTL(pipe); temp = intel_de_read(dev_priv, reg); @@ -1042,9 +981,9 @@ void ilk_fdi_disable(struct intel_crtc *crtc) temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; } - /* BPC in FDI rx is consistent with that in PIPECONF */ + /* BPC in FDI rx is consistent with that in TRANSCONF */ temp &= ~(0x07 << 16); - temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; + temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11; intel_de_write(dev_priv, reg, temp); intel_de_posting_read(dev_priv, reg); diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c index d636d21fa9ce..b708a62e509a 100644 --- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c @@ -31,6 +31,7 @@ #include "intel_display_types.h" #include "intel_fbc.h" #include "intel_fifo_underrun.h" +#include "intel_pch_display.h" /** * DOC: fifo underrun handling @@ -509,3 +510,22 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv) spin_unlock_irq(&dev_priv->irq_lock); } + +void intel_init_fifo_underrun_reporting(struct drm_i915_private *i915, + struct intel_crtc *crtc, + bool enable) +{ + crtc->cpu_fifo_underrun_disabled = !enable; + + /* + * We track the PCH trancoder underrun reporting state + * within the crtc. With crtc for pipe A housing the underrun + * reporting state for PCH transcoder A, crtc for pipe B housing + * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A, + * and marking underrun reporting as disabled for the non-existing + * PCH transcoders B and C would prevent enabling the south + * error interrupt (see cpt_can_enable_serr_int()). 
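/*
 * Illustrative note on the new helper above: its caller (see the
 * intel_modeset_setup.c hunk later in this diff) computes the enable flag
 * from the sanitize-time state, so the negation inside the helper keeps
 * the old bookkeeping intact:
 */
	bool enable = !crtc_state->hw.active && !HAS_GMCH(i915);

	intel_init_fifo_underrun_reporting(i915, crtc, enable);
	/*
	 * => crtc->cpu_fifo_underrun_disabled == (active || HAS_GMCH),
	 * exactly the set of pipes the old open-coded version marked
	 * disabled, with the PCH side still guarded by
	 * intel_has_pch_trancoder().
	 */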
+ */ + if (intel_has_pch_trancoder(i915, crtc->pipe)) + crtc->pch_fifo_underrun_disabled = !enable; +} diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.h b/drivers/gpu/drm/i915/display/intel_fifo_underrun.h index 2e47d7d3c101..b00d8abebcf9 100644 --- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.h +++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.h @@ -9,8 +9,11 @@ #include <linux/types.h> struct drm_i915_private; +struct intel_crtc; enum pipe; +void intel_init_fifo_underrun_reporting(struct drm_i915_private *i915, + struct intel_crtc *crtc, bool enable); bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable); bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index 0bc4f6b48e80..3ddfc8080ee8 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -215,41 +215,23 @@ intel_gmbus_reset(struct drm_i915_private *i915) static void pnv_gmbus_clock_gating(struct drm_i915_private *i915, bool enable) { - u32 val; - /* When using bit bashing for I2C, this bit needs to be set to 1 */ - val = intel_de_read(i915, DSPCLK_GATE_D(i915)); - if (!enable) - val |= PNV_GMBUSUNIT_CLOCK_GATE_DISABLE; - else - val &= ~PNV_GMBUSUNIT_CLOCK_GATE_DISABLE; - intel_de_write(i915, DSPCLK_GATE_D(i915), val); + intel_de_rmw(i915, DSPCLK_GATE_D(i915), PNV_GMBUSUNIT_CLOCK_GATE_DISABLE, + !enable ? PNV_GMBUSUNIT_CLOCK_GATE_DISABLE : 0); } static void pch_gmbus_clock_gating(struct drm_i915_private *i915, bool enable) { - u32 val; - - val = intel_de_read(i915, SOUTH_DSPCLK_GATE_D); - if (!enable) - val |= PCH_GMBUSUNIT_CLOCK_GATE_DISABLE; - else - val &= ~PCH_GMBUSUNIT_CLOCK_GATE_DISABLE; - intel_de_write(i915, SOUTH_DSPCLK_GATE_D, val); + intel_de_rmw(i915, SOUTH_DSPCLK_GATE_D, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE, + !enable ? PCH_GMBUSUNIT_CLOCK_GATE_DISABLE : 0); } static void bxt_gmbus_clock_gating(struct drm_i915_private *i915, bool enable) { - u32 val; - - val = intel_de_read(i915, GEN9_CLKGATE_DIS_4); - if (!enable) - val |= BXT_GMBUS_GATING_DIS; - else - val &= ~BXT_GMBUS_GATING_DIS; - intel_de_write(i915, GEN9_CLKGATE_DIS_4, val); + intel_de_rmw(i915, GEN9_CLKGATE_DIS_4, BXT_GMBUS_GATING_DIS, + !enable ? BXT_GMBUS_GATING_DIS : 0); } static u32 get_reserved(struct intel_gmbus *bus) diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 6406fd487ee5..2984d2810e42 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -943,8 +943,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector) repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port); - intel_de_write(dev_priv, HDCP_REP_CTL, - intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl); + intel_de_rmw(dev_priv, HDCP_REP_CTL, repeater_ctl, 0); ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false); if (ret) { @@ -1819,12 +1818,10 @@ static int hdcp2_enable_encryption(struct intel_connector *connector) } if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & - LINK_AUTH_STATUS) { + LINK_AUTH_STATUS) /* Link is Authenticated. 
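/*
 * Illustrative note: the three gmbus clock-gating helpers above all
 * collapse to the same idiom, a single "gating disable" bit that must be
 * set for as long as bit-banged I2C is in use (enable == false means
 * "keep the unit ungated", i.e. set the bit):
 */
static void sketch_gmbus_gating(struct drm_i915_private *i915, bool enable)
{
	intel_de_rmw(i915, GEN9_CLKGATE_DIS_4, BXT_GMBUS_GATING_DIS,
		     enable ? 0 : BXT_GMBUS_GATING_DIS);
}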
Now set for Encryption */ - intel_de_write(dev_priv, - HDCP2_CTL(dev_priv, cpu_transcoder, port), - intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ); - } + intel_de_rmw(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port), + 0, CTL_LINK_ENCRYPTION_REQ); ret = intel_de_wait_for_set(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, @@ -1848,8 +1845,8 @@ static int hdcp2_disable_encryption(struct intel_connector *connector) drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & LINK_ENCRYPTION_STATUS)); - intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port), - intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ); + intel_de_rmw(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port), + CTL_LINK_ENCRYPTION_REQ, 0); ret = intel_de_wait_for_clear(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index c0ce6d3dc505..c7e9e1fbed37 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -238,15 +238,11 @@ static void g4x_read_infoframe(struct intel_encoder *encoder, void *frame, ssize_t len) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 val, *data = frame; + u32 *data = frame; int i; - val = intel_de_read(dev_priv, VIDEO_DIP_CTL); - - val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ - val |= g4x_infoframe_index(type); - - intel_de_write(dev_priv, VIDEO_DIP_CTL, val); + intel_de_rmw(dev_priv, VIDEO_DIP_CTL, + VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type)); for (i = 0; i < len; i += 4) *data++ = intel_de_read(dev_priv, VIDEO_DIP_DATA); @@ -314,15 +310,11 @@ static void ibx_read_infoframe(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - u32 val, *data = frame; + u32 *data = frame; int i; - val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe)); - - val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ - val |= g4x_infoframe_index(type); - - intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val); + intel_de_rmw(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), + VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type)); for (i = 0; i < len; i += 4) *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe)); @@ -396,15 +388,11 @@ static void cpt_read_infoframe(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - u32 val, *data = frame; + u32 *data = frame; int i; - val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe)); - - val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ - val |= g4x_infoframe_index(type); - - intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val); + intel_de_rmw(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), + VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type)); for (i = 0; i < len; i += 4) *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe)); @@ -472,15 +460,11 @@ static void vlv_read_infoframe(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - u32 val, *data = frame; + u32 *data = frame; int i; - val = intel_de_read(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe)); - - val &= 
~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ - val |= g4x_infoframe_index(type); - - intel_de_write(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe), val); + intel_de_rmw(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe), + VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type)); for (i = 0; i < len; i += 4) *data++ = intel_de_read(dev_priv, @@ -1795,7 +1779,7 @@ static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder) else max_tmds_clock = 165000; - vbt_max_tmds_clock = intel_bios_max_tmds_clock(encoder); + vbt_max_tmds_clock = intel_bios_hdmi_max_tmds_clock(encoder->devdata); if (vbt_max_tmds_clock) max_tmds_clock = min(max_tmds_clock, vbt_max_tmds_clock); @@ -2152,7 +2136,7 @@ bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state, * Our YCbCr output is always limited range. * crtc_state->limited_color_range only applies to RGB, * and it must never be set for YCbCr or we risk setting - * some conflicting bits in PIPECONF which will mess up + * some conflicting bits in TRANSCONF which will mess up * the colors on the monitor. */ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) @@ -2240,6 +2224,25 @@ static bool intel_hdmi_is_cloned(const struct intel_crtc_state *crtc_state) !is_power_of_2(crtc_state->uapi.encoder_mask); } +static bool source_supports_scrambling(struct intel_encoder *encoder) +{ + /* + * Gen 10+ support HDMI 2.0 : the max tmds clock is 594MHz, and + * scrambling is supported. + * But there seem to be cases where certain platforms that support + * HDMI 2.0, have an HDMI1.4 retimer chip, and the max tmds clock is + * capped by VBT to less than 340MHz. + * + * In such cases when an HDMI2.0 sink is connected, it creates a + * problem : the platform and the sink both support scrambling but the + * HDMI 1.4 retimer chip doesn't. + * + * So go for scrambling, based on the max tmds clock taking into account, + * restrictions coming from VBT. 
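/*
 * Illustrative worked example for the check below, using only the clamp
 * visible in intel_hdmi_source_max_tmds_clock(): with a gen 10+ platform
 * maximum of 594000 kHz but a (hypothetical) VBT retimer cap of
 * 297000 kHz,
 *
 *	min(594000, 297000) = 297000, and 297000 > 340000 is false,
 *
 * so a board whose VBT advertises an HDMI 1.4 retimer limit never
 * attempts scrambled signalling, which is exactly the failure mode the
 * comment above describes.
 */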
+ */ + return intel_hdmi_source_max_tmds_clock(encoder) > 340000; +} + int intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) @@ -2302,7 +2305,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, pipe_config->lane_count = 4; - if (scdc->scrambling.supported && DISPLAY_VER(dev_priv) >= 10) { + if (scdc->scrambling.supported && source_supports_scrambling(encoder)) { if (scdc->scrambling.low_rates) pipe_config->hdmi_scrambling = true; @@ -2852,11 +2855,12 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder) enum port port = encoder->port; u8 ddc_pin; - ddc_pin = intel_bios_alternate_ddc_pin(encoder); + ddc_pin = intel_bios_hdmi_ddc_pin(encoder->devdata); if (ddc_pin) { drm_dbg_kms(&dev_priv->drm, - "Using DDC pin 0x%x for port %c (VBT)\n", - ddc_pin, port_name(port)); + "[ENCODER:%d:%s] Using DDC pin 0x%x (VBT)\n", + encoder->base.base.id, encoder->base.name, + ddc_pin); return ddc_pin; } @@ -2882,8 +2886,9 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder) ddc_pin = g4x_port_to_ddc_pin(dev_priv, port); drm_dbg_kms(&dev_priv->drm, - "Using DDC pin 0x%x for port %c (platform default)\n", - ddc_pin, port_name(port)); + "[ENCODER:%d:%s] Using DDC pin 0x%x (platform default)\n", + encoder->base.base.id, encoder->base.name, + ddc_pin); return ddc_pin; } @@ -2904,7 +2909,7 @@ void intel_infoframe_init(struct intel_digital_port *dig_port) dig_port->set_infoframes = g4x_set_infoframes; dig_port->infoframes_enabled = g4x_infoframes_enabled; } else if (HAS_DDI(dev_priv)) { - if (intel_bios_is_lspcon_present(dev_priv, dig_port->base.port)) { + if (intel_bios_encoder_is_lspcon(dig_port->base.devdata)) { dig_port->write_infoframe = lspcon_write_infoframe; dig_port->read_infoframe = lspcon_read_infoframe; dig_port->set_infoframes = lspcon_set_infoframes; diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c index 8aaaef4d7856..5863763de530 100644 --- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c @@ -315,7 +315,7 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv) * intel_lpe_audio_notify() - notify lpe audio event * audio driver and i915 * @dev_priv: the i915 drm device private data - * @pipe: pipe + * @cpu_transcoder: CPU transcoder * @port: port * @eld : ELD data * @ls_clock: Link symbol clock in kHz @@ -324,7 +324,7 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv) * Notify lpe audio driver of eld change. 
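/*
 * Illustrative sketch of the DDC pin lookup whose logging changed above:
 * VBT wins, otherwise the per-platform default is used. Only the g4x
 * fallback from this hunk is shown; the real function dispatches on the
 * platform first:
 */
	ddc_pin = intel_bios_hdmi_ddc_pin(encoder->devdata);
	if (!ddc_pin)
		ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);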
*/ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, - enum pipe pipe, enum port port, + enum transcoder cpu_transcoder, enum port port, const void *eld, int ls_clock, bool dp_output) { unsigned long irqflags; @@ -344,7 +344,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, if (eld != NULL) { memcpy(ppdata->eld, eld, HDMI_MAX_ELD_BYTES); - ppdata->pipe = pipe; + ppdata->pipe = cpu_transcoder; ppdata->ls_clock = ls_clock; ppdata->dp_output = dp_output; diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.h b/drivers/gpu/drm/i915/display/intel_lpe_audio.h index f848c5038714..0beecac267ae 100644 --- a/drivers/gpu/drm/i915/display/intel_lpe_audio.h +++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.h @@ -8,15 +8,15 @@ #include <linux/types.h> -enum pipe; enum port; +enum transcoder; struct drm_i915_private; int intel_lpe_audio_init(struct drm_i915_private *dev_priv); void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv); void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv); void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, - enum pipe pipe, enum port port, + enum transcoder cpu_transcoder, enum port port, const void *eld, int ls_clock, bool dp_output); #endif /* __INTEL_LPE_AUDIO_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c index 9ff1c0b223ad..bb3b5355a0d9 100644 --- a/drivers/gpu/drm/i915/display/intel_lspcon.c +++ b/drivers/gpu/drm/i915/display/intel_lspcon.c @@ -689,7 +689,7 @@ void lspcon_resume(struct intel_digital_port *dig_port) struct drm_i915_private *i915 = to_i915(dev); enum drm_lspcon_mode expected_mode; - if (!intel_bios_is_lspcon_present(i915, dig_port->base.port)) + if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata)) return; if (!lspcon->active) { diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index a1557d84ce0a..a504b3a7fbd5 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -49,6 +49,7 @@ #include "intel_fdi.h" #include "intel_gmbus.h" #include "intel_lvds.h" +#include "intel_lvds_regs.h" #include "intel_panel.h" /* Private structure for the integrated LVDS support */ @@ -84,18 +85,18 @@ static struct intel_lvds_encoder *to_lvds_encoder(struct intel_encoder *encoder) return container_of(encoder, struct intel_lvds_encoder, base); } -bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv, +bool intel_lvds_port_enabled(struct drm_i915_private *i915, i915_reg_t lvds_reg, enum pipe *pipe) { u32 val; - val = intel_de_read(dev_priv, lvds_reg); + val = intel_de_read(i915, lvds_reg); /* asserts want to know the pipe even if the port is disabled */ - if (HAS_PCH_CPT(dev_priv)) - *pipe = (val & LVDS_PIPE_SEL_MASK_CPT) >> LVDS_PIPE_SEL_SHIFT_CPT; + if (HAS_PCH_CPT(i915)) + *pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK_CPT, val); else - *pipe = (val & LVDS_PIPE_SEL_MASK) >> LVDS_PIPE_SEL_SHIFT; + *pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK, val); return val & LVDS_PORT_EN; } @@ -103,31 +104,30 @@ bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv, static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); intel_wakeref_t wakeref; bool ret; - wakeref = intel_display_power_get_if_enabled(dev_priv, - 
encoder->power_domain); + wakeref = intel_display_power_get_if_enabled(i915, encoder->power_domain); if (!wakeref) return false; - ret = intel_lvds_port_enabled(dev_priv, lvds_encoder->reg, pipe); + ret = intel_lvds_port_enabled(i915, lvds_encoder->reg, pipe); - intel_display_power_put(dev_priv, encoder->power_domain, wakeref); + intel_display_power_put(i915, encoder->power_domain, wakeref); return ret; } static void intel_lvds_get_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config) + struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); u32 tmp, flags = 0; - pipe_config->output_types |= BIT(INTEL_OUTPUT_LVDS); + crtc_state->output_types |= BIT(INTEL_OUTPUT_LVDS); tmp = intel_de_read(dev_priv, lvds_encoder->reg); if (tmp & LVDS_HSYNC_POLARITY) @@ -139,20 +139,20 @@ static void intel_lvds_get_config(struct intel_encoder *encoder, else flags |= DRM_MODE_FLAG_PVSYNC; - pipe_config->hw.adjusted_mode.flags |= flags; + crtc_state->hw.adjusted_mode.flags |= flags; if (DISPLAY_VER(dev_priv) < 5) - pipe_config->gmch_pfit.lvds_border_bits = + crtc_state->gmch_pfit.lvds_border_bits = tmp & LVDS_BORDER_ENABLE; /* gen2/3 store dither state in pfit control, needs to match */ if (DISPLAY_VER(dev_priv) < 4) { tmp = intel_de_read(dev_priv, PFIT_CONTROL); - pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE; + crtc_state->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE; } - pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock; + crtc_state->hw.adjusted_mode.crtc_clock = crtc_state->port_clock; } static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv, @@ -216,41 +216,44 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv, intel_de_write(dev_priv, PP_CONTROL(0), val); intel_de_write(dev_priv, PP_ON_DELAYS(0), - REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) | REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) | REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5)); + REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) | + REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) | + REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5)); intel_de_write(dev_priv, PP_OFF_DELAYS(0), - REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) | REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx)); + REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) | + REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx)); intel_de_write(dev_priv, PP_DIVISOR(0), - REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1)); + REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) | + REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1)); } static void intel_pre_enable_lvds(struct intel_atomic_state *state, struct intel_encoder *encoder, - const struct intel_crtc_state *pipe_config, + const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); - const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + const struct drm_display_mode *adjusted_mode = 
&crtc_state->hw.adjusted_mode; enum pipe pipe = crtc->pipe; u32 temp; - if (HAS_PCH_SPLIT(dev_priv)) { - assert_fdi_rx_pll_disabled(dev_priv, pipe); - assert_shared_dpll_disabled(dev_priv, - pipe_config->shared_dpll); + if (HAS_PCH_SPLIT(i915)) { + assert_fdi_rx_pll_disabled(i915, pipe); + assert_shared_dpll_disabled(i915, crtc_state->shared_dpll); } else { - assert_pll_disabled(dev_priv, pipe); + assert_pll_disabled(i915, pipe); } - intel_lvds_pps_init_hw(dev_priv, &lvds_encoder->init_pps); + intel_lvds_pps_init_hw(i915, &lvds_encoder->init_pps); temp = lvds_encoder->init_lvds_val; temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; - if (HAS_PCH_CPT(dev_priv)) { + if (HAS_PCH_CPT(i915)) { temp &= ~LVDS_PIPE_SEL_MASK_CPT; temp |= LVDS_PIPE_SEL_CPT(pipe); } else { @@ -260,7 +263,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state, /* set the corresponsding LVDS_BORDER bit */ temp &= ~LVDS_BORDER_ENABLE; - temp |= pipe_config->gmch_pfit.lvds_border_bits; + temp |= crtc_state->gmch_pfit.lvds_border_bits; /* * Set the B0-B3 data pairs corresponding to whether we're going to @@ -283,14 +286,14 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state, /* * Set the dithering flag on LVDS as needed, note that there is no * special lvds dither control bit on pch-split platforms, dithering is - * only controlled through the PIPECONF reg. + * only controlled through the TRANSCONF reg. */ - if (DISPLAY_VER(dev_priv) == 4) { + if (DISPLAY_VER(i915) == 4) { /* * Bspec wording suggests that LVDS port dithering only exists * for 18bpp panels. */ - if (pipe_config->dither && pipe_config->pipe_bpp == 18) + if (crtc_state->dither && crtc_state->pipe_bpp == 18) temp |= LVDS_ENABLE_DITHER; else temp &= ~LVDS_ENABLE_DITHER; @@ -301,7 +304,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state, if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) temp |= LVDS_VSYNC_POLARITY; - intel_de_write(dev_priv, lvds_encoder->reg, temp); + intel_de_write(i915, lvds_encoder->reg, temp); } /* @@ -309,25 +312,22 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state, */ static void intel_enable_lvds(struct intel_atomic_state *state, struct intel_encoder *encoder, - const struct intel_crtc_state *pipe_config, + const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct drm_device *dev = encoder->base.dev; struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - intel_de_write(dev_priv, lvds_encoder->reg, - intel_de_read(dev_priv, lvds_encoder->reg) | LVDS_PORT_EN); + intel_de_rmw(dev_priv, lvds_encoder->reg, 0, LVDS_PORT_EN); - intel_de_write(dev_priv, PP_CONTROL(0), - intel_de_read(dev_priv, PP_CONTROL(0)) | PANEL_POWER_ON); + intel_de_rmw(dev_priv, PP_CONTROL(0), 0, PANEL_POWER_ON); intel_de_posting_read(dev_priv, lvds_encoder->reg); if (intel_de_wait_for_set(dev_priv, PP_STATUS(0), PP_ON, 5000)) drm_err(&dev_priv->drm, "timed out waiting for panel to power on\n"); - intel_backlight_enable(pipe_config, conn_state); + intel_backlight_enable(crtc_state, conn_state); } static void intel_disable_lvds(struct intel_atomic_state *state, @@ -338,14 +338,12 @@ static void intel_disable_lvds(struct intel_atomic_state *state, struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - intel_de_write(dev_priv, PP_CONTROL(0), - 
intel_de_read(dev_priv, PP_CONTROL(0)) & ~PANEL_POWER_ON); + intel_de_rmw(dev_priv, PP_CONTROL(0), PANEL_POWER_ON, 0); if (intel_de_wait_for_clear(dev_priv, PP_STATUS(0), PP_ON, 1000)) drm_err(&dev_priv->drm, "timed out waiting for panel to power off\n"); - intel_de_write(dev_priv, lvds_encoder->reg, - intel_de_read(dev_priv, lvds_encoder->reg) & ~LVDS_PORT_EN); + intel_de_rmw(dev_priv, lvds_encoder->reg, LVDS_PORT_EN, 0); intel_de_posting_read(dev_priv, lvds_encoder->reg); } @@ -386,19 +384,19 @@ static void intel_lvds_shutdown(struct intel_encoder *encoder) } static enum drm_mode_status -intel_lvds_mode_valid(struct drm_connector *connector, +intel_lvds_mode_valid(struct drm_connector *_connector, struct drm_display_mode *mode) { - struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_connector *connector = to_intel_connector(_connector); const struct drm_display_mode *fixed_mode = - intel_panel_fixed_mode(intel_connector, mode); - int max_pixclk = to_i915(connector->dev)->max_dotclk_freq; + intel_panel_fixed_mode(connector, mode); + int max_pixclk = to_i915(connector->base.dev)->max_dotclk_freq; enum drm_mode_status status; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; - status = intel_panel_mode_valid(intel_connector, mode); + status = intel_panel_mode_valid(connector, mode); if (status != MODE_OK) return status; @@ -408,23 +406,21 @@ intel_lvds_mode_valid(struct drm_connector *connector, return MODE_OK; } -static int intel_lvds_compute_config(struct intel_encoder *intel_encoder, - struct intel_crtc_state *pipe_config, +static int intel_lvds_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { - struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); - struct intel_lvds_encoder *lvds_encoder = - to_lvds_encoder(intel_encoder); - struct intel_connector *intel_connector = - lvds_encoder->attached_connector; - struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; - struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); + struct intel_connector *connector = lvds_encoder->attached_connector; + struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); unsigned int lvds_bpp; int ret; /* Should never happen!! 
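/*
 * Illustrative note on the panel-power waits above: intel_de_wait_for_set()
 * and intel_de_wait_for_clear() poll the register until the mask bits
 * reach the wanted state or the timeout (in milliseconds) expires,
 * returning 0 on success, so the drm_err() fires only on timeout:
 */
	if (intel_de_wait_for_set(dev_priv, PP_STATUS(0), PP_ON, 5000))
		drm_err(&dev_priv->drm,
			"timed out waiting for panel to power on\n");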
*/ - if (DISPLAY_VER(dev_priv) < 4 && crtc->pipe == 0) { - drm_err(&dev_priv->drm, "Can't support LVDS on pipe A\n"); + if (DISPLAY_VER(i915) < 4 && crtc->pipe == 0) { + drm_err(&i915->drm, "Can't support LVDS on pipe A\n"); return -EINVAL; } @@ -433,14 +429,14 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder, else lvds_bpp = 6*3; - if (lvds_bpp != pipe_config->pipe_bpp && !pipe_config->bw_constrained) { - drm_dbg_kms(&dev_priv->drm, + if (lvds_bpp != crtc_state->pipe_bpp && !crtc_state->bw_constrained) { + drm_dbg_kms(&i915->drm, "forcing display bpp (was %d) to LVDS (%d)\n", - pipe_config->pipe_bpp, lvds_bpp); - pipe_config->pipe_bpp = lvds_bpp; + crtc_state->pipe_bpp, lvds_bpp); + crtc_state->pipe_bpp = lvds_bpp; } - pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; + crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB; /* * We have timings from the BIOS for the panel, put them in @@ -448,17 +444,17 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder, * with the panel scaling set up to source from the H/VDisplay * of the original mode. */ - ret = intel_panel_compute_config(intel_connector, adjusted_mode); + ret = intel_panel_compute_config(connector, adjusted_mode); if (ret) return ret; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; - if (HAS_PCH_SPLIT(dev_priv)) - pipe_config->has_pch_encoder = true; + if (HAS_PCH_SPLIT(i915)) + crtc_state->has_pch_encoder = true; - ret = intel_panel_fitting(pipe_config, conn_state); + ret = intel_panel_fitting(crtc_state, conn_state); if (ret) return ret; @@ -474,19 +470,19 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder, /* * Return the list of DDC modes if available, or the BIOS fixed mode otherwise. */ -static int intel_lvds_get_modes(struct drm_connector *connector) +static int intel_lvds_get_modes(struct drm_connector *_connector) { - struct intel_connector *intel_connector = to_intel_connector(connector); - const struct drm_edid *fixed_edid = intel_connector->panel.fixed_edid; + struct intel_connector *connector = to_intel_connector(_connector); + const struct drm_edid *fixed_edid = connector->panel.fixed_edid; /* Use panel fixed edid if we have one */ if (!IS_ERR_OR_NULL(fixed_edid)) { - drm_edid_connector_update(connector, fixed_edid); + drm_edid_connector_update(&connector->base, fixed_edid); - return drm_edid_connector_add_modes(connector); + return drm_edid_connector_add_modes(&connector->base); } - return intel_panel_get_modes(intel_connector); + return intel_panel_get_modes(connector); } static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { @@ -585,12 +581,12 @@ static const struct dmi_system_id intel_no_lvds[] = { }, { .callback = intel_no_lvds_dmi_callback, - .ident = "AOpen i45GMx-I", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), - DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"), - }, - }, + .ident = "AOpen i45GMx-I", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), + DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"), + }, + }, { .callback = intel_no_lvds_dmi_callback, .ident = "Aopen i945GTt-VFA", @@ -607,14 +603,14 @@ static const struct dmi_system_id intel_no_lvds[] = { }, }, { - .callback = intel_no_lvds_dmi_callback, - .ident = "Clientron E830", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Clientron"), - DMI_MATCH(DMI_PRODUCT_NAME, "E830"), - }, - }, - { + .callback = intel_no_lvds_dmi_callback, + .ident = "Clientron E830", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Clientron"), + 
DMI_MATCH(DMI_PRODUCT_NAME, "E830"), + }, + }, + { .callback = intel_no_lvds_dmi_callback, .ident = "Asus EeeBox PC EB1007", .matches = { @@ -764,11 +760,11 @@ static const struct dmi_system_id intel_dual_link_lvds[] = { { } /* terminating entry */ }; -struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv) +struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *i915) { struct intel_encoder *encoder; - for_each_intel_encoder(&dev_priv->drm, encoder) { + for_each_intel_encoder(&i915->drm, encoder) { if (encoder->type == INTEL_OUTPUT_LVDS) return encoder; } @@ -776,24 +772,24 @@ struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv) return NULL; } -bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv) +bool intel_is_dual_link_lvds(struct drm_i915_private *i915) { - struct intel_encoder *encoder = intel_get_lvds_encoder(dev_priv); + struct intel_encoder *encoder = intel_get_lvds_encoder(i915); return encoder && to_lvds_encoder(encoder)->is_dual_link; } static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) { - struct drm_i915_private *dev_priv = to_i915(lvds_encoder->base.base.dev); + struct drm_i915_private *i915 = to_i915(lvds_encoder->base.base.dev); struct intel_connector *connector = lvds_encoder->attached_connector; const struct drm_display_mode *fixed_mode = intel_panel_preferred_fixed_mode(connector); unsigned int val; /* use the module option value if specified */ - if (dev_priv->params.lvds_channel_mode > 0) - return dev_priv->params.lvds_channel_mode == 2; + if (i915->params.lvds_channel_mode > 0) + return i915->params.lvds_channel_mode == 2; /* single channel LVDS is limited to 112 MHz */ if (fixed_mode->clock > 112999) @@ -808,8 +804,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) * we need to check "the value to be set" in VBT when LVDS * register is uninitialized. */ - val = intel_de_read(dev_priv, lvds_encoder->reg); - if (HAS_PCH_CPT(dev_priv)) + val = intel_de_read(i915, lvds_encoder->reg); + if (HAS_PCH_CPT(i915)) val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT); else val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK); @@ -826,56 +822,54 @@ static void intel_lvds_add_properties(struct drm_connector *connector) /** * intel_lvds_init - setup LVDS connectors on this device - * @dev_priv: i915 device + * @i915: i915 device * * Create the connector, register the LVDS DDC bus, and try to figure out what * modes we can display on the LVDS panel (if present). */ -void intel_lvds_init(struct drm_i915_private *dev_priv) +void intel_lvds_init(struct drm_i915_private *i915) { struct intel_lvds_encoder *lvds_encoder; - struct intel_encoder *intel_encoder; - struct intel_connector *intel_connector; - struct drm_connector *connector; - struct drm_encoder *encoder; + struct intel_connector *connector; const struct drm_edid *drm_edid; + struct intel_encoder *encoder; i915_reg_t lvds_reg; u32 lvds; u8 pin; /* Skip init on machines we know falsely report LVDS */ if (dmi_check_system(intel_no_lvds)) { - drm_WARN(&dev_priv->drm, !dev_priv->display.vbt.int_lvds_support, + drm_WARN(&i915->drm, !i915->display.vbt.int_lvds_support, "Useless DMI match. 
Internal LVDS support disabled by VBT\n"); return; } - if (!dev_priv->display.vbt.int_lvds_support) { - drm_dbg_kms(&dev_priv->drm, + if (!i915->display.vbt.int_lvds_support) { + drm_dbg_kms(&i915->drm, "Internal LVDS support disabled by VBT\n"); return; } - if (HAS_PCH_SPLIT(dev_priv)) + if (HAS_PCH_SPLIT(i915)) lvds_reg = PCH_LVDS; else lvds_reg = LVDS; - lvds = intel_de_read(dev_priv, lvds_reg); + lvds = intel_de_read(i915, lvds_reg); - if (HAS_PCH_SPLIT(dev_priv)) { + if (HAS_PCH_SPLIT(i915)) { if ((lvds & LVDS_DETECTED) == 0) return; } pin = GMBUS_PIN_PANEL; - if (!intel_bios_is_lvds_present(dev_priv, &pin)) { + if (!intel_bios_is_lvds_present(i915, &pin)) { if ((lvds & LVDS_PORT_EN) == 0) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "LVDS is not present in VBT\n"); return; } - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "LVDS is not present in VBT, but enabled anyway\n"); } @@ -883,57 +877,55 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) if (!lvds_encoder) return; - intel_connector = intel_connector_alloc(); - if (!intel_connector) { + connector = intel_connector_alloc(); + if (!connector) { kfree(lvds_encoder); return; } - lvds_encoder->attached_connector = intel_connector; + lvds_encoder->attached_connector = connector; + encoder = &lvds_encoder->base; - intel_encoder = &lvds_encoder->base; - encoder = &intel_encoder->base; - connector = &intel_connector->base; - drm_connector_init(&dev_priv->drm, &intel_connector->base, &intel_lvds_connector_funcs, + drm_connector_init(&i915->drm, &connector->base, &intel_lvds_connector_funcs, DRM_MODE_CONNECTOR_LVDS); - drm_encoder_init(&dev_priv->drm, &intel_encoder->base, &intel_lvds_enc_funcs, + drm_encoder_init(&i915->drm, &encoder->base, &intel_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS, "LVDS"); - intel_encoder->enable = intel_enable_lvds; - intel_encoder->pre_enable = intel_pre_enable_lvds; - intel_encoder->compute_config = intel_lvds_compute_config; - if (HAS_PCH_SPLIT(dev_priv)) { - intel_encoder->disable = pch_disable_lvds; - intel_encoder->post_disable = pch_post_disable_lvds; + encoder->enable = intel_enable_lvds; + encoder->pre_enable = intel_pre_enable_lvds; + encoder->compute_config = intel_lvds_compute_config; + if (HAS_PCH_SPLIT(i915)) { + encoder->disable = pch_disable_lvds; + encoder->post_disable = pch_post_disable_lvds; } else { - intel_encoder->disable = gmch_disable_lvds; + encoder->disable = gmch_disable_lvds; } - intel_encoder->get_hw_state = intel_lvds_get_hw_state; - intel_encoder->get_config = intel_lvds_get_config; - intel_encoder->update_pipe = intel_backlight_update; - intel_encoder->shutdown = intel_lvds_shutdown; - intel_connector->get_hw_state = intel_connector_get_hw_state; - - intel_connector_attach_encoder(intel_connector, intel_encoder); - - intel_encoder->type = INTEL_OUTPUT_LVDS; - intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER; - intel_encoder->port = PORT_NONE; - intel_encoder->cloneable = 0; - if (DISPLAY_VER(dev_priv) < 4) - intel_encoder->pipe_mask = BIT(PIPE_B); + encoder->get_hw_state = intel_lvds_get_hw_state; + encoder->get_config = intel_lvds_get_config; + encoder->update_pipe = intel_backlight_update; + encoder->shutdown = intel_lvds_shutdown; + connector->get_hw_state = intel_connector_get_hw_state; + + intel_connector_attach_encoder(connector, encoder); + + encoder->type = INTEL_OUTPUT_LVDS; + encoder->power_domain = POWER_DOMAIN_PORT_OTHER; + encoder->port = PORT_NONE; + encoder->cloneable = 0; + if (DISPLAY_VER(i915) < 4) + encoder->pipe_mask = 
BIT(PIPE_B); else - intel_encoder->pipe_mask = ~0; + encoder->pipe_mask = ~0; - drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); - connector->display_info.subpixel_order = SubPixelHorizontalRGB; + drm_connector_helper_add(&connector->base, &intel_lvds_connector_helper_funcs); + connector->base.display_info.subpixel_order = SubPixelHorizontalRGB; lvds_encoder->reg = lvds_reg; - intel_lvds_add_properties(connector); + intel_lvds_add_properties(&connector->base); - intel_lvds_pps_get_hw_state(dev_priv, &lvds_encoder->init_pps); + intel_lvds_pps_get_hw_state(i915, &lvds_encoder->init_pps); lvds_encoder->init_lvds_val = lvds; /* @@ -948,13 +940,13 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) * Attempt to get the fixed panel mode from DDC. Assume that the * preferred mode is the right one. */ - mutex_lock(&dev_priv->drm.mode_config.mutex); + mutex_lock(&i915->drm.mode_config.mutex); if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC) { const struct edid *edid; /* FIXME: Make drm_get_edid_switcheroo() return drm_edid */ - edid = drm_get_edid_switcheroo(connector, - intel_gmbus_get_adapter(dev_priv, pin)); + edid = drm_get_edid_switcheroo(&connector->base, + intel_gmbus_get_adapter(i915, pin)); if (edid) { drm_edid = drm_edid_alloc(edid, (edid->extensions + 1) * EDID_LENGTH); kfree(edid); @@ -962,49 +954,49 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) drm_edid = NULL; } } else { - drm_edid = drm_edid_read_ddc(connector, - intel_gmbus_get_adapter(dev_priv, pin)); + drm_edid = drm_edid_read_ddc(&connector->base, + intel_gmbus_get_adapter(i915, pin)); } if (drm_edid) { - if (drm_edid_connector_update(connector, drm_edid) || - !drm_edid_connector_add_modes(connector)) { - drm_edid_connector_update(connector, NULL); + if (drm_edid_connector_update(&connector->base, drm_edid) || + !drm_edid_connector_add_modes(&connector->base)) { + drm_edid_connector_update(&connector->base, NULL); drm_edid_free(drm_edid); drm_edid = ERR_PTR(-EINVAL); } } else { drm_edid = ERR_PTR(-ENOENT); } - intel_bios_init_panel_late(dev_priv, &intel_connector->panel, NULL, + intel_bios_init_panel_late(i915, &connector->panel, NULL, IS_ERR(drm_edid) ? NULL : drm_edid); /* Try EDID first */ - intel_panel_add_edid_fixed_modes(intel_connector, true); + intel_panel_add_edid_fixed_modes(connector, true); /* Failed to get EDID, what about VBT? */ - if (!intel_panel_preferred_fixed_mode(intel_connector)) - intel_panel_add_vbt_lfp_fixed_mode(intel_connector); + if (!intel_panel_preferred_fixed_mode(connector)) + intel_panel_add_vbt_lfp_fixed_mode(connector); /* * If we didn't get a fixed mode from EDID or VBT, try checking * if the panel is already turned on. If so, assume that * whatever is currently programmed is the correct mode. */ - if (!intel_panel_preferred_fixed_mode(intel_connector)) - intel_panel_add_encoder_fixed_mode(intel_connector, intel_encoder); + if (!intel_panel_preferred_fixed_mode(connector)) + intel_panel_add_encoder_fixed_mode(connector, encoder); - mutex_unlock(&dev_priv->drm.mode_config.mutex); + mutex_unlock(&i915->drm.mode_config.mutex); /* If we still don't have a mode after all that, give up. 
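/*
 * Illustrative recap of the probe order established above, since the
 * variable renames make it easy to lose the thread: the fixed panel mode
 * is taken from the first source that yields one,
 *
 *	1. EDID (vga_switcheroo or DDC read),
 *	2. VBT LFP data,
 *	3. whatever mode the encoder is currently programmed with,
 *
 * and only if all three fail does init bail out via the "No LVDS modes
 * found" path below.
 */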
*/ - if (!intel_panel_preferred_fixed_mode(intel_connector)) + if (!intel_panel_preferred_fixed_mode(connector)) goto failed; - intel_panel_init(intel_connector, drm_edid); + intel_panel_init(connector, drm_edid); - intel_backlight_setup(intel_connector, INVALID_PIPE); + intel_backlight_setup(connector, INVALID_PIPE); lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder); - drm_dbg_kms(&dev_priv->drm, "detected %s-link lvds configuration\n", + drm_dbg_kms(&i915->drm, "detected %s-link lvds configuration\n", lvds_encoder->is_dual_link ? "dual" : "single"); lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK; @@ -1012,10 +1004,10 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) return; failed: - drm_dbg_kms(&dev_priv->drm, "No LVDS modes found, disabling.\n"); - drm_connector_cleanup(connector); - drm_encoder_cleanup(encoder); + drm_dbg_kms(&i915->drm, "No LVDS modes found, disabling.\n"); + drm_connector_cleanup(&connector->base); + drm_encoder_cleanup(&encoder->base); kfree(lvds_encoder); - intel_connector_free(intel_connector); + intel_connector_free(connector); return; } diff --git a/drivers/gpu/drm/i915/display/intel_lvds_regs.h b/drivers/gpu/drm/i915/display/intel_lvds_regs.h new file mode 100644 index 000000000000..47c1832819ee --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_lvds_regs.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __INTEL_LVDS_REGS_H__ +#define __INTEL_LVDS_REGS_H__ + +#include "intel_display_reg_defs.h" + +/* LVDS port control */ +#define LVDS _MMIO(0x61180) +/* + * Enables the LVDS port. This bit must be set before DPLLs are enabled, as + * the DPLL semantics change when the LVDS is assigned to that pipe. + */ +#define LVDS_PORT_EN REG_BIT(31) +/* Selects pipe B for LVDS data. Must be set on pre-965. */ +#define LVDS_PIPE_SEL_MASK REG_BIT(30) +#define LVDS_PIPE_SEL(pipe) REG_FIELD_PREP(LVDS_PIPE_SEL_MASK, (pipe)) +#define LVDS_PIPE_SEL_MASK_CPT REG_GENMASK(30, 29) +#define LVDS_PIPE_SEL_CPT(pipe) REG_FIELD_PREP(LVDS_PIPE_SEL_MASK_CPT, (pipe)) +/* LVDS dithering flag on 965/g4x platform */ +#define LVDS_ENABLE_DITHER REG_BIT(25) +/* LVDS sync polarity flags. Set to invert (i.e. negative) */ +#define LVDS_VSYNC_POLARITY REG_BIT(21) +#define LVDS_HSYNC_POLARITY REG_BIT(20) + +/* Enable border for unscaled (or aspect-scaled) display */ +#define LVDS_BORDER_ENABLE REG_BIT(15) +/* + * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per + * pixel. + */ +#define LVDS_A0A2_CLKA_POWER_MASK REG_GENMASK(9, 8) +#define LVDS_A0A2_CLKA_POWER_DOWN REG_FIELD_PREP(LVDS_A0A2_CLKA_POWER_MASK, 0) +#define LVDS_A0A2_CLKA_POWER_UP REG_FIELD_PREP(LVDS_A0A2_CLKA_POWER_MASK, 3) +/* + * Controls the A3 data pair, which contains the additional LSBs for 24 bit + * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be + * on. + */ +#define LVDS_A3_POWER_MASK REG_GENMASK(7, 6) +#define LVDS_A3_POWER_DOWN REG_FIELD_PREP(LVDS_A3_POWER_MASK, 0) +#define LVDS_A3_POWER_UP REG_FIELD_PREP(LVDS_A3_POWER_MASK, 3) +/* + * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP + * is set. + */ +#define LVDS_CLKB_POWER_MASK REG_GENMASK(5, 4) +#define LVDS_CLKB_POWER_DOWN REG_FIELD_PREP(LVDS_CLKB_POWER_MASK, 0) +#define LVDS_CLKB_POWER_UP REG_FIELD_PREP(LVDS_CLKB_POWER_MASK, 3) +/* + * Controls the B0-B3 data pairs. This must be set to match the DPLL p2 + * setting for whether we are in dual-channel mode. 
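/*
 * Illustrative sketch tying the CLKB/B0-B3 register comments here to
 * compute_is_dual_link_lvds() from earlier in this file: dual-link means
 * the second channel's pairs carry data, and the decision reduces to the
 * module parameter, then the pixel clock limit, then the CLKB power
 * field (parameters below are stand-ins for the driver's state):
 */
static bool sketch_is_dual_link(int lvds_channel_mode,
				int fixed_mode_clock_khz, u32 lvds_val)
{
	if (lvds_channel_mode > 0)		/* module parameter override */
		return lvds_channel_mode == 2;
	if (fixed_mode_clock_khz > 112999)	/* single channel tops out at 112 MHz */
		return true;
	return (lvds_val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}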
The B3 pair will + * additionally only be powered up when LVDS_A3_POWER_UP is set. + */ +#define LVDS_B0B3_POWER_MASK REG_GENMASK(3, 2) +#define LVDS_B0B3_POWER_DOWN REG_FIELD_PREP(LVDS_B0B3_POWER_MASK, 0) +#define LVDS_B0B3_POWER_UP REG_FIELD_PREP(LVDS_B0B3_POWER_MASK, 3) + +#define PCH_LVDS _MMIO(0xe1180) +#define LVDS_DETECTED REG_BIT(1) + +#endif /* __INTEL_LVDS_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h b/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h index 0e8248bce52d..0306ade2bc30 100644 --- a/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h +++ b/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h @@ -142,7 +142,9 @@ #define FIA1_BASE 0x163000 #define FIA2_BASE 0x16E000 #define FIA3_BASE 0x16F000 -#define _FIA(fia) _PICK((fia), FIA1_BASE, FIA2_BASE, FIA3_BASE) +#define _FIA(fia) _PICK_EVEN_2RANGES((fia), 1, \ + FIA1_BASE, FIA1_BASE,\ + FIA2_BASE, FIA3_BASE) #define _MMIO_FIA(fia, off) _MMIO(_FIA(fia) + (off)) /* ICL PHY DFLEX registers */ diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c index 52cdbd4fc2fa..1d0c9e247c42 100644 --- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c +++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c @@ -11,6 +11,7 @@ #include "i915_drv.h" #include "i915_reg.h" +#include "i9xx_wm.h" #include "intel_atomic.h" #include "intel_bw.h" #include "intel_color.h" @@ -21,9 +22,11 @@ #include "intel_display.h" #include "intel_display_power.h" #include "intel_display_types.h" +#include "intel_dmc.h" +#include "intel_fifo_underrun.h" #include "intel_modeset_setup.h" #include "intel_pch_display.h" -#include "intel_pm.h" +#include "intel_wm.h" #include "skl_watermark.h" static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, @@ -234,12 +237,9 @@ static void intel_sanitize_fifo_underrun_reporting(const struct intel_crtc_state struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); - if (!crtc_state->hw.active && !HAS_GMCH(i915)) - return; - /* - * We start out with underrun reporting disabled to avoid races. - * For correct bookkeeping mark this on active crtcs. + * We start out with underrun reporting disabled on active + * pipes to avoid races. * * Also on gmch platforms we dont have any hardware bits to * disable the underrun reporting. Which means we need to start @@ -250,19 +250,9 @@ static void intel_sanitize_fifo_underrun_reporting(const struct intel_crtc_state * No protection against concurrent access is required - at * worst a fifo underrun happens which also sets this to false. */ - crtc->cpu_fifo_underrun_disabled = true; - - /* - * We track the PCH trancoder underrun reporting state - * within the crtc. With crtc for pipe A housing the underrun - * reporting state for PCH transcoder A, crtc for pipe B housing - * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A, - * and marking underrun reporting as disabled for the non-existing - * PCH transcoders B and C would prevent enabling the south - * error interrupt (see cpt_can_enable_serr_int()). - */ - if (intel_has_pch_trancoder(i915, crtc->pipe)) - crtc->pch_fifo_underrun_disabled = true; + intel_init_fifo_underrun_reporting(i915, crtc, + !crtc_state->hw.active && + !HAS_GMCH(i915)); } static void intel_sanitize_crtc(struct intel_crtc *crtc, @@ -647,17 +637,14 @@ static void intel_early_display_was(struct drm_i915_private *i915) * Also known as Wa_14010480278. 
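/*
 * Illustrative worked example for the new _FIA() definition above,
 * assuming the usual _PICK_EVEN(i, a, b) = (a) + (i) * ((b) - (a))
 * expansion and the range cut-off of 1:
 *
 *	_FIA(0) = FIA1_BASE                  = 0x163000
 *	_FIA(1) = FIA2_BASE + 0 * 0x1000     = 0x16e000
 *	_FIA(2) = FIA2_BASE + 1 * 0x1000     = 0x16f000
 *
 * matching the old _PICK() table while dropping its variadic lookup:
 */
static u32 sketch_fia_base(unsigned int fia)
{
	return fia < 1 ? 0x163000 : 0x16e000 + (fia - 1) * 0x1000;
}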
*/ if (IS_DISPLAY_VER(i915, 10, 12)) - intel_de_write(i915, GEN9_CLKGATE_DIS_0, - intel_de_read(i915, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS); + intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0, DARBF_GATING_DIS); - if (IS_HASWELL(i915)) { - /* - * WaRsPkgCStateDisplayPMReq:hsw - * System hang if this isn't done before disabling all planes! - */ - intel_de_write(i915, CHICKEN_PAR1_1, - intel_de_read(i915, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES); - } + /* + * WaRsPkgCStateDisplayPMReq:hsw + * System hang if this isn't done before disabling all planes! + */ + if (IS_HASWELL(i915)) + intel_de_rmw(i915, CHICKEN_PAR1_1, 0, FORCE_ARB_IDLE_PLANES); if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) { /* Display WA #1142:kbl,cfl,cml */ @@ -723,18 +710,7 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915, intel_dpll_sanitize_state(i915); - if (IS_G4X(i915)) { - g4x_wm_get_hw_state(i915); - g4x_wm_sanitize(i915); - } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { - vlv_wm_get_hw_state(i915); - vlv_wm_sanitize(i915); - } else if (DISPLAY_VER(i915) >= 9) { - skl_wm_get_hw_state(i915); - skl_wm_sanitize(i915); - } else if (HAS_PCH_SPLIT(i915)) { - ilk_wm_get_hw_state(i915); - } + intel_wm_get_hw_state(i915); for_each_intel_crtc(&i915->drm, crtc) { struct intel_crtc_state *crtc_state = diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index 42aa04bac261..ce2a34a25211 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -39,6 +39,7 @@ #include "intel_de.h" #include "intel_display_types.h" #include "intel_drrs.h" +#include "intel_lvds_regs.h" #include "intel_panel.h" #include "intel_quirks.h" diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c index cecc0d007cf3..22507da0b5f0 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_display.c +++ b/drivers/gpu/drm/i915/display/intel_pch_display.c @@ -10,6 +10,7 @@ #include "intel_display_types.h" #include "intel_fdi.h" #include "intel_lvds.h" +#include "intel_lvds_regs.h" #include "intel_pch_display.h" #include "intel_pch_refclk.h" #include "intel_pps.h" @@ -219,20 +220,20 @@ static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_s enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder), - intel_de_read(dev_priv, HTOTAL(cpu_transcoder))); + intel_de_read(dev_priv, TRANS_HTOTAL(cpu_transcoder))); intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder), - intel_de_read(dev_priv, HBLANK(cpu_transcoder))); + intel_de_read(dev_priv, TRANS_HBLANK(cpu_transcoder))); intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder), - intel_de_read(dev_priv, HSYNC(cpu_transcoder))); + intel_de_read(dev_priv, TRANS_HSYNC(cpu_transcoder))); intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder), - intel_de_read(dev_priv, VTOTAL(cpu_transcoder))); + intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder))); intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder), - intel_de_read(dev_priv, VBLANK(cpu_transcoder))); + intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder))); intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder), - intel_de_read(dev_priv, VSYNC(cpu_transcoder))); + intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder))); intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder), - intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder))); + intel_de_read(dev_priv, 
TRANS_VSYNCSHIFT(cpu_transcoder))); } static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) @@ -266,7 +267,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) reg = PCH_TRANSCONF(pipe); val = intel_de_read(dev_priv, reg); - pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe)); + pipeconf_val = intel_de_read(dev_priv, TRANSCONF(pipe)); if (HAS_PCH_IBX(dev_priv)) { /* Configure frame start delay to match the CPU */ @@ -278,15 +279,15 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) * that in pipeconf reg. For HDMI we must use 8bpc * here for both 8bpc and 12bpc. */ - val &= ~PIPECONF_BPC_MASK; + val &= ~TRANSCONF_BPC_MASK; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - val |= PIPECONF_BPC_8; + val |= TRANSCONF_BPC_8; else - val |= pipeconf_val & PIPECONF_BPC_MASK; + val |= pipeconf_val & TRANSCONF_BPC_MASK; } val &= ~TRANS_INTERLACE_MASK; - if ((pipeconf_val & PIPECONF_INTERLACE_MASK_ILK) == PIPECONF_INTERLACE_IF_ID_ILK) { + if ((pipeconf_val & TRANSCONF_INTERLACE_MASK_ILK) == TRANSCONF_INTERLACE_IF_ID_ILK) { if (HAS_PCH_IBX(dev_priv) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) val |= TRANS_INTERLACE_LEGACY_VSYNC_IBX; @@ -307,7 +308,6 @@ static void ilk_disable_pch_transcoder(struct intel_crtc *crtc) struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; i915_reg_t reg; - u32 val; /* FDI relies on the transcoder */ assert_fdi_tx_disabled(dev_priv, pipe); @@ -317,21 +317,16 @@ static void ilk_disable_pch_transcoder(struct intel_crtc *crtc) assert_pch_ports_disabled(dev_priv, pipe); reg = PCH_TRANSCONF(pipe); - val = intel_de_read(dev_priv, reg); - val &= ~TRANS_ENABLE; - intel_de_write(dev_priv, reg, val); + intel_de_rmw(dev_priv, reg, TRANS_ENABLE, 0); /* wait for PCH transcoder off, transcoder state */ if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50)) drm_err(&dev_priv->drm, "failed to disable transcoder %c\n", pipe_name(pipe)); - if (HAS_PCH_CPT(dev_priv)) { + if (HAS_PCH_CPT(dev_priv)) /* Workaround: Clear the timing override chicken bit again. 
*/ - reg = TRANS_CHICKEN2(pipe); - val = intel_de_read(dev_priv, reg); - val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; - intel_de_write(dev_priv, reg, val); - } + intel_de_rmw(dev_priv, TRANS_CHICKEN2(pipe), + TRANS_CHICKEN2_TIMING_OVERRIDE, 0); } void ilk_pch_pre_enable(struct intel_atomic_state *state, @@ -414,7 +409,7 @@ void ilk_pch_enable(struct intel_atomic_state *state, intel_crtc_has_dp_encoder(crtc_state)) { const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; + u32 bpc = (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) >> 5; i915_reg_t reg = TRANS_DP_CTL(pipe); enum port port; @@ -456,21 +451,14 @@ void ilk_pch_post_disable(struct intel_atomic_state *state, ilk_disable_pch_transcoder(crtc); if (HAS_PCH_CPT(dev_priv)) { - i915_reg_t reg; - u32 temp; - /* disable TRANS_DP_CTL */ - reg = TRANS_DP_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - temp &= ~(TRANS_DP_OUTPUT_ENABLE | - TRANS_DP_PORT_SEL_MASK); - temp |= TRANS_DP_PORT_SEL_NONE; - intel_de_write(dev_priv, reg, temp); + intel_de_rmw(dev_priv, TRANS_DP_CTL(pipe), + TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK, + TRANS_DP_PORT_SEL_NONE); /* disable DPLL_SEL */ - temp = intel_de_read(dev_priv, PCH_DPLL_SEL); - temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe)); - intel_de_write(dev_priv, PCH_DPLL_SEL, temp); + intel_de_rmw(dev_priv, PCH_DPLL_SEL, + TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe), 0); } ilk_fdi_pll_disable(crtc); @@ -565,9 +553,9 @@ static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); val = TRANS_ENABLE; - pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); + pipeconf_val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)); - if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == PIPECONF_INTERLACE_IF_ID_ILK) + if ((pipeconf_val & TRANSCONF_INTERLACE_MASK_HSW) == TRANSCONF_INTERLACE_IF_ID_ILK) val |= TRANS_INTERLACE_INTERLACED; else val |= TRANS_INTERLACE_PROGRESSIVE; @@ -580,20 +568,14 @@ static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) { - u32 val; - - val = intel_de_read(dev_priv, LPT_TRANSCONF); - val &= ~TRANS_ENABLE; - intel_de_write(dev_priv, LPT_TRANSCONF, val); + intel_de_rmw(dev_priv, LPT_TRANSCONF, TRANS_ENABLE, 0); /* wait for PCH transcoder off, transcoder state */ if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF, TRANS_STATE_ENABLE, 50)) drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n"); /* Workaround: clear timing override bit. 
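Several hunks here and below fold open-coded read/modify/write sequences into intel_de_rmw(). A minimal sketch of the semantics those conversions rely on (illustrative only; the real helper is declared in intel_de.h, forwards to the uncore layer, and likewise returns the pre-modification value):

	static u32 sketch_de_rmw(struct drm_i915_private *i915,
				 i915_reg_t reg, u32 clear, u32 set)
	{
		/* read, drop the "clear" bits, or in the "set" bits, write back */
		u32 old = intel_de_read(i915, reg);

		intel_de_write(i915, reg, (old & ~clear) | set);

		return old;
	}

So intel_de_rmw(dev_priv, reg, TRANS_ENABLE, 0) clears the enable bit exactly like the removed read/mask/write triplet, and intel_de_rmw(dev_priv, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL) sets a bit while clearing none.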
*/ - val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A)); - val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; - intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); + intel_de_rmw(dev_priv, TRANS_CHICKEN2(PIPE_A), TRANS_CHICKEN2_TIMING_OVERRIDE, 0); } void lpt_pch_enable(struct intel_atomic_state *state, diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c index 3657b2940702..f4c09cc37a5e 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c +++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c @@ -12,19 +12,13 @@ static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv) { - u32 tmp; - - tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); - tmp |= FDI_MPHY_IOSFSB_RESET_CTL; - intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp); + intel_de_rmw(dev_priv, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL); if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) & FDI_MPHY_IOSFSB_RESET_STATUS, 100)) drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n"); - tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); - tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; - intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp); + intel_de_rmw(dev_priv, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0); if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) & FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index 7b21438edd9b..24b5b12f7732 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -13,6 +13,7 @@ #include "intel_dpio_phy.h" #include "intel_dpll.h" #include "intel_lvds.h" +#include "intel_lvds_regs.h" #include "intel_pps.h" #include "intel_quirks.h" @@ -1534,17 +1535,13 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd /* * Compute the divisor for the pp clock, simply match the Bspec formula. 
*/ - if (i915_mmio_reg_valid(regs.pp_div)) { + if (i915_mmio_reg_valid(regs.pp_div)) intel_de_write(dev_priv, regs.pp_div, REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000))); - } else { - u32 pp_ctl; - - pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl); - pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK; - pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)); - intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl); - } + else + intel_de_rmw(dev_priv, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK, + REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, + DIV_ROUND_UP(seq->t11_t12, 1000))); drm_dbg_kms(&dev_priv->drm, "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 7a72e15e6836..44610b20cd29 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -152,7 +152,7 @@ static void psr_irq_control(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); i915_reg_t imr_reg; - u32 mask, val; + u32 mask; if (DISPLAY_VER(dev_priv) >= 12) imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder); @@ -164,10 +164,7 @@ static void psr_irq_control(struct intel_dp *intel_dp) mask |= psr_irq_post_exit_bit_get(intel_dp) | psr_irq_pre_entry_bit_get(intel_dp); - val = intel_de_read(dev_priv, imr_reg); - val &= ~psr_irq_mask_get(intel_dp); - val |= ~mask; - intel_de_write(dev_priv, imr_reg, val); + intel_de_rmw(dev_priv, imr_reg, psr_irq_mask_get(intel_dp), ~mask); } static void psr_event_print(struct drm_i915_private *i915, @@ -245,8 +242,6 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir) } if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) { - u32 val; - drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n", transcoder_name(cpu_transcoder)); @@ -260,9 +255,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir) * again so we don't care about unmasking the interrupt * or unsetting irq_aux_error.
*/ - val = intel_de_read(dev_priv, imr_reg); - val |= psr_irq_psr_error_bit_get(intel_dp); - intel_de_write(dev_priv, imr_reg, val); + intel_de_rmw(dev_priv, imr_reg, 0, psr_irq_psr_error_bit_get(intel_dp)); schedule_work(&intel_dp->psr.work); } @@ -542,6 +535,14 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2)); val |= intel_psr2_get_tp_time(intel_dp); + if (DISPLAY_VER(dev_priv) >= 12) { + if (intel_dp->psr.io_wake_lines < 9 && + intel_dp->psr.fast_wake_lines < 9) + val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2; + else + val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3; + } + /* Wa_22012278275:adl-p */ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) { static const u8 map[] = { @@ -558,31 +559,21 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see * comments below for more information */ - u32 tmp, lines = 7; - - val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2; + u32 tmp; - tmp = map[lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES]; + tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES]; tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT; val |= tmp; - tmp = map[lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES]; + tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES]; tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT; val |= tmp; } else if (DISPLAY_VER(dev_priv) >= 12) { - /* - * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default - * values from BSpec. In order to setting an optimal power - * consumption, lower than 4k resolution mode needs to decrease - * IO_BUFFER_WAKE and FAST_WAKE. And higher than 4K resolution - * mode needs to increase IO_BUFFER_WAKE and FAST_WAKE. - */ - val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2; - val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7); - val |= TGL_EDP_PSR2_FAST_WAKE(7); + val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines); + val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines); } else if (DISPLAY_VER(dev_priv) >= 9) { - val |= EDP_PSR2_IO_BUFFER_WAKE(7); - val |= EDP_PSR2_FAST_WAKE(7); + val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines); + val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines); } if (intel_dp->psr.req_psr2_sdp_prior_scanline) @@ -591,12 +582,6 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) if (intel_dp->psr.psr2_sel_fetch_enabled) { u32 tmp; - /* Wa_1408330847 */ - if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) - intel_de_rmw(dev_priv, CHICKEN_PAR1_1, - DIS_RAM_BYPASS_PSR2_MAN_TRACK, - DIS_RAM_BYPASS_PSR2_MAN_TRACK); - tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder)); drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE)); } else if (HAS_PSR2_SEL_FETCH(dev_priv)) { @@ -637,13 +622,10 @@ static void psr2_program_idle_frames(struct intel_dp *intel_dp, u32 idle_frames) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u32 val; idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT; - val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder)); - val &= ~EDP_PSR2_IDLE_FRAME_MASK; - val |= idle_frames; - intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val); + intel_de_rmw(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), + EDP_PSR2_IDLE_FRAME_MASK, idle_frames); } static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp) @@ -708,6 +690,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp, { const u32 crtc_vdisplay =
crtc_state->uapi.adjusted_mode.crtc_vdisplay; struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; u32 exit_scanlines; /* @@ -724,7 +707,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp, if (crtc_state->enable_psr2_sel_fetch) return; - if (!(dev_priv->display.dmc.allowed_dc_mask & DC_STATE_EN_DC3CO)) + if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO)) return; if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state)) @@ -765,13 +748,6 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp, return false; } - /* Wa_14010254185 Wa_14010103792 */ - if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) { - drm_dbg_kms(&dev_priv->drm, - "PSR2 sel fetch not enabled, missing the implementation of WAs\n"); - return false; - } - return crtc_state->enable_psr2_sel_fetch = true; } @@ -842,6 +818,46 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d return true; } +static bool _compute_psr2_wake_times(struct intel_dp *intel_dp, + struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time; + u8 max_wake_lines; + + if (DISPLAY_VER(i915) >= 12) { + io_wake_time = 42; + /* + * According to Bspec it's 42us, but based on testing + * it is not enough -> use 45 us. + */ + fast_wake_time = 45; + max_wake_lines = 12; + } else { + io_wake_time = 50; + fast_wake_time = 32; + max_wake_lines = 8; + } + + io_wake_lines = intel_usecs_to_scanlines( + &crtc_state->uapi.adjusted_mode, io_wake_time); + fast_wake_lines = intel_usecs_to_scanlines( + &crtc_state->uapi.adjusted_mode, fast_wake_time); + + if (io_wake_lines > max_wake_lines || + fast_wake_lines > max_wake_lines) + return false; + + if (i915->params.psr_safest_params) + io_wake_lines = fast_wake_lines = max_wake_lines; + + /* According to Bspec the lower limit should be set to 7 lines.
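One worked pass through the new helper, with illustrative mode numbers rather than anything from a Bspec table: assume a mode with crtc_clock = 241500 kHz and crtc_htotal = 2720. intel_usecs_to_scanlines() converts a duration to lines via the pixel clock and line length, rounding up, so the 42 us TGL io_wake_time becomes

	io_wake_lines = DIV_ROUND_UP(42 * 241500, 1000 * 2720) = 4

which is under the Bspec floor, and the max(io_wake_lines, 7) clamp just below raises it to 7 lines.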
*/ + intel_dp->psr.io_wake_lines = max(io_wake_lines, 7); + intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7); + + return true; +} + static bool intel_psr2_config_valid(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { @@ -936,6 +952,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, return false; } + if (!_compute_psr2_wake_times(intel_dp, crtc_state)) { + drm_dbg_kms(&dev_priv->drm, + "PSR2 not enabled, Unable to use long enough wake times\n"); + return false; + } + if (HAS_PSR2_SEL_FETCH(dev_priv)) { if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) && !HAS_PSR_HW_TRACKING(dev_priv)) { @@ -945,13 +967,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, } } - /* Wa_2209313811 */ - if (!crtc_state->enable_psr2_sel_fetch && - IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) { - drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n"); - goto unsupported; - } - if (!psr2_granularity_check(intel_dp, crtc_state)) { drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n"); goto unsupported; @@ -1071,7 +1086,7 @@ void intel_psr_get_config(struct intel_encoder *encoder, } if (DISPLAY_VER(dev_priv) >= 12) { - val = intel_de_read(dev_priv, EXITLINE(intel_dp->psr.transcoder)); + val = intel_de_read(dev_priv, TRANS_EXITLINE(intel_dp->psr.transcoder)); val &= EXITLINE_MASK; pipe_config->dc3co_exitline = val; } @@ -1145,19 +1160,13 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, psr_irq_control(intel_dp); - if (intel_dp->psr.dc3co_exitline) { - u32 val; - - /* - * TODO: if future platforms supports DC3CO in more than one - * transcoder, EXITLINE will need to be unset when disabling PSR - */ - val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder)); - val &= ~EXITLINE_MASK; - val |= intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT; - val |= EXITLINE_ENABLE; - intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val); - } + /* + * TODO: if future platforms support DC3CO in more than one + * transcoder, EXITLINE will need to be unset when disabling PSR + */ + if (intel_dp->psr.dc3co_exitline) + intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK, + intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE); if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv)) intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING, @@ -1170,13 +1179,8 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, */ if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) || IS_DISPLAY_VER(dev_priv, 12, 13)) { - u16 vtotal, vblank; - - vtotal = crtc_state->uapi.adjusted_mode.crtc_vtotal - - crtc_state->uapi.adjusted_mode.crtc_vdisplay; - vblank = crtc_state->uapi.adjusted_mode.crtc_vblank_end - - crtc_state->uapi.adjusted_mode.crtc_vblank_start; - if (vblank > vtotal) + if (crtc_state->hw.adjusted_mode.crtc_vblank_start != + crtc_state->hw.adjusted_mode.crtc_vdisplay) intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, wa_16013835468_bit_get(intel_dp)); } @@ -1199,13 +1203,6 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0, ADLP_1_BASED_X_GRANULARITY); - /* Wa_16011168373:adl-p */ - if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) - intel_de_rmw(dev_priv, - TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder), - TRANS_SET_CONTEXT_LATENCY_MASK, - TRANS_SET_CONTEXT_LATENCY_VALUE(1)); - /* Wa_16012604467:adlp,mtl[a0,b0] */ if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
intel_de_rmw(dev_priv, @@ -1360,12 +1357,6 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) intel_psr_exit(intel_dp); intel_psr_wait_exit_locked(intel_dp); - /* Wa_1408330847 */ - if (intel_dp->psr.psr2_sel_fetch_enabled && - IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) - intel_de_rmw(dev_priv, CHICKEN_PAR1_1, - DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0); - /* * Wa_16013835468 * Wa_14015648006 @@ -1376,12 +1367,6 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) wa_16013835468_bit_get(intel_dp), 0); if (intel_dp->psr.psr2_enabled) { - /* Wa_16011168373:adl-p */ - if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) - intel_de_rmw(dev_priv, - TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder), - TRANS_SET_CONTEXT_LATENCY_MASK, 0); - /* Wa_16012604467:adlp,mtl[a0,b0] */ if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) intel_de_rmw(dev_priv, @@ -1547,8 +1532,8 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp) intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0); } -void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) +void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; @@ -1559,10 +1544,28 @@ void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane, intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0); } -void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - int color_plane) +void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + enum pipe pipe = plane->pipe; + + if (!crtc_state->enable_psr2_sel_fetch) + return; + + if (plane->id == PLANE_CURSOR) + intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id), + plane_state->ctl); + else + intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id), + PLANE_SEL_FETCH_CTL_ENABLE); +} + +void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + int color_plane) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; @@ -1573,11 +1576,8 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane, if (!crtc_state->enable_psr2_sel_fetch) return; - if (plane->id == PLANE_CURSOR) { - intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), - plane_state->ctl); + if (plane->id == PLANE_CURSOR) return; - } clip = &plane_state->psr2_sel_fetch_area; @@ -1605,9 +1605,6 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane, val = (drm_rect_height(clip) - 1) << 16; val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1; intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val); - - intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), - PLANE_SEL_FETCH_CTL_ENABLE); } void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state) diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index 2ac3a46cccc5..7a38a9e7fa5b 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h 
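The rename above splits selective-fetch plane programming into the driver's usual noarm/arm pattern: the double-buffered area/size registers are written first, and the PLANE_SEL_FETCH_CTL write that arms the update is deferred until the end. A hypothetical caller, only to show the intended ordering (the real call sites are the skl/icl plane update paths, which are not part of this hunk):

	static void sketch_sel_fetch_update(struct intel_plane *plane,
					    const struct intel_crtc_state *crtc_state,
					    const struct intel_plane_state *plane_state)
	{
		/* safe to program ahead of time: nothing latches yet */
		intel_psr2_program_plane_sel_fetch_noarm(plane, crtc_state,
							 plane_state, 0);

		/* the arming CTL write goes last */
		intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state,
						       plane_state);
	}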
@@ -46,12 +46,16 @@ bool intel_psr_enabled(struct intel_dp *intel_dp); int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, struct intel_crtc *crtc); void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state); -void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - int color_plane); -void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state); +void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + int color_plane); +void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); + +void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state); void intel_psr_pause(struct intel_dp *intel_dp); void intel_psr_resume(struct intel_dp *intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c index c65c771f5c46..1cfb94b5cedb 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.c +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c @@ -1419,6 +1419,36 @@ static const struct intel_mpllb_state dg2_hdmi_262750 = { REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; +static const struct intel_mpllb_state dg2_hdmi_267300 = { + .clock = 267300, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 74) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 30146) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 36699), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + static const struct intel_mpllb_state dg2_hdmi_268500 = { .clock = 268500, .ref_control = @@ -1509,6 +1539,36 @@ static const struct intel_mpllb_state dg2_hdmi_241500 = { REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; +static const struct intel_mpllb_state dg2_hdmi_319890 = { + .clock = 319890, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + 
REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 94) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 64094) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13631), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + static const struct intel_mpllb_state dg2_hdmi_497750 = { .clock = 497750, .ref_control = @@ -1696,8 +1756,10 @@ static const struct intel_mpllb_state * const dg2_hdmi_tables[] = { &dg2_hdmi_209800, &dg2_hdmi_241500, &dg2_hdmi_262750, + &dg2_hdmi_267300, &dg2_hdmi_268500, &dg2_hdmi_296703, + &dg2_hdmi_319890, &dg2_hdmi_497750, &dg2_hdmi_592000, &dg2_hdmi_593407, diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index e6b4d24b9cd0..a16e56a60c30 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -1217,7 +1217,8 @@ g4x_sprite_update_arm(struct intel_plane *plane, } intel_de_write_fw(dev_priv, DVSLINOFF(pipe), linear_offset); - intel_de_write_fw(dev_priv, DVSTILEOFF(pipe), (y << 16) | x); + intel_de_write_fw(dev_priv, DVSTILEOFF(pipe), + DVS_OFFSET_Y(y) | DVS_OFFSET_X(x)); /* * The control register self-arms if the plane was previously diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index b986bf075889..3b5ff84dc615 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -930,8 +930,7 @@ intel_enable_tv(struct intel_atomic_state *state, /* Prevents vblank waits from timing out in intel_tv_detect_type() */ intel_crtc_wait_for_next_vblank(to_intel_crtc(pipe_config->uapi.crtc)); - intel_de_write(dev_priv, TV_CTL, - intel_de_read(dev_priv, TV_CTL) | TV_ENC_ENABLE); + intel_de_rmw(dev_priv, TV_CTL, 0, TV_ENC_ENABLE); } static void @@ -943,8 +942,7 @@ intel_disable_tv(struct intel_atomic_state *state, struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - intel_de_write(dev_priv, TV_CTL, - intel_de_read(dev_priv, TV_CTL) & ~TV_ENC_ENABLE); + intel_de_rmw(dev_priv, TV_CTL, TV_ENC_ENABLE, 0); } static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state) diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c index 4c83e2320bca..571f5dda1e66 100644 --- a/drivers/gpu/drm/i915/display/intel_vblank.c +++ b/drivers/gpu/drm/i915/display/intel_vblank.c @@ -26,7 +26,7 @@ * | * | frame start: * | generate frame start interrupt (aka. 
vblank interrupt) (gmch) - * | may be shifted forward 1-3 extra lines via PIPECONF + * | may be shifted forward 1-3 extra lines via TRANSCONF * | | * | | start of vsync: * | | generate vsync interrupt @@ -54,7 +54,7 @@ * Summary: * - most events happen at the start of horizontal sync * - frame start happens at the start of horizontal blank, 1-4 lines - * (depending on PIPECONF settings) after the start of vblank + * (depending on TRANSCONF settings) after the start of vblank * - gen3/4 pixel and frame counter are synchronized with the start * of horizontal active on the first line of vertical active */ diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index 207b2a648d32..09b32ffdc552 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -17,6 +17,7 @@ #include "intel_dsi.h" #include "intel_qp_tables.h" #include "intel_vdsc.h" +#include "intel_vdsc_regs.h" enum ROW_INDEX_BPP { ROW_INDEX_6BPP = 0, diff --git a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h new file mode 100644 index 000000000000..4fd883463752 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h @@ -0,0 +1,461 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __INTEL_VDSC_REGS_H__ +#define __INTEL_VDSC_REGS_H__ + +#include "intel_display_reg_defs.h" + +/* Display Stream Splitter Control */ +#define DSS_CTL1 _MMIO(0x67400) +#define SPLITTER_ENABLE (1 << 31) +#define JOINER_ENABLE (1 << 30) +#define DUAL_LINK_MODE_INTERLEAVE (1 << 24) +#define DUAL_LINK_MODE_FRONTBACK (0 << 24) +#define OVERLAP_PIXELS_MASK (0xf << 16) +#define OVERLAP_PIXELS(pixels) ((pixels) << 16) +#define LEFT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0) +#define LEFT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0) +#define MAX_DL_BUFFER_TARGET_DEPTH 0x5a0 + +#define DSS_CTL2 _MMIO(0x67404) +#define LEFT_BRANCH_VDSC_ENABLE (1 << 31) +#define RIGHT_BRANCH_VDSC_ENABLE (1 << 15) +#define RIGHT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0) +#define RIGHT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0) + +#define _ICL_PIPE_DSS_CTL1_PB 0x78200 +#define _ICL_PIPE_DSS_CTL1_PC 0x78400 +#define ICL_PIPE_DSS_CTL1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_PIPE_DSS_CTL1_PB, \ + _ICL_PIPE_DSS_CTL1_PC) +#define BIG_JOINER_ENABLE (1 << 29) +#define MASTER_BIG_JOINER_ENABLE (1 << 28) +#define VGA_CENTERING_ENABLE (1 << 27) +#define SPLITTER_CONFIGURATION_MASK REG_GENMASK(26, 25) +#define SPLITTER_CONFIGURATION_2_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 0) +#define SPLITTER_CONFIGURATION_4_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 1) +#define UNCOMPRESSED_JOINER_MASTER (1 << 21) +#define UNCOMPRESSED_JOINER_SLAVE (1 << 20) + +#define _ICL_PIPE_DSS_CTL2_PB 0x78204 +#define _ICL_PIPE_DSS_CTL2_PC 0x78404 +#define ICL_PIPE_DSS_CTL2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_PIPE_DSS_CTL2_PB, \ + _ICL_PIPE_DSS_CTL2_PC) + +/* Icelake Display Stream Compression Registers */ +#define DSCA_PICTURE_PARAMETER_SET_0 _MMIO(0x6B200) +#define DSCC_PICTURE_PARAMETER_SET_0 _MMIO(0x6BA00) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC 0x78570 +#define ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC) +#define 
ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC) +#define DSC_ALT_ICH_SEL (1 << 20) +#define DSC_VBR_ENABLE (1 << 19) +#define DSC_422_ENABLE (1 << 18) +#define DSC_COLOR_SPACE_CONVERSION (1 << 17) +#define DSC_BLOCK_PREDICTION (1 << 16) +#define DSC_LINE_BUF_DEPTH_SHIFT 12 +#define DSC_BPC_SHIFT 8 +#define DSC_VER_MIN_SHIFT 4 +#define DSC_VER_MAJ (0x1 << 0) + +#define DSCA_PICTURE_PARAMETER_SET_1 _MMIO(0x6B204) +#define DSCC_PICTURE_PARAMETER_SET_1 _MMIO(0x6BA04) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB 0x78274 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB 0x78374 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC 0x78474 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC 0x78574 +#define ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC) +#define DSC_BPP(bpp) ((bpp) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_2 _MMIO(0x6B208) +#define DSCC_PICTURE_PARAMETER_SET_2 _MMIO(0x6BA08) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB 0x78278 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB 0x78378 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC 0x78478 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC 0x78578 +#define ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC) +#define DSC_PIC_WIDTH(pic_width) ((pic_width) << 16) +#define DSC_PIC_HEIGHT(pic_height) ((pic_height) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_3 _MMIO(0x6B20C) +#define DSCC_PICTURE_PARAMETER_SET_3 _MMIO(0x6BA0C) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB 0x7827C +#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB 0x7837C +#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC 0x7847C +#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC 0x7857C +#define ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC) +#define DSC_SLICE_WIDTH(slice_width) ((slice_width) << 16) +#define DSC_SLICE_HEIGHT(slice_height) ((slice_height) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_4 _MMIO(0x6B210) +#define DSCC_PICTURE_PARAMETER_SET_4 _MMIO(0x6BA10) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB 0x78280 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB 0x78380 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC 0x78480 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC 0x78580 +#define ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC) +#define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16) +#define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_5 _MMIO(0x6B214) +#define DSCC_PICTURE_PARAMETER_SET_5 _MMIO(0x6BA14) +#define 
_ICL_DSC0_PICTURE_PARAMETER_SET_5_PB 0x78284 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB 0x78384 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC 0x78484 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC 0x78584 +#define ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC) +#define DSC_SCALE_DEC_INT(scale_dec) ((scale_dec) << 16) +#define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_6 _MMIO(0x6B218) +#define DSCC_PICTURE_PARAMETER_SET_6 _MMIO(0x6BA18) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB 0x78288 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB 0x78388 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC 0x78488 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC 0x78588 +#define ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC) +#define DSC_FLATNESS_MAX_QP(max_qp) ((max_qp) << 24) +#define DSC_FLATNESS_MIN_QP(min_qp) ((min_qp) << 16) +#define DSC_FIRST_LINE_BPG_OFFSET(offset) ((offset) << 8) +#define DSC_INITIAL_SCALE_VALUE(value) ((value) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_7 _MMIO(0x6B21C) +#define DSCC_PICTURE_PARAMETER_SET_7 _MMIO(0x6BA1C) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB 0x7828C +#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB 0x7838C +#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC 0x7848C +#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC 0x7858C +#define ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC) +#define DSC_NFL_BPG_OFFSET(bpg_offset) ((bpg_offset) << 16) +#define DSC_SLICE_BPG_OFFSET(bpg_offset) ((bpg_offset) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_8 _MMIO(0x6B220) +#define DSCC_PICTURE_PARAMETER_SET_8 _MMIO(0x6BA20) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB 0x78290 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB 0x78390 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC 0x78490 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC 0x78590 +#define ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC) +#define DSC_INITIAL_OFFSET(initial_offset) ((initial_offset) << 16) +#define DSC_FINAL_OFFSET(final_offset) ((final_offset) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_9 _MMIO(0x6B224) +#define DSCC_PICTURE_PARAMETER_SET_9 _MMIO(0x6BA24) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB 0x78294 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB 0x78394 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC 0x78494 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC 0x78594 +#define ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC) +#define 
ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC) +#define DSC_RC_EDGE_FACTOR(rc_edge_fact) ((rc_edge_fact) << 16) +#define DSC_RC_MODEL_SIZE(rc_model_size) ((rc_model_size) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_10 _MMIO(0x6B228) +#define DSCC_PICTURE_PARAMETER_SET_10 _MMIO(0x6BA28) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB 0x78298 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB 0x78398 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC 0x78498 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC 0x78598 +#define ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC) +#define DSC_RC_TARGET_OFF_LOW(rc_tgt_off_low) ((rc_tgt_off_low) << 20) +#define DSC_RC_TARGET_OFF_HIGH(rc_tgt_off_high) ((rc_tgt_off_high) << 16) +#define DSC_RC_QUANT_INC_LIMIT1(lim) ((lim) << 8) +#define DSC_RC_QUANT_INC_LIMIT0(lim) ((lim) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_11 _MMIO(0x6B22C) +#define DSCC_PICTURE_PARAMETER_SET_11 _MMIO(0x6BA2C) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB 0x7829C +#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB 0x7839C +#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC 0x7849C +#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC 0x7859C +#define ICL_DSC0_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC) + +#define DSCA_PICTURE_PARAMETER_SET_12 _MMIO(0x6B260) +#define DSCC_PICTURE_PARAMETER_SET_12 _MMIO(0x6BA60) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB 0x782A0 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB 0x783A0 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC 0x784A0 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC 0x785A0 +#define ICL_DSC0_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC) + +#define DSCA_PICTURE_PARAMETER_SET_13 _MMIO(0x6B264) +#define DSCC_PICTURE_PARAMETER_SET_13 _MMIO(0x6BA64) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB 0x782A4 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB 0x783A4 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC 0x784A4 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC 0x785A4 +#define ICL_DSC0_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC) + +#define DSCA_PICTURE_PARAMETER_SET_14 _MMIO(0x6B268) +#define DSCC_PICTURE_PARAMETER_SET_14 _MMIO(0x6BA68) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB 0x782A8 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB 0x783A8 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC 0x784A8 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC 0x785A8 +#define ICL_DSC0_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - 
PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC) + +#define DSCA_PICTURE_PARAMETER_SET_15 _MMIO(0x6B26C) +#define DSCC_PICTURE_PARAMETER_SET_15 _MMIO(0x6BA6C) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB 0x782AC +#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB 0x783AC +#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC 0x784AC +#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC 0x785AC +#define ICL_DSC0_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC) + +#define DSCA_PICTURE_PARAMETER_SET_16 _MMIO(0x6B270) +#define DSCC_PICTURE_PARAMETER_SET_16 _MMIO(0x6BA70) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB 0x782B0 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB 0x783B0 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC 0x784B0 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC 0x785B0 +#define ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC) +#define DSC_SLICE_ROW_PER_FRAME(slice_row_per_frame) ((slice_row_per_frame) << 20) +#define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16) +#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0) + +/* Icelake Rate Control Buffer Threshold Registers */ +#define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230) +#define DSCA_RC_BUF_THRESH_0_UDW _MMIO(0x6B230 + 4) +#define DSCC_RC_BUF_THRESH_0 _MMIO(0x6BA30) +#define DSCC_RC_BUF_THRESH_0_UDW _MMIO(0x6BA30 + 4) +#define _ICL_DSC0_RC_BUF_THRESH_0_PB (0x78254) +#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB (0x78254 + 4) +#define _ICL_DSC1_RC_BUF_THRESH_0_PB (0x78354) +#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB (0x78354 + 4) +#define _ICL_DSC0_RC_BUF_THRESH_0_PC (0x78454) +#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC (0x78454 + 4) +#define _ICL_DSC1_RC_BUF_THRESH_0_PC (0x78554) +#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC (0x78554 + 4) +#define ICL_DSC0_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_BUF_THRESH_0_PB, \ + _ICL_DSC0_RC_BUF_THRESH_0_PC) +#define ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB, \ + _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC) +#define ICL_DSC1_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_BUF_THRESH_0_PB, \ + _ICL_DSC1_RC_BUF_THRESH_0_PC) +#define ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB, \ + _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC) + +#define DSCA_RC_BUF_THRESH_1 _MMIO(0x6B238) +#define DSCA_RC_BUF_THRESH_1_UDW _MMIO(0x6B238 + 4) +#define DSCC_RC_BUF_THRESH_1 _MMIO(0x6BA38) +#define DSCC_RC_BUF_THRESH_1_UDW _MMIO(0x6BA38 + 4) +#define _ICL_DSC0_RC_BUF_THRESH_1_PB (0x7825C) +#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB (0x7825C + 4) +#define _ICL_DSC1_RC_BUF_THRESH_1_PB (0x7835C) +#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB (0x7835C + 4) +#define _ICL_DSC0_RC_BUF_THRESH_1_PC (0x7845C) +#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC (0x7845C + 4) +#define 
_ICL_DSC1_RC_BUF_THRESH_1_PC (0x7855C) +#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC (0x7855C + 4) +#define ICL_DSC0_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_BUF_THRESH_1_PB, \ + _ICL_DSC0_RC_BUF_THRESH_1_PC) +#define ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB, \ + _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC) +#define ICL_DSC1_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_BUF_THRESH_1_PB, \ + _ICL_DSC1_RC_BUF_THRESH_1_PC) +#define ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \ + _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC) + +/* Icelake DSC Rate Control Range Parameter Registers */ +#define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240) +#define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4) +#define DSCC_RC_RANGE_PARAMETERS_0 _MMIO(0x6BA40) +#define DSCC_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6BA40 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB (0x78208) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB (0x78208 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB (0x78308) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB (0x78308 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC (0x78408) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC (0x78408 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC (0x78508) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC (0x78508 + 4) +#define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC) +#define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC) +#define RC_BPG_OFFSET_SHIFT 10 +#define RC_MAX_QP_SHIFT 5 +#define RC_MIN_QP_SHIFT 0 + +#define DSCA_RC_RANGE_PARAMETERS_1 _MMIO(0x6B248) +#define DSCA_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6B248 + 4) +#define DSCC_RC_RANGE_PARAMETERS_1 _MMIO(0x6BA48) +#define DSCC_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6BA48 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB (0x78210) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB (0x78210 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB (0x78310) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB (0x78310 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC (0x78410) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC (0x78410 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC (0x78510) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC (0x78510 + 4) +#define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC) +#define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC) + +#define DSCA_RC_RANGE_PARAMETERS_2 _MMIO(0x6B250) +#define DSCA_RC_RANGE_PARAMETERS_2_UDW 
_MMIO(0x6B250 + 4) +#define DSCC_RC_RANGE_PARAMETERS_2 _MMIO(0x6BA50) +#define DSCC_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6BA50 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB (0x78218) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB (0x78218 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB (0x78318) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB (0x78318 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC (0x78418) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC (0x78418 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC (0x78518) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC (0x78518 + 4) +#define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC) +#define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC) + +#define DSCA_RC_RANGE_PARAMETERS_3 _MMIO(0x6B258) +#define DSCA_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6B258 + 4) +#define DSCC_RC_RANGE_PARAMETERS_3 _MMIO(0x6BA58) +#define DSCC_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6BA58 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB (0x78220) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB (0x78220 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB (0x78320) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB (0x78320 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC (0x78420) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC (0x78420 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC (0x78520) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC (0x78520 + 4) +#define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC) +#define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC) + +#endif /* __INTEL_VDSC_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c index 5ff6aed9575e..4228f26b4c11 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.c +++ b/drivers/gpu/drm/i915/display/intel_vrr.c @@ -144,17 +144,11 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, * is deprecated. */ if (DISPLAY_VER(i915) >= 13) { - /* - * FIXME: Subtract Window2 delay from below value. - * - * Window2 specifies time required to program DSB (Window2) in - * number of scan lines. Assuming 0 for no DSB. 
- */ crtc_state->vrr.guardband = - crtc_state->vrr.vmin + 1 - adjusted_mode->crtc_vdisplay; + crtc_state->vrr.vmin + 1 - adjusted_mode->crtc_vblank_start; } else { crtc_state->vrr.pipeline_full = - min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay - + min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vblank_start - crtc_state->framestart_delay - 1); } diff --git a/drivers/gpu/drm/i915/display/intel_wm.c b/drivers/gpu/drm/i915/display/intel_wm.c new file mode 100644 index 000000000000..bb99179cd5fd --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_wm.c @@ -0,0 +1,408 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include "i915_drv.h" +#include "i9xx_wm.h" +#include "intel_display_types.h" +#include "intel_wm.h" +#include "skl_watermark.h" + +/** + * intel_update_watermarks - update FIFO watermark values based on current modes + * @dev_priv: i915 device + * + * Calculate watermark values for the various WM regs based on current mode + * and plane configuration. + * + * There are several cases to deal with here: + * - normal (i.e. non-self-refresh) + * - self-refresh (SR) mode + * - lines are large relative to FIFO size (buffer can hold up to 2) + * - lines are small relative to FIFO size (buffer can hold more than 2 + * lines), so need to account for TLB latency + * + * The normal calculation is: + * watermark = dotclock * bytes per pixel * latency + * where latency is platform & configuration dependent (we assume pessimal + * values here). + * + * The SR calculation is: + * watermark = (trunc(latency/line time)+1) * surface width * + * bytes per pixel + * where + * line time = htotal / dotclock + * surface width = hdisplay for normal plane and 64 for cursor + * and latency is assumed to be high, as above. + * + * The final value programmed to the register should always be rounded up, + * and include an extra 2 entries to account for clock crossings. + * + * We don't use the sprite, so we can ignore that. And on Crestline we have + * to set the non-SR watermarks to 8. 
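Plugging illustrative numbers into the doc-comment's normal-mode formula (none of these values come from a real platform table): at a 148500 kHz dot clock, 4 bytes per pixel and 12 us of assumed latency, the FIFO must absorb

	148500000 px/s * 4 B/px * 0.000012 s = 7128 bytes

during the latency window; with a hypothetical 64-byte FIFO line size that is DIV_ROUND_UP(7128, 64) = 112 entries, plus the 2 extra entries for clock crossings described above, 114 entries programmed.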
+ */ +void intel_update_watermarks(struct drm_i915_private *i915) +{ + if (i915->display.funcs.wm->update_wm) + i915->display.funcs.wm->update_wm(i915); +} + +int intel_compute_pipe_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + + if (i915->display.funcs.wm->compute_pipe_wm) + return i915->display.funcs.wm->compute_pipe_wm(state, crtc); + + return 0; +} + +int intel_compute_intermediate_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + + if (!i915->display.funcs.wm->compute_intermediate_wm) + return 0; + + if (drm_WARN_ON(&i915->drm, !i915->display.funcs.wm->compute_pipe_wm)) + return 0; + + return i915->display.funcs.wm->compute_intermediate_wm(state, crtc); +} + +bool intel_initial_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + + if (i915->display.funcs.wm->initial_watermarks) { + i915->display.funcs.wm->initial_watermarks(state, crtc); + return true; + } + + return false; +} + +void intel_atomic_update_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + + if (i915->display.funcs.wm->atomic_update_watermarks) + i915->display.funcs.wm->atomic_update_watermarks(state, crtc); +} + +void intel_optimize_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + + if (i915->display.funcs.wm->optimize_watermarks) + i915->display.funcs.wm->optimize_watermarks(state, crtc); +} + +int intel_compute_global_watermarks(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + + if (i915->display.funcs.wm->compute_global_watermarks) + return i915->display.funcs.wm->compute_global_watermarks(state); + + return 0; +} + +void intel_wm_get_hw_state(struct drm_i915_private *i915) +{ + if (i915->display.funcs.wm->get_hw_state) + return i915->display.funcs.wm->get_hw_state(i915); +} + +bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + + /* FIXME check the 'enable' instead */ + if (!crtc_state->hw.active) + return false; + + /* + * Treat cursor with fb as always visible since cursor updates + * can happen faster than the vrefresh rate, and the current + * watermark code doesn't handle that correctly. Cursor updates + * which set/clear the fb or change the cursor size are going + * to get throttled by intel_legacy_cursor_update() to work + * around this problem with the watermark code. + */ + if (plane->id == PLANE_CURSOR) + return plane_state->hw.fb != NULL; + else + return plane_state->uapi.visible; +} + +void intel_print_wm_latency(struct drm_i915_private *dev_priv, + const char *name, const u16 wm[]) +{ + int level; + + for (level = 0; level < dev_priv->display.wm.num_levels; level++) { + unsigned int latency = wm[level]; + + if (latency == 0) { + drm_dbg_kms(&dev_priv->drm, + "%s WM%d latency not provided\n", + name, level); + continue; + } + + /* + * - latencies are in us on gen9. 
+ * - before then, WM1+ latency values are in 0.5us units + */ + if (DISPLAY_VER(dev_priv) >= 9) + latency *= 10; + else if (level > 0) + latency *= 5; + + drm_dbg_kms(&dev_priv->drm, + "%s WM%d latency %u (%u.%u usec)\n", name, level, + wm[level], latency / 10, latency % 10); + } +} + +void intel_wm_init(struct drm_i915_private *i915) +{ + if (DISPLAY_VER(i915) >= 9) + skl_wm_init(i915); + else + i9xx_wm_init(i915); +} + +static void wm_latency_show(struct seq_file *m, const u16 wm[8]) +{ + struct drm_i915_private *dev_priv = m->private; + int level; + + drm_modeset_lock_all(&dev_priv->drm); + + for (level = 0; level < dev_priv->display.wm.num_levels; level++) { + unsigned int latency = wm[level]; + + /* + * - WM1+ latency values in 0.5us units + * - latencies are in us on gen9/vlv/chv + */ + if (DISPLAY_VER(dev_priv) >= 9 || + IS_VALLEYVIEW(dev_priv) || + IS_CHERRYVIEW(dev_priv) || + IS_G4X(dev_priv)) + latency *= 10; + else if (level > 0) + latency *= 5; + + seq_printf(m, "WM%d %u (%u.%u usec)\n", + level, wm[level], latency / 10, latency % 10); + } + + drm_modeset_unlock_all(&dev_priv->drm); +} + +static int pri_wm_latency_show(struct seq_file *m, void *data) +{ + struct drm_i915_private *dev_priv = m->private; + const u16 *latencies; + + if (DISPLAY_VER(dev_priv) >= 9) + latencies = dev_priv->display.wm.skl_latency; + else + latencies = dev_priv->display.wm.pri_latency; + + wm_latency_show(m, latencies); + + return 0; +} + +static int spr_wm_latency_show(struct seq_file *m, void *data) +{ + struct drm_i915_private *dev_priv = m->private; + const u16 *latencies; + + if (DISPLAY_VER(dev_priv) >= 9) + latencies = dev_priv->display.wm.skl_latency; + else + latencies = dev_priv->display.wm.spr_latency; + + wm_latency_show(m, latencies); + + return 0; +} + +static int cur_wm_latency_show(struct seq_file *m, void *data) +{ + struct drm_i915_private *dev_priv = m->private; + const u16 *latencies; + + if (DISPLAY_VER(dev_priv) >= 9) + latencies = dev_priv->display.wm.skl_latency; + else + latencies = dev_priv->display.wm.cur_latency; + + wm_latency_show(m, latencies); + + return 0; +} + +static int pri_wm_latency_open(struct inode *inode, struct file *file) +{ + struct drm_i915_private *dev_priv = inode->i_private; + + if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) + return -ENODEV; + + return single_open(file, pri_wm_latency_show, dev_priv); +} + +static int spr_wm_latency_open(struct inode *inode, struct file *file) +{ + struct drm_i915_private *dev_priv = inode->i_private; + + if (HAS_GMCH(dev_priv)) + return -ENODEV; + + return single_open(file, spr_wm_latency_show, dev_priv); +} + +static int cur_wm_latency_open(struct inode *inode, struct file *file) +{ + struct drm_i915_private *dev_priv = inode->i_private; + + if (HAS_GMCH(dev_priv)) + return -ENODEV; + + return single_open(file, cur_wm_latency_show, dev_priv); +} + +static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp, u16 wm[8]) +{ + struct seq_file *m = file->private_data; + struct drm_i915_private *dev_priv = m->private; + u16 new[8] = { 0 }; + int level; + int ret; + char tmp[32]; + + if (len >= sizeof(tmp)) + return -EINVAL; + + if (copy_from_user(tmp, ubuf, len)) + return -EFAULT; + + tmp[len] = '\0'; + + ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu", + &new[0], &new[1], &new[2], &new[3], + &new[4], &new[5], &new[6], &new[7]); + if (ret != dev_priv->display.wm.num_levels) + return -EINVAL; + + drm_modeset_lock_all(&dev_priv->drm); + + for (level = 0; level < 
dev_priv->display.wm.num_levels; level++) + wm[level] = new[level]; + + drm_modeset_unlock_all(&dev_priv->drm); + + return len; +} + +static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct drm_i915_private *dev_priv = m->private; + u16 *latencies; + + if (DISPLAY_VER(dev_priv) >= 9) + latencies = dev_priv->display.wm.skl_latency; + else + latencies = dev_priv->display.wm.pri_latency; + + return wm_latency_write(file, ubuf, len, offp, latencies); +} + +static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct drm_i915_private *dev_priv = m->private; + u16 *latencies; + + if (DISPLAY_VER(dev_priv) >= 9) + latencies = dev_priv->display.wm.skl_latency; + else + latencies = dev_priv->display.wm.spr_latency; + + return wm_latency_write(file, ubuf, len, offp, latencies); +} + +static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct drm_i915_private *dev_priv = m->private; + u16 *latencies; + + if (DISPLAY_VER(dev_priv) >= 9) + latencies = dev_priv->display.wm.skl_latency; + else + latencies = dev_priv->display.wm.cur_latency; + + return wm_latency_write(file, ubuf, len, offp, latencies); +} + +static const struct file_operations i915_pri_wm_latency_fops = { + .owner = THIS_MODULE, + .open = pri_wm_latency_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = pri_wm_latency_write +}; + +static const struct file_operations i915_spr_wm_latency_fops = { + .owner = THIS_MODULE, + .open = spr_wm_latency_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = spr_wm_latency_write +}; + +static const struct file_operations i915_cur_wm_latency_fops = { + .owner = THIS_MODULE, + .open = cur_wm_latency_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = cur_wm_latency_write +}; + +void intel_wm_debugfs_register(struct drm_i915_private *i915) +{ + struct drm_minor *minor = i915->drm.primary; + + debugfs_create_file("i915_pri_wm_latency", 0644, minor->debugfs_root, + i915, &i915_pri_wm_latency_fops); + + debugfs_create_file("i915_spr_wm_latency", 0644, minor->debugfs_root, + i915, &i915_spr_wm_latency_fops); + + debugfs_create_file("i915_cur_wm_latency", 0644, minor->debugfs_root, + i915, &i915_cur_wm_latency_fops); + + skl_watermark_debugfs_register(i915); +} diff --git a/drivers/gpu/drm/i915/display/intel_wm.h b/drivers/gpu/drm/i915/display/intel_wm.h new file mode 100644 index 000000000000..48429ac140d2 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_wm.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __INTEL_WM_H__ +#define __INTEL_WM_H__ + +#include <linux/types.h> + +struct drm_i915_private; +struct intel_atomic_state; +struct intel_crtc; +struct intel_crtc_state; +struct intel_plane_state; + +void intel_update_watermarks(struct drm_i915_private *i915); +int intel_compute_pipe_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc); +int intel_compute_intermediate_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc); +bool intel_initial_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void intel_atomic_update_watermarks(struct intel_atomic_state *state, + struct intel_crtc 
*crtc); +void intel_optimize_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc); +int intel_compute_global_watermarks(struct intel_atomic_state *state); +void intel_wm_get_hw_state(struct drm_i915_private *i915); +bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); +void intel_print_wm_latency(struct drm_i915_private *i915, + const char *name, const u16 wm[]); +void intel_wm_init(struct drm_i915_private *i915); +void intel_wm_debugfs_register(struct drm_i915_private *i915); + +#endif /* __INTEL_WM_H__ */ diff --git a/drivers/gpu/drm/i915/intel_pm_types.h b/drivers/gpu/drm/i915/display/intel_wm_types.h index 93152537b420..628b7c0ce484 100644 --- a/drivers/gpu/drm/i915/intel_pm_types.h +++ b/drivers/gpu/drm/i915/display/intel_wm_types.h @@ -3,12 +3,12 @@ * Copyright © 2021 Intel Corporation */ -#ifndef __INTEL_PM_TYPES_H__ -#define __INTEL_PM_TYPES_H__ +#ifndef __INTEL_WM_TYPES_H__ +#define __INTEL_WM_TYPES_H__ #include <linux/types.h> -#include "display/intel_display_limits.h" +#include "intel_display_limits.h" enum intel_ddb_partitioning { INTEL_DDB_PART_1_2, @@ -73,4 +73,4 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1, return false; } -#endif /* __INTEL_PM_TYPES_H__ */ +#endif /* __INTEL_WM_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index 9b172a1e90de..ce55b8f09301 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -642,7 +642,7 @@ icl_plane_disable_arm(struct intel_plane *plane, skl_write_plane_wm(plane, crtc_state); - intel_psr2_disable_plane_sel_fetch(plane, crtc_state); + intel_psr2_disable_plane_sel_fetch_arm(plane, crtc_state); intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0); intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0); } @@ -1260,7 +1260,7 @@ icl_plane_update_noarm(struct intel_plane *plane, if (plane_state->force_black) icl_plane_csc_load_black(plane); - intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane); + intel_psr2_program_plane_sel_fetch_noarm(plane, crtc_state, plane_state, color_plane); } static void @@ -1287,6 +1287,8 @@ icl_plane_update_arm(struct intel_plane *plane, if (plane_state->scaler_id >= 0) skl_program_plane_scaler(plane, crtc_state, plane_state); + intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state, plane_state); + /* * The control register self-arms if the plane was previously * disabled. 
Try to make the plane enable atomic by writing @@ -2180,7 +2182,7 @@ static bool gen12_plane_has_mc_ccs(struct drm_i915_private *i915, if (DISPLAY_VER(i915) < 12) return false; - /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */ + /* Wa_14010477008 */ if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TGL_DISPLAY_STEP(i915, STEP_A0, STEP_D0)) return false; diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c index d1670cc3eff2..f0af997d2a23 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.c +++ b/drivers/gpu/drm/i915/display/skl_watermark.c @@ -5,6 +5,10 @@ #include <drm/drm_blend.h> +#include "i915_drv.h" +#include "i915_fixed.h" +#include "i915_reg.h" +#include "i9xx_wm.h" #include "intel_atomic.h" #include "intel_atomic_plane.h" #include "intel_bw.h" @@ -13,13 +17,9 @@ #include "intel_display_power.h" #include "intel_display_types.h" #include "intel_fb.h" -#include "skl_watermark.h" - -#include "i915_drv.h" -#include "i915_fixed.h" -#include "i915_reg.h" #include "intel_pcode.h" -#include "intel_pm.h" +#include "intel_wm.h" +#include "skl_watermark.h" static void skl_sagv_disable(struct drm_i915_private *i915); @@ -64,7 +64,7 @@ static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915) static bool intel_has_sagv(struct drm_i915_private *i915) { - return DISPLAY_VER(i915) >= 9 && !IS_LP(i915) && + return HAS_SAGV(i915) && i915->display.sagv.status != I915_SAGV_NOT_CONTROLLED; } @@ -92,7 +92,7 @@ intel_sagv_block_time(struct drm_i915_private *i915) return val; } else if (DISPLAY_VER(i915) == 11) { return 10; - } else if (DISPLAY_VER(i915) == 9 && !IS_LP(i915)) { + } else if (HAS_SAGV(i915)) { return 30; } else { return 0; @@ -101,7 +101,7 @@ intel_sagv_block_time(struct drm_i915_private *i915) static void intel_sagv_init(struct drm_i915_private *i915) { - if (!intel_has_sagv(i915)) + if (!HAS_SAGV(i915)) i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED; /* @@ -359,7 +359,7 @@ static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) continue; /* Find the highest enabled wm level for this plane */ - for (level = ilk_wm_max_level(i915); + for (level = i915->display.wm.num_levels - 1; !wm->wm[level].enable; --level) { } @@ -710,10 +710,10 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state, { struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor); struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); - int level, max_level = ilk_wm_max_level(i915); struct skl_wm_level wm = {}; int ret, min_ddb_alloc = 0; struct skl_wm_params wp; + int level; ret = skl_compute_wm_params(crtc_state, 256, drm_format_info(DRM_FORMAT_ARGB8888), @@ -722,7 +722,7 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state, crtc_state->pixel_rate, &wp, 0); drm_WARN_ON(&i915->drm, ret); - for (level = 0; level <= max_level; level++) { + for (level = 0; level < i915->display.wm.num_levels; level++) { unsigned int latency = i915->display.wm.skl_latency[level]; skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm); @@ -1407,16 +1407,22 @@ skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm, } } -static bool icl_need_wm1_wa(struct drm_i915_private *i915, - enum plane_id plane_id) +static bool skl_need_wm_copy_wa(struct drm_i915_private *i915, int level, + const struct skl_plane_wm *wm) { /* * Wa_1408961008:icl, ehl * Wa_14012656716:tgl, adl - * Underruns with WM1+ disabled + * Wa_14017887344:icl + * Wa_14017868169:adl, tgl + * Due to some 
power-saving optimizations, different subsystems
+	 * like PSR might still read even disabled WM level registers,
+	 * for "reference", so let's keep the values sane. Considering
+	 * the number of workarounds requiring similar handling, it was
+	 * decided to simply do this for all platforms; since those WM
+	 * levels are disabled, this isn't going to do any harm anyway.
 	 */
-	return DISPLAY_VER(i915) == 11 ||
-		(IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR);
+	return level > 0 && !wm->wm[level].enable;
 }
 
 struct skl_plane_ddb_iter {
@@ -1492,7 +1498,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
 	 * Find the highest watermark level for which we can satisfy the block
 	 * requirement of active planes.
 	 */
-	for (level = ilk_wm_max_level(i915); level >= 0; level--) {
+	for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
 		blocks = 0;
 		for_each_plane_id_on_crtc(crtc, plane_id) {
 			const struct skl_plane_wm *wm =
@@ -1568,7 +1574,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
 	 * all levels as "enabled." Go back now and disable the ones
 	 * that aren't actually possible.
 	 */
-	for (level++; level <= ilk_wm_max_level(i915); level++) {
+	for (level++; level < i915->display.wm.num_levels; level++) {
 		for_each_plane_id_on_crtc(crtc, plane_id) {
 			const struct skl_ddb_entry *ddb =
 				&crtc_state->wm.skl.plane_ddb[plane_id];
@@ -1585,12 +1591,10 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
 			else
 				skl_check_wm_level(&wm->wm[level], ddb);
 
-			if (icl_need_wm1_wa(i915, plane_id) &&
-			    level == 1 && !wm->wm[level].enable &&
-			    wm->wm[0].enable) {
-				wm->wm[level].blocks = wm->wm[0].blocks;
-				wm->wm[level].lines = wm->wm[0].lines;
-				wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
+			if (skl_need_wm_copy_wa(i915, level, wm)) {
+				wm->wm[level].blocks = wm->wm[level - 1].blocks;
+				wm->wm[level].lines = wm->wm[level - 1].lines;
+				wm->wm[level].ignore_lines = wm->wm[level - 1].ignore_lines;
 			}
 		}
 	}
@@ -1967,10 +1971,10 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
 		      struct skl_wm_level *levels)
 {
 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
-	int level, max_level = ilk_wm_max_level(i915);
 	struct skl_wm_level *result_prev = &levels[0];
+	int level;
 
-	for (level = 0; level <= max_level; level++) {
+	for (level = 0; level < i915->display.wm.num_levels; level++) {
 		struct skl_wm_level *result = &levels[level];
 		unsigned int latency = i915->display.wm.skl_latency[level];
 
@@ -2248,7 +2252,6 @@ void skl_write_plane_wm(struct intel_plane *plane,
 			const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
-	int level, max_level = ilk_wm_max_level(i915);
 	enum plane_id plane_id = plane->id;
 	enum pipe pipe = plane->pipe;
 	const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
@@ -2256,8 +2259,9 @@ void skl_write_plane_wm(struct intel_plane *plane,
 		&crtc_state->wm.skl.plane_ddb[plane_id];
 	const struct skl_ddb_entry *ddb_y =
 		&crtc_state->wm.skl.plane_ddb_y[plane_id];
+	int level;
 
-	for (level = 0; level <= max_level; level++)
+	for (level = 0; level < i915->display.wm.num_levels; level++)
 		skl_write_wm_level(i915, PLANE_WM(pipe, plane_id, level),
 				   skl_plane_wm_level(pipe_wm, plane_id, level));
 
@@ -2285,14 +2289,14 @@ void skl_write_cursor_wm(struct intel_plane *plane,
 			 const struct intel_crtc_state *crtc_state)
{
 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
-	int level, max_level = ilk_wm_max_level(i915);
 	enum plane_id plane_id = plane->id;
 	enum pipe pipe =
plane->pipe; const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; const struct skl_ddb_entry *ddb = &crtc_state->wm.skl.plane_ddb[plane_id]; + int level; - for (level = 0; level <= max_level; level++) + for (level = 0; level < i915->display.wm.num_levels; level++) skl_write_wm_level(i915, CUR_WM(pipe, level), skl_plane_wm_level(pipe_wm, plane_id, level)); @@ -2324,9 +2328,9 @@ static bool skl_plane_wm_equals(struct drm_i915_private *i915, const struct skl_plane_wm *wm1, const struct skl_plane_wm *wm2) { - int level, max_level = ilk_wm_max_level(i915); + int level; - for (level = 0; level <= max_level; level++) { + for (level = 0; level < i915->display.wm.num_levels; level++) { /* * We don't check uv_wm as the hardware doesn't actually * use it. It only gets used for calculating the required @@ -2398,6 +2402,8 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state, return PTR_ERR(plane_state); new_crtc_state->update_planes |= BIT(plane_id); + new_crtc_state->async_flip_planes = 0; + new_crtc_state->do_async_flip = false; } return 0; @@ -2674,9 +2680,9 @@ static bool skl_plane_selected_wm_equals(struct intel_plane *plane, const struct skl_pipe_wm *new_pipe_wm) { struct drm_i915_private *i915 = to_i915(plane->base.dev); - int level, max_level = ilk_wm_max_level(i915); + int level; - for (level = 0; level <= max_level; level++) { + for (level = 0; level < i915->display.wm.num_levels; level++) { /* * We don't check uv_wm as the hardware doesn't actually * use it. It only gets used for calculating the required @@ -2755,6 +2761,8 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state, return PTR_ERR(plane_state); new_crtc_state->update_planes |= BIT(plane_id); + new_crtc_state->async_flip_planes = 0; + new_crtc_state->do_async_flip = false; } return 0; @@ -2810,16 +2818,14 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, { struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - int level, max_level; enum plane_id plane_id; + int level; u32 val; - max_level = ilk_wm_max_level(i915); - for_each_plane_id_on_crtc(crtc, plane_id) { struct skl_plane_wm *wm = &out->planes[plane_id]; - for (level = 0; level <= max_level; level++) { + for (level = 0; level < i915->display.wm.num_levels; level++) { if (plane_id != PLANE_CURSOR) val = intel_de_read(i915, PLANE_WM(pipe, plane_id, level)); else @@ -2856,7 +2862,7 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, } } -void skl_wm_get_hw_state(struct drm_i915_private *i915) +static void skl_wm_get_hw_state(struct drm_i915_private *i915) { struct intel_dbuf_state *dbuf_state = to_intel_dbuf_state(i915->display.dbuf.obj.state); @@ -2956,7 +2962,7 @@ static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915) return false; } -void skl_wm_sanitize(struct drm_i915_private *i915) +static void skl_wm_sanitize(struct drm_i915_private *i915) { struct intel_crtc *crtc; @@ -2992,6 +2998,12 @@ void skl_wm_sanitize(struct drm_i915_private *i915) } } +static void skl_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915) +{ + skl_wm_get_hw_state(i915); + skl_wm_sanitize(i915); +} + void intel_wm_state_verify(struct intel_crtc *crtc, struct intel_crtc_state *new_crtc_state) { @@ -3002,9 +3014,9 @@ void intel_wm_state_verify(struct intel_crtc *crtc, struct skl_pipe_wm wm; } *hw; const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal; - int level, max_level = ilk_wm_max_level(i915); struct intel_plane *plane; u8 hw_enabled_slices; + int 
level; if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active) return; @@ -3031,7 +3043,7 @@ void intel_wm_state_verify(struct intel_crtc *crtc, const struct skl_wm_level *hw_wm_level, *sw_wm_level; /* Watermarks */ - for (level = 0; level <= max_level; level++) { + for (level = 0; level < i915->display.wm.num_levels; level++) { hw_wm_level = &hw->wm.planes[plane->id].wm[level]; sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level); @@ -3153,7 +3165,7 @@ void skl_watermark_ipc_init(struct drm_i915_private *i915) static void adjust_wm_latency(struct drm_i915_private *i915, - u16 wm[], int max_level, int read_latency) + u16 wm[], int num_levels, int read_latency) { bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed; int i, level; @@ -3163,12 +3175,12 @@ adjust_wm_latency(struct drm_i915_private *i915, * need to be disabled. We make sure to sanitize the values out * of the punit to satisfy this requirement. */ - for (level = 1; level <= max_level; level++) { + for (level = 1; level < num_levels; level++) { if (wm[level] == 0) { - for (i = level + 1; i <= max_level; i++) + for (i = level + 1; i < num_levels; i++) wm[i] = 0; - max_level = level - 1; + num_levels = level; break; } } @@ -3181,7 +3193,7 @@ adjust_wm_latency(struct drm_i915_private *i915, * from the punit when level 0 response data is 0us. */ if (wm[0] == 0) { - for (level = 0; level <= max_level; level++) + for (level = 0; level < num_levels; level++) wm[level] += read_latency; } @@ -3197,7 +3209,7 @@ adjust_wm_latency(struct drm_i915_private *i915, static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) { - int max_level = ilk_wm_max_level(i915); + int num_levels = i915->display.wm.num_levels; u32 val; val = intel_de_read(i915, MTL_LATENCY_LP0_LP1); @@ -3212,12 +3224,12 @@ static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val); wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val); - adjust_wm_latency(i915, wm, max_level, 6); + adjust_wm_latency(i915, wm, num_levels, 6); } static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) { - int max_level = ilk_wm_max_level(i915); + int num_levels = i915->display.wm.num_levels; int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2; int mult = IS_DG2(i915) ? 
2 : 1; u32 val; @@ -3249,11 +3261,16 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult; wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult; - adjust_wm_latency(i915, wm, max_level, read_latency); + adjust_wm_latency(i915, wm, num_levels, read_latency); } static void skl_setup_wm_latency(struct drm_i915_private *i915) { + if (HAS_HW_SAGV_WM(i915)) + i915->display.wm.num_levels = 6; + else + i915->display.wm.num_levels = 8; + if (DISPLAY_VER(i915) >= 14) mtl_read_wm_latency(i915, i915->display.wm.skl_latency); else @@ -3264,6 +3281,7 @@ static void skl_setup_wm_latency(struct drm_i915_private *i915) static const struct intel_wm_funcs skl_wm_funcs = { .compute_global_watermarks = skl_compute_wm, + .get_hw_state = skl_wm_get_hw_state_and_sanitize, }; void skl_wm_init(struct drm_i915_private *i915) @@ -3541,13 +3559,34 @@ static const struct file_operations skl_watermark_ipc_status_fops = { .write = skl_watermark_ipc_status_write }; -void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915) +static int intel_sagv_status_show(struct seq_file *m, void *unused) +{ + struct drm_i915_private *i915 = m->private; + static const char * const sagv_status[] = { + [I915_SAGV_UNKNOWN] = "unknown", + [I915_SAGV_DISABLED] = "disabled", + [I915_SAGV_ENABLED] = "enabled", + [I915_SAGV_NOT_CONTROLLED] = "not controlled", + }; + + seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(i915))); + seq_printf(m, "SAGV status: %s\n", sagv_status[i915->display.sagv.status]); + seq_printf(m, "SAGV block time: %d usec\n", i915->display.sagv.block_time_us); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(intel_sagv_status); + +void skl_watermark_debugfs_register(struct drm_i915_private *i915) { struct drm_minor *minor = i915->drm.primary; - if (!HAS_IPC(i915)) - return; + if (HAS_IPC(i915)) + debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915, + &skl_watermark_ipc_status_fops); - debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915, - &skl_watermark_ipc_status_fops); + if (HAS_SAGV(i915)) + debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root, i915, + &intel_sagv_status_fops); } diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h index 37954c472070..f91a3d4ddc07 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.h +++ b/drivers/gpu/drm/i915/display/skl_watermark.h @@ -10,7 +10,7 @@ #include "intel_display_limits.h" #include "intel_global_state.h" -#include "intel_pm_types.h" +#include "intel_wm_types.h" struct drm_i915_private; struct intel_atomic_state; @@ -38,16 +38,13 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, const struct skl_ddb_entry *entries, int num_entries, int ignore_idx); -void skl_wm_get_hw_state(struct drm_i915_private *i915); -void skl_wm_sanitize(struct drm_i915_private *i915); - void intel_wm_state_verify(struct intel_crtc *crtc, struct intel_crtc_state *new_crtc_state); void skl_watermark_ipc_init(struct drm_i915_private *i915); void skl_watermark_ipc_update(struct drm_i915_private *i915); bool skl_watermark_ipc_enabled(struct drm_i915_private *i915); -void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915); +void skl_watermark_debugfs_register(struct drm_i915_private *i915); void skl_wm_init(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index 
2289f6b1b4eb..8d2e6e151ba0 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -331,32 +331,23 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - u32 tmp; bool cold_boot = false; /* Set the MIPI mode * If MIPI_Mode is off, then writing to LP_Wake bit is not reflecting. * Power ON MIPI IO first and then write into IO reset and LP wake bits */ - for_each_dsi_port(port, intel_dsi->ports) { - tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); - intel_de_write(dev_priv, MIPI_CTRL(port), - tmp | GLK_MIPIIO_ENABLE); - } + for_each_dsi_port(port, intel_dsi->ports) + intel_de_rmw(dev_priv, MIPI_CTRL(port), 0, GLK_MIPIIO_ENABLE); /* Put the IO into reset */ - tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A)); - tmp &= ~GLK_MIPIIO_RESET_RELEASED; - intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp); + intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0); /* Program LP Wake */ for_each_dsi_port(port, intel_dsi->ports) { - tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); - if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY)) - tmp &= ~GLK_LP_WAKE; - else - tmp |= GLK_LP_WAKE; - intel_de_write(dev_priv, MIPI_CTRL(port), tmp); + u32 tmp = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); + intel_de_rmw(dev_priv, MIPI_CTRL(port), + GLK_LP_WAKE, (tmp & DEVICE_READY) ? GLK_LP_WAKE : 0); } /* Wait for Pwr ACK */ @@ -380,7 +371,6 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - u32 val; /* Wait for MIPI PHY status bit to set */ for_each_dsi_port(port, intel_dsi->ports) { @@ -390,24 +380,18 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder) } /* Get IO out of reset */ - val = intel_de_read(dev_priv, MIPI_CTRL(PORT_A)); - intel_de_write(dev_priv, MIPI_CTRL(PORT_A), - val | GLK_MIPIIO_RESET_RELEASED); + intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), 0, GLK_MIPIIO_RESET_RELEASED); /* Get IO out of Low power state*/ for_each_dsi_port(port, intel_dsi->ports) { if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY)) { - val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); - val &= ~ULPS_STATE_MASK; - val |= DEVICE_READY; - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); + intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), + ULPS_STATE_MASK, DEVICE_READY); usleep_range(10, 15); } else { /* Enter ULPS */ - val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); - val &= ~ULPS_STATE_MASK; - val |= (ULPS_STATE_ENTER | DEVICE_READY); - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); + intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), + ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY); /* Wait for ULPS active */ if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port), @@ -415,20 +399,15 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder) drm_err(&dev_priv->drm, "ULPS not active\n"); /* Exit ULPS */ - val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); - val &= ~ULPS_STATE_MASK; - val |= (ULPS_STATE_EXIT | DEVICE_READY); - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); + intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), + ULPS_STATE_MASK, ULPS_STATE_EXIT | DEVICE_READY); /* Enter Normal Mode */ - val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); - val &= ~ULPS_STATE_MASK; 
- val |= (ULPS_STATE_NORMAL_OPERATION | DEVICE_READY); - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); - - val = intel_de_read(dev_priv, MIPI_CTRL(port)); - val &= ~GLK_LP_WAKE; - intel_de_write(dev_priv, MIPI_CTRL(port), val); + intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), + ULPS_STATE_MASK, + ULPS_STATE_NORMAL_OPERATION | DEVICE_READY); + + intel_de_rmw(dev_priv, MIPI_CTRL(port), GLK_LP_WAKE, 0); } } @@ -460,9 +439,7 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder) /* Enable MIPI PHY transparent latch */ for_each_dsi_port(port, intel_dsi->ports) { - val = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port)); - intel_de_write(dev_priv, BXT_MIPI_PORT_CTRL(port), - val | LP_OUTPUT_HOLD); + intel_de_rmw(dev_priv, BXT_MIPI_PORT_CTRL(port), 0, LP_OUTPUT_HOLD); usleep_range(2000, 2500); } @@ -482,7 +459,6 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - u32 val; drm_dbg_kms(&dev_priv->drm, "\n"); @@ -505,9 +481,7 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder) * Common bit for both MIPI Port A & MIPI Port C * No similar bit in MIPI Port C reg */ - val = intel_de_read(dev_priv, MIPI_PORT_CTRL(PORT_A)); - intel_de_write(dev_priv, MIPI_PORT_CTRL(PORT_A), - val | LP_OUTPUT_HOLD); + intel_de_rmw(dev_priv, MIPI_PORT_CTRL(PORT_A), 0, LP_OUTPUT_HOLD); usleep_range(1000, 1500); intel_de_write(dev_priv, MIPI_DEVICE_READY(port), @@ -537,15 +511,11 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - u32 val; /* Enter ULPS */ - for_each_dsi_port(port, intel_dsi->ports) { - val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); - val &= ~ULPS_STATE_MASK; - val |= (ULPS_STATE_ENTER | DEVICE_READY); - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); - } + for_each_dsi_port(port, intel_dsi->ports) + intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), + ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY); /* Wait for MIPI PHY status bit to unset */ for_each_dsi_port(port, intel_dsi->ports) { @@ -568,12 +538,9 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - u32 tmp; /* Put the IO into reset */ - tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A)); - tmp &= ~GLK_MIPIIO_RESET_RELEASED; - intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp); + intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0); /* Wait for MIPI PHY status bit to unset */ for_each_dsi_port(port, intel_dsi->ports) { @@ -583,11 +550,8 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder) } /* Clear MIPI mode */ - for_each_dsi_port(port, intel_dsi->ports) { - tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); - tmp &= ~GLK_MIPIIO_ENABLE; - intel_de_write(dev_priv, MIPI_CTRL(port), tmp); - } + for_each_dsi_port(port, intel_dsi->ports) + intel_de_rmw(dev_priv, MIPI_CTRL(port), GLK_MIPIIO_ENABLE, 0); } static void glk_dsi_clear_device_ready(struct intel_encoder *encoder) @@ -607,7 +571,6 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder) /* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */ i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ? 
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A); - u32 val; intel_de_write(dev_priv, MIPI_DEVICE_READY(port), DEVICE_READY | ULPS_STATE_ENTER); @@ -631,8 +594,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder) drm_err(&dev_priv->drm, "DSI LP not going Low\n"); /* Disable MIPI PHY transparent latch */ - val = intel_de_read(dev_priv, port_ctrl); - intel_de_write(dev_priv, port_ctrl, val & ~LP_OUTPUT_HOLD); + intel_de_rmw(dev_priv, port_ctrl, LP_OUTPUT_HOLD, 0); usleep_range(1000, 1500); intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x00); @@ -649,23 +611,17 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder, enum port port; if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { - u32 temp; + u32 temp = intel_dsi->pixel_overlap; + if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { - for_each_dsi_port(port, intel_dsi->ports) { - temp = intel_de_read(dev_priv, - MIPI_CTRL(port)); - temp &= ~BXT_PIXEL_OVERLAP_CNT_MASK | - intel_dsi->pixel_overlap << - BXT_PIXEL_OVERLAP_CNT_SHIFT; - intel_de_write(dev_priv, MIPI_CTRL(port), - temp); - } + for_each_dsi_port(port, intel_dsi->ports) + intel_de_rmw(dev_priv, MIPI_CTRL(port), + BXT_PIXEL_OVERLAP_CNT_MASK, + temp << BXT_PIXEL_OVERLAP_CNT_SHIFT); } else { - temp = intel_de_read(dev_priv, VLV_CHICKEN_3); - temp &= ~PIXEL_OVERLAP_CNT_MASK | - intel_dsi->pixel_overlap << - PIXEL_OVERLAP_CNT_SHIFT; - intel_de_write(dev_priv, VLV_CHICKEN_3, temp); + intel_de_rmw(dev_priv, VLV_CHICKEN_3, + PIXEL_OVERLAP_CNT_MASK, + temp << PIXEL_OVERLAP_CNT_SHIFT); } } @@ -709,11 +665,9 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder) for_each_dsi_port(port, intel_dsi->ports) { i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ? BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); - u32 temp; /* de-assert ip_tg_enable signal */ - temp = intel_de_read(dev_priv, port_ctrl); - intel_de_write(dev_priv, port_ctrl, temp & ~DPI_ENABLE); + intel_de_rmw(dev_priv, port_ctrl, DPI_ENABLE, 0); intel_de_posting_read(dev_priv, port_ctrl); } } @@ -787,7 +741,6 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; enum port port; - u32 val; bool glk_cold_boot = false; drm_dbg_kms(&dev_priv->drm, "\n"); @@ -810,9 +763,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, if (IS_BROXTON(dev_priv)) { /* Add MIPI IO reset programming for modeset */ - val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON); - intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, - val | MIPIO_RST_CTRL); + intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, 0, MIPIO_RST_CTRL); /* Power up DSI regulator */ intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT); @@ -820,12 +771,9 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, } if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - u32 val; - /* Disable DPOunit clock gating, can stall pipe */ - val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv)); - val |= DPOUNIT_CLOCK_GATE_DISABLE; - intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val); + intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv), + 0, DPOUNIT_CLOCK_GATE_DISABLE); } if (!IS_GEMINILAKE(dev_priv)) @@ -949,7 +897,6 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - u32 val; drm_dbg_kms(&dev_priv->drm, "\n"); @@ -987,21 
+934,16 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state, HS_IO_CTRL_SELECT); /* Add MIPI IO reset programming for modeset */ - val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON); - intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, - val & ~MIPIO_RST_CTRL); + intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, MIPIO_RST_CTRL, 0); } if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { bxt_dsi_pll_disable(encoder); } else { - u32 val; - vlv_dsi_pll_disable(encoder); - val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv)); - val &= ~DPOUNIT_CLOCK_GATE_DISABLE; - intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val); + intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv), + DPOUNIT_CLOCK_GATE_DISABLE, 0); } /* Assert reset */ @@ -1058,7 +1000,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, */ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && port == PORT_C) - enabled = intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE; + enabled = intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE; /* Try command mode if video mode not enabled */ if (!enabled) { @@ -1432,11 +1374,8 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { enum pipe pipe = crtc->pipe; - tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); - tmp &= ~BXT_PIPE_SELECT_MASK; - - tmp |= BXT_PIPE_SELECT(pipe); - intel_de_write(dev_priv, MIPI_CTRL(port), tmp); + intel_de_rmw(dev_priv, MIPI_CTRL(port), + BXT_PIPE_SELECT_MASK, BXT_PIPE_SELECT(pipe)); } /* XXX: why here, why like this? handling in irq handler?! */ @@ -1605,7 +1544,6 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - u32 val; if (IS_GEMINILAKE(dev_priv)) return; @@ -1620,9 +1558,7 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder) vlv_dsi_reset_clocks(encoder, port); intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP); - val = intel_de_read(dev_priv, MIPI_DSI_FUNC_PRG(port)); - val &= ~VID_MODE_FORMAT_MASK; - intel_de_write(dev_priv, MIPI_DSI_FUNC_PRG(port), val); + intel_de_rmw(dev_priv, MIPI_DSI_FUNC_PRG(port), VID_MODE_FORMAT_MASK, 0); intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x1); } diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c index af7402127cd9..b697badbbe71 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c @@ -302,13 +302,10 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) void bxt_dsi_pll_disable(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 val; drm_dbg_kms(&dev_priv->drm, "\n"); - val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE); - val &= ~BXT_DSI_PLL_DO_ENABLE; - intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val); + intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_DO_ENABLE, 0); /* * PLL lock should deassert within 200us. 
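Every read-modify-write conversion in this patch collapses the same three-step sequence into one intel_de_rmw() call. The sketch below is inferred purely from the replaced sequences, not taken from the helper's real definition, and sketch_de_rmw is a made-up name for illustration:

	static inline void sketch_de_rmw(struct drm_i915_private *dev_priv,
					 i915_reg_t reg, u32 clear, u32 set)
	{
		/* read the current register value */
		u32 val = intel_de_read(dev_priv, reg);

		val &= ~clear;	/* drop the bits named in @clear */
		val |= set;	/* raise the bits named in @set */

		/* write the merged value back */
		intel_de_write(dev_priv, reg, val);
	}

So intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_DO_ENABLE, 0) in the hunk above clears only the PLL-enable bit, while intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE) in the next hunk sets it, each leaving every other bit untouched.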
@@ -542,7 +539,6 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
-	u32 val;
 
 	drm_dbg_kms(&dev_priv->drm, "\n");
 
@@ -559,9 +555,7 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
 	}
 
 	/* Enable DSI PLL */
-	val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
-	val |= BXT_DSI_PLL_DO_ENABLE;
-	intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val);
+	intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE);
 
 	/* Timeout and fail if PLL not locked */
 	if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE,
@@ -589,13 +583,9 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
 		tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
 		intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp);
 	} else {
-		tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV1);
-		tmp &= ~GLK_TX_ESC_CLK_DIV1_MASK;
-		intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1, tmp);
+		intel_de_rmw(dev_priv, MIPIO_TXESC_CLK_DIV1, GLK_TX_ESC_CLK_DIV1_MASK, 0);
 
-		tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV2);
-		tmp &= ~GLK_TX_ESC_CLK_DIV2_MASK;
-		intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2, tmp);
+		intel_de_rmw(dev_priv, MIPIO_TXESC_CLK_DIV2, GLK_TX_ESC_CLK_DIV2_MASK, 0);
 	}
 	intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 7420276827a5..341b94672abc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -472,7 +472,7 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
 	struct ttm_placement place = {};
 	int ret;
 
-	if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM)
+	if (!bo->ttm || i915_ttm_cpu_maps_iomem(bo->resource))
 		return 0;
 
 	GEM_BUG_ON(!i915_tt->is_shmem);
@@ -511,7 +511,13 @@ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
 {
 	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 
-	if (bo->resource && !i915_ttm_is_ghost_object(bo)) {
+	/*
+	 * This gets called twice by ttm; as long as we have a ttm resource
+	 * or a ttm_tt we can still safely call this. Due to pipeline-gutting,
+	 * we may have a NULL bo->resource, but in that case we should always
+	 * have a ttm alive (e.g. if the pages are swapped out).
+ */ + if ((bo->resource || bo->ttm) && !i915_ttm_is_ghost_object(bo)) { __i915_gem_object_pages_fini(obj); i915_ttm_free_cached_io_rsgt(obj); } @@ -1067,11 +1073,12 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) .interruptible = true, .no_wait_gpu = true, /* should be idle already */ }; + int err; GEM_BUG_ON(!bo->ttm || !(bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED)); - ret = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx); - if (ret) { + err = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx); + if (err) { dma_resv_unlock(bo->base.resv); return VM_FAULT_SIGBUS; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h index 2a94a99ef76b..f8f6bed1b297 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h @@ -98,7 +98,7 @@ static inline bool i915_ttm_gtt_binds_lmem(struct ttm_resource *mem) static inline bool i915_ttm_cpu_maps_iomem(struct ttm_resource *mem) { /* Once / if we support GGTT, this is also false for cached ttm_tts */ - return mem->mem_type != I915_PL_SYSTEM; + return mem && mem->mem_type != I915_PL_SYSTEM; } bool i915_ttm_resource_mappable(struct ttm_resource *res); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c index 76dd9e5e1a8b..d030182ca176 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c @@ -711,6 +711,10 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst, assert_object_held(dst); assert_object_held(src); + + if (GEM_WARN_ON(!src_bo->resource || !dst_bo->resource)) + return -EINVAL; + i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); ret = dma_resv_reserve_fences(src_bo->base.resv, 1); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c index 7e67742bc65e..dfe39c8e74d8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c @@ -53,7 +53,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply, unsigned int flags; int err = 0; - if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup) + if (!i915_ttm_cpu_maps_iomem(bo->resource) || obj->ttm.backup) return 0; if (pm_apply->allow_gpu && i915_gem_object_evictable(obj)) @@ -187,7 +187,10 @@ static int i915_ttm_restore(struct i915_gem_apply_to_region *apply, return err; /* Content may have been swapped. 
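+	 * Under pipeline-gutting the backup object may also have lost its
+	 * placement entirely (NULL bo->resource); in that case validate it
+	 * back to system placement first, as done below, so that its
+	 * ttm_tt can be populated again.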
*/ - err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx); + if (!backup_bo->resource) + err = ttm_bo_validate(backup_bo, i915_ttm_sys_placement(), &ctx); + if (!err) + err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx); if (!err) { err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu, false); diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index f0dbfc434e07..f4f694f12907 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -28,7 +28,6 @@ #include "intel_migrate.h" #include "intel_mocs.h" #include "intel_pci_config.h" -#include "intel_pm.h" #include "intel_rc6.h" #include "intel_renderstate.h" #include "intel_rps.h" diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index cef3d6f5c34e..85ae7dc079f2 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -17,7 +17,6 @@ #include "intel_gt_print.h" #include "intel_gt_requests.h" #include "intel_llc.h" -#include "intel_pm.h" #include "intel_rc6.h" #include "intel_rps.h" #include "intel_wakeref.h" diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h index be0f6e305c88..df07e1e799e3 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h @@ -9,8 +9,6 @@ #include "i915_reg_defs.h" #include "display/intel_display_reg_defs.h" /* VLV_DISPLAY_BASE */ -#define MCR_REG(offset) ((const i915_mcr_reg_t){ .reg = (offset) }) - /* * The perf control registers are technically multicast registers, but the * driver never needs to read/write them directly; we only use them to build diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c index f3ad93db0b21..89fdfc67f8d1 100644 --- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c +++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c @@ -158,7 +158,7 @@ static const struct intel_memory_region_ops intel_region_lmem_ops = { static bool get_legacy_lowmem_region(struct intel_uncore *uncore, u64 *start, u32 *size) { - if (!IS_DG1_GRAPHICS_STEP(uncore->i915, STEP_A0, STEP_C0)) + if (!IS_DG1(uncore->i915)) return false; *start = 0; diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index f5d7b5126433..4d0dc9de23f9 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -1677,7 +1677,6 @@ static void vlv_init_gpll_ref_freq(struct intel_rps *rps) static void vlv_rps_init(struct intel_rps *rps) { struct drm_i915_private *i915 = rps_to_i915(rps); - u32 val; vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_PUNIT) | @@ -1686,21 +1685,6 @@ static void vlv_rps_init(struct intel_rps *rps) vlv_init_gpll_ref_freq(rps); - val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); - switch ((val >> 6) & 3) { - case 0: - case 1: - i915->mem_freq = 800; - break; - case 2: - i915->mem_freq = 1066; - break; - case 3: - i915->mem_freq = 1333; - break; - } - drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq); - rps->max_freq = vlv_rps_max_freq(rps); rps->rp0_freq = rps->max_freq; drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n", @@ -1727,7 +1711,6 @@ static void vlv_rps_init(struct intel_rps *rps) static void chv_rps_init(struct intel_rps *rps) { struct drm_i915_private *i915 = rps_to_i915(rps); - u32 val; vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_PUNIT) | @@ -1736,18 +1719,6 @@ static void chv_rps_init(struct intel_rps *rps) vlv_init_gpll_ref_freq(rps); - val = 
vlv_cck_read(i915, CCK_FUSE_REG); - - switch ((val >> 2) & 0x7) { - case 3: - i915->mem_freq = 2000; - break; - default: - i915->mem_freq = 1600; - break; - } - drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq); - rps->max_freq = chv_rps_max_freq(rps); rps->rp0_freq = rps->max_freq; drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n", diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 485c5cc5d0f9..8859eb118510 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1470,43 +1470,12 @@ gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) } static void -tgl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) -{ - struct drm_i915_private *i915 = gt->i915; - - gen12_gt_workarounds_init(gt, wal); - - /* Wa_1409420604:tgl */ - if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) - wa_mcr_write_or(wal, - SUBSLICE_UNIT_LEVEL_CLKGATE2, - CPSSUNIT_CLKGATE_DIS); - - /* Wa_1607087056:tgl also know as BUG:1409180338 */ - if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) - wa_write_or(wal, - GEN11_SLICE_UNIT_LEVEL_CLKGATE, - L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); - - /* Wa_1408615072:tgl[a0] */ - if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) - wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, - VSUNIT_CLKGATE_DIS_TGL); -} - -static void dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { struct drm_i915_private *i915 = gt->i915; gen12_gt_workarounds_init(gt, wal); - /* Wa_1607087056:dg1 */ - if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) - wa_write_or(wal, - GEN11_SLICE_UNIT_LEVEL_CLKGATE, - L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); - /* Wa_1409420604:dg1 */ if (IS_DG1(i915)) wa_mcr_write_or(wal, @@ -1779,8 +1748,6 @@ gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal) xehpsdv_gt_workarounds_init(gt, wal); else if (IS_DG1(i915)) dg1_gt_workarounds_init(gt, wal); - else if (IS_TIGERLAKE(i915)) - tgl_gt_workarounds_init(gt, wal); else if (GRAPHICS_VER(i915) == 12) gen12_gt_workarounds_init(gt, wal); else if (GRAPHICS_VER(i915) == 11) @@ -2193,20 +2160,6 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine) } } -static void dg1_whitelist_build(struct intel_engine_cs *engine) -{ - struct i915_wa_list *w = &engine->whitelist; - - tgl_whitelist_build(engine); - - /* GEN:BUG:1409280441:dg1 */ - if (IS_DG1_GRAPHICS_STEP(engine->i915, STEP_A0, STEP_B0) && - (engine->class == RENDER_CLASS || - engine->class == COPY_ENGINE_CLASS)) - whitelist_reg_ext(w, RING_ID(engine->mmio_base), - RING_FORCE_TO_NONPRIV_ACCESS_RD); -} - static void xehpsdv_whitelist_build(struct intel_engine_cs *engine) { allow_read_ctx_timestamp(engine); @@ -2286,8 +2239,6 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine) dg2_whitelist_build(engine); else if (IS_XEHPSDV(i915)) xehpsdv_whitelist_build(engine); - else if (IS_DG1(i915)) - dg1_whitelist_build(engine); else if (GRAPHICS_VER(i915) == 12) tgl_whitelist_build(engine); else if (GRAPHICS_VER(i915) == 11) @@ -2482,27 +2433,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) true); } - if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) || - IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) { - /* - * Wa_1607138336:tgl[a0],dg1[a0] - * Wa_1607063988:tgl[a0],dg1[a0] - */ - wa_write_or(wal, - GEN9_CTX_PREEMPT_REG, - GEN12_DISABLE_POSH_BUSY_FF_DOP_CG); - } - - if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) { - /* - * Wa_1606679103:tgl - * 
(see also Wa_1606682166:icl) - */ - wa_write_or(wal, - GEN7_SARCHKMD, - GEN7_DISABLE_SAMPLER_PREFETCH); - } - if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { /* Wa_1606931601:tgl,rkl,dg1,adl-s,adl-p */ @@ -2532,30 +2462,22 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) } if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || - IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { - /* Wa_1409804808:tgl,rkl,dg1[a0],adl-s,adl-p */ + /* Wa_1409804808 */ wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, GEN12_PUSH_CONST_DEREF_HOLD_DIS); - /* - * Wa_1409085225:tgl - * Wa_14010229206:tgl,rkl,dg1[a0],adl-s,adl-p - */ + /* Wa_14010229206 */ wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH); } - if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) || - IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) { + if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) { /* - * Wa_1607030317:tgl - * Wa_1607186500:tgl - * Wa_1607297627:tgl,rkl,dg1[a0],adlp + * Wa_1607297627 * * On TGL and RKL there are multiple entries for this WA in the * BSpec; some indicate this is an A0-only WA, others indicate * it applies to all steppings so we trust the "all steppings." - * For DG1 this only applies to A0. */ wa_masked_en(wal, RING_PSMI_CTL(RENDER_RING_BASE), diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c index cfd736d88939..779fadcec7c4 100644 --- a/drivers/gpu/drm/i915/gt/selftest_llc.c +++ b/drivers/gpu/drm/i915/gt/selftest_llc.c @@ -3,7 +3,6 @@ * Copyright © 2019 Intel Corporation */ -#include "intel_pm.h" /* intel_gpu_freq() */ #include "selftest_llc.h" #include "intel_rps.h" diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 4d898b14de93..e0c5dfb788eb 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -63,7 +63,7 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; - if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE)) + if (!(vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_EDP)) & TRANSCONF_ENABLE)) return 0; if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE)) @@ -79,7 +79,7 @@ int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe) pipe < PIPE_A || pipe >= I915_MAX_PIPES)) return -EINVAL; - if (vgpu_vreg_t(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE) + if (vgpu_vreg_t(vgpu, TRANSCONF(pipe)) & TRANSCONF_ENABLE) return 1; if (edp_pipe_is_enabled(vgpu) && @@ -187,8 +187,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) GEN8_DE_PORT_HOTPLUG(HPD_PORT_C)); for_each_pipe(dev_priv, pipe) { - vgpu_vreg_t(vgpu, PIPECONF(pipe)) &= - ~(PIPECONF_ENABLE | PIPECONF_STATE_ENABLE); + vgpu_vreg_t(vgpu, TRANSCONF(pipe)) &= + ~(TRANSCONF_ENABLE | TRANSCONF_STATE_ENABLE); vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISP_ENABLE; vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE_MASK; @@ -248,8 +248,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) * TRANSCODER_A can be enabled. PORT_x depends on the input of * setup_virtual_dp_monitor. 
*/ - vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; - vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_STATE_ENABLE; + vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_A)) |= TRANSCONF_ENABLE; + vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_A)) |= TRANSCONF_STATE_ENABLE; /* * Golden M/N are calculated based on: @@ -506,7 +506,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE; } - vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; + vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_A)) |= TRANSCONF_ENABLE; } static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num) @@ -584,7 +584,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, * @turnon: Turn ON/OFF vblank_timer * * This function is used to turn on/off or update the per-vGPU vblank_timer - * when PIPECONF is enabled or disabled. vblank_timer period is also updated + * when TRANSCONF is enabled or disabled. vblank_timer period is also updated * if guest changed the refresh rate. * */ diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 735fc83e7026..3c8e0d198c4f 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -666,8 +666,8 @@ static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu) link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)); /* Get H/V total from transcoder timing */ - htotal = (vgpu_vreg_t(vgpu, HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT); - vtotal = (vgpu_vreg_t(vgpu, VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT); + htotal = (vgpu_vreg_t(vgpu, TRANS_HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT); + vtotal = (vgpu_vreg_t(vgpu, TRANS_VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT); if (dp_br && link_n && htotal && vtotal) { u64 pixel_clk = 0; @@ -697,12 +697,12 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, write_vreg(vgpu, offset, p_data, bytes); data = vgpu_vreg(vgpu, offset); - if (data & PIPECONF_ENABLE) { - vgpu_vreg(vgpu, offset) |= PIPECONF_STATE_ENABLE; + if (data & TRANSCONF_ENABLE) { + vgpu_vreg(vgpu, offset) |= TRANSCONF_STATE_ENABLE; vgpu_update_refresh_rate(vgpu); vgpu_update_vblank_emulation(vgpu, true); } else { - vgpu_vreg(vgpu, offset) &= ~PIPECONF_STATE_ENABLE; + vgpu_vreg(vgpu, offset) &= ~TRANSCONF_STATE_ENABLE; vgpu_update_vblank_emulation(vgpu, false); } return 0; @@ -2262,10 +2262,10 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); /* display */ - MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write); - MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write); - MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write); - MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write); + MMIO_DH(TRANSCONF(TRANSCODER_A), D_ALL, NULL, pipeconf_mmio_write); + MMIO_DH(TRANSCONF(TRANSCODER_B), D_ALL, NULL, pipeconf_mmio_write); + MMIO_DH(TRANSCONF(TRANSCODER_C), D_ALL, NULL, pipeconf_mmio_write); + MMIO_DH(TRANSCONF(TRANSCODER_EDP), D_ALL, NULL, pipeconf_mmio_write); MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write); MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL, reg50080_mmio_write); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 45773ce1deac..16011c0286ad 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -52,7 +52,6 @@ #include "i915_irq.h" #include "i915_scheduler.h" 
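For reference, the refresh-rate update in vgpu_update_refresh_rate() above recovers the guest's pixel clock from the DP link rate scaled by the link M/N pair, then divides by the pixels per frame taken from the (now TRANS_-prefixed) transcoder timing registers. A minimal standalone sketch of that arithmetic, with made-up values and the usual assumption that the timing fields store total minus one; the names and units here are illustrative, not the driver's exact code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* All values below are invented for the example. */
	uint64_t dp_br  = 270000000ULL;		   /* DP link clock, Hz (assumed) */
	uint64_t link_m = 288358, link_n = 524288; /* example M/N pair */
	uint64_t htotal = 2199, vtotal = 1124;	   /* register fields: total - 1 */

	/* Pixel clock recovered from the link clock scaled by M/N. */
	uint64_t pixel_clk = dp_br * link_m / link_n;

	/* Refresh rate = pixel clock / pixels per frame, rounded to nearest. */
	uint64_t pixels  = (htotal + 1) * (vtotal + 1);
	uint64_t refresh = (pixel_clk + pixels / 2) / pixels;

	printf("pixel clock ~%llu Hz, refresh ~%llu Hz\n",
	       (unsigned long long)pixel_clk, (unsigned long long)refresh);
	return 0;
}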
#include "intel_mchbar_regs.h" -#include "intel_pm.h" static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) { diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index cf1c0970ecb4..db7a86def7e2 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -167,6 +167,8 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv) pre |= IS_KABYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1; pre |= IS_GEMINILAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x3; pre |= IS_ICELAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x7; + pre |= IS_TIGERLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1; + pre |= IS_DG1(dev_priv) && INTEL_REVID(dev_priv) < 0x1; if (pre) { drm_err(&dev_priv->drm, "This is a pre-production stepping. " @@ -248,10 +250,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) /* This must be called before any calls to HAS_PCH_* */ intel_detect_pch(dev_priv); - intel_pm_setup(dev_priv); - ret = intel_power_domains_init(dev_priv); - if (ret < 0) - goto err_gem; intel_irq_init(dev_priv); intel_init_display_hooks(dev_priv); intel_init_clock_gating_hooks(dev_priv); @@ -260,10 +258,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) return 0; -err_gem: - i915_gem_cleanup_early(dev_priv); - intel_gt_driver_late_release_all(dev_priv); - i915_drm_clients_fini(&dev_priv->clients); err_rootgt: intel_region_ttm_device_fini(dev_priv); err_ttm: @@ -936,7 +930,9 @@ static int i915_driver_open(struct drm_device *dev, struct drm_file *file) */ static void i915_driver_lastclose(struct drm_device *dev) { - intel_fbdev_restore_mode(dev); + struct drm_i915_private *i915 = to_i915(dev); + + intel_fbdev_restore_mode(i915); vga_switcheroo_process_delayed_switch(); } @@ -1002,7 +998,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915) intel_suspend_encoders(i915); intel_shutdown_encoders(i915); - intel_dmc_ucode_suspend(i915); + intel_dmc_suspend(i915); i915_gem_suspend(i915); @@ -1032,6 +1028,13 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv) return false; } +static void i915_drm_complete(struct drm_device *dev) +{ + struct drm_i915_private *i915 = to_i915(dev); + + intel_pxp_resume_complete(i915->pxp); +} + static int i915_drm_prepare(struct drm_device *dev) { struct drm_i915_private *i915 = to_i915(dev); @@ -1072,8 +1075,6 @@ static int i915_drm_suspend(struct drm_device *dev) intel_suspend_encoders(dev_priv); - intel_suspend_hw(dev_priv); - /* Must be called before GGTT is suspended. */ intel_dpt_suspend(dev_priv); i915_ggtt_suspend(to_gt(dev_priv)->ggtt); @@ -1087,7 +1088,7 @@ static int i915_drm_suspend(struct drm_device *dev) dev_priv->suspend_count++; - intel_dmc_ucode_suspend(dev_priv); + intel_dmc_suspend(dev_priv); enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); @@ -1208,7 +1209,7 @@ static int i915_drm_resume(struct drm_device *dev) /* Must be called after GGTT is resumed. 
*/ intel_dpt_resume(dev_priv); - intel_dmc_ucode_resume(dev_priv); + intel_dmc_resume(dev_priv); i915_restore_display(dev_priv); intel_pps_unlock_regs_wa(dev_priv); @@ -1232,8 +1233,6 @@ static int i915_drm_resume(struct drm_device *dev) i915_gem_resume(dev_priv); - intel_pxp_resume(dev_priv->pxp); - intel_modeset_init_hw(dev_priv); intel_init_clock_gating(dev_priv); intel_hpd_init(dev_priv); @@ -1425,6 +1424,16 @@ static int i915_pm_resume(struct device *kdev) return i915_drm_resume(&i915->drm); } +static void i915_pm_complete(struct device *kdev) +{ + struct drm_i915_private *i915 = kdev_to_i915(kdev); + + if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) + return; + + i915_drm_complete(&i915->drm); +} + /* freeze: before creating the hibernation_image */ static int i915_pm_freeze(struct device *kdev) { @@ -1645,6 +1654,7 @@ const struct dev_pm_ops i915_pm_ops = { .suspend_late = i915_pm_suspend_late, .resume_early = i915_pm_resume_early, .resume = i915_pm_resume, + .complete = i915_pm_complete, /* * S4 event handlers diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4295306487c7..6254aa977398 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -580,6 +580,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N) #define IS_ADLP_RPLP(dev_priv) \ IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPL) +#define IS_ADLP_RPLU(dev_priv) \ + IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPLU) #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) #define IS_BDW_ULT(dev_priv) \ @@ -653,22 +655,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, (IS_TIGERLAKE(__i915) && \ IS_DISPLAY_STEP(__i915, since, until)) -#define IS_TGL_UY_GRAPHICS_STEP(__i915, since, until) \ - (IS_TGL_UY(__i915) && \ - IS_GRAPHICS_STEP(__i915, since, until)) - -#define IS_TGL_GRAPHICS_STEP(__i915, since, until) \ - (IS_TIGERLAKE(__i915) && !IS_TGL_UY(__i915)) && \ - IS_GRAPHICS_STEP(__i915, since, until)) - #define IS_RKL_DISPLAY_STEP(p, since, until) \ (IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until)) -#define IS_DG1_GRAPHICS_STEP(p, since, until) \ - (IS_DG1(p) && IS_GRAPHICS_STEP(p, since, until)) -#define IS_DG1_DISPLAY_STEP(p, since, until) \ - (IS_DG1(p) && IS_DISPLAY_STEP(p, since, until)) - #define IS_ADLS_DISPLAY_STEP(__i915, since, until) \ (IS_ALDERLAKE_S(__i915) && \ IS_DISPLAY_STEP(__i915, since, until)) @@ -876,7 +865,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, */ #define HAS_64K_PAGES(dev_priv) (INTEL_INFO(dev_priv)->has_64k_pages) -#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc) +#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc) +#define HAS_SAGV(dev_priv) (DISPLAY_VER(dev_priv) >= 9 && !IS_LP(dev_priv)) #define HAS_REGION(i915, i) (RUNTIME_INFO(i915)->memory_regions & (i)) #define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM) diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c index 1225bc432f0d..596dd2c07010 100644 --- a/drivers/gpu/drm/i915/i915_hwmon.c +++ b/drivers/gpu/drm/i915/i915_hwmon.c @@ -99,20 +99,6 @@ hwm_field_read_and_scale(struct hwm_drvdata *ddat, i915_reg_t rgadr, return mul_u64_u32_shr(reg_value, scale_factor, nshift); } -static void -hwm_field_scale_and_write(struct hwm_drvdata *ddat, i915_reg_t rgadr, - int nshift, unsigned int scale_factor, long lval) -{ - u32 nval; - - /* 
Computation in 64-bits to avoid overflow. Round to nearest. */ - nval = DIV_ROUND_CLOSEST_ULL((u64)lval << nshift, scale_factor); - - hwm_locked_with_pm_intel_uncore_rmw(ddat, rgadr, - PKG_PWR_LIM_1, - REG_FIELD_PREP(PKG_PWR_LIM_1, nval)); -} - /* * hwm_energy - Obtain energy value * @@ -232,11 +218,15 @@ hwm_power1_max_interval_store(struct device *dev, /* val in hw units */ val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME); /* Convert to 1.x * power(2,y) */ - if (!val) - return -EINVAL; - y = ilog2(val); - /* x = (val - (1 << y)) >> (y - 2); */ - x = (val - (1ul << y)) << x_w >> y; + if (!val) { + /* Avoid ilog2(0) */ + y = 0; + x = 0; + } else { + y = ilog2(val); + /* x = (val - (1 << y)) >> (y - 2); */ + x = (val - (1ul << y)) << x_w >> y; + } rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y); @@ -392,6 +382,22 @@ hwm_power_max_read(struct hwm_drvdata *ddat, long *val) } static int +hwm_power_max_write(struct hwm_drvdata *ddat, long val) +{ + struct i915_hwmon *hwmon = ddat->hwmon; + u32 nval; + + /* Computation in 64-bits to avoid overflow. Round to nearest. */ + nval = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_power, SF_POWER); + nval = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, nval); + + hwm_locked_with_pm_intel_uncore_rmw(ddat, hwmon->rg.pkg_rapl_limit, + PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, + nval); + return 0; +} + +static int hwm_power_read(struct hwm_drvdata *ddat, u32 attr, int chan, long *val) { struct i915_hwmon *hwmon = ddat->hwmon; @@ -425,16 +431,11 @@ hwm_power_read(struct hwm_drvdata *ddat, u32 attr, int chan, long *val) static int hwm_power_write(struct hwm_drvdata *ddat, u32 attr, int chan, long val) { - struct i915_hwmon *hwmon = ddat->hwmon; u32 uval; switch (attr) { case hwmon_power_max: - hwm_field_scale_and_write(ddat, - hwmon->rg.pkg_rapl_limit, - hwmon->scl_shift_power, - SF_POWER, val); - return 0; + return hwm_power_max_write(ddat, val); case hwmon_power_crit: uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT, SF_POWER); return hwm_pcode_write_i1(ddat->uncore->i915, uval); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 240d5e198904..31271c30a8cf 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -52,7 +52,6 @@ #include "i915_driver.h" #include "i915_drv.h" #include "i915_irq.h" -#include "intel_pm.h" /** * DOC: interrupt handling @@ -81,8 +80,7 @@ static inline void pmu_irq_stats(struct drm_i915_private *i915, } typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val); -typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915, - enum hpd_pin pin); +typedef u32 (*hotplug_enables_func)(struct intel_encoder *encoder); static const u32 hpd_ilk[HPD_NUM_PINS] = { [HPD_PORT_A] = DE_DP_A_HOTPLUG, @@ -199,6 +197,8 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) hpd->hpd = hpd_gen11; else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) hpd->hpd = hpd_bxt; + else if (DISPLAY_VER(dev_priv) == 9) + hpd->hpd = NULL; /* no north HPD on SKL */ else if (DISPLAY_VER(dev_priv) >= 8) hpd->hpd = hpd_bdw; else if (DISPLAY_VER(dev_priv) >= 7) @@ -884,7 +884,7 @@ static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915, u32 hotplug = 0; for_each_intel_encoder(&i915->drm, encoder) - hotplug |= hotplug_enables(i915, encoder->hpd_pin); + hotplug |= hotplug_enables(encoder); return hotplug; } @@ -2835,10 +2835,11 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv) 
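The interval encoding in hwm_power1_max_interval_store() above packs the hardware time value as 1.x * 2^y: y is the position of the leading one (ilog2) and x holds the top fraction bits beneath it. A standalone sketch of just that conversion, assuming a 2-bit fraction width as the driver's "(y - 2)" comment suggests; ilog2 is reimplemented locally so the example is self-contained:

#include <stdint.h>
#include <stdio.h>

/* floor(log2(v)) for v != 0; stands in for the kernel's ilog2(). */
static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int y = 0;

	while (v >>= 1)
		y++;
	return y;
}

int main(void)
{
	const unsigned int x_w = 2;	/* fraction width (assumed, per the comment) */
	uint32_t val = 28;		/* example input, in hardware time units */

	/* val == 0 would be encoded as x = y = 0, as in the patched code. */
	unsigned int y = ilog2_u32(val);		/* 28 -> y = 4 */
	uint32_t x = ((val - (1u << y)) << x_w) >> y;	/* 28 -> x = 3, i.e. .75 */

	/* (1 + 3/4) * 2^4 == 28: the encoding round-trips for this value. */
	printf("val=%u -> (1 + %u/4) * 2^%u\n", val, x, y);
	return 0;
}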
spin_unlock_irq(&dev_priv->irq_lock); } -static u32 ibx_hotplug_enables(struct drm_i915_private *i915, - enum hpd_pin pin) +static u32 ibx_hotplug_enables(struct intel_encoder *encoder) { - switch (pin) { + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + switch (encoder->hpd_pin) { case HPD_PORT_A: /* * When CPU and PCH are on the same package, port A @@ -2890,31 +2891,29 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) ibx_hpd_detection_setup(dev_priv); } -static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915, - enum hpd_pin pin) +static u32 icp_ddi_hotplug_enables(struct intel_encoder *encoder) { - switch (pin) { + switch (encoder->hpd_pin) { case HPD_PORT_A: case HPD_PORT_B: case HPD_PORT_C: case HPD_PORT_D: - return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin); + return SHOTPLUG_CTL_DDI_HPD_ENABLE(encoder->hpd_pin); default: return 0; } } -static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915, - enum hpd_pin pin) +static u32 icp_tc_hotplug_enables(struct intel_encoder *encoder) { - switch (pin) { + switch (encoder->hpd_pin) { case HPD_PORT_TC1: case HPD_PORT_TC2: case HPD_PORT_TC3: case HPD_PORT_TC4: case HPD_PORT_TC5: case HPD_PORT_TC6: - return ICP_TC_HPD_ENABLE(pin); + return ICP_TC_HPD_ENABLE(encoder->hpd_pin); default: return 0; } @@ -2958,17 +2957,16 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) icp_tc_hpd_detection_setup(dev_priv); } -static u32 gen11_hotplug_enables(struct drm_i915_private *i915, - enum hpd_pin pin) +static u32 gen11_hotplug_enables(struct intel_encoder *encoder) { - switch (pin) { + switch (encoder->hpd_pin) { case HPD_PORT_TC1: case HPD_PORT_TC2: case HPD_PORT_TC3: case HPD_PORT_TC4: case HPD_PORT_TC5: case HPD_PORT_TC6: - return GEN11_HOTPLUG_CTL_ENABLE(pin); + return GEN11_HOTPLUG_CTL_ENABLE(encoder->hpd_pin); default: return 0; } @@ -3031,10 +3029,9 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) icp_hpd_irq_setup(dev_priv); } -static u32 spt_hotplug_enables(struct drm_i915_private *i915, - enum hpd_pin pin) +static u32 spt_hotplug_enables(struct intel_encoder *encoder) { - switch (pin) { + switch (encoder->hpd_pin) { case HPD_PORT_A: return PORTA_HOTPLUG_ENABLE; case HPD_PORT_B: @@ -3048,10 +3045,9 @@ static u32 spt_hotplug_enables(struct drm_i915_private *i915, } } -static u32 spt_hotplug2_enables(struct drm_i915_private *i915, - enum hpd_pin pin) +static u32 spt_hotplug2_enables(struct intel_encoder *encoder) { - switch (pin) { + switch (encoder->hpd_pin) { case HPD_PORT_E: return PORTE_HOTPLUG_ENABLE; default: @@ -3094,10 +3090,9 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) spt_hpd_detection_setup(dev_priv); } -static u32 ilk_hotplug_enables(struct drm_i915_private *i915, - enum hpd_pin pin) +static u32 ilk_hotplug_enables(struct intel_encoder *encoder) { - switch (pin) { + switch (encoder->hpd_pin) { case HPD_PORT_A: return DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms; @@ -3135,25 +3130,24 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) ibx_hpd_irq_setup(dev_priv); } -static u32 bxt_hotplug_enables(struct drm_i915_private *i915, - enum hpd_pin pin) +static u32 bxt_hotplug_enables(struct intel_encoder *encoder) { u32 hotplug; - switch (pin) { + switch (encoder->hpd_pin) { case HPD_PORT_A: hotplug = PORTA_HOTPLUG_ENABLE; - if (intel_bios_is_port_hpd_inverted(i915, PORT_A)) + if (intel_bios_encoder_hpd_invert(encoder->devdata)) hotplug |= BXT_DDIA_HPD_INVERT; return hotplug; case HPD_PORT_B: hotplug = 
PORTB_HOTPLUG_ENABLE; - if (intel_bios_is_port_hpd_inverted(i915, PORT_B)) + if (intel_bios_encoder_hpd_invert(encoder->devdata)) hotplug |= BXT_DDIB_HPD_INVERT; return hotplug; case HPD_PORT_C: hotplug = PORTC_HOTPLUG_ENABLE; - if (intel_bios_is_port_hpd_inverted(i915, PORT_C)) + if (intel_bios_encoder_hpd_invert(encoder->devdata)) hotplug |= BXT_DDIC_HPD_INVERT; return hotplug; default: @@ -3471,15 +3465,33 @@ static void i8xx_irq_reset(struct drm_i915_private *dev_priv) dev_priv->irq_mask = ~0u; } +static u32 i9xx_error_mask(struct drm_i915_private *i915) +{ + /* + * On gen2/3 FBC generates (seemingly spurious) + * display INVALID_GTT/INVALID_GTT_PTE table errors. + * + * Also gen3 bspec has this to say: + * "DISPA_INVALID_GTT_PTE + " [DevNapa] : Reserved. This bit does not reflect the page + " table error for the display plane A." + * + * Unfortunately we can't mask off individual PGTBL_ER bits, + * so we just have to mask off all page table errors via EMR. + */ + if (HAS_FBC(i915)) + return ~I915_ERROR_MEMORY_REFRESH; + else + return ~(I915_ERROR_PAGE_TABLE | + I915_ERROR_MEMORY_REFRESH); +} + static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; u16 enable_mask; - intel_uncore_write16(uncore, - EMR, - ~(I915_ERROR_PAGE_TABLE | - I915_ERROR_MEMORY_REFRESH)); + intel_uncore_write16(uncore, EMR, i9xx_error_mask(dev_priv)); /* Unmask the interrupts that we always want on. */ dev_priv->irq_mask = @@ -3510,9 +3522,7 @@ static void i8xx_error_irq_ack(struct drm_i915_private *i915, u16 emr; *eir = intel_uncore_read16(uncore, EIR); - - if (*eir) - intel_uncore_write16(uncore, EIR, *eir); + intel_uncore_write16(uncore, EIR, *eir); *eir_stuck = intel_uncore_read16(uncore, EIR); if (*eir_stuck == 0) @@ -3541,6 +3551,9 @@ static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv, if (eir_stuck) drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n", eir_stuck); + + drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n", + intel_uncore_read(&dev_priv->uncore, PGTBL_ER)); } static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, @@ -3548,7 +3561,8 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, { u32 emr; - *eir = intel_uncore_rmw(&dev_priv->uncore, EIR, 0, 0); + *eir = intel_uncore_read(&dev_priv->uncore, EIR); + intel_uncore_write(&dev_priv->uncore, EIR, *eir); *eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR); if (*eir_stuck == 0) @@ -3564,7 +3578,8 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, * (or by a GPU reset) so we mask any bit that * remains set. */ - emr = intel_uncore_rmw(&dev_priv->uncore, EMR, ~0, 0xffffffff); + emr = intel_uncore_read(&dev_priv->uncore, EMR); + intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff); intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck); } @@ -3576,6 +3591,9 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, if (eir_stuck) drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n", eir_stuck); + + drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n", + intel_uncore_read(&dev_priv->uncore, PGTBL_ER)); } static irqreturn_t i8xx_irq_handler(int irq, void *arg) @@ -3645,8 +3663,7 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv) struct intel_uncore *uncore = &dev_priv->uncore; u32 enable_mask; - intel_uncore_write(uncore, EMR, ~(I915_ERROR_PAGE_TABLE | - I915_ERROR_MEMORY_REFRESH)); + intel_uncore_write(uncore, EMR, i9xx_error_mask(dev_priv)); /* Unmask the interrupts that we always want on. 
*/ dev_priv->irq_mask = @@ -3749,26 +3766,31 @@ static void i965_irq_reset(struct drm_i915_private *dev_priv) dev_priv->irq_mask = ~0u; } -static void i965_irq_postinstall(struct drm_i915_private *dev_priv) +static u32 i965_error_mask(struct drm_i915_private *i915) { - struct intel_uncore *uncore = &dev_priv->uncore; - u32 enable_mask; - u32 error_mask; - /* * Enable some error detection, note the instruction error mask * bit is reserved, so we leave it masked. + * + * i965 FBC no longer generates spurious GTT errors, + * so we can always enable the page table errors. */ - if (IS_G4X(dev_priv)) { - error_mask = ~(GM45_ERROR_PAGE_TABLE | - GM45_ERROR_MEM_PRIV | - GM45_ERROR_CP_PRIV | - I915_ERROR_MEMORY_REFRESH); - } else { - error_mask = ~(I915_ERROR_PAGE_TABLE | - I915_ERROR_MEMORY_REFRESH); - } - intel_uncore_write(uncore, EMR, error_mask); + if (IS_G4X(i915)) + return ~(GM45_ERROR_PAGE_TABLE | + GM45_ERROR_MEM_PRIV | + GM45_ERROR_CP_PRIV | + I915_ERROR_MEMORY_REFRESH); + else + return ~(I915_ERROR_PAGE_TABLE | + I915_ERROR_MEMORY_REFRESH); +} + +static void i965_irq_postinstall(struct drm_i915_private *dev_priv) +{ + struct intel_uncore *uncore = &dev_priv->uncore; + u32 enable_mask; + + intel_uncore_write(uncore, EMR, i965_error_mask(dev_priv)); /* Unmask the interrupts that we always want on. */ dev_priv->irq_mask = diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 52531ab28c5f..a76c5ce9513d 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -17,7 +17,6 @@ #include "i915_drv.h" #include "i915_pmu.h" -#include "intel_pm.h" /* Frequency for the sampling timer for events which need it. */ #define FREQUENCY 200 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3b2642397b82..1757fb8fdf5b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -116,6 +116,9 @@ * #define GEN8_BAR _MMIO(0xb888) */ +#define GU_CNTL_PROTECTED _MMIO(0x10100C) +#define DEPRESENT REG_BIT(9) + #define GU_CNTL _MMIO(0x101010) #define LMEM_INIT REG_BIT(7) #define DRIVERFLR REG_BIT(31) @@ -541,9 +544,10 @@ #define _BXT_PHY0_BASE 0x6C000 #define _BXT_PHY1_BASE 0x162000 #define _BXT_PHY2_BASE 0x163000 -#define BXT_PHY_BASE(phy) _PHY3((phy), _BXT_PHY0_BASE, \ - _BXT_PHY1_BASE, \ - _BXT_PHY2_BASE) +#define BXT_PHY_BASE(phy) \ + _PICK_EVEN_2RANGES(phy, 1, \ + _BXT_PHY0_BASE, _BXT_PHY0_BASE, \ + _BXT_PHY1_BASE, _BXT_PHY2_BASE) #define _BXT_PHY(phy, reg) \ _MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg)) @@ -566,13 +570,14 @@ #define BXT_PHY_CTL(port) _MMIO_PORT(port, _BXT_PHY_CTL_DDI_A, \ _BXT_PHY_CTL_DDI_B) -#define _PHY_CTL_FAMILY_EDP 0x64C80 #define _PHY_CTL_FAMILY_DDI 0x64C90 +#define _PHY_CTL_FAMILY_EDP 0x64C80 #define _PHY_CTL_FAMILY_DDI_C 0x64CA0 #define COMMON_RESET_DIS (1 << 31) -#define BXT_PHY_CTL_FAMILY(phy) _MMIO_PHY3((phy), _PHY_CTL_FAMILY_DDI, \ - _PHY_CTL_FAMILY_EDP, \ - _PHY_CTL_FAMILY_DDI_C) +#define BXT_PHY_CTL_FAMILY(phy) \ + _MMIO(_PICK_EVEN_2RANGES(phy, 1, \ + _PHY_CTL_FAMILY_DDI, _PHY_CTL_FAMILY_DDI, \ + _PHY_CTL_FAMILY_EDP, _PHY_CTL_FAMILY_DDI_C)) /* BXT PHY PLL registers */ #define _PORT_PLL_A 0x46074 @@ -1038,9 +1043,11 @@ #define _MBUS_ABOX0_CTL 0x45038 #define _MBUS_ABOX1_CTL 0x45048 #define _MBUS_ABOX2_CTL 0x4504C -#define MBUS_ABOX_CTL(x) _MMIO(_PICK(x, _MBUS_ABOX0_CTL, \ - _MBUS_ABOX1_CTL, \ - _MBUS_ABOX2_CTL)) +#define MBUS_ABOX_CTL(x) \ + _MMIO(_PICK_EVEN_2RANGES(x, 2, \ + _MBUS_ABOX0_CTL, _MBUS_ABOX1_CTL, \ + _MBUS_ABOX2_CTL, _MBUS_ABOX2_CTL)) + #define 
MBUS_ABOX_BW_CREDIT_MASK (3 << 20) #define MBUS_ABOX_BW_CREDIT(x) ((x) << 20) #define MBUS_ABOX_B_CREDIT_MASK (0xF << 16) @@ -1730,10 +1737,11 @@ #define PALETTE_10BIT_BLUE_EXP_MASK REG_GENMASK(7, 6) #define PALETTE_10BIT_BLUE_MANT_MASK REG_GENMASK(5, 2) #define PALETTE_10BIT_BLUE_UDW_MASK REG_GENMASK(1, 0) -#define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \ - _PICK((pipe), _PALETTE_A, \ - _PALETTE_B, _CHV_PALETTE_C) + \ - (i) * 4) +#define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \ + _PICK_EVEN_2RANGES(pipe, 2, \ + _PALETTE_A, _PALETTE_B, \ + _CHV_PALETTE_C, _CHV_PALETTE_C) + \ + (i) * 4) #define PEG_BAND_GAP_DATA _MMIO(0x14d68) @@ -1906,48 +1914,72 @@ #define PIPE_CRC_RES_RES1_I915(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES1_A_I915) #define PIPE_CRC_RES_RES2_G4X(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES2_A_G4X) -/* Pipe A timing regs */ -#define _HTOTAL_A 0x60000 -#define _HBLANK_A 0x60004 -#define _HSYNC_A 0x60008 -#define _VTOTAL_A 0x6000c -#define _VBLANK_A 0x60010 -#define _VSYNC_A 0x60014 -#define _EXITLINE_A 0x60018 -#define _PIPEASRC 0x6001c +/* Pipe/transcoder A timing regs */ +#define _TRANS_HTOTAL_A 0x60000 +#define HTOTAL_MASK REG_GENMASK(31, 16) +#define HTOTAL(htotal) REG_FIELD_PREP(HTOTAL_MASK, (htotal)) +#define HACTIVE_MASK REG_GENMASK(15, 0) +#define HACTIVE(hdisplay) REG_FIELD_PREP(HACTIVE_MASK, (hdisplay)) +#define _TRANS_HBLANK_A 0x60004 +#define HBLANK_END_MASK REG_GENMASK(31, 16) +#define HBLANK_END(hblank_end) REG_FIELD_PREP(HBLANK_END_MASK, (hblank_end)) +#define HBLANK_START_MASK REG_GENMASK(15, 0) +#define HBLANK_START(hblank_start) REG_FIELD_PREP(HBLANK_START_MASK, (hblank_start)) +#define _TRANS_HSYNC_A 0x60008 +#define HSYNC_END_MASK REG_GENMASK(31, 16) +#define HSYNC_END(hsync_end) REG_FIELD_PREP(HSYNC_END_MASK, (hsync_end)) +#define HSYNC_START_MASK REG_GENMASK(15, 0) +#define HSYNC_START(hsync_start) REG_FIELD_PREP(HSYNC_START_MASK, (hsync_start)) +#define _TRANS_VTOTAL_A 0x6000c +#define VTOTAL_MASK REG_GENMASK(31, 16) +#define VTOTAL(vtotal) REG_FIELD_PREP(VTOTAL_MASK, (vtotal)) +#define VACTIVE_MASK REG_GENMASK(15, 0) +#define VACTIVE(vdisplay) REG_FIELD_PREP(VACTIVE_MASK, (vdisplay)) +#define _TRANS_VBLANK_A 0x60010 +#define VBLANK_END_MASK REG_GENMASK(31, 16) +#define VBLANK_END(vblank_end) REG_FIELD_PREP(VBLANK_END_MASK, (vblank_end)) +#define VBLANK_START_MASK REG_GENMASK(15, 0) +#define VBLANK_START(vblank_start) REG_FIELD_PREP(VBLANK_START_MASK, (vblank_start)) +#define _TRANS_VSYNC_A 0x60014 +#define VSYNC_END_MASK REG_GENMASK(31, 16) +#define VSYNC_END(vsync_end) REG_FIELD_PREP(VSYNC_END_MASK, (vsync_end)) +#define VSYNC_START_MASK REG_GENMASK(15, 0) +#define VSYNC_START(vsync_start) REG_FIELD_PREP(VSYNC_START_MASK, (vsync_start)) +#define _TRANS_EXITLINE_A 0x60018 +#define _PIPEASRC 0x6001c #define PIPESRC_WIDTH_MASK REG_GENMASK(31, 16) #define PIPESRC_WIDTH(w) REG_FIELD_PREP(PIPESRC_WIDTH_MASK, (w)) #define PIPESRC_HEIGHT_MASK REG_GENMASK(15, 0) #define PIPESRC_HEIGHT(h) REG_FIELD_PREP(PIPESRC_HEIGHT_MASK, (h)) -#define _BCLRPAT_A 0x60020 -#define _VSYNCSHIFT_A 0x60028 -#define _PIPE_MULT_A 0x6002c - -/* Pipe B timing regs */ -#define _HTOTAL_B 0x61000 -#define _HBLANK_B 0x61004 -#define _HSYNC_B 0x61008 -#define _VTOTAL_B 0x6100c -#define _VBLANK_B 0x61010 -#define _VSYNC_B 0x61014 -#define _PIPEBSRC 0x6101c -#define _BCLRPAT_B 0x61020 -#define _VSYNCSHIFT_B 0x61028 -#define _PIPE_MULT_B 0x6102c +#define _BCLRPAT_A 0x60020 +#define _TRANS_VSYNCSHIFT_A 0x60028 +#define _TRANS_MULT_A 0x6002c + +/* Pipe/transcoder B 
timing regs */ +#define _TRANS_HTOTAL_B 0x61000 +#define _TRANS_HBLANK_B 0x61004 +#define _TRANS_HSYNC_B 0x61008 +#define _TRANS_VTOTAL_B 0x6100c +#define _TRANS_VBLANK_B 0x61010 +#define _TRANS_VSYNC_B 0x61014 +#define _PIPEBSRC 0x6101c +#define _BCLRPAT_B 0x61020 +#define _TRANS_VSYNCSHIFT_B 0x61028 +#define _TRANS_MULT_B 0x6102c /* DSI 0 timing regs */ -#define _HTOTAL_DSI0 0x6b000 -#define _HSYNC_DSI0 0x6b008 -#define _VTOTAL_DSI0 0x6b00c -#define _VSYNC_DSI0 0x6b014 -#define _VSYNCSHIFT_DSI0 0x6b028 +#define _TRANS_HTOTAL_DSI0 0x6b000 +#define _TRANS_HSYNC_DSI0 0x6b008 +#define _TRANS_VTOTAL_DSI0 0x6b00c +#define _TRANS_VSYNC_DSI0 0x6b014 +#define _TRANS_VSYNCSHIFT_DSI0 0x6b028 /* DSI 1 timing regs */ -#define _HTOTAL_DSI1 0x6b800 -#define _HSYNC_DSI1 0x6b808 -#define _VTOTAL_DSI1 0x6b80c -#define _VSYNC_DSI1 0x6b814 -#define _VSYNCSHIFT_DSI1 0x6b828 +#define _TRANS_HTOTAL_DSI1 0x6b800 +#define _TRANS_HSYNC_DSI1 0x6b808 +#define _TRANS_VTOTAL_DSI1 0x6b80c +#define _TRANS_VSYNC_DSI1 0x6b814 +#define _TRANS_VSYNCSHIFT_DSI1 0x6b828 #define TRANSCODER_A_OFFSET 0x60000 #define TRANSCODER_B_OFFSET 0x61000 @@ -1958,18 +1990,18 @@ #define TRANSCODER_DSI0_OFFSET 0x6b000 #define TRANSCODER_DSI1_OFFSET 0x6b800 -#define HTOTAL(trans) _MMIO_TRANS2(trans, _HTOTAL_A) -#define HBLANK(trans) _MMIO_TRANS2(trans, _HBLANK_A) -#define HSYNC(trans) _MMIO_TRANS2(trans, _HSYNC_A) -#define VTOTAL(trans) _MMIO_TRANS2(trans, _VTOTAL_A) -#define VBLANK(trans) _MMIO_TRANS2(trans, _VBLANK_A) -#define VSYNC(trans) _MMIO_TRANS2(trans, _VSYNC_A) -#define BCLRPAT(trans) _MMIO_TRANS2(trans, _BCLRPAT_A) -#define VSYNCSHIFT(trans) _MMIO_TRANS2(trans, _VSYNCSHIFT_A) -#define PIPESRC(trans) _MMIO_TRANS2(trans, _PIPEASRC) -#define PIPE_MULT(trans) _MMIO_TRANS2(trans, _PIPE_MULT_A) - -#define EXITLINE(trans) _MMIO_TRANS2(trans, _EXITLINE_A) +#define TRANS_HTOTAL(trans) _MMIO_TRANS2((trans), _TRANS_HTOTAL_A) +#define TRANS_HBLANK(trans) _MMIO_TRANS2((trans), _TRANS_HBLANK_A) +#define TRANS_HSYNC(trans) _MMIO_TRANS2((trans), _TRANS_HSYNC_A) +#define TRANS_VTOTAL(trans) _MMIO_TRANS2((trans), _TRANS_VTOTAL_A) +#define TRANS_VBLANK(trans) _MMIO_TRANS2((trans), _TRANS_VBLANK_A) +#define TRANS_VSYNC(trans) _MMIO_TRANS2((trans), _TRANS_VSYNC_A) +#define BCLRPAT(trans) _MMIO_TRANS2((trans), _BCLRPAT_A) +#define TRANS_VSYNCSHIFT(trans) _MMIO_TRANS2((trans), _TRANS_VSYNCSHIFT_A) +#define PIPESRC(pipe) _MMIO_TRANS2((pipe), _PIPEASRC) +#define TRANS_MULT(trans) _MMIO_TRANS2((trans), _TRANS_MULT_A) + +#define TRANS_EXITLINE(trans) _MMIO_TRANS2((trans), _TRANS_EXITLINE_A) #define EXITLINE_ENABLE REG_BIT(31) #define EXITLINE_MASK REG_GENMASK(12, 0) #define EXITLINE_SHIFT 0 @@ -2266,110 +2298,6 @@ #define ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME REG_BIT(14) #define ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME REG_BIT(13) -/* Icelake DSC Rate Control Range Parameter Registers */ -#define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240) -#define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4) -#define DSCC_RC_RANGE_PARAMETERS_0 _MMIO(0x6BA40) -#define DSCC_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6BA40 + 4) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB (0x78208) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB (0x78208 + 4) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB (0x78308) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB (0x78308 + 4) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC (0x78408) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC (0x78408 + 4) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC (0x78508) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC 
(0x78508 + 4) -#define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC) -#define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC) -#define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC) -#define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC) -#define RC_BPG_OFFSET_SHIFT 10 -#define RC_MAX_QP_SHIFT 5 -#define RC_MIN_QP_SHIFT 0 - -#define DSCA_RC_RANGE_PARAMETERS_1 _MMIO(0x6B248) -#define DSCA_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6B248 + 4) -#define DSCC_RC_RANGE_PARAMETERS_1 _MMIO(0x6BA48) -#define DSCC_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6BA48 + 4) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB (0x78210) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB (0x78210 + 4) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB (0x78310) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB (0x78310 + 4) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC (0x78410) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC (0x78410 + 4) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC (0x78510) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC (0x78510 + 4) -#define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC) -#define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC) -#define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC) -#define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC) - -#define DSCA_RC_RANGE_PARAMETERS_2 _MMIO(0x6B250) -#define DSCA_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6B250 + 4) -#define DSCC_RC_RANGE_PARAMETERS_2 _MMIO(0x6BA50) -#define DSCC_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6BA50 + 4) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB (0x78218) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB (0x78218 + 4) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB (0x78318) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB (0x78318 + 4) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC (0x78418) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC (0x78418 + 4) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC (0x78518) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC (0x78518 + 4) -#define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC) -#define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC) -#define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC) -#define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC) - -#define DSCA_RC_RANGE_PARAMETERS_3 _MMIO(0x6B258) -#define DSCA_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6B258 + 4) -#define DSCC_RC_RANGE_PARAMETERS_3 _MMIO(0x6BA58) 
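A note on the indexing used throughout the DSC block being removed here: the ICL_DSC*(pipe) macros pick between a _PB and a _PC address with _MMIO_PIPE((pipe) - PIPE_B, ...), because these per-pipe VDSC registers start at pipe B on this hardware, so the index is rebased. The pick itself is an even-stride interpolation (the driver's _PICK_EVEN); a local reimplementation to show the mapping, using the RC_RANGE_PARAMETERS_0 addresses from above:

#include <stdint.h>
#include <stdio.h>

/* Same shape as the driver's _PICK_EVEN: a + i * (b - a). */
#define PICK_EVEN(i, a, b)	((a) + (i) * ((b) - (a)))

enum pipe { PIPE_A, PIPE_B, PIPE_C };

int main(void)
{
	const uint32_t pb = 0x78208;	/* _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB */
	const uint32_t pc = 0x78408;	/* _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC */

	for (int pipe = PIPE_B; pipe <= PIPE_C; pipe++)
		printf("pipe %c -> %#x\n", 'A' + pipe,
		       (unsigned int)PICK_EVEN(pipe - PIPE_B, pb, pc));
	return 0;
}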
-#define DSCC_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6BA58 + 4) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB (0x78220) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB (0x78220 + 4) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB (0x78320) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB (0x78320 + 4) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC (0x78420) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC (0x78420 + 4) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC (0x78520) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC (0x78520 + 4) -#define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC) -#define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC) -#define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC) -#define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC) - /* VGA port control */ #define ADPA _MMIO(0x61100) #define PCH_ADPA _MMIO(0xe1100) @@ -2451,18 +2379,7 @@ #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) #define PORT_HOTPLUG_STAT _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61114) -/* - * HDMI/DP bits are g4x+ - * - * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused. - * Please check the detailed lore in the commit message for for experimental - * evidence. - */ -/* Bspec says GM45 should match G4X/VLV/CHV, but reality disagrees */ -#define PORTD_HOTPLUG_LIVE_STATUS_GM45 (1 << 29) -#define PORTC_HOTPLUG_LIVE_STATUS_GM45 (1 << 28) -#define PORTB_HOTPLUG_LIVE_STATUS_GM45 (1 << 27) -/* G4X/VLV/CHV DP/HDMI bits again match Bspec */ +/* HDMI/DP bits are g4x+ */ #define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27) #define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28) #define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29) @@ -2592,59 +2509,6 @@ #define SDVO_PIPE_SEL_MASK_CHV (3 << 24) #define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24) -/* LVDS port control */ -#define LVDS _MMIO(0x61180) -/* - * Enables the LVDS port. This bit must be set before DPLLs are enabled, as - * the DPLL semantics change when the LVDS is assigned to that pipe. - */ -#define LVDS_PORT_EN (1 << 31) -/* Selects pipe B for LVDS data. Must be set on pre-965. */ -#define LVDS_PIPE_SEL_SHIFT 30 -#define LVDS_PIPE_SEL_MASK (1 << 30) -#define LVDS_PIPE_SEL(pipe) ((pipe) << 30) -#define LVDS_PIPE_SEL_SHIFT_CPT 29 -#define LVDS_PIPE_SEL_MASK_CPT (3 << 29) -#define LVDS_PIPE_SEL_CPT(pipe) ((pipe) << 29) -/* LVDS dithering flag on 965/g4x platform */ -#define LVDS_ENABLE_DITHER (1 << 25) -/* LVDS sync polarity flags. Set to invert (i.e. negative) */ -#define LVDS_VSYNC_POLARITY (1 << 21) -#define LVDS_HSYNC_POLARITY (1 << 20) - -/* Enable border for unscaled (or aspect-scaled) display */ -#define LVDS_BORDER_ENABLE (1 << 15) -/* - * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per - * pixel. - */ -#define LVDS_A0A2_CLKA_POWER_MASK (3 << 8) -#define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8) -#define LVDS_A0A2_CLKA_POWER_UP (3 << 8) -/* - * Controls the A3 data pair, which contains the additional LSBs for 24 bit - * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be - * on. 
- */ -#define LVDS_A3_POWER_MASK (3 << 6) -#define LVDS_A3_POWER_DOWN (0 << 6) -#define LVDS_A3_POWER_UP (3 << 6) -/* - * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP - * is set. - */ -#define LVDS_CLKB_POWER_MASK (3 << 4) -#define LVDS_CLKB_POWER_DOWN (0 << 4) -#define LVDS_CLKB_POWER_UP (3 << 4) -/* - * Controls the B0-B3 data pairs. This must be set to match the DPLL p2 - * setting for whether we are in dual-channel mode. The B3 pair will - * additionally only be powered up when LVDS_A3_POWER_UP is set. - */ -#define LVDS_B0B3_POWER_MASK (3 << 2) -#define LVDS_B0B3_POWER_DOWN (0 << 2) -#define LVDS_B0B3_POWER_UP (3 << 2) - /* Video Data Island Packet control */ #define VIDEO_DIP_DATA _MMIO(0x61178) /* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC @@ -3492,61 +3356,61 @@ #define _PIPEADSL 0x70000 #define PIPEDSL_CURR_FIELD REG_BIT(31) /* ctg+ */ #define PIPEDSL_LINE_MASK REG_GENMASK(19, 0) -#define _PIPEACONF 0x70008 -#define PIPECONF_ENABLE REG_BIT(31) -#define PIPECONF_DOUBLE_WIDE REG_BIT(30) /* pre-i965 */ -#define PIPECONF_STATE_ENABLE REG_BIT(30) /* i965+ */ -#define PIPECONF_DSI_PLL_LOCKED REG_BIT(29) /* vlv & pipe A only */ -#define PIPECONF_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* pre-hsw */ -#define PIPECONF_FRAME_START_DELAY(x) REG_FIELD_PREP(PIPECONF_FRAME_START_DELAY_MASK, (x)) /* pre-hsw: 0-3 */ -#define PIPECONF_PIPE_LOCKED REG_BIT(25) -#define PIPECONF_FORCE_BORDER REG_BIT(25) -#define PIPECONF_GAMMA_MODE_MASK_I9XX REG_BIT(24) /* gmch */ -#define PIPECONF_GAMMA_MODE_MASK_ILK REG_GENMASK(25, 24) /* ilk-ivb */ -#define PIPECONF_GAMMA_MODE_8BIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK, 0) -#define PIPECONF_GAMMA_MODE_10BIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK, 1) -#define PIPECONF_GAMMA_MODE_12BIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK_ILK, 2) /* ilk-ivb */ -#define PIPECONF_GAMMA_MODE_SPLIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK_ILK, 3) /* ivb */ -#define PIPECONF_GAMMA_MODE(x) REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK_ILK, (x)) /* pass in GAMMA_MODE_MODE_* */ -#define PIPECONF_INTERLACE_MASK REG_GENMASK(23, 21) /* gen3+ */ -#define PIPECONF_INTERLACE_PROGRESSIVE REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 0) -#define PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 4) /* gen4 only */ -#define PIPECONF_INTERLACE_W_SYNC_SHIFT REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 5) /* gen4 only */ -#define PIPECONF_INTERLACE_W_FIELD_INDICATION REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 6) -#define PIPECONF_INTERLACE_FIELD_0_ONLY REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 7) /* gen3 only */ +#define _TRANSACONF 0x70008 +#define TRANSCONF_ENABLE REG_BIT(31) +#define TRANSCONF_DOUBLE_WIDE REG_BIT(30) /* pre-i965 */ +#define TRANSCONF_STATE_ENABLE REG_BIT(30) /* i965+ */ +#define TRANSCONF_DSI_PLL_LOCKED REG_BIT(29) /* vlv & pipe A only */ +#define TRANSCONF_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* pre-hsw */ +#define TRANSCONF_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANSCONF_FRAME_START_DELAY_MASK, (x)) /* pre-hsw: 0-3 */ +#define TRANSCONF_PIPE_LOCKED REG_BIT(25) +#define TRANSCONF_FORCE_BORDER REG_BIT(25) +#define TRANSCONF_GAMMA_MODE_MASK_I9XX REG_BIT(24) /* gmch */ +#define TRANSCONF_GAMMA_MODE_MASK_ILK REG_GENMASK(25, 24) /* ilk-ivb */ +#define TRANSCONF_GAMMA_MODE_8BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK, 0) +#define TRANSCONF_GAMMA_MODE_10BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK, 1) +#define TRANSCONF_GAMMA_MODE_12BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 2) /* ilk-ivb */ +#define 
TRANSCONF_GAMMA_MODE_SPLIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 3) /* ivb */ +#define TRANSCONF_GAMMA_MODE(x) REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, (x)) /* pass in GAMMA_MODE_MODE_* */ +#define TRANSCONF_INTERLACE_MASK REG_GENMASK(23, 21) /* gen3+ */ +#define TRANSCONF_INTERLACE_PROGRESSIVE REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 0) +#define TRANSCONF_INTERLACE_W_SYNC_SHIFT_PANEL REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 4) /* gen4 only */ +#define TRANSCONF_INTERLACE_W_SYNC_SHIFT REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 5) /* gen4 only */ +#define TRANSCONF_INTERLACE_W_FIELD_INDICATION REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 6) +#define TRANSCONF_INTERLACE_FIELD_0_ONLY REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 7) /* gen3 only */ /* * ilk+: PF/D=progressive fetch/display, IF/D=interlaced fetch/display, * DBL=power saving pixel doubling, PF-ID* requires panel fitter */ -#define PIPECONF_INTERLACE_MASK_ILK REG_GENMASK(23, 21) /* ilk+ */ -#define PIPECONF_INTERLACE_MASK_HSW REG_GENMASK(22, 21) /* hsw+ */ -#define PIPECONF_INTERLACE_PF_PD_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 0) -#define PIPECONF_INTERLACE_PF_ID_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 1) -#define PIPECONF_INTERLACE_IF_ID_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 3) -#define PIPECONF_INTERLACE_IF_ID_DBL_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 4) /* ilk/snb only */ -#define PIPECONF_INTERLACE_PF_ID_DBL_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 5) /* ilk/snb only */ -#define PIPECONF_REFRESH_RATE_ALT_ILK REG_BIT(20) -#define PIPECONF_MSA_TIMING_DELAY_MASK REG_GENMASK(19, 18) /* ilk/snb/ivb */ -#define PIPECONF_MSA_TIMING_DELAY(x) REG_FIELD_PREP(PIPECONF_MSA_TIMING_DELAY_MASK, (x)) -#define PIPECONF_CXSR_DOWNCLOCK REG_BIT(16) -#define PIPECONF_REFRESH_RATE_ALT_VLV REG_BIT(14) -#define PIPECONF_COLOR_RANGE_SELECT REG_BIT(13) -#define PIPECONF_OUTPUT_COLORSPACE_MASK REG_GENMASK(12, 11) /* ilk-ivb */ -#define PIPECONF_OUTPUT_COLORSPACE_RGB REG_FIELD_PREP(PIPECONF_OUTPUT_COLORSPACE_MASK, 0) /* ilk-ivb */ -#define PIPECONF_OUTPUT_COLORSPACE_YUV601 REG_FIELD_PREP(PIPECONF_OUTPUT_COLORSPACE_MASK, 1) /* ilk-ivb */ -#define PIPECONF_OUTPUT_COLORSPACE_YUV709 REG_FIELD_PREP(PIPECONF_OUTPUT_COLORSPACE_MASK, 2) /* ilk-ivb */ -#define PIPECONF_OUTPUT_COLORSPACE_YUV_HSW REG_BIT(11) /* hsw only */ -#define PIPECONF_BPC_MASK REG_GENMASK(7, 5) /* ctg-ivb */ -#define PIPECONF_BPC_8 REG_FIELD_PREP(PIPECONF_BPC_MASK, 0) -#define PIPECONF_BPC_10 REG_FIELD_PREP(PIPECONF_BPC_MASK, 1) -#define PIPECONF_BPC_6 REG_FIELD_PREP(PIPECONF_BPC_MASK, 2) -#define PIPECONF_BPC_12 REG_FIELD_PREP(PIPECONF_BPC_MASK, 3) -#define PIPECONF_DITHER_EN REG_BIT(4) -#define PIPECONF_DITHER_TYPE_MASK REG_GENMASK(3, 2) -#define PIPECONF_DITHER_TYPE_SP REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 0) -#define PIPECONF_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 1) -#define PIPECONF_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 2) -#define PIPECONF_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 3) +#define TRANSCONF_INTERLACE_MASK_ILK REG_GENMASK(23, 21) /* ilk+ */ +#define TRANSCONF_INTERLACE_MASK_HSW REG_GENMASK(22, 21) /* hsw+ */ +#define TRANSCONF_INTERLACE_PF_PD_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 0) +#define TRANSCONF_INTERLACE_PF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 1) +#define TRANSCONF_INTERLACE_IF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 3) +#define TRANSCONF_INTERLACE_IF_ID_DBL_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 4) /* ilk/snb only 
*/ +#define TRANSCONF_INTERLACE_PF_ID_DBL_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 5) /* ilk/snb only */ +#define TRANSCONF_REFRESH_RATE_ALT_ILK REG_BIT(20) +#define TRANSCONF_MSA_TIMING_DELAY_MASK REG_GENMASK(19, 18) /* ilk/snb/ivb */ +#define TRANSCONF_MSA_TIMING_DELAY(x) REG_FIELD_PREP(TRANSCONF_MSA_TIMING_DELAY_MASK, (x)) +#define TRANSCONF_CXSR_DOWNCLOCK REG_BIT(16) +#define TRANSCONF_REFRESH_RATE_ALT_VLV REG_BIT(14) +#define TRANSCONF_COLOR_RANGE_SELECT REG_BIT(13) +#define TRANSCONF_OUTPUT_COLORSPACE_MASK REG_GENMASK(12, 11) /* ilk-ivb */ +#define TRANSCONF_OUTPUT_COLORSPACE_RGB REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 0) /* ilk-ivb */ +#define TRANSCONF_OUTPUT_COLORSPACE_YUV601 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 1) /* ilk-ivb */ +#define TRANSCONF_OUTPUT_COLORSPACE_YUV709 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 2) /* ilk-ivb */ +#define TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW REG_BIT(11) /* hsw only */ +#define TRANSCONF_BPC_MASK REG_GENMASK(7, 5) /* ctg-ivb */ +#define TRANSCONF_BPC_8 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 0) +#define TRANSCONF_BPC_10 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 1) +#define TRANSCONF_BPC_6 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 2) +#define TRANSCONF_BPC_12 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 3) +#define TRANSCONF_DITHER_EN REG_BIT(4) +#define TRANSCONF_DITHER_TYPE_MASK REG_GENMASK(3, 2) +#define TRANSCONF_DITHER_TYPE_SP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 0) +#define TRANSCONF_DITHER_TYPE_ST1 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 1) +#define TRANSCONF_DITHER_TYPE_ST2 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 2) +#define TRANSCONF_DITHER_TYPE_TEMP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 3) #define _PIPEASTAT 0x70024 #define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31) #define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30) @@ -3615,7 +3479,7 @@ #define PIPE_DSI0_OFFSET 0x7b000 #define PIPE_DSI1_OFFSET 0x7b800 -#define PIPECONF(pipe) _MMIO_PIPE2(pipe, _PIPEACONF) +#define TRANSCONF(trans) _MMIO_PIPE2((trans), _TRANSACONF) #define PIPEDSL(pipe) _MMIO_PIPE2(pipe, _PIPEADSL) #define PIPEFRAME(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEHIGH) #define PIPEFRAMEPIXEL(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEPIXEL) @@ -4255,7 +4119,7 @@ /* Pipe B */ #define _PIPEBDSL (DISPLAY_MMIO_BASE(dev_priv) + 0x71000) -#define _PIPEBCONF (DISPLAY_MMIO_BASE(dev_priv) + 0x71008) +#define _TRANSBCONF (DISPLAY_MMIO_BASE(dev_priv) + 0x71008) #define _PIPEBSTAT (DISPLAY_MMIO_BASE(dev_priv) + 0x71024) #define _PIPEBFRAMEHIGH 0x71040 #define _PIPEBFRAMEPIXEL 0x71044 @@ -5432,6 +5296,7 @@ #define GEN8_PIPE_CDCLK_CRC_DONE (1 << 28) #define XELPD_PIPE_SOFT_UNDERRUN (1 << 22) #define XELPD_PIPE_HARD_UNDERRUN (1 << 21) +#define GEN12_PIPE_VBLANK_UNMOD (1 << 19) #define GEN8_PIPE_CURSOR_FAULT (1 << 10) #define GEN8_PIPE_SPRITE_FAULT (1 << 9) #define GEN8_PIPE_PRIMARY_FAULT (1 << 8) @@ -6392,9 +6257,6 @@ #define FDI_PLL_CTL_1 _MMIO(0xfe000) #define FDI_PLL_CTL_2 _MMIO(0xfe004) -#define PCH_LVDS _MMIO(0xe1180) -#define LVDS_DETECTED (1 << 1) - #define _PCH_DP_B 0xe4100 #define PCH_DP_B _MMIO(_PCH_DP_B) #define _PCH_DPB_AUX_CH_CTL 0xe4110 @@ -7224,21 +7086,23 @@ enum skl_power_gate { ADLS_DPCLKA_DDIK_SEL_MASK) /* ICL PLL */ -#define DPLL0_ENABLE 0x46010 -#define DPLL1_ENABLE 0x46014 +#define _DPLL0_ENABLE 0x46010 +#define _DPLL1_ENABLE 0x46014 #define _ADLS_DPLL2_ENABLE 0x46018 #define _ADLS_DPLL3_ENABLE 0x46030 -#define PLL_ENABLE (1 << 31) -#define PLL_LOCK (1 << 30) -#define PLL_POWER_ENABLE (1 << 27) -#define PLL_POWER_STATE (1 << 26) -#define ICL_DPLL_ENABLE(pll) 
_MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \ - _ADLS_DPLL2_ENABLE, _ADLS_DPLL3_ENABLE) +#define PLL_ENABLE REG_BIT(31) +#define PLL_LOCK REG_BIT(30) +#define PLL_POWER_ENABLE REG_BIT(27) +#define PLL_POWER_STATE REG_BIT(26) +#define ICL_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \ + _DPLL0_ENABLE, _DPLL1_ENABLE, \ + _ADLS_DPLL3_ENABLE, _ADLS_DPLL3_ENABLE)) #define _DG2_PLL3_ENABLE 0x4601C -#define DG2_PLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \ - _ADLS_DPLL2_ENABLE, _DG2_PLL3_ENABLE) +#define DG2_PLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \ + _DPLL0_ENABLE, _DPLL1_ENABLE, \ + _DG2_PLL3_ENABLE, _DG2_PLL3_ENABLE)) #define TBT_PLL_ENABLE _MMIO(0x46020) @@ -7246,13 +7110,14 @@ enum skl_power_gate { #define _MG_PLL2_ENABLE 0x46034 #define _MG_PLL3_ENABLE 0x46038 #define _MG_PLL4_ENABLE 0x4603C -/* Bits are the same as DPLL0_ENABLE */ +/* Bits are the same as _DPLL0_ENABLE */ #define MG_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), _MG_PLL1_ENABLE, \ _MG_PLL2_ENABLE) /* DG1 PLL */ -#define DG1_DPLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \ - _MG_PLL1_ENABLE, _MG_PLL2_ENABLE) +#define DG1_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \ + _DPLL0_ENABLE, _DPLL1_ENABLE, \ + _MG_PLL1_ENABLE, _MG_PLL2_ENABLE)) /* ADL-P Type C PLL */ #define PORTTC1_PLL_ENABLE 0x46038 @@ -7312,9 +7177,9 @@ enum skl_power_gate { #define _TGL_DPLL0_CFGCR0 0x164284 #define _TGL_DPLL1_CFGCR0 0x16428C #define _TGL_TBTPLL_CFGCR0 0x16429C -#define TGL_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \ - _TGL_DPLL1_CFGCR0, \ - _TGL_TBTPLL_CFGCR0) +#define TGL_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \ + _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \ + _TGL_TBTPLL_CFGCR0, _TGL_TBTPLL_CFGCR0)) #define RKL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR0, \ _TGL_DPLL1_CFGCR0) @@ -7327,40 +7192,36 @@ enum skl_power_gate { #define _TGL_DPLL0_CFGCR1 0x164288 #define _TGL_DPLL1_CFGCR1 0x164290 #define _TGL_TBTPLL_CFGCR1 0x1642A0 -#define TGL_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \ - _TGL_DPLL1_CFGCR1, \ - _TGL_TBTPLL_CFGCR1) +#define TGL_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \ + _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \ + _TGL_TBTPLL_CFGCR1, _TGL_TBTPLL_CFGCR1)) #define RKL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR1, \ _TGL_DPLL1_CFGCR1) #define _DG1_DPLL2_CFGCR0 0x16C284 #define _DG1_DPLL3_CFGCR0 0x16C28C -#define DG1_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \ - _TGL_DPLL1_CFGCR0, \ - _DG1_DPLL2_CFGCR0, \ - _DG1_DPLL3_CFGCR0) +#define DG1_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \ + _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \ + _DG1_DPLL2_CFGCR0, _DG1_DPLL3_CFGCR0)) #define _DG1_DPLL2_CFGCR1 0x16C288 #define _DG1_DPLL3_CFGCR1 0x16C290 -#define DG1_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \ - _TGL_DPLL1_CFGCR1, \ - _DG1_DPLL2_CFGCR1, \ - _DG1_DPLL3_CFGCR1) +#define DG1_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \ + _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \ + _DG1_DPLL2_CFGCR1, _DG1_DPLL3_CFGCR1)) /* For ADL-S DPLL4_CFGCR0/1 are used to control DPLL2 */ -#define _ADLS_DPLL3_CFGCR0 0x1642C0 #define _ADLS_DPLL4_CFGCR0 0x164294 -#define ADLS_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \ - _TGL_DPLL1_CFGCR0, \ - _ADLS_DPLL4_CFGCR0, \ - _ADLS_DPLL3_CFGCR0) +#define _ADLS_DPLL3_CFGCR0 0x1642C0 +#define ADLS_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \ + _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \ + _ADLS_DPLL4_CFGCR0, _ADLS_DPLL3_CFGCR0)) -#define _ADLS_DPLL3_CFGCR1 0x1642C4 #define _ADLS_DPLL4_CFGCR1 0x164298 
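The conversions from _MMIO_PLL3() to _PICK_EVEN_2RANGES() above all follow the same scheme: indices below a cut-off step evenly through one address range, and indices at or above it are rebased into a second range. A local reimplementation (the in-tree macro also adds compile-time constant checks omitted here), evaluated against the new ICL_DPLL_ENABLE() with the DPLL enable addresses from above:

#include <stdio.h>

#define PICK_EVEN(i, a, b)	((a) + (i) * ((b) - (a)))
/* Below `cut`, interpolate from a by (b - a); otherwise rebase into c/d. */
#define PICK_EVEN_2RANGES(i, cut, a, b, c, d) \
	((i) < (cut) ? PICK_EVEN(i, a, b) : PICK_EVEN((i) - (cut), c, d))

int main(void)
{
	/* ICL_DPLL_ENABLE(pll): expect 0x46010, 0x46014, 0x46018, 0x46030. */
	for (int pll = 0; pll < 4; pll++)
		printf("pll %d -> %#x\n", pll,
		       PICK_EVEN_2RANGES(pll, 3, 0x46010, 0x46014,
					 0x46030, 0x46030));
	return 0;
}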
-#define ADLS_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \ - _TGL_DPLL1_CFGCR1, \ - _ADLS_DPLL4_CFGCR1, \ - _ADLS_DPLL3_CFGCR1) +#define _ADLS_DPLL3_CFGCR1 0x1642C4 +#define ADLS_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \ + _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \ + _ADLS_DPLL4_CFGCR1, _ADLS_DPLL3_CFGCR1)) /* BXT display engine PLL */ #define BXT_DE_PLL_CTL _MMIO(0x6d000) @@ -7693,44 +7554,6 @@ enum skl_power_gate { #define PIPE_FRMTMSTMP(pipe) \ _MMIO_PIPE2(pipe, _PIPE_FRMTMSTMP_A) -/* Display Stream Splitter Control */ -#define DSS_CTL1 _MMIO(0x67400) -#define SPLITTER_ENABLE (1 << 31) -#define JOINER_ENABLE (1 << 30) -#define DUAL_LINK_MODE_INTERLEAVE (1 << 24) -#define DUAL_LINK_MODE_FRONTBACK (0 << 24) -#define OVERLAP_PIXELS_MASK (0xf << 16) -#define OVERLAP_PIXELS(pixels) ((pixels) << 16) -#define LEFT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0) -#define LEFT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0) -#define MAX_DL_BUFFER_TARGET_DEPTH 0x5a0 - -#define DSS_CTL2 _MMIO(0x67404) -#define LEFT_BRANCH_VDSC_ENABLE (1 << 31) -#define RIGHT_BRANCH_VDSC_ENABLE (1 << 15) -#define RIGHT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0) -#define RIGHT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0) - -#define _ICL_PIPE_DSS_CTL1_PB 0x78200 -#define _ICL_PIPE_DSS_CTL1_PC 0x78400 -#define ICL_PIPE_DSS_CTL1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_PIPE_DSS_CTL1_PB, \ - _ICL_PIPE_DSS_CTL1_PC) -#define BIG_JOINER_ENABLE (1 << 29) -#define MASTER_BIG_JOINER_ENABLE (1 << 28) -#define VGA_CENTERING_ENABLE (1 << 27) -#define SPLITTER_CONFIGURATION_MASK REG_GENMASK(26, 25) -#define SPLITTER_CONFIGURATION_2_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 0) -#define SPLITTER_CONFIGURATION_4_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 1) -#define UNCOMPRESSED_JOINER_MASTER (1 << 21) -#define UNCOMPRESSED_JOINER_SLAVE (1 << 20) - -#define _ICL_PIPE_DSS_CTL2_PB 0x78204 -#define _ICL_PIPE_DSS_CTL2_PC 0x78404 -#define ICL_PIPE_DSS_CTL2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_PIPE_DSS_CTL2_PB, \ - _ICL_PIPE_DSS_CTL2_PC) - #define GGC _MMIO(0x108040) #define GMS_MASK REG_GENMASK(15, 8) #define GGMS_MASK REG_GENMASK(7, 6) @@ -7754,314 +7577,6 @@ enum skl_power_gate { #define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23) #define DG2_PHY_DP_TX_ACK_MASK REG_GENMASK(23, 20) -/* Icelake Display Stream Compression Registers */ -#define DSCA_PICTURE_PARAMETER_SET_0 _MMIO(0x6B200) -#define DSCC_PICTURE_PARAMETER_SET_0 _MMIO(0x6BA00) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370 -#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC 0x78570 -#define ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC) -#define DSC_ALT_ICH_SEL (1 << 20) -#define DSC_VBR_ENABLE (1 << 19) -#define DSC_422_ENABLE (1 << 18) -#define DSC_COLOR_SPACE_CONVERSION (1 << 17) -#define DSC_BLOCK_PREDICTION (1 << 16) -#define DSC_LINE_BUF_DEPTH_SHIFT 12 -#define DSC_BPC_SHIFT 8 -#define DSC_VER_MIN_SHIFT 4 -#define DSC_VER_MAJ (0x1 << 0) - -#define DSCA_PICTURE_PARAMETER_SET_1 _MMIO(0x6B204) -#define DSCC_PICTURE_PARAMETER_SET_1 _MMIO(0x6BA04) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB 0x78274 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB 0x78374 -#define 
_ICL_DSC0_PICTURE_PARAMETER_SET_1_PC 0x78474 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC 0x78574 -#define ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC) -#define DSC_BPP(bpp) ((bpp) << 0) - -#define DSCA_PICTURE_PARAMETER_SET_2 _MMIO(0x6B208) -#define DSCC_PICTURE_PARAMETER_SET_2 _MMIO(0x6BA08) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB 0x78278 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB 0x78378 -#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC 0x78478 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC 0x78578 -#define ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC) -#define DSC_PIC_WIDTH(pic_width) ((pic_width) << 16) -#define DSC_PIC_HEIGHT(pic_height) ((pic_height) << 0) - -#define DSCA_PICTURE_PARAMETER_SET_3 _MMIO(0x6B20C) -#define DSCC_PICTURE_PARAMETER_SET_3 _MMIO(0x6BA0C) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB 0x7827C -#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB 0x7837C -#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC 0x7847C -#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC 0x7857C -#define ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC) -#define DSC_SLICE_WIDTH(slice_width) ((slice_width) << 16) -#define DSC_SLICE_HEIGHT(slice_height) ((slice_height) << 0) - -#define DSCA_PICTURE_PARAMETER_SET_4 _MMIO(0x6B210) -#define DSCC_PICTURE_PARAMETER_SET_4 _MMIO(0x6BA10) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB 0x78280 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB 0x78380 -#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC 0x78480 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC 0x78580 -#define ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC) -#define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16) -#define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0) - -#define DSCA_PICTURE_PARAMETER_SET_5 _MMIO(0x6B214) -#define DSCC_PICTURE_PARAMETER_SET_5 _MMIO(0x6BA14) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB 0x78284 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB 0x78384 -#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC 0x78484 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC 0x78584 -#define ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC) -#define DSC_SCALE_DEC_INT(scale_dec) ((scale_dec) << 16) -#define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0) - -#define DSCA_PICTURE_PARAMETER_SET_6 
_MMIO(0x6B218) -#define DSCC_PICTURE_PARAMETER_SET_6 _MMIO(0x6BA18) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB 0x78288 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB 0x78388 -#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC 0x78488 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC 0x78588 -#define ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC) -#define DSC_FLATNESS_MAX_QP(max_qp) ((max_qp) << 24) -#define DSC_FLATNESS_MIN_QP(min_qp) ((min_qp) << 16) -#define DSC_FIRST_LINE_BPG_OFFSET(offset) ((offset) << 8) -#define DSC_INITIAL_SCALE_VALUE(value) ((value) << 0) - -#define DSCA_PICTURE_PARAMETER_SET_7 _MMIO(0x6B21C) -#define DSCC_PICTURE_PARAMETER_SET_7 _MMIO(0x6BA1C) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB 0x7828C -#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB 0x7838C -#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC 0x7848C -#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC 0x7858C -#define ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC) -#define DSC_NFL_BPG_OFFSET(bpg_offset) ((bpg_offset) << 16) -#define DSC_SLICE_BPG_OFFSET(bpg_offset) ((bpg_offset) << 0) - -#define DSCA_PICTURE_PARAMETER_SET_8 _MMIO(0x6B220) -#define DSCC_PICTURE_PARAMETER_SET_8 _MMIO(0x6BA20) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB 0x78290 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB 0x78390 -#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC 0x78490 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC 0x78590 -#define ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC) -#define DSC_INITIAL_OFFSET(initial_offset) ((initial_offset) << 16) -#define DSC_FINAL_OFFSET(final_offset) ((final_offset) << 0) - -#define DSCA_PICTURE_PARAMETER_SET_9 _MMIO(0x6B224) -#define DSCC_PICTURE_PARAMETER_SET_9 _MMIO(0x6BA24) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB 0x78294 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB 0x78394 -#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC 0x78494 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC 0x78594 -#define ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC) -#define DSC_RC_EDGE_FACTOR(rc_edge_fact) ((rc_edge_fact) << 16) -#define DSC_RC_MODEL_SIZE(rc_model_size) ((rc_model_size) << 0) - -#define DSCA_PICTURE_PARAMETER_SET_10 _MMIO(0x6B228) -#define DSCC_PICTURE_PARAMETER_SET_10 _MMIO(0x6BA28) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB 0x78298 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB 0x78398 -#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC 0x78498 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC 0x78598 -#define ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - 
_ICL_DSC0_PICTURE_PARAMETER_SET_10_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC) -#define DSC_RC_TARGET_OFF_LOW(rc_tgt_off_low) ((rc_tgt_off_low) << 20) -#define DSC_RC_TARGET_OFF_HIGH(rc_tgt_off_high) ((rc_tgt_off_high) << 16) -#define DSC_RC_QUANT_INC_LIMIT1(lim) ((lim) << 8) -#define DSC_RC_QUANT_INC_LIMIT0(lim) ((lim) << 0) - -#define DSCA_PICTURE_PARAMETER_SET_11 _MMIO(0x6B22C) -#define DSCC_PICTURE_PARAMETER_SET_11 _MMIO(0x6BA2C) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB 0x7829C -#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB 0x7839C -#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC 0x7849C -#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC 0x7859C -#define ICL_DSC0_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC) - -#define DSCA_PICTURE_PARAMETER_SET_12 _MMIO(0x6B260) -#define DSCC_PICTURE_PARAMETER_SET_12 _MMIO(0x6BA60) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB 0x782A0 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB 0x783A0 -#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC 0x784A0 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC 0x785A0 -#define ICL_DSC0_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC) - -#define DSCA_PICTURE_PARAMETER_SET_13 _MMIO(0x6B264) -#define DSCC_PICTURE_PARAMETER_SET_13 _MMIO(0x6BA64) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB 0x782A4 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB 0x783A4 -#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC 0x784A4 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC 0x785A4 -#define ICL_DSC0_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC) - -#define DSCA_PICTURE_PARAMETER_SET_14 _MMIO(0x6B268) -#define DSCC_PICTURE_PARAMETER_SET_14 _MMIO(0x6BA68) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB 0x782A8 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB 0x783A8 -#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC 0x784A8 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC 0x785A8 -#define ICL_DSC0_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC) - -#define DSCA_PICTURE_PARAMETER_SET_15 _MMIO(0x6B26C) -#define DSCC_PICTURE_PARAMETER_SET_15 _MMIO(0x6BA6C) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB 0x782AC -#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB 0x783AC -#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC 0x784AC -#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC 0x785AC -#define ICL_DSC0_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - 
_ICL_DSC0_PICTURE_PARAMETER_SET_15_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC) - -#define DSCA_PICTURE_PARAMETER_SET_16 _MMIO(0x6B270) -#define DSCC_PICTURE_PARAMETER_SET_16 _MMIO(0x6BA70) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB 0x782B0 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB 0x783B0 -#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC 0x784B0 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC 0x785B0 -#define ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC) -#define DSC_SLICE_ROW_PER_FRAME(slice_row_per_frame) ((slice_row_per_frame) << 20) -#define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16) -#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0) - -/* Icelake Rate Control Buffer Threshold Registers */ -#define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230) -#define DSCA_RC_BUF_THRESH_0_UDW _MMIO(0x6B230 + 4) -#define DSCC_RC_BUF_THRESH_0 _MMIO(0x6BA30) -#define DSCC_RC_BUF_THRESH_0_UDW _MMIO(0x6BA30 + 4) -#define _ICL_DSC0_RC_BUF_THRESH_0_PB (0x78254) -#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB (0x78254 + 4) -#define _ICL_DSC1_RC_BUF_THRESH_0_PB (0x78354) -#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB (0x78354 + 4) -#define _ICL_DSC0_RC_BUF_THRESH_0_PC (0x78454) -#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC (0x78454 + 4) -#define _ICL_DSC1_RC_BUF_THRESH_0_PC (0x78554) -#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC (0x78554 + 4) -#define ICL_DSC0_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_BUF_THRESH_0_PB, \ - _ICL_DSC0_RC_BUF_THRESH_0_PC) -#define ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB, \ - _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC) -#define ICL_DSC1_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_BUF_THRESH_0_PB, \ - _ICL_DSC1_RC_BUF_THRESH_0_PC) -#define ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB, \ - _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC) - -#define DSCA_RC_BUF_THRESH_1 _MMIO(0x6B238) -#define DSCA_RC_BUF_THRESH_1_UDW _MMIO(0x6B238 + 4) -#define DSCC_RC_BUF_THRESH_1 _MMIO(0x6BA38) -#define DSCC_RC_BUF_THRESH_1_UDW _MMIO(0x6BA38 + 4) -#define _ICL_DSC0_RC_BUF_THRESH_1_PB (0x7825C) -#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB (0x7825C + 4) -#define _ICL_DSC1_RC_BUF_THRESH_1_PB (0x7835C) -#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB (0x7835C + 4) -#define _ICL_DSC0_RC_BUF_THRESH_1_PC (0x7845C) -#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC (0x7845C + 4) -#define _ICL_DSC1_RC_BUF_THRESH_1_PC (0x7855C) -#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC (0x7855C + 4) -#define ICL_DSC0_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_BUF_THRESH_1_PB, \ - _ICL_DSC0_RC_BUF_THRESH_1_PC) -#define ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB, \ - _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC) -#define ICL_DSC1_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_BUF_THRESH_1_PB, \ - _ICL_DSC1_RC_BUF_THRESH_1_PC) -#define ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \ - _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC) - 
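Every per-pipe macro in the DSC block removed above follows one addressing pattern: a _PB/_PC base pair indexed by (pipe) - PIPE_B, with _MMIO_PIPE() assumed to interpolate linearly between the two bases (each pair sits 0x200 apart). A minimal illustrative sketch of that arithmetic; the _SKETCH_* names are hypothetical and not part of this patch:

#define _SKETCH_PICK_EVEN(index, a, b)	((a) + (index) * ((b) - (a)))

/* Pipe B maps to index 0 and pipe C to index 1, so this resolves to
 * 0x78270 for pipe B and 0x78470 for pipe C, matching the deleted
 * _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB/_PC values above. */
#define _SKETCH_ICL_DSC0_PPS_0(pipe) \
	_SKETCH_PICK_EVEN((pipe) - PIPE_B, 0x78270, 0x78470)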
#define PORT_TX_DFLEXDPSP(fia) _MMIO_FIA((fia), 0x008A0) #define MODULAR_FIA_MASK (1 << 4) #define TC_LIVE_STATE_TBT(idx) (1 << ((idx) * 8 + 6)) @@ -8105,8 +7620,54 @@ enum skl_power_gate { #define DSB_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x0) #define DSB_TAIL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x4) #define DSB_CTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x8) -#define DSB_ENABLE (1 << 31) -#define DSB_STATUS_BUSY (1 << 0) +#define DSB_ENABLE REG_BIT(31) +#define DSB_BUF_REITERATE REG_BIT(29) +#define DSB_WAIT_FOR_VBLANK REG_BIT(28) +#define DSB_WAIT_FOR_LINE_IN REG_BIT(27) +#define DSB_HALT REG_BIT(16) +#define DSB_NON_POSTED REG_BIT(8) +#define DSB_STATUS_BUSY REG_BIT(0) +#define DSB_MMIOCTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0xc) +#define DSB_MMIO_DEAD_CLOCKS_ENABLE REG_BIT(31) +#define DSB_MMIO_DEAD_CLOCKS_COUNT_MASK REG_GENMASK(15, 8) +#define DSB_MMIO_DEAD_CLOCKS_COUNT(x) REG_FIELD_PREP(DSB_MMIO_DEAD_CLOCKS_COUNT_MASK, (x)) +#define DSB_MMIO_CYCLES_MASK REG_GENMASK(7, 0) +#define DSB_MMIO_CYCLES(x) REG_FIELD_PREP(DSB_MMIO_CYCLES_MASK, (x)) +#define DSB_POLLFUNC(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x10) +#define DSB_POLL_ENABLE REG_BIT(31) +#define DSB_POLL_WAIT_MASK REG_GENMASK(30, 23) +#define DSB_POLL_WAIT(x) REG_FIELD_PREP(DSB_POLL_WAIT_MASK, (x)) /* usec */ +#define DSB_POLL_COUNT_MASK REG_GENMASK(22, 15) +#define DSB_POLL_COUNT(x) REG_FIELD_PREP(DSB_POLL_COUNT_MASK, (x)) +#define DSB_DEBUG(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x14) +#define DSB_POLLMASK(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x1c) +#define DSB_STATUS(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x24) +#define DSB_INTERRUPT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x28) +#define DSB_ATS_FAULT_INT_EN REG_BIT(20) +#define DSB_GTT_FAULT_INT_EN REG_BIT(19) +#define DSB_RSPTIMEOUT_INT_EN REG_BIT(18) +#define DSB_POLL_ERR_INT_EN REG_BIT(17) +#define DSB_PROG_INT_EN REG_BIT(16) +#define DSB_ATS_FAULT_INT_STATUS REG_BIT(4) +#define DSB_GTT_FAULT_INT_STATUS REG_BIT(3) +#define DSB_RSPTIMEOUT_INT_STATUS REG_BIT(2) +#define DSB_POLL_ERR_INT_STATUS REG_BIT(1) +#define DSB_PROG_INT_STATUS REG_BIT(0) +#define DSB_CURRENT_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x2c) +#define DSB_RM_TIMEOUT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x30) +#define DSB_RM_CLAIM_TIMEOUT REG_BIT(31) +#define DSB_RM_READY_TIMEOUT REG_BIT(30) +#define DSB_RM_CLAIM_TIMEOUT_COUNT_MASK REG_GENMASK(23, 16) +#define DSB_RM_CLAIM_TIMEOUT_COUNT(x) REG_FIELD_PREP(DSB_RM_CLAIM_TIMEOUT_COUNT_MASK, (x)) /* clocks */ +#define DSB_RM_READY_TIMEOUT_VALUE_MASK REG_GENMASK(15, 0) +#define DSB_RM_READY_TIMEOUT_VALUE(x) REG_FIELD_PREP(DSB_RM_READY_TIMEOUT_VALUE_MASK, (x)) /* usec */ +#define DSB_RMTIMEOUTREG_CAPTURE(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x34) +#define DSB_PMCTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x38) +#define DSB_PMCTRL_2(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x3c) +#define DSB_PF_LN_LOWER(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x40) +#define DSB_PF_LN_UPPER(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x44) +#define DSB_BUFRPT_CNT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x48) +#define DSB_CHICKEN(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0xf0) #define CLKREQ_POLICY _MMIO(0x101038) #define CLKREQ_POLICY_MEM_UP_OVRD REG_BIT(1) diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h index be43580a6979..db26de6b57bc 100644 --- a/drivers/gpu/drm/i915/i915_reg_defs.h +++ b/drivers/gpu/drm/i915/i915_reg_defs.h @@ -120,6 +120,35 @@ #define 
_PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a))) /* + * Like _PICK_EVEN(), but supports 2 ranges of evenly spaced address offsets. + * @__c_index corresponds to the index in which the second range starts to be + * used. Using math interval notation, the first range is used for indexes [ 0, + * @__c_index), while the second range is used for [ @__c_index, ... ). Example: + * + * #define _FOO_A 0xf000 + * #define _FOO_B 0xf004 + * #define _FOO_C 0xf008 + * #define _SUPER_FOO_A 0xa000 + * #define _SUPER_FOO_B 0xa100 + * #define FOO(x) _MMIO(_PICK_EVEN_2RANGES(x, 3, \ + * _FOO_A, _FOO_B, \ + * _SUPER_FOO_A, _SUPER_FOO_B)) + * + * This expands to: + * 0: 0xf000, + * 1: 0xf004, + * 2: 0xf008, + * 3: 0xa000, + * 4: 0xa100, + * 5: 0xa200, + * ... + */ +#define _PICK_EVEN_2RANGES(__index, __c_index, __a, __b, __c, __d) \ + (BUILD_BUG_ON_ZERO(!__is_constexpr(__c_index)) + \ + ((__index) < (__c_index) ? _PICK_EVEN(__index, __a, __b) : \ + _PICK_EVEN((__index) - (__c_index), __c, __d))) + +/* * Given the arbitrary numbers in varargs, pick the 0-based __index'th number. * * Always prefer _PICK_EVEN() over this if the numbers are evenly spaced. @@ -136,6 +165,8 @@ typedef struct { u32 reg; } i915_mcr_reg_t; +#define MCR_REG(offset) ((const i915_mcr_reg_t){ .reg = (offset) }) + #define INVALID_MMIO_REG _MMIO(0) /* diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 7503dcb9043b..630a732aaecc 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -48,7 +48,6 @@ #include "i915_driver.h" #include "i915_drv.h" #include "i915_trace.h" -#include "intel_pm.h" struct execute_cb { struct irq_work work; diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 595e8b574990..e88bb4f04305 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -37,7 +37,6 @@ #include "i915_drv.h" #include "i915_sysfs.h" -#include "intel_pm.h" struct drm_i915_private *kdev_minor_to_i915(struct device *kdev) { diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 98769e5f2c3d..fc5cd14adfcc 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -119,9 +119,14 @@ void intel_device_info_print(const struct intel_device_info *info, drm_printf(p, "display version: %u\n", runtime->display.ip.ver); + drm_printf(p, "graphics stepping: %s\n", intel_step_name(runtime->step.graphics_step)); + drm_printf(p, "media stepping: %s\n", intel_step_name(runtime->step.media_step)); + drm_printf(p, "display stepping: %s\n", intel_step_name(runtime->step.display_step)); + drm_printf(p, "base die stepping: %s\n", intel_step_name(runtime->step.basedie_step)); + drm_printf(p, "gt: %d\n", info->gt); - drm_printf(p, "memory-regions: %x\n", runtime->memory_regions); - drm_printf(p, "page-sizes: %x\n", runtime->page_sizes); + drm_printf(p, "memory-regions: 0x%x\n", runtime->memory_regions); + drm_printf(p, "page-sizes: 0x%x\n", runtime->page_sizes); drm_printf(p, "platform: %s\n", intel_platform_name(info->platform)); drm_printf(p, "ppgtt-size: %d\n", runtime->ppgtt_size); drm_printf(p, "ppgtt-type: %d\n", runtime->ppgtt_type); @@ -202,6 +207,10 @@ static const u16 subplatform_rpl_ids[] = { INTEL_RPLP_IDS(0), }; +static const u16 subplatform_rplu_ids[] = { + INTEL_RPLU_IDS(0), +}; + static const u16 subplatform_g10_ids[] = { INTEL_DG2_G10_IDS(0), INTEL_ATS_M150_IDS(0), @@ -269,6 +278,9 @@ static void 
intel_device_info_subplatform_init(struct drm_i915_private *i915) } else if (find_devid(devid, subplatform_rpl_ids, ARRAY_SIZE(subplatform_rpl_ids))) { mask = BIT(INTEL_SUBPLATFORM_RPL); + if (find_devid(devid, subplatform_rplu_ids, + ARRAY_SIZE(subplatform_rplu_ids))) + mask |= BIT(INTEL_SUBPLATFORM_RPLU); } else if (find_devid(devid, subplatform_g10_ids, ARRAY_SIZE(subplatform_g10_ids))) { mask = BIT(INTEL_SUBPLATFORM_G10); @@ -436,6 +448,14 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) runtime->num_sprites[pipe] = 1; } + if (HAS_DISPLAY(dev_priv) && + (IS_DGFX(dev_priv) || DISPLAY_VER(dev_priv) >= 14) && + !(intel_de_read(dev_priv, GU_CNTL_PROTECTED) & DEPRESENT)) { + drm_info(&dev_priv->drm, "Display not present, disabling\n"); + + runtime->pipe_mask = 0; + } + if (HAS_DISPLAY(dev_priv) && IS_GRAPHICS_VER(dev_priv, 7, 8) && HAS_PCH_SPLIT(dev_priv)) { u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); @@ -457,8 +477,6 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) drm_info(&dev_priv->drm, "Display fused off, disabling\n"); runtime->pipe_mask = 0; - runtime->cpu_transcoder_mask = 0; - runtime->fbc_mask = 0; } else if (fuse_strap & IVB_PIPE_C_DISABLE) { drm_info(&dev_priv->drm, "PipeC fused off\n"); runtime->pipe_mask &= ~BIT(PIPE_C); @@ -535,5 +553,5 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps, { drm_printf(p, "Has logical contexts? %s\n", str_yes_no(caps->has_logical_contexts)); - drm_printf(p, "scheduler: %x\n", caps->scheduler); + drm_printf(p, "scheduler: 0x%x\n", caps->scheduler); } diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 80bda653d61b..b30cc8b97c3a 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -127,6 +127,7 @@ enum intel_platform { * bit set */ #define INTEL_SUBPLATFORM_N 1 +#define INTEL_SUBPLATFORM_RPLU 2 /* MTL */ #define INTEL_SUBPLATFORM_M 0 diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c index 1f4805aa2b08..2b3fe469b360 100644 --- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c +++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c @@ -8,6 +8,7 @@ #include "display/intel_display_types.h" #include "display/intel_dmc_regs.h" #include "display/intel_dpio_phy.h" +#include "display/intel_lvds_regs.h" #include "display/vlv_dsi_pll_regs.h" #include "gt/intel_gt_regs.h" #include "gvt/gvt.h" @@ -117,10 +118,10 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter) MMIO_D(PIPEDSL(PIPE_B)); MMIO_D(PIPEDSL(PIPE_C)); MMIO_D(PIPEDSL(_PIPE_EDP)); - MMIO_D(PIPECONF(PIPE_A)); - MMIO_D(PIPECONF(PIPE_B)); - MMIO_D(PIPECONF(PIPE_C)); - MMIO_D(PIPECONF(_PIPE_EDP)); + MMIO_D(TRANSCONF(TRANSCODER_A)); + MMIO_D(TRANSCONF(TRANSCODER_B)); + MMIO_D(TRANSCONF(TRANSCODER_C)); + MMIO_D(TRANSCONF(TRANSCODER_EDP)); MMIO_D(PIPESTAT(PIPE_A)); MMIO_D(PIPESTAT(PIPE_B)); MMIO_D(PIPESTAT(PIPE_C)); @@ -218,41 +219,41 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter) MMIO_D(SPRSCALE(PIPE_C)); MMIO_D(SPRSURFLIVE(PIPE_C)); MMIO_D(REG_50080(PIPE_C, PLANE_SPRITE0)); - MMIO_D(HTOTAL(TRANSCODER_A)); - MMIO_D(HBLANK(TRANSCODER_A)); - MMIO_D(HSYNC(TRANSCODER_A)); - MMIO_D(VTOTAL(TRANSCODER_A)); - MMIO_D(VBLANK(TRANSCODER_A)); - MMIO_D(VSYNC(TRANSCODER_A)); + MMIO_D(TRANS_HTOTAL(TRANSCODER_A)); + MMIO_D(TRANS_HBLANK(TRANSCODER_A)); + MMIO_D(TRANS_HSYNC(TRANSCODER_A)); + MMIO_D(TRANS_VTOTAL(TRANSCODER_A)); + 
MMIO_D(TRANS_VBLANK(TRANSCODER_A)); + MMIO_D(TRANS_VSYNC(TRANSCODER_A)); MMIO_D(BCLRPAT(TRANSCODER_A)); - MMIO_D(VSYNCSHIFT(TRANSCODER_A)); + MMIO_D(TRANS_VSYNCSHIFT(TRANSCODER_A)); MMIO_D(PIPESRC(TRANSCODER_A)); - MMIO_D(HTOTAL(TRANSCODER_B)); - MMIO_D(HBLANK(TRANSCODER_B)); - MMIO_D(HSYNC(TRANSCODER_B)); - MMIO_D(VTOTAL(TRANSCODER_B)); - MMIO_D(VBLANK(TRANSCODER_B)); - MMIO_D(VSYNC(TRANSCODER_B)); + MMIO_D(TRANS_HTOTAL(TRANSCODER_B)); + MMIO_D(TRANS_HBLANK(TRANSCODER_B)); + MMIO_D(TRANS_HSYNC(TRANSCODER_B)); + MMIO_D(TRANS_VTOTAL(TRANSCODER_B)); + MMIO_D(TRANS_VBLANK(TRANSCODER_B)); + MMIO_D(TRANS_VSYNC(TRANSCODER_B)); MMIO_D(BCLRPAT(TRANSCODER_B)); - MMIO_D(VSYNCSHIFT(TRANSCODER_B)); + MMIO_D(TRANS_VSYNCSHIFT(TRANSCODER_B)); MMIO_D(PIPESRC(TRANSCODER_B)); - MMIO_D(HTOTAL(TRANSCODER_C)); - MMIO_D(HBLANK(TRANSCODER_C)); - MMIO_D(HSYNC(TRANSCODER_C)); - MMIO_D(VTOTAL(TRANSCODER_C)); - MMIO_D(VBLANK(TRANSCODER_C)); - MMIO_D(VSYNC(TRANSCODER_C)); + MMIO_D(TRANS_HTOTAL(TRANSCODER_C)); + MMIO_D(TRANS_HBLANK(TRANSCODER_C)); + MMIO_D(TRANS_HSYNC(TRANSCODER_C)); + MMIO_D(TRANS_VTOTAL(TRANSCODER_C)); + MMIO_D(TRANS_VBLANK(TRANSCODER_C)); + MMIO_D(TRANS_VSYNC(TRANSCODER_C)); MMIO_D(BCLRPAT(TRANSCODER_C)); - MMIO_D(VSYNCSHIFT(TRANSCODER_C)); + MMIO_D(TRANS_VSYNCSHIFT(TRANSCODER_C)); MMIO_D(PIPESRC(TRANSCODER_C)); - MMIO_D(HTOTAL(TRANSCODER_EDP)); - MMIO_D(HBLANK(TRANSCODER_EDP)); - MMIO_D(HSYNC(TRANSCODER_EDP)); - MMIO_D(VTOTAL(TRANSCODER_EDP)); - MMIO_D(VBLANK(TRANSCODER_EDP)); - MMIO_D(VSYNC(TRANSCODER_EDP)); + MMIO_D(TRANS_HTOTAL(TRANSCODER_EDP)); + MMIO_D(TRANS_HBLANK(TRANSCODER_EDP)); + MMIO_D(TRANS_HSYNC(TRANSCODER_EDP)); + MMIO_D(TRANS_VTOTAL(TRANSCODER_EDP)); + MMIO_D(TRANS_VBLANK(TRANSCODER_EDP)); + MMIO_D(TRANS_VSYNC(TRANSCODER_EDP)); MMIO_D(BCLRPAT(TRANSCODER_EDP)); - MMIO_D(VSYNCSHIFT(TRANSCODER_EDP)); + MMIO_D(TRANS_VSYNCSHIFT(TRANSCODER_EDP)); MMIO_D(PIPE_DATA_M1(TRANSCODER_A)); MMIO_D(PIPE_DATA_N1(TRANSCODER_A)); MMIO_D(PIPE_DATA_M2(TRANSCODER_A)); @@ -493,9 +494,9 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter) MMIO_D(GAMMA_MODE(PIPE_A)); MMIO_D(GAMMA_MODE(PIPE_B)); MMIO_D(GAMMA_MODE(PIPE_C)); - MMIO_D(PIPE_MULT(PIPE_A)); - MMIO_D(PIPE_MULT(PIPE_B)); - MMIO_D(PIPE_MULT(PIPE_C)); + MMIO_D(TRANS_MULT(TRANSCODER_A)); + MMIO_D(TRANS_MULT(TRANSCODER_B)); + MMIO_D(TRANS_MULT(TRANSCODER_C)); MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A)); MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B)); MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C)); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 59714b1080d4..c45af0d981fd 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -44,13 +44,6 @@ struct drm_i915_clock_gating_funcs { void (*init_clock_gating)(struct drm_i915_private *i915); }; -/* used in computing the new watermarks state */ -struct intel_wm_config { - unsigned int num_pipes_active; - bool sprites_enabled; - bool sprites_scaled; -}; - static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) { if (HAS_LLC(dev_priv)) { @@ -131,3961 +124,6 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv) PWM1_GATING_DIS | PWM2_GATING_DIS); } -static void pnv_get_mem_freq(struct drm_i915_private *dev_priv) -{ - u32 tmp; - - tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG); - - switch (tmp & CLKCFG_FSB_MASK) { - case CLKCFG_FSB_533: - dev_priv->fsb_freq = 533; /* 133*4 */ - break; - case CLKCFG_FSB_800: - dev_priv->fsb_freq = 800; /* 200*4 */ - break; - case CLKCFG_FSB_667: - dev_priv->fsb_freq = 667; 
/* 167*4 */ - break; - case CLKCFG_FSB_400: - dev_priv->fsb_freq = 400; /* 100*4 */ - break; - } - - switch (tmp & CLKCFG_MEM_MASK) { - case CLKCFG_MEM_533: - dev_priv->mem_freq = 533; - break; - case CLKCFG_MEM_667: - dev_priv->mem_freq = 667; - break; - case CLKCFG_MEM_800: - dev_priv->mem_freq = 800; - break; - } - - /* detect pineview DDR3 setting */ - tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL); - dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0; -} - -static void ilk_get_mem_freq(struct drm_i915_private *dev_priv) -{ - u16 ddrpll, csipll; - - ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1); - csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0); - - switch (ddrpll & 0xff) { - case 0xc: - dev_priv->mem_freq = 800; - break; - case 0x10: - dev_priv->mem_freq = 1066; - break; - case 0x14: - dev_priv->mem_freq = 1333; - break; - case 0x18: - dev_priv->mem_freq = 1600; - break; - default: - drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n", - ddrpll & 0xff); - dev_priv->mem_freq = 0; - break; - } - - switch (csipll & 0x3ff) { - case 0x00c: - dev_priv->fsb_freq = 3200; - break; - case 0x00e: - dev_priv->fsb_freq = 3733; - break; - case 0x010: - dev_priv->fsb_freq = 4266; - break; - case 0x012: - dev_priv->fsb_freq = 4800; - break; - case 0x014: - dev_priv->fsb_freq = 5333; - break; - case 0x016: - dev_priv->fsb_freq = 5866; - break; - case 0x018: - dev_priv->fsb_freq = 6400; - break; - default: - drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n", - csipll & 0x3ff); - dev_priv->fsb_freq = 0; - break; - } -} - -static const struct cxsr_latency cxsr_latency_table[] = { - {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ - {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ - {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ - {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */ - {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */ - - {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ - {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ - {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ - {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */ - {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */ - - {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ - {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ - {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ - {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */ - {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */ - - {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ - {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ - {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ - {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */ - {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */ - - {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ - {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ - {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ - {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */ - {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */ - - {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ - {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ - {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ - {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */ - {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* 
DDR3-800 SC */ -}; - -static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop, - bool is_ddr3, - int fsb, - int mem) -{ - const struct cxsr_latency *latency; - int i; - - if (fsb == 0 || mem == 0) - return NULL; - - for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { - latency = &cxsr_latency_table[i]; - if (is_desktop == latency->is_desktop && - is_ddr3 == latency->is_ddr3 && - fsb == latency->fsb_freq && mem == latency->mem_freq) - return latency; - } - - DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); - - return NULL; -} - -static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable) -{ - u32 val; - - vlv_punit_get(dev_priv); - - val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); - if (enable) - val &= ~FORCE_DDR_HIGH_FREQ; - else - val |= FORCE_DDR_HIGH_FREQ; - val &= ~FORCE_DDR_LOW_FREQ; - val |= FORCE_DDR_FREQ_REQ_ACK; - vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); - - if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & - FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) - drm_err(&dev_priv->drm, - "timed out waiting for Punit DDR DVFS request\n"); - - vlv_punit_put(dev_priv); -} - -static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) -{ - u32 val; - - vlv_punit_get(dev_priv); - - val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); - if (enable) - val |= DSP_MAXFIFO_PM5_ENABLE; - else - val &= ~DSP_MAXFIFO_PM5_ENABLE; - vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val); - - vlv_punit_put(dev_priv); -} - -#define FW_WM(value, plane) \ - (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK) - -static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) -{ - bool was_enabled; - u32 val; - - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; - intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); - intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV); - } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) { - was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; - intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0); - intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF); - } else if (IS_PINEVIEW(dev_priv)) { - val = intel_uncore_read(&dev_priv->uncore, DSPFW3); - was_enabled = val & PINEVIEW_SELF_REFRESH_EN; - if (enable) - val |= PINEVIEW_SELF_REFRESH_EN; - else - val &= ~PINEVIEW_SELF_REFRESH_EN; - intel_uncore_write(&dev_priv->uncore, DSPFW3, val); - intel_uncore_posting_read(&dev_priv->uncore, DSPFW3); - } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) { - was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; - val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) : - _MASKED_BIT_DISABLE(FW_BLC_SELF_EN); - intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val); - intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF); - } else if (IS_I915GM(dev_priv)) { - /* - * FIXME can't find a bit like this for 915G, and - * and yet it does have the related watermark in - * FW_BLC_SELF. What's going on? - */ - was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN; - val = enable ? 
_MASKED_BIT_ENABLE(INSTPM_SELF_EN) : - _MASKED_BIT_DISABLE(INSTPM_SELF_EN); - intel_uncore_write(&dev_priv->uncore, INSTPM, val); - intel_uncore_posting_read(&dev_priv->uncore, INSTPM); - } else { - return false; - } - - trace_intel_memory_cxsr(dev_priv, was_enabled, enable); - - drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n", - str_enabled_disabled(enable), - str_enabled_disabled(was_enabled)); - - return was_enabled; -} - -/** - * intel_set_memory_cxsr - Configure CxSR state - * @dev_priv: i915 device - * @enable: Allow vs. disallow CxSR - * - * Allow or disallow the system to enter a special CxSR - * (C-state self refresh) state. What typically happens in CxSR mode - * is that several display FIFOs may get combined into a single larger - * FIFO for a particular plane (so called max FIFO mode) to allow the - * system to defer memory fetches longer, and the memory will enter - * self refresh. - * - * Note that enabling CxSR does not guarantee that the system enter - * this special mode, nor does it guarantee that the system stays - * in that mode once entered. So this just allows/disallows the system - * to autonomously utilize the CxSR mode. Other factors such as core - * C-states will affect when/if the system actually enters/exits the - * CxSR mode. - * - * Note that on VLV/CHV this actually only controls the max FIFO mode, - * and the system is free to enter/exit memory self refresh at any time - * even when the use of CxSR has been disallowed. - * - * While the system is actually in the CxSR/max FIFO mode, some plane - * control registers will not get latched on vblank. Thus in order to - * guarantee the system will respond to changes in the plane registers - * we must always disallow CxSR prior to making changes to those registers. - * Unfortunately the system will re-evaluate the CxSR conditions at - * frame start which happens after vblank start (which is when the plane - * registers would get latched), so we can't proceed with the plane update - * during the same frame where we disallowed CxSR. - * - * Certain platforms also have a deeper HPLL SR mode. Fortunately the - * HPLL SR mode depends on CxSR itself, so we don't have to hand hold - * the hardware w.r.t. HPLL SR when writing to plane registers. - * Disallowing just CxSR is sufficient. - */ -bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) -{ - bool ret; - - mutex_lock(&dev_priv->display.wm.wm_mutex); - ret = _intel_set_memory_cxsr(dev_priv, enable); - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - dev_priv->display.wm.vlv.cxsr = enable; - else if (IS_G4X(dev_priv)) - dev_priv->display.wm.g4x.cxsr = enable; - mutex_unlock(&dev_priv->display.wm.wm_mutex); - - return ret; -} - -/* - * Latency for FIFO fetches is dependent on several factors: - * - memory configuration (speed, channels) - * - chipset - * - current MCH state - * It can be fairly high in some situations, so here we assume a fairly - * pessimal value. It's a tradeoff between extra memory fetches (if we - * set this value too high, the FIFO will fetch frequently to stay full) - * and power consumption (set it too low to save power and we might see - * FIFO underruns and display "flicker"). - * - * A value of 5us seems to be a good balance; safe for very low end - * platforms but not overly aggressive on lower latency configs. 
- */ -static const int pessimal_latency_ns = 5000; - -#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \ - ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8)) - -static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; - enum pipe pipe = crtc->pipe; - int sprite0_start, sprite1_start; - u32 dsparb, dsparb2, dsparb3; - - switch (pipe) { - case PIPE_A: - dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); - dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); - sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0); - sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4); - break; - case PIPE_B: - dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); - dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); - sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8); - sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12); - break; - case PIPE_C: - dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); - dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3); - sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16); - sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20); - break; - default: - MISSING_CASE(pipe); - return; - } - - fifo_state->plane[PLANE_PRIMARY] = sprite0_start; - fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start; - fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start; - fifo_state->plane[PLANE_CURSOR] = 63; -} - -static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, - enum i9xx_plane_id i9xx_plane) -{ - u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); - int size; - - size = dsparb & 0x7f; - if (i9xx_plane == PLANE_B) - size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; - - drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", - dsparb, plane_name(i9xx_plane), size); - - return size; -} - -static int i830_get_fifo_size(struct drm_i915_private *dev_priv, - enum i9xx_plane_id i9xx_plane) -{ - u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); - int size; - - size = dsparb & 0x1ff; - if (i9xx_plane == PLANE_B) - size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; - size >>= 1; /* Convert to cachelines */ - - drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", - dsparb, plane_name(i9xx_plane), size); - - return size; -} - -static int i845_get_fifo_size(struct drm_i915_private *dev_priv, - enum i9xx_plane_id i9xx_plane) -{ - u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); - int size; - - size = dsparb & 0x7f; - size >>= 2; /* Convert to cachelines */ - - drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", - dsparb, plane_name(i9xx_plane), size); - - return size; -} - -/* Pineview has different values for various configs */ -static const struct intel_watermark_params pnv_display_wm = { - .fifo_size = PINEVIEW_DISPLAY_FIFO, - .max_wm = PINEVIEW_MAX_WM, - .default_wm = PINEVIEW_DFT_WM, - .guard_size = PINEVIEW_GUARD_WM, - .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, -}; - -static const struct intel_watermark_params pnv_display_hplloff_wm = { - .fifo_size = PINEVIEW_DISPLAY_FIFO, - .max_wm = PINEVIEW_MAX_WM, - .default_wm = PINEVIEW_DFT_HPLLOFF_WM, - .guard_size = PINEVIEW_GUARD_WM, - .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, -}; - -static const struct intel_watermark_params pnv_cursor_wm = { - .fifo_size = PINEVIEW_CURSOR_FIFO, - 
.max_wm = PINEVIEW_CURSOR_MAX_WM, - .default_wm = PINEVIEW_CURSOR_DFT_WM, - .guard_size = PINEVIEW_CURSOR_GUARD_WM, - .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, -}; - -static const struct intel_watermark_params pnv_cursor_hplloff_wm = { - .fifo_size = PINEVIEW_CURSOR_FIFO, - .max_wm = PINEVIEW_CURSOR_MAX_WM, - .default_wm = PINEVIEW_CURSOR_DFT_WM, - .guard_size = PINEVIEW_CURSOR_GUARD_WM, - .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, -}; - -static const struct intel_watermark_params i965_cursor_wm_info = { - .fifo_size = I965_CURSOR_FIFO, - .max_wm = I965_CURSOR_MAX_WM, - .default_wm = I965_CURSOR_DFT_WM, - .guard_size = 2, - .cacheline_size = I915_FIFO_LINE_SIZE, -}; - -static const struct intel_watermark_params i945_wm_info = { - .fifo_size = I945_FIFO_SIZE, - .max_wm = I915_MAX_WM, - .default_wm = 1, - .guard_size = 2, - .cacheline_size = I915_FIFO_LINE_SIZE, -}; - -static const struct intel_watermark_params i915_wm_info = { - .fifo_size = I915_FIFO_SIZE, - .max_wm = I915_MAX_WM, - .default_wm = 1, - .guard_size = 2, - .cacheline_size = I915_FIFO_LINE_SIZE, -}; - -static const struct intel_watermark_params i830_a_wm_info = { - .fifo_size = I855GM_FIFO_SIZE, - .max_wm = I915_MAX_WM, - .default_wm = 1, - .guard_size = 2, - .cacheline_size = I830_FIFO_LINE_SIZE, -}; - -static const struct intel_watermark_params i830_bc_wm_info = { - .fifo_size = I855GM_FIFO_SIZE, - .max_wm = I915_MAX_WM/2, - .default_wm = 1, - .guard_size = 2, - .cacheline_size = I830_FIFO_LINE_SIZE, -}; - -static const struct intel_watermark_params i845_wm_info = { - .fifo_size = I830_FIFO_SIZE, - .max_wm = I915_MAX_WM, - .default_wm = 1, - .guard_size = 2, - .cacheline_size = I830_FIFO_LINE_SIZE, -}; - -/** - * intel_wm_method1 - Method 1 / "small buffer" watermark formula - * @pixel_rate: Pipe pixel rate in kHz - * @cpp: Plane bytes per pixel - * @latency: Memory wakeup latency in 0.1us units - * - * Compute the watermark using the method 1 or "small buffer" - * formula. The caller may additonally add extra cachelines - * to account for TLB misses and clock crossings. - * - * This method is concerned with the short term drain rate - * of the FIFO, ie. it does not account for blanking periods - * which would effectively reduce the average drain rate across - * a longer period. The name "small" refers to the fact the - * FIFO is relatively small compared to the amount of data - * fetched. - * - * The FIFO level vs. time graph might look something like: - * - * |\ |\ - * | \ | \ - * __---__---__ (- plane active, _ blanking) - * -> time - * - * or perhaps like this: - * - * |\|\ |\|\ - * __----__----__ (- plane active, _ blanking) - * -> time - * - * Returns: - * The watermark in bytes - */ -static unsigned int intel_wm_method1(unsigned int pixel_rate, - unsigned int cpp, - unsigned int latency) -{ - u64 ret; - - ret = mul_u32_u32(pixel_rate, cpp * latency); - ret = DIV_ROUND_UP_ULL(ret, 10000); - - return ret; -} - -/** - * intel_wm_method2 - Method 2 / "large buffer" watermark formula - * @pixel_rate: Pipe pixel rate in kHz - * @htotal: Pipe horizontal total - * @width: Plane width in pixels - * @cpp: Plane bytes per pixel - * @latency: Memory wakeup latency in 0.1us units - * - * Compute the watermark using the method 2 or "large buffer" - * formula. The caller may additonally add extra cachelines - * to account for TLB misses and clock crossings. - * - * This method is concerned with the long term drain rate - * of the FIFO, ie. 
it does account for blanking periods - * which effectively reduce the average drain rate across - * a longer period. The name "large" refers to the fact the - * FIFO is relatively large compared to the amount of data - * fetched. - * - * The FIFO level vs. time graph might look something like: - * - * |\___ |\___ - * | \___ | \___ - * | \ | \ - * __ --__--__--__--__--__--__ (- plane active, _ blanking) - * -> time - * - * Returns: - * The watermark in bytes - */ -static unsigned int intel_wm_method2(unsigned int pixel_rate, - unsigned int htotal, - unsigned int width, - unsigned int cpp, - unsigned int latency) -{ - unsigned int ret; - - /* - * FIXME remove once all users are computing - * watermarks in the correct place. - */ - if (WARN_ON_ONCE(htotal == 0)) - htotal = 1; - - ret = (latency * pixel_rate) / (htotal * 10000); - ret = (ret + 1) * width * cpp; - - return ret; -} - -/** - * intel_calculate_wm - calculate watermark level - * @pixel_rate: pixel clock - * @wm: chip FIFO params - * @fifo_size: size of the FIFO buffer - * @cpp: bytes per pixel - * @latency_ns: memory latency for the platform - * - * Calculate the watermark level (the level at which the display plane will - * start fetching from memory again). Each chip has a different display - * FIFO size and allocation, so the caller needs to figure that out and pass - * in the correct intel_watermark_params structure. - * - * As the pixel clock runs, the FIFO will be drained at a rate that depends - * on the pixel size. When it reaches the watermark level, it'll start - * fetching FIFO line sized based chunks from memory until the FIFO fills - * past the watermark point. If the FIFO drains completely, a FIFO underrun - * will occur, and a display engine hang could result. - */ -static unsigned int intel_calculate_wm(int pixel_rate, - const struct intel_watermark_params *wm, - int fifo_size, int cpp, - unsigned int latency_ns) -{ - int entries, wm_size; - - /* - * Note: we need to make sure we don't overflow for various clock & - * latency values. - * clocks go from a few thousand to several hundred thousand. - * latency is usually a few thousand - */ - entries = intel_wm_method1(pixel_rate, cpp, - latency_ns / 100); - entries = DIV_ROUND_UP(entries, wm->cacheline_size) + - wm->guard_size; - DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries); - - wm_size = fifo_size - entries; - DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size); - - /* Don't promote wm_size to unsigned... */ - if (wm_size > wm->max_wm) - wm_size = wm->max_wm; - if (wm_size <= 0) - wm_size = wm->default_wm; - - /* - * Bspec seems to indicate that the value shouldn't be lower than - * 'burst size + 1'. Certainly 830 is quite unhappy with low values. - * Lets go for 8 which is the burst size since certain platforms - * already use a hardcoded 8 (which is what the spec says should be - * done). 
- */ - if (wm_size <= 8) - wm_size = 8; - - return wm_size; -} - -static bool is_disabling(int old, int new, int threshold) -{ - return old >= threshold && new < threshold; -} - -static bool is_enabling(int old, int new, int threshold) -{ - return old < threshold && new >= threshold; -} - -static int intel_wm_num_levels(struct drm_i915_private *dev_priv) -{ - return dev_priv->display.wm.max_level + 1; -} - -bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - - /* FIXME check the 'enable' instead */ - if (!crtc_state->hw.active) - return false; - - /* - * Treat cursor with fb as always visible since cursor updates - * can happen faster than the vrefresh rate, and the current - * watermark code doesn't handle that correctly. Cursor updates - * which set/clear the fb or change the cursor size are going - * to get throttled by intel_legacy_cursor_update() to work - * around this problem with the watermark code. - */ - if (plane->id == PLANE_CURSOR) - return plane_state->hw.fb != NULL; - else - return plane_state->uapi.visible; -} - -static bool intel_crtc_active(struct intel_crtc *crtc) -{ - /* Be paranoid as we can arrive here with only partial - * state retrieved from the hardware during setup. - * - * We can ditch the adjusted_mode.crtc_clock check as soon - * as Haswell has gained clock readout/fastboot support. - * - * We can ditch the crtc->primary->state->fb check as soon as we can - * properly reconstruct framebuffers. - * - * FIXME: The intel_crtc->active here should be switched to - * crtc->state->active once we have proper CRTC states wired up - * for atomic. - */ - return crtc && crtc->active && crtc->base.primary->state->fb && - crtc->config->hw.adjusted_mode.crtc_clock; -} - -static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv) -{ - struct intel_crtc *crtc, *enabled = NULL; - - for_each_intel_crtc(&dev_priv->drm, crtc) { - if (intel_crtc_active(crtc)) { - if (enabled) - return NULL; - enabled = crtc; - } - } - - return enabled; -} - -static void pnv_update_wm(struct drm_i915_private *dev_priv) -{ - struct intel_crtc *crtc; - const struct cxsr_latency *latency; - u32 reg; - unsigned int wm; - - latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv), - dev_priv->is_ddr3, - dev_priv->fsb_freq, - dev_priv->mem_freq); - if (!latency) { - drm_dbg_kms(&dev_priv->drm, - "Unknown FSB/MEM found, disable CxSR\n"); - intel_set_memory_cxsr(dev_priv, false); - return; - } - - crtc = single_enabled_crtc(dev_priv); - if (crtc) { - const struct drm_framebuffer *fb = - crtc->base.primary->state->fb; - int pixel_rate = crtc->config->pixel_rate; - int cpp = fb->format->cpp[0]; - - /* Display SR */ - wm = intel_calculate_wm(pixel_rate, &pnv_display_wm, - pnv_display_wm.fifo_size, - cpp, latency->display_sr); - reg = intel_uncore_read(&dev_priv->uncore, DSPFW1); - reg &= ~DSPFW_SR_MASK; - reg |= FW_WM(wm, SR); - intel_uncore_write(&dev_priv->uncore, DSPFW1, reg); - drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg); - - /* cursor SR */ - wm = intel_calculate_wm(pixel_rate, &pnv_cursor_wm, - pnv_display_wm.fifo_size, - 4, latency->cursor_sr); - intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_CURSOR_SR_MASK, - FW_WM(wm, CURSOR_SR)); - - /* Display HPLL off SR */ - wm = intel_calculate_wm(pixel_rate, &pnv_display_hplloff_wm, - pnv_display_hplloff_wm.fifo_size, - cpp, latency->display_hpll_disable); - 
intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR)); - - /* cursor HPLL off SR */ - wm = intel_calculate_wm(pixel_rate, &pnv_cursor_hplloff_wm, - pnv_display_hplloff_wm.fifo_size, - 4, latency->cursor_hpll_disable); - reg = intel_uncore_read(&dev_priv->uncore, DSPFW3); - reg &= ~DSPFW_HPLL_CURSOR_MASK; - reg |= FW_WM(wm, HPLL_CURSOR); - intel_uncore_write(&dev_priv->uncore, DSPFW3, reg); - drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg); - - intel_set_memory_cxsr(dev_priv, true); - } else { - intel_set_memory_cxsr(dev_priv, false); - } -} - -/* - * Documentation says: - * "If the line size is small, the TLB fetches can get in the way of the - * data fetches, causing some lag in the pixel data return which is not - * accounted for in the above formulas. The following adjustment only - * needs to be applied if eight whole lines fit in the buffer at once. - * The WM is adjusted upwards by the difference between the FIFO size - * and the size of 8 whole lines. This adjustment is always performed - * in the actual pixel depth regardless of whether FBC is enabled or not." - */ -static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp) -{ - int tlb_miss = fifo_size * 64 - width * cpp * 8; - - return max(0, tlb_miss); -} - -static void g4x_write_wm_values(struct drm_i915_private *dev_priv, - const struct g4x_wm_values *wm) -{ - enum pipe pipe; - - for_each_pipe(dev_priv, pipe) - trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm); - - intel_uncore_write(&dev_priv->uncore, DSPFW1, - FW_WM(wm->sr.plane, SR) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA)); - intel_uncore_write(&dev_priv->uncore, DSPFW2, - (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) | - FW_WM(wm->sr.fbc, FBC_SR) | - FW_WM(wm->hpll.fbc, FBC_HPLL_SR) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA)); - intel_uncore_write(&dev_priv->uncore, DSPFW3, - (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) | - FW_WM(wm->sr.cursor, CURSOR_SR) | - FW_WM(wm->hpll.cursor, HPLL_CURSOR) | - FW_WM(wm->hpll.plane, HPLL_SR)); - - intel_uncore_posting_read(&dev_priv->uncore, DSPFW1); -} - -#define FW_WM_VLV(value, plane) \ - (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV) - -static void vlv_write_wm_values(struct drm_i915_private *dev_priv, - const struct vlv_wm_values *wm) -{ - enum pipe pipe; - - for_each_pipe(dev_priv, pipe) { - trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm); - - intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe), - (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) | - (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) | - (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) | - (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT)); - } - - /* - * Zero the (unused) WM1 watermarks, and also clear all the - * high order bits so that there are no out of bounds values - * present in the registers during the reprogramming. 
- */ - intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0); - intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0); - intel_uncore_write(&dev_priv->uncore, DSPFW4, 0); - intel_uncore_write(&dev_priv->uncore, DSPFW5, 0); - intel_uncore_write(&dev_priv->uncore, DSPFW6, 0); - - intel_uncore_write(&dev_priv->uncore, DSPFW1, - FW_WM(wm->sr.plane, SR) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) | - FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) | - FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA)); - intel_uncore_write(&dev_priv->uncore, DSPFW2, - FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) | - FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA)); - intel_uncore_write(&dev_priv->uncore, DSPFW3, - FW_WM(wm->sr.cursor, CURSOR_SR)); - - if (IS_CHERRYVIEW(dev_priv)) { - intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV, - FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | - FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); - intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV, - FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) | - FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE)); - intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV, - FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) | - FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC)); - intel_uncore_write(&dev_priv->uncore, DSPHOWM, - FW_WM(wm->sr.plane >> 9, SR_HI) | - FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) | - FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) | - FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); - } else { - intel_uncore_write(&dev_priv->uncore, DSPFW7, - FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | - FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); - intel_uncore_write(&dev_priv->uncore, DSPHOWM, - FW_WM(wm->sr.plane >> 9, SR_HI) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); - } - - intel_uncore_posting_read(&dev_priv->uncore, DSPFW1); -} - -#undef FW_WM_VLV - -static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv) -{ - /* all latencies in usec */ - dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5; - dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12; - dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35; - - dev_priv->display.wm.max_level = G4X_WM_LEVEL_HPLL; -} - -static int g4x_plane_fifo_size(enum plane_id plane_id, int level) -{ - /* - * DSPCNTR[13] supposedly controls whether the - * primary plane can use the FIFO space otherwise - * reserved for the sprite plane. It's not 100% clear - * what the actual FIFO size is, but it looks like we - * can happily set both primary and sprite watermarks - * up to 127 cachelines. 
So that would seem to mean - * that either DSPCNTR[13] doesn't do anything, or that - * the total FIFO is >= 256 cachelines in size. Either - * way, we don't seem to have to worry about this - * repartitioning as the maximum watermark value the - * register can hold for each plane is lower than the - * minimum FIFO size. - */ - switch (plane_id) { - case PLANE_CURSOR: - return 63; - case PLANE_PRIMARY: - return level == G4X_WM_LEVEL_NORMAL ? 127 : 511; - case PLANE_SPRITE0: - return level == G4X_WM_LEVEL_NORMAL ? 127 : 0; - default: - MISSING_CASE(plane_id); - return 0; - } -} - -static int g4x_fbc_fifo_size(int level) -{ - switch (level) { - case G4X_WM_LEVEL_SR: - return 7; - case G4X_WM_LEVEL_HPLL: - return 15; - default: - MISSING_CASE(level); - return 0; - } -} - -static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - int level) -{ - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_display_mode *pipe_mode = - &crtc_state->hw.pipe_mode; - unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10; - unsigned int pixel_rate, htotal, cpp, width, wm; - - if (latency == 0) - return USHRT_MAX; - - if (!intel_wm_plane_visible(crtc_state, plane_state)) - return 0; - - cpp = plane_state->hw.fb->format->cpp[0]; - - /* - * WaUse32BppForSRWM:ctg,elk - * - * The spec fails to list this restriction for the - * HPLL watermark, which seems a little strange. - * Let's use 32bpp for the HPLL watermark as well. - */ - if (plane->id == PLANE_PRIMARY && - level != G4X_WM_LEVEL_NORMAL) - cpp = max(cpp, 4u); - - pixel_rate = crtc_state->pixel_rate; - htotal = pipe_mode->crtc_htotal; - width = drm_rect_width(&plane_state->uapi.src) >> 16; - - if (plane->id == PLANE_CURSOR) { - wm = intel_wm_method2(pixel_rate, htotal, width, cpp, latency); - } else if (plane->id == PLANE_PRIMARY && - level == G4X_WM_LEVEL_NORMAL) { - wm = intel_wm_method1(pixel_rate, cpp, latency); - } else { - unsigned int small, large; - - small = intel_wm_method1(pixel_rate, cpp, latency); - large = intel_wm_method2(pixel_rate, htotal, width, cpp, latency); - - wm = min(small, large); - } - - wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level), - width, cpp); - - wm = DIV_ROUND_UP(wm, 64) + 2; - - return min_t(unsigned int, wm, USHRT_MAX); -} - -static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state, - int level, enum plane_id plane_id, u16 value) -{ - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - bool dirty = false; - - for (; level < intel_wm_num_levels(dev_priv); level++) { - struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; - - dirty |= raw->plane[plane_id] != value; - raw->plane[plane_id] = value; - } - - return dirty; -} - -static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state, - int level, u16 value) -{ - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - bool dirty = false; - - /* NORMAL level doesn't have an FBC watermark */ - level = max(level, G4X_WM_LEVEL_SR); - - for (; level < intel_wm_num_levels(dev_priv); level++) { - struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; - - dirty |= raw->fbc != value; - raw->fbc = value; - } - - return dirty; -} - -static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - u32 pri_val); - -static bool g4x_raw_plane_wm_compute(struct intel_crtc_state 
*crtc_state, - const struct intel_plane_state *plane_state) -{ - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - int num_levels = intel_wm_num_levels(to_i915(plane->base.dev)); - enum plane_id plane_id = plane->id; - bool dirty = false; - int level; - - if (!intel_wm_plane_visible(crtc_state, plane_state)) { - dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0); - if (plane_id == PLANE_PRIMARY) - dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0); - goto out; - } - - for (level = 0; level < num_levels; level++) { - struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; - int wm, max_wm; - - wm = g4x_compute_wm(crtc_state, plane_state, level); - max_wm = g4x_plane_fifo_size(plane_id, level); - - if (wm > max_wm) - break; - - dirty |= raw->plane[plane_id] != wm; - raw->plane[plane_id] = wm; - - if (plane_id != PLANE_PRIMARY || - level == G4X_WM_LEVEL_NORMAL) - continue; - - wm = ilk_compute_fbc_wm(crtc_state, plane_state, - raw->plane[plane_id]); - max_wm = g4x_fbc_fifo_size(level); - - /* - * FBC wm is not mandatory as we - * can always just disable its use. - */ - if (wm > max_wm) - wm = USHRT_MAX; - - dirty |= raw->fbc != wm; - raw->fbc = wm; - } - - /* mark watermarks as invalid */ - dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX); - - if (plane_id == PLANE_PRIMARY) - dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX); - - out: - if (dirty) { - drm_dbg_kms(&dev_priv->drm, - "%s watermarks: normal=%d, SR=%d, HPLL=%d\n", - plane->base.name, - crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id], - crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id], - crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]); - - if (plane_id == PLANE_PRIMARY) - drm_dbg_kms(&dev_priv->drm, - "FBC watermarks: SR=%d, HPLL=%d\n", - crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc, - crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc); - } - - return dirty; -} - -static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state, - enum plane_id plane_id, int level) -{ - const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; - - return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level); -} - -static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, - int level) -{ - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - - if (level > dev_priv->display.wm.max_level) - return false; - - return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) && - g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) && - g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level); -} - -/* mark all levels starting from 'level' as invalid */ -static void g4x_invalidate_wms(struct intel_crtc *crtc, - struct g4x_wm_state *wm_state, int level) -{ - if (level <= G4X_WM_LEVEL_NORMAL) { - enum plane_id plane_id; - - for_each_plane_id_on_crtc(crtc, plane_id) - wm_state->wm.plane[plane_id] = USHRT_MAX; - } - - if (level <= G4X_WM_LEVEL_SR) { - wm_state->cxsr = false; - wm_state->sr.cursor = USHRT_MAX; - wm_state->sr.plane = USHRT_MAX; - wm_state->sr.fbc = USHRT_MAX; - } - - if (level <= G4X_WM_LEVEL_HPLL) { - wm_state->hpll_en = false; - wm_state->hpll.cursor = USHRT_MAX; - wm_state->hpll.plane = USHRT_MAX; - wm_state->hpll.fbc = USHRT_MAX; - } -} - -static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state, - int level) -{ - if (level < G4X_WM_LEVEL_SR) - return false; - - if (level >= G4X_WM_LEVEL_SR && - 
wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR)) - return false; - - if (level >= G4X_WM_LEVEL_HPLL && - wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL)) - return false; - - return true; -} - -static int _g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal; - u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); - const struct g4x_pipe_wm *raw; - enum plane_id plane_id; - int level; - - level = G4X_WM_LEVEL_NORMAL; - if (!g4x_raw_crtc_wm_is_valid(crtc_state, level)) - goto out; - - raw = &crtc_state->wm.g4x.raw[level]; - for_each_plane_id_on_crtc(crtc, plane_id) - wm_state->wm.plane[plane_id] = raw->plane[plane_id]; - - level = G4X_WM_LEVEL_SR; - if (!g4x_raw_crtc_wm_is_valid(crtc_state, level)) - goto out; - - raw = &crtc_state->wm.g4x.raw[level]; - wm_state->sr.plane = raw->plane[PLANE_PRIMARY]; - wm_state->sr.cursor = raw->plane[PLANE_CURSOR]; - wm_state->sr.fbc = raw->fbc; - - wm_state->cxsr = active_planes == BIT(PLANE_PRIMARY); - - level = G4X_WM_LEVEL_HPLL; - if (!g4x_raw_crtc_wm_is_valid(crtc_state, level)) - goto out; - - raw = &crtc_state->wm.g4x.raw[level]; - wm_state->hpll.plane = raw->plane[PLANE_PRIMARY]; - wm_state->hpll.cursor = raw->plane[PLANE_CURSOR]; - wm_state->hpll.fbc = raw->fbc; - - wm_state->hpll_en = wm_state->cxsr; - - level++; - - out: - if (level == G4X_WM_LEVEL_NORMAL) - return -EINVAL; - - /* invalidate the higher levels */ - g4x_invalidate_wms(crtc, wm_state, level); - - /* - * Determine if the FBC watermark(s) can be used. IF - * this isn't the case we prefer to disable the FBC - * watermark(s) rather than disable the SR/HPLL - * level(s) entirely. 'level-1' is the highest valid - * level here. 
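
A minimal sketch of the indexing convention at play here, with a hypothetical wm_level_is_valid() standing in for g4x_raw_crtc_wm_is_valid():

	int level;

	for (level = 0; level <= max_level; level++) {
		if (!wm_level_is_valid(level))
			break;
	}
	/*
	 * 'level' is now one past the last level that validated, so the
	 * highest usable level is level - 1 -- which is what the
	 * g4x_compute_fbc_en() call below receives.
	 */
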
- */ - wm_state->fbc_en = g4x_compute_fbc_en(wm_state, level - 1); - - return 0; -} - -static int g4x_compute_pipe_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - const struct intel_plane_state *old_plane_state; - const struct intel_plane_state *new_plane_state; - struct intel_plane *plane; - unsigned int dirty = 0; - int i; - - for_each_oldnew_intel_plane_in_state(state, plane, - old_plane_state, - new_plane_state, i) { - if (new_plane_state->hw.crtc != &crtc->base && - old_plane_state->hw.crtc != &crtc->base) - continue; - - if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state)) - dirty |= BIT(plane->id); - } - - if (!dirty) - return 0; - - return _g4x_compute_pipe_wm(crtc_state); -} - -static int g4x_compute_intermediate_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_crtc_state *new_crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - const struct intel_crtc_state *old_crtc_state = - intel_atomic_get_old_crtc_state(state, crtc); - struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate; - const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal; - const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal; - enum plane_id plane_id; - - if (!new_crtc_state->hw.active || - intel_crtc_needs_modeset(new_crtc_state)) { - *intermediate = *optimal; - - intermediate->cxsr = false; - intermediate->hpll_en = false; - goto out; - } - - intermediate->cxsr = optimal->cxsr && active->cxsr && - !new_crtc_state->disable_cxsr; - intermediate->hpll_en = optimal->hpll_en && active->hpll_en && - !new_crtc_state->disable_cxsr; - intermediate->fbc_en = optimal->fbc_en && active->fbc_en; - - for_each_plane_id_on_crtc(crtc, plane_id) { - intermediate->wm.plane[plane_id] = - max(optimal->wm.plane[plane_id], - active->wm.plane[plane_id]); - - drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] > - g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL)); - } - - intermediate->sr.plane = max(optimal->sr.plane, - active->sr.plane); - intermediate->sr.cursor = max(optimal->sr.cursor, - active->sr.cursor); - intermediate->sr.fbc = max(optimal->sr.fbc, - active->sr.fbc); - - intermediate->hpll.plane = max(optimal->hpll.plane, - active->hpll.plane); - intermediate->hpll.cursor = max(optimal->hpll.cursor, - active->hpll.cursor); - intermediate->hpll.fbc = max(optimal->hpll.fbc, - active->hpll.fbc); - - drm_WARN_ON(&dev_priv->drm, - (intermediate->sr.plane > - g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) || - intermediate->sr.cursor > - g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) && - intermediate->cxsr); - drm_WARN_ON(&dev_priv->drm, - (intermediate->sr.plane > - g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) || - intermediate->sr.cursor > - g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) && - intermediate->hpll_en); - - drm_WARN_ON(&dev_priv->drm, - intermediate->sr.fbc > g4x_fbc_fifo_size(1) && - intermediate->fbc_en && intermediate->cxsr); - drm_WARN_ON(&dev_priv->drm, - intermediate->hpll.fbc > g4x_fbc_fifo_size(2) && - intermediate->fbc_en && intermediate->hpll_en); - -out: - /* - * If our intermediate WM are identical to the final WM, then we can - * omit the post-vblank programming; only update if it's different. 
- */ - if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0) - new_crtc_state->wm.need_postvbl_update = true; - - return 0; -} - -static void g4x_merge_wm(struct drm_i915_private *dev_priv, - struct g4x_wm_values *wm) -{ - struct intel_crtc *crtc; - int num_active_pipes = 0; - - wm->cxsr = true; - wm->hpll_en = true; - wm->fbc_en = true; - - for_each_intel_crtc(&dev_priv->drm, crtc) { - const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x; - - if (!crtc->active) - continue; - - if (!wm_state->cxsr) - wm->cxsr = false; - if (!wm_state->hpll_en) - wm->hpll_en = false; - if (!wm_state->fbc_en) - wm->fbc_en = false; - - num_active_pipes++; - } - - if (num_active_pipes != 1) { - wm->cxsr = false; - wm->hpll_en = false; - wm->fbc_en = false; - } - - for_each_intel_crtc(&dev_priv->drm, crtc) { - const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x; - enum pipe pipe = crtc->pipe; - - wm->pipe[pipe] = wm_state->wm; - if (crtc->active && wm->cxsr) - wm->sr = wm_state->sr; - if (crtc->active && wm->hpll_en) - wm->hpll = wm_state->hpll; - } -} - -static void g4x_program_watermarks(struct drm_i915_private *dev_priv) -{ - struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x; - struct g4x_wm_values new_wm = {}; - - g4x_merge_wm(dev_priv, &new_wm); - - if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0) - return; - - if (is_disabling(old_wm->cxsr, new_wm.cxsr, true)) - _intel_set_memory_cxsr(dev_priv, false); - - g4x_write_wm_values(dev_priv, &new_wm); - - if (is_enabling(old_wm->cxsr, new_wm.cxsr, true)) - _intel_set_memory_cxsr(dev_priv, true); - - *old_wm = new_wm; -} - -static void g4x_initial_watermarks(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - const struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - - mutex_lock(&dev_priv->display.wm.wm_mutex); - crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate; - g4x_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->display.wm.wm_mutex); -} - -static void g4x_optimize_watermarks(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - const struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - - if (!crtc_state->wm.need_postvbl_update) - return; - - mutex_lock(&dev_priv->display.wm.wm_mutex); - crtc->wm.active.g4x = crtc_state->wm.g4x.optimal; - g4x_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->display.wm.wm_mutex); -} - -/* latency must be in 0.1us units. 
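
The intel_wm_method1()/intel_wm_method2() helpers called throughout this file are defined outside this hunk; the following is a reconstructed sketch (not the verbatim kernel code) of the two classic watermark formulas they implement. Pixel rate is in kHz, latency in 0.1us units, and the result is in bytes, which callers such as the wrapper below convert to 64-byte cachelines:

	/* method 1: bytes fetched during the latency window */
	static unsigned int wm_method1(unsigned int pixel_rate, /* kHz */
				       unsigned int cpp,
				       unsigned int latency) /* 0.1us */
	{
		unsigned long long bytes =
			(unsigned long long)pixel_rate * cpp * latency;

		return (unsigned int)((bytes + 10000 - 1) / 10000);
	}

	/* method 2: round the latency up to whole scanlines first */
	static unsigned int wm_method2(unsigned int pixel_rate, /* kHz */
				       unsigned int htotal,
				       unsigned int width, /* pixels */
				       unsigned int cpp,
				       unsigned int latency) /* 0.1us */
	{
		unsigned int lines = latency * pixel_rate / (10000 * htotal);

		return (lines + 1) * width * cpp;
	}
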
*/
-static unsigned int vlv_wm_method2(unsigned int pixel_rate,
- unsigned int htotal,
- unsigned int width,
- unsigned int cpp,
- unsigned int latency)
-{
- unsigned int ret;
-
- ret = intel_wm_method2(pixel_rate, htotal,
- width, cpp, latency);
- ret = DIV_ROUND_UP(ret, 64);
-
- return ret;
-}
-
-static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
-{
- /* all latencies in usec */
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
-
- dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM2;
-
- if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
-
- dev_priv->display.wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
- }
-}
-
-static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int level)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_display_mode *pipe_mode =
- &crtc_state->hw.pipe_mode;
- unsigned int pixel_rate, htotal, cpp, width, wm;
-
- if (dev_priv->display.wm.pri_latency[level] == 0)
- return USHRT_MAX;
-
- if (!intel_wm_plane_visible(crtc_state, plane_state))
- return 0;
-
- cpp = plane_state->hw.fb->format->cpp[0];
- pixel_rate = crtc_state->pixel_rate;
- htotal = pipe_mode->crtc_htotal;
- width = drm_rect_width(&plane_state->uapi.src) >> 16;
-
- if (plane->id == PLANE_CURSOR) {
- /*
- * FIXME the formula gives values that are
- * too big for the cursor FIFO, and hence we
- * would never be able to use cursors. For
- * now just hardcode the watermark.
- */
- wm = 63;
- } else {
- wm = vlv_wm_method2(pixel_rate, htotal, width, cpp,
- dev_priv->display.wm.pri_latency[level] * 10);
- }
-
- return min_t(unsigned int, wm, USHRT_MAX);
-}
-
-static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
-{
- return (active_planes & (BIT(PLANE_SPRITE0) |
- BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
-}
-
-static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct g4x_pipe_wm *raw =
- &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
- struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
- u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
- int num_active_planes = hweight8(active_planes);
- const int fifo_size = 511;
- int fifo_extra, fifo_left = fifo_size;
- int sprite0_fifo_extra = 0;
- unsigned int total_rate;
- enum plane_id plane_id;
-
- /*
- * When enabling sprite0 after sprite1 has already been enabled
- * we tend to get an underrun unless sprite0 already has some
- * FIFO space allocated. Hence we always allocate at least one
- * cacheline for sprite0 whenever sprite1 is enabled.
- *
- * All other plane enable sequences appear immune to this problem.
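
A worked example of the proportional split implemented below, with invented numbers: raw PM2 watermarks primary = 100 and sprite1 = 50 with sprite0 disabled trigger the workaround above, so total_rate = 100 + 0 + 50 + 1 = 151. The initial shares are 511 * 100 / 151 = 338 for the primary and 511 * 50 / 151 = 169 for sprite1, the reserved cacheline gives sprite0 its 1 entry, and the remaining 3 entries are topped up evenly (DIV_ROUND_UP(3, 2) = 2 per pass): primary ends at 340, sprite1 at 170, and the full 511-entry data FIFO is consumed, with the cursor's fixed 63 entries held separately.
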
- */ - if (vlv_need_sprite0_fifo_workaround(active_planes)) - sprite0_fifo_extra = 1; - - total_rate = raw->plane[PLANE_PRIMARY] + - raw->plane[PLANE_SPRITE0] + - raw->plane[PLANE_SPRITE1] + - sprite0_fifo_extra; - - if (total_rate > fifo_size) - return -EINVAL; - - if (total_rate == 0) - total_rate = 1; - - for_each_plane_id_on_crtc(crtc, plane_id) { - unsigned int rate; - - if ((active_planes & BIT(plane_id)) == 0) { - fifo_state->plane[plane_id] = 0; - continue; - } - - rate = raw->plane[plane_id]; - fifo_state->plane[plane_id] = fifo_size * rate / total_rate; - fifo_left -= fifo_state->plane[plane_id]; - } - - fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra; - fifo_left -= sprite0_fifo_extra; - - fifo_state->plane[PLANE_CURSOR] = 63; - - fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1); - - /* spread the remainder evenly */ - for_each_plane_id_on_crtc(crtc, plane_id) { - int plane_extra; - - if (fifo_left == 0) - break; - - if ((active_planes & BIT(plane_id)) == 0) - continue; - - plane_extra = min(fifo_extra, fifo_left); - fifo_state->plane[plane_id] += plane_extra; - fifo_left -= plane_extra; - } - - drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0); - - /* give it all to the first plane if none are active */ - if (active_planes == 0) { - drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size); - fifo_state->plane[PLANE_PRIMARY] = fifo_left; - } - - return 0; -} - -/* mark all levels starting from 'level' as invalid */ -static void vlv_invalidate_wms(struct intel_crtc *crtc, - struct vlv_wm_state *wm_state, int level) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - - for (; level < intel_wm_num_levels(dev_priv); level++) { - enum plane_id plane_id; - - for_each_plane_id_on_crtc(crtc, plane_id) - wm_state->wm[level].plane[plane_id] = USHRT_MAX; - - wm_state->sr[level].cursor = USHRT_MAX; - wm_state->sr[level].plane = USHRT_MAX; - } -} - -static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size) -{ - if (wm > fifo_size) - return USHRT_MAX; - else - return fifo_size - wm; -} - -/* - * Starting from 'level' set all higher - * levels to 'value' in the "raw" watermarks. - */ -static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state, - int level, enum plane_id plane_id, u16 value) -{ - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - int num_levels = intel_wm_num_levels(dev_priv); - bool dirty = false; - - for (; level < num_levels; level++) { - struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; - - dirty |= raw->plane[plane_id] != value; - raw->plane[plane_id] = value; - } - - return dirty; -} - -static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - enum plane_id plane_id = plane->id; - int num_levels = intel_wm_num_levels(to_i915(plane->base.dev)); - int level; - bool dirty = false; - - if (!intel_wm_plane_visible(crtc_state, plane_state)) { - dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0); - goto out; - } - - for (level = 0; level < num_levels; level++) { - struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; - int wm = vlv_compute_wm_level(crtc_state, plane_state, level); - int max_wm = plane_id == PLANE_CURSOR ? 
63 : 511; - - if (wm > max_wm) - break; - - dirty |= raw->plane[plane_id] != wm; - raw->plane[plane_id] = wm; - } - - /* mark all higher levels as invalid */ - dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX); - -out: - if (dirty) - drm_dbg_kms(&dev_priv->drm, - "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n", - plane->base.name, - crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id], - crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id], - crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]); - - return dirty; -} - -static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state, - enum plane_id plane_id, int level) -{ - const struct g4x_pipe_wm *raw = - &crtc_state->wm.vlv.raw[level]; - const struct vlv_fifo_state *fifo_state = - &crtc_state->wm.vlv.fifo_state; - - return raw->plane[plane_id] <= fifo_state->plane[plane_id]; -} - -static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level) -{ - return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) && - vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) && - vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) && - vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level); -} - -static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal; - const struct vlv_fifo_state *fifo_state = - &crtc_state->wm.vlv.fifo_state; - u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); - int num_active_planes = hweight8(active_planes); - enum plane_id plane_id; - int level; - - /* initially allow all levels */ - wm_state->num_levels = intel_wm_num_levels(dev_priv); - /* - * Note that enabling cxsr with no primary/sprite planes - * enabled can wedge the pipe. Hence we only allow cxsr - * with exactly one enabled primary/sprite plane. 
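
As a small illustration of the mask arithmetic above (a sketch using the driver's BIT() and plane_id definitions):

	/* primary + cursor enabled */
	u8 active = BIT(PLANE_PRIMARY) | BIT(PLANE_CURSOR);
	u8 non_cursor = active & ~BIT(PLANE_CURSOR); /* BIT(PLANE_PRIMARY) */

	/* hweight8(non_cursor) == 1, so cxsr may be enabled (pipes A/B only) */
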
- */ - wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1; - - for (level = 0; level < wm_state->num_levels; level++) { - const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; - const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1; - - if (!vlv_raw_crtc_wm_is_valid(crtc_state, level)) - break; - - for_each_plane_id_on_crtc(crtc, plane_id) { - wm_state->wm[level].plane[plane_id] = - vlv_invert_wm_value(raw->plane[plane_id], - fifo_state->plane[plane_id]); - } - - wm_state->sr[level].plane = - vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY], - raw->plane[PLANE_SPRITE0], - raw->plane[PLANE_SPRITE1]), - sr_fifo_size); - - wm_state->sr[level].cursor = - vlv_invert_wm_value(raw->plane[PLANE_CURSOR], - 63); - } - - if (level == 0) - return -EINVAL; - - /* limit to only levels we can actually handle */ - wm_state->num_levels = level; - - /* invalidate the higher levels */ - vlv_invalidate_wms(crtc, wm_state, level); - - return 0; -} - -static int vlv_compute_pipe_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - const struct intel_plane_state *old_plane_state; - const struct intel_plane_state *new_plane_state; - struct intel_plane *plane; - unsigned int dirty = 0; - int i; - - for_each_oldnew_intel_plane_in_state(state, plane, - old_plane_state, - new_plane_state, i) { - if (new_plane_state->hw.crtc != &crtc->base && - old_plane_state->hw.crtc != &crtc->base) - continue; - - if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state)) - dirty |= BIT(plane->id); - } - - /* - * DSPARB registers may have been reset due to the - * power well being turned off. Make sure we restore - * them to a consistent state even if no primary/sprite - * planes are initially active. We also force a FIFO - * recomputation so that we are sure to sanitize the - * FIFO setting we took over from the BIOS even if there - * are no active planes on the crtc. 
- */ - if (intel_crtc_needs_modeset(crtc_state)) - dirty = ~0; - - if (!dirty) - return 0; - - /* cursor changes don't warrant a FIFO recompute */ - if (dirty & ~BIT(PLANE_CURSOR)) { - const struct intel_crtc_state *old_crtc_state = - intel_atomic_get_old_crtc_state(state, crtc); - const struct vlv_fifo_state *old_fifo_state = - &old_crtc_state->wm.vlv.fifo_state; - const struct vlv_fifo_state *new_fifo_state = - &crtc_state->wm.vlv.fifo_state; - int ret; - - ret = vlv_compute_fifo(crtc_state); - if (ret) - return ret; - - if (intel_crtc_needs_modeset(crtc_state) || - memcmp(old_fifo_state, new_fifo_state, - sizeof(*new_fifo_state)) != 0) - crtc_state->fifo_changed = true; - } - - return _vlv_compute_pipe_wm(crtc_state); -} - -#define VLV_FIFO(plane, value) \ - (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV) - -static void vlv_atomic_update_fifo(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_uncore *uncore = &dev_priv->uncore; - const struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - const struct vlv_fifo_state *fifo_state = - &crtc_state->wm.vlv.fifo_state; - int sprite0_start, sprite1_start, fifo_size; - u32 dsparb, dsparb2, dsparb3; - - if (!crtc_state->fifo_changed) - return; - - sprite0_start = fifo_state->plane[PLANE_PRIMARY]; - sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start; - fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start; - - drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63); - drm_WARN_ON(&dev_priv->drm, fifo_size != 511); - - trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size); - - /* - * uncore.lock serves a double purpose here. It allows us to - * use the less expensive I915_{READ,WRITE}_FW() functions, and - * it protects the DSPARB registers from getting clobbered by - * parallel updates from multiple pipes. - * - * intel_pipe_update_start() has already disabled interrupts - * for us, so a plain spin_lock() is sufficient here. 
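
The switch below then read-modify-writes the split points; as a sketch with an invented boundary, a sprite0_start of 300 cachelines (0x12c) needs nine bits and is therefore spread across the two registers by the VLV_FIFO() masks:

	unsigned int sprite0_start = 300; /* 0x12c: nine significant bits */

	u32 lo = sprite0_start & 0xff; /* 0x2c -> DSPARB SPRITEA field */
	u32 hi = sprite0_start >> 8;   /* 0x1  -> DSPARB2 SPRITEA_HI bit */
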
- */ - spin_lock(&uncore->lock); - - switch (crtc->pipe) { - case PIPE_A: - dsparb = intel_uncore_read_fw(uncore, DSPARB); - dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); - - dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) | - VLV_FIFO(SPRITEB, 0xff)); - dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) | - VLV_FIFO(SPRITEB, sprite1_start)); - - dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) | - VLV_FIFO(SPRITEB_HI, 0x1)); - dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) | - VLV_FIFO(SPRITEB_HI, sprite1_start >> 8)); - - intel_uncore_write_fw(uncore, DSPARB, dsparb); - intel_uncore_write_fw(uncore, DSPARB2, dsparb2); - break; - case PIPE_B: - dsparb = intel_uncore_read_fw(uncore, DSPARB); - dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); - - dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) | - VLV_FIFO(SPRITED, 0xff)); - dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) | - VLV_FIFO(SPRITED, sprite1_start)); - - dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) | - VLV_FIFO(SPRITED_HI, 0xff)); - dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) | - VLV_FIFO(SPRITED_HI, sprite1_start >> 8)); - - intel_uncore_write_fw(uncore, DSPARB, dsparb); - intel_uncore_write_fw(uncore, DSPARB2, dsparb2); - break; - case PIPE_C: - dsparb3 = intel_uncore_read_fw(uncore, DSPARB3); - dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); - - dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) | - VLV_FIFO(SPRITEF, 0xff)); - dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) | - VLV_FIFO(SPRITEF, sprite1_start)); - - dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) | - VLV_FIFO(SPRITEF_HI, 0xff)); - dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) | - VLV_FIFO(SPRITEF_HI, sprite1_start >> 8)); - - intel_uncore_write_fw(uncore, DSPARB3, dsparb3); - intel_uncore_write_fw(uncore, DSPARB2, dsparb2); - break; - default: - break; - } - - intel_uncore_posting_read_fw(uncore, DSPARB); - - spin_unlock(&uncore->lock); -} - -#undef VLV_FIFO - -static int vlv_compute_intermediate_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct intel_crtc_state *new_crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - const struct intel_crtc_state *old_crtc_state = - intel_atomic_get_old_crtc_state(state, crtc); - struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate; - const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal; - const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal; - int level; - - if (!new_crtc_state->hw.active || - intel_crtc_needs_modeset(new_crtc_state)) { - *intermediate = *optimal; - - intermediate->cxsr = false; - goto out; - } - - intermediate->num_levels = min(optimal->num_levels, active->num_levels); - intermediate->cxsr = optimal->cxsr && active->cxsr && - !new_crtc_state->disable_cxsr; - - for (level = 0; level < intermediate->num_levels; level++) { - enum plane_id plane_id; - - for_each_plane_id_on_crtc(crtc, plane_id) { - intermediate->wm[level].plane[plane_id] = - min(optimal->wm[level].plane[plane_id], - active->wm[level].plane[plane_id]); - } - - intermediate->sr[level].plane = min(optimal->sr[level].plane, - active->sr[level].plane); - intermediate->sr[level].cursor = min(optimal->sr[level].cursor, - active->sr[level].cursor); - } - - vlv_invalidate_wms(crtc, intermediate, level); - -out: - /* - * If our intermediate WM are identical to the final WM, then we can - * omit the post-vblank programming; only update if it's different. 
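
One subtlety in the min() taken earlier in this function, where the g4x variant used max(): vlv watermark state is stored already inverted into "FIFO lines remaining" form, so the smaller stored number is the more conservative one. A sketch of the identity, with invert() mirroring the vlv_invert_wm_value() shown earlier:

	static inline u16 invert(u16 wm, u16 fifo_size)
	{
		return wm > fifo_size ? 0xffff /* USHRT_MAX */ : fifo_size - wm;
	}

	/*
	 * For raw watermarks a, b <= fifo_size:
	 *   min(invert(a, s), invert(b, s)) == invert(max(a, b), s)
	 * i.e. min() over inverted values selects the larger raw watermark,
	 * which is the value that is safe both before and after the vblank.
	 */
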
- */ - if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0) - new_crtc_state->wm.need_postvbl_update = true; - - return 0; -} - -static void vlv_merge_wm(struct drm_i915_private *dev_priv, - struct vlv_wm_values *wm) -{ - struct intel_crtc *crtc; - int num_active_pipes = 0; - - wm->level = dev_priv->display.wm.max_level; - wm->cxsr = true; - - for_each_intel_crtc(&dev_priv->drm, crtc) { - const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv; - - if (!crtc->active) - continue; - - if (!wm_state->cxsr) - wm->cxsr = false; - - num_active_pipes++; - wm->level = min_t(int, wm->level, wm_state->num_levels - 1); - } - - if (num_active_pipes != 1) - wm->cxsr = false; - - if (num_active_pipes > 1) - wm->level = VLV_WM_LEVEL_PM2; - - for_each_intel_crtc(&dev_priv->drm, crtc) { - const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv; - enum pipe pipe = crtc->pipe; - - wm->pipe[pipe] = wm_state->wm[wm->level]; - if (crtc->active && wm->cxsr) - wm->sr = wm_state->sr[wm->level]; - - wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2; - wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2; - wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2; - wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2; - } -} - -static void vlv_program_watermarks(struct drm_i915_private *dev_priv) -{ - struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv; - struct vlv_wm_values new_wm = {}; - - vlv_merge_wm(dev_priv, &new_wm); - - if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0) - return; - - if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS)) - chv_set_memory_dvfs(dev_priv, false); - - if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5)) - chv_set_memory_pm5(dev_priv, false); - - if (is_disabling(old_wm->cxsr, new_wm.cxsr, true)) - _intel_set_memory_cxsr(dev_priv, false); - - vlv_write_wm_values(dev_priv, &new_wm); - - if (is_enabling(old_wm->cxsr, new_wm.cxsr, true)) - _intel_set_memory_cxsr(dev_priv, true); - - if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5)) - chv_set_memory_pm5(dev_priv, true); - - if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS)) - chv_set_memory_dvfs(dev_priv, true); - - *old_wm = new_wm; -} - -static void vlv_initial_watermarks(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - const struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - - mutex_lock(&dev_priv->display.wm.wm_mutex); - crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate; - vlv_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->display.wm.wm_mutex); -} - -static void vlv_optimize_watermarks(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - const struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - - if (!crtc_state->wm.need_postvbl_update) - return; - - mutex_lock(&dev_priv->display.wm.wm_mutex); - crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; - vlv_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->display.wm.wm_mutex); -} - -static void i965_update_wm(struct drm_i915_private *dev_priv) -{ - struct intel_crtc *crtc; - int srwm = 1; - int cursor_sr = 16; - bool cxsr_enabled; - - /* Calc sr entries for one plane configs */ - crtc = single_enabled_crtc(dev_priv); - if (crtc) { - /* self-refresh has much higher latency */ - static const int sr_latency_ns = 12000; - const struct 
drm_display_mode *pipe_mode = - &crtc->config->hw.pipe_mode; - const struct drm_framebuffer *fb = - crtc->base.primary->state->fb; - int pixel_rate = crtc->config->pixel_rate; - int htotal = pipe_mode->crtc_htotal; - int width = drm_rect_width(&crtc->base.primary->state->src) >> 16; - int cpp = fb->format->cpp[0]; - int entries; - - entries = intel_wm_method2(pixel_rate, htotal, - width, cpp, sr_latency_ns / 100); - entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); - srwm = I965_FIFO_SIZE - entries; - if (srwm < 0) - srwm = 1; - srwm &= 0x1ff; - drm_dbg_kms(&dev_priv->drm, - "self-refresh entries: %d, wm: %d\n", - entries, srwm); - - entries = intel_wm_method2(pixel_rate, htotal, - crtc->base.cursor->state->crtc_w, 4, - sr_latency_ns / 100); - entries = DIV_ROUND_UP(entries, - i965_cursor_wm_info.cacheline_size) + - i965_cursor_wm_info.guard_size; - - cursor_sr = i965_cursor_wm_info.fifo_size - entries; - if (cursor_sr > i965_cursor_wm_info.max_wm) - cursor_sr = i965_cursor_wm_info.max_wm; - - drm_dbg_kms(&dev_priv->drm, - "self-refresh watermark: display plane %d " - "cursor %d\n", srwm, cursor_sr); - - cxsr_enabled = true; - } else { - cxsr_enabled = false; - /* Turn off self refresh if both pipes are enabled */ - intel_set_memory_cxsr(dev_priv, false); - } - - drm_dbg_kms(&dev_priv->drm, - "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", - srwm); - - /* 965 has limitations... */ - intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) | - FW_WM(8, CURSORB) | - FW_WM(8, PLANEB) | - FW_WM(8, PLANEA)); - intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) | - FW_WM(8, PLANEC_OLD)); - /* update cursor SR watermark */ - intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR)); - - if (cxsr_enabled) - intel_set_memory_cxsr(dev_priv, true); -} - -#undef FW_WM - -static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915, - enum i9xx_plane_id i9xx_plane) -{ - struct intel_plane *plane; - - for_each_intel_plane(&i915->drm, plane) { - if (plane->id == PLANE_PRIMARY && - plane->i9xx_plane == i9xx_plane) - return intel_crtc_for_pipe(i915, plane->pipe); - } - - return NULL; -} - -static void i9xx_update_wm(struct drm_i915_private *dev_priv) -{ - const struct intel_watermark_params *wm_info; - u32 fwater_lo; - u32 fwater_hi; - int cwm, srwm = 1; - int fifo_size; - int planea_wm, planeb_wm; - struct intel_crtc *crtc; - - if (IS_I945GM(dev_priv)) - wm_info = &i945_wm_info; - else if (DISPLAY_VER(dev_priv) != 2) - wm_info = &i915_wm_info; - else - wm_info = &i830_a_wm_info; - - if (DISPLAY_VER(dev_priv) == 2) - fifo_size = i830_get_fifo_size(dev_priv, PLANE_A); - else - fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A); - crtc = intel_crtc_for_plane(dev_priv, PLANE_A); - if (intel_crtc_active(crtc)) { - const struct drm_framebuffer *fb = - crtc->base.primary->state->fb; - int cpp; - - if (DISPLAY_VER(dev_priv) == 2) - cpp = 4; - else - cpp = fb->format->cpp[0]; - - planea_wm = intel_calculate_wm(crtc->config->pixel_rate, - wm_info, fifo_size, cpp, - pessimal_latency_ns); - } else { - planea_wm = fifo_size - wm_info->guard_size; - if (planea_wm > (long)wm_info->max_wm) - planea_wm = wm_info->max_wm; - } - - if (DISPLAY_VER(dev_priv) == 2) - wm_info = &i830_bc_wm_info; - - if (DISPLAY_VER(dev_priv) == 2) - fifo_size = i830_get_fifo_size(dev_priv, PLANE_B); - else - fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B); - crtc = intel_crtc_for_plane(dev_priv, PLANE_B); - if (intel_crtc_active(crtc)) { - const struct drm_framebuffer *fb = - 
crtc->base.primary->state->fb; - int cpp; - - if (DISPLAY_VER(dev_priv) == 2) - cpp = 4; - else - cpp = fb->format->cpp[0]; - - planeb_wm = intel_calculate_wm(crtc->config->pixel_rate, - wm_info, fifo_size, cpp, - pessimal_latency_ns); - } else { - planeb_wm = fifo_size - wm_info->guard_size; - if (planeb_wm > (long)wm_info->max_wm) - planeb_wm = wm_info->max_wm; - } - - drm_dbg_kms(&dev_priv->drm, - "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); - - crtc = single_enabled_crtc(dev_priv); - if (IS_I915GM(dev_priv) && crtc) { - struct drm_i915_gem_object *obj; - - obj = intel_fb_obj(crtc->base.primary->state->fb); - - /* self-refresh seems busted with untiled */ - if (!i915_gem_object_is_tiled(obj)) - crtc = NULL; - } - - /* - * Overlay gets an aggressive default since video jitter is bad. - */ - cwm = 2; - - /* Play safe and disable self-refresh before adjusting watermarks. */ - intel_set_memory_cxsr(dev_priv, false); - - /* Calc sr entries for one plane configs */ - if (HAS_FW_BLC(dev_priv) && crtc) { - /* self-refresh has much higher latency */ - static const int sr_latency_ns = 6000; - const struct drm_display_mode *pipe_mode = - &crtc->config->hw.pipe_mode; - const struct drm_framebuffer *fb = - crtc->base.primary->state->fb; - int pixel_rate = crtc->config->pixel_rate; - int htotal = pipe_mode->crtc_htotal; - int width = drm_rect_width(&crtc->base.primary->state->src) >> 16; - int cpp; - int entries; - - if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv)) - cpp = 4; - else - cpp = fb->format->cpp[0]; - - entries = intel_wm_method2(pixel_rate, htotal, width, cpp, - sr_latency_ns / 100); - entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); - drm_dbg_kms(&dev_priv->drm, - "self-refresh entries: %d\n", entries); - srwm = wm_info->fifo_size - entries; - if (srwm < 0) - srwm = 1; - - if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) - intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, - FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); - else - intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f); - } - - drm_dbg_kms(&dev_priv->drm, - "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", - planea_wm, planeb_wm, cwm, srwm); - - fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); - fwater_hi = (cwm & 0x1f); - - /* Set request length to 8 cachelines per fetch */ - fwater_lo = fwater_lo | (1 << 24) | (1 << 8); - fwater_hi = fwater_hi | (1 << 8); - - intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo); - intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi); - - if (crtc) - intel_set_memory_cxsr(dev_priv, true); -} - -static void i845_update_wm(struct drm_i915_private *dev_priv) -{ - struct intel_crtc *crtc; - u32 fwater_lo; - int planea_wm; - - crtc = single_enabled_crtc(dev_priv); - if (crtc == NULL) - return; - - planea_wm = intel_calculate_wm(crtc->config->pixel_rate, - &i845_wm_info, - i845_get_fifo_size(dev_priv, PLANE_A), - 4, pessimal_latency_ns); - fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff; - fwater_lo |= (3<<8) | planea_wm; - - drm_dbg_kms(&dev_priv->drm, - "Setting FIFO watermarks - A: %d\n", planea_wm); - - intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo); -} - -/* latency must be in 0.1us units. */ -static unsigned int ilk_wm_method1(unsigned int pixel_rate, - unsigned int cpp, - unsigned int latency) -{ - unsigned int ret; - - ret = intel_wm_method1(pixel_rate, cpp, latency); - ret = DIV_ROUND_UP(ret, 64) + 2; - - return ret; -} - -/* latency must be in 0.1us units. 
*/ -static unsigned int ilk_wm_method2(unsigned int pixel_rate, - unsigned int htotal, - unsigned int width, - unsigned int cpp, - unsigned int latency) -{ - unsigned int ret; - - ret = intel_wm_method2(pixel_rate, htotal, - width, cpp, latency); - ret = DIV_ROUND_UP(ret, 64) + 2; - - return ret; -} - -static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp) -{ - /* - * Neither of these should be possible since this function shouldn't be - * called if the CRTC is off or the plane is invisible. But let's be - * extra paranoid to avoid a potential divide-by-zero if we screw up - * elsewhere in the driver. - */ - if (WARN_ON(!cpp)) - return 0; - if (WARN_ON(!horiz_pixels)) - return 0; - - return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2; -} - -struct ilk_wm_maximums { - u16 pri; - u16 spr; - u16 cur; - u16 fbc; -}; - -/* - * For both WM_PIPE and WM_LP. - * mem_value must be in 0.1us units. - */ -static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - u32 mem_value, bool is_lp) -{ - u32 method1, method2; - int cpp; - - if (mem_value == 0) - return U32_MAX; - - if (!intel_wm_plane_visible(crtc_state, plane_state)) - return 0; - - cpp = plane_state->hw.fb->format->cpp[0]; - - method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value); - - if (!is_lp) - return method1; - - method2 = ilk_wm_method2(crtc_state->pixel_rate, - crtc_state->hw.pipe_mode.crtc_htotal, - drm_rect_width(&plane_state->uapi.src) >> 16, - cpp, mem_value); - - return min(method1, method2); -} - -/* - * For both WM_PIPE and WM_LP. - * mem_value must be in 0.1us units. - */ -static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - u32 mem_value) -{ - u32 method1, method2; - int cpp; - - if (mem_value == 0) - return U32_MAX; - - if (!intel_wm_plane_visible(crtc_state, plane_state)) - return 0; - - cpp = plane_state->hw.fb->format->cpp[0]; - - method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value); - method2 = ilk_wm_method2(crtc_state->pixel_rate, - crtc_state->hw.pipe_mode.crtc_htotal, - drm_rect_width(&plane_state->uapi.src) >> 16, - cpp, mem_value); - return min(method1, method2); -} - -/* - * For both WM_PIPE and WM_LP. - * mem_value must be in 0.1us units. - */ -static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - u32 mem_value) -{ - int cpp; - - if (mem_value == 0) - return U32_MAX; - - if (!intel_wm_plane_visible(crtc_state, plane_state)) - return 0; - - cpp = plane_state->hw.fb->format->cpp[0]; - - return ilk_wm_method2(crtc_state->pixel_rate, - crtc_state->hw.pipe_mode.crtc_htotal, - drm_rect_width(&plane_state->uapi.src) >> 16, - cpp, mem_value); -} - -/* Only for WM_LP. 
*/ -static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - u32 pri_val) -{ - int cpp; - - if (!intel_wm_plane_visible(crtc_state, plane_state)) - return 0; - - cpp = plane_state->hw.fb->format->cpp[0]; - - return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.src) >> 16, - cpp); -} - -static unsigned int -ilk_display_fifo_size(const struct drm_i915_private *dev_priv) -{ - if (DISPLAY_VER(dev_priv) >= 8) - return 3072; - else if (DISPLAY_VER(dev_priv) >= 7) - return 768; - else - return 512; -} - -static unsigned int -ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv, - int level, bool is_sprite) -{ - if (DISPLAY_VER(dev_priv) >= 8) - /* BDW primary/sprite plane watermarks */ - return level == 0 ? 255 : 2047; - else if (DISPLAY_VER(dev_priv) >= 7) - /* IVB/HSW primary/sprite plane watermarks */ - return level == 0 ? 127 : 1023; - else if (!is_sprite) - /* ILK/SNB primary plane watermarks */ - return level == 0 ? 127 : 511; - else - /* ILK/SNB sprite plane watermarks */ - return level == 0 ? 63 : 255; -} - -static unsigned int -ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level) -{ - if (DISPLAY_VER(dev_priv) >= 7) - return level == 0 ? 63 : 255; - else - return level == 0 ? 31 : 63; -} - -static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv) -{ - if (DISPLAY_VER(dev_priv) >= 8) - return 31; - else - return 15; -} - -/* Calculate the maximum primary/sprite plane watermark */ -static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv, - int level, - const struct intel_wm_config *config, - enum intel_ddb_partitioning ddb_partitioning, - bool is_sprite) -{ - unsigned int fifo_size = ilk_display_fifo_size(dev_priv); - - /* if sprites aren't enabled, sprites get nothing */ - if (is_sprite && !config->sprites_enabled) - return 0; - - /* HSW allows LP1+ watermarks even with multiple pipes */ - if (level == 0 || config->num_pipes_active > 1) { - fifo_size /= INTEL_NUM_PIPES(dev_priv); - - /* - * For some reason the non self refresh - * FIFO size is only half of the self - * refresh FIFO size on ILK/SNB. 
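
A worked pass through this function with an invented configuration: IVB (768-entry FIFO), level 1, one active pipe, sprites enabled, 5/6 DDB partitioning. Level > 0 with a single pipe skips the per-pipe division, so the sprite share is 768 * 5 / 6 = 640 and the primary share 768 / 6 = 128, both under the IVB LP1+ register maximum of 1023, and the two shares sum back to the full 768.
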
- */ - if (DISPLAY_VER(dev_priv) <= 6) - fifo_size /= 2; - } - - if (config->sprites_enabled) { - /* level 0 is always calculated with 1:1 split */ - if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) { - if (is_sprite) - fifo_size *= 5; - fifo_size /= 6; - } else { - fifo_size /= 2; - } - } - - /* clamp to max that the registers can hold */ - return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite)); -} - -/* Calculate the maximum cursor plane watermark */ -static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv, - int level, - const struct intel_wm_config *config) -{ - /* HSW LP1+ watermarks w/ multiple pipes */ - if (level > 0 && config->num_pipes_active > 1) - return 64; - - /* otherwise just report max that registers can hold */ - return ilk_cursor_wm_reg_max(dev_priv, level); -} - -static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv, - int level, - const struct intel_wm_config *config, - enum intel_ddb_partitioning ddb_partitioning, - struct ilk_wm_maximums *max) -{ - max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false); - max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true); - max->cur = ilk_cursor_wm_max(dev_priv, level, config); - max->fbc = ilk_fbc_wm_reg_max(dev_priv); -} - -static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv, - int level, - struct ilk_wm_maximums *max) -{ - max->pri = ilk_plane_wm_reg_max(dev_priv, level, false); - max->spr = ilk_plane_wm_reg_max(dev_priv, level, true); - max->cur = ilk_cursor_wm_reg_max(dev_priv, level); - max->fbc = ilk_fbc_wm_reg_max(dev_priv); -} - -static bool ilk_validate_wm_level(int level, - const struct ilk_wm_maximums *max, - struct intel_wm_level *result) -{ - bool ret; - - /* already determined to be invalid? */ - if (!result->enable) - return false; - - result->enable = result->pri_val <= max->pri && - result->spr_val <= max->spr && - result->cur_val <= max->cur; - - ret = result->enable; - - /* - * HACK until we can pre-compute everything, - * and thus fail gracefully if LP0 watermarks - * are exceeded... 
- */ - if (level == 0 && !result->enable) { - if (result->pri_val > max->pri) - DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n", - level, result->pri_val, max->pri); - if (result->spr_val > max->spr) - DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n", - level, result->spr_val, max->spr); - if (result->cur_val > max->cur) - DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n", - level, result->cur_val, max->cur); - - result->pri_val = min_t(u32, result->pri_val, max->pri); - result->spr_val = min_t(u32, result->spr_val, max->spr); - result->cur_val = min_t(u32, result->cur_val, max->cur); - result->enable = true; - } - - return ret; -} - -static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, - const struct intel_crtc *crtc, - int level, - struct intel_crtc_state *crtc_state, - const struct intel_plane_state *pristate, - const struct intel_plane_state *sprstate, - const struct intel_plane_state *curstate, - struct intel_wm_level *result) -{ - u16 pri_latency = dev_priv->display.wm.pri_latency[level]; - u16 spr_latency = dev_priv->display.wm.spr_latency[level]; - u16 cur_latency = dev_priv->display.wm.cur_latency[level]; - - /* WM1+ latency values stored in 0.5us units */ - if (level > 0) { - pri_latency *= 5; - spr_latency *= 5; - cur_latency *= 5; - } - - if (pristate) { - result->pri_val = ilk_compute_pri_wm(crtc_state, pristate, - pri_latency, level); - result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val); - } - - if (sprstate) - result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency); - - if (curstate) - result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency); - - result->enable = true; -} - -static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) -{ - u64 sskpd; - - sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD); - - wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd); - if (wm[0] == 0) - wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd); - wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd); - wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd); - wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd); - wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd); -} - -static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) -{ - u32 sskpd; - - sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD); - - wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd); - wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd); - wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd); - wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd); -} - -static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) -{ - u32 mltr; - - mltr = intel_uncore_read(&i915->uncore, MLTR_ILK); - - /* ILK primary LP0 latency is 700 ns */ - wm[0] = 7; - wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr); - wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr); -} - -static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv, - u16 wm[5]) -{ - /* ILK sprite LP0 latency is 1300 ns */ - if (DISPLAY_VER(dev_priv) == 5) - wm[0] = 13; -} - -static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv, - u16 wm[5]) -{ - /* ILK cursor LP0 latency is 1300 ns */ - if (DISPLAY_VER(dev_priv) == 5) - wm[0] = 13; -} - -int ilk_wm_max_level(const struct drm_i915_private *dev_priv) -{ - /* how many WM levels are we expecting */ - if (HAS_HW_SAGV_WM(dev_priv)) - return 5; - else if (DISPLAY_VER(dev_priv) >= 9) - return 7; - else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - return 4; - else if (DISPLAY_VER(dev_priv) >= 
6)
- return 3;
- else
- return 2;
-}
-
-void intel_print_wm_latency(struct drm_i915_private *dev_priv,
- const char *name, const u16 wm[])
-{
- int level, max_level = ilk_wm_max_level(dev_priv);
-
- for (level = 0; level <= max_level; level++) {
- unsigned int latency = wm[level];
-
- if (latency == 0) {
- drm_dbg_kms(&dev_priv->drm,
- "%s WM%d latency not provided\n",
- name, level);
- continue;
- }
-
- /*
- * - latencies are in us on gen9.
- * - before then, WM1+ latency values are in 0.5us units
- */
- if (DISPLAY_VER(dev_priv) >= 9)
- latency *= 10;
- else if (level > 0)
- latency *= 5;
-
- drm_dbg_kms(&dev_priv->drm,
- "%s WM%d latency %u (%u.%u usec)\n", name, level,
- wm[level], latency / 10, latency % 10);
- }
-}
-
-static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5], u16 min)
-{
- int level, max_level = ilk_wm_max_level(dev_priv);
-
- if (wm[0] >= min)
- return false;
-
- wm[0] = max(wm[0], min);
- for (level = 1; level <= max_level; level++)
- wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
-
- return true;
-}
-
-static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
-{
- bool changed;
-
- /*
- * The BIOS provided WM memory latency values are often
- * inadequate for high resolution displays. Adjust them.
- */
- changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12);
-
- if (!changed)
- return;
-
- drm_dbg_kms(&dev_priv->drm,
- "WM latency values increased to avoid potential underruns\n");
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
-}
-
-static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
-{
- /*
- * On some SNB machines (Thinkpad X220 Tablet at least)
- * LP3 usage can cause vblank interrupts to be lost.
- * The DEIIR bit will go high but it looks like the CPU
- * never gets interrupted.
- *
- * It's not clear whether other interrupt sources could
- * be affected or if this is somehow limited to vblank
- * interrupts only. To play it safe we disable LP3
- * watermarks entirely.
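
The zeroing below is effective because every per-plane compute helper in this file treats a zero latency as "level unusable"; ilk_compute_pri_wm() above, for instance, returns U32_MAX when mem_value == 0. A minimal sketch of the pattern (u32/U32_MAX as in the kernel types):

	static u32 wm_for_latency(u32 mem_value /* 0.1us units */)
	{
		if (mem_value == 0)
			return U32_MAX; /* never fits a FIFO; the level is
					 * later dropped by
					 * ilk_validate_wm_level() */

		/* ...the usual method1/method2 arithmetic goes here... */
		return 0;
	}
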
- */ - if (dev_priv->display.wm.pri_latency[3] == 0 && - dev_priv->display.wm.spr_latency[3] == 0 && - dev_priv->display.wm.cur_latency[3] == 0) - return; - - dev_priv->display.wm.pri_latency[3] = 0; - dev_priv->display.wm.spr_latency[3] = 0; - dev_priv->display.wm.cur_latency[3] = 0; - - drm_dbg_kms(&dev_priv->drm, - "LP3 watermarks disabled due to potential for lost interrupts\n"); - intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency); - intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency); - intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency); -} - -static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) -{ - if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) - hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); - else if (DISPLAY_VER(dev_priv) >= 6) - snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); - else - ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); - - memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency, - sizeof(dev_priv->display.wm.pri_latency)); - memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency, - sizeof(dev_priv->display.wm.pri_latency)); - - intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency); - intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency); - - intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency); - intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency); - intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency); - - if (DISPLAY_VER(dev_priv) == 6) { - snb_wm_latency_quirk(dev_priv); - snb_wm_lp3_irq_quirk(dev_priv); - } -} - -static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv, - struct intel_pipe_wm *pipe_wm) -{ - /* LP0 watermark maximums depend on this pipe alone */ - const struct intel_wm_config config = { - .num_pipes_active = 1, - .sprites_enabled = pipe_wm->sprites_enabled, - .sprites_scaled = pipe_wm->sprites_scaled, - }; - struct ilk_wm_maximums max; - - /* LP0 watermarks always use 1/2 DDB partitioning */ - ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max); - - /* At least LP0 must be valid */ - if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) { - drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n"); - return false; - } - - return true; -} - -/* Compute new watermarks for the pipe */ -static int ilk_compute_pipe_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - struct intel_pipe_wm *pipe_wm; - struct intel_plane *plane; - const struct intel_plane_state *plane_state; - const struct intel_plane_state *pristate = NULL; - const struct intel_plane_state *sprstate = NULL; - const struct intel_plane_state *curstate = NULL; - int level, max_level = ilk_wm_max_level(dev_priv), usable_level; - struct ilk_wm_maximums max; - - pipe_wm = &crtc_state->wm.ilk.optimal; - - intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { - if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) - pristate = plane_state; - else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY) - sprstate = plane_state; - else if (plane->base.type == DRM_PLANE_TYPE_CURSOR) - curstate = plane_state; - } - - pipe_wm->pipe_enabled = crtc_state->hw.active; - pipe_wm->sprites_enabled = 
crtc_state->active_planes & BIT(PLANE_SPRITE0); - pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0); - - usable_level = max_level; - - /* ILK/SNB: LP2+ watermarks only w/o sprites */ - if (DISPLAY_VER(dev_priv) <= 6 && pipe_wm->sprites_enabled) - usable_level = 1; - - /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */ - if (pipe_wm->sprites_scaled) - usable_level = 0; - - memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm)); - ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state, - pristate, sprstate, curstate, &pipe_wm->wm[0]); - - if (!ilk_validate_pipe_wm(dev_priv, pipe_wm)) - return -EINVAL; - - ilk_compute_wm_reg_maximums(dev_priv, 1, &max); - - for (level = 1; level <= usable_level; level++) { - struct intel_wm_level *wm = &pipe_wm->wm[level]; - - ilk_compute_wm_level(dev_priv, crtc, level, crtc_state, - pristate, sprstate, curstate, wm); - - /* - * Disable any watermark level that exceeds the - * register maximums since such watermarks are - * always invalid. - */ - if (!ilk_validate_wm_level(level, &max, wm)) { - memset(wm, 0, sizeof(*wm)); - break; - } - } - - return 0; -} - -/* - * Build a set of 'intermediate' watermark values that satisfy both the old - * state and the new state. These can be programmed to the hardware - * immediately. - */ -static int ilk_compute_intermediate_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_crtc_state *new_crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - const struct intel_crtc_state *old_crtc_state = - intel_atomic_get_old_crtc_state(state, crtc); - struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate; - const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal; - int level, max_level = ilk_wm_max_level(dev_priv); - - /* - * Start with the final, target watermarks, then combine with the - * currently active watermarks to get values that are safe both before - * and after the vblank. - */ - *a = new_crtc_state->wm.ilk.optimal; - if (!new_crtc_state->hw.active || - intel_crtc_needs_modeset(new_crtc_state) || - state->skip_intermediate_wm) - return 0; - - a->pipe_enabled |= b->pipe_enabled; - a->sprites_enabled |= b->sprites_enabled; - a->sprites_scaled |= b->sprites_scaled; - - for (level = 0; level <= max_level; level++) { - struct intel_wm_level *a_wm = &a->wm[level]; - const struct intel_wm_level *b_wm = &b->wm[level]; - - a_wm->enable &= b_wm->enable; - a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val); - a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val); - a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val); - a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val); - } - - /* - * We need to make sure that these merged watermark values are - * actually a valid configuration themselves. If they're not, - * there's no safe way to transition from the old state to - * the new state, so we need to fail the atomic transaction. - */ - if (!ilk_validate_pipe_wm(dev_priv, a)) - return -EINVAL; - - /* - * If our intermediate WM are identical to the final WM, then we can - * omit the post-vblank programming; only update if it's different. - */ - if (memcmp(a, &new_crtc_state->wm.ilk.optimal, sizeof(*a)) != 0) - new_crtc_state->wm.need_postvbl_update = true; - - return 0; -} - -/* - * Merge the watermarks from all active pipes for a specific level. 
- */ -static void ilk_merge_wm_level(struct drm_i915_private *dev_priv, - int level, - struct intel_wm_level *ret_wm) -{ - const struct intel_crtc *crtc; - - ret_wm->enable = true; - - for_each_intel_crtc(&dev_priv->drm, crtc) { - const struct intel_pipe_wm *active = &crtc->wm.active.ilk; - const struct intel_wm_level *wm = &active->wm[level]; - - if (!active->pipe_enabled) - continue; - - /* - * The watermark values may have been used in the past, - * so we must maintain them in the registers for some - * time even if the level is now disabled. - */ - if (!wm->enable) - ret_wm->enable = false; - - ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val); - ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val); - ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val); - ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val); - } -} - -/* - * Merge all low power watermarks for all active pipes. - */ -static void ilk_wm_merge(struct drm_i915_private *dev_priv, - const struct intel_wm_config *config, - const struct ilk_wm_maximums *max, - struct intel_pipe_wm *merged) -{ - int level, max_level = ilk_wm_max_level(dev_priv); - int last_enabled_level = max_level; - - /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */ - if ((DISPLAY_VER(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) && - config->num_pipes_active > 1) - last_enabled_level = 0; - - /* ILK: FBC WM must be disabled always */ - merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6; - - /* merge each WM1+ level */ - for (level = 1; level <= max_level; level++) { - struct intel_wm_level *wm = &merged->wm[level]; - - ilk_merge_wm_level(dev_priv, level, wm); - - if (level > last_enabled_level) - wm->enable = false; - else if (!ilk_validate_wm_level(level, max, wm)) - /* make sure all following levels get disabled */ - last_enabled_level = level - 1; - - /* - * The spec says it is preferred to disable - * FBC WMs instead of disabling a WM level. - */ - if (wm->fbc_val > max->fbc) { - if (wm->enable) - merged->fbc_wm_enabled = false; - wm->fbc_val = 0; - } - } - - /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */ - if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) && - dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) { - for (level = 2; level <= max_level; level++) { - struct intel_wm_level *wm = &merged->wm[level]; - - wm->enable = false; - } - } -} - -static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm) -{ - /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */ - return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable); -} - -/* The value we need to program into the WM_LPx latency field */ -static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv, - int level) -{ - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - return 2 * level; - else - return dev_priv->display.wm.pri_latency[level]; -} - -static void ilk_compute_wm_results(struct drm_i915_private *dev_priv, - const struct intel_pipe_wm *merged, - enum intel_ddb_partitioning partitioning, - struct ilk_wm_values *results) -{ - struct intel_crtc *crtc; - int level, wm_lp; - - results->enable_fbc_wm = merged->fbc_wm_enabled; - results->partitioning = partitioning; - - /* LP1+ register values */ - for (wm_lp = 1; wm_lp <= 3; wm_lp++) { - const struct intel_wm_level *r; - - level = ilk_wm_lp_to_level(wm_lp, merged); - - r = &merged->wm[level]; - - /* - * Maintain the watermark values even if the level is - * disabled. Doing otherwise could cause underruns. 
- */ - results->wm_lp[wm_lp - 1] = - WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) | - WM_LP_PRIMARY(r->pri_val) | - WM_LP_CURSOR(r->cur_val); - - if (r->enable) - results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE; - - if (DISPLAY_VER(dev_priv) >= 8) - results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val); - else - results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val); - - results->wm_lp_spr[wm_lp - 1] = WM_LP_SPRITE(r->spr_val); - - /* - * Always set WM_LP_SPRITE_EN when spr_val != 0, even if the - * level is disabled. Doing otherwise could cause underruns. - */ - if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) { - drm_WARN_ON(&dev_priv->drm, wm_lp != 1); - results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE; - } - } - - /* LP0 register values */ - for_each_intel_crtc(&dev_priv->drm, crtc) { - enum pipe pipe = crtc->pipe; - const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk; - const struct intel_wm_level *r = &pipe_wm->wm[0]; - - if (drm_WARN_ON(&dev_priv->drm, !r->enable)) - continue; - - results->wm_pipe[pipe] = - WM0_PIPE_PRIMARY(r->pri_val) | - WM0_PIPE_SPRITE(r->spr_val) | - WM0_PIPE_CURSOR(r->cur_val); - } -} - -/* Find the result with the highest level enabled. Check for enable_fbc_wm in - * case both are at the same level. Prefer r1 in case they're the same. */ -static struct intel_pipe_wm * -ilk_find_best_result(struct drm_i915_private *dev_priv, - struct intel_pipe_wm *r1, - struct intel_pipe_wm *r2) -{ - int level, max_level = ilk_wm_max_level(dev_priv); - int level1 = 0, level2 = 0; - - for (level = 1; level <= max_level; level++) { - if (r1->wm[level].enable) - level1 = level; - if (r2->wm[level].enable) - level2 = level; - } - - if (level1 == level2) { - if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled) - return r2; - else - return r1; - } else if (level1 > level2) { - return r1; - } else { - return r2; - } -} - -/* dirty bits used to track which watermarks need changes */ -#define WM_DIRTY_PIPE(pipe) (1 << (pipe)) -#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp))) -#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3)) -#define WM_DIRTY_FBC (1 << 24) -#define WM_DIRTY_DDB (1 << 25) - -static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv, - const struct ilk_wm_values *old, - const struct ilk_wm_values *new) -{ - unsigned int dirty = 0; - enum pipe pipe; - int wm_lp; - - for_each_pipe(dev_priv, pipe) { - if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) { - dirty |= WM_DIRTY_PIPE(pipe); - /* Must disable LP1+ watermarks too */ - dirty |= WM_DIRTY_LP_ALL; - } - } - - if (old->enable_fbc_wm != new->enable_fbc_wm) { - dirty |= WM_DIRTY_FBC; - /* Must disable LP1+ watermarks too */ - dirty |= WM_DIRTY_LP_ALL; - } - - if (old->partitioning != new->partitioning) { - dirty |= WM_DIRTY_DDB; - /* Must disable LP1+ watermarks too */ - dirty |= WM_DIRTY_LP_ALL; - } - - /* LP1+ watermarks already deemed dirty, no need to continue */ - if (dirty & WM_DIRTY_LP_ALL) - return dirty; - - /* Find the lowest numbered LP1+ watermark in need of an update... 
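 * (illustrative note, not part of the original comment: if only WM2
 * changed, both WM_DIRTY_LP(2) and WM_DIRTY_LP(3) end up set, since
 * every LP level above the first stale one must be rewritten)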
 */
-	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
-		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
-		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
-			break;
-	}
-
-	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
-	for (; wm_lp <= 3; wm_lp++)
-		dirty |= WM_DIRTY_LP(wm_lp);
-
-	return dirty;
-}
-
-static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
-			       unsigned int dirty)
-{
-	struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
-	bool changed = false;
-
-	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
-		previous->wm_lp[2] &= ~WM_LP_ENABLE;
-		intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
-		changed = true;
-	}
-	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) {
-		previous->wm_lp[1] &= ~WM_LP_ENABLE;
-		intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
-		changed = true;
-	}
-	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) {
-		previous->wm_lp[0] &= ~WM_LP_ENABLE;
-		intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
-		changed = true;
-	}
-
-	/*
-	 * Don't touch WM_LP_SPRITE_ENABLE here.
-	 * Doing so could cause underruns.
-	 */
-
-	return changed;
-}
-
-/*
- * The spec says we shouldn't write when we don't need to, because every
- * write causes WMs to be re-evaluated, expending some power.
- */
-static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
-				struct ilk_wm_values *results)
-{
-	struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
-	unsigned int dirty;
-
-	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
-	if (!dirty)
-		return;
-
-	_ilk_disable_lp_wm(dev_priv, dirty);
-
-	if (dirty & WM_DIRTY_PIPE(PIPE_A))
-		intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
-	if (dirty & WM_DIRTY_PIPE(PIPE_B))
-		intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
-	if (dirty & WM_DIRTY_PIPE(PIPE_C))
-		intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
-
-	if (dirty & WM_DIRTY_DDB) {
-		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-			intel_uncore_rmw(&dev_priv->uncore, WM_MISC, WM_MISC_DATA_PARTITION_5_6,
-					 results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
-					 WM_MISC_DATA_PARTITION_5_6);
-		else
-			intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6,
-					 results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
-					 DISP_DATA_PARTITION_5_6);
-	}
-
-	if (dirty & WM_DIRTY_FBC)
-		intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, DISP_FBC_WM_DIS,
-				 results->enable_fbc_wm ?
0 : DISP_FBC_WM_DIS); - - if (dirty & WM_DIRTY_LP(1) && - previous->wm_lp_spr[0] != results->wm_lp_spr[0]) - intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]); - - if (DISPLAY_VER(dev_priv) >= 7) { - if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1]) - intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]); - if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2]) - intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]); - } - - if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0]) - intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]); - if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1]) - intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]); - if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2]) - intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]); - - dev_priv->display.wm.hw = *results; -} - -bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv) -{ - return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL); -} - -static void ilk_compute_wm_config(struct drm_i915_private *dev_priv, - struct intel_wm_config *config) -{ - struct intel_crtc *crtc; - - /* Compute the currently _active_ config */ - for_each_intel_crtc(&dev_priv->drm, crtc) { - const struct intel_pipe_wm *wm = &crtc->wm.active.ilk; - - if (!wm->pipe_enabled) - continue; - - config->sprites_enabled |= wm->sprites_enabled; - config->sprites_scaled |= wm->sprites_scaled; - config->num_pipes_active++; - } -} - -static void ilk_program_watermarks(struct drm_i915_private *dev_priv) -{ - struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; - struct ilk_wm_maximums max; - struct intel_wm_config config = {}; - struct ilk_wm_values results = {}; - enum intel_ddb_partitioning partitioning; - - ilk_compute_wm_config(dev_priv, &config); - - ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max); - ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2); - - /* 5/6 split only in single pipe config on IVB+ */ - if (DISPLAY_VER(dev_priv) >= 7 && - config.num_pipes_active == 1 && config.sprites_enabled) { - ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max); - ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6); - - best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6); - } else { - best_lp_wm = &lp_wm_1_2; - } - - partitioning = (best_lp_wm == &lp_wm_1_2) ? 
-		INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
-
-	ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
-
-	ilk_write_wm_values(dev_priv, &results);
-}
-
-static void ilk_initial_watermarks(struct intel_atomic_state *state,
-				   struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	const struct intel_crtc_state *crtc_state =
-		intel_atomic_get_new_crtc_state(state, crtc);
-
-	mutex_lock(&dev_priv->display.wm.wm_mutex);
-	crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
-	ilk_program_watermarks(dev_priv);
-	mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-static void ilk_optimize_watermarks(struct intel_atomic_state *state,
-				    struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	const struct intel_crtc_state *crtc_state =
-		intel_atomic_get_new_crtc_state(state, crtc);
-
-	if (!crtc_state->wm.need_postvbl_update)
-		return;
-
-	mutex_lock(&dev_priv->display.wm.wm_mutex);
-	crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
-	ilk_program_watermarks(dev_priv);
-	mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
-{
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
-	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
-	struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
-	enum pipe pipe = crtc->pipe;
-
-	hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));
-
-	memset(active, 0, sizeof(*active));
-
-	active->pipe_enabled = crtc->active;
-
-	if (active->pipe_enabled) {
-		u32 tmp = hw->wm_pipe[pipe];
-
-		/*
-		 * For active pipes LP0 watermark is marked as
-		 * enabled, and LP1+ watermarks as disabled since
-		 * we can't really reverse compute them in case
-		 * multiple pipes are active.
-		 */
-		active->wm[0].enable = true;
-		active->wm[0].pri_val = REG_FIELD_GET(WM0_PIPE_PRIMARY_MASK, tmp);
-		active->wm[0].spr_val = REG_FIELD_GET(WM0_PIPE_SPRITE_MASK, tmp);
-		active->wm[0].cur_val = REG_FIELD_GET(WM0_PIPE_CURSOR_MASK, tmp);
-	} else {
-		int level, max_level = ilk_wm_max_level(dev_priv);
-
-		/*
-		 * For inactive pipes, all watermark levels
-		 * should be marked as enabled but zeroed,
-		 * which is what we'd compute them to.
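 * (illustrative note, not part of the original comment: an enabled
 * level whose values are all zero simply means "no extra FIFO
 * requirement", which is what a fresh compute pass would produce
 * for an idle pipe)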
- */ - for (level = 0; level <= max_level; level++) - active->wm[level].enable = true; - } - - crtc->wm.active.ilk = *active; -} - -#define _FW_WM(value, plane) \ - (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT) -#define _FW_WM_VLV(value, plane) \ - (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT) - -static void g4x_read_wm_values(struct drm_i915_private *dev_priv, - struct g4x_wm_values *wm) -{ - u32 tmp; - - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1); - wm->sr.plane = _FW_WM(tmp, SR); - wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB); - wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB); - wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA); - - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2); - wm->fbc_en = tmp & DSPFW_FBC_SR_EN; - wm->sr.fbc = _FW_WM(tmp, FBC_SR); - wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR); - wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB); - wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA); - wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA); - - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3); - wm->hpll_en = tmp & DSPFW_HPLL_SR_EN; - wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); - wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR); - wm->hpll.plane = _FW_WM(tmp, HPLL_SR); -} - -static void vlv_read_wm_values(struct drm_i915_private *dev_priv, - struct vlv_wm_values *wm) -{ - enum pipe pipe; - u32 tmp; - - for_each_pipe(dev_priv, pipe) { - tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe)); - - wm->ddl[pipe].plane[PLANE_PRIMARY] = - (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); - wm->ddl[pipe].plane[PLANE_CURSOR] = - (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); - wm->ddl[pipe].plane[PLANE_SPRITE0] = - (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); - wm->ddl[pipe].plane[PLANE_SPRITE1] = - (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); - } - - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1); - wm->sr.plane = _FW_WM(tmp, SR); - wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB); - wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB); - wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA); - - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2); - wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB); - wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA); - wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA); - - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3); - wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); - - if (IS_CHERRYVIEW(dev_priv)) { - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV); - wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); - wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); - - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV); - wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF); - wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE); - - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV); - wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC); - wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC); - - tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM); - wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; - wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8; - wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8; - wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= 
_FW_WM(tmp, PLANEC_HI) << 8; - wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; - wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; - wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8; - wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8; - wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; - wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; - } else { - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7); - wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); - wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); - - tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM); - wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; - wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; - wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; - wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8; - wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8; - wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; - wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; - } -} - -#undef _FW_WM -#undef _FW_WM_VLV - -void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv) -{ - struct g4x_wm_values *wm = &dev_priv->display.wm.g4x; - struct intel_crtc *crtc; - - g4x_read_wm_values(dev_priv, wm); - - wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; - - for_each_intel_crtc(&dev_priv->drm, crtc) { - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - struct g4x_wm_state *active = &crtc->wm.active.g4x; - struct g4x_pipe_wm *raw; - enum pipe pipe = crtc->pipe; - enum plane_id plane_id; - int level, max_level; - - active->cxsr = wm->cxsr; - active->hpll_en = wm->hpll_en; - active->fbc_en = wm->fbc_en; - - active->sr = wm->sr; - active->hpll = wm->hpll; - - for_each_plane_id_on_crtc(crtc, plane_id) { - active->wm.plane[plane_id] = - wm->pipe[pipe].plane[plane_id]; - } - - if (wm->cxsr && wm->hpll_en) - max_level = G4X_WM_LEVEL_HPLL; - else if (wm->cxsr) - max_level = G4X_WM_LEVEL_SR; - else - max_level = G4X_WM_LEVEL_NORMAL; - - level = G4X_WM_LEVEL_NORMAL; - raw = &crtc_state->wm.g4x.raw[level]; - for_each_plane_id_on_crtc(crtc, plane_id) - raw->plane[plane_id] = active->wm.plane[plane_id]; - - level = G4X_WM_LEVEL_SR; - if (level > max_level) - goto out; - - raw = &crtc_state->wm.g4x.raw[level]; - raw->plane[PLANE_PRIMARY] = active->sr.plane; - raw->plane[PLANE_CURSOR] = active->sr.cursor; - raw->plane[PLANE_SPRITE0] = 0; - raw->fbc = active->sr.fbc; - - level = G4X_WM_LEVEL_HPLL; - if (level > max_level) - goto out; - - raw = &crtc_state->wm.g4x.raw[level]; - raw->plane[PLANE_PRIMARY] = active->hpll.plane; - raw->plane[PLANE_CURSOR] = active->hpll.cursor; - raw->plane[PLANE_SPRITE0] = 0; - raw->fbc = active->hpll.fbc; - - level++; - out: - for_each_plane_id_on_crtc(crtc, plane_id) - g4x_raw_plane_wm_set(crtc_state, level, - plane_id, USHRT_MAX); - g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX); - - g4x_invalidate_wms(crtc, active, level); - - crtc_state->wm.g4x.optimal = *active; - crtc_state->wm.g4x.intermediate = *active; - - drm_dbg_kms(&dev_priv->drm, - "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n", - pipe_name(pipe), - wm->pipe[pipe].plane[PLANE_PRIMARY], - wm->pipe[pipe].plane[PLANE_CURSOR], - wm->pipe[pipe].plane[PLANE_SPRITE0]); - } - - drm_dbg_kms(&dev_priv->drm, - "Initial SR watermarks: 
plane=%d, cursor=%d fbc=%d\n", - wm->sr.plane, wm->sr.cursor, wm->sr.fbc); - drm_dbg_kms(&dev_priv->drm, - "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n", - wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc); - drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n", - str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en), - str_yes_no(wm->fbc_en)); -} - -void g4x_wm_sanitize(struct drm_i915_private *dev_priv) -{ - struct intel_plane *plane; - struct intel_crtc *crtc; - - mutex_lock(&dev_priv->display.wm.wm_mutex); - - for_each_intel_plane(&dev_priv->drm, plane) { - struct intel_crtc *crtc = - intel_crtc_for_pipe(dev_priv, plane->pipe); - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - struct intel_plane_state *plane_state = - to_intel_plane_state(plane->base.state); - enum plane_id plane_id = plane->id; - int level, num_levels = intel_wm_num_levels(dev_priv); - - if (plane_state->uapi.visible) - continue; - - for (level = 0; level < num_levels; level++) { - struct g4x_pipe_wm *raw = - &crtc_state->wm.g4x.raw[level]; - - raw->plane[plane_id] = 0; - - if (plane_id == PLANE_PRIMARY) - raw->fbc = 0; - } - } - - for_each_intel_crtc(&dev_priv->drm, crtc) { - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - int ret; - - ret = _g4x_compute_pipe_wm(crtc_state); - drm_WARN_ON(&dev_priv->drm, ret); - - crtc_state->wm.g4x.intermediate = - crtc_state->wm.g4x.optimal; - crtc->wm.active.g4x = crtc_state->wm.g4x.optimal; - } - - g4x_program_watermarks(dev_priv); - - mutex_unlock(&dev_priv->display.wm.wm_mutex); -} - -void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv) -{ - struct vlv_wm_values *wm = &dev_priv->display.wm.vlv; - struct intel_crtc *crtc; - u32 val; - - vlv_read_wm_values(dev_priv, wm); - - wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; - wm->level = VLV_WM_LEVEL_PM2; - - if (IS_CHERRYVIEW(dev_priv)) { - vlv_punit_get(dev_priv); - - val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); - if (val & DSP_MAXFIFO_PM5_ENABLE) - wm->level = VLV_WM_LEVEL_PM5; - - /* - * If DDR DVFS is disabled in the BIOS, Punit - * will never ack the request. So if that happens - * assume we don't have to enable/disable DDR DVFS - * dynamically. To test that just set the REQ_ACK - * bit to poke the Punit, but don't change the - * HIGH/LOW bits so that we don't actually change - * the current state. 
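 * (illustrative note, not part of the original comment: only the ack
 * handshake is exercised by the poke; the poll below then gives the
 * Punit 3 ms to respond before DDR DVFS is written off as disabled)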
- */ - val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); - val |= FORCE_DDR_FREQ_REQ_ACK; - vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); - - if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & - FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) { - drm_dbg_kms(&dev_priv->drm, - "Punit not acking DDR DVFS request, " - "assuming DDR DVFS is disabled\n"); - dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM5; - } else { - val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); - if ((val & FORCE_DDR_HIGH_FREQ) == 0) - wm->level = VLV_WM_LEVEL_DDR_DVFS; - } - - vlv_punit_put(dev_priv); - } - - for_each_intel_crtc(&dev_priv->drm, crtc) { - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - struct vlv_wm_state *active = &crtc->wm.active.vlv; - const struct vlv_fifo_state *fifo_state = - &crtc_state->wm.vlv.fifo_state; - enum pipe pipe = crtc->pipe; - enum plane_id plane_id; - int level; - - vlv_get_fifo_size(crtc_state); - - active->num_levels = wm->level + 1; - active->cxsr = wm->cxsr; - - for (level = 0; level < active->num_levels; level++) { - struct g4x_pipe_wm *raw = - &crtc_state->wm.vlv.raw[level]; - - active->sr[level].plane = wm->sr.plane; - active->sr[level].cursor = wm->sr.cursor; - - for_each_plane_id_on_crtc(crtc, plane_id) { - active->wm[level].plane[plane_id] = - wm->pipe[pipe].plane[plane_id]; - - raw->plane[plane_id] = - vlv_invert_wm_value(active->wm[level].plane[plane_id], - fifo_state->plane[plane_id]); - } - } - - for_each_plane_id_on_crtc(crtc, plane_id) - vlv_raw_plane_wm_set(crtc_state, level, - plane_id, USHRT_MAX); - vlv_invalidate_wms(crtc, active, level); - - crtc_state->wm.vlv.optimal = *active; - crtc_state->wm.vlv.intermediate = *active; - - drm_dbg_kms(&dev_priv->drm, - "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n", - pipe_name(pipe), - wm->pipe[pipe].plane[PLANE_PRIMARY], - wm->pipe[pipe].plane[PLANE_CURSOR], - wm->pipe[pipe].plane[PLANE_SPRITE0], - wm->pipe[pipe].plane[PLANE_SPRITE1]); - } - - drm_dbg_kms(&dev_priv->drm, - "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n", - wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr); -} - -void vlv_wm_sanitize(struct drm_i915_private *dev_priv) -{ - struct intel_plane *plane; - struct intel_crtc *crtc; - - mutex_lock(&dev_priv->display.wm.wm_mutex); - - for_each_intel_plane(&dev_priv->drm, plane) { - struct intel_crtc *crtc = - intel_crtc_for_pipe(dev_priv, plane->pipe); - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - struct intel_plane_state *plane_state = - to_intel_plane_state(plane->base.state); - enum plane_id plane_id = plane->id; - int level, num_levels = intel_wm_num_levels(dev_priv); - - if (plane_state->uapi.visible) - continue; - - for (level = 0; level < num_levels; level++) { - struct g4x_pipe_wm *raw = - &crtc_state->wm.vlv.raw[level]; - - raw->plane[plane_id] = 0; - } - } - - for_each_intel_crtc(&dev_priv->drm, crtc) { - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - int ret; - - ret = _vlv_compute_pipe_wm(crtc_state); - drm_WARN_ON(&dev_priv->drm, ret); - - crtc_state->wm.vlv.intermediate = - crtc_state->wm.vlv.optimal; - crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; - } - - vlv_program_watermarks(dev_priv); - - mutex_unlock(&dev_priv->display.wm.wm_mutex); -} - -/* - * FIXME should probably kill this and improve - * the real watermark readout/sanitation instead - */ -static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv) -{ - 
intel_uncore_rmw(&dev_priv->uncore, WM3_LP_ILK, WM_LP_ENABLE, 0); - intel_uncore_rmw(&dev_priv->uncore, WM2_LP_ILK, WM_LP_ENABLE, 0); - intel_uncore_rmw(&dev_priv->uncore, WM1_LP_ILK, WM_LP_ENABLE, 0); - - /* - * Don't touch WM_LP_SPRITE_ENABLE here. - * Doing so could cause underruns. - */ -} - -void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv) -{ - struct ilk_wm_values *hw = &dev_priv->display.wm.hw; - struct intel_crtc *crtc; - - ilk_init_lp_watermarks(dev_priv); - - for_each_intel_crtc(&dev_priv->drm, crtc) - ilk_pipe_wm_get_hw_state(crtc); - - hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK); - hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK); - hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK); - - hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK); - if (DISPLAY_VER(dev_priv) >= 7) { - hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB); - hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB); - } - - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? - INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; - else if (IS_IVYBRIDGE(dev_priv)) - hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ? - INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; - - hw->enable_fbc_wm = - !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS); -} - static void ibx_init_clock_gating(struct drm_i915_private *dev_priv) { /* @@ -4282,16 +320,6 @@ static void lpt_init_clock_gating(struct drm_i915_private *dev_priv) 0, TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); } -static void lpt_suspend_hw(struct drm_i915_private *dev_priv) -{ - if (HAS_PCH_LPT_LP(dev_priv)) { - u32 val = intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D); - - val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; - intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, val); - } -} - static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv, int general_prio_credits, int high_prio_credits) @@ -4336,10 +364,6 @@ static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv) intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), DPFC_CHICKEN_COMP_DUMMY_PIXEL); - /* Wa_1409825376:tgl (pre-prod)*/ - if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) - intel_uncore_rmw(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, 0, TGL_VRH_GATING_DIS); - /* Wa_14013723622:tgl,rkl,dg1,adl-s */ if (DISPLAY_VER(dev_priv) == 12) intel_uncore_rmw(&dev_priv->uncore, CLKREQ_POLICY, @@ -4357,15 +381,6 @@ static void adlp_init_clock_gating(struct drm_i915_private *dev_priv) intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0); } -static void dg1_init_clock_gating(struct drm_i915_private *dev_priv) -{ - gen12lp_init_clock_gating(dev_priv); - - /* Wa_1409836686:dg1[a0] */ - if (IS_DG1_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0)) - intel_uncore_rmw(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, 0, DPT_GATING_DIS); -} - static void xehpsdv_init_clock_gating(struct drm_i915_private *dev_priv) { /* Wa_22010146351:xehpsdv */ @@ -4764,12 +779,6 @@ void intel_init_clock_gating(struct drm_i915_private *dev_priv) dev_priv->clock_gating_funcs->init_clock_gating(dev_priv); } -void intel_suspend_hw(struct drm_i915_private *dev_priv) -{ - if (HAS_PCH_LPT(dev_priv)) - lpt_suspend_hw(dev_priv); -} - static void nop_init_clock_gating(struct drm_i915_private *dev_priv) { drm_dbg_kms(&dev_priv->drm, @@ -4785,7 +794,6 
@@ CG_FUNCS(pvc); CG_FUNCS(dg2); CG_FUNCS(xehpsdv); CG_FUNCS(adlp); -CG_FUNCS(dg1); CG_FUNCS(gen12lp); CG_FUNCS(icl); CG_FUNCS(cfl); @@ -4820,7 +828,9 @@ CG_FUNCS(nop); */ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) { - if (IS_PONTEVECCHIO(dev_priv)) + if (IS_METEORLAKE(dev_priv)) + dev_priv->clock_gating_funcs = &nop_clock_gating_funcs; + else if (IS_PONTEVECCHIO(dev_priv)) dev_priv->clock_gating_funcs = &pvc_clock_gating_funcs; else if (IS_DG2(dev_priv)) dev_priv->clock_gating_funcs = &dg2_clock_gating_funcs; @@ -4828,8 +838,6 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) dev_priv->clock_gating_funcs = &xehpsdv_clock_gating_funcs; else if (IS_ALDERLAKE_P(dev_priv)) dev_priv->clock_gating_funcs = &adlp_clock_gating_funcs; - else if (IS_DG1(dev_priv)) - dev_priv->clock_gating_funcs = &dg1_clock_gating_funcs; else if (GRAPHICS_VER(dev_priv) == 12) dev_priv->clock_gating_funcs = &gen12lp_clock_gating_funcs; else if (GRAPHICS_VER(dev_priv) == 11) @@ -4875,117 +883,3 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) dev_priv->clock_gating_funcs = &nop_clock_gating_funcs; } } - -static const struct intel_wm_funcs ilk_wm_funcs = { - .compute_pipe_wm = ilk_compute_pipe_wm, - .compute_intermediate_wm = ilk_compute_intermediate_wm, - .initial_watermarks = ilk_initial_watermarks, - .optimize_watermarks = ilk_optimize_watermarks, -}; - -static const struct intel_wm_funcs vlv_wm_funcs = { - .compute_pipe_wm = vlv_compute_pipe_wm, - .compute_intermediate_wm = vlv_compute_intermediate_wm, - .initial_watermarks = vlv_initial_watermarks, - .optimize_watermarks = vlv_optimize_watermarks, - .atomic_update_watermarks = vlv_atomic_update_fifo, -}; - -static const struct intel_wm_funcs g4x_wm_funcs = { - .compute_pipe_wm = g4x_compute_pipe_wm, - .compute_intermediate_wm = g4x_compute_intermediate_wm, - .initial_watermarks = g4x_initial_watermarks, - .optimize_watermarks = g4x_optimize_watermarks, -}; - -static const struct intel_wm_funcs pnv_wm_funcs = { - .update_wm = pnv_update_wm, -}; - -static const struct intel_wm_funcs i965_wm_funcs = { - .update_wm = i965_update_wm, -}; - -static const struct intel_wm_funcs i9xx_wm_funcs = { - .update_wm = i9xx_update_wm, -}; - -static const struct intel_wm_funcs i845_wm_funcs = { - .update_wm = i845_update_wm, -}; - -static const struct intel_wm_funcs nop_funcs = { -}; - -/* Set up chip specific power management-related functions */ -void intel_init_pm(struct drm_i915_private *dev_priv) -{ - if (DISPLAY_VER(dev_priv) >= 9) { - skl_wm_init(dev_priv); - return; - } - - /* For cxsr */ - if (IS_PINEVIEW(dev_priv)) - pnv_get_mem_freq(dev_priv); - else if (GRAPHICS_VER(dev_priv) == 5) - ilk_get_mem_freq(dev_priv); - - /* For FIFO watermark updates */ - if (HAS_PCH_SPLIT(dev_priv)) { - ilk_setup_wm_latency(dev_priv); - - if ((DISPLAY_VER(dev_priv) == 5 && dev_priv->display.wm.pri_latency[1] && - dev_priv->display.wm.spr_latency[1] && dev_priv->display.wm.cur_latency[1]) || - (DISPLAY_VER(dev_priv) != 5 && dev_priv->display.wm.pri_latency[0] && - dev_priv->display.wm.spr_latency[0] && dev_priv->display.wm.cur_latency[0])) { - dev_priv->display.funcs.wm = &ilk_wm_funcs; - } else { - drm_dbg_kms(&dev_priv->drm, - "Failed to read display plane latency. 
" - "Disable CxSR\n"); - dev_priv->display.funcs.wm = &nop_funcs; - } - } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - vlv_setup_wm_latency(dev_priv); - dev_priv->display.funcs.wm = &vlv_wm_funcs; - } else if (IS_G4X(dev_priv)) { - g4x_setup_wm_latency(dev_priv); - dev_priv->display.funcs.wm = &g4x_wm_funcs; - } else if (IS_PINEVIEW(dev_priv)) { - if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv), - dev_priv->is_ddr3, - dev_priv->fsb_freq, - dev_priv->mem_freq)) { - drm_info(&dev_priv->drm, - "failed to find known CxSR latency " - "(found ddr%s fsb freq %d, mem freq %d), " - "disabling CxSR\n", - (dev_priv->is_ddr3 == 1) ? "3" : "2", - dev_priv->fsb_freq, dev_priv->mem_freq); - /* Disable CxSR and never update its watermark again */ - intel_set_memory_cxsr(dev_priv, false); - dev_priv->display.funcs.wm = &nop_funcs; - } else - dev_priv->display.funcs.wm = &pnv_wm_funcs; - } else if (DISPLAY_VER(dev_priv) == 4) { - dev_priv->display.funcs.wm = &i965_wm_funcs; - } else if (DISPLAY_VER(dev_priv) == 3) { - dev_priv->display.funcs.wm = &i9xx_wm_funcs; - } else if (DISPLAY_VER(dev_priv) == 2) { - if (INTEL_NUM_PIPES(dev_priv) == 1) - dev_priv->display.funcs.wm = &i845_wm_funcs; - else - dev_priv->display.funcs.wm = &i9xx_wm_funcs; - } else { - drm_err(&dev_priv->drm, - "unexpected fall-through in %s\n", __func__); - dev_priv->display.funcs.wm = &nop_funcs; - } -} - -void intel_pm_setup(struct drm_i915_private *dev_priv) -{ - dev_priv->runtime_pm.suspended = false; - atomic_set(&dev_priv->runtime_pm.wakeref_count, 0); -} diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h index c09b872d65c8..f774bddcdca6 100644 --- a/drivers/gpu/drm/i915/intel_pm.h +++ b/drivers/gpu/drm/i915/intel_pm.h @@ -13,22 +13,6 @@ struct intel_crtc_state; struct intel_plane_state; void intel_init_clock_gating(struct drm_i915_private *dev_priv); -void intel_suspend_hw(struct drm_i915_private *dev_priv); -int ilk_wm_max_level(const struct drm_i915_private *dev_priv); -void intel_init_pm(struct drm_i915_private *dev_priv); void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv); -void intel_pm_setup(struct drm_i915_private *dev_priv); -void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv); -void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv); -void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv); -void g4x_wm_sanitize(struct drm_i915_private *dev_priv); -void vlv_wm_sanitize(struct drm_i915_private *dev_priv); -bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv); -bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state); -void intel_print_wm_latency(struct drm_i915_private *dev_priv, - const char *name, const u16 wm[]); - -bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable); #endif /* __INTEL_PM_H__ */ diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 129746713d07..cf5122299b6b 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -652,6 +652,8 @@ void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm) rpm->kdev = kdev; rpm->available = HAS_RUNTIME_PM(i915); + rpm->suspended = false; + atomic_set(&rpm->wakeref_count, 0); init_intel_runtime_pm_wakeref(rpm); INIT_LIST_HEAD(&rpm->lmem_userfault_list); diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 8dee9e62a73e..f4b3b2063018 100644 --- 
a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -32,7 +32,6 @@ #include "i915_reg.h" #include "i915_trace.h" #include "i915_vgpu.h" -#include "intel_pm.h" #define FORCEWAKE_ACK_TIMEOUT_MS 50 #define GT_FIFO_TIMEOUT_MS 10 @@ -2460,7 +2459,7 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb, static void uncore_unmap_mmio(struct drm_device *drm, void *regs) { - iounmap(regs); + iounmap((void __iomem *)regs); } int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr) @@ -2491,7 +2490,8 @@ int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr) return -EIO; } - return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio, uncore->regs); + return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio, + (void __force *)uncore->regs); } void intel_uncore_init_early(struct intel_uncore *uncore, diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c index cfc9af8b3d21..9d4c7724e98e 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c @@ -270,6 +270,60 @@ static bool pxp_component_bound(struct intel_pxp *pxp) return bound; } +static int __pxp_global_teardown_final(struct intel_pxp *pxp) +{ + if (!pxp->arb_is_valid) + return 0; + /* + * To ensure synchronous and coherent session teardown completion + * in response to suspend or shutdown triggers, don't use a worker. + */ + intel_pxp_mark_termination_in_progress(pxp); + intel_pxp_terminate(pxp, false); + + if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(250))) + return -ETIMEDOUT; + + return 0; +} + +static int __pxp_global_teardown_restart(struct intel_pxp *pxp) +{ + if (pxp->arb_is_valid) + return 0; + /* + * The arb-session is currently inactive and we are doing a reset and restart + * due to a runtime event. Use the worker that was designed for this. 
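 * (illustrative note, not part of the patch: although the termination
 * is queued to the worker, the code below still blocks for up to
 * 250 ms on the completion, so the restart stays synchronous from
 * the caller's point of view)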
+ */ + pxp_queue_termination(pxp); + + if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(250))) + return -ETIMEDOUT; + + return 0; +} + +void intel_pxp_end(struct intel_pxp *pxp) +{ + struct drm_i915_private *i915 = pxp->ctrl_gt->i915; + intel_wakeref_t wakeref; + + if (!intel_pxp_is_enabled(pxp)) + return; + + wakeref = intel_runtime_pm_get(&i915->runtime_pm); + + mutex_lock(&pxp->arb_mutex); + + if (__pxp_global_teardown_final(pxp)) + drm_dbg(&i915->drm, "PXP end timed out\n"); + + mutex_unlock(&pxp->arb_mutex); + + intel_pxp_fini_hw(pxp); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); +} + /* * the arb session is restarted from the irq work when we receive the * termination completion interrupt @@ -286,16 +340,9 @@ int intel_pxp_start(struct intel_pxp *pxp) mutex_lock(&pxp->arb_mutex); - if (pxp->arb_is_valid) - goto unlock; - - pxp_queue_termination(pxp); - - if (!wait_for_completion_timeout(&pxp->termination, - msecs_to_jiffies(250))) { - ret = -ETIMEDOUT; + ret = __pxp_global_teardown_restart(pxp); + if (ret) goto unlock; - } /* make sure the compiler doesn't optimize the double access */ barrier(); diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.h b/drivers/gpu/drm/i915/pxp/intel_pxp.h index 04440fada711..3ded0890cd27 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.h @@ -24,8 +24,10 @@ void intel_pxp_init_hw(struct intel_pxp *pxp); void intel_pxp_fini_hw(struct intel_pxp *pxp); void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp); +void intel_pxp_tee_end_arb_fw_session(struct intel_pxp *pxp, u32 arb_session_id); int intel_pxp_start(struct intel_pxp *pxp); +void intel_pxp_end(struct intel_pxp *pxp); int intel_pxp_key_check(struct intel_pxp *pxp, struct drm_i915_gem_object *obj, diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h index 739f9072fa5f..26f7d9f01bf3 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h @@ -12,6 +12,9 @@ /* PXP-Opcode for Init Session */ #define PXP42_CMDID_INIT_SESSION 0x1e +/* PXP-Opcode for Invalidate Stream Key */ +#define PXP42_CMDID_INVALIDATE_STREAM_KEY 0x00000007 + /* PXP-Input-Packet: Init Session (Arb-Session) */ struct pxp42_create_arb_in { struct pxp_cmd_header header; @@ -25,4 +28,16 @@ struct pxp42_create_arb_out { struct pxp_cmd_header header; } __packed; +/* PXP-Input-Packet: Invalidate Stream Key */ +struct pxp42_inv_stream_key_in { + struct pxp_cmd_header header; + u32 rsvd[3]; +} __packed; + +/* PXP-Output-Packet: Invalidate Stream Key */ +struct pxp42_inv_stream_key_out { + struct pxp_cmd_header header; + u32 rsvd; +} __packed; + #endif /* __INTEL_PXP_FW_INTERFACE_42_H__ */ diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h index aaa8187a0afb..ae9b151b7cb7 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h @@ -28,6 +28,9 @@ struct pxp_cmd_header { union { u32 status; /* out */ u32 stream_id; /* in */ +#define PXP_CMDHDR_EXTDATA_SESSION_VALID GENMASK(0, 0) +#define PXP_CMDHDR_EXTDATA_APP_TYPE GENMASK(1, 1) +#define PXP_CMDHDR_EXTDATA_SESSION_ID GENMASK(17, 2) }; /* Length of the message (excluding the header) */ u32 buffer_len; diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c index 892d39cc61c1..4f836b317424 100644 --- 
a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c @@ -16,7 +16,7 @@ void intel_pxp_suspend_prepare(struct intel_pxp *pxp) if (!intel_pxp_is_enabled(pxp)) return; - pxp->arb_is_valid = false; + intel_pxp_end(pxp); intel_pxp_invalidate(pxp); } @@ -34,7 +34,7 @@ void intel_pxp_suspend(struct intel_pxp *pxp) } } -void intel_pxp_resume(struct intel_pxp *pxp) +void intel_pxp_resume_complete(struct intel_pxp *pxp) { if (!intel_pxp_is_enabled(pxp)) return; diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h index 586be769104f..06b46f535b42 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h @@ -11,7 +11,7 @@ struct intel_pxp; #ifdef CONFIG_DRM_I915_PXP void intel_pxp_suspend_prepare(struct intel_pxp *pxp); void intel_pxp_suspend(struct intel_pxp *pxp); -void intel_pxp_resume(struct intel_pxp *pxp); +void intel_pxp_resume_complete(struct intel_pxp *pxp); void intel_pxp_runtime_suspend(struct intel_pxp *pxp); #else static inline void intel_pxp_suspend_prepare(struct intel_pxp *pxp) @@ -22,7 +22,7 @@ static inline void intel_pxp_suspend(struct intel_pxp *pxp) { } -static inline void intel_pxp_resume(struct intel_pxp *pxp) +static inline void intel_pxp_resume_complete(struct intel_pxp *pxp) { } @@ -32,6 +32,6 @@ static inline void intel_pxp_runtime_suspend(struct intel_pxp *pxp) #endif static inline void intel_pxp_runtime_resume(struct intel_pxp *pxp) { - intel_pxp_resume(pxp); + intel_pxp_resume_complete(pxp); } #endif /* __INTEL_PXP_PM_H__ */ diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c index ae413580b81a..448cacb0465d 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c @@ -110,14 +110,16 @@ static int pxp_terminate_arb_session_and_global(struct intel_pxp *pxp) intel_uncore_write(gt->uncore, PXP_GLOBAL_TERMINATE, 1); + intel_pxp_tee_end_arb_fw_session(pxp, ARB_SESSION); + return ret; } -static void pxp_terminate(struct intel_pxp *pxp) +void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_restart) { int ret; - pxp->hw_state_invalidated = true; + pxp->hw_state_invalidated = post_invalidation_needs_restart; /* * if we fail to submit the termination there is no point in waiting for @@ -165,7 +167,7 @@ static void pxp_session_work(struct work_struct *work) if (events & PXP_TERMINATION_REQUEST) { events &= ~PXP_TERMINATION_COMPLETE; - pxp_terminate(pxp); + intel_pxp_terminate(pxp, true); } if (events & PXP_TERMINATION_COMPLETE) diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.h b/drivers/gpu/drm/i915/pxp/intel_pxp_session.h index 903ac52cffa1..ba5788127109 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.h @@ -12,9 +12,14 @@ struct intel_pxp; #ifdef CONFIG_DRM_I915_PXP void intel_pxp_session_management_init(struct intel_pxp *pxp); +void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_restart); #else static inline void intel_pxp_session_management_init(struct intel_pxp *pxp) { } + +static inline void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_restart) +{ +} #endif #endif /* __INTEL_PXP_SESSION_H__ */ diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c index 73aa8015f828..d9d248b48093 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c 
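The hunks below add intel_pxp_tee_end_arb_fw_session(), which packs the arb session identity into the command header's stream_id with FIELD_PREP() and the PXP_CMDHDR_EXTDATA_* masks defined earlier in this series. A minimal standalone sketch of that packing, as illustration only: GENMASK()/FIELD_PREP() are reimplemented here because the kernel macros are not available in userspace, and the session value 0xf is an arbitrary example, not a real PXP id.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* userspace stand-ins for the kernel's GENMASK()/FIELD_PREP() */
#define GENMASK(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define PXP_CMDHDR_EXTDATA_SESSION_VALID	GENMASK(0, 0)
#define PXP_CMDHDR_EXTDATA_APP_TYPE		GENMASK(1, 1)
#define PXP_CMDHDR_EXTDATA_SESSION_ID		GENMASK(17, 2)

int main(void)
{
	uint32_t session_id = 0xf;	/* arbitrary example session */
	uint32_t stream_id;

	stream_id  = FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_VALID, 1);
	stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_APP_TYPE, 0);
	stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_ID, session_id);

	printf("stream_id = 0x%08" PRIx32 "\n", stream_id);	/* 0x0000003d */
	return 0;
}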
@@ -127,6 +127,12 @@ static int i915_pxp_tee_component_bind(struct device *i915_kdev, intel_wakeref_t wakeref; int ret = 0; + if (!HAS_HECI_PXP(i915)) { + pxp->dev_link = device_link_add(i915_kdev, tee_kdev, DL_FLAG_STATELESS); + if (drm_WARN_ON(&i915->drm, !pxp->dev_link)) + return -ENODEV; + } + mutex_lock(&pxp->tee_mutex); pxp->pxp_component = data; pxp->pxp_component->tee_dev = tee_kdev; @@ -169,6 +175,11 @@ static void i915_pxp_tee_component_unbind(struct device *i915_kdev, mutex_lock(&pxp->tee_mutex); pxp->pxp_component = NULL; mutex_unlock(&pxp->tee_mutex); + + if (pxp->dev_link) { + device_link_del(pxp->dev_link); + pxp->dev_link = NULL; + } } static const struct component_ops i915_pxp_tee_component_ops = { @@ -308,3 +319,38 @@ int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp, return ret; } + +void intel_pxp_tee_end_arb_fw_session(struct intel_pxp *pxp, u32 session_id) +{ + struct drm_i915_private *i915 = pxp->ctrl_gt->i915; + struct pxp42_inv_stream_key_in msg_in = {0}; + struct pxp42_inv_stream_key_out msg_out = {0}; + int ret, trials = 0; + +try_again: + memset(&msg_in, 0, sizeof(msg_in)); + memset(&msg_out, 0, sizeof(msg_out)); + msg_in.header.api_version = PXP_APIVER(4, 2); + msg_in.header.command_id = PXP42_CMDID_INVALIDATE_STREAM_KEY; + msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header); + + msg_in.header.stream_id = FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_VALID, 1); + msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_APP_TYPE, 0); + msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_ID, session_id); + + ret = intel_pxp_tee_io_message(pxp, + &msg_in, sizeof(msg_in), + &msg_out, sizeof(msg_out), + NULL); + + /* Cleanup coherency between GT and Firmware is critical, so try again if it fails */ + if ((ret || msg_out.header.status != 0x0) && ++trials < 3) + goto try_again; + + if (ret) + drm_err(&i915->drm, "Failed to send tee msg for inv-stream-key-%d, ret=[%d]\n", + session_id, ret); + else if (msg_out.header.status != 0x0) + drm_warn(&i915->drm, "PXP firmware failed inv-stream-key-%d with status 0x%08x\n", + session_id, msg_out.header.status); +} diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h index 7dc5f08d1583..007de49e1ea4 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h @@ -32,6 +32,9 @@ struct intel_pxp { * which are protected by &tee_mutex. */ struct i915_pxp_component *pxp_component; + + /* @dev_link: Enforce module relationship for power management ordering. */ + struct device_link *dev_link; /** * @pxp_component_added: track if the pxp component has been added. * Set and cleared in tee init and fini functions respectively. 
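The intel_pxp_tee_end_arb_fw_session() helper added above retries the invalidate-stream-key message up to three attempts in total, since a stale stream key would leave GT and firmware state incoherent. Stripped of the PXP specifics, the control flow is the bounded-retry pattern sketched here; send_msg() is a hypothetical stand-in for intel_pxp_tee_io_message(), and its fail-twice behaviour is made up for demonstration.

#include <stdio.h>

/* hypothetical transport: fails twice, then succeeds (demo only) */
static int send_msg(void)
{
	static int calls;

	return ++calls < 3;	/* nonzero = failure, 0 = success */
}

int main(void)
{
	int ret, trials = 0;

	do {
		trials++;
		ret = send_msg();
	} while (ret && trials < 3);	/* at most 3 attempts, as in the patch */

	if (ret)
		fprintf(stderr, "giving up after %d attempts\n", trials);
	else
		printf("succeeded on attempt %d\n", trials);

	return ret;
}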
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c index bba8cb6e8ae4..9f0651d48d41 100644 --- a/drivers/gpu/drm/i915/soc/intel_dram.c +++ b/drivers/gpu/drm/i915/soc/intel_dram.c @@ -10,6 +10,7 @@ #include "intel_dram.h" #include "intel_mchbar_regs.h" #include "intel_pcode.h" +#include "vlv_sideband.h" struct dram_dimm_info { u16 size; @@ -42,6 +43,155 @@ static const char *intel_dram_type_str(enum intel_dram_type type) #undef DRAM_TYPE_STR +static void pnv_detect_mem_freq(struct drm_i915_private *dev_priv) +{ + u32 tmp; + + tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG); + + switch (tmp & CLKCFG_FSB_MASK) { + case CLKCFG_FSB_533: + dev_priv->fsb_freq = 533; /* 133*4 */ + break; + case CLKCFG_FSB_800: + dev_priv->fsb_freq = 800; /* 200*4 */ + break; + case CLKCFG_FSB_667: + dev_priv->fsb_freq = 667; /* 167*4 */ + break; + case CLKCFG_FSB_400: + dev_priv->fsb_freq = 400; /* 100*4 */ + break; + } + + switch (tmp & CLKCFG_MEM_MASK) { + case CLKCFG_MEM_533: + dev_priv->mem_freq = 533; + break; + case CLKCFG_MEM_667: + dev_priv->mem_freq = 667; + break; + case CLKCFG_MEM_800: + dev_priv->mem_freq = 800; + break; + } + + /* detect pineview DDR3 setting */ + tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL); + dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0; +} + +static void ilk_detect_mem_freq(struct drm_i915_private *dev_priv) +{ + u16 ddrpll, csipll; + + ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1); + switch (ddrpll & 0xff) { + case 0xc: + dev_priv->mem_freq = 800; + break; + case 0x10: + dev_priv->mem_freq = 1066; + break; + case 0x14: + dev_priv->mem_freq = 1333; + break; + case 0x18: + dev_priv->mem_freq = 1600; + break; + default: + drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n", + ddrpll & 0xff); + dev_priv->mem_freq = 0; + break; + } + + csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0); + switch (csipll & 0x3ff) { + case 0x00c: + dev_priv->fsb_freq = 3200; + break; + case 0x00e: + dev_priv->fsb_freq = 3733; + break; + case 0x010: + dev_priv->fsb_freq = 4266; + break; + case 0x012: + dev_priv->fsb_freq = 4800; + break; + case 0x014: + dev_priv->fsb_freq = 5333; + break; + case 0x016: + dev_priv->fsb_freq = 5866; + break; + case 0x018: + dev_priv->fsb_freq = 6400; + break; + default: + drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n", + csipll & 0x3ff); + dev_priv->fsb_freq = 0; + break; + } +} + +static void chv_detect_mem_freq(struct drm_i915_private *i915) +{ + u32 val; + + vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_CCK)); + val = vlv_cck_read(i915, CCK_FUSE_REG); + vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_CCK)); + + switch ((val >> 2) & 0x7) { + case 3: + i915->mem_freq = 2000; + break; + default: + i915->mem_freq = 1600; + break; + } +} + +static void vlv_detect_mem_freq(struct drm_i915_private *i915) +{ + u32 val; + + vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_PUNIT)); + val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); + vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_PUNIT)); + + switch ((val >> 6) & 3) { + case 0: + case 1: + i915->mem_freq = 800; + break; + case 2: + i915->mem_freq = 1066; + break; + case 3: + i915->mem_freq = 1333; + break; + } +} + +static void detect_mem_freq(struct drm_i915_private *i915) +{ + if (IS_PINEVIEW(i915)) + pnv_detect_mem_freq(i915); + else if (GRAPHICS_VER(i915) == 5) + ilk_detect_mem_freq(i915); + else if (IS_CHERRYVIEW(i915)) + chv_detect_mem_freq(i915); + else if (IS_VALLEYVIEW(i915)) + vlv_detect_mem_freq(i915); + + if (i915->mem_freq) + drm_dbg(&i915->drm, "DDR 
speed: %d MHz\n", i915->mem_freq); +} + static int intel_dimm_num_devices(const struct dram_dimm_info *dimm) { return dimm->ranks * 64 / (dimm->width ?: 1); @@ -507,6 +657,8 @@ void intel_dram_detect(struct drm_i915_private *i915) struct dram_info *dram_info = &i915->dram_info; int ret; + detect_mem_freq(i915); + if (GRAPHICS_VER(i915) < 9 || IS_DG2(i915) || !HAS_DISPLAY(i915)) return; diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c index 3c55ed003359..fcd532db19c1 100644 --- a/drivers/gpu/drm/meson/meson_venc.c +++ b/drivers/gpu/drm/meson/meson_venc.c @@ -866,10 +866,10 @@ meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode) DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)) return MODE_BAD; - if (mode->hdisplay < 640 || mode->hdisplay > 1920) + if (mode->hdisplay < 400 || mode->hdisplay > 1920) return MODE_BAD_HVALUE; - if (mode->vdisplay < 480 || mode->vdisplay > 1200) + if (mode->vdisplay < 480 || mode->vdisplay > 1920) return MODE_BAD_VVALUE; return MODE_OK; diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index 9e604dbb8e44..57c7edcab602 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -375,12 +375,15 @@ int mgag200_primary_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *new_state); void mgag200_primary_plane_helper_atomic_update(struct drm_plane *plane, struct drm_atomic_state *old_state); +void mgag200_primary_plane_helper_atomic_enable(struct drm_plane *plane, + struct drm_atomic_state *state); void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane, struct drm_atomic_state *old_state); #define MGAG200_PRIMARY_PLANE_HELPER_FUNCS \ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, \ .atomic_check = mgag200_primary_plane_helper_atomic_check, \ .atomic_update = mgag200_primary_plane_helper_atomic_update, \ + .atomic_enable = mgag200_primary_plane_helper_atomic_enable, \ .atomic_disable = mgag200_primary_plane_helper_atomic_disable #define MGAG200_PRIMARY_PLANE_FUNCS \ diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 0a5aaf78172a..0f2dd26755df 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -501,10 +501,6 @@ void mgag200_primary_plane_helper_atomic_update(struct drm_plane *plane, struct drm_framebuffer *fb = plane_state->fb; struct drm_atomic_helper_damage_iter iter; struct drm_rect damage; - u8 seq1; - - if (!fb) - return; drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state); drm_atomic_for_each_plane_damage(&iter, &damage) { @@ -514,13 +510,19 @@ void mgag200_primary_plane_helper_atomic_update(struct drm_plane *plane, /* Always scanout image at VRAM offset 0 */ mgag200_set_startadd(mdev, (u32)0); mgag200_set_offset(mdev, fb); +} - if (!old_plane_state->crtc && plane_state->crtc) { // enabling - RREG_SEQ(0x01, seq1); - seq1 &= ~MGAREG_SEQ1_SCROFF; - WREG_SEQ(0x01, seq1); - msleep(20); - } +void mgag200_primary_plane_helper_atomic_enable(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_device *dev = plane->dev; + struct mga_device *mdev = to_mga_device(dev); + u8 seq1; + + RREG_SEQ(0x01, seq1); + seq1 &= ~MGAREG_SEQ1_SCROFF; + WREG_SEQ(0x01, seq1); + msleep(20); } void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane, diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 949b18a29a55..85f5ab1d552c 100644 --- 
a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -9,6 +9,7 @@ config DRM_MSM depends on QCOM_OCMEM || QCOM_OCMEM=n depends on QCOM_LLCC || QCOM_LLCC=n depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n + depends on PM select IOMMU_IO_PGTABLE select QCOM_MDT_LOADER if ARCH_QCOM select REGULATOR @@ -28,6 +29,7 @@ config DRM_MSM select SYNC_FILE select PM_OPP select NVMEM + select PM_GENERIC_DOMAINS help DRM/KMS driver for MSM/snapdragon. diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h index afa6023346c4..f87a1312f580 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h @@ -8,21 +8,21 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-06-21 15:24:24) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14609 bytes, from 2021-11-24 23:05:10) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 69086 bytes, from 2022-03-03 16:41:33) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2021-11-24 23:05:10) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113358 bytes, from 2022-01-31 23:06:21) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149512 bytes, from 2022-01-31 23:06:21) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx.xml ( 184954 bytes, from 2022-03-03 16:41:33) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-07-22 15:21:56) - -Copyright (C) 2013-2021 by the following authors: +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 18:32:52) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53) +- 
/home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52) + +Copyright (C) 2013-2023 by the following authors: - Rob Clark <[email protected]> (robclark) - Ilia Mirkin <[email protected]> (imirkin) @@ -1060,6 +1060,12 @@ enum a2xx_mh_perfcnt_select { AXI_TOTAL_READ_REQUEST_DATA_BEATS = 181, }; +enum perf_mode_cnt { + PERF_STATE_RESET = 0, + PERF_STATE_ENABLE = 1, + PERF_STATE_FREEZE = 2, +}; + enum adreno_mmu_clnt_beh { BEH_NEVR = 0, BEH_TRAN_RNG = 1, @@ -1307,6 +1313,18 @@ static inline uint32_t A2XX_MH_MMU_VA_RANGE_VA_BASE(uint32_t val) #define A2XX_RBBM_PM_OVERRIDE1_MH_TCROQ_SCLK_PM_OVERRIDE 0x80000000 #define REG_A2XX_RBBM_PM_OVERRIDE2 0x0000039d +#define A2XX_RBBM_PM_OVERRIDE2_PA_REG_SCLK_PM_OVERRIDE 0x00000001 +#define A2XX_RBBM_PM_OVERRIDE2_PA_PA_SCLK_PM_OVERRIDE 0x00000002 +#define A2XX_RBBM_PM_OVERRIDE2_PA_AG_SCLK_PM_OVERRIDE 0x00000004 +#define A2XX_RBBM_PM_OVERRIDE2_VGT_REG_SCLK_PM_OVERRIDE 0x00000008 +#define A2XX_RBBM_PM_OVERRIDE2_VGT_FIFOS_SCLK_PM_OVERRIDE 0x00000010 +#define A2XX_RBBM_PM_OVERRIDE2_VGT_VGT_SCLK_PM_OVERRIDE 0x00000020 +#define A2XX_RBBM_PM_OVERRIDE2_DEBUG_PERF_SCLK_PM_OVERRIDE 0x00000040 +#define A2XX_RBBM_PM_OVERRIDE2_PERM_SCLK_PM_OVERRIDE 0x00000080 +#define A2XX_RBBM_PM_OVERRIDE2_GC_GA_GMEM0_PM_OVERRIDE 0x00000100 +#define A2XX_RBBM_PM_OVERRIDE2_GC_GA_GMEM1_PM_OVERRIDE 0x00000200 +#define A2XX_RBBM_PM_OVERRIDE2_GC_GA_GMEM2_PM_OVERRIDE 0x00000400 +#define A2XX_RBBM_PM_OVERRIDE2_GC_GA_GMEM3_PM_OVERRIDE 0x00000800 #define REG_A2XX_RBBM_DEBUG_OUT 0x000003a0 @@ -1334,6 +1352,12 @@ static inline uint32_t A2XX_MH_MMU_VA_RANGE_VA_BASE(uint32_t val) #define REG_A2XX_RBBM_PERIPHID2 0x000003fa #define REG_A2XX_CP_PERFMON_CNTL 0x00000444 +#define A2XX_CP_PERFMON_CNTL_PERF_MODE_CNT__MASK 0x00000007 +#define A2XX_CP_PERFMON_CNTL_PERF_MODE_CNT__SHIFT 0 +static inline uint32_t A2XX_CP_PERFMON_CNTL_PERF_MODE_CNT(enum perf_mode_cnt val) +{ + return ((val) << A2XX_CP_PERFMON_CNTL_PERF_MODE_CNT__SHIFT) & A2XX_CP_PERFMON_CNTL_PERF_MODE_CNT__MASK; +} #define REG_A2XX_CP_PERFCOUNTER_SELECT 0x00000445 diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h index 520ae3f375a1..237b564445be 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h @@ -8,21 +8,21 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-06-21 15:24:24) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14609 bytes, from 2021-11-24 23:05:10) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 69086 bytes, from 2022-03-03 16:41:33) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2021-11-24 23:05:10) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113358 bytes, from 2022-01-31 23:06:21) -- 
/home/robclark/tmp/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149512 bytes, from 2022-01-31 23:06:21) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx.xml ( 184954 bytes, from 2022-03-03 16:41:33) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-07-22 15:21:56) - -Copyright (C) 2013-2021 by the following authors: +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 18:32:52) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52) + +Copyright (C) 2013-2022 by the following authors: - Rob Clark <[email protected]> (robclark) - Ilia Mirkin <[email protected]> (imirkin) diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index 948785ed07bb..c86b377f6f0d 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -477,6 +477,16 @@ static struct msm_gpu_state *a3xx_gpu_state_get(struct msm_gpu *gpu) return state; } +static u64 a3xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate) +{ + u64 busy_cycles; + + busy_cycles = gpu_read64(gpu, REG_A3XX_RBBM_PERFCTR_RBBM_1_LO); + *out_sample_rate = clk_get_rate(gpu->core_clk); + + return busy_cycles; +} + static u32 a3xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) { ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR); @@ -498,6 +508,7 @@ static const struct adreno_gpu_funcs funcs = { #if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) .show = adreno_show, #endif + .gpu_busy = a3xx_gpu_busy, .gpu_state_get = a3xx_gpu_state_get, .gpu_state_put = adreno_gpu_state_put, .create_address_space = adreno_create_address_space, diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h index 7e5c21015d10..ff5f1e98a5fc 100644 --- 
a/drivers/gpu/drm/msm/adreno/a4xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h @@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-06-21 15:24:24) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14609 bytes, from 2021-11-24 23:05:10) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 69086 bytes, from 2022-03-03 16:41:33) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2021-11-24 23:05:10) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113358 bytes, from 2022-01-31 23:06:21) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149512 bytes, from 2022-01-31 23:06:21) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx.xml ( 184954 bytes, from 2022-03-03 16:41:33) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-07-22 15:21:56) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 18:32:52) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52) Copyright (C) 2013-2022 by the following authors: - Rob Clark <[email protected]> (robclark) @@ -3159,6 +3159,18 @@ static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_GS(uint32_t val) #define REG_A4XX_TPL1_TP_GS_BORDER_COLOR_BASE_ADDR 0x0000238d #define REG_A4XX_TPL1_TP_FS_TEX_COUNT 0x000023a0 +#define 
A4XX_TPL1_TP_FS_TEX_COUNT_FS__MASK 0x000000ff +#define A4XX_TPL1_TP_FS_TEX_COUNT_FS__SHIFT 0 +static inline uint32_t A4XX_TPL1_TP_FS_TEX_COUNT_FS(uint32_t val) +{ + return ((val) << A4XX_TPL1_TP_FS_TEX_COUNT_FS__SHIFT) & A4XX_TPL1_TP_FS_TEX_COUNT_FS__MASK; +} +#define A4XX_TPL1_TP_FS_TEX_COUNT_CS__MASK 0x0000ff00 +#define A4XX_TPL1_TP_FS_TEX_COUNT_CS__SHIFT 8 +static inline uint32_t A4XX_TPL1_TP_FS_TEX_COUNT_CS(uint32_t val) +{ + return ((val) << A4XX_TPL1_TP_FS_TEX_COUNT_CS__SHIFT) & A4XX_TPL1_TP_FS_TEX_COUNT_CS__MASK; +} #define REG_A4XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x000023a1 diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index 3e09d3a7a0ac..715436cb3996 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -611,6 +611,16 @@ static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) return 0; } +static u64 a4xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate) +{ + u64 busy_cycles; + + busy_cycles = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_RBBM_1_LO); + *out_sample_rate = clk_get_rate(gpu->core_clk); + + return busy_cycles; +} + static u32 a4xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) { ring->memptrs->rptr = gpu_read(gpu, REG_A4XX_CP_RB_RPTR); @@ -632,6 +642,7 @@ static const struct adreno_gpu_funcs funcs = { #if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) .show = adreno_show, #endif + .gpu_busy = a4xx_gpu_busy, .gpu_state_get = a4xx_gpu_state_get, .gpu_state_put = adreno_gpu_state_put, .create_address_space = adreno_create_address_space, diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h index 2505b4e43ca0..03b7ee592b11 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h @@ -8,21 +8,21 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-06-21 15:24:24) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14609 bytes, from 2021-11-24 23:05:10) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 69086 bytes, from 2022-03-03 16:41:33) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2021-11-24 23:05:10) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113358 bytes, from 2022-01-31 23:06:21) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149512 bytes, from 2022-01-31 23:06:21) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx.xml ( 184954 bytes, from 2022-03-03 16:41:33) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-07-22 15:21:56) - -Copyright (C) 2013-2022 by the following authors: +- 
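/*
 * Illustrative sketch (the consumer below is hypothetical, not in this
 * patch): the new a3xx/a4xx gpu_busy() callbacks return a free-running
 * busy-cycle counter together with the clock rate it ticks at, so a
 * devfreq-style governor could sample twice and derive utilization from
 * the deltas, roughly:
 *
 *	unsigned long rate;
 *	u64 c0 = funcs->gpu_busy(gpu, &rate);
 *	usleep_range(interval_us, interval_us + 100);	// interval_us is assumed
 *	u64 c1 = funcs->gpu_busy(gpu, &rate);
 *	u64 total_cycles = (u64)rate * interval_us / USEC_PER_SEC;
 *	unsigned int busy_pct = div64_u64(100 * (c1 - c0), total_cycles);
 */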
/home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 18:32:52) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52) + +Copyright (C) 2013-2023 by the following authors: - Rob Clark <[email protected]> (robclark) - Ilia Mirkin <[email protected]> (imirkin) @@ -4218,6 +4218,7 @@ static inline uint32_t A5XX_SP_CS_CONFIG_SHADEROBJOFFSET(uint32_t val) #define REG_A5XX_SP_FS_CONFIG_MAX_CONST 0x0000e58b #define REG_A5XX_SP_VS_CTRL_REG0 0x0000e590 +#define A5XX_SP_VS_CTRL_REG0_BUFFER 0x00000004 #define A5XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00000008 #define A5XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 3 static inline uint32_t A5XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) @@ -4316,6 +4317,7 @@ static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val) #define REG_A5XX_SP_VS_OBJ_START_HI 0x0000e5ad #define REG_A5XX_SP_FS_CTRL_REG0 0x0000e5c0 +#define A5XX_SP_FS_CTRL_REG0_BUFFER 0x00000004 #define A5XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00000008 #define A5XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 3 static inline uint32_t A5XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) @@ -4406,6 +4408,7 @@ static inline uint32_t A5XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a5xx_color_fmt val) #define REG_A5XX_UNKNOWN_E5DB 0x0000e5db #define REG_A5XX_SP_CS_CTRL_REG0 0x0000e5f0 +#define A5XX_SP_CS_CTRL_REG0_BUFFER 0x00000004 #define A5XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00000008 #define A5XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 3 static inline uint32_t A5XX_SP_CS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) @@ -4440,6 +4443,7 @@ static inline uint32_t A5XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val) #define REG_A5XX_SP_CS_OBJ_START_HI 0x0000e5f4 #define REG_A5XX_SP_HS_CTRL_REG0 0x0000e600 +#define A5XX_SP_HS_CTRL_REG0_BUFFER 0x00000004 #define A5XX_SP_HS_CTRL_REG0_THREADSIZE__MASK 0x00000008 #define A5XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT 3 static inline uint32_t A5XX_SP_HS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) @@ -4474,6 +4478,7 @@ static inline uint32_t A5XX_SP_HS_CTRL_REG0_BRANCHSTACK(uint32_t val) #define REG_A5XX_SP_HS_OBJ_START_HI 0x0000e604 #define REG_A5XX_SP_DS_CTRL_REG0 0x0000e610 +#define A5XX_SP_DS_CTRL_REG0_BUFFER 
0x00000004 #define A5XX_SP_DS_CTRL_REG0_THREADSIZE__MASK 0x00000008 #define A5XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT 3 static inline uint32_t A5XX_SP_DS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) @@ -4508,6 +4513,7 @@ static inline uint32_t A5XX_SP_DS_CTRL_REG0_BRANCHSTACK(uint32_t val) #define REG_A5XX_SP_DS_OBJ_START_HI 0x0000e62d #define REG_A5XX_SP_GS_CTRL_REG0 0x0000e640 +#define A5XX_SP_GS_CTRL_REG0_BUFFER 0x00000004 #define A5XX_SP_GS_CTRL_REG0_THREADSIZE__MASK 0x00000008 #define A5XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT 3 static inline uint32_t A5XX_SP_GS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) @@ -4665,11 +4671,11 @@ static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val) { return ((val) << A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK; } -#define A5XX_HLSQ_CONTROL_2_REG_SIZE__MASK 0xff000000 -#define A5XX_HLSQ_CONTROL_2_REG_SIZE__SHIFT 24 -static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SIZE(uint32_t val) +#define A5XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK 0xff000000 +#define A5XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT 24 +static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_CENTERRHW(uint32_t val) { - return ((val) << A5XX_HLSQ_CONTROL_2_REG_SIZE__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SIZE__MASK; + return ((val) << A5XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK; } #define REG_A5XX_HLSQ_CONTROL_3_REG 0x0000e787 diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index a1e006ec5dce..d6c1c3ab19a3 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -567,7 +567,7 @@ static void a5xx_ucode_check_version(struct a5xx_gpu *a5xx_gpu, msm_gem_put_vaddr(obj); } -static int a5xx_ucode_init(struct msm_gpu *gpu) +static int a5xx_ucode_load(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); @@ -605,9 +605,24 @@ static int a5xx_ucode_init(struct msm_gpu *gpu) a5xx_ucode_check_version(a5xx_gpu, a5xx_gpu->pfp_bo); } - gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO, a5xx_gpu->pm4_iova); + if (a5xx_gpu->has_whereami) { + if (!a5xx_gpu->shadow_bo) { + a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev, + sizeof(u32) * gpu->nr_rings, + MSM_BO_WC | MSM_BO_MAP_PRIV, + gpu->aspace, &a5xx_gpu->shadow_bo, + &a5xx_gpu->shadow_iova); - gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO, a5xx_gpu->pfp_iova); + if (IS_ERR(a5xx_gpu->shadow)) + return PTR_ERR(a5xx_gpu->shadow); + + msm_gem_object_set_name(a5xx_gpu->shadow_bo, "shadow"); + } + } else if (gpu->nr_rings > 1) { + /* Disable preemption if WHERE_AM_I isn't available */ + a5xx_preempt_fini(gpu); + gpu->nr_rings = 1; + } return 0; } @@ -900,9 +915,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu) if (adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu)) a5xx_gpmu_ucode_init(gpu); - ret = a5xx_ucode_init(gpu); - if (ret) - return ret; + gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO, a5xx_gpu->pm4_iova); + gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO, a5xx_gpu->pfp_iova); /* Set the ringbuffer address */ gpu_write64(gpu, REG_A5XX_CP_RB_BASE, gpu->rb[0]->iova); @@ -916,27 +930,10 @@ static int a5xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A5XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); - /* Create a privileged buffer for the RPTR shadow */ - if (a5xx_gpu->has_whereami) { - if (!a5xx_gpu->shadow_bo) { - a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev, - sizeof(u32) * gpu->nr_rings, - MSM_BO_WC 
| MSM_BO_MAP_PRIV, - gpu->aspace, &a5xx_gpu->shadow_bo, - &a5xx_gpu->shadow_iova); - - if (IS_ERR(a5xx_gpu->shadow)) - return PTR_ERR(a5xx_gpu->shadow); - - msm_gem_object_set_name(a5xx_gpu->shadow_bo, "shadow"); - } - + /* Configure the RPTR shadow if needed: */ + if (a5xx_gpu->shadow_bo) { gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR, shadowptr(a5xx_gpu, gpu->rb[0])); - } else if (gpu->nr_rings > 1) { - /* Disable preemption if WHERE_AM_I isn't available */ - a5xx_preempt_fini(gpu); - gpu->nr_rings = 1; } a5xx_preempt_hw_init(gpu); @@ -1682,6 +1679,7 @@ static const struct adreno_gpu_funcs funcs = { .get_param = adreno_get_param, .set_param = adreno_set_param, .hw_init = a5xx_hw_init, + .ucode_load = a5xx_ucode_load, .pm_suspend = a5xx_pm_suspend, .pm_resume = a5xx_pm_resume, .recover = a5xx_recover, @@ -1743,6 +1741,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) struct a5xx_gpu *a5xx_gpu = NULL; struct adreno_gpu *adreno_gpu; struct msm_gpu *gpu; + unsigned int nr_rings; int ret; if (!pdev) { @@ -1763,7 +1762,12 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) check_speed_bin(&pdev->dev); - ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4); + nr_rings = 4; + + if (adreno_is_a510(adreno_gpu)) + nr_rings = 1; + + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, nr_rings); if (ret) { a5xx_destroy(&(a5xx_gpu->base.base)); return ERR_PTR(ret); diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h index a92788019376..4dc3be6ed45d 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h @@ -8,21 +8,21 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-06-21 15:24:24) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14609 bytes, from 2021-11-24 23:05:10) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 69086 bytes, from 2022-03-03 16:41:33) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2021-11-24 23:05:10) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113358 bytes, from 2022-01-31 23:06:21) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149512 bytes, from 2022-01-31 23:06:21) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx.xml ( 184954 bytes, from 2022-03-03 16:41:33) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-07-22 15:21:56) - -Copyright (C) 2013-2022 by the following authors: +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 18:32:52) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) +- 
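/*
 * A hedged reading of the a5xx refactor above (inference, not stated in
 * the patch): the new .ucode_load hook gathers the one-time work, i.e.
 * PM4/PFP firmware validation and the RPTR-shadow BO allocation (or the
 * fallback to nr_rings = 1 where the WHERE_AM_I opcode is unavailable),
 * so that a5xx_hw_init() can be re-run on recovery or resume and merely
 * reprogram addresses that already exist:
 *
 *	gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO, a5xx_gpu->pm4_iova);
 *	gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO, a5xx_gpu->pfp_iova);
 */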
/home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52) + +Copyright (C) 2013-2023 by the following authors: - Rob Clark <[email protected]> (robclark) - Ilia Mirkin <[email protected]> (imirkin) @@ -911,6 +911,7 @@ enum a6xx_ztest_mode { A6XX_EARLY_Z = 0, A6XX_LATE_Z = 1, A6XX_EARLY_LRZ_LATE_Z = 2, + A6XX_INVALID_ZTEST = 3, }; enum a6xx_sequenced_thread_dist { @@ -946,6 +947,12 @@ enum a6xx_buffers_location { BUFFERS_IN_SYSMEM = 3, }; +enum a6xx_lrz_dir_status { + LRZ_DIR_LE = 1, + LRZ_DIR_GE = 2, + LRZ_DIR_INVALID = 3, +}; + enum a6xx_fragcoord_sample_mode { FRAGCOORD_CENTER = 0, FRAGCOORD_SAMPLE = 3, @@ -978,6 +985,11 @@ enum a6xx_threadsize { THREAD128 = 1, }; +enum a6xx_bindless_descriptor_size { + BINDLESS_DESCRIPTOR_16B = 1, + BINDLESS_DESCRIPTOR_64B = 3, +}; + enum a6xx_isam_mode { ISAMMODE_GL = 2, }; @@ -1030,6 +1042,8 @@ enum a6xx_tex_type { #define A6XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE 0x00000001 #define A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR 0x00000002 +#define A6XX_RBBM_INT_0_MASK_CP_IPC_INTR_0 0x00000010 +#define A6XX_RBBM_INT_0_MASK_CP_IPC_INTR_1 0x00000020 #define A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW 0x00000040 #define A6XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR 0x00000080 #define A6XX_RBBM_INT_0_MASK_CP_SW 0x00000100 @@ -1040,15 +1054,19 @@ enum a6xx_tex_type { #define A6XX_RBBM_INT_0_MASK_CP_IB2 0x00002000 #define A6XX_RBBM_INT_0_MASK_CP_IB1 0x00004000 #define A6XX_RBBM_INT_0_MASK_CP_RB 0x00008000 +#define A6XX_RBBM_INT_0_MASK_PM4CPINTERRUPT 0x00008000 +#define A6XX_RBBM_INT_0_MASK_PM4CPINTERRUPTLPAC 0x00010000 #define A6XX_RBBM_INT_0_MASK_CP_RB_DONE_TS 0x00020000 #define A6XX_RBBM_INT_0_MASK_CP_WT_DONE_TS 0x00040000 #define A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS 0x00100000 +#define A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS_LPAC 0x00200000 #define A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW 0x00400000 #define A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT 0x00800000 #define A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS 0x01000000 #define A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR 0x02000000 #define A6XX_RBBM_INT_0_MASK_DEBBUS_INTR_0 0x04000000 #define A6XX_RBBM_INT_0_MASK_DEBBUS_INTR_1 0x08000000 +#define A6XX_RBBM_INT_0_MASK_TSBWRITEERROR 0x10000000 #define A6XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ 0x40000000 #define A6XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG 0x80000000 #define A6XX_CP_INT_CP_OPCODE_ERROR 
0x00000001 @@ -1058,15 +1076,21 @@ enum a6xx_tex_type { #define A6XX_CP_INT_CP_AHB_ERROR 0x00000020 #define A6XX_CP_INT_CP_VSD_PARITY_ERROR 0x00000040 #define A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR 0x00000080 +#define A6XX_CP_INT_CP_OPCODE_ERROR_LPAC 0x00000100 +#define A6XX_CP_INT_CP_UCODE_ERROR_LPAC 0x00000200 +#define A6XX_CP_INT_CP_HW_FAULT_ERROR_LPAC 0x00000400 +#define A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR_LPAC 0x00000800 +#define A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR_LPAC 0x00001000 +#define A6XX_CP_INT_CP_OPCODE_ERROR_BV 0x00002000 +#define A6XX_CP_INT_CP_UCODE_ERROR_BV 0x00004000 +#define A6XX_CP_INT_CP_HW_FAULT_ERROR_BV 0x00008000 +#define A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR_BV 0x00010000 +#define A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR_BV 0x00020000 #define REG_A6XX_CP_RB_BASE 0x00000800 -#define REG_A6XX_CP_RB_BASE_HI 0x00000801 - #define REG_A6XX_CP_RB_CNTL 0x00000802 -#define REG_A6XX_CP_RB_RPTR_ADDR_LO 0x00000804 - -#define REG_A6XX_CP_RB_RPTR_ADDR_HI 0x00000805 +#define REG_A6XX_CP_RB_RPTR_ADDR 0x00000804 #define REG_A6XX_CP_RB_RPTR 0x00000806 @@ -1083,26 +1107,28 @@ enum a6xx_tex_type { #define REG_A6XX_CP_PROTECT_STATUS 0x00000824 +#define REG_A6XX_CP_STATUS_1 0x00000825 + #define REG_A6XX_CP_SQE_INSTR_BASE 0x00000830 #define REG_A6XX_CP_MISC_CNTL 0x00000840 -#define REG_A6XX_CP_CHICKEN_DBG 0x00000841 - #define REG_A6XX_CP_APRIV_CNTL 0x00000844 +#define REG_A6XX_CP_PREEMPT_THRESHOLD 0x000008c0 + #define REG_A6XX_CP_ROQ_THRESHOLDS_1 0x000008c1 -#define A6XX_CP_ROQ_THRESHOLDS_1_RB_LO__MASK 0x000000ff -#define A6XX_CP_ROQ_THRESHOLDS_1_RB_LO__SHIFT 0 -static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_RB_LO(uint32_t val) +#define A6XX_CP_ROQ_THRESHOLDS_1_MRB_START__MASK 0x000000ff +#define A6XX_CP_ROQ_THRESHOLDS_1_MRB_START__SHIFT 0 +static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_MRB_START(uint32_t val) { - return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_RB_LO__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_RB_LO__MASK; + return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_MRB_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_MRB_START__MASK; } -#define A6XX_CP_ROQ_THRESHOLDS_1_RB_HI__MASK 0x0000ff00 -#define A6XX_CP_ROQ_THRESHOLDS_1_RB_HI__SHIFT 8 -static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_RB_HI(uint32_t val) +#define A6XX_CP_ROQ_THRESHOLDS_1_VSD_START__MASK 0x0000ff00 +#define A6XX_CP_ROQ_THRESHOLDS_1_VSD_START__SHIFT 8 +static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_VSD_START(uint32_t val) { - return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_RB_HI__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_RB_HI__MASK; + return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_VSD_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_VSD_START__MASK; } #define A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__MASK 0x00ff0000 #define A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__SHIFT 16 @@ -1164,27 +1190,21 @@ static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val) #define REG_A6XX_CP_CONTEXT_SWITCH_CNTL 0x000008a0 -#define REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO 0x000008a1 - -#define REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI 0x000008a2 +#define REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO 0x000008a1 -#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO 0x000008a3 +#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR 0x000008a3 -#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI 0x000008a4 +#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR 0x000008a5 -#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO 0x000008a5 +#define REG_A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR 0x000008a7 -#define 
REG_A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI 0x000008a6 - -#define REG_A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO 0x000008a7 - -#define REG_A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI 0x000008a8 +#define REG_A7XX_CP_CONTEXT_SWITCH_LEVEL_STATUS 0x000008ab static inline uint32_t REG_A6XX_CP_PERFCTR_CP_SEL(uint32_t i0) { return 0x000008d0 + 0x1*i0; } -#define REG_A6XX_CP_CRASH_SCRIPT_BASE_LO 0x00000900 +static inline uint32_t REG_A7XX_CP_BV_PERFCTR_CP_SEL(uint32_t i0) { return 0x000008e0 + 0x1*i0; } -#define REG_A6XX_CP_CRASH_SCRIPT_BASE_HI 0x00000901 +#define REG_A6XX_CP_CRASH_SCRIPT_BASE 0x00000900 #define REG_A6XX_CP_CRASH_DUMP_CNTL 0x00000902 @@ -1212,63 +1232,165 @@ static inline uint32_t REG_A6XX_CP_PERFCTR_CP_SEL(uint32_t i0) { return 0x000008 #define REG_A6XX_CP_IB1_BASE 0x00000928 -#define REG_A6XX_CP_IB1_BASE_HI 0x00000929 - #define REG_A6XX_CP_IB1_REM_SIZE 0x0000092a #define REG_A6XX_CP_IB2_BASE 0x0000092b -#define REG_A6XX_CP_IB2_BASE_HI 0x0000092c - #define REG_A6XX_CP_IB2_REM_SIZE 0x0000092d #define REG_A6XX_CP_SDS_BASE 0x0000092e -#define REG_A6XX_CP_SDS_BASE_HI 0x0000092f - #define REG_A6XX_CP_SDS_REM_SIZE 0x00000930 #define REG_A6XX_CP_MRB_BASE 0x00000931 -#define REG_A6XX_CP_MRB_BASE_HI 0x00000932 - #define REG_A6XX_CP_MRB_REM_SIZE 0x00000933 #define REG_A6XX_CP_VSD_BASE 0x00000934 -#define REG_A6XX_CP_VSD_BASE_HI 0x00000935 +#define REG_A6XX_CP_ROQ_RB_STAT 0x00000939 +#define A6XX_CP_ROQ_RB_STAT_RPTR__MASK 0x000003ff +#define A6XX_CP_ROQ_RB_STAT_RPTR__SHIFT 0 +static inline uint32_t A6XX_CP_ROQ_RB_STAT_RPTR(uint32_t val) +{ + return ((val) << A6XX_CP_ROQ_RB_STAT_RPTR__SHIFT) & A6XX_CP_ROQ_RB_STAT_RPTR__MASK; +} +#define A6XX_CP_ROQ_RB_STAT_WPTR__MASK 0x03ff0000 +#define A6XX_CP_ROQ_RB_STAT_WPTR__SHIFT 16 +static inline uint32_t A6XX_CP_ROQ_RB_STAT_WPTR(uint32_t val) +{ + return ((val) << A6XX_CP_ROQ_RB_STAT_WPTR__SHIFT) & A6XX_CP_ROQ_RB_STAT_WPTR__MASK; +} + +#define REG_A6XX_CP_ROQ_IB1_STAT 0x0000093a +#define A6XX_CP_ROQ_IB1_STAT_RPTR__MASK 0x000003ff +#define A6XX_CP_ROQ_IB1_STAT_RPTR__SHIFT 0 +static inline uint32_t A6XX_CP_ROQ_IB1_STAT_RPTR(uint32_t val) +{ + return ((val) << A6XX_CP_ROQ_IB1_STAT_RPTR__SHIFT) & A6XX_CP_ROQ_IB1_STAT_RPTR__MASK; +} +#define A6XX_CP_ROQ_IB1_STAT_WPTR__MASK 0x03ff0000 +#define A6XX_CP_ROQ_IB1_STAT_WPTR__SHIFT 16 +static inline uint32_t A6XX_CP_ROQ_IB1_STAT_WPTR(uint32_t val) +{ + return ((val) << A6XX_CP_ROQ_IB1_STAT_WPTR__SHIFT) & A6XX_CP_ROQ_IB1_STAT_WPTR__MASK; +} + +#define REG_A6XX_CP_ROQ_IB2_STAT 0x0000093b +#define A6XX_CP_ROQ_IB2_STAT_RPTR__MASK 0x000003ff +#define A6XX_CP_ROQ_IB2_STAT_RPTR__SHIFT 0 +static inline uint32_t A6XX_CP_ROQ_IB2_STAT_RPTR(uint32_t val) +{ + return ((val) << A6XX_CP_ROQ_IB2_STAT_RPTR__SHIFT) & A6XX_CP_ROQ_IB2_STAT_RPTR__MASK; +} +#define A6XX_CP_ROQ_IB2_STAT_WPTR__MASK 0x03ff0000 +#define A6XX_CP_ROQ_IB2_STAT_WPTR__SHIFT 16 +static inline uint32_t A6XX_CP_ROQ_IB2_STAT_WPTR(uint32_t val) +{ + return ((val) << A6XX_CP_ROQ_IB2_STAT_WPTR__SHIFT) & A6XX_CP_ROQ_IB2_STAT_WPTR__MASK; +} + +#define REG_A6XX_CP_ROQ_SDS_STAT 0x0000093c +#define A6XX_CP_ROQ_SDS_STAT_RPTR__MASK 0x000003ff +#define A6XX_CP_ROQ_SDS_STAT_RPTR__SHIFT 0 +static inline uint32_t A6XX_CP_ROQ_SDS_STAT_RPTR(uint32_t val) +{ + return ((val) << A6XX_CP_ROQ_SDS_STAT_RPTR__SHIFT) & A6XX_CP_ROQ_SDS_STAT_RPTR__MASK; +} +#define A6XX_CP_ROQ_SDS_STAT_WPTR__MASK 0x03ff0000 +#define A6XX_CP_ROQ_SDS_STAT_WPTR__SHIFT 16 +static inline uint32_t A6XX_CP_ROQ_SDS_STAT_WPTR(uint32_t val) +{ + return ((val) << A6XX_CP_ROQ_SDS_STAT_WPTR__SHIFT) 
& A6XX_CP_ROQ_SDS_STAT_WPTR__MASK; +} + +#define REG_A6XX_CP_ROQ_MRB_STAT 0x0000093d +#define A6XX_CP_ROQ_MRB_STAT_RPTR__MASK 0x000003ff +#define A6XX_CP_ROQ_MRB_STAT_RPTR__SHIFT 0 +static inline uint32_t A6XX_CP_ROQ_MRB_STAT_RPTR(uint32_t val) +{ + return ((val) << A6XX_CP_ROQ_MRB_STAT_RPTR__SHIFT) & A6XX_CP_ROQ_MRB_STAT_RPTR__MASK; +} +#define A6XX_CP_ROQ_MRB_STAT_WPTR__MASK 0x03ff0000 +#define A6XX_CP_ROQ_MRB_STAT_WPTR__SHIFT 16 +static inline uint32_t A6XX_CP_ROQ_MRB_STAT_WPTR(uint32_t val) +{ + return ((val) << A6XX_CP_ROQ_MRB_STAT_WPTR__SHIFT) & A6XX_CP_ROQ_MRB_STAT_WPTR__MASK; +} + +#define REG_A6XX_CP_ROQ_VSD_STAT 0x0000093e +#define A6XX_CP_ROQ_VSD_STAT_RPTR__MASK 0x000003ff +#define A6XX_CP_ROQ_VSD_STAT_RPTR__SHIFT 0 +static inline uint32_t A6XX_CP_ROQ_VSD_STAT_RPTR(uint32_t val) +{ + return ((val) << A6XX_CP_ROQ_VSD_STAT_RPTR__SHIFT) & A6XX_CP_ROQ_VSD_STAT_RPTR__MASK; +} +#define A6XX_CP_ROQ_VSD_STAT_WPTR__MASK 0x03ff0000 +#define A6XX_CP_ROQ_VSD_STAT_WPTR__SHIFT 16 +static inline uint32_t A6XX_CP_ROQ_VSD_STAT_WPTR(uint32_t val) +{ + return ((val) << A6XX_CP_ROQ_VSD_STAT_WPTR__SHIFT) & A6XX_CP_ROQ_VSD_STAT_WPTR__MASK; +} + +#define REG_A6XX_CP_IB1_DWORDS 0x00000943 + +#define REG_A6XX_CP_IB2_DWORDS 0x00000944 + +#define REG_A6XX_CP_SDS_DWORDS 0x00000945 #define REG_A6XX_CP_MRB_DWORDS 0x00000946 #define REG_A6XX_CP_VSD_DWORDS 0x00000947 -#define REG_A6XX_CP_CSQ_IB1_STAT 0x00000949 -#define A6XX_CP_CSQ_IB1_STAT_REM__MASK 0xffff0000 -#define A6XX_CP_CSQ_IB1_STAT_REM__SHIFT 16 -static inline uint32_t A6XX_CP_CSQ_IB1_STAT_REM(uint32_t val) +#define REG_A6XX_CP_ROQ_AVAIL_RB 0x00000948 +#define A6XX_CP_ROQ_AVAIL_RB_REM__MASK 0xffff0000 +#define A6XX_CP_ROQ_AVAIL_RB_REM__SHIFT 16 +static inline uint32_t A6XX_CP_ROQ_AVAIL_RB_REM(uint32_t val) { - return ((val) << A6XX_CP_CSQ_IB1_STAT_REM__SHIFT) & A6XX_CP_CSQ_IB1_STAT_REM__MASK; + return ((val) << A6XX_CP_ROQ_AVAIL_RB_REM__SHIFT) & A6XX_CP_ROQ_AVAIL_RB_REM__MASK; } -#define REG_A6XX_CP_CSQ_IB2_STAT 0x0000094a -#define A6XX_CP_CSQ_IB2_STAT_REM__MASK 0xffff0000 -#define A6XX_CP_CSQ_IB2_STAT_REM__SHIFT 16 -static inline uint32_t A6XX_CP_CSQ_IB2_STAT_REM(uint32_t val) +#define REG_A6XX_CP_ROQ_AVAIL_IB1 0x00000949 +#define A6XX_CP_ROQ_AVAIL_IB1_REM__MASK 0xffff0000 +#define A6XX_CP_ROQ_AVAIL_IB1_REM__SHIFT 16 +static inline uint32_t A6XX_CP_ROQ_AVAIL_IB1_REM(uint32_t val) { - return ((val) << A6XX_CP_CSQ_IB2_STAT_REM__SHIFT) & A6XX_CP_CSQ_IB2_STAT_REM__MASK; + return ((val) << A6XX_CP_ROQ_AVAIL_IB1_REM__SHIFT) & A6XX_CP_ROQ_AVAIL_IB1_REM__MASK; } -#define REG_A6XX_CP_MRQ_MRB_STAT 0x0000094c -#define A6XX_CP_MRQ_MRB_STAT_REM__MASK 0xffff0000 -#define A6XX_CP_MRQ_MRB_STAT_REM__SHIFT 16 -static inline uint32_t A6XX_CP_MRQ_MRB_STAT_REM(uint32_t val) +#define REG_A6XX_CP_ROQ_AVAIL_IB2 0x0000094a +#define A6XX_CP_ROQ_AVAIL_IB2_REM__MASK 0xffff0000 +#define A6XX_CP_ROQ_AVAIL_IB2_REM__SHIFT 16 +static inline uint32_t A6XX_CP_ROQ_AVAIL_IB2_REM(uint32_t val) { - return ((val) << A6XX_CP_MRQ_MRB_STAT_REM__SHIFT) & A6XX_CP_MRQ_MRB_STAT_REM__MASK; + return ((val) << A6XX_CP_ROQ_AVAIL_IB2_REM__SHIFT) & A6XX_CP_ROQ_AVAIL_IB2_REM__MASK; } -#define REG_A6XX_CP_ALWAYS_ON_COUNTER_LO 0x00000980 +#define REG_A6XX_CP_ROQ_AVAIL_SDS 0x0000094b +#define A6XX_CP_ROQ_AVAIL_SDS_REM__MASK 0xffff0000 +#define A6XX_CP_ROQ_AVAIL_SDS_REM__SHIFT 16 +static inline uint32_t A6XX_CP_ROQ_AVAIL_SDS_REM(uint32_t val) +{ + return ((val) << A6XX_CP_ROQ_AVAIL_SDS_REM__SHIFT) & A6XX_CP_ROQ_AVAIL_SDS_REM__MASK; +} -#define REG_A6XX_CP_ALWAYS_ON_COUNTER_HI 0x00000981 +#define 
REG_A6XX_CP_ROQ_AVAIL_MRB 0x0000094c +#define A6XX_CP_ROQ_AVAIL_MRB_REM__MASK 0xffff0000 +#define A6XX_CP_ROQ_AVAIL_MRB_REM__SHIFT 16 +static inline uint32_t A6XX_CP_ROQ_AVAIL_MRB_REM(uint32_t val) +{ + return ((val) << A6XX_CP_ROQ_AVAIL_MRB_REM__SHIFT) & A6XX_CP_ROQ_AVAIL_MRB_REM__MASK; +} + +#define REG_A6XX_CP_ROQ_AVAIL_VSD 0x0000094d +#define A6XX_CP_ROQ_AVAIL_VSD_REM__MASK 0xffff0000 +#define A6XX_CP_ROQ_AVAIL_VSD_REM__SHIFT 16 +static inline uint32_t A6XX_CP_ROQ_AVAIL_VSD_REM(uint32_t val) +{ + return ((val) << A6XX_CP_ROQ_AVAIL_VSD_REM__SHIFT) & A6XX_CP_ROQ_AVAIL_VSD_REM__MASK; +} + +#define REG_A6XX_CP_ALWAYS_ON_COUNTER 0x00000980 #define REG_A6XX_CP_AHB_CNTL 0x0000098d @@ -1276,12 +1398,70 @@ static inline uint32_t A6XX_CP_MRQ_MRB_STAT_REM(uint32_t val) #define REG_A6XX_CP_APERTURE_CNTL_CD 0x00000a03 +#define REG_A7XX_CP_BV_PROTECT_STATUS 0x00000a61 + +#define REG_A7XX_CP_BV_HW_FAULT 0x00000a64 + +#define REG_A7XX_CP_BV_DRAW_STATE_ADDR 0x00000a81 + +#define REG_A7XX_CP_BV_DRAW_STATE_DATA 0x00000a82 + +#define REG_A7XX_CP_BV_ROQ_DBG_ADDR 0x00000a83 + +#define REG_A7XX_CP_BV_ROQ_DBG_DATA 0x00000a84 + +#define REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR 0x00000a85 + +#define REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA 0x00000a86 + +#define REG_A7XX_CP_BV_SQE_STAT_ADDR 0x00000a87 + +#define REG_A7XX_CP_BV_SQE_STAT_DATA 0x00000a88 + +#define REG_A7XX_CP_BV_MEM_POOL_DBG_ADDR 0x00000a96 + +#define REG_A7XX_CP_BV_MEM_POOL_DBG_DATA 0x00000a97 + +#define REG_A7XX_CP_BV_RB_RPTR_ADDR 0x00000a98 + +#define REG_A7XX_CP_RESOURCE_TBL_DBG_ADDR 0x00000a9a + +#define REG_A7XX_CP_RESOURCE_TBL_DBG_DATA 0x00000a9b + +#define REG_A7XX_CP_BV_APRIV_CNTL 0x00000ad0 + +#define REG_A7XX_CP_BV_CHICKEN_DBG 0x00000ada + +#define REG_A7XX_CP_LPAC_DRAW_STATE_ADDR 0x00000b0a + +#define REG_A7XX_CP_LPAC_DRAW_STATE_DATA 0x00000b0b + +#define REG_A7XX_CP_LPAC_ROQ_DBG_ADDR 0x00000b0c + +#define REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR 0x00000b27 + +#define REG_A7XX_CP_SQE_AC_UCODE_DBG_DATA 0x00000b28 + +#define REG_A7XX_CP_SQE_AC_STAT_ADDR 0x00000b29 + +#define REG_A7XX_CP_SQE_AC_STAT_DATA 0x00000b2a + +#define REG_A7XX_CP_LPAC_APRIV_CNTL 0x00000b31 + #define REG_A6XX_CP_LPAC_PROG_FIFO_SIZE 0x00000b34 +#define REG_A7XX_CP_LPAC_ROQ_DBG_DATA 0x00000b35 + +#define REG_A7XX_CP_LPAC_FIFO_DBG_DATA 0x00000b36 + +#define REG_A7XX_CP_LPAC_FIFO_DBG_ADDR 0x00000b40 + #define REG_A6XX_CP_LPAC_SQE_INSTR_BASE 0x00000b82 #define REG_A6XX_VSC_ADDR_MODE_CNTL 0x00000c01 +#define REG_A6XX_RBBM_GPR0_CNTL 0x00000018 + #define REG_A6XX_RBBM_INT_0_STATUS 0x00000201 #define REG_A6XX_RBBM_STATUS 0x00000210 @@ -1310,11 +1490,27 @@ static inline uint32_t A6XX_CP_MRQ_MRB_STAT_REM(uint32_t val) #define A6XX_RBBM_STATUS_CP_AHB_BUSY_CP_MASTER 0x00000002 #define A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER 0x00000001 +#define REG_A6XX_RBBM_STATUS1 0x00000211 + +#define REG_A6XX_RBBM_STATUS2 0x00000212 + #define REG_A6XX_RBBM_STATUS3 0x00000213 #define A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT 0x01000000 #define REG_A6XX_RBBM_VBIF_GX_RESET_STATUS 0x00000215 +#define REG_A7XX_RBBM_CLOCK_MODE_CP 0x00000260 + +#define REG_A7XX_RBBM_CLOCK_MODE_BV_LRZ 0x00000284 + +#define REG_A7XX_RBBM_CLOCK_MODE_BV_GRAS 0x00000285 + +#define REG_A7XX_RBBM_CLOCK_MODE2_GRAS 0x00000286 + +#define REG_A7XX_RBBM_CLOCK_MODE_BV_VFD 0x00000287 + +#define REG_A7XX_RBBM_CLOCK_MODE_BV_GPC 0x00000288 + static inline uint32_t REG_A6XX_RBBM_PERFCTR_CP(uint32_t i0) { return 0x00000400 + 0x2*i0; } static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM(uint32_t i0) { return 0x0000041c + 0x2*i0; } @@ -1347,6 +1543,62 @@ static 
inline uint32_t REG_A6XX_RBBM_PERFCTR_LRZ(uint32_t i0) { return 0x000004e static inline uint32_t REG_A6XX_RBBM_PERFCTR_CMP(uint32_t i0) { return 0x000004f2 + 0x2*i0; } +static inline uint32_t REG_A7XX_RBBM_PERFCTR_CP(uint32_t i0) { return 0x00000300 + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR_RBBM(uint32_t i0) { return 0x0000031c + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR_PC(uint32_t i0) { return 0x00000324 + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR_VFD(uint32_t i0) { return 0x00000334 + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR_HLSQ(uint32_t i0) { return 0x00000344 + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR_VPC(uint32_t i0) { return 0x00000350 + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR_CCU(uint32_t i0) { return 0x0000035c + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR_TSE(uint32_t i0) { return 0x00000366 + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR_RAS(uint32_t i0) { return 0x0000036e + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR_UCHE(uint32_t i0) { return 0x00000376 + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR_TP(uint32_t i0) { return 0x0000038e + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR_SP(uint32_t i0) { return 0x000003a6 + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR_RB(uint32_t i0) { return 0x000003d6 + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR_VSC(uint32_t i0) { return 0x000003e6 + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR_LRZ(uint32_t i0) { return 0x000003ea + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR_CMP(uint32_t i0) { return 0x000003f2 + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR_UFC(uint32_t i0) { return 0x000003fa + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR2_HLSQ(uint32_t i0) { return 0x00000410 + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR2_CP(uint32_t i0) { return 0x0000041c + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR2_SP(uint32_t i0) { return 0x0000042a + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR2_TP(uint32_t i0) { return 0x00000442 + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR2_UFC(uint32_t i0) { return 0x0000044e + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR_BV_PC(uint32_t i0) { return 0x00000460 + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR_BV_VFD(uint32_t i0) { return 0x00000470 + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR_BV_VPC(uint32_t i0) { return 0x00000480 + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR_BV_TSE(uint32_t i0) { return 0x0000048c + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR_BV_RAS(uint32_t i0) { return 0x00000494 + 0x2*i0; } + +static inline uint32_t REG_A7XX_RBBM_PERFCTR_BV_LRZ(uint32_t i0) { return 0x0000049c + 0x2*i0; } + #define REG_A6XX_RBBM_PERFCTR_CNTL 0x00000500 #define REG_A6XX_RBBM_PERFCTR_LOAD_CMD0 0x00000501 @@ -1371,6 +1623,10 @@ static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM_SEL(uint32_t i0) { return 0x00 #define REG_A6XX_RBBM_ISDB_CNT 0x00000533 +#define REG_A7XX_RBBM_NC_MODE_CNTL 0x00000534 + +#define REG_A7XX_RBBM_SNAPSHOT_STATUS 0x00000535 + #define REG_A6XX_RBBM_PRIMCTR_0_LO 0x00000540 #define REG_A6XX_RBBM_PRIMCTR_0_HI 0x00000541 @@ -1417,9 +1673,7 @@ static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM_SEL(uint32_t i0) { return 0x00 #define REG_A6XX_RBBM_SECVID_TRUST_CNTL 0x0000f400 -#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO 0x0000f800 - 
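/*
 * Illustrative note: the many _LO/_HI register pairs collapsed in this
 * header (CP_RB_BASE, CP_IB1/IB2_BASE, SECVID_TSB_TRUSTED_BASE, the UCHE
 * ranges, ALWAYS_ON_COUNTER, ...) match the driver's move to single
 * 64-bit accessors; callers address one base offset, as seen elsewhere
 * in this series:
 *
 *	gpu_write64(gpu, REG_A5XX_CP_RB_BASE, gpu->rb[0]->iova);
 *	busy_cycles = gpu_read64(gpu, REG_A3XX_RBBM_PERFCTR_RBBM_1_LO);
 */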
-#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI 0x0000f801 +#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE 0x0000f800 #define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE 0x0000f802 @@ -1427,6 +1681,8 @@ static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM_SEL(uint32_t i0) { return 0x00 #define REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0x0000f810 +#define REG_A7XX_RBBM_SECVID_TSB_STATUS 0x0000fc00 + #define REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL 0x00000010 #define REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL 0x00000011 @@ -1438,12 +1694,18 @@ static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM_SEL(uint32_t i0) { return 0x00 #define REG_A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD 0x0000001c #define A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD_WAIT_GPU_IDLE 0x00000001 +#define REG_A7XX_RBBM_GBIF_HALT 0x00000016 + +#define REG_A7XX_RBBM_GBIF_HALT_ACK 0x00000017 + #define REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000001f #define REG_A6XX_RBBM_INT_CLEAR_CMD 0x00000037 #define REG_A6XX_RBBM_INT_0_MASK 0x00000038 +#define REG_A7XX_RBBM_INT_2_MASK 0x0000003a + #define REG_A6XX_RBBM_SP_HYST_CNT 0x00000042 #define REG_A6XX_RBBM_SW_RESET_CMD 0x00000043 @@ -1674,6 +1936,8 @@ static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM_SEL(uint32_t i0) { return 0x00 #define REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE 0x00000122 +#define REG_A6XX_RBBM_LPAC_GBIF_CLIENT_QOS_CNTL 0x000005ff + #define REG_A6XX_DBGC_CFG_DBGBUS_SEL_A 0x00000600 #define REG_A6XX_DBGC_CFG_DBGBUS_SEL_B 0x00000601 @@ -1852,25 +2116,15 @@ static inline uint32_t REG_A6XX_VSC_PERFCTR_VSC_SEL(uint32_t i0) { return 0x0000 #define REG_A6XX_UCHE_MODE_CNTL 0x00000e01 -#define REG_A6XX_UCHE_WRITE_RANGE_MAX_LO 0x00000e05 - -#define REG_A6XX_UCHE_WRITE_RANGE_MAX_HI 0x00000e06 - -#define REG_A6XX_UCHE_WRITE_THRU_BASE_LO 0x00000e07 +#define REG_A6XX_UCHE_WRITE_RANGE_MAX 0x00000e05 -#define REG_A6XX_UCHE_WRITE_THRU_BASE_HI 0x00000e08 +#define REG_A6XX_UCHE_WRITE_THRU_BASE 0x00000e07 -#define REG_A6XX_UCHE_TRAP_BASE_LO 0x00000e09 +#define REG_A6XX_UCHE_TRAP_BASE 0x00000e09 -#define REG_A6XX_UCHE_TRAP_BASE_HI 0x00000e0a +#define REG_A6XX_UCHE_GMEM_RANGE_MIN 0x00000e0b -#define REG_A6XX_UCHE_GMEM_RANGE_MIN_LO 0x00000e0b - -#define REG_A6XX_UCHE_GMEM_RANGE_MIN_HI 0x00000e0c - -#define REG_A6XX_UCHE_GMEM_RANGE_MAX_LO 0x00000e0d - -#define REG_A6XX_UCHE_GMEM_RANGE_MAX_HI 0x00000e0e +#define REG_A6XX_UCHE_GMEM_RANGE_MAX 0x00000e0d #define REG_A6XX_UCHE_CACHE_WAYS 0x00000e17 @@ -1886,6 +2140,8 @@ static inline uint32_t A6XX_UCHE_CLIENT_PF_PERFSEL(uint32_t val) static inline uint32_t REG_A6XX_UCHE_PERFCTR_UCHE_SEL(uint32_t i0) { return 0x00000e1c + 0x1*i0; } +#define REG_A6XX_UCHE_GBIF_GX_CONFIG 0x00000e3a + #define REG_A6XX_UCHE_CMDQ_CONFIG 0x00000e3c #define REG_A6XX_VBIF_VERSION 0x00003000 @@ -1983,6 +2239,8 @@ static inline uint32_t A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL(uint32_t val) #define REG_A6XX_GBIF_PERF_PWR_CNT_EN 0x00003cc0 +#define REG_A6XX_GBIF_PERF_PWR_CNT_CLR 0x00003cc1 + #define REG_A6XX_GBIF_PERF_CNT_SEL 0x00003cc2 #define REG_A6XX_GBIF_PERF_PWR_CNT_SEL 0x00003cc3 @@ -2105,7 +2363,7 @@ static inline uint32_t REG_A6XX_VSC_DRAW_STRM_SIZE_REG(uint32_t i0) { return 0x0 #define A6XX_GRAS_CL_CNTL_CLIP_DISABLE 0x00000001 #define A6XX_GRAS_CL_CNTL_ZNEAR_CLIP_DISABLE 0x00000002 #define A6XX_GRAS_CL_CNTL_ZFAR_CLIP_DISABLE 0x00000004 -#define A6XX_GRAS_CL_CNTL_UNK5 0x00000020 +#define A6XX_GRAS_CL_CNTL_Z_CLAMP_ENABLE 0x00000020 #define A6XX_GRAS_CL_CNTL_ZERO_GB_SCALE_Z 0x00000040 #define A6XX_GRAS_CL_CNTL_VP_CLIP_CODE_IGNORE 0x00000080 #define A6XX_GRAS_CL_CNTL_VP_XFORM_DISABLE 0x00000100 @@ -2420,11 +2678,12 @@ static 
inline uint32_t A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION(enum a6xx { return ((val) << A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION__SHIFT) & A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION__MASK; } -#define A6XX_GRAS_SC_CNTL_UNK9__MASK 0x00000e00 -#define A6XX_GRAS_SC_CNTL_UNK9__SHIFT 9 -static inline uint32_t A6XX_GRAS_SC_CNTL_UNK9(uint32_t val) +#define A6XX_GRAS_SC_CNTL_UNK9 0x00000200 +#define A6XX_GRAS_SC_CNTL_ROTATION__MASK 0x00000c00 +#define A6XX_GRAS_SC_CNTL_ROTATION__SHIFT 10 +static inline uint32_t A6XX_GRAS_SC_CNTL_ROTATION(uint32_t val) { - return ((val) << A6XX_GRAS_SC_CNTL_UNK9__SHIFT) & A6XX_GRAS_SC_CNTL_UNK9__MASK; + return ((val) << A6XX_GRAS_SC_CNTL_ROTATION__SHIFT) & A6XX_GRAS_SC_CNTL_ROTATION__MASK; } #define A6XX_GRAS_SC_CNTL_EARLYVIZOUTEN 0x00001000 @@ -2697,12 +2956,14 @@ static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val) #define A6XX_GRAS_LRZ_CNTL_FC_ENABLE 0x00000008 #define A6XX_GRAS_LRZ_CNTL_Z_TEST_ENABLE 0x00000010 #define A6XX_GRAS_LRZ_CNTL_Z_BOUNDS_ENABLE 0x00000020 -#define A6XX_GRAS_LRZ_CNTL_UNK6__MASK 0x000003c0 -#define A6XX_GRAS_LRZ_CNTL_UNK6__SHIFT 6 -static inline uint32_t A6XX_GRAS_LRZ_CNTL_UNK6(uint32_t val) +#define A6XX_GRAS_LRZ_CNTL_DIR__MASK 0x000000c0 +#define A6XX_GRAS_LRZ_CNTL_DIR__SHIFT 6 +static inline uint32_t A6XX_GRAS_LRZ_CNTL_DIR(enum a6xx_lrz_dir_status val) { - return ((val) << A6XX_GRAS_LRZ_CNTL_UNK6__SHIFT) & A6XX_GRAS_LRZ_CNTL_UNK6__MASK; + return ((val) << A6XX_GRAS_LRZ_CNTL_DIR__SHIFT) & A6XX_GRAS_LRZ_CNTL_DIR__MASK; } +#define A6XX_GRAS_LRZ_CNTL_DIR_WRITE 0x00000100 +#define A6XX_GRAS_LRZ_CNTL_DISABLE_ON_WRONG_DIR 0x00000200 #define REG_A6XX_GRAS_LRZ_PS_INPUT_CNTL 0x00008101 #define A6XX_GRAS_LRZ_PS_INPUT_CNTL_SAMPLEID 0x00000001 @@ -2754,24 +3015,24 @@ static inline uint32_t A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(uint32_t val) #define REG_A6XX_GRAS_SAMPLE_CNTL 0x00008109 #define A6XX_GRAS_SAMPLE_CNTL_PER_SAMP_MODE 0x00000001 -#define REG_A6XX_GRAS_UNKNOWN_810A 0x0000810a -#define A6XX_GRAS_UNKNOWN_810A_UNK0__MASK 0x000007ff -#define A6XX_GRAS_UNKNOWN_810A_UNK0__SHIFT 0 -static inline uint32_t A6XX_GRAS_UNKNOWN_810A_UNK0(uint32_t val) +#define REG_A6XX_GRAS_LRZ_DEPTH_VIEW 0x0000810a +#define A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_LAYER__MASK 0x000007ff +#define A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_LAYER__SHIFT 0 +static inline uint32_t A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_LAYER(uint32_t val) { - return ((val) << A6XX_GRAS_UNKNOWN_810A_UNK0__SHIFT) & A6XX_GRAS_UNKNOWN_810A_UNK0__MASK; + return ((val) << A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_LAYER__SHIFT) & A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_LAYER__MASK; } -#define A6XX_GRAS_UNKNOWN_810A_UNK16__MASK 0x07ff0000 -#define A6XX_GRAS_UNKNOWN_810A_UNK16__SHIFT 16 -static inline uint32_t A6XX_GRAS_UNKNOWN_810A_UNK16(uint32_t val) +#define A6XX_GRAS_LRZ_DEPTH_VIEW_LAYER_COUNT__MASK 0x07ff0000 +#define A6XX_GRAS_LRZ_DEPTH_VIEW_LAYER_COUNT__SHIFT 16 +static inline uint32_t A6XX_GRAS_LRZ_DEPTH_VIEW_LAYER_COUNT(uint32_t val) { - return ((val) << A6XX_GRAS_UNKNOWN_810A_UNK16__SHIFT) & A6XX_GRAS_UNKNOWN_810A_UNK16__MASK; + return ((val) << A6XX_GRAS_LRZ_DEPTH_VIEW_LAYER_COUNT__SHIFT) & A6XX_GRAS_LRZ_DEPTH_VIEW_LAYER_COUNT__MASK; } -#define A6XX_GRAS_UNKNOWN_810A_UNK28__MASK 0xf0000000 -#define A6XX_GRAS_UNKNOWN_810A_UNK28__SHIFT 28 -static inline uint32_t A6XX_GRAS_UNKNOWN_810A_UNK28(uint32_t val) +#define A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_MIP_LEVEL__MASK 0xf0000000 +#define A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_MIP_LEVEL__SHIFT 28 +static inline uint32_t A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_MIP_LEVEL(uint32_t val) { - 
return ((val) << A6XX_GRAS_UNKNOWN_810A_UNK28__SHIFT) & A6XX_GRAS_UNKNOWN_810A_UNK28__MASK; + return ((val) << A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_MIP_LEVEL__SHIFT) & A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_MIP_LEVEL__MASK; } #define REG_A6XX_GRAS_UNKNOWN_8110 0x00008110 @@ -2900,6 +3161,8 @@ static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_2_Y(uint32_t val) #define REG_A6XX_GRAS_ADDR_MODE_CNTL 0x00008601 +#define REG_A7XX_GRAS_NC_MODE_CNTL 0x00008602 + static inline uint32_t REG_A6XX_GRAS_PERFCTR_TSE_SEL(uint32_t i0) { return 0x00008610 + 0x1*i0; } static inline uint32_t REG_A6XX_GRAS_PERFCTR_RAS_SEL(uint32_t i0) { return 0x00008614 + 0x1*i0; } @@ -3126,7 +3389,7 @@ static inline uint32_t A6XX_RB_RENDER_CONTROL0_COORD_MASK(uint32_t val) #define REG_A6XX_RB_RENDER_CONTROL1 0x0000880a #define A6XX_RB_RENDER_CONTROL1_SAMPLEMASK 0x00000001 -#define A6XX_RB_RENDER_CONTROL1_UNK1 0x00000002 +#define A6XX_RB_RENDER_CONTROL1_POSTDEPTHCOVERAGE 0x00000002 #define A6XX_RB_RENDER_CONTROL1_FACENESS 0x00000004 #define A6XX_RB_RENDER_CONTROL1_SAMPLEID 0x00000008 #define A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE__MASK 0x00000030 @@ -3135,7 +3398,7 @@ static inline uint32_t A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE(enum a6xx_fra { return ((val) << A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE__SHIFT) & A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE__MASK; } -#define A6XX_RB_RENDER_CONTROL1_SIZE 0x00000040 +#define A6XX_RB_RENDER_CONTROL1_CENTERRHW 0x00000040 #define A6XX_RB_RENDER_CONTROL1_LINELENGTHEN 0x00000080 #define A6XX_RB_RENDER_CONTROL1_FOVEATION 0x00000100 @@ -3691,7 +3954,7 @@ static inline uint32_t A6XX_RB_WINDOW_OFFSET_Y(uint32_t val) } #define REG_A6XX_RB_SAMPLE_COUNT_CONTROL 0x00008891 -#define A6XX_RB_SAMPLE_COUNT_CONTROL_UNK0 0x00000001 +#define A6XX_RB_SAMPLE_COUNT_CONTROL_DISABLE 0x00000001 #define A6XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002 #define REG_A6XX_RB_LRZ_CNTL 0x00008898 @@ -3783,12 +4046,12 @@ static inline uint32_t A6XX_RB_WINDOW_OFFSET2_Y(uint32_t val) return ((val) << A6XX_RB_WINDOW_OFFSET2_Y__SHIFT) & A6XX_RB_WINDOW_OFFSET2_Y__MASK; } -#define REG_A6XX_RB_MSAA_CNTL 0x000088d5 -#define A6XX_RB_MSAA_CNTL_SAMPLES__MASK 0x00000018 -#define A6XX_RB_MSAA_CNTL_SAMPLES__SHIFT 3 -static inline uint32_t A6XX_RB_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val) +#define REG_A6XX_RB_BLIT_GMEM_MSAA_CNTL 0x000088d5 +#define A6XX_RB_BLIT_GMEM_MSAA_CNTL_SAMPLES__MASK 0x00000018 +#define A6XX_RB_BLIT_GMEM_MSAA_CNTL_SAMPLES__SHIFT 3 +static inline uint32_t A6XX_RB_BLIT_GMEM_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val) { - return ((val) << A6XX_RB_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_MSAA_CNTL_SAMPLES__MASK; + return ((val) << A6XX_RB_BLIT_GMEM_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_BLIT_GMEM_MSAA_CNTL_SAMPLES__MASK; } #define REG_A6XX_RB_BLIT_BASE_GMEM 0x000088d6 @@ -3892,17 +4155,17 @@ static inline uint32_t A6XX_RB_BLIT_INFO_CLEAR_MASK(uint32_t val) { return ((val) << A6XX_RB_BLIT_INFO_CLEAR_MASK__SHIFT) & A6XX_RB_BLIT_INFO_CLEAR_MASK__MASK; } -#define A6XX_RB_BLIT_INFO_UNK8__MASK 0x00000300 -#define A6XX_RB_BLIT_INFO_UNK8__SHIFT 8 -static inline uint32_t A6XX_RB_BLIT_INFO_UNK8(uint32_t val) +#define A6XX_RB_BLIT_INFO_LAST__MASK 0x00000300 +#define A6XX_RB_BLIT_INFO_LAST__SHIFT 8 +static inline uint32_t A6XX_RB_BLIT_INFO_LAST(uint32_t val) { - return ((val) << A6XX_RB_BLIT_INFO_UNK8__SHIFT) & A6XX_RB_BLIT_INFO_UNK8__MASK; + return ((val) << A6XX_RB_BLIT_INFO_LAST__SHIFT) & A6XX_RB_BLIT_INFO_LAST__MASK; } -#define A6XX_RB_BLIT_INFO_UNK12__MASK 0x0000f000 -#define A6XX_RB_BLIT_INFO_UNK12__SHIFT 12 -static inline 
uint32_t A6XX_RB_BLIT_INFO_UNK12(uint32_t val) +#define A6XX_RB_BLIT_INFO_BUFFER_ID__MASK 0x0000f000 +#define A6XX_RB_BLIT_INFO_BUFFER_ID__SHIFT 12 +static inline uint32_t A6XX_RB_BLIT_INFO_BUFFER_ID(uint32_t val) { - return ((val) << A6XX_RB_BLIT_INFO_UNK12__SHIFT) & A6XX_RB_BLIT_INFO_UNK12__MASK; + return ((val) << A6XX_RB_BLIT_INFO_BUFFER_ID__SHIFT) & A6XX_RB_BLIT_INFO_BUFFER_ID__MASK; } #define REG_A6XX_RB_UNKNOWN_88F0 0x000088f0 @@ -4173,16 +4436,23 @@ static inline uint32_t A6XX_RB_2D_DST_FLAGS_PLANE_PITCH(uint32_t val) #define REG_A6XX_RB_UNKNOWN_8E01 0x00008e01 -#define REG_A6XX_RB_UNKNOWN_8E04 0x00008e04 +#define REG_A6XX_RB_DBG_ECO_CNTL 0x00008e04 #define REG_A6XX_RB_ADDR_MODE_CNTL 0x00008e05 #define REG_A6XX_RB_CCU_CNTL 0x00008e07 -#define A6XX_RB_CCU_CNTL_COLOR_OFFSET__MASK 0xff800000 -#define A6XX_RB_CCU_CNTL_COLOR_OFFSET__SHIFT 23 -static inline uint32_t A6XX_RB_CCU_CNTL_COLOR_OFFSET(uint32_t val) +#define A6XX_RB_CCU_CNTL_CONCURRENT_RESOLVE 0x00000004 +#define A6XX_RB_CCU_CNTL_DEPTH_OFFSET_HI__MASK 0x00000080 +#define A6XX_RB_CCU_CNTL_DEPTH_OFFSET_HI__SHIFT 7 +static inline uint32_t A6XX_RB_CCU_CNTL_DEPTH_OFFSET_HI(uint32_t val) { - return ((val >> 12) << A6XX_RB_CCU_CNTL_COLOR_OFFSET__SHIFT) & A6XX_RB_CCU_CNTL_COLOR_OFFSET__MASK; + return ((val) << A6XX_RB_CCU_CNTL_DEPTH_OFFSET_HI__SHIFT) & A6XX_RB_CCU_CNTL_DEPTH_OFFSET_HI__MASK; +} +#define A6XX_RB_CCU_CNTL_COLOR_OFFSET_HI__MASK 0x00000200 +#define A6XX_RB_CCU_CNTL_COLOR_OFFSET_HI__SHIFT 9 +static inline uint32_t A6XX_RB_CCU_CNTL_COLOR_OFFSET_HI(uint32_t val) +{ + return ((val) << A6XX_RB_CCU_CNTL_COLOR_OFFSET_HI__SHIFT) & A6XX_RB_CCU_CNTL_COLOR_OFFSET_HI__MASK; } #define A6XX_RB_CCU_CNTL_DEPTH_OFFSET__MASK 0x001ff000 #define A6XX_RB_CCU_CNTL_DEPTH_OFFSET__SHIFT 12 @@ -4191,7 +4461,12 @@ static inline uint32_t A6XX_RB_CCU_CNTL_DEPTH_OFFSET(uint32_t val) return ((val >> 12) << A6XX_RB_CCU_CNTL_DEPTH_OFFSET__SHIFT) & A6XX_RB_CCU_CNTL_DEPTH_OFFSET__MASK; } #define A6XX_RB_CCU_CNTL_GMEM 0x00400000 -#define A6XX_RB_CCU_CNTL_UNK2 0x00000004 +#define A6XX_RB_CCU_CNTL_COLOR_OFFSET__MASK 0xff800000 +#define A6XX_RB_CCU_CNTL_COLOR_OFFSET__SHIFT 23 +static inline uint32_t A6XX_RB_CCU_CNTL_COLOR_OFFSET(uint32_t val) +{ + return ((val >> 12) << A6XX_RB_CCU_CNTL_COLOR_OFFSET__SHIFT) & A6XX_RB_CCU_CNTL_COLOR_OFFSET__MASK; +} #define REG_A6XX_RB_NC_MODE_CNTL 0x00008e08 #define A6XX_RB_NC_MODE_CNTL_MODE 0x00000001 @@ -4225,6 +4500,8 @@ static inline uint32_t REG_A6XX_RB_PERFCTR_CCU_SEL(uint32_t i0) { return 0x00008 static inline uint32_t REG_A6XX_RB_PERFCTR_CMP_SEL(uint32_t i0) { return 0x00008e2c + 0x1*i0; } +static inline uint32_t REG_A7XX_RB_PERFCTR_UFC_SEL(uint32_t i0) { return 0x00008e30 + 0x1*i0; } + #define REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST 0x00008e3b #define REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD 0x00008e3d @@ -4440,7 +4717,13 @@ static inline uint32_t A6XX_VPC_SO_BUFFER_SIZE(uint32_t val) return ((val >> 2) << A6XX_VPC_SO_BUFFER_SIZE__SHIFT) & A6XX_VPC_SO_BUFFER_SIZE__MASK; } -static inline uint32_t REG_A6XX_VPC_SO_NCOMP(uint32_t i0) { return 0x0000921d + 0x7*i0; } +static inline uint32_t REG_A6XX_VPC_SO_BUFFER_STRIDE(uint32_t i0) { return 0x0000921d + 0x7*i0; } +#define A6XX_VPC_SO_BUFFER_STRIDE__MASK 0x000003ff +#define A6XX_VPC_SO_BUFFER_STRIDE__SHIFT 0 +static inline uint32_t A6XX_VPC_SO_BUFFER_STRIDE(uint32_t val) +{ + return ((val >> 2) << A6XX_VPC_SO_BUFFER_STRIDE__SHIFT) & A6XX_VPC_SO_BUFFER_STRIDE__MASK; +} static inline uint32_t REG_A6XX_VPC_SO_BUFFER_OFFSET(uint32_t i0) { return 0x0000921e + 0x7*i0; } #define 
A6XX_VPC_SO_BUFFER_OFFSET__MASK 0xfffffffc @@ -4597,7 +4880,7 @@ static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE(uint32_t val) #define REG_A6XX_VPC_SO_DISABLE 0x00009306 #define A6XX_VPC_SO_DISABLE_DISABLE 0x00000001 -#define REG_A6XX_VPC_UNKNOWN_9600 0x00009600 +#define REG_A6XX_VPC_DBG_ECO_CNTL 0x00009600 #define REG_A6XX_VPC_ADDR_MODE_CNTL 0x00009601 @@ -4607,6 +4890,8 @@ static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE(uint32_t val) static inline uint32_t REG_A6XX_VPC_PERFCTR_VPC_SEL(uint32_t i0) { return 0x00009604 + 0x1*i0; } +static inline uint32_t REG_A7XX_VPC_PERFCTR_VPC_SEL(uint32_t i0) { return 0x0000960b + 0x1*i0; } + #define REG_A6XX_PC_TESS_NUM_VERTEX 0x00009800 #define REG_A6XX_PC_HS_INPUT_SIZE 0x00009801 @@ -4646,7 +4931,12 @@ static inline uint32_t A6XX_PC_TESS_CNTL_OUTPUT(enum a6xx_tess_output val) #define REG_A6XX_PC_PRIMID_PASSTHRU 0x00009806 #define REG_A6XX_PC_SO_STREAM_CNTL 0x00009808 -#define A6XX_PC_SO_STREAM_CNTL_STREAM_ENABLE 0x00008000 +#define A6XX_PC_SO_STREAM_CNTL_STREAM_ENABLE__MASK 0x00078000 +#define A6XX_PC_SO_STREAM_CNTL_STREAM_ENABLE__SHIFT 15 +static inline uint32_t A6XX_PC_SO_STREAM_CNTL_STREAM_ENABLE(uint32_t val) +{ + return ((val) << A6XX_PC_SO_STREAM_CNTL_STREAM_ENABLE__SHIFT) & A6XX_PC_SO_STREAM_CNTL_STREAM_ENABLE__MASK; +} #define REG_A6XX_PC_DGEN_SU_CONSERVATIVE_RAS_CNTL 0x0000980a #define A6XX_PC_DGEN_SU_CONSERVATIVE_RAS_CNTL_CONSERVATIVERASEN 0x00000001 @@ -4936,6 +5226,8 @@ static inline uint32_t A6XX_PC_BIN_DRAW_STRM(uint32_t val) static inline uint32_t REG_A6XX_PC_PERFCTR_PC_SEL(uint32_t i0) { return 0x00009e34 + 0x1*i0; } +static inline uint32_t REG_A7XX_PC_PERFCTR_PC_SEL(uint32_t i0) { return 0x00009e42 + 0x1*i0; } + #define REG_A6XX_PC_UNKNOWN_9E72 0x00009e72 #define REG_A6XX_VFD_CONTROL_0 0x0000a000 @@ -5138,9 +5430,11 @@ static inline uint32_t A6XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val) static inline uint32_t REG_A6XX_VFD_PERFCTR_VFD_SEL(uint32_t i0) { return 0x0000a610 + 0x1*i0; } +static inline uint32_t REG_A7XX_VFD_PERFCTR_VFD_SEL(uint32_t i0) { return 0x0000a610 + 0x1*i0; } + #define REG_A6XX_SP_VS_CTRL_REG0 0x0000a800 #define A6XX_SP_VS_CTRL_REG0_MERGEDREGS 0x00100000 -#define A6XX_SP_VS_CTRL_REG0_UNK21 0x00200000 +#define A6XX_SP_VS_CTRL_REG0_EARLYPREAMBLE 0x00200000 #define A6XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001 #define A6XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0 static inline uint32_t A6XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val) @@ -5318,7 +5612,7 @@ static inline uint32_t A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val) } #define REG_A6XX_SP_HS_CTRL_REG0 0x0000a830 -#define A6XX_SP_HS_CTRL_REG0_UNK20 0x00100000 +#define A6XX_SP_HS_CTRL_REG0_EARLYPREAMBLE 0x00100000 #define A6XX_SP_HS_CTRL_REG0_THREADMODE__MASK 0x00000001 #define A6XX_SP_HS_CTRL_REG0_THREADMODE__SHIFT 0 static inline uint32_t A6XX_SP_HS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val) @@ -5428,7 +5722,7 @@ static inline uint32_t A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val) } #define REG_A6XX_SP_DS_CTRL_REG0 0x0000a840 -#define A6XX_SP_DS_CTRL_REG0_MERGEDREGS 0x00100000 +#define A6XX_SP_DS_CTRL_REG0_EARLYPREAMBLE 0x00100000 #define A6XX_SP_DS_CTRL_REG0_THREADMODE__MASK 0x00000001 #define A6XX_SP_DS_CTRL_REG0_THREADMODE__SHIFT 0 static inline uint32_t A6XX_SP_DS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val) @@ -5606,7 +5900,7 @@ static inline uint32_t A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val) } #define REG_A6XX_SP_GS_CTRL_REG0 0x0000a870 -#define A6XX_SP_GS_CTRL_REG0_UNK20 0x00100000 
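/*
 * Aside -- an illustrative sketch, not part of the patch: these generated
 * headers follow one idiom throughout. Single bits are bare #defines,
 * multi-bit fields get a __MASK/__SHIFT pair plus an inline builder that
 * shifts and masks the value, and a full register word is composed by
 * OR-ing the helpers together. Assuming the names defined in this header
 * (and the a3xx_threadmode enumerator MULTI from the neighbouring a3xx
 * header):
 *
 *   uint32_t ctrl = A6XX_SP_VS_CTRL_REG0_THREADMODE(MULTI) |
 *                   A6XX_SP_VS_CTRL_REG0_MERGEDREGS;
 *   gpu_write(gpu, REG_A6XX_SP_VS_CTRL_REG0, ctrl);
 *
 * The renames here and below (UNK20 -> EARLYPREAMBLE and friends) keep the
 * bit positions identical; only the decoded name changes as the bits
 * become understood.
 */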
+#define A6XX_SP_GS_CTRL_REG0_EARLYPREAMBLE 0x00100000 #define A6XX_SP_GS_CTRL_REG0_THREADMODE__MASK 0x00000001 #define A6XX_SP_GS_CTRL_REG0_THREADMODE__SHIFT 0 static inline uint32_t A6XX_SP_GS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val) @@ -5862,12 +6156,8 @@ static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a6xx_threadsize val) #define A6XX_SP_FS_CTRL_REG0_UNK24 0x01000000 #define A6XX_SP_FS_CTRL_REG0_UNK25 0x02000000 #define A6XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x04000000 -#define A6XX_SP_FS_CTRL_REG0_UNK27__MASK 0x18000000 -#define A6XX_SP_FS_CTRL_REG0_UNK27__SHIFT 27 -static inline uint32_t A6XX_SP_FS_CTRL_REG0_UNK27(uint32_t val) -{ - return ((val) << A6XX_SP_FS_CTRL_REG0_UNK27__SHIFT) & A6XX_SP_FS_CTRL_REG0_UNK27__MASK; -} +#define A6XX_SP_FS_CTRL_REG0_UNK27 0x08000000 +#define A6XX_SP_FS_CTRL_REG0_EARLYPREAMBLE 0x10000000 #define A6XX_SP_FS_CTRL_REG0_MERGEDREGS 0x80000000 #define A6XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001 #define A6XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0 @@ -6069,18 +6359,14 @@ static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_COUNT(uint32_t val) { return ((val) << A6XX_SP_FS_PREFETCH_CNTL_COUNT__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_COUNT__MASK; } -#define A6XX_SP_FS_PREFETCH_CNTL_UNK3 0x00000008 -#define A6XX_SP_FS_PREFETCH_CNTL_UNK4__MASK 0x00000ff0 -#define A6XX_SP_FS_PREFETCH_CNTL_UNK4__SHIFT 4 -static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_UNK4(uint32_t val) +#define A6XX_SP_FS_PREFETCH_CNTL_IJ_WRITE_DISABLE 0x00000008 +#define A6XX_SP_FS_PREFETCH_CNTL_UNK4 0x00000010 +#define A6XX_SP_FS_PREFETCH_CNTL_WRITE_COLOR_TO_OUTPUT 0x00000020 +#define A6XX_SP_FS_PREFETCH_CNTL_UNK6__MASK 0x00007fc0 +#define A6XX_SP_FS_PREFETCH_CNTL_UNK6__SHIFT 6 +static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_UNK6(uint32_t val) { - return ((val) << A6XX_SP_FS_PREFETCH_CNTL_UNK4__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_UNK4__MASK; -} -#define A6XX_SP_FS_PREFETCH_CNTL_UNK12__MASK 0x00007000 -#define A6XX_SP_FS_PREFETCH_CNTL_UNK12__SHIFT 12 -static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_UNK12(uint32_t val) -{ - return ((val) << A6XX_SP_FS_PREFETCH_CNTL_UNK12__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_UNK12__MASK; + return ((val) << A6XX_SP_FS_PREFETCH_CNTL_UNK6__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_UNK6__MASK; } static inline uint32_t REG_A6XX_SP_FS_PREFETCH(uint32_t i0) { return 0x0000a99f + 0x1*i0; } @@ -6117,9 +6403,11 @@ static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_WRMASK(uint32_t val) return ((val) << A6XX_SP_FS_PREFETCH_CMD_WRMASK__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_WRMASK__MASK; } #define A6XX_SP_FS_PREFETCH_CMD_HALF 0x04000000 -#define A6XX_SP_FS_PREFETCH_CMD_CMD__MASK 0xf8000000 -#define A6XX_SP_FS_PREFETCH_CMD_CMD__SHIFT 27 -static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_CMD(uint32_t val) +#define A6XX_SP_FS_PREFETCH_CMD_UNK27 0x08000000 +#define A6XX_SP_FS_PREFETCH_CMD_BINDLESS 0x10000000 +#define A6XX_SP_FS_PREFETCH_CMD_CMD__MASK 0xe0000000 +#define A6XX_SP_FS_PREFETCH_CMD_CMD__SHIFT 29 +static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_CMD(enum a6xx_tex_prefetch_cmd val) { return ((val) << A6XX_SP_FS_PREFETCH_CMD_CMD__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_CMD__MASK; } @@ -6161,7 +6449,7 @@ static inline uint32_t A6XX_SP_CS_CTRL_REG0_THREADSIZE(enum a6xx_threadsize val) } #define A6XX_SP_CS_CTRL_REG0_UNK21 0x00200000 #define A6XX_SP_CS_CTRL_REG0_UNK22 0x00400000 -#define A6XX_SP_CS_CTRL_REG0_SEPARATEPROLOG 0x00800000 +#define A6XX_SP_CS_CTRL_REG0_EARLYPREAMBLE 0x00800000 #define A6XX_SP_CS_CTRL_REG0_MERGEDREGS 0x80000000 #define A6XX_SP_CS_CTRL_REG0_THREADMODE__MASK 0x00000001 #define 
A6XX_SP_CS_CTRL_REG0_THREADMODE__SHIFT 0 @@ -6355,7 +6643,19 @@ static inline uint32_t A6XX_SP_CS_TEX_CONST(uint32_t val) static inline uint32_t REG_A6XX_SP_CS_BINDLESS_BASE(uint32_t i0) { return 0x0000a9e8 + 0x2*i0; } -static inline uint32_t REG_A6XX_SP_CS_BINDLESS_BASE_ADDR(uint32_t i0) { return 0x0000a9e8 + 0x2*i0; } +static inline uint32_t REG_A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000a9e8 + 0x2*i0; } +#define A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003 +#define A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT 0 +static inline uint32_t A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx_bindless_descriptor_size val) +{ + return ((val) << A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK; +} +#define A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffc +#define A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2 +static inline uint32_t A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR(uint32_t val) +{ + return ((val >> 2) << A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK; +} #define REG_A6XX_SP_CS_IBO 0x0000a9f2 #define A6XX_SP_CS_IBO__MASK 0xffffffff @@ -6406,7 +6706,19 @@ static inline uint32_t A6XX_SP_FS_CONFIG_NIBO(uint32_t val) static inline uint32_t REG_A6XX_SP_BINDLESS_BASE(uint32_t i0) { return 0x0000ab10 + 0x2*i0; } -static inline uint32_t REG_A6XX_SP_BINDLESS_BASE_ADDR(uint32_t i0) { return 0x0000ab10 + 0x2*i0; } +static inline uint32_t REG_A6XX_SP_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000ab10 + 0x2*i0; } +#define A6XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003 +#define A6XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT 0 +static inline uint32_t A6XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx_bindless_descriptor_size val) +{ + return ((val) << A6XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A6XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK; +} +#define A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffc +#define A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2 +static inline uint32_t A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR(uint32_t val) +{ + return ((val >> 2) << A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK; +} #define REG_A6XX_SP_IBO 0x0000ab1a #define A6XX_SP_IBO__MASK 0xffffffff @@ -6436,7 +6748,7 @@ static inline uint32_t A6XX_SP_2D_DST_FORMAT_MASK(uint32_t val) return ((val) << A6XX_SP_2D_DST_FORMAT_MASK__SHIFT) & A6XX_SP_2D_DST_FORMAT_MASK__MASK; } -#define REG_A6XX_SP_UNKNOWN_AE00 0x0000ae00 +#define REG_A6XX_SP_DBG_ECO_CNTL 0x0000ae00 #define REG_A6XX_SP_ADDR_MODE_CNTL 0x0000ae01 @@ -6457,6 +6769,12 @@ static inline uint32_t A6XX_SP_2D_DST_FORMAT_MASK(uint32_t val) static inline uint32_t REG_A6XX_SP_PERFCTR_SP_SEL(uint32_t i0) { return 0x0000ae10 + 0x1*i0; } +static inline uint32_t REG_A7XX_SP_PERFCTR_HLSQ_SEL(uint32_t i0) { return 0x0000ae60 + 0x1*i0; } + +#define REG_A7XX_SP_READ_SEL 0x0000ae6d + +static inline uint32_t REG_A7XX_SP_PERFCTR_SP_SEL(uint32_t i0) { return 0x0000ae80 + 0x1*i0; } + #define REG_A6XX_SP_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE 0x0000be22 #define REG_A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR 0x0000b180 @@ -6887,6 +7205,8 @@ static inline uint32_t A6XX_HLSQ_FS_CNTL_0_UNK2(uint32_t val) #define REG_A6XX_HLSQ_CONTROL_1_REG 0x0000b982 +#define REG_A7XX_HLSQ_CONTROL_1_REG 0x0000a9c7 + #define REG_A6XX_HLSQ_CONTROL_2_REG 0x0000b983 #define A6XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff #define 
A6XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0 @@ -6906,11 +7226,37 @@ static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val) { return ((val) << A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK; } -#define A6XX_HLSQ_CONTROL_2_REG_SIZE__MASK 0xff000000 -#define A6XX_HLSQ_CONTROL_2_REG_SIZE__SHIFT 24 -static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SIZE(uint32_t val) +#define A6XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK 0xff000000 +#define A6XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT 24 +static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_CENTERRHW(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK; +} + +#define REG_A7XX_HLSQ_CONTROL_2_REG 0x0000a9c8 +#define A7XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff +#define A7XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0 +static inline uint32_t A7XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val) +{ + return ((val) << A7XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A7XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK; +} +#define A7XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK 0x0000ff00 +#define A7XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT 8 +static inline uint32_t A7XX_HLSQ_CONTROL_2_REG_SAMPLEID(uint32_t val) +{ + return ((val) << A7XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT) & A7XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK; +} +#define A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK 0x00ff0000 +#define A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT 16 +static inline uint32_t A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val) +{ + return ((val) << A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK; +} +#define A7XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK 0xff000000 +#define A7XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT 24 +static inline uint32_t A7XX_HLSQ_CONTROL_2_REG_CENTERRHW(uint32_t val) { - return ((val) << A6XX_HLSQ_CONTROL_2_REG_SIZE__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SIZE__MASK; + return ((val) << A7XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT) & A7XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK; } #define REG_A6XX_HLSQ_CONTROL_3_REG 0x0000b984 @@ -6939,6 +7285,32 @@ static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val) return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK; } +#define REG_A7XX_HLSQ_CONTROL_3_REG 0x0000a9c9 +#define A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK 0x000000ff +#define A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT 0 +static inline uint32_t A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val) +{ + return ((val) << A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK; +} +#define A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK 0x0000ff00 +#define A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT 8 +static inline uint32_t A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val) +{ + return ((val) << A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK; +} +#define A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK 0x00ff0000 +#define A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT 16 +static inline uint32_t A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val) +{ + return ((val) << A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK; +} +#define A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK 0xff000000 +#define A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT 24 +static inline uint32_t A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val) +{ + return 
((val) << A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK; +} + #define REG_A6XX_HLSQ_CONTROL_4_REG 0x0000b985 #define A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK 0x000000ff #define A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT 0 @@ -6965,6 +7337,32 @@ static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val) return ((val) << A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK; } +#define REG_A7XX_HLSQ_CONTROL_4_REG 0x0000a9ca +#define A7XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK 0x000000ff +#define A7XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT 0 +static inline uint32_t A7XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(uint32_t val) +{ + return ((val) << A7XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT) & A7XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK; +} +#define A7XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK 0x0000ff00 +#define A7XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT 8 +static inline uint32_t A7XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(uint32_t val) +{ + return ((val) << A7XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT) & A7XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK; +} +#define A7XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000 +#define A7XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16 +static inline uint32_t A7XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val) +{ + return ((val) << A7XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A7XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK; +} +#define A7XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000 +#define A7XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24 +static inline uint32_t A7XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val) +{ + return ((val) << A7XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A7XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK; +} + #define REG_A6XX_HLSQ_CONTROL_5_REG 0x0000b986 #define A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__MASK 0x000000ff #define A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__SHIFT 0 @@ -6979,6 +7377,20 @@ static inline uint32_t A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID(uint32_t va return ((val) << A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__SHIFT) & A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__MASK; } +#define REG_A7XX_HLSQ_CONTROL_5_REG 0x0000a9cb +#define A7XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__MASK 0x000000ff +#define A7XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__SHIFT 0 +static inline uint32_t A7XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID(uint32_t val) +{ + return ((val) << A7XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__SHIFT) & A7XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__MASK; +} +#define A7XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__MASK 0x0000ff00 +#define A7XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__SHIFT 8 +static inline uint32_t A7XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID(uint32_t val) +{ + return ((val) << A7XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__SHIFT) & A7XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__MASK; +} + #define REG_A6XX_HLSQ_CS_CNTL 0x0000b987 #define A6XX_HLSQ_CS_CNTL_CONSTLEN__MASK 0x000000ff #define A6XX_HLSQ_CS_CNTL_CONSTLEN__SHIFT 0 @@ -7124,7 +7536,19 @@ static inline uint32_t A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR(uint32_t val) static inline uint32_t REG_A6XX_HLSQ_CS_BINDLESS_BASE(uint32_t i0) { return 0x0000b9c0 + 0x2*i0; } -static inline uint32_t REG_A6XX_HLSQ_CS_BINDLESS_BASE_ADDR(uint32_t i0) { return 0x0000b9c0 + 0x2*i0; } +static inline uint32_t REG_A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000b9c0 + 0x2*i0; } +#define 
A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003 +#define A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT 0 +static inline uint32_t A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx_bindless_descriptor_size val) +{ + return ((val) << A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK; +} +#define A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffc +#define A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2 +static inline uint32_t A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR(uint32_t val) +{ + return ((val >> 2) << A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK; +} #define REG_A6XX_HLSQ_CS_UNKNOWN_B9D0 0x0000b9d0 #define A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE__MASK 0x0000001f @@ -7204,7 +7628,19 @@ static inline uint32_t A6XX_HLSQ_FS_CNTL_CONSTLEN(uint32_t val) static inline uint32_t REG_A6XX_HLSQ_BINDLESS_BASE(uint32_t i0) { return 0x0000bb20 + 0x2*i0; } -static inline uint32_t REG_A6XX_HLSQ_BINDLESS_BASE_ADDR(uint32_t i0) { return 0x0000bb20 + 0x2*i0; } +static inline uint32_t REG_A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000bb20 + 0x2*i0; } +#define A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003 +#define A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT 0 +static inline uint32_t A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx_bindless_descriptor_size val) +{ + return ((val) << A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK; +} +#define A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffc +#define A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2 +static inline uint32_t A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR(uint32_t val) +{ + return ((val >> 2) << A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK; +} #define REG_A6XX_HLSQ_2D_EVENT_CMD 0x0000bd80 #define A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__MASK 0x0000ff00 @@ -7224,7 +7660,7 @@ static inline uint32_t A6XX_HLSQ_2D_EVENT_CMD_EVENT(enum vgt_event_type val) #define REG_A6XX_HLSQ_UNKNOWN_BE01 0x0000be01 -#define REG_A6XX_HLSQ_UNKNOWN_BE04 0x0000be04 +#define REG_A6XX_HLSQ_DBG_ECO_CNTL 0x0000be04 #define REG_A6XX_HLSQ_ADDR_MODE_CNTL 0x0000be05 @@ -7234,6 +7670,8 @@ static inline uint32_t REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL(uint32_t i0) { return 0x00 #define REG_A6XX_HLSQ_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE 0x0000be22 +#define REG_A7XX_SP_AHB_READ_APERTURE 0x0000c000 + #define REG_A6XX_CP_EVENT_START 0x0000d600 #define A6XX_CP_EVENT_START_STATE_ID__MASK 0x000000ff #define A6XX_CP_EVENT_START_STATE_ID__SHIFT 0 @@ -7426,7 +7864,18 @@ static inline uint32_t A6XX_TEX_CONST_1_HEIGHT(uint32_t val) } #define REG_A6XX_TEX_CONST_2 0x00000002 -#define A6XX_TEX_CONST_2_BUFFER 0x00000010 +#define A6XX_TEX_CONST_2_STRUCTSIZETEXELS__MASK 0x0000fff0 +#define A6XX_TEX_CONST_2_STRUCTSIZETEXELS__SHIFT 4 +static inline uint32_t A6XX_TEX_CONST_2_STRUCTSIZETEXELS(uint32_t val) +{ + return ((val) << A6XX_TEX_CONST_2_STRUCTSIZETEXELS__SHIFT) & A6XX_TEX_CONST_2_STRUCTSIZETEXELS__MASK; +} +#define A6XX_TEX_CONST_2_STARTOFFSETTEXELS__MASK 0x003f0000 +#define A6XX_TEX_CONST_2_STARTOFFSETTEXELS__SHIFT 16 +static inline uint32_t A6XX_TEX_CONST_2_STARTOFFSETTEXELS(uint32_t val) +{ + return ((val) << A6XX_TEX_CONST_2_STARTOFFSETTEXELS__SHIFT) & A6XX_TEX_CONST_2_STARTOFFSETTEXELS__MASK; +} #define A6XX_TEX_CONST_2_PITCHALIGN__MASK 0x0000000f #define A6XX_TEX_CONST_2_PITCHALIGN__SHIFT 
0 static inline uint32_t A6XX_TEX_CONST_2_PITCHALIGN(uint32_t val) @@ -7485,6 +7934,12 @@ static inline uint32_t A6XX_TEX_CONST_5_DEPTH(uint32_t val) } #define REG_A6XX_TEX_CONST_6 0x00000006 +#define A6XX_TEX_CONST_6_MIN_LOD_CLAMP__MASK 0x00000fff +#define A6XX_TEX_CONST_6_MIN_LOD_CLAMP__SHIFT 0 +static inline uint32_t A6XX_TEX_CONST_6_MIN_LOD_CLAMP(float val) +{ + return ((((uint32_t)(val * 256.0))) << A6XX_TEX_CONST_6_MIN_LOD_CLAMP__SHIFT) & A6XX_TEX_CONST_6_MIN_LOD_CLAMP__MASK; +} #define A6XX_TEX_CONST_6_PLANE_PITCH__MASK 0xffffff00 #define A6XX_TEX_CONST_6_PLANE_PITCH__SHIFT 8 static inline uint32_t A6XX_TEX_CONST_6_PLANE_PITCH(uint32_t val) diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 7f5bc73b2040..e16b4b3f8535 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -621,6 +621,8 @@ setup_pdc: /* ensure no writes happen before the uCode is fully written */ wmb(); + a6xx_rpmh_stop(gmu); + err: if (!IS_ERR_OR_NULL(pdcptr)) iounmap(pdcptr); @@ -753,7 +755,6 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu) static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) { - static bool rpmh_init; struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; int ret; @@ -776,15 +777,9 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) /* Turn on register retention */ gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1); - /* We only need to load the RPMh microcode once */ - if (!rpmh_init) { - a6xx_gmu_rpmh_init(gmu); - rpmh_init = true; - } else { - ret = a6xx_rpmh_start(gmu); - if (ret) - return ret; - } + ret = a6xx_rpmh_start(gmu); + if (ret) + return ret; ret = a6xx_gmu_fw_load(gmu); if (ret) @@ -1482,6 +1477,12 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu) pm_runtime_force_suspend(gmu->dev); + /* + * Since cxpd is a virt device, the devlink with gmu-dev will be removed + * automatically when we do detach + */ + dev_pm_domain_detach(gmu->cxpd, false); + if (!IS_ERR_OR_NULL(gmu->gxpd)) { pm_runtime_disable(gmu->gxpd); dev_pm_domain_detach(gmu->gxpd, false); @@ -1504,6 +1505,17 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu) gmu->initialized = false; } +static int cxpd_notifier_cb(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct a6xx_gmu *gmu = container_of(nb, struct a6xx_gmu, pd_nb); + + if (action == GENPD_NOTIFY_OFF) + complete_all(&gmu->pd_gate); + + return 0; +} + int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) { struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; @@ -1608,8 +1620,10 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) if (adreno_is_a650_family(adreno_gpu)) { gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc"); - if (IS_ERR(gmu->rscc)) + if (IS_ERR(gmu->rscc)) { + ret = -ENODEV; goto err_mmio; + } } else { gmu->rscc = gmu->mmio + 0x23000; } @@ -1618,8 +1632,26 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq); gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq); - if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) + if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) { + ret = -ENODEV; goto err_mmio; + } + + gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx"); + if (IS_ERR(gmu->cxpd)) { + ret = PTR_ERR(gmu->cxpd); + goto err_mmio; + } + + if (!device_link_add(gmu->dev, gmu->cxpd, + DL_FLAG_PM_RUNTIME)) { + ret = -ENODEV; + goto detach_cxpd; + 
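/*
 * Aside -- a hedged sketch of what the attach/link sequence above buys us,
 * not part of the patch: dev_pm_domain_attach_by_name() hands back a
 * virtual device representing the "cx" power domain, and device_link_add()
 * with DL_FLAG_PM_RUNTIME makes the GMU a runtime-PM consumer of that
 * device, so the cx domain is guaranteed to be powered whenever the GMU is
 * resumed. device_link_add() returns the new link or NULL on failure,
 * which is why the error path tests !device_link_add(). Teardown is
 * symmetric: a6xx_gmu_remove() calls dev_pm_domain_detach(gmu->cxpd, false),
 * and because cxpd is a virtual device the devlink is dropped automatically
 * on detach.
 */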
} + + init_completion(&gmu->pd_gate); + complete_all(&gmu->pd_gate); + gmu->pd_nb.notifier_call = cxpd_notifier_cb; /* * Get a link to the GX power domain to reset the GPU in case of GMU @@ -1633,10 +1665,16 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) /* Set up the HFI queues */ a6xx_hfi_init(gmu); + /* Initialize RPMh */ + a6xx_gmu_rpmh_init(gmu); + gmu->initialized = true; return 0; +detach_cxpd: + dev_pm_domain_detach(gmu->cxpd, false); + err_mmio: iounmap(gmu->mmio); if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc")) @@ -1644,8 +1682,6 @@ err_mmio: free_irq(gmu->gmu_irq, gmu); free_irq(gmu->hfi_irq, gmu); - ret = -ENODEV; - err_memory: a6xx_gmu_memory_free(gmu); err_put_device: diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h index e034935b3986..0bc3eb443fec 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h @@ -4,8 +4,10 @@ #ifndef _A6XX_GMU_H_ #define _A6XX_GMU_H_ +#include <linux/completion.h> #include <linux/iopoll.h> #include <linux/interrupt.h> +#include <linux/notifier.h> #include "msm_drv.h" #include "a6xx_hfi.h" @@ -56,6 +58,7 @@ struct a6xx_gmu { int gmu_irq; struct device *gxpd; + struct device *cxpd; int idle_level; @@ -89,6 +92,10 @@ struct a6xx_gmu { bool initialized; bool hung; bool legacy; /* a618 or a630 */ + + /* For power domain callback */ + struct notifier_block pd_nb; + struct completion pd_gate; }; static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset) diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h index 4a3230978c0e..9ab15d91aced 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h @@ -8,21 +8,21 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-06-21 15:24:24) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14609 bytes, from 2021-11-24 23:05:10) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 69086 bytes, from 2022-03-03 16:41:33) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2021-11-24 23:05:10) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113358 bytes, from 2022-01-31 23:06:21) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149512 bytes, from 2022-01-31 23:06:21) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx.xml ( 184954 bytes, from 2022-03-03 16:41:33) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-07-22 15:21:56) - -Copyright (C) 2013-2021 by the following authors: +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 
18:32:52) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52) + +Copyright (C) 2013-2023 by the following authors: - Rob Clark <[email protected]> (robclark) - Ilia Mirkin <[email protected]> (imirkin) diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 6faea5049f76..0f6ed7a3f712 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -10,7 +10,7 @@ #include <linux/bitfield.h> #include <linux/devfreq.h> -#include <linux/reset.h> +#include <linux/pm_domain.h> #include <linux/soc/qcom/llcc-qcom.h> #define GPU_PAS_ID 13 @@ -187,7 +187,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) * GPU registers so we need to add 0x1a800 to the register value on A630 * to get the right value from PM4. 
*/ - get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO, + get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER, rbmemptr_stats(ring, index, alwayson_start)); /* Invalidate CCU depth and color */ @@ -228,7 +228,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0), rbmemptr_stats(ring, index, cpcycles_end)); - get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO, + get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER, rbmemptr_stats(ring, index, alwayson_end)); /* Write the fence to the scratch register */ @@ -247,7 +247,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) OUT_RING(ring, submit->seqno); trace_msm_gpu_submit_flush(submit, - gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO)); + gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER)); a6xx_flush(gpu, ring); } @@ -917,7 +917,7 @@ out: return ret; } -static int a6xx_ucode_init(struct msm_gpu *gpu) +static int a6xx_ucode_load(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); @@ -946,7 +946,23 @@ static int a6xx_ucode_init(struct msm_gpu *gpu) } } - gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE, a6xx_gpu->sqe_iova); + /* + * Expanded APRIV and targets that support WHERE_AM_I both need a + * privileged buffer to store the RPTR shadow + */ + if ((adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) && + !a6xx_gpu->shadow_bo) { + a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev, + sizeof(u32) * gpu->nr_rings, + MSM_BO_WC | MSM_BO_MAP_PRIV, + gpu->aspace, &a6xx_gpu->shadow_bo, + &a6xx_gpu->shadow_iova); + + if (IS_ERR(a6xx_gpu->shadow)) + return PTR_ERR(a6xx_gpu->shadow); + + msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow"); + } return 0; } @@ -997,7 +1013,7 @@ static int hw_init(struct msm_gpu *gpu) * memory rendering at this point in time and we don't want to block off * part of the virtual memory space. 
*/ - gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 0x00000000); + gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE, 0x00000000); gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000); /* Turn on 64 bit addressing for all blocks */ @@ -1037,18 +1053,15 @@ static int hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff); /* Disable L2 bypass in the UCHE */ - gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_LO, 0xffffffc0); - gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0001ffff); - gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_LO, 0xfffff000); - gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_HI, 0x0001ffff); - gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000); - gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff); + gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, 0x0001ffffffffffc0llu); + gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu); + gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu); if (!adreno_is_a650_family(adreno_gpu)) { /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */ - gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000); + gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN, 0x00100000); - gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO, + gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX, 0x00100000 + adreno_gpu->gmem - 1); } @@ -1135,9 +1148,7 @@ static int hw_init(struct msm_gpu *gpu) if (ret) goto out; - ret = a6xx_ucode_init(gpu); - if (ret) - goto out; + gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE, a6xx_gpu->sqe_iova); /* Set the ringbuffer address */ gpu_write64(gpu, REG_A6XX_CP_RB_BASE, gpu->rb[0]->iova); @@ -1152,26 +1163,9 @@ static int hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); - /* - * Expanded APRIV and targets that support WHERE_AM_I both need a - * privileged buffer to store the RPTR shadow - */ - - if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) { - if (!a6xx_gpu->shadow_bo) { - a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev, - sizeof(u32) * gpu->nr_rings, - MSM_BO_WC | MSM_BO_MAP_PRIV, - gpu->aspace, &a6xx_gpu->shadow_bo, - &a6xx_gpu->shadow_iova); - - if (IS_ERR(a6xx_gpu->shadow)) - return PTR_ERR(a6xx_gpu->shadow); - - msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow"); - } - - gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR_LO, + /* Configure the RPTR shadow if needed: */ + if (a6xx_gpu->shadow_bo) { + gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR, shadowptr(a6xx_gpu, gpu->rb[0])); } @@ -1259,6 +1253,7 @@ static void a6xx_recover(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct a6xx_gmu *gmu = &a6xx_gpu->gmu; int i, active_submits; adreno_dump_info(gpu); @@ -1297,6 +1292,10 @@ static void a6xx_recover(struct msm_gpu *gpu) */ gpu->active_submits = 0; + reinit_completion(&gmu->pd_gate); + dev_pm_genpd_add_notifier(gmu->cxpd, &gmu->pd_nb); + dev_pm_genpd_synced_poweroff(gmu->cxpd); + /* Drop the rpm refcount from active submits */ if (active_submits) pm_runtime_put(&gpu->pdev->dev); @@ -1304,8 +1303,10 @@ static void a6xx_recover(struct msm_gpu *gpu) /* And the final one from recover worker */ pm_runtime_put_sync(&gpu->pdev->dev); - /* Call into gpucc driver to poll for cx gdsc collapse */ - reset_control_reset(gpu->cx_collapse); + if (!wait_for_completion_timeout(&gmu->pd_gate, msecs_to_jiffies(1000))) + DRM_DEV_ERROR(&gpu->pdev->dev, "cx gdsc didn't collapse\n"); + + 
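/*
 * Aside -- a hedged sketch of the genpd notifier gate used just above, not
 * part of the patch (the names match this file; the pattern is generic):
 *
 *   reinit_completion(&gmu->pd_gate);            // arm the gate
 *   dev_pm_genpd_add_notifier(gmu->cxpd, &gmu->pd_nb);
 *   dev_pm_genpd_synced_poweroff(gmu->cxpd);     // next power-off is synchronous
 *   ...drop the runtime-PM references keeping the domain up...
 *   wait_for_completion_timeout(&gmu->pd_gate, msecs_to_jiffies(1000));
 *
 * cxpd_notifier_cb(), added earlier in this patch, is the other half: it
 * calls complete_all() when it sees GENPD_NOTIFY_OFF, releasing the wait
 * once the cx GDSC has actually collapsed. This replaces the previous
 * reset_control_reset() call into the gpucc driver.
 */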
dev_pm_genpd_remove_notifier(gmu->cxpd); pm_runtime_use_autosuspend(&gpu->pdev->dev); @@ -1712,7 +1713,7 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) /* Force the GPU power on so we can read this register */ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); - *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO); + *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER); a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); @@ -1848,8 +1849,8 @@ static bool a6xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring) * to prevent prefetching into an unrelated submit. (And * either way, at some point the ROQ will be full.) */ - cp_state.ib1_rem += gpu_read(gpu, REG_A6XX_CP_CSQ_IB1_STAT) >> 16; - cp_state.ib2_rem += gpu_read(gpu, REG_A6XX_CP_CSQ_IB2_STAT) >> 16; + cp_state.ib1_rem += gpu_read(gpu, REG_A6XX_CP_ROQ_AVAIL_IB1) >> 16; + cp_state.ib2_rem += gpu_read(gpu, REG_A6XX_CP_ROQ_AVAIL_IB2) >> 16; progress = !!memcmp(&cp_state, &ring->last_cp_state, sizeof(cp_state)); @@ -1954,6 +1955,7 @@ static const struct adreno_gpu_funcs funcs = { .get_param = adreno_get_param, .set_param = adreno_set_param, .hw_init = a6xx_hw_init, + .ucode_load = a6xx_ucode_load, .pm_suspend = a6xx_pm_suspend, .pm_resume = a6xx_pm_resume, .recover = a6xx_recover, diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index b7e217d00a22..30ecdff363e7 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -147,7 +147,7 @@ static int a6xx_crashdumper_run(struct msm_gpu *gpu, /* Make sure all pending memory writes are posted */ wmb(); - gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE_LO, dumper->iova); + gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE, dumper->iova); gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1); diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h index abb037ccc02b..51c320a2e5c0 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h +++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h @@ -8,21 +8,21 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-06-21 15:24:24) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14609 bytes, from 2021-11-24 23:05:10) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 69086 bytes, from 2022-03-03 16:41:33) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2021-11-24 23:05:10) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113358 bytes, from 2022-01-31 23:06:21) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149512 bytes, from 2022-01-31 23:06:21) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx.xml ( 184954 bytes, from 2022-03-03 16:41:33) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-01-30 18:25:22) -- 
/home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-07-22 15:21:56) - -Copyright (C) 2013-2021 by the following authors: +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 18:32:52) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52) + +Copyright (C) 2013-2023 by the following authors: - Rob Clark <[email protected]> (robclark) - Ilia Mirkin <[email protected]> (imirkin) @@ -49,11 +49,12 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
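/*
 * Aside -- illustrative, not part of the patch: the hunk below changes the
 * chip enumerators from all-zero placeholders to values that encode the
 * hardware generation, and adds A7XX. That makes ordering comparisons
 * meaningful, e.g. (sketch):
 *
 *   enum chip c = A6XX;
 *   if (c >= A6XX)
 *       pr_info("adreno a%dxx-style register map\n", c);   // prints "a6xx"
 *
 * With every enumerator previously defined as 0, such comparisons could
 * not distinguish generations at all.
 */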
enum chip { - A2XX = 0, - A3XX = 0, - A4XX = 0, - A5XX = 0, - A6XX = 0, + A2XX = 2, + A3XX = 3, + A4XX = 4, + A5XX = 5, + A6XX = 6, + A7XX = 7, }; enum adreno_pa_su_sc_draw { @@ -210,6 +211,17 @@ enum a5xx_line_mode { RECTANGULAR = 1, }; +enum a6xx_tex_prefetch_cmd { + TEX_PREFETCH_UNK0 = 0, + TEX_PREFETCH_SAM = 1, + TEX_PREFETCH_GATHER4R = 2, + TEX_PREFETCH_GATHER4G = 3, + TEX_PREFETCH_GATHER4B = 4, + TEX_PREFETCH_GATHER4A = 5, + TEX_PREFETCH_UNK6 = 6, + TEX_PREFETCH_UNK7 = 7, +}; + #define REG_AXXX_CP_RB_BASE 0x000001c0 #define REG_AXXX_CP_RB_CNTL 0x000001c1 diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index c5c4c93b3689..4d1448714285 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -432,31 +432,35 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev) if (ret) return NULL; + if (gpu->funcs->ucode_load) { + ret = gpu->funcs->ucode_load(gpu); + if (ret) + return NULL; + } + /* * Now that we have firmware loaded, and are ready to begin * booting the gpu, go ahead and enable runpm: */ pm_runtime_enable(&pdev->dev); - /* Make sure pm runtime is active and reset any previous errors */ - pm_runtime_set_active(&pdev->dev); - ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) { - pm_runtime_put_sync(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); DRM_DEV_ERROR(dev->dev, "Couldn't power up the GPU: %d\n", ret); - return NULL; + goto err_disable_rpm; } mutex_lock(&gpu->lock); ret = msm_gpu_hw_init(gpu); mutex_unlock(&gpu->lock); - pm_runtime_put_autosuspend(&pdev->dev); if (ret) { DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret); - return NULL; + goto err_put_rpm; } + pm_runtime_put_autosuspend(&pdev->dev); + #ifdef CONFIG_DEBUG_FS if (gpu->funcs->debugfs_init) { gpu->funcs->debugfs_init(gpu, dev->primary); @@ -465,6 +469,13 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev) #endif return gpu; + +err_put_rpm: + pm_runtime_put_sync(&pdev->dev); +err_disable_rpm: + pm_runtime_disable(&pdev->dev); + + return NULL; } static int find_chipid(struct device *dev, struct adreno_rev *rev) @@ -548,6 +559,10 @@ static int adreno_bind(struct device *dev, struct device *master, void *data) return PTR_ERR(gpu); } + ret = dev_pm_opp_of_find_icc_paths(dev, NULL); + if (ret) + return ret; + return 0; } @@ -566,8 +581,8 @@ static void adreno_unbind(struct device *dev, struct device *master, } static const struct component_ops a3xx_ops = { - .bind = adreno_bind, - .unbind = adreno_unbind, + .bind = adreno_bind, + .unbind = adreno_unbind, }; static void adreno_device_register_headless(void) diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index ce6b76c45b6f..2f70d0c3624e 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -503,16 +503,9 @@ struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu, int adreno_hw_init(struct msm_gpu *gpu) { - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - int ret, i; - VERB("%s", gpu->name); - ret = adreno_load_fw(adreno_gpu); - if (ret) - return ret; - - for (i = 0; i < gpu->nr_rings; i++) { + for (int i = 0; i < gpu->nr_rings; i++) { struct msm_ringbuffer *ring = gpu->rb[i]; if (!ring) @@ -922,73 +915,46 @@ void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords) ring->id); } -/* Get legacy powerlevels from qcom,gpu-pwrlevels and populate the opp table */ -static int adreno_get_legacy_pwrlevels(struct device *dev) -{ - struct 
device_node *child, *node; - int ret; - - node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels"); - if (!node) { - DRM_DEV_DEBUG(dev, "Could not find the GPU powerlevels\n"); - return -ENXIO; - } - - for_each_child_of_node(node, child) { - unsigned int val; - - ret = of_property_read_u32(child, "qcom,gpu-freq", &val); - if (ret) - continue; - - /* - * Skip the intentionally bogus clock value found at the bottom - * of most legacy frequency tables - */ - if (val != 27000000) - dev_pm_opp_add(dev, val, 0); - } - - of_node_put(node); - - return 0; -} - -static void adreno_get_pwrlevels(struct device *dev, +static int adreno_get_pwrlevels(struct device *dev, struct msm_gpu *gpu) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); unsigned long freq = ULONG_MAX; struct dev_pm_opp *opp; int ret; gpu->fast_rate = 0; - /* You down with OPP? */ - if (!of_find_property(dev->of_node, "operating-points-v2", NULL)) - ret = adreno_get_legacy_pwrlevels(dev); - else { - ret = devm_pm_opp_of_add_table(dev); - if (ret) - DRM_DEV_ERROR(dev, "Unable to set the OPP table\n"); - } - - if (!ret) { - /* Find the fastest defined rate */ - opp = dev_pm_opp_find_freq_floor(dev, &freq); - if (!IS_ERR(opp)) { - gpu->fast_rate = freq; - dev_pm_opp_put(opp); + /* devm_pm_opp_of_add_table may error out but will still create an OPP table */ + ret = devm_pm_opp_of_add_table(dev); + if (ret == -ENODEV) { + /* Special cases for ancient hw with ancient DT bindings */ + if (adreno_is_a2xx(adreno_gpu)) { + dev_warn(dev, "Unable to find the OPP table. Falling back to 200 MHz.\n"); + dev_pm_opp_add(dev, 200000000, 0); + } else if (adreno_is_a320(adreno_gpu)) { + dev_warn(dev, "Unable to find the OPP table. Falling back to 450 MHz.\n"); + dev_pm_opp_add(dev, 450000000, 0); + } else { + DRM_DEV_ERROR(dev, "Unable to find the OPP table\n"); + return -ENODEV; } + } else if (ret) { + DRM_DEV_ERROR(dev, "Unable to set the OPP table\n"); + return ret; } - if (!gpu->fast_rate) { - dev_warn(dev, - "Could not find a clock rate. Using a reasonable default\n"); - /* Pick a suitably safe clock speed for any target */ - gpu->fast_rate = 200000000; - } + /* Find the fastest defined rate */ + opp = dev_pm_opp_find_freq_floor(dev, &freq); + if (IS_ERR(opp)) + return PTR_ERR(opp); + + gpu->fast_rate = freq; + dev_pm_opp_put(opp); DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate); + + return 0; } int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu, @@ -1046,6 +1012,20 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, struct adreno_rev *rev = &config->rev; const char *gpu_name; u32 speedbin; + int ret; + + /* + * This can only be done before devm_pm_opp_of_add_table(), or + * dev_pm_opp_set_config() will WARN_ON() + */ + if (IS_ERR(devm_clk_get(dev, "core"))) { + /* + * If "core" is absent, go for the legacy clock name. + * If we got this far in probing, it's a given one of them exists. 
+ */ + devm_pm_opp_set_clkname(dev, "core_clk"); + } else + devm_pm_opp_set_clkname(dev, "core"); adreno_gpu->funcs = funcs; adreno_gpu->info = adreno_info(config->rev); @@ -1070,7 +1050,9 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, adreno_gpu_config.nr_rings = nr_rings; - adreno_get_pwrlevels(dev, gpu); + ret = adreno_get_pwrlevels(dev, gpu); + if (ret) + return ret; pm_runtime_set_autosuspend_delay(dev, adreno_gpu->info->inactive_period); diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h index 7aecf920f9b9..8a4a2d161a29 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h +++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h @@ -8,21 +8,21 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-06-21 15:24:24) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14609 bytes, from 2021-11-24 23:05:10) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 69086 bytes, from 2022-03-03 16:41:33) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2021-11-24 23:05:10) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113358 bytes, from 2022-01-31 23:06:21) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149512 bytes, from 2022-01-31 23:06:21) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx.xml ( 184954 bytes, from 2022-03-03 16:41:33) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-07-22 15:21:56) - -Copyright (C) 2013-2022 by the following authors: +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 18:32:52) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53) +- 
/home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52) + +Copyright (C) 2013-2023 by the following authors: - Rob Clark <[email protected]> (robclark) - Ilia Mirkin <[email protected]> (imirkin) @@ -76,6 +76,10 @@ enum vgt_event_type { VS_FETCH_DONE = 27, FACENESS_FLUSH = 28, WT_DONE_TS = 8, + START_FRAGMENT_CTRS = 13, + STOP_FRAGMENT_CTRS = 14, + START_COMPUTE_CTRS = 15, + STOP_COMPUTE_CTRS = 16, FLUSH_SO_0 = 17, FLUSH_SO_1 = 18, FLUSH_SO_2 = 19, @@ -86,7 +90,7 @@ enum vgt_event_type { PC_CCU_FLUSH_DEPTH_TS = 28, PC_CCU_FLUSH_COLOR_TS = 29, BLIT = 30, - UNK_25 = 37, + LRZ_CLEAR = 37, LRZ_FLUSH = 38, BLIT_OP_FILL_2D = 39, BLIT_OP_COPY_2D = 40, @@ -95,6 +99,20 @@ enum vgt_event_type { UNK_2C = 44, UNK_2D = 45, CACHE_INVALIDATE = 49, + LABEL = 63, + CCU_INVALIDATE_DEPTH = 24, + CCU_INVALIDATE_COLOR = 25, + CCU_RESOLVE_CLEAN = 26, + CCU_FLUSH_DEPTH = 28, + CCU_FLUSH_COLOR = 29, + CCU_RESOLVE = 30, + CCU_END_RESOLVE_GROUP = 31, + CCU_CLEAN_DEPTH = 32, + CCU_CLEAN_COLOR = 33, + CACHE_RESET = 48, + CACHE_CLEAN = 49, + CACHE_FLUSH7 = 50, + CACHE_INVALIDATE7 = 51, }; enum pc_di_primtype { @@ -290,6 +308,9 @@ enum adreno_pm4_type3_packets { IN_INCR_UPDT_CONST = 86, IN_INCR_UPDT_INSTR = 87, PKT4 = 4, + IN_IB_END = 10, + IN_GMU_INTERRUPT = 11, + IN_PREEMPT = 15, CP_SCRATCH_WRITE = 76, CP_REG_TO_MEM_OFFSET_MEM = 116, CP_REG_TO_MEM_OFFSET_REG = 114, @@ -297,10 +318,20 @@ enum adreno_pm4_type3_packets { CP_WAIT_TWO_REGS = 112, CP_MEMCPY = 117, CP_SET_BIN_DATA5_OFFSET = 46, + CP_CONTEXT_SWITCH = 84, CP_SET_CTXSWITCH_IB = 85, CP_REG_WRITE = 109, CP_START_BIN = 80, CP_END_BIN = 81, + CP_PREEMPT_DISABLE = 108, + CP_WAIT_TIMESTAMP = 20, + CP_THREAD_CONTROL = 23, + CP_CONTEXT_REG_BUNCH2 = 93, + CP_UNK15 = 21, + CP_UNK16 = 22, + CP_UNK18 = 24, + CP_UNK1B = 27, + CP_UNK49 = 73, }; enum adreno_state_block { @@ -480,6 +511,13 @@ enum reg_tracker { TRACK_CNTL_REG = 1, TRACK_RENDER_CNTL = 2, UNK_EVENT_WRITE = 4, + TRACK_LRZ = 8, +}; + +enum cp_thread { + CP_SET_THREAD_BR = 1, + CP_SET_THREAD_BV = 2, + CP_SET_THREAD_BOTH = 3, }; #define REG_CP_LOAD_STATE_0 0x00000000 @@ -1256,6 +1294,10 @@ static inline uint32_t CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI(uint32_t val) return ((val) << CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__SHIFT) & CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__MASK; } +#define REG_CP_SET_BIN_DATA5_7 0x00000007 + +#define REG_CP_SET_BIN_DATA5_9 0x00000009 + #define REG_CP_SET_BIN_DATA5_OFFSET_0 0x00000000 #define CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__MASK 0x003f0000 #define CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__SHIFT 16 @@ -2202,7 +2244,18 @@ static inline uint32_t A6XX_CP_REG_TEST_0_BIT(uint32_t val) { return ((val) << A6XX_CP_REG_TEST_0_BIT__SHIFT) & A6XX_CP_REG_TEST_0_BIT__MASK; } -#define A6XX_CP_REG_TEST_0_WAIT_FOR_ME 0x02000000 +#define A6XX_CP_REG_TEST_0_SKIP_WAIT_FOR_ME 0x02000000 +#define A6XX_CP_REG_TEST_0_PRED_BIT__MASK 0x7c000000 +#define A6XX_CP_REG_TEST_0_PRED_BIT__SHIFT 26 +static inline uint32_t A6XX_CP_REG_TEST_0_PRED_BIT(uint32_t val) +{ + return ((val) << A6XX_CP_REG_TEST_0_PRED_BIT__SHIFT) & A6XX_CP_REG_TEST_0_PRED_BIT__MASK; +} +#define A6XX_CP_REG_TEST_0_PRED_UPDATE 0x80000000 + +#define REG_A6XX_CP_REG_TEST_PRED_MASK 0x00000001 + +#define REG_A6XX_CP_REG_TEST_PRED_VAL 0x00000002 #define 
REG_CP_COND_REG_EXEC_0 0x00000000
#define CP_COND_REG_EXEC_0_REG0__MASK 0x0003ffff
@@ -2211,6 +2264,12 @@ static inline uint32_t CP_COND_REG_EXEC_0_REG0(uint32_t val)
{
 return ((val) << CP_COND_REG_EXEC_0_REG0__SHIFT) & CP_COND_REG_EXEC_0_REG0__MASK;
}
+#define CP_COND_REG_EXEC_0_PRED_BIT__MASK 0x007c0000
+#define CP_COND_REG_EXEC_0_PRED_BIT__SHIFT 18
+static inline uint32_t CP_COND_REG_EXEC_0_PRED_BIT(uint32_t val)
+{
+ return ((val) << CP_COND_REG_EXEC_0_PRED_BIT__SHIFT) & CP_COND_REG_EXEC_0_PRED_BIT__MASK;
+}
#define CP_COND_REG_EXEC_0_BINNING 0x02000000
#define CP_COND_REG_EXEC_0_GMEM 0x04000000
#define CP_COND_REG_EXEC_0_SYSMEM 0x08000000
@@ -2308,13 +2367,17 @@ static inline uint32_t CP_SET_CTXSWITCH_IB_2_TYPE(enum ctxswitch_ib val)
 }

 #define REG_CP_REG_WRITE_0 0x00000000
-#define CP_REG_WRITE_0_TRACKER__MASK 0x00000007
+#define CP_REG_WRITE_0_TRACKER__MASK 0x0000000f
 #define CP_REG_WRITE_0_TRACKER__SHIFT 0
 static inline uint32_t CP_REG_WRITE_0_TRACKER(enum reg_tracker val)
 {
 return ((val) << CP_REG_WRITE_0_TRACKER__SHIFT) & CP_REG_WRITE_0_TRACKER__MASK;
 }

+#define REG_CP_REG_WRITE_1 0x00000001
+
+#define REG_CP_REG_WRITE_2 0x00000002
+
 #define REG_CP_SMMU_TABLE_UPDATE_0 0x00000000
 #define CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__MASK 0xffffffff
 #define CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__SHIFT 0
@@ -2361,5 +2424,21 @@ static inline uint32_t CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(uint32_t val)

 #define REG_CP_START_BIN_BODY_DWORDS 0x00000004

+#define REG_CP_WAIT_TIMESTAMP_0 0x00000000
+
+#define REG_CP_WAIT_TIMESTAMP_ADDR 0x00000001
+
+#define REG_CP_WAIT_TIMESTAMP_TIMESTAMP 0x00000003
+
+#define REG_CP_THREAD_CONTROL_0 0x00000000
+#define CP_THREAD_CONTROL_0_THREAD__MASK 0x00000003
+#define CP_THREAD_CONTROL_0_THREAD__SHIFT 0
+static inline uint32_t CP_THREAD_CONTROL_0_THREAD(enum cp_thread val)
+{
+ return ((val) << CP_THREAD_CONTROL_0_THREAD__SHIFT) & CP_THREAD_CONTROL_0_THREAD__MASK;
+}
+#define CP_THREAD_CONTROL_0_CONCURRENT_BIN_DISABLE 0x08000000
+#define CP_THREAD_CONTROL_0_SYNC_THREADS 0x80000000
+
 #endif /* ADRENO_PM4_XML */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index f29a339a3705..b1ec0c35947b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -21,6 +21,7 @@
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_rect.h>
 #include <drm/drm_vblank.h>
+#include <drm/drm_self_refresh_helper.h>

 #include "dpu_kms.h"
 #include "dpu_hw_lm.h"
@@ -1021,6 +1022,18 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,

 DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

+ /* If disable is triggered while the CRTC is in self-refresh mode,
+ * reset the encoder software state so that a later enable does not
+ * trigger a warning when the CRTC is assigned again.
+ */
+ if (old_crtc_state->self_refresh_active) {
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ old_crtc_state->encoder_mask) {
+ dpu_encoder_assign_crtc(encoder, NULL);
+ }
+ return;
+ }
+
 /* Disable/save vblank irq handling */
 drm_crtc_vblank_off(crtc);
@@ -1032,7 +1045,14 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
 */
 if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
 release_bandwidth = true;
- dpu_encoder_assign_crtc(encoder, NULL);
+
+ /*
+ * If disable is triggered while PSR is active (e.g. a screen dim in
+ * PSR), we still need the encoder->crtc connection to process the
+ * device sleep, so preserve it for the PSR sequence.
+ if (!crtc->state->self_refresh_active) + dpu_encoder_assign_crtc(encoder, NULL); } /* wait for frame_event_done completion */ @@ -1080,6 +1100,9 @@ static void dpu_crtc_enable(struct drm_crtc *crtc, struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); struct drm_encoder *encoder; bool request_bandwidth = false; + struct drm_crtc_state *old_crtc_state; + + old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc); pm_runtime_get_sync(crtc->dev->dev); @@ -1102,8 +1125,10 @@ static void dpu_crtc_enable(struct drm_crtc *crtc, trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc); dpu_crtc->enabled = true; - drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) - dpu_encoder_assign_crtc(encoder, crtc); + if (!old_crtc_state->self_refresh_active) { + drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) + dpu_encoder_assign_crtc(encoder, crtc); + } /* Enable/restore vblank irq handling */ drm_crtc_vblank_on(crtc); @@ -1577,7 +1602,7 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane, { struct drm_crtc *crtc = NULL; struct dpu_crtc *dpu_crtc = NULL; - int i; + int i, ret; dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL); if (!dpu_crtc) @@ -1614,6 +1639,13 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane, /* initialize event handling */ spin_lock_init(&dpu_crtc->event_lock); + ret = drm_self_refresh_helper_init(crtc); + if (ret) { + DPU_ERROR("Failed to initialize %s with self-refresh helpers: %d\n", + crtc->name, ret); + return ERR_PTR(ret); + } + DRM_DEBUG_KMS("%s: successfully initialized crtc\n", dpu_crtc->name); return crtc; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 758261e8ac73..450abb14ac2f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -12,6 +12,7 @@ #include <linux/kthread.h> #include <linux/seq_file.h> +#include <drm/drm_atomic.h> #include <drm/drm_crtc.h> #include <drm/drm_file.h> #include <drm/drm_probe_helper.h> @@ -652,7 +653,7 @@ static int dpu_encoder_virt_atomic_check( if (drm_atomic_crtc_needs_modeset(crtc_state)) { dpu_rm_release(global_state, drm_enc); - if (!crtc_state->active_changed || crtc_state->active) + if (!crtc_state->active_changed || crtc_state->enable) ret = dpu_rm_reserve(&dpu_kms->rm, global_state, drm_enc, crtc_state, topology); } @@ -1171,7 +1172,8 @@ out: mutex_unlock(&dpu_enc->enc_lock); } -static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc) +static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc, + struct drm_atomic_state *state) { struct dpu_encoder_virt *dpu_enc = NULL; int ret = 0; @@ -1207,14 +1209,28 @@ out: mutex_unlock(&dpu_enc->enc_lock); } -static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) +static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc, + struct drm_atomic_state *state) { struct dpu_encoder_virt *dpu_enc = NULL; + struct drm_crtc *crtc; + struct drm_crtc_state *old_state = NULL; int i = 0; dpu_enc = to_dpu_encoder_virt(drm_enc); DPU_DEBUG_ENC(dpu_enc, "\n"); + crtc = drm_atomic_get_old_crtc_for_encoder(state, drm_enc); + if (crtc) + old_state = drm_atomic_get_old_crtc_state(state, crtc); + + /* + * The encoder is already disabled if self refresh mode was set earlier + * in the old_state of the corresponding crtc. + */
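The crtc/old-state lookup a few lines up is the generic pattern for any encoder atomic hook that needs its CRTC's previous state. A minimal, self-contained sketch, assuming only the helpers from <drm/drm_atomic.h>; the wrapper name is illustrative, not part of this patch:

#include <drm/drm_atomic.h>

static bool example_encoder_was_in_self_refresh(struct drm_atomic_state *state,
						struct drm_encoder *encoder)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_state;

	/* Map the encoder back to the CRTC it drove in the old state. */
	crtc = drm_atomic_get_old_crtc_for_encoder(state, encoder);
	if (!crtc)
		return false;

	/* The old CRTC state records whether PSR was already engaged. */
	old_state = drm_atomic_get_old_crtc_state(state, crtc);

	return old_state && old_state->self_refresh_active;
}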
+ if (old_state && old_state->self_refresh_active) + return; + mutex_lock(&dpu_enc->enc_lock); dpu_enc->enabled = false; @@ -2388,8 +2404,8 @@ static void dpu_encoder_frame_done_timeout(struct timer_list *t) static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = { .atomic_mode_set = dpu_encoder_virt_atomic_mode_set, - .disable = dpu_encoder_virt_disable, - .enable = dpu_encoder_virt_enable, + .atomic_disable = dpu_encoder_virt_atomic_disable, + .atomic_enable = dpu_encoder_virt_atomic_enable, .atomic_check = dpu_encoder_virt_atomic_check, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index 48c48106b16a..3a374292f311 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -523,6 +523,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc) { unsigned long lock_flags; int ret; + struct intf_status intf_status = {0}; if (!phys_enc->parent || !phys_enc->parent->dev) { DPU_ERROR("invalid encoder/device\n"); @@ -567,6 +568,27 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc) } } + if (phys_enc->hw_intf && phys_enc->hw_intf->ops.get_status) + phys_enc->hw_intf->ops.get_status(phys_enc->hw_intf, &intf_status); + + /* + * Wait for one vsync if the timing engine still reports itself as + * enabled after it has been disabled. + */ + if (intf_status.is_en && dpu_encoder_phys_vid_is_master(phys_enc)) { + spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); + dpu_encoder_phys_inc_pending(phys_enc); + spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); + ret = dpu_encoder_phys_vid_wait_for_vblank(phys_enc); + if (ret) { + atomic_set(&phys_enc->pending_kickoff_cnt, 0); + DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n", + DRMID(phys_enc->parent), + phys_enc->hw_intf->idx - INTF_0, ret); + } + } + + dpu_encoder_helper_phys_cleanup(phys_enc); phys_enc->enable_state = DPU_ENC_DISABLED; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index 497c9e1673ab..b39e72a72d58 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -79,7 +79,8 @@ #define INTF_SDM845_MASK (0) -#define INTF_SC7180_MASK BIT(DPU_INTF_INPUT_CTRL) | BIT(DPU_INTF_TE) +#define INTF_SC7180_MASK \ + (BIT(DPU_INTF_INPUT_CTRL) | BIT(DPU_INTF_TE) | BIT(DPU_INTF_STATUS_SUPPORTED)) #define INTF_SC7280_MASK INTF_SC7180_MASK | BIT(DPU_DATA_HCTL_EN) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h index e6590302b3bf..ae85b40e282b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h @@ -213,17 +213,19 @@ enum { /** * INTF sub-blocks - * @DPU_INTF_INPUT_CTRL Supports the setting of pp block from which - * pixel data arrives to this INTF - * @DPU_INTF_TE INTF block has TE configuration support - * @DPU_DATA_HCTL_EN Allows data to be transferred at different rate - than video timing + * @DPU_INTF_INPUT_CTRL Supports the setting of pp block from which + * pixel data arrives to this INTF + * @DPU_INTF_TE INTF block has TE configuration support + * @DPU_DATA_HCTL_EN Allows data to be transferred at different rate + * than video timing + * @DPU_INTF_STATUS_SUPPORTED INTF block has INTF_STATUS register * @DPU_INTF_MAX */ enum { DPU_INTF_INPUT_CTRL = 0x1, DPU_INTF_TE, DPU_DATA_HCTL_EN, + 
DPU_INTF_STATUS_SUPPORTED, DPU_INTF_MAX }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c index 7ce66bf3f4c8..84ee2efa9c66 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c @@ -62,6 +62,7 @@ #define INTF_LINE_COUNT 0x0B0 #define INTF_MUX 0x25C +#define INTF_STATUS 0x26C #define INTF_CFG_ACTIVE_H_EN BIT(29) #define INTF_CFG_ACTIVE_V_EN BIT(30) @@ -297,8 +298,13 @@ static void dpu_hw_intf_get_status( struct intf_status *s) { struct dpu_hw_blk_reg_map *c = &intf->hw; + unsigned long cap = intf->cap->features; + + if (cap & BIT(DPU_INTF_STATUS_SUPPORTED)) + s->is_en = DPU_REG_READ(c, INTF_STATUS) & BIT(0); + else + s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN); - s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN); s->is_prog_fetch_en = !!(DPU_REG_READ(c, INTF_CONFIG) & BIT(31)); if (s->is_en) { s->frame_count = DPU_REG_READ(c, INTF_FRAME_COUNT); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index a683bd9b5a04..681dd2e0c7e8 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -491,7 +491,7 @@ static void dpu_kms_wait_for_commit_done(struct msm_kms *kms, return; } - if (!crtc->state->active) { + if (!drm_atomic_crtc_effectively_active(crtc->state)) { DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id); return; } diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h index a2b6422948ec..cc8fde450884 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h @@ -8,26 +8,26 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 
bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22) - -Copyright (C) 2013-2021 by the following authors: +- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) + +Copyright (C) 2013-2022 by the following authors: - Rob Clark <[email protected]> (robclark) - Ilia Mirkin <[email protected]> (imirkin) diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h index 86fc44b518cb..270e11c904bd 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h @@ -8,26 +8,26 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02) -- 
/home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22) - -Copyright (C) 2013-2021 by the following authors: +- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) + +Copyright (C) 2013-2022 by the following authors: - Rob Clark <[email protected]> (robclark) - Ilia Mirkin <[email protected]> (imirkin) diff --git 
a/drivers/gpu/drm/msm/disp/mdp_common.xml.h b/drivers/gpu/drm/msm/disp/mdp_common.xml.h index be759106b621..4dd8d7db2862 100644 --- a/drivers/gpu/drm/msm/disp/mdp_common.xml.h +++ b/drivers/gpu/drm/msm/disp/mdp_common.xml.h @@ -8,26 +8,26 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22) - -Copyright (C) 2013-2021 by the following authors: +- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) +- 
/home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) + +Copyright (C) 2013-2022 by the following authors: - Rob Clark <[email protected]> (robclark) - Ilia Mirkin <[email protected]> (imirkin) diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c index 676279d0ca8d..c12a5d9647bb 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.c +++ b/drivers/gpu/drm/msm/dp/dp_catalog.c @@ -47,6 +47,14 @@ #define DP_INTERRUPT_STATUS2_MASK \ (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_MASK_SHIFT) +#define DP_INTERRUPT_STATUS4 \ + (PSR_UPDATE_INT | PSR_CAPTURE_INT | PSR_EXIT_INT | \ + PSR_UPDATE_ERROR_INT | PSR_WAKE_ERROR_INT) + +#define DP_INTERRUPT_MASK4 \ + (PSR_UPDATE_MASK | PSR_CAPTURE_MASK | PSR_EXIT_MASK | \ + PSR_UPDATE_ERROR_MASK | PSR_WAKE_ERROR_MASK) + struct dp_catalog_private { struct device *dev; struct drm_device *drm_dev; @@ -359,6 +367,23 @@ void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog) ln_mapping); } +void dp_catalog_ctrl_psr_mainlink_enable(struct dp_catalog *dp_catalog, + bool enable) +{ + u32 val; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + val = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + + if (enable) + val |= DP_MAINLINK_CTRL_ENABLE; + else + val &= ~DP_MAINLINK_CTRL_ENABLE; + + dp_write_link(catalog, REG_DP_MAINLINK_CTRL, val); +} + void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, bool enable) { @@ -610,6 +635,47 @@ void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog) dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN); } +static void dp_catalog_enable_sdp(struct dp_catalog_private *catalog) +{ + /* trigger sdp */ + dp_write_link(catalog, MMSS_DP_SDP_CFG3, UPDATE_SDP); + dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x0); +} + +void dp_catalog_ctrl_config_psr(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + u32 config; + + /* enable PSR1 function */ + config = dp_read_link(catalog, REG_PSR_CONFIG); + config |= PSR1_SUPPORTED; + dp_write_link(catalog, REG_PSR_CONFIG, config); + + dp_write_ahb(catalog, REG_DP_INTR_MASK4, DP_INTERRUPT_MASK4); + dp_catalog_enable_sdp(catalog); +} + +void dp_catalog_ctrl_set_psr(struct dp_catalog *dp_catalog, bool enter) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + u32 cmd; + + cmd = dp_read_link(catalog, REG_PSR_CMD); + + cmd &= ~(PSR_ENTER | PSR_EXIT); + + if (enter) + cmd |= PSR_ENTER; + else + cmd |= PSR_EXIT; + + dp_catalog_enable_sdp(catalog); + dp_write_link(catalog, REG_PSR_CMD, cmd); +} + u32 
dp_catalog_link_is_connected(struct dp_catalog *dp_catalog) { struct dp_catalog_private *catalog = container_of(dp_catalog, @@ -645,6 +711,20 @@ u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog) return isr & (mask | ~DP_DP_HPD_INT_MASK); } +u32 dp_catalog_ctrl_read_psr_interrupt_status(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + u32 intr, intr_ack; + + intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS4); + intr_ack = (intr & DP_INTERRUPT_STATUS4) + << DP_INTERRUPT_STATUS_ACK_SHIFT; + dp_write_ahb(catalog, REG_DP_INTR_STATUS4, intr_ack); + + return intr; +} + int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog) { struct dp_catalog_private *catalog = container_of(dp_catalog, diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h index 1f717f45c115..2174bb5f4e98 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.h +++ b/drivers/gpu/drm/msm/dp/dp_catalog.h @@ -93,6 +93,7 @@ void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state); void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 config); void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog); void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, bool enable); +void dp_catalog_ctrl_psr_mainlink_enable(struct dp_catalog *dp_catalog, bool enable); void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, u32 cc, u32 tb); void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, u32 rate, u32 stream_rate_khz, bool fixed_nvid); @@ -104,12 +105,15 @@ void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable); void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog, u32 intr_mask, bool en); void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog); +void dp_catalog_ctrl_config_psr(struct dp_catalog *dp_catalog); +void dp_catalog_ctrl_set_psr(struct dp_catalog *dp_catalog, bool enter); u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog); u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog); void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog); int dp_catalog_ctrl_update_vx_px(struct dp_catalog *dp_catalog, u8 v_level, u8 p_level); int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog); +u32 dp_catalog_ctrl_read_psr_interrupt_status(struct dp_catalog *dp_catalog); void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog, u32 dp_tu, u32 valid_boundary, u32 valid_boundary2); diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index dd26ca651a05..ea1c1f01a099 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -22,6 +22,7 @@ #define DP_KHZ_TO_HZ 1000 #define IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES (30 * HZ / 1000) /* 30 ms */ +#define PSR_OPERATION_COMPLETION_TIMEOUT_JIFFIES (300 * HZ / 1000) /* 300 ms */ #define WAIT_FOR_VIDEO_READY_TIMEOUT_JIFFIES (HZ / 2) #define DP_CTRL_INTR_READY_FOR_VIDEO BIT(0) @@ -80,6 +81,7 @@ struct dp_ctrl_private { struct dp_catalog *catalog; struct completion idle_comp; + struct completion psr_op_comp; struct completion video_comp; }; @@ -153,6 +155,9 @@ static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl) config |= DP_CONFIGURATION_CTRL_STATIC_DYNAMIC_CN; config |= DP_CONFIGURATION_CTRL_SYNC_ASYNC_CLK; + if (ctrl->panel->psr_cap.version) + config |= DP_CONFIGURATION_CTRL_SEND_VSC; + dp_catalog_ctrl_config_ctrl(ctrl->catalog, config); } @@ -1375,6 
+1380,64 @@ void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable) dp_catalog_ctrl_enable_irq(ctrl->catalog, enable); } +void dp_ctrl_config_psr(struct dp_ctrl *dp_ctrl) +{ + u8 cfg; + struct dp_ctrl_private *ctrl = container_of(dp_ctrl, + struct dp_ctrl_private, dp_ctrl); + + if (!ctrl->panel->psr_cap.version) + return; + + dp_catalog_ctrl_config_psr(ctrl->catalog); + + cfg = DP_PSR_ENABLE; + drm_dp_dpcd_write(ctrl->aux, DP_PSR_EN_CFG, &cfg, 1); +} + +void dp_ctrl_set_psr(struct dp_ctrl *dp_ctrl, bool enter) +{ + struct dp_ctrl_private *ctrl = container_of(dp_ctrl, + struct dp_ctrl_private, dp_ctrl); + + if (!ctrl->panel->psr_cap.version) + return; + + /* + * When entering PSR, + * 1. Send PSR enter SDP and wait for the PSR_UPDATE_INT + * 2. Turn off video + * 3. Disable the mainlink + * + * When exiting PSR, + * 1. Enable the mainlink + * 2. Send the PSR exit SDP + */ + if (enter) { + reinit_completion(&ctrl->psr_op_comp); + dp_catalog_ctrl_set_psr(ctrl->catalog, true); + + if (!wait_for_completion_timeout(&ctrl->psr_op_comp, + PSR_OPERATION_COMPLETION_TIMEOUT_JIFFIES)) { + DRM_ERROR("PSR_ENTRY timed out\n"); + dp_catalog_ctrl_set_psr(ctrl->catalog, false); + return; + } + + dp_ctrl_push_idle(dp_ctrl); + dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + + dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, false); + } else { + dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, true); + + dp_catalog_ctrl_set_psr(ctrl->catalog, false); + dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); + dp_ctrl_wait4video_ready(ctrl); + dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + } +} + void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl) { struct dp_ctrl_private *ctrl; @@ -1989,6 +2052,22 @@ void dp_ctrl_isr(struct dp_ctrl *dp_ctrl) ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + if (ctrl->panel->psr_cap.version) { + isr = dp_catalog_ctrl_read_psr_interrupt_status(ctrl->catalog); + + if (isr) + complete(&ctrl->psr_op_comp); + + if (isr & PSR_EXIT_INT) + drm_dbg_dp(ctrl->drm_dev, "PSR exit done\n"); + + if (isr & PSR_UPDATE_INT) + drm_dbg_dp(ctrl->drm_dev, "PSR frame update done\n"); + + if (isr & PSR_CAPTURE_INT) + drm_dbg_dp(ctrl->drm_dev, "PSR frame capture done\n"); + } + isr = dp_catalog_ctrl_get_interrupt(ctrl->catalog); if (isr & DP_CTRL_INTR_READY_FOR_VIDEO) { @@ -2035,6 +2114,7 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link, dev_err(dev, "failed to add DP OPP table\n"); init_completion(&ctrl->idle_comp); + init_completion(&ctrl->psr_op_comp); init_completion(&ctrl->video_comp); /* in parameters */ diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h index 9f29734af81c..b226683de949 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.h +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h @@ -37,4 +37,7 @@ void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl); void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl); void dp_ctrl_irq_phy_exit(struct dp_ctrl *dp_ctrl); +void dp_ctrl_set_psr(struct dp_ctrl *dp_ctrl, bool enable); +void dp_ctrl_config_psr(struct dp_ctrl *dp_ctrl); + #endif /* _DP_CTRL_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index bde1a7ce442f..ffb21a633b6a 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -406,6 +406,8 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp) edid = dp->panel->edid; + dp->dp_display.psr_supported = dp->panel->psr_cap.version; + dp->audio_supported = drm_detect_monitor_audio(edid); 
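psr_cap itself is filled in during DPCD probing (see dp_panel_read_psr_cap() further down in this patch). A condensed, self-contained sketch of such a probe, assuming a valid drm_dp_aux handle and the current <drm/display/drm_dp_helper.h> header location; the function name is illustrative:

#include <drm/display/drm_dp_helper.h>

static bool example_sink_supports_psr(struct drm_dp_aux *aux)
{
	u8 psr_cap[2]; /* DP_PSR_SUPPORT (0x070) and DP_PSR_CAPS (0x071) */

	if (drm_dp_dpcd_read(aux, DP_PSR_SUPPORT, psr_cap,
			     sizeof(psr_cap)) != sizeof(psr_cap))
		return false;

	/* A non-zero version byte means the sink implements some PSR level. */
	return psr_cap[0] != 0;
}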
dp_panel_handle_sink_request(dp->panel); @@ -910,6 +912,10 @@ static int dp_display_post_enable(struct msm_dp *dp_display) /* signal the connect event late to synchronize video and display */ dp_display_handle_plugged_change(dp_display, true); + + if (dp_display->psr_supported) + dp_ctrl_config_psr(dp->ctrl); + return 0; } @@ -990,14 +996,6 @@ enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge, return -EINVAL; } - /* - * The eDP controller currently does not have a reliable way of - * enabling panel power to read sink capabilities. So, we rely - * on the panel driver to populate only supported modes for now. - */ - if (dp->is_edp) - return MODE_OK; - if (mode->clock > DP_MAX_PIXEL_CLK_KHZ) return MODE_CLOCK_HIGH; @@ -1104,6 +1102,19 @@ static void dp_display_config_hpd(struct dp_display_private *dp) enable_irq(dp->irq); } +void dp_display_set_psr(struct msm_dp *dp_display, bool enter) +{ + struct dp_display_private *dp; + + if (!dp_display) { + DRM_ERROR("invalid params\n"); + return; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + dp_ctrl_set_psr(dp->ctrl, enter); +} + static int hpd_event_thread(void *data) { struct dp_display_private *dp_priv; @@ -1652,7 +1663,8 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, return 0; } -void dp_bridge_enable(struct drm_bridge *drm_bridge) +void dp_bridge_atomic_enable(struct drm_bridge *drm_bridge, + struct drm_bridge_state *old_bridge_state) { struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); struct msm_dp *dp = dp_bridge->dp_display; @@ -1707,7 +1719,8 @@ void dp_bridge_enable(struct drm_bridge *drm_bridge) mutex_unlock(&dp_display->event_mutex); } -void dp_bridge_disable(struct drm_bridge *drm_bridge) +void dp_bridge_atomic_disable(struct drm_bridge *drm_bridge, + struct drm_bridge_state *old_bridge_state) { struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); struct msm_dp *dp = dp_bridge->dp_display; @@ -1718,7 +1731,8 @@ void dp_bridge_disable(struct drm_bridge *drm_bridge) dp_ctrl_push_idle(dp_display->ctrl); } -void dp_bridge_post_disable(struct drm_bridge *drm_bridge) +void dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, + struct drm_bridge_state *old_bridge_state) { struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); struct msm_dp *dp = dp_bridge->dp_display; diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h index 371337d0fae2..1e9415ab15d8 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.h +++ b/drivers/gpu/drm/msm/dp/dp_display.h @@ -29,6 +29,7 @@ struct msm_dp { u32 max_dp_lanes; struct dp_audio *dp_audio; + bool psr_supported; }; int dp_display_set_plugged_cb(struct msm_dp *dp_display, @@ -39,5 +40,6 @@ bool dp_display_check_video_test(struct msm_dp *dp_display); int dp_display_get_test_bpp(struct msm_dp *dp_display); void dp_display_signal_audio_start(struct msm_dp *dp_display); void dp_display_signal_audio_complete(struct msm_dp *dp_display); +void dp_display_set_psr(struct msm_dp *dp, bool enter); #endif /* _DP_DISPLAY_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c index 275370f21115..785d76639497 100644 --- a/drivers/gpu/drm/msm/dp/dp_drm.c +++ b/drivers/gpu/drm/msm/dp/dp_drm.c @@ -94,9 +94,9 @@ static const struct drm_bridge_funcs dp_bridge_ops = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, - .enable = 
dp_bridge_enable, - .disable = dp_bridge_disable, - .post_disable = dp_bridge_post_disable, + .atomic_enable = dp_bridge_atomic_enable, + .atomic_disable = dp_bridge_atomic_disable, + .atomic_post_disable = dp_bridge_atomic_post_disable, .mode_set = dp_bridge_mode_set, .mode_valid = dp_bridge_mode_valid, .get_modes = dp_bridge_get_modes, @@ -107,6 +107,171 @@ static const struct drm_bridge_funcs dp_bridge_ops = { .hpd_notify = dp_bridge_hpd_notify, }; +static int edp_bridge_atomic_check(struct drm_bridge *drm_bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct msm_dp *dp = to_dp_bridge(drm_bridge)->dp_display; + + if (WARN_ON(!conn_state)) + return -ENODEV; + + conn_state->self_refresh_aware = dp->psr_supported; + + if (!conn_state->crtc || !crtc_state) + return 0; + + if (crtc_state->self_refresh_active && !dp->psr_supported) + return -EINVAL; + + return 0; +} + +static void edp_bridge_atomic_enable(struct drm_bridge *drm_bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct drm_atomic_state *atomic_state = old_bridge_state->base.state; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state; + struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); + struct msm_dp *dp = dp_bridge->dp_display; + + /* + * Check the old state of the crtc to determine if the panel + * was put into psr state previously by the edp_bridge_atomic_disable. + * If the panel is in psr, just exit psr state and skip the full + * bridge enable sequence. + */ + crtc = drm_atomic_get_new_crtc_for_encoder(atomic_state, + drm_bridge->encoder); + if (!crtc) + return; + + old_crtc_state = drm_atomic_get_old_crtc_state(atomic_state, crtc); + + if (old_crtc_state && old_crtc_state->self_refresh_active) { + dp_display_set_psr(dp, false); + return; + } + + dp_bridge_atomic_enable(drm_bridge, old_bridge_state); +} + +static void edp_bridge_atomic_disable(struct drm_bridge *drm_bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct drm_atomic_state *atomic_state = old_bridge_state->base.state; + struct drm_crtc *crtc; + struct drm_crtc_state *new_crtc_state = NULL, *old_crtc_state = NULL; + struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); + struct msm_dp *dp = dp_bridge->dp_display; + + crtc = drm_atomic_get_old_crtc_for_encoder(atomic_state, + drm_bridge->encoder); + if (!crtc) + goto out; + + new_crtc_state = drm_atomic_get_new_crtc_state(atomic_state, crtc); + if (!new_crtc_state) + goto out; + + old_crtc_state = drm_atomic_get_old_crtc_state(atomic_state, crtc); + if (!old_crtc_state) + goto out; + + /* + * Set self refresh mode if the current crtc state is active. + * + * If the old crtc state is active, then this is a display disable + * call while the sink is in psr state. So, exit psr here. + * The eDP controller will be disabled in the + * edp_bridge_atomic_post_disable function. + * + * We have observed that the sink gets stuck in self refresh if psr + * exit is skipped when a display disable occurs while the sink is + * in psr state. + */
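The comment above describes a three-way decision; written out as a free-standing predicate (all names illustrative, not part of this patch), it reads:

#include <drm/drm_crtc.h>

enum example_edp_disable_action {
	EXAMPLE_ENTER_PSR,	/* keep hardware up, let the sink self refresh */
	EXAMPLE_EXIT_PSR,	/* wake the sink first; full disable follows in post_disable */
	EXAMPLE_FULL_DISABLE,	/* ordinary bridge disable path */
};

static enum example_edp_disable_action
example_edp_disable_action(const struct drm_crtc_state *old_crtc_state,
			   const struct drm_crtc_state *new_crtc_state)
{
	if (new_crtc_state->self_refresh_active)
		return EXAMPLE_ENTER_PSR;

	if (old_crtc_state->self_refresh_active)
		return EXAMPLE_EXIT_PSR;

	return EXAMPLE_FULL_DISABLE;
}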
+ if (new_crtc_state->self_refresh_active) { + dp_display_set_psr(dp, true); + return; + } else if (old_crtc_state->self_refresh_active) { + dp_display_set_psr(dp, false); + return; + } + +out: + dp_bridge_atomic_disable(drm_bridge, old_bridge_state); +} + +static void edp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct drm_atomic_state *atomic_state = old_bridge_state->base.state; + struct drm_crtc *crtc; + struct drm_crtc_state *new_crtc_state = NULL; + + crtc = drm_atomic_get_old_crtc_for_encoder(atomic_state, + drm_bridge->encoder); + if (!crtc) + return; + + new_crtc_state = drm_atomic_get_new_crtc_state(atomic_state, crtc); + if (!new_crtc_state) + return; + + /* + * Self refresh mode is already set in edp_bridge_atomic_disable. + */ + if (new_crtc_state->self_refresh_active) + return; + + dp_bridge_atomic_post_disable(drm_bridge, old_bridge_state); +} + +/** + * edp_bridge_mode_valid - callback to determine if specified mode is valid + * @bridge: Pointer to drm bridge structure + * @info: display info + * @mode: Pointer to drm mode structure + * Returns: Validity status for specified mode + */ +static enum drm_mode_status edp_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + struct msm_dp *dp; + int mode_pclk_khz = mode->clock; + + dp = to_dp_bridge(bridge)->dp_display; + + if (!dp || !mode_pclk_khz || !dp->connector) { + DRM_ERROR("invalid params\n"); + return -EINVAL; + } + + if (mode->clock > DP_MAX_PIXEL_CLK_KHZ) + return MODE_CLOCK_HIGH; + + /* + * The eDP controller currently does not have a reliable way of + * enabling panel power to read sink capabilities. So, we rely + * on the panel driver to populate only supported modes for now. + */ + return MODE_OK; +} + +static const struct drm_bridge_funcs edp_bridge_ops = { + .atomic_enable = edp_bridge_atomic_enable, + .atomic_disable = edp_bridge_atomic_disable, + .atomic_post_disable = edp_bridge_atomic_post_disable, + .mode_set = dp_bridge_mode_set, + .mode_valid = edp_bridge_mode_valid, + .atomic_reset = drm_atomic_helper_bridge_reset, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_check = edp_bridge_atomic_check, +}; + struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev, struct drm_encoder *encoder) { @@ -121,7 +286,7 @@ struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device * dp_bridge->dp_display = dp_display; bridge = &dp_bridge->bridge; - bridge->funcs = &dp_bridge_ops; + bridge->funcs = dp_display->is_edp ? 
&edp_bridge_ops : &dp_bridge_ops; bridge->type = dp_display->connector_type; /* diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h index 250f7c66201f..afe79b85e183 100644 --- a/drivers/gpu/drm/msm/dp/dp_drm.h +++ b/drivers/gpu/drm/msm/dp/dp_drm.h @@ -23,9 +23,12 @@ struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct dr struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev, struct drm_encoder *encoder); -void dp_bridge_enable(struct drm_bridge *drm_bridge); -void dp_bridge_disable(struct drm_bridge *drm_bridge); -void dp_bridge_post_disable(struct drm_bridge *drm_bridge); +void dp_bridge_atomic_enable(struct drm_bridge *drm_bridge, + struct drm_bridge_state *old_bridge_state); +void dp_bridge_atomic_disable(struct drm_bridge *drm_bridge, + struct drm_bridge_state *old_bridge_state); +void dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, + struct drm_bridge_state *old_bridge_state); enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode); diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c index f1f1d646539d..5a4817ac086f 100644 --- a/drivers/gpu/drm/msm/dp/dp_link.c +++ b/drivers/gpu/drm/msm/dp/dp_link.c @@ -937,6 +937,38 @@ static int dp_link_process_phy_test_pattern_request( return 0; } +static bool dp_link_read_psr_error_status(struct dp_link_private *link) +{ + u8 status; + + drm_dp_dpcd_read(link->aux, DP_PSR_ERROR_STATUS, &status, 1); + + if (status & DP_PSR_LINK_CRC_ERROR) + DRM_ERROR("PSR LINK CRC ERROR\n"); + else if (status & DP_PSR_RFB_STORAGE_ERROR) + DRM_ERROR("PSR RFB STORAGE ERROR\n"); + else if (status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR) + DRM_ERROR("PSR VSC SDP UNCORRECTABLE ERROR\n"); + else + return false; + + return true; +} + +static bool dp_link_psr_capability_changed(struct dp_link_private *link) +{ + u8 status; + + drm_dp_dpcd_read(link->aux, DP_PSR_ESI, &status, 1); + + if (status & DP_PSR_CAPS_CHANGE) { + drm_dbg_dp(link->drm_dev, "PSR Capability Change\n"); + return true; + } + + return false; +} + static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r) { return link_status[r - DP_LANE0_1_STATUS]; @@ -1055,6 +1087,10 @@ int dp_link_process_request(struct dp_link *dp_link) dp_link->sink_request |= DP_TEST_LINK_TRAINING; } else if (!dp_link_process_phy_test_pattern_request(link)) { dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN; + } else if (dp_link_read_psr_error_status(link)) { + DRM_ERROR("PSR IRQ_HPD received\n"); + } else if (dp_link_psr_capability_changed(link)) { + drm_dbg_dp(link->drm_dev, "PSR Capability changed\n"); } else { ret = dp_link_process_link_status_update(link); if (!ret) { diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c index 1800d8963f8a..42d52510ffd4 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.c +++ b/drivers/gpu/drm/msm/dp/dp_panel.c @@ -20,6 +20,27 @@ struct dp_panel_private { bool aux_cfg_update_done; }; +static void dp_panel_read_psr_cap(struct dp_panel_private *panel) +{ + ssize_t rlen; + struct dp_panel *dp_panel; + + dp_panel = &panel->dp_panel; + + /* edp sink */ + if (dp_panel->dpcd[DP_EDP_CONFIGURATION_CAP]) { + rlen = drm_dp_dpcd_read(panel->aux, DP_PSR_SUPPORT, + &dp_panel->psr_cap, sizeof(dp_panel->psr_cap)); + if (rlen == sizeof(dp_panel->psr_cap)) { + drm_dbg_dp(panel->drm_dev, + "psr version: 0x%x, psr_cap: 0x%x\n", + dp_panel->psr_cap.version, 
dp_panel->psr_cap.capabilities); + } else + DRM_ERROR("failed to read psr info, rlen=%zd\n", rlen); + } +} + static int dp_panel_read_dpcd(struct dp_panel *dp_panel) { int rc = 0; @@ -107,6 +128,7 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel) } } + dp_panel_read_psr_cap(panel); end: return rc; } diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h index f04d0210b5cd..45208b45eb53 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.h +++ b/drivers/gpu/drm/msm/dp/dp_panel.h @@ -34,6 +34,11 @@ struct dp_panel_in { struct dp_catalog *catalog; }; +struct dp_panel_psr { + u8 version; + u8 capabilities; +}; + struct dp_panel { /* dpcd raw data */ u8 dpcd[DP_RECEIVER_CAP_SIZE + 1]; @@ -46,6 +51,7 @@ struct dp_panel { struct edid *edid; struct drm_connector *connector; struct dp_display_mode dp_mode; + struct dp_panel_psr psr_cap; bool video_test; u32 vic; diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h index 268602803d9a..ea85a691e72b 100644 --- a/drivers/gpu/drm/msm/dp/dp_reg.h +++ b/drivers/gpu/drm/msm/dp/dp_reg.h @@ -22,6 +22,20 @@ #define REG_DP_INTR_STATUS2 (0x00000024) #define REG_DP_INTR_STATUS3 (0x00000028) +#define REG_DP_INTR_STATUS4 (0x0000002C) +#define PSR_UPDATE_INT (0x00000001) +#define PSR_CAPTURE_INT (0x00000004) +#define PSR_EXIT_INT (0x00000010) +#define PSR_UPDATE_ERROR_INT (0x00000040) +#define PSR_WAKE_ERROR_INT (0x00000100) + +#define REG_DP_INTR_MASK4 (0x00000030) +#define PSR_UPDATE_MASK (0x00000001) +#define PSR_CAPTURE_MASK (0x00000002) +#define PSR_EXIT_MASK (0x00000004) +#define PSR_UPDATE_ERROR_MASK (0x00000008) +#define PSR_WAKE_ERROR_MASK (0x00000010) + #define REG_DP_DP_HPD_CTRL (0x00000000) #define DP_DP_HPD_CTRL_HPD_EN (0x00000001) @@ -164,6 +178,16 @@ #define MMSS_DP_AUDIO_TIMING_RBR_48 (0x00000094) #define MMSS_DP_AUDIO_TIMING_HBR_48 (0x00000098) +#define REG_PSR_CONFIG (0x00000100) +#define DISABLE_PSR (0x00000000) +#define PSR1_SUPPORTED (0x00000001) +#define PSR2_WITHOUT_FRAMESYNC (0x00000002) +#define PSR2_WITH_FRAMESYNC (0x00000003) + +#define REG_PSR_CMD (0x00000110) +#define PSR_ENTER (0x00000001) +#define PSR_EXIT (0x00000002) + #define MMSS_DP_PSR_CRC_RG (0x00000154) #define MMSS_DP_PSR_CRC_B (0x00000158) @@ -184,6 +208,9 @@ #define MMSS_DP_AUDIO_STREAM_0 (0x00000240) #define MMSS_DP_AUDIO_STREAM_1 (0x00000244) +#define MMSS_DP_SDP_CFG3 (0x0000024c) +#define UPDATE_SDP (0x00000001) + #define MMSS_DP_EXTENSION_0 (0x00000250) #define MMSS_DP_EXTENSION_1 (0x00000254) #define MMSS_DP_EXTENSION_2 (0x00000258) diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h index d1b2a17b0a66..a4a154601114 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.xml.h +++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h @@ -8,26 +8,26 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 
2021-09-16 22:37:02) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22) - -Copyright (C) 2013-2021 by the following authors: +- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) + +Copyright (C) 2013-2022 by the following authors: - Rob Clark <[email protected]> (robclark) - Ilia Mirkin <[email 
protected]> (imirkin) @@ -785,4 +785,5 @@ static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH(ui return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__MASK; } + #endif /* DSI_XML */ diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_10nm.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_10nm.xml.h index 8b1be69ccf89..a2ae8777e59e 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_phy_10nm.xml.h +++ b/drivers/gpu/drm/msm/dsi/dsi_phy_10nm.xml.h @@ -8,26 +8,26 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22) - -Copyright (C) 2013-2021 by the following authors: +- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) +- 
/home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) + +Copyright (C) 2013-2022 by the following authors: - Rob Clark <[email protected]> (robclark) - Ilia Mirkin <[email protected]> (imirkin) diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_14nm.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_14nm.xml.h index 515f1fa605bf..24e2fdc0cde1 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_phy_14nm.xml.h +++ b/drivers/gpu/drm/msm/dsi/dsi_phy_14nm.xml.h @@ -8,26 +8,26 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22) -- 
/home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22) - -Copyright (C) 2013-2021 by the following authors: +- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) + +Copyright (C) 2013-2022 by the following authors: - Rob Clark <[email protected]> (robclark) - Ilia Mirkin <[email protected]> (imirkin) diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_20nm.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_20nm.xml.h index 81e4622eb358..6352541f37e9 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_phy_20nm.xml.h +++ b/drivers/gpu/drm/msm/dsi/dsi_phy_20nm.xml.h @@ -8,26 +8,26 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02) -- 
/home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22) - -Copyright (C) 2013-2021 by the following authors: +- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) + +Copyright (C) 2013-2022 by the following authors: - Rob Clark <[email protected]> (robclark) - Ilia Mirkin <[email protected]> (imirkin) diff --git 
a/drivers/gpu/drm/msm/dsi/dsi_phy_28nm.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_28nm.xml.h index 8c7db35c12c8..178bd4fd7893 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_phy_28nm.xml.h +++ b/drivers/gpu/drm/msm/dsi/dsi_phy_28nm.xml.h @@ -8,26 +8,26 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22) - -Copyright (C) 2013-2021 by the following authors: +- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) +- 
/home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) + +Copyright (C) 2013-2022 by the following authors: - Rob Clark <[email protected]> (robclark) - Ilia Mirkin <[email protected]> (imirkin) diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_28nm_8960.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_28nm_8960.xml.h index 44eeca31a811..5f900bb53519 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_phy_28nm_8960.xml.h +++ b/drivers/gpu/drm/msm/dsi/dsi_phy_28nm_8960.xml.h @@ -8,26 +8,26 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22) - -Copyright (C) 2013-2021 by the following authors: +- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, 
from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) + +Copyright (C) 2013-2022 by the following authors: - Rob Clark <[email protected]> (robclark) - Ilia Mirkin <[email protected]> (imirkin) diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_7nm.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_7nm.xml.h index 5bc061797003..584cbd0205ef 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_phy_7nm.xml.h +++ b/drivers/gpu/drm/msm/dsi/dsi_phy_7nm.xml.h @@ -8,24 +8,24 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56) -- 
/home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) Copyright (C) 2013-2022 by the following authors: - Rob Clark <[email protected]> (robclark) diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h index 03bc322d0487..7062f7164216 100644 --- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h +++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h @@ -8,26 +8,26 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- 
/home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22) - -Copyright (C) 2013-2021 by the following authors: +- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) +- 
/home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) + +Copyright (C) 2013-2022 by the following authors: - Rob Clark <[email protected]> (robclark) - Ilia Mirkin <[email protected]> (imirkin) diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h index 2ae711cbec36..344a1a1620cd 100644 --- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h +++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h @@ -8,26 +8,26 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, 
from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) -Copyright (C) 2013-2021 by the following authors: +Copyright (C) 2013-2022 by the following authors: - Rob Clark <[email protected]> (robclark) - Ilia Mirkin <[email protected]> (imirkin) diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h index c9eb26df67c0..973b460486a5 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h @@ -8,26 +8,26 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13) -- 
/home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22) - -Copyright (C) 2013-2021 by the following authors: +- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 18746 bytes, from 2022-04-28 17:29:36) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) + +Copyright (C) 2013-2022 by the following authors: - Rob Clark <[email protected]> (robclark) - Ilia Mirkin <[email protected]> (imirkin) @@ -776,10 +776,28 @@ static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val) #define REG_HDMI_8x74_ANA_CFG1 0x00000004 +#define REG_HDMI_8x74_ANA_CFG2 0x00000008 + +#define REG_HDMI_8x74_ANA_CFG3 0x0000000c + #define REG_HDMI_8x74_PD_CTRL0 0x00000010 #define REG_HDMI_8x74_PD_CTRL1 0x00000014 +#define REG_HDMI_8x74_GLB_CFG 0x00000018 + +#define REG_HDMI_8x74_DCC_CFG0 0x0000001c + +#define REG_HDMI_8x74_DCC_CFG1 0x00000020 + +#define REG_HDMI_8x74_TXCAL_CFG0 0x00000024 + +#define REG_HDMI_8x74_TXCAL_CFG1 0x00000028 + +#define REG_HDMI_8x74_TXCAL_CFG2 0x0000002c + +#define REG_HDMI_8x74_TXCAL_CFG3 0x00000030 + #define REG_HDMI_8x74_BIST_CFG0 0x00000034 #define REG_HDMI_8x74_BIST_PATN0 0x0000003c @@ -790,6 +808,8 @@ static inline uint32_t 
HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val) #define REG_HDMI_8x74_BIST_PATN3 0x00000048 +#define REG_HDMI_8x74_STATUS 0x0000005c + #define REG_HDMI_28nm_PHY_PLL_REFCLK_CFG 0x00000000 #define REG_HDMI_28nm_PHY_PLL_POSTDIV1_CFG 0x00000004 @@ -877,6 +897,8 @@ static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val) #define REG_HDMI_28nm_PHY_PLL_DEBUG_BUS_SEL 0x000000a0 +#define REG_HDMI_28nm_PHY_PLL_STATUS 0x000000c0 + #define REG_HDMI_8996_PHY_CFG 0x00000000 #define REG_HDMI_8996_PHY_PD_CTL 0x00000004 diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h index 3edc698c4df5..498801526695 100644 --- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h +++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h @@ -8,26 +8,26 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22) -- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22) - -Copyright (C) 2013-2021 by the following authors: +- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 
18746 bytes, from 2022-04-28 17:29:36) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2022-03-08 17:40:42) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 42350 bytes, from 2022-09-20 17:45:56) +- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2022-03-08 17:40:42) + +Copyright (C) 2013-2022 by the following authors: - Rob Clark <[email protected]> (robclark) - Ilia Mirkin <[email protected]> (imirkin) diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index aca48c868c14..ce1a77b607d1 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -918,13 +918,11 @@ static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id, * retired, so if the fence is not found it means there is nothing * to wait for */ - ret = mutex_lock_interruptible(&queue->idr_lock); - if (ret) - return ret; + spin_lock(&queue->idr_lock); fence = idr_find(&queue->fence_idr, fence_id); if (fence) fence = dma_fence_get_rcu(fence); - mutex_unlock(&queue->idr_lock); + spin_unlock(&queue->idr_lock); if (!fence) return 0; diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c index 56641408ea74..bab3d84f1686 100644 --- a/drivers/gpu/drm/msm/msm_fence.c +++ b/drivers/gpu/drm/msm/msm_fence.c @@ -99,7 +99,7 @@ static const struct dma_fence_ops msm_fence_ops = { }; struct dma_fence * -msm_fence_alloc(struct msm_fence_context *fctx) +msm_fence_alloc(void) { struct msm_fence *f; @@ -107,10 +107,16 @@ msm_fence_alloc(struct msm_fence_context *fctx) if (!f) return ERR_PTR(-ENOMEM); + return &f->base; +} + +void +msm_fence_init(struct dma_fence *fence, struct msm_fence_context *fctx) +{ + struct msm_fence *f = to_msm_fence(fence); + f->fctx = fctx; dma_fence_init(&f->base, &msm_fence_ops, &fctx->spinlock, fctx->context, ++fctx->last_fence); - - return &f->base; } diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h index 7f1798c54cd1..f913fa22d8fe 100644 --- a/drivers/gpu/drm/msm/msm_fence.h +++ b/drivers/gpu/drm/msm/msm_fence.h @@ -61,7 +61,8 @@ void msm_fence_context_free(struct msm_fence_context *fctx); bool msm_fence_completed(struct msm_fence_context *fctx, uint32_t fence); void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence); -struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx); +struct dma_fence * msm_fence_alloc(void); +void msm_fence_init(struct dma_fence 
*fence, struct msm_fence_context *fctx); static inline bool fence_before(uint32_t a, uint32_t b) diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index c2fb98a94bc3..9008f967637a 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -19,8 +19,6 @@ #include "msm_gpu.h" #include "msm_mmu.h" -static void update_lru(struct drm_gem_object *obj); - static dma_addr_t physaddr(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); @@ -63,6 +61,49 @@ static void sync_for_cpu(struct msm_gem_object *msm_obj) dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0); } +static void update_lru_active(struct drm_gem_object *obj) +{ + struct msm_drm_private *priv = obj->dev->dev_private; + struct msm_gem_object *msm_obj = to_msm_bo(obj); + + GEM_WARN_ON(!msm_obj->pages); + + if (msm_obj->pin_count) { + drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj); + } else if (msm_obj->madv == MSM_MADV_WILLNEED) { + drm_gem_lru_move_tail_locked(&priv->lru.willneed, obj); + } else { + GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED); + + drm_gem_lru_move_tail_locked(&priv->lru.dontneed, obj); + } +} + +static void update_lru_locked(struct drm_gem_object *obj) +{ + struct msm_drm_private *priv = obj->dev->dev_private; + struct msm_gem_object *msm_obj = to_msm_bo(obj); + + msm_gem_assert_locked(&msm_obj->base); + + if (!msm_obj->pages) { + GEM_WARN_ON(msm_obj->pin_count); + + drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj); + } else { + update_lru_active(obj); + } +} + +static void update_lru(struct drm_gem_object *obj) +{ + struct msm_drm_private *priv = obj->dev->dev_private; + + mutex_lock(&priv->lru.lock); + update_lru_locked(obj); + mutex_unlock(&priv->lru.lock); +} + /* allocate pages from VRAM carveout, used when no IOMMU: */ static struct page **get_pages_vram(struct drm_gem_object *obj, int npages) { @@ -180,6 +221,7 @@ static void put_pages(struct drm_gem_object *obj) static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj) { + struct msm_drm_private *priv = obj->dev->dev_private; struct msm_gem_object *msm_obj = to_msm_bo(obj); struct page **p; @@ -190,10 +232,13 @@ static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj) } p = get_pages(obj); - if (!IS_ERR(p)) { - to_msm_bo(obj)->pin_count++; - update_lru(obj); - } + if (IS_ERR(p)) + return p; + + mutex_lock(&priv->lru.lock); + msm_obj->pin_count++; + update_lru_locked(obj); + mutex_unlock(&priv->lru.lock); return p; } @@ -309,12 +354,10 @@ static struct msm_gem_vma *add_vma(struct drm_gem_object *obj, msm_gem_assert_locked(obj); - vma = kzalloc(sizeof(*vma), GFP_KERNEL); + vma = msm_gem_vma_new(aspace); if (!vma) return ERR_PTR(-ENOMEM); - vma->aspace = aspace; - list_add_tail(&vma->list, &msm_obj->vmas); return vma; @@ -361,9 +404,9 @@ put_iova_spaces(struct drm_gem_object *obj, bool close) list_for_each_entry(vma, &msm_obj->vmas, list) { if (vma->aspace) { - msm_gem_purge_vma(vma->aspace, vma); + msm_gem_vma_purge(vma); if (close) - msm_gem_close_vma(vma->aspace, vma); + msm_gem_vma_close(vma); } } } @@ -399,7 +442,7 @@ static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj, if (IS_ERR(vma)) return vma; - ret = msm_gem_init_vma(aspace, vma, obj->size, + ret = msm_gem_vma_init(vma, obj->size, range_start, range_end); if (ret) { del_vma(vma); @@ -437,7 +480,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma) if (IS_ERR(pages)) return PTR_ERR(pages); - ret = msm_gem_map_vma(vma->aspace, vma, 
prot, msm_obj->sgt, obj->size); + ret = msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size); if (ret) msm_gem_unpin_locked(obj); @@ -446,14 +489,34 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma) void msm_gem_unpin_locked(struct drm_gem_object *obj) { + struct msm_drm_private *priv = obj->dev->dev_private; struct msm_gem_object *msm_obj = to_msm_bo(obj); msm_gem_assert_locked(obj); + mutex_lock(&priv->lru.lock); msm_obj->pin_count--; GEM_WARN_ON(msm_obj->pin_count < 0); + update_lru_locked(obj); + mutex_unlock(&priv->lru.lock); +} - update_lru(obj); +/* Special unpin path for use in the fence-signaling path, avoiding the need + * to hold the obj lock by only depending on things that are protected by + * the LRU lock. In particular we know that we already have backing + * and that the object's dma_resv has the fence for the current + * submit/job which will prevent us racing against page eviction. + */ +void msm_gem_unpin_active(struct drm_gem_object *obj) +{ + struct msm_drm_private *priv = obj->dev->dev_private; + struct msm_gem_object *msm_obj = to_msm_bo(obj); + + mutex_lock(&priv->lru.lock); + msm_obj->pin_count--; + GEM_WARN_ON(msm_obj->pin_count < 0); + update_lru_active(obj); + mutex_unlock(&priv->lru.lock); } struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj, @@ -539,8 +602,8 @@ static int clear_iova(struct drm_gem_object *obj, if (msm_gem_vma_inuse(vma)) return -EBUSY; - msm_gem_purge_vma(vma->aspace, vma); - msm_gem_close_vma(vma->aspace, vma); + msm_gem_vma_purge(vma); + msm_gem_vma_close(vma); del_vma(vma); return 0; @@ -589,7 +652,7 @@ void msm_gem_unpin_iova(struct drm_gem_object *obj, msm_gem_lock(obj); vma = lookup_vma(obj, aspace); if (!GEM_WARN_ON(!vma)) { - msm_gem_unpin_vma(vma); + msm_gem_vma_unpin(vma); msm_gem_unpin_locked(obj); } msm_gem_unlock(obj); @@ -628,6 +691,7 @@ fail: static void *get_vaddr(struct drm_gem_object *obj, unsigned madv) { struct msm_gem_object *msm_obj = to_msm_bo(obj); + struct page **pages; int ret = 0; msm_gem_assert_locked(obj); @@ -641,6 +705,10 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv) return ERR_PTR(-EBUSY); } + pages = msm_gem_pin_pages_locked(obj); + if (IS_ERR(pages)) + return ERR_CAST(pages); + /* increment vmap_count *before* vmap() call, so shrinker can * check vmap_count (is_vunmapable()) outside of msm_obj lock. 
* This guarantees that we won't try to msm_gem_vunmap() this @@ -650,25 +718,19 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv) msm_obj->vmap_count++; if (!msm_obj->vaddr) { - struct page **pages = get_pages(obj); - if (IS_ERR(pages)) { - ret = PTR_ERR(pages); - goto fail; - } msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL)); if (msm_obj->vaddr == NULL) { ret = -ENOMEM; goto fail; } - - update_lru(obj); } return msm_obj->vaddr; fail: msm_obj->vmap_count--; + msm_gem_unpin_locked(obj); return ERR_PTR(ret); } @@ -707,6 +769,7 @@ void msm_gem_put_vaddr_locked(struct drm_gem_object *obj) GEM_WARN_ON(msm_obj->vmap_count < 1); msm_obj->vmap_count--; + msm_gem_unpin_locked(obj); } void msm_gem_put_vaddr(struct drm_gem_object *obj) @@ -721,10 +784,13 @@ void msm_gem_put_vaddr(struct drm_gem_object *obj) */ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv) { + struct msm_drm_private *priv = obj->dev->dev_private; struct msm_gem_object *msm_obj = to_msm_bo(obj); msm_gem_lock(obj); + mutex_lock(&priv->lru.lock); + if (msm_obj->madv != __MSM_MADV_PURGED) msm_obj->madv = madv; @@ -733,7 +799,9 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv) /* If the obj is inactive, we might need to move it * between inactive lists */ - update_lru(obj); + update_lru_locked(obj); + + mutex_unlock(&priv->lru.lock); msm_gem_unlock(obj); @@ -743,6 +811,7 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv) void msm_gem_purge(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; + struct msm_drm_private *priv = obj->dev->dev_private; struct msm_gem_object *msm_obj = to_msm_bo(obj); msm_gem_assert_locked(obj); @@ -759,7 +828,10 @@ void msm_gem_purge(struct drm_gem_object *obj) put_iova_vmas(obj); + mutex_lock(&priv->lru.lock); + /* A one-way transition: */ msm_obj->madv = __MSM_MADV_PURGED; + mutex_unlock(&priv->lru.lock); drm_gem_free_mmap_offset(obj); @@ -806,29 +878,6 @@ void msm_gem_vunmap(struct drm_gem_object *obj) msm_obj->vaddr = NULL; } -static void update_lru(struct drm_gem_object *obj) -{ - struct msm_drm_private *priv = obj->dev->dev_private; - struct msm_gem_object *msm_obj = to_msm_bo(obj); - - msm_gem_assert_locked(&msm_obj->base); - - if (!msm_obj->pages) { - GEM_WARN_ON(msm_obj->pin_count); - GEM_WARN_ON(msm_obj->vmap_count); - - drm_gem_lru_move_tail(&priv->lru.unbacked, obj); - } else if (msm_obj->pin_count || msm_obj->vmap_count) { - drm_gem_lru_move_tail(&priv->lru.pinned, obj); - } else if (msm_obj->madv == MSM_MADV_WILLNEED) { - drm_gem_lru_move_tail(&priv->lru.willneed, obj); - } else { - GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED); - - drm_gem_lru_move_tail(&priv->lru.dontneed, obj); - } -} - bool msm_gem_active(struct drm_gem_object *obj) { msm_gem_assert_locked(obj); diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index c4844cf3a585..2bd6846c83a9 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -59,6 +59,7 @@ struct msm_fence_context; struct msm_gem_vma { struct drm_mm_node node; + spinlock_t lock; uint64_t iova; struct msm_gem_address_space *aspace; struct list_head list; /* node in msm_gem_object::vmas */ @@ -69,19 +70,15 @@ struct msm_gem_vma { struct msm_fence_context *fctx[MSM_GPU_MAX_RINGS]; }; -int msm_gem_init_vma(struct msm_gem_address_space *aspace, - struct msm_gem_vma *vma, int size, +struct msm_gem_vma *msm_gem_vma_new(struct msm_gem_address_space *aspace); +int msm_gem_vma_init(struct msm_gem_vma 
*vma, int size, u64 range_start, u64 range_end); bool msm_gem_vma_inuse(struct msm_gem_vma *vma); -void msm_gem_purge_vma(struct msm_gem_address_space *aspace, - struct msm_gem_vma *vma); -void msm_gem_unpin_vma(struct msm_gem_vma *vma); -void msm_gem_unpin_vma_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx); -int msm_gem_map_vma(struct msm_gem_address_space *aspace, - struct msm_gem_vma *vma, int prot, - struct sg_table *sgt, int size); -void msm_gem_close_vma(struct msm_gem_address_space *aspace, - struct msm_gem_vma *vma); +void msm_gem_vma_purge(struct msm_gem_vma *vma); +void msm_gem_vma_unpin(struct msm_gem_vma *vma); +void msm_gem_vma_unpin_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx); +int msm_gem_vma_map(struct msm_gem_vma *vma, int prot, struct sg_table *sgt, int size); +void msm_gem_vma_close(struct msm_gem_vma *vma); struct msm_gem_object { struct drm_gem_object base; @@ -89,7 +86,9 @@ struct msm_gem_object { uint32_t flags; /** - * Advice: are the backing pages purgeable? + * madv: are the backing pages purgeable? + * + * Protected by obj lock and LRU lock */ uint8_t madv; @@ -117,6 +116,11 @@ struct msm_gem_object { char name[32]; /* Identifier to print for the debugfs files */ + /** + * pin_count: Number of times the pages are pinned + * + * Protected by LRU lock. + */ int pin_count; }; #define to_msm_bo(x) container_of(x, struct msm_gem_object, base) @@ -124,6 +128,7 @@ struct msm_gem_object { uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma); void msm_gem_unpin_locked(struct drm_gem_object *obj); +void msm_gem_unpin_active(struct drm_gem_object *obj); struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj, struct msm_gem_address_space *aspace); int msm_gem_get_iova(struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index ac8ed731f76d..89375c2e422b 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -41,8 +41,16 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, if (!submit) return ERR_PTR(-ENOMEM); + submit->hw_fence = msm_fence_alloc(); + if (IS_ERR(submit->hw_fence)) { + ret = PTR_ERR(submit->hw_fence); + kfree(submit); + return ERR_PTR(ret); + } + ret = drm_sched_job_init(&submit->base, queue->entity, queue); if (ret) { + kfree(submit->hw_fence); kfree(submit); return ERR_PTR(ret); } @@ -72,9 +80,9 @@ void __msm_gem_submit_destroy(struct kref *kref) unsigned i; if (submit->fence_id) { - mutex_lock(&submit->queue->idr_lock); + spin_lock(&submit->queue->idr_lock); idr_remove(&submit->queue->fence_idr, submit->fence_id); - mutex_unlock(&submit->queue->idr_lock); + spin_unlock(&submit->queue->idr_lock); } dma_fence_put(submit->user_fence); @@ -242,7 +250,7 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i, submit->bos[i].flags &= ~cleanup_flags; if (flags & BO_VMA_PINNED) - msm_gem_unpin_vma(submit->bos[i].vma); + msm_gem_vma_unpin(submit->bos[i].vma); if (flags & BO_OBJ_PINNED) msm_gem_unpin_locked(obj); @@ -874,7 +882,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, submit->nr_cmds = i; - mutex_lock(&queue->idr_lock); + idr_preload(GFP_KERNEL); + + spin_lock(&queue->idr_lock); /* * If using userspace provided seqno fence, validate that the id @@ -884,7 +894,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, */ if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) && 
idr_find(&queue->fence_idr, args->fence)) { - mutex_unlock(&queue->idr_lock); + spin_unlock(&queue->idr_lock); + idr_preload_end(); ret = -EINVAL; goto out; } @@ -902,7 +913,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, submit->fence_id = args->fence; ret = idr_alloc_u32(&queue->fence_idr, submit->user_fence, &submit->fence_id, submit->fence_id, - GFP_KERNEL); + GFP_NOWAIT); /* * We've already validated that the fence_id slot is valid, * so if idr_alloc_u32 failed, it is a kernel bug @@ -915,10 +926,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, */ submit->fence_id = idr_alloc_cyclic(&queue->fence_idr, submit->user_fence, 1, - INT_MAX, GFP_KERNEL); + INT_MAX, GFP_NOWAIT); } - mutex_unlock(&queue->idr_lock); + spin_unlock(&queue->idr_lock); + idr_preload_end(); if (submit->fence_id < 0) { ret = submit->fence_id; diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index c471aebcdbab..98287ed99960 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -40,25 +40,34 @@ msm_gem_address_space_get(struct msm_gem_address_space *aspace) bool msm_gem_vma_inuse(struct msm_gem_vma *vma) { + bool ret = true; + + spin_lock(&vma->lock); + if (vma->inuse > 0) - return true; + goto out; while (vma->fence_mask) { unsigned idx = ffs(vma->fence_mask) - 1; if (!msm_fence_completed(vma->fctx[idx], vma->fence[idx])) - return true; + goto out; vma->fence_mask &= ~BIT(idx); } - return false; + ret = false; + +out: + spin_unlock(&vma->lock); + + return ret; } /* Actually unmap memory for the vma */ -void msm_gem_purge_vma(struct msm_gem_address_space *aspace, - struct msm_gem_vma *vma) +void msm_gem_vma_purge(struct msm_gem_vma *vma) { + struct msm_gem_address_space *aspace = vma->aspace; unsigned size = vma->node.size; /* Print a message if we try to purge a vma in use */ @@ -68,14 +77,12 @@ void msm_gem_purge_vma(struct msm_gem_address_space *aspace, if (!vma->mapped) return; - if (aspace->mmu) - aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size); + aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size); vma->mapped = false; } -/* Remove reference counts for the mapping */ -void msm_gem_unpin_vma(struct msm_gem_vma *vma) +static void vma_unpin_locked(struct msm_gem_vma *vma) { if (GEM_WARN_ON(!vma->inuse)) return; @@ -83,50 +90,75 @@ void msm_gem_unpin_vma(struct msm_gem_vma *vma) vma->inuse--; } +/* Remove reference counts for the mapping */ +void msm_gem_vma_unpin(struct msm_gem_vma *vma) +{ + spin_lock(&vma->lock); + vma_unpin_locked(vma); + spin_unlock(&vma->lock); +} + /* Replace pin reference with fence: */ -void msm_gem_unpin_vma_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx) +void msm_gem_vma_unpin_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx) { + spin_lock(&vma->lock); vma->fctx[fctx->index] = fctx; vma->fence[fctx->index] = fctx->last_fence; vma->fence_mask |= BIT(fctx->index); - msm_gem_unpin_vma(vma); + vma_unpin_locked(vma); + spin_unlock(&vma->lock); } /* Map and pin vma: */ int -msm_gem_map_vma(struct msm_gem_address_space *aspace, - struct msm_gem_vma *vma, int prot, +msm_gem_vma_map(struct msm_gem_vma *vma, int prot, struct sg_table *sgt, int size) { - int ret = 0; + struct msm_gem_address_space *aspace = vma->aspace; + int ret; if (GEM_WARN_ON(!vma->iova)) return -EINVAL; /* Increase the usage counter */ + spin_lock(&vma->lock); vma->inuse++; + spin_unlock(&vma->lock); if (vma->mapped) return 0; vma->mapped = true; - if (aspace && aspace->mmu) - ret = 
aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, - size, prot); + if (!aspace) + return 0; + + /* + * NOTE: iommu/io-pgtable can allocate pages, so we cannot hold + * a lock across map/unmap which is also used in the job_run() + * path, as this can cause deadlock in job_run() vs shrinker/ + * reclaim. + * + * Revisit this if we can come up with a scheme to pre-alloc pages + * for the pgtable in map/unmap ops. + */ + ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, size, prot); if (ret) { vma->mapped = false; + spin_lock(&vma->lock); vma->inuse--; + spin_unlock(&vma->lock); } return ret; } /* Close an iova. Warn if it is still in use */ -void msm_gem_close_vma(struct msm_gem_address_space *aspace, - struct msm_gem_vma *vma) +void msm_gem_vma_close(struct msm_gem_vma *vma) { + struct msm_gem_address_space *aspace = vma->aspace; + GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped); spin_lock(&aspace->lock); @@ -139,13 +171,30 @@ void msm_gem_close_vma(struct msm_gem_address_space *aspace, msm_gem_address_space_put(aspace); } +struct msm_gem_vma *msm_gem_vma_new(struct msm_gem_address_space *aspace) +{ + struct msm_gem_vma *vma; + + vma = kzalloc(sizeof(*vma), GFP_KERNEL); + if (!vma) + return NULL; + + spin_lock_init(&vma->lock); + vma->aspace = aspace; + + return vma; +} + /* Initialize a new vma and allocate an iova for it */ -int msm_gem_init_vma(struct msm_gem_address_space *aspace, - struct msm_gem_vma *vma, int size, +int msm_gem_vma_init(struct msm_gem_vma *vma, int size, u64 range_start, u64 range_end) { + struct msm_gem_address_space *aspace = vma->aspace; int ret; + if (GEM_WARN_ON(!aspace)) + return -EINVAL; + if (GEM_WARN_ON(vma->iova)) return -EBUSY; diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 380249500325..26ebda40be4f 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -16,7 +16,6 @@ #include <generated/utsrelease.h> #include <linux/string_helpers.h> #include <linux/devcoredump.h> -#include <linux/reset.h> #include <linux/sched/task.h> /* @@ -59,7 +58,7 @@ static int disable_pwrrail(struct msm_gpu *gpu) static int enable_clk(struct msm_gpu *gpu) { if (gpu->core_clk && gpu->fast_rate) - clk_set_rate(gpu->core_clk, gpu->fast_rate); + dev_pm_opp_set_rate(&gpu->pdev->dev, gpu->fast_rate); /* Set the RBBM timer rate to 19.2Mhz */ if (gpu->rbbmtimer_clk) @@ -78,7 +77,7 @@ static int disable_clk(struct msm_gpu *gpu) * will be rounded down to zero anyway so it all works out. 
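The enable_clk()/disable_clk() changes above route the core clock through the OPP framework instead of raw clk_set_rate(), so any regulator or interconnect votes attached to the OPP table scale together with the frequency. A hedged sketch of the call, assuming an OPP table was registered for the device at probe time (e.g. via devm_pm_opp_of_add_table()):

#include <linux/pm_opp.h>

/*
 * Request a target frequency via the device's OPP table. Unlike
 * clk_set_rate(), this also applies whatever supply voltage or
 * bandwidth the matching OPP entry describes.
 */
static int example_scale_gpu(struct device *dev, unsigned long rate_hz)
{
	return dev_pm_opp_set_rate(dev, rate_hz);
}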
*/ if (gpu->core_clk) - clk_set_rate(gpu->core_clk, 27000000); + dev_pm_opp_set_rate(&gpu->pdev->dev, 27000000); if (gpu->rbbmtimer_clk) clk_set_rate(gpu->rbbmtimer_clk, 0); @@ -935,9 +934,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, if (IS_ERR(gpu->gpu_cx)) gpu->gpu_cx = NULL; - gpu->cx_collapse = devm_reset_control_get_optional_exclusive(&pdev->dev, - "cx_collapse"); - gpu->pdev = pdev; platform_set_drvdata(pdev, &gpu->adreno_smmu); diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index fc1c0d8611a8..7a4fa1b8655b 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -13,7 +13,6 @@ #include <linux/interconnect.h> #include <linux/pm_opp.h> #include <linux/regulator/consumer.h> -#include <linux/reset.h> #include "msm_drv.h" #include "msm_fence.h" @@ -50,6 +49,12 @@ struct msm_gpu_funcs { int (*set_param)(struct msm_gpu *gpu, struct msm_file_private *ctx, uint32_t param, uint64_t value, uint32_t len); int (*hw_init)(struct msm_gpu *gpu); + + /** + * @ucode_load: Optional hook to upload fw to GEM objs + */ + int (*ucode_load)(struct msm_gpu *gpu); + int (*pm_suspend)(struct msm_gpu *gpu); int (*pm_resume)(struct msm_gpu *gpu); void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit); @@ -281,9 +286,6 @@ struct msm_gpu { bool hw_apriv; struct thermal_cooling_device *cooling; - - /* To poll for cx gdsc collapse during gpu recovery */ - struct reset_control *cx_collapse; }; static inline struct msm_gpu *dev_to_gpu(struct device *dev) @@ -499,7 +501,7 @@ struct msm_gpu_submitqueue { struct msm_file_private *ctx; struct list_head node; struct idr fence_idr; - struct mutex idr_lock; + struct spinlock idr_lock; struct mutex lock; struct kref ref; struct drm_sched_entity *entity; diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c index e27dbf12b5e8..ea70c1c32d94 100644 --- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c +++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c @@ -48,7 +48,7 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq, gpu->funcs->gpu_set_freq(gpu, opp, df->suspended); mutex_unlock(&df->lock); } else { - clk_set_rate(gpu->core_clk, *freq); + dev_pm_opp_set_rate(dev, *freq); } dev_pm_opp_put(opp); diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 57a8e9564540..b60199184409 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -18,16 +18,14 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job) struct msm_gpu *gpu = submit->gpu; int i; - submit->hw_fence = msm_fence_alloc(fctx); + msm_fence_init(submit->hw_fence, fctx); for (i = 0; i < submit->nr_bos; i++) { struct drm_gem_object *obj = &submit->bos[i].obj->base; - msm_gem_lock(obj); - msm_gem_unpin_vma_fenced(submit->bos[i].vma, fctx); - msm_gem_unpin_locked(obj); + msm_gem_vma_unpin_fenced(submit->bos[i].vma, fctx); + msm_gem_unpin_active(obj); submit->bos[i].flags &= ~(BO_VMA_PINNED | BO_OBJ_PINNED); - msm_gem_unlock(obj); } /* TODO move submit path over to using a per-ring lock.. 
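The msm_ringbuffer.c hunk above belongs to the same theme as the GFP_NOWAIT change: msm_job_run() executes in the fence-signalling path, where allocating memory can recurse into reclaim that waits on the very fence being produced, so the fence is allocated at submit time and only initialized in run_job(). A rough sketch of the split; the types and init helper here are hypothetical:

#include <linux/dma-fence.h>
#include <linux/slab.h>

struct example_fence {
	struct dma_fence base;
};

void example_fence_init(struct example_fence *f);	/* driver-specific */

struct example_job {
	struct example_fence *hw_fence;	/* preallocated at submit time */
};

/* Submit ioctl: normal process context, GFP_KERNEL is fine here. */
static int example_submit_prepare(struct example_job *job)
{
	job->hw_fence = kzalloc(sizeof(*job->hw_fence), GFP_KERNEL);
	return job->hw_fence ? 0 : -ENOMEM;
}

/*
 * drm_sched run_job callback: a fence-signalling critical section.
 * No allocation here, only initialization of the preallocated object.
 */
static struct dma_fence *example_run_job(struct example_job *job)
{
	example_fence_init(job->hw_fence);
	return &job->hw_fence->base;
}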
*/ diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c index c6929e205b51..0e803125a325 100644 --- a/drivers/gpu/drm/msm/msm_submitqueue.c +++ b/drivers/gpu/drm/msm/msm_submitqueue.c @@ -200,7 +200,7 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx, *id = queue->id; idr_init(&queue->fence_idr); - mutex_init(&queue->idr_lock); + spin_lock_init(&queue->idr_lock); mutex_init(&queue->lock); list_add_tail(&queue->node, &ctx->submitqueues); diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 288eebc70a67..c2ec91cc845d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1015,9 +1015,6 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, if (ret) goto out_ntfy; - if (nvbo->bo.pin_count) - NV_WARN(drm, "Moving pinned object %p!\n", nvbo); - if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile); if (ret) diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c index a7db7c31064b..e844be49e11e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c +++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c @@ -41,7 +41,7 @@ static ssize_t nouveau_hwmon_show_temp1_auto_point1_pwm(struct device *d, struct device_attribute *a, char *buf) { - return snprintf(buf, PAGE_SIZE, "%d\n", 100); + return sysfs_emit(buf, "%d\n", 100); } static SENSOR_DEVICE_ATTR(temp1_auto_point1_pwm, 0444, nouveau_hwmon_show_temp1_auto_point1_pwm, NULL, 0); @@ -54,8 +54,8 @@ nouveau_hwmon_temp1_auto_point1_temp(struct device *d, struct nouveau_drm *drm = nouveau_drm(dev); struct nvkm_therm *therm = nvxx_therm(&drm->client.device); - return snprintf(buf, PAGE_SIZE, "%d\n", - therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST) * 1000); + return sysfs_emit(buf, "%d\n", + therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST) * 1000); } static ssize_t nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d, @@ -87,8 +87,8 @@ nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d, struct nouveau_drm *drm = nouveau_drm(dev); struct nvkm_therm *therm = nvxx_therm(&drm->client.device); - return snprintf(buf, PAGE_SIZE, "%d\n", - therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000); + return sysfs_emit(buf, "%d\n", + therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000); } static ssize_t nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d, diff --git a/drivers/gpu/drm/nouveau/nouveau_led.h b/drivers/gpu/drm/nouveau/nouveau_led.h index 21a5775028cc..bc9bc7208da3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_led.h +++ b/drivers/gpu/drm/nouveau/nouveau_led.h @@ -27,7 +27,7 @@ #include "nouveau_drv.h" -struct led_classdev; +#include <linux/leds.h> struct nouveau_led { struct drm_device *dev; diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index 8eeee71c0000..29cf5fa39ff2 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -318,6 +318,17 @@ config DRM_PANEL_LG_LG4573 Say Y here if you want to enable support for LG4573 RGB panel. To compile this driver as a module, choose M here. +config DRM_PANEL_MAGNACHIP_D53E6EA8966 + tristate "Magnachip D53E6EA8966 DSI panel" + depends on OF && SPI + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + select DRM_MIPI_DBI + help + DRM panel driver for the Samsung AMS495QA01 panel controlled + with the Magnachip D53E6EA8966 panel IC. 
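The nouveau_hwmon.c hunk above swaps snprintf(buf, PAGE_SIZE, ...) for sysfs_emit(), the preferred helper for sysfs show() callbacks: it asserts that buf really is the page-sized, page-aligned sysfs buffer and keeps the PAGE_SIZE bound in one place. A minimal sketch of a show() routine in that style (attribute name and reading are placeholders):

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t temp1_input_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int millicelsius = 42000;	/* placeholder sensor reading */

	return sysfs_emit(buf, "%d\n", millicelsius);
}
static DEVICE_ATTR_RO(temp1_input);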
This panel receives + video data via DSI but commands via 9-bit SPI using DBI. + config DRM_PANEL_NEC_NL8048HL11 tristate "NEC NL8048HL11 RGB panel" depends on GPIOLIB && OF && SPI diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index c05aa9e23907..b3e8ba29edd3 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o +obj-$(CONFIG_DRM_PANEL_MAGNACHIP_D53E6EA8966) += panel-magnachip-d53e6ea8966.o obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o obj-$(CONFIG_DRM_PANEL_NEWVISION_NV3051D) += panel-newvision-nv3051d.o obj-$(CONFIG_DRM_PANEL_NEWVISION_NV3052C) += panel-newvision-nv3052c.o diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c index 48c1702a863b..323c33c9c37a 100644 --- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c +++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c @@ -167,6 +167,202 @@ static const struct drm_panel_funcs jadard_funcs = { .get_modes = jadard_get_modes, }; +static const struct jadard_init_cmd radxa_display_8hd_ad002_init_cmds[] = { + { .data = { 0xE0, 0x00 } }, + { .data = { 0xE1, 0x93 } }, + { .data = { 0xE2, 0x65 } }, + { .data = { 0xE3, 0xF8 } }, + { .data = { 0x80, 0x03 } }, + { .data = { 0xE0, 0x01 } }, + { .data = { 0x00, 0x00 } }, + { .data = { 0x01, 0x7E } }, + { .data = { 0x03, 0x00 } }, + { .data = { 0x04, 0x65 } }, + { .data = { 0x0C, 0x74 } }, + { .data = { 0x17, 0x00 } }, + { .data = { 0x18, 0xB7 } }, + { .data = { 0x19, 0x00 } }, + { .data = { 0x1A, 0x00 } }, + { .data = { 0x1B, 0xB7 } }, + { .data = { 0x1C, 0x00 } }, + { .data = { 0x24, 0xFE } }, + { .data = { 0x37, 0x19 } }, + { .data = { 0x38, 0x05 } }, + { .data = { 0x39, 0x00 } }, + { .data = { 0x3A, 0x01 } }, + { .data = { 0x3B, 0x01 } }, + { .data = { 0x3C, 0x70 } }, + { .data = { 0x3D, 0xFF } }, + { .data = { 0x3E, 0xFF } }, + { .data = { 0x3F, 0xFF } }, + { .data = { 0x40, 0x06 } }, + { .data = { 0x41, 0xA0 } }, + { .data = { 0x43, 0x1E } }, + { .data = { 0x44, 0x0F } }, + { .data = { 0x45, 0x28 } }, + { .data = { 0x4B, 0x04 } }, + { .data = { 0x55, 0x02 } }, + { .data = { 0x56, 0x01 } }, + { .data = { 0x57, 0xA9 } }, + { .data = { 0x58, 0x0A } }, + { .data = { 0x59, 0x0A } }, + { .data = { 0x5A, 0x37 } }, + { .data = { 0x5B, 0x19 } }, + { .data = { 0x5D, 0x78 } }, + { .data = { 0x5E, 0x63 } }, + { .data = { 0x5F, 0x54 } }, + { .data = { 0x60, 0x49 } }, + { .data = { 0x61, 0x45 } }, + { .data = { 0x62, 0x38 } }, + { .data = { 0x63, 0x3D } }, + { .data = { 0x64, 0x28 } }, + { .data = { 0x65, 0x43 } }, + { .data = { 0x66, 0x41 } }, + { .data = { 0x67, 0x43 } }, + { .data = { 0x68, 0x62 } }, + { .data = { 0x69, 0x50 } }, + { .data = { 0x6A, 0x57 } }, + { .data = { 0x6B, 0x49 } }, + { .data = { 0x6C, 0x44 } }, + { .data = { 0x6D, 0x37 } }, + { .data = { 0x6E, 0x23 } }, + { .data = { 0x6F, 0x10 } }, + { .data = { 0x70, 0x78 } }, + { .data = { 0x71, 0x63 } }, + { .data = { 0x72, 0x54 } }, + { .data = { 0x73, 0x49 } }, + { .data = { 0x74, 0x45 } }, + { .data = { 0x75, 0x38 } }, + { .data = { 0x76, 0x3D } }, + { .data = { 0x77, 0x28 } }, + { .data = { 0x78, 0x43 } }, + { .data = { 0x79, 0x41 } }, + { .data = { 0x7A, 0x43 } }, + { .data = { 0x7B, 0x62 } }, + { .data = { 0x7C, 0x50 } }, 
+ { .data = { 0x7D, 0x57 } }, + { .data = { 0x7E, 0x49 } }, + { .data = { 0x7F, 0x44 } }, + { .data = { 0x80, 0x37 } }, + { .data = { 0x81, 0x23 } }, + { .data = { 0x82, 0x10 } }, + { .data = { 0xE0, 0x02 } }, + { .data = { 0x00, 0x47 } }, + { .data = { 0x01, 0x47 } }, + { .data = { 0x02, 0x45 } }, + { .data = { 0x03, 0x45 } }, + { .data = { 0x04, 0x4B } }, + { .data = { 0x05, 0x4B } }, + { .data = { 0x06, 0x49 } }, + { .data = { 0x07, 0x49 } }, + { .data = { 0x08, 0x41 } }, + { .data = { 0x09, 0x1F } }, + { .data = { 0x0A, 0x1F } }, + { .data = { 0x0B, 0x1F } }, + { .data = { 0x0C, 0x1F } }, + { .data = { 0x0D, 0x1F } }, + { .data = { 0x0E, 0x1F } }, + { .data = { 0x0F, 0x5F } }, + { .data = { 0x10, 0x5F } }, + { .data = { 0x11, 0x57 } }, + { .data = { 0x12, 0x77 } }, + { .data = { 0x13, 0x35 } }, + { .data = { 0x14, 0x1F } }, + { .data = { 0x15, 0x1F } }, + { .data = { 0x16, 0x46 } }, + { .data = { 0x17, 0x46 } }, + { .data = { 0x18, 0x44 } }, + { .data = { 0x19, 0x44 } }, + { .data = { 0x1A, 0x4A } }, + { .data = { 0x1B, 0x4A } }, + { .data = { 0x1C, 0x48 } }, + { .data = { 0x1D, 0x48 } }, + { .data = { 0x1E, 0x40 } }, + { .data = { 0x1F, 0x1F } }, + { .data = { 0x20, 0x1F } }, + { .data = { 0x21, 0x1F } }, + { .data = { 0x22, 0x1F } }, + { .data = { 0x23, 0x1F } }, + { .data = { 0x24, 0x1F } }, + { .data = { 0x25, 0x5F } }, + { .data = { 0x26, 0x5F } }, + { .data = { 0x27, 0x57 } }, + { .data = { 0x28, 0x77 } }, + { .data = { 0x29, 0x35 } }, + { .data = { 0x2A, 0x1F } }, + { .data = { 0x2B, 0x1F } }, + { .data = { 0x58, 0x40 } }, + { .data = { 0x59, 0x00 } }, + { .data = { 0x5A, 0x00 } }, + { .data = { 0x5B, 0x10 } }, + { .data = { 0x5C, 0x06 } }, + { .data = { 0x5D, 0x40 } }, + { .data = { 0x5E, 0x01 } }, + { .data = { 0x5F, 0x02 } }, + { .data = { 0x60, 0x30 } }, + { .data = { 0x61, 0x01 } }, + { .data = { 0x62, 0x02 } }, + { .data = { 0x63, 0x03 } }, + { .data = { 0x64, 0x6B } }, + { .data = { 0x65, 0x05 } }, + { .data = { 0x66, 0x0C } }, + { .data = { 0x67, 0x73 } }, + { .data = { 0x68, 0x09 } }, + { .data = { 0x69, 0x03 } }, + { .data = { 0x6A, 0x56 } }, + { .data = { 0x6B, 0x08 } }, + { .data = { 0x6C, 0x00 } }, + { .data = { 0x6D, 0x04 } }, + { .data = { 0x6E, 0x04 } }, + { .data = { 0x6F, 0x88 } }, + { .data = { 0x70, 0x00 } }, + { .data = { 0x71, 0x00 } }, + { .data = { 0x72, 0x06 } }, + { .data = { 0x73, 0x7B } }, + { .data = { 0x74, 0x00 } }, + { .data = { 0x75, 0xF8 } }, + { .data = { 0x76, 0x00 } }, + { .data = { 0x77, 0xD5 } }, + { .data = { 0x78, 0x2E } }, + { .data = { 0x79, 0x12 } }, + { .data = { 0x7A, 0x03 } }, + { .data = { 0x7B, 0x00 } }, + { .data = { 0x7C, 0x00 } }, + { .data = { 0x7D, 0x03 } }, + { .data = { 0x7E, 0x7B } }, + { .data = { 0xE0, 0x04 } }, + { .data = { 0x00, 0x0E } }, + { .data = { 0x02, 0xB3 } }, + { .data = { 0x09, 0x60 } }, + { .data = { 0x0E, 0x2A } }, + { .data = { 0x36, 0x59 } }, + { .data = { 0xE0, 0x00 } }, +}; + +static const struct jadard_panel_desc radxa_display_8hd_ad002_desc = { + .mode = { + .clock = 70000, + + .hdisplay = 800, + .hsync_start = 800 + 40, + .hsync_end = 800 + 40 + 18, + .htotal = 800 + 40 + 18 + 20, + + .vdisplay = 1280, + .vsync_start = 1280 + 20, + .vsync_end = 1280 + 20 + 4, + .vtotal = 1280 + 20 + 4 + 20, + + .width_mm = 127, + .height_mm = 199, + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, + }, + .lanes = 4, + .format = MIPI_DSI_FMT_RGB888, + .init_cmds = radxa_display_8hd_ad002_init_cmds, + .num_init_cmds = ARRAY_SIZE(radxa_display_8hd_ad002_init_cmds), +}; + static const struct jadard_init_cmd 
cz101b4001_init_cmds[] = { { .data = { 0xE0, 0x00 } }, { .data = { 0xE1, 0x93 } }, @@ -452,7 +648,18 @@ static void jadard_dsi_remove(struct mipi_dsi_device *dsi) } static const struct of_device_id jadard_of_match[] = { - { .compatible = "chongzhou,cz101b4001", .data = &cz101b4001_desc }, + { + .compatible = "chongzhou,cz101b4001", + .data = &cz101b4001_desc + }, + { + .compatible = "radxa,display-10hd-ad001", + .data = &cz101b4001_desc + }, + { + .compatible = "radxa,display-8hd-ad002", + .data = &radxa_display_8hd_ad002_desc + }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, jadard_of_match); diff --git a/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c b/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c new file mode 100644 index 000000000000..8c362c40227f --- /dev/null +++ b/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c @@ -0,0 +1,522 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Magnachip d53e6ea8966 MIPI-DSI panel driver + * Copyright (C) 2023 Chris Morgan + */ + +#include <drm/drm_mipi_dbi.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_of.h> +#include <drm/drm_panel.h> + +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/media-bus-format.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/regulator/consumer.h> +#include <linux/spi/spi.h> + +#include <video/mipi_display.h> + +/* Forward declaration for use in backlight function */ +struct d53e6ea8966; + +/* Panel info, unique to each panel */ +struct d53e6ea8966_panel_info { + /** @display_modes: the supported display modes */ + const struct drm_display_mode *display_modes; + /** @num_modes: the number of supported display modes */ + unsigned int num_modes; + /** @width_mm: panel width in mm */ + u16 width_mm; + /** @height_mm: panel height in mm */ + u16 height_mm; + /** @bus_flags: drm bus flags for panel */ + u32 bus_flags; + /** @panel_init_seq: panel specific init sequence */ + void (*panel_init_seq)(struct d53e6ea8966 *db); + /** @backlight_register: panel backlight registration or NULL */ + int (*backlight_register)(struct d53e6ea8966 *db); +}; + +struct d53e6ea8966 { + /** @dev: the container device */ + struct device *dev; + /** @dbi: the DBI bus abstraction handle */ + struct mipi_dbi dbi; + /** @panel: the DRM panel instance for this device */ + struct drm_panel panel; + /** @reset: reset GPIO line */ + struct gpio_desc *reset; + /** @enable: enable GPIO line */ + struct gpio_desc *enable; + /** @reg_vdd: VDD supply regulator for panel logic */ + struct regulator *reg_vdd; + /** @reg_elvdd: ELVDD supply regulator for panel display */ + struct regulator *reg_elvdd; + /** @dsi_dev: DSI child device (panel) */ + struct mipi_dsi_device *dsi_dev; + /** @bl_dev: pseudo-backlight device for oled panel */ + struct backlight_device *bl_dev; + /** @panel_info: struct containing panel timing and info */ + const struct d53e6ea8966_panel_info *panel_info; +}; + +#define NUM_GAMMA_LEVELS 16 +#define GAMMA_TABLE_COUNT 23 +#define MAX_BRIGHTNESS (NUM_GAMMA_LEVELS - 1) + +#define MCS_ELVSS_ON 0xb1 +#define MCS_TEMP_SWIRE 0xb2 +#define MCS_PASSWORD_0 0xf0 +#define MCS_PASSWORD_1 0xf1 +#define MCS_ANALOG_PWR_CTL_0 0xf4 +#define MCS_ANALOG_PWR_CTL_1 0xf5 +#define MCS_GTCON_SET 0xf7 +#define MCS_GATELESS_SIGNAL_SET 0xf8 +#define MCS_SET_GAMMA 0xf9 + +static inline struct d53e6ea8966 *to_d53e6ea8966(struct drm_panel *panel) +{ + return 
container_of(panel, struct d53e6ea8966, panel); +} + +/* Table of gamma values provided in datasheet */ +static u8 ams495qa01_gamma[NUM_GAMMA_LEVELS][GAMMA_TABLE_COUNT] = { + {0x01, 0x79, 0x78, 0x8d, 0xd9, 0xdf, 0xd5, 0xcb, 0xcf, 0xc5, + 0xe5, 0xe0, 0xe4, 0xdc, 0xb8, 0xd4, 0xfa, 0xed, 0xe6, 0x2f, + 0x00, 0x2f}, + {0x01, 0x7d, 0x7c, 0x92, 0xd7, 0xdd, 0xd2, 0xcb, 0xd0, 0xc6, + 0xe5, 0xe1, 0xe3, 0xda, 0xbd, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, + 0x00, 0x2f}, + {0x01, 0x7f, 0x7e, 0x95, 0xd7, 0xde, 0xd2, 0xcb, 0xcf, 0xc5, + 0xe5, 0xe3, 0xe3, 0xda, 0xbf, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, + 0x00, 0x2f}, + {0x01, 0x82, 0x81, 0x99, 0xd6, 0xdd, 0xd1, 0xca, 0xcf, 0xc3, + 0xe4, 0xe3, 0xe3, 0xda, 0xc2, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, + 0x00, 0x2f}, + {0x01, 0x84, 0x83, 0x9b, 0xd7, 0xde, 0xd2, 0xc8, 0xce, 0xc2, + 0xe4, 0xe3, 0xe2, 0xd9, 0xc3, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, + 0x00, 0x2f}, + {0x01, 0x87, 0x86, 0x9f, 0xd6, 0xdd, 0xd1, 0xc7, 0xce, 0xc1, + 0xe4, 0xe3, 0xe2, 0xd9, 0xc6, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, + 0x00, 0x2f}, + {0x01, 0x89, 0x89, 0xa2, 0xd5, 0xdb, 0xcf, 0xc8, 0xcf, 0xc2, + 0xe3, 0xe3, 0xe1, 0xd9, 0xc7, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, + 0x00, 0x2f}, + {0x01, 0x8b, 0x8b, 0xa5, 0xd5, 0xdb, 0xcf, 0xc7, 0xce, 0xc0, + 0xe3, 0xe3, 0xe1, 0xd8, 0xc7, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, + 0x00, 0x2f}, + {0x01, 0x8d, 0x8d, 0xa7, 0xd5, 0xdb, 0xcf, 0xc6, 0xce, 0xc0, + 0xe4, 0xe4, 0xe1, 0xd7, 0xc8, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, + 0x00, 0x2f}, + {0x01, 0x8f, 0x8f, 0xaa, 0xd4, 0xdb, 0xce, 0xc6, 0xcd, 0xbf, + 0xe3, 0xe3, 0xe1, 0xd7, 0xca, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, + 0x00, 0x2f}, + {0x01, 0x91, 0x91, 0xac, 0xd3, 0xda, 0xce, 0xc5, 0xcd, 0xbe, + 0xe3, 0xe3, 0xe0, 0xd7, 0xca, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, + 0x00, 0x2f}, + {0x01, 0x93, 0x93, 0xaf, 0xd3, 0xda, 0xcd, 0xc5, 0xcd, 0xbe, + 0xe2, 0xe3, 0xdf, 0xd6, 0xca, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, + 0x00, 0x2f}, + {0x01, 0x95, 0x95, 0xb1, 0xd2, 0xd9, 0xcc, 0xc4, 0xcd, 0xbe, + 0xe2, 0xe3, 0xdf, 0xd7, 0xcc, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, + 0x00, 0x2f}, + {0x01, 0x99, 0x99, 0xb6, 0xd1, 0xd9, 0xcc, 0xc3, 0xcb, 0xbc, + 0xe2, 0xe4, 0xdf, 0xd6, 0xcc, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, + 0x00, 0x2f}, + {0x01, 0x9c, 0x9c, 0xba, 0xd0, 0xd8, 0xcb, 0xc3, 0xcb, 0xbb, + 0xe2, 0xe4, 0xdf, 0xd6, 0xce, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, + 0x00, 0x2f}, + {0x01, 0x9f, 0x9f, 0xbe, 0xcf, 0xd7, 0xc9, 0xc2, 0xcb, 0xbb, + 0xe1, 0xe3, 0xde, 0xd6, 0xd0, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, + 0x00, 0x2f}, +}; + +/* + * Table of elvss values provided in datasheet and corresponds to + * gamma values. 
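As the Kconfig help above puts it, this panel takes video over DSI but commands over 9-bit SPI. The driver therefore probes as an SPI device, sets up a MIPI DBI bus for the command side, and registers a child DSI device for the video side (see the probe function further down, which uses drm_of_get_dsi_bus() and devm_mipi_dsi_device_register_full()). A condensed sketch of the command-side setup only:

#include <drm/drm_mipi_dbi.h>
#include <linux/spi/spi.h>
#include <video/mipi_display.h>

/*
 * With a NULL D/C GPIO, mipi_dbi_spi_init() uses the 3-wire DBI mode
 * where the data/command flag travels as a 9th SPI bit;
 * mipi_dbi_command() then issues DCS commands over that bus.
 */
static int example_dbi_setup(struct spi_device *spi, struct mipi_dbi *dbi)
{
	int ret;

	ret = mipi_dbi_spi_init(spi, dbi, NULL);
	if (ret)
		return ret;

	mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
	return 0;
}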
+ */ +static u8 ams495qa01_elvss[NUM_GAMMA_LEVELS] = { + 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, + 0x15, 0x15, 0x14, 0x14, 0x13, 0x12, +}; + +static int ams495qa01_update_gamma(struct mipi_dbi *dbi, int brightness) +{ + int tmp = brightness; + + mipi_dbi_command_buf(dbi, MCS_SET_GAMMA, ams495qa01_gamma[tmp], + ARRAY_SIZE(ams495qa01_gamma[tmp])); + mipi_dbi_command(dbi, MCS_SET_GAMMA, 0x00); + + /* Undocumented command */ + mipi_dbi_command(dbi, 0x26, 0x00); + + mipi_dbi_command(dbi, MCS_TEMP_SWIRE, ams495qa01_elvss[tmp]); + + return 0; +} + +static void ams495qa01_panel_init(struct d53e6ea8966 *db) +{ + struct mipi_dbi *dbi = &db->dbi; + + mipi_dbi_command(dbi, MCS_PASSWORD_0, 0x5a, 0x5a); + mipi_dbi_command(dbi, MCS_PASSWORD_1, 0x5a, 0x5a); + + /* Undocumented commands */ + mipi_dbi_command(dbi, 0xb0, 0x02); + mipi_dbi_command(dbi, 0xf3, 0x3b); + + mipi_dbi_command(dbi, MCS_ANALOG_PWR_CTL_0, 0x33, 0x42, 0x00, 0x08); + mipi_dbi_command(dbi, MCS_ANALOG_PWR_CTL_1, 0x00, 0x06, 0x26, 0x35, 0x03); + + /* Undocumented commands */ + mipi_dbi_command(dbi, 0xf6, 0x02); + mipi_dbi_command(dbi, 0xc6, 0x0b, 0x00, 0x00, 0x3c, 0x00, 0x22, + 0x00, 0x00, 0x00, 0x00); + + mipi_dbi_command(dbi, MCS_GTCON_SET, 0x20); + mipi_dbi_command(dbi, MCS_TEMP_SWIRE, 0x06, 0x06, 0x06, 0x06); + mipi_dbi_command(dbi, MCS_ELVSS_ON, 0x07, 0x00, 0x10); + mipi_dbi_command(dbi, MCS_GATELESS_SIGNAL_SET, 0x7f, 0x7a, + 0x89, 0x67, 0x26, 0x38, 0x00, 0x00, 0x09, + 0x67, 0x70, 0x88, 0x7a, 0x76, 0x05, 0x09, + 0x23, 0x23, 0x23); + + /* Undocumented commands */ + mipi_dbi_command(dbi, 0xb5, 0xff, 0xef, 0x35, 0x42, 0x0d, 0xd7, + 0xff, 0x07, 0xff, 0xff, 0xfd, 0x00, 0x01, + 0xff, 0x05, 0x12, 0x0f, 0xff, 0xff, 0xff, + 0xff); + mipi_dbi_command(dbi, 0xb4, 0x15); + mipi_dbi_command(dbi, 0xb3, 0x00); + + ams495qa01_update_gamma(dbi, MAX_BRIGHTNESS); +} + +static int d53e6ea8966_prepare(struct drm_panel *panel) +{ + struct d53e6ea8966 *db = to_d53e6ea8966(panel); + int ret; + + /* Power up */ + ret = regulator_enable(db->reg_vdd); + if (ret) { + dev_err(db->dev, "failed to enable vdd regulator: %d\n", ret); + return ret; + } + + if (db->reg_elvdd) { + ret = regulator_enable(db->reg_elvdd); + if (ret) { + dev_err(db->dev, + "failed to enable elvdd regulator: %d\n", ret); + regulator_disable(db->reg_vdd); + return ret; + } + } + + /* Enable */ + if (db->enable) + gpiod_set_value_cansleep(db->enable, 1); + + msleep(50); + + /* Reset */ + gpiod_set_value_cansleep(db->reset, 1); + usleep_range(1000, 5000); + gpiod_set_value_cansleep(db->reset, 0); + msleep(20); + + db->panel_info->panel_init_seq(db); + + return 0; +} + +static int d53e6ea8966_enable(struct drm_panel *panel) +{ + struct d53e6ea8966 *db = to_d53e6ea8966(panel); + struct mipi_dbi *dbi = &db->dbi; + + mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE); + msleep(200); + mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON); + usleep_range(10000, 15000); + + return 0; +} + +static int d53e6ea8966_disable(struct drm_panel *panel) +{ + struct d53e6ea8966 *db = to_d53e6ea8966(panel); + struct mipi_dbi *dbi = &db->dbi; + + mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF); + msleep(20); + mipi_dbi_command(dbi, MIPI_DCS_ENTER_SLEEP_MODE); + msleep(100); + + return 0; +} + +static int d53e6ea8966_unprepare(struct drm_panel *panel) +{ + struct d53e6ea8966 *db = to_d53e6ea8966(panel); + + if (db->enable) + gpiod_set_value_cansleep(db->enable, 0); + + gpiod_set_value_cansleep(db->reset, 1); + + if (db->reg_elvdd) + regulator_disable(db->reg_elvdd); + + regulator_disable(db->reg_vdd); + 
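d53e6ea8966_prepare() above brings the panel up in datasheet order (VDD, optional ELVDD, enable GPIO, then a reset pulse with delays), and the unprepare path continuing below releases everything in reverse. A generic sketch of such a mirrored sequence; the delays here are illustrative, not taken from the Magnachip datasheet:

#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>

static int example_panel_power_on(struct regulator *vdd,
				  struct gpio_desc *reset)
{
	int ret;

	ret = regulator_enable(vdd);
	if (ret)
		return ret;

	gpiod_set_value_cansleep(reset, 1);	/* assert reset */
	usleep_range(1000, 5000);
	gpiod_set_value_cansleep(reset, 0);	/* release reset */
	msleep(20);				/* controller boot time */

	return 0;
}

/* Power off mirrors power on: hold reset, then cut the supply. */
static void example_panel_power_off(struct regulator *vdd,
				    struct gpio_desc *reset)
{
	gpiod_set_value_cansleep(reset, 1);
	regulator_disable(vdd);
}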
msleep(100); + + return 0; +} + +static int d53e6ea8966_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct d53e6ea8966 *db = to_d53e6ea8966(panel); + const struct d53e6ea8966_panel_info *panel_info = db->panel_info; + struct drm_display_mode *mode; + static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; + unsigned int i; + + for (i = 0; i < panel_info->num_modes; i++) { + mode = drm_mode_duplicate(connector->dev, + &panel_info->display_modes[i]); + if (!mode) + return -ENOMEM; + + drm_mode_set_name(mode); + drm_mode_probed_add(connector, mode); + } + + connector->display_info.bpc = 8; + connector->display_info.width_mm = panel_info->width_mm; + connector->display_info.height_mm = panel_info->height_mm; + connector->display_info.bus_flags = panel_info->bus_flags; + + drm_display_info_set_bus_formats(&connector->display_info, + &bus_format, 1); + + return 1; +} + +static const struct drm_panel_funcs d53e6ea8966_panel_funcs = { + .disable = d53e6ea8966_disable, + .enable = d53e6ea8966_enable, + .get_modes = d53e6ea8966_get_modes, + .prepare = d53e6ea8966_prepare, + .unprepare = d53e6ea8966_unprepare, +}; + +static int ams495qa01_set_brightness(struct backlight_device *bd) +{ + struct d53e6ea8966 *db = bl_get_data(bd); + struct mipi_dbi *dbi = &db->dbi; + int brightness = backlight_get_brightness(bd); + + ams495qa01_update_gamma(dbi, brightness); + + return 0; +} + +static const struct backlight_ops ams495qa01_backlight_ops = { + .update_status = ams495qa01_set_brightness, +}; + +static int ams495qa01_backlight_register(struct d53e6ea8966 *db) +{ + struct backlight_properties props = { + .type = BACKLIGHT_RAW, + .brightness = MAX_BRIGHTNESS, + .max_brightness = MAX_BRIGHTNESS, + }; + struct device *dev = db->dev; + int ret = 0; + + db->bl_dev = devm_backlight_device_register(dev, "panel", dev, db, + &ams495qa01_backlight_ops, + &props); + if (IS_ERR(db->bl_dev)) { + ret = PTR_ERR(db->bl_dev); + dev_err(dev, "error registering backlight device (%d)\n", ret); + } + + return ret; +} + +static int d53e6ea8966_probe(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct mipi_dsi_host *dsi_host; + struct d53e6ea8966 *db; + int ret; + struct mipi_dsi_device_info info = { + .type = "d53e6ea8966", + .channel = 0, + .node = NULL, + }; + + db = devm_kzalloc(dev, sizeof(*db), GFP_KERNEL); + if (!db) + return -ENOMEM; + + spi_set_drvdata(spi, db); + + db->dev = dev; + + db->panel_info = of_device_get_match_data(dev); + if (!db->panel_info) + return -EINVAL; + + db->reg_vdd = devm_regulator_get(dev, "vdd"); + if (IS_ERR(db->reg_vdd)) + return dev_err_probe(dev, PTR_ERR(db->reg_vdd), + "Failed to get vdd supply\n"); + + db->reg_elvdd = devm_regulator_get_optional(dev, "elvdd"); + if (IS_ERR(db->reg_elvdd)) + db->reg_elvdd = NULL; + + db->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(db->reset)) { + ret = PTR_ERR(db->reset); + return dev_err_probe(dev, ret, "no RESET GPIO\n"); + } + + db->enable = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW); + if (IS_ERR(db->enable)) { + ret = PTR_ERR(db->enable); + return dev_err_probe(dev, ret, "cannot get ENABLE GPIO\n"); + } + + ret = mipi_dbi_spi_init(spi, &db->dbi, NULL); + if (ret) + return dev_err_probe(dev, ret, "MIPI DBI init failed\n"); + + dsi_host = drm_of_get_dsi_bus(dev); + if (IS_ERR(dsi_host)) { + ret = PTR_ERR(dsi_host); + return dev_err_probe(dev, ret, "Error attaching DSI bus\n"); + } + + db->dsi_dev = devm_mipi_dsi_device_register_full(dev, dsi_host, &info); + if 
(IS_ERR(db->dsi_dev)) { + dev_err(dev, "failed to register dsi device: %ld\n", + PTR_ERR(db->dsi_dev)); + ret = PTR_ERR(db->dsi_dev); + } + + db->dsi_dev->lanes = 2; + db->dsi_dev->format = MIPI_DSI_FMT_RGB888; + db->dsi_dev->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET; + + drm_panel_init(&db->panel, dev, &d53e6ea8966_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + + if (db->panel_info->backlight_register) { + ret = db->panel_info->backlight_register(db); + if (ret < 0) + return ret; + db->panel.backlight = db->bl_dev; + } + + drm_panel_add(&db->panel); + + ret = devm_mipi_dsi_attach(dev, db->dsi_dev); + if (ret < 0) { + dev_err(dev, "mipi_dsi_attach failed: %d\n", ret); + drm_panel_remove(&db->panel); + return ret; + } + + return 0; +} + +static void d53e6ea8966_remove(struct spi_device *spi) +{ + struct d53e6ea8966 *db = spi_get_drvdata(spi); + + drm_panel_remove(&db->panel); +} + +static const struct drm_display_mode ams495qa01_modes[] = { + { /* 60hz */ + .clock = 33500, + .hdisplay = 960, + .hsync_start = 960 + 10, + .hsync_end = 960 + 10 + 2, + .htotal = 960 + 10 + 2 + 10, + .vdisplay = 544, + .vsync_start = 544 + 10, + .vsync_end = 544 + 10 + 2, + .vtotal = 544 + 10 + 2 + 10, + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, + }, + { /* 50hz */ + .clock = 27800, + .hdisplay = 960, + .hsync_start = 960 + 10, + .hsync_end = 960 + 10 + 2, + .htotal = 960 + 10 + 2 + 10, + .vdisplay = 544, + .vsync_start = 544 + 10, + .vsync_end = 544 + 10 + 2, + .vtotal = 544 + 10 + 2 + 10, + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, + .type = DRM_MODE_TYPE_DRIVER, + }, +}; + +static const struct d53e6ea8966_panel_info ams495qa01_info = { + .display_modes = ams495qa01_modes, + .num_modes = ARRAY_SIZE(ams495qa01_modes), + .width_mm = 117, + .height_mm = 74, + .bus_flags = DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, + .panel_init_seq = ams495qa01_panel_init, + .backlight_register = ams495qa01_backlight_register, +}; + +static const struct of_device_id d53e6ea8966_match[] = { + { .compatible = "samsung,ams495qa01", .data = &ams495qa01_info }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, d53e6ea8966_match); + +static const struct spi_device_id d53e6ea8966_ids[] = { + { "ams495qa01", 0 }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(spi, d53e6ea8966_ids); + +static struct spi_driver d53e6ea8966_driver = { + .driver = { + .name = "d53e6ea8966-panel", + .of_match_table = d53e6ea8966_match, + }, + .id_table = d53e6ea8966_ids, + .probe = d53e6ea8966_probe, + .remove = d53e6ea8966_remove, +}; +module_spi_driver(d53e6ea8966_driver); + +MODULE_AUTHOR("Chris Morgan <[email protected]>"); +MODULE_DESCRIPTION("Magnachip d53e6ea8966 panel driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index abb0dadd8f63..f49096f53141 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -220,15 +220,8 @@ panfrost_copy_in_sync(struct drm_device *dev, } for (i = 0; i < in_fence_count; i++) { - struct dma_fence *fence; - - ret = drm_syncobj_find_fence(file_priv, handles[i], 0, 0, - &fence); - if (ret) - goto fail; - - ret = drm_sched_job_add_dependency(&job->base, fence); - + ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, + handles[i], 0); if (ret) goto fail; } diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c 
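The ams495qa01_modes table above can be sanity-checked with refresh = clock / (htotal * vtotal). Both entries share htotal = 960 + 10 + 2 + 10 = 982 and vtotal = 544 + 10 + 2 + 10 = 566, so the 33500 kHz mode gives 33500000 / (982 * 566) ≈ 60.3 Hz and the 27800 kHz mode ≈ 50.0 Hz, matching the comments (in-kernel, drm_mode_vrefresh() performs this computation). As a standalone check:

#include <stdio.h>

/* refresh_hz = (pixel clock in kHz * 1000) / (htotal * vtotal) */
int main(void)
{
	const unsigned long htotal = 982, vtotal = 566;

	printf("60Hz entry: %.2f Hz\n", 33500 * 1000.0 / (htotal * vtotal));
	printf("50Hz entry: %.2f Hz\n", 27800 * 1000.0 / (htotal * vtotal));
	return 0;
}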
index a92a5b0d4c25..1a82629bce3f 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -143,6 +143,17 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_resource *old_mem = bo->resource; int ret; + if (!old_mem) { + if (new_mem->mem_type != TTM_PL_SYSTEM) { + hop->mem_type = TTM_PL_SYSTEM; + hop->flags = TTM_PL_FLAG_TEMPORARY; + return -EMULTIHOP; + } + + ttm_bo_move_null(bo, new_mem); + return 0; + } + qxl_bo_move_notify(bo, new_mem); ret = ttm_bo_wait_ctx(bo, ctx); diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig index 62a596d3a891..e19d77d58810 100644 --- a/drivers/gpu/drm/radeon/Kconfig +++ b/drivers/gpu/drm/radeon/Kconfig @@ -8,6 +8,7 @@ config DRM_RADEON select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HELPER select DRM_KMS_HELPER + select DRM_SUBALLOC_HELPER select DRM_TTM select DRM_TTM_HELPER select SND_HDA_COMPONENT if SND_HDA_CORE diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 57e20780a458..d19a4b1c1a8f 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -79,6 +79,7 @@ #include <drm/drm_gem.h> #include <drm/drm_audio_component.h> +#include <drm/drm_suballoc.h> #include "radeon_family.h" #include "radeon_mode.h" @@ -511,52 +512,12 @@ struct radeon_bo { }; #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, tbo.base) -/* sub-allocation manager, it has to be protected by another lock. - * By conception this is an helper for other part of the driver - * like the indirect buffer or semaphore, which both have their - * locking. - * - * Principe is simple, we keep a list of sub allocation in offset - * order (first entry has offset == 0, last entry has the highest - * offset). - * - * When allocating new object we first check if there is room at - * the end total_size - (last_object_offset + last_object_size) >= - * alloc_size. If so we allocate new object there. - * - * When there is not enough room at the end, we start waiting for - * each sub object until we reach object_offset+object_size >= - * alloc_size, this object then become the sub object we return. - * - * Alignment can't be bigger than page size. - * - * Hole are not considered for allocation to keep things simple. - * Assumption is that there won't be hole (all object on same - * alignment). - */ struct radeon_sa_manager { - wait_queue_head_t wq; - struct radeon_bo *bo; - struct list_head *hole; - struct list_head flist[RADEON_NUM_RINGS]; - struct list_head olist; - unsigned size; - uint64_t gpu_addr; - void *cpu_ptr; - uint32_t domain; - uint32_t align; -}; - -struct radeon_sa_bo; - -/* sub-allocation buffer */ -struct radeon_sa_bo { - struct list_head olist; - struct list_head flist; - struct radeon_sa_manager *manager; - unsigned soffset; - unsigned eoffset; - struct radeon_fence *fence; + struct drm_suballoc_manager base; + struct radeon_bo *bo; + uint64_t gpu_addr; + void *cpu_ptr; + u32 domain; }; /* @@ -587,7 +548,7 @@ int radeon_mode_dumb_mmap(struct drm_file *filp, * Semaphores. 
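The qxl_bo_move() hunk above handles a BO that has no resource yet (bo->resource == NULL): if the target is not system memory, it fills *hop and returns -EMULTIHOP so TTM first materializes the BO in TTM_PL_SYSTEM and then retries the move; otherwise a null move suffices. The idiom, roughly (header names per recent kernels, direct-move paths elided):

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

static int example_bo_move(struct ttm_buffer_object *bo, bool evict,
			   struct ttm_operation_ctx *ctx,
			   struct ttm_resource *new_mem,
			   struct ttm_place *hop)
{
	if (!bo->resource) {
		if (new_mem->mem_type != TTM_PL_SYSTEM) {
			/* Ask TTM to hop through system memory first */
			hop->mem_type = TTM_PL_SYSTEM;
			hop->flags = TTM_PL_FLAG_TEMPORARY;
			return -EMULTIHOP;
		}
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}

	/* ... direct move paths as in the driver ... */
	return 0;
}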
*/ struct radeon_semaphore { - struct radeon_sa_bo *sa_bo; + struct drm_suballoc *sa_bo; signed waiters; uint64_t gpu_addr; }; @@ -816,7 +777,7 @@ void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask); */ struct radeon_ib { - struct radeon_sa_bo *sa_bo; + struct drm_suballoc *sa_bo; uint32_t length_dw; uint64_t gpu_addr; uint32_t *ptr; diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c index 62b116727b4f..6a45a72488f9 100644 --- a/drivers/gpu/drm/radeon/radeon_ib.c +++ b/drivers/gpu/drm/radeon/radeon_ib.c @@ -61,7 +61,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring, { int r; - r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256); + r = radeon_sa_bo_new(&rdev->ring_tmp_bo, &ib->sa_bo, size, 256); if (r) { dev_err(rdev->dev, "failed to get a new IB (%d)\n", r); return r; @@ -77,7 +77,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring, /* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address * space and soffset is the offset inside the pool bo */ - ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET; + ib->gpu_addr = drm_suballoc_soffset(ib->sa_bo) + RADEON_VA_IB_OFFSET; } else { ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo); } @@ -97,7 +97,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring, void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib) { radeon_sync_free(rdev, &ib->sync, ib->fence); - radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence); + radeon_sa_bo_free(&ib->sa_bo, ib->fence); radeon_fence_unref(&ib->fence); } @@ -201,8 +201,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev) if (rdev->family >= CHIP_BONAIRE) { r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo, - RADEON_IB_POOL_SIZE*64*1024, - RADEON_GPU_PAGE_SIZE, + RADEON_IB_POOL_SIZE*64*1024, 256, RADEON_GEM_DOMAIN_GTT, RADEON_GEM_GTT_WC); } else { @@ -210,8 +209,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev) * to the command stream checking */ r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo, - RADEON_IB_POOL_SIZE*64*1024, - RADEON_GPU_PAGE_SIZE, + RADEON_IB_POOL_SIZE*64*1024, 256, RADEON_GEM_DOMAIN_GTT, 0); } if (r) { diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 0a6ef49e990a..39cc87a59a9a 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -169,15 +169,22 @@ extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence, /* * sub allocation */ +static inline struct radeon_sa_manager * +to_radeon_sa_manager(struct drm_suballoc_manager *manager) +{ + return container_of(manager, struct radeon_sa_manager, base); +} -static inline uint64_t radeon_sa_bo_gpu_addr(struct radeon_sa_bo *sa_bo) +static inline uint64_t radeon_sa_bo_gpu_addr(struct drm_suballoc *sa_bo) { - return sa_bo->manager->gpu_addr + sa_bo->soffset; + return to_radeon_sa_manager(sa_bo->manager)->gpu_addr + + drm_suballoc_soffset(sa_bo); } -static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo) +static inline void *radeon_sa_bo_cpu_addr(struct drm_suballoc *sa_bo) { - return sa_bo->manager->cpu_ptr + sa_bo->soffset; + return to_radeon_sa_manager(sa_bo->manager)->cpu_ptr + + drm_suballoc_soffset(sa_bo); } extern int radeon_sa_bo_manager_init(struct radeon_device *rdev, @@ -190,12 +197,10 @@ extern int radeon_sa_bo_manager_start(struct radeon_device *rdev, struct radeon_sa_manager *sa_manager); extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev, struct radeon_sa_manager 
*sa_manager); -extern int radeon_sa_bo_new(struct radeon_device *rdev, - struct radeon_sa_manager *sa_manager, - struct radeon_sa_bo **sa_bo, - unsigned size, unsigned align); -extern void radeon_sa_bo_free(struct radeon_device *rdev, - struct radeon_sa_bo **sa_bo, +extern int radeon_sa_bo_new(struct radeon_sa_manager *sa_manager, + struct drm_suballoc **sa_bo, + unsigned int size, unsigned int align); +extern void radeon_sa_bo_free(struct drm_suballoc **sa_bo, struct radeon_fence *fence); #if defined(CONFIG_DEBUG_FS) extern void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager, diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c index 0981948bd9ed..c87a57c9c592 100644 --- a/drivers/gpu/drm/radeon/radeon_sa.c +++ b/drivers/gpu/drm/radeon/radeon_sa.c @@ -44,53 +44,32 @@ #include "radeon.h" -static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo); -static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager); - int radeon_sa_bo_manager_init(struct radeon_device *rdev, struct radeon_sa_manager *sa_manager, - unsigned size, u32 align, u32 domain, u32 flags) + unsigned int size, u32 sa_align, u32 domain, + u32 flags) { - int i, r; - - init_waitqueue_head(&sa_manager->wq); - sa_manager->bo = NULL; - sa_manager->size = size; - sa_manager->domain = domain; - sa_manager->align = align; - sa_manager->hole = &sa_manager->olist; - INIT_LIST_HEAD(&sa_manager->olist); - for (i = 0; i < RADEON_NUM_RINGS; ++i) { - INIT_LIST_HEAD(&sa_manager->flist[i]); - } + int r; - r = radeon_bo_create(rdev, size, align, true, + r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true, domain, flags, NULL, NULL, &sa_manager->bo); if (r) { dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r); return r; } + sa_manager->domain = domain; + + drm_suballoc_manager_init(&sa_manager->base, size, sa_align); + return r; } void radeon_sa_bo_manager_fini(struct radeon_device *rdev, struct radeon_sa_manager *sa_manager) { - struct radeon_sa_bo *sa_bo, *tmp; - - if (!list_empty(&sa_manager->olist)) { - sa_manager->hole = &sa_manager->olist, - radeon_sa_bo_try_free(sa_manager); - if (!list_empty(&sa_manager->olist)) { - dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n"); - } - } - list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) { - radeon_sa_bo_remove_locked(sa_bo); - } + drm_suballoc_manager_fini(&sa_manager->base); radeon_bo_unref(&sa_manager->bo); - sa_manager->size = 0; } int radeon_sa_bo_manager_start(struct radeon_device *rdev, @@ -139,260 +118,34 @@ int radeon_sa_bo_manager_suspend(struct radeon_device *rdev, return r; } -static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo) +int radeon_sa_bo_new(struct radeon_sa_manager *sa_manager, + struct drm_suballoc **sa_bo, + unsigned int size, unsigned int align) { - struct radeon_sa_manager *sa_manager = sa_bo->manager; - if (sa_manager->hole == &sa_bo->olist) { - sa_manager->hole = sa_bo->olist.prev; - } - list_del_init(&sa_bo->olist); - list_del_init(&sa_bo->flist); - radeon_fence_unref(&sa_bo->fence); - kfree(sa_bo); -} - -static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager) -{ - struct radeon_sa_bo *sa_bo, *tmp; - - if (sa_manager->hole->next == &sa_manager->olist) - return; + struct drm_suballoc *sa = drm_suballoc_new(&sa_manager->base, size, + GFP_KERNEL, true, align); - sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist); - list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) { - if (sa_bo->fence 
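The radeon_sa.c rewrite above and below drops the driver's hand-rolled sub-allocator in favor of the shared helper selected via DRM_SUBALLOC_HELPER: the manager embeds struct drm_suballoc_manager, allocation goes through drm_suballoc_new(), and suballocations are retired by fence via drm_suballoc_free(). A condensed usage sketch built from the calls this patch introduces:

#include <drm/drm_suballoc.h>

static int example_sa_init_and_alloc(struct drm_suballoc_manager *mgr,
				     size_t pool_size, size_t align,
				     struct drm_suballoc **out)
{
	struct drm_suballoc *sa;

	drm_suballoc_manager_init(mgr, pool_size, align);

	/*
	 * GFP_KERNEL + intr=true: may wait interruptibly on the fences
	 * protecting older suballocations until space frees up.
	 */
	sa = drm_suballoc_new(mgr, 256, GFP_KERNEL, true, align);
	if (IS_ERR(sa))
		return PTR_ERR(sa);

	*out = sa;
	return 0;
}

/* Later: drm_suballoc_free(sa, fence) retires sa once fence signals,
 * and drm_suballoc_manager_fini(mgr) tears the pool down. */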
== NULL || !radeon_fence_signaled(sa_bo->fence)) { - return; - } - radeon_sa_bo_remove_locked(sa_bo); + if (IS_ERR(sa)) { + *sa_bo = NULL; + return PTR_ERR(sa); } -} -static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager) -{ - struct list_head *hole = sa_manager->hole; - - if (hole != &sa_manager->olist) { - return list_entry(hole, struct radeon_sa_bo, olist)->eoffset; - } + *sa_bo = sa; return 0; } -static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager) -{ - struct list_head *hole = sa_manager->hole; - - if (hole->next != &sa_manager->olist) { - return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset; - } - return sa_manager->size; -} - -static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager, - struct radeon_sa_bo *sa_bo, - unsigned size, unsigned align) -{ - unsigned soffset, eoffset, wasted; - - soffset = radeon_sa_bo_hole_soffset(sa_manager); - eoffset = radeon_sa_bo_hole_eoffset(sa_manager); - wasted = (align - (soffset % align)) % align; - - if ((eoffset - soffset) >= (size + wasted)) { - soffset += wasted; - - sa_bo->manager = sa_manager; - sa_bo->soffset = soffset; - sa_bo->eoffset = soffset + size; - list_add(&sa_bo->olist, sa_manager->hole); - INIT_LIST_HEAD(&sa_bo->flist); - sa_manager->hole = &sa_bo->olist; - return true; - } - return false; -} - -/** - * radeon_sa_event - Check if we can stop waiting - * - * @sa_manager: pointer to the sa_manager - * @size: number of bytes we want to allocate - * @align: alignment we need to match - * - * Check if either there is a fence we can wait for or - * enough free memory to satisfy the allocation directly - */ -static bool radeon_sa_event(struct radeon_sa_manager *sa_manager, - unsigned size, unsigned align) -{ - unsigned soffset, eoffset, wasted; - int i; - - for (i = 0; i < RADEON_NUM_RINGS; ++i) { - if (!list_empty(&sa_manager->flist[i])) { - return true; - } - } - - soffset = radeon_sa_bo_hole_soffset(sa_manager); - eoffset = radeon_sa_bo_hole_eoffset(sa_manager); - wasted = (align - (soffset % align)) % align; - - if ((eoffset - soffset) >= (size + wasted)) { - return true; - } - - return false; -} - -static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager, - struct radeon_fence **fences, - unsigned *tries) -{ - struct radeon_sa_bo *best_bo = NULL; - unsigned i, soffset, best, tmp; - - /* if hole points to the end of the buffer */ - if (sa_manager->hole->next == &sa_manager->olist) { - /* try again with its beginning */ - sa_manager->hole = &sa_manager->olist; - return true; - } - - soffset = radeon_sa_bo_hole_soffset(sa_manager); - /* to handle wrap around we add sa_manager->size */ - best = sa_manager->size * 2; - /* go over all fence list and try to find the closest sa_bo - * of the current last - */ - for (i = 0; i < RADEON_NUM_RINGS; ++i) { - struct radeon_sa_bo *sa_bo; - - fences[i] = NULL; - - if (list_empty(&sa_manager->flist[i])) { - continue; - } - - sa_bo = list_first_entry(&sa_manager->flist[i], - struct radeon_sa_bo, flist); - - if (!radeon_fence_signaled(sa_bo->fence)) { - fences[i] = sa_bo->fence; - continue; - } - - /* limit the number of tries each ring gets */ - if (tries[i] > 2) { - continue; - } - - tmp = sa_bo->soffset; - if (tmp < soffset) { - /* wrap around, pretend it's after */ - tmp += sa_manager->size; - } - tmp -= soffset; - if (tmp < best) { - /* this sa bo is the closest one */ - best = tmp; - best_bo = sa_bo; - } - } - - if (best_bo) { - ++tries[best_bo->fence->ring]; - sa_manager->hole = 
best_bo->olist.prev; - - /* we knew that this one is signaled, - so it's save to remote it */ - radeon_sa_bo_remove_locked(best_bo); - return true; - } - return false; -} - -int radeon_sa_bo_new(struct radeon_device *rdev, - struct radeon_sa_manager *sa_manager, - struct radeon_sa_bo **sa_bo, - unsigned size, unsigned align) -{ - struct radeon_fence *fences[RADEON_NUM_RINGS]; - unsigned tries[RADEON_NUM_RINGS]; - int i, r; - - BUG_ON(align > sa_manager->align); - BUG_ON(size > sa_manager->size); - - *sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL); - if ((*sa_bo) == NULL) { - return -ENOMEM; - } - (*sa_bo)->manager = sa_manager; - (*sa_bo)->fence = NULL; - INIT_LIST_HEAD(&(*sa_bo)->olist); - INIT_LIST_HEAD(&(*sa_bo)->flist); - - spin_lock(&sa_manager->wq.lock); - do { - for (i = 0; i < RADEON_NUM_RINGS; ++i) - tries[i] = 0; - - do { - radeon_sa_bo_try_free(sa_manager); - - if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo, - size, align)) { - spin_unlock(&sa_manager->wq.lock); - return 0; - } - - /* see if we can skip over some allocations */ - } while (radeon_sa_bo_next_hole(sa_manager, fences, tries)); - - for (i = 0; i < RADEON_NUM_RINGS; ++i) - radeon_fence_ref(fences[i]); - - spin_unlock(&sa_manager->wq.lock); - r = radeon_fence_wait_any(rdev, fences, false); - for (i = 0; i < RADEON_NUM_RINGS; ++i) - radeon_fence_unref(&fences[i]); - spin_lock(&sa_manager->wq.lock); - /* if we have nothing to wait for block */ - if (r == -ENOENT) { - r = wait_event_interruptible_locked( - sa_manager->wq, - radeon_sa_event(sa_manager, size, align) - ); - } - - } while (!r); - - spin_unlock(&sa_manager->wq.lock); - kfree(*sa_bo); - *sa_bo = NULL; - return r; -} - -void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo, +void radeon_sa_bo_free(struct drm_suballoc **sa_bo, struct radeon_fence *fence) { - struct radeon_sa_manager *sa_manager; - if (sa_bo == NULL || *sa_bo == NULL) { return; } - sa_manager = (*sa_bo)->manager; - spin_lock(&sa_manager->wq.lock); - if (fence && !radeon_fence_signaled(fence)) { - (*sa_bo)->fence = radeon_fence_ref(fence); - list_add_tail(&(*sa_bo)->flist, - &sa_manager->flist[fence->ring]); - } else { - radeon_sa_bo_remove_locked(*sa_bo); - } - wake_up_all_locked(&sa_manager->wq); - spin_unlock(&sa_manager->wq.lock); + if (fence) + drm_suballoc_free(*sa_bo, &fence->base); + else + drm_suballoc_free(*sa_bo, NULL); + *sa_bo = NULL; } @@ -400,25 +153,8 @@ void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo, void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager, struct seq_file *m) { - struct radeon_sa_bo *i; + struct drm_printer p = drm_seq_file_printer(m); - spin_lock(&sa_manager->wq.lock); - list_for_each_entry(i, &sa_manager->olist, olist) { - uint64_t soffset = i->soffset + sa_manager->gpu_addr; - uint64_t eoffset = i->eoffset + sa_manager->gpu_addr; - if (&i->olist == sa_manager->hole) { - seq_printf(m, ">"); - } else { - seq_printf(m, " "); - } - seq_printf(m, "[0x%010llx 0x%010llx] size %8lld", - soffset, eoffset, eoffset - soffset); - if (i->fence) { - seq_printf(m, " protected by 0x%016llx on ring %d", - i->fence->seq, i->fence->ring); - } - seq_printf(m, "\n"); - } - spin_unlock(&sa_manager->wq.lock); + drm_suballoc_dump_debug_info(&sa_manager->base, &p, sa_manager->gpu_addr); } #endif diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c index 221e59476f64..1f0a9a4ff5ae 100644 --- a/drivers/gpu/drm/radeon/radeon_semaphore.c +++ 
b/drivers/gpu/drm/radeon/radeon_semaphore.c @@ -40,7 +40,7 @@ int radeon_semaphore_create(struct radeon_device *rdev, if (*semaphore == NULL) { return -ENOMEM; } - r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, + r = radeon_sa_bo_new(&rdev->ring_tmp_bo, &(*semaphore)->sa_bo, 8, 8); if (r) { kfree(*semaphore); @@ -100,7 +100,7 @@ void radeon_semaphore_free(struct radeon_device *rdev, dev_err(rdev->dev, "semaphore %p has more waiters than signalers," " hardware lockup imminent!\n", *semaphore); } - radeon_sa_bo_free(rdev, &(*semaphore)->sa_bo, fence); + radeon_sa_bo_free(&(*semaphore)->sa_bo, fence); kfree(*semaphore); *semaphore = NULL; } diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 1e8e287e113c..2220cdf6a3f6 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -211,13 +211,10 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, if (r) return r; - /* Can't move a pinned BO */ rbo = container_of(bo, struct radeon_bo, tbo); - if (WARN_ON_ONCE(rbo->tbo.pin_count > 0)) - return -EINVAL; - rdev = radeon_get_rdev(bo->bdev); - if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { + if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM && + bo->ttm == NULL)) { ttm_bo_move_null(bo, new_mem); goto out; } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 8ea09d915c3c..b8f8b45ebf59 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -261,9 +261,6 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, else ret = rockchip_drm_gem_object_mmap_dma(obj, vma); - if (ret) - drm_gem_vm_close(vma); - return ret; } @@ -518,8 +515,14 @@ int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); if (rk_obj->pages) { - void *vaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP, - pgprot_writecombine(PAGE_KERNEL)); + void *vaddr; + + if (rk_obj->kvaddr) + vaddr = rk_obj->kvaddr; + else + vaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP, + pgprot_writecombine(PAGE_KERNEL)); + if (!vaddr) return -ENOMEM; iosys_map_set_vaddr(map, vaddr); @@ -539,7 +542,8 @@ void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); if (rk_obj->pages) { - vunmap(map->vaddr); + if (map->vaddr != rk_obj->kvaddr) + vunmap(map->vaddr); return; } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index fa1f4ee6d195..abbc189affa7 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -316,13 +316,10 @@ static int vop_convert_afbc_format(uint32_t format) case DRM_FORMAT_RGB565: case DRM_FORMAT_BGR565: return AFBC_FMT_RGB565; - /* either of the below should not be reachable */ default: - DRM_WARN_ONCE("unsupported AFBC format[%08x]\n", format); + DRM_DEBUG_KMS("unsupported AFBC format[%08x]\n", format); return -EINVAL; } - - return -EINVAL; } static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src, @@ -2221,7 +2218,7 @@ static int vop_bind(struct device *dev, struct device *master, void *data) goto err_disable_pm_runtime; if (vop->data->feature & VOP_FEATURE_INTERNAL_RGB) { - vop->rgb = rockchip_rgb_init(dev, &vop->crtc, vop->drm_dev); + vop->rgb = rockchip_rgb_init(dev, &vop->crtc, vop->drm_dev, 0); if (IS_ERR(vop->rgb)) { ret = 
PTR_ERR(vop->rgb); goto err_disable_pm_runtime; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index ba3b81789509..0e0012368976 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -38,6 +38,7 @@ #include "rockchip_drm_gem.h" #include "rockchip_drm_fb.h" #include "rockchip_drm_vop2.h" +#include "rockchip_rgb.h" /* * VOP2 architecture @@ -211,6 +212,9 @@ struct vop2 { struct clk *hclk; struct clk *aclk; + /* optional internal rgb encoder */ + struct rockchip_rgb *rgb; + /* must be put at the end of the struct */ struct vop2_win win[]; }; @@ -2245,7 +2249,7 @@ static struct vop2_video_port *find_vp_without_primary(struct vop2 *vop2) #define NR_LAYERS 6 -static int vop2_create_crtc(struct vop2 *vop2) +static int vop2_create_crtcs(struct vop2 *vop2) { const struct vop2_data *vop2_data = vop2->data; struct drm_device *drm = vop2->drm; @@ -2321,10 +2325,11 @@ static int vop2_create_crtc(struct vop2 *vop2) /* change the unused primary window to overlay window */ win->type = DRM_PLANE_TYPE_OVERLAY; } - } - - if (win->type == DRM_PLANE_TYPE_OVERLAY) + } else if (win->type == DRM_PLANE_TYPE_OVERLAY) { possible_crtcs = (1 << nvps) - 1; + } else { + possible_crtcs = 0; + } ret = vop2_plane_init(vop2, win, possible_crtcs); if (ret) { @@ -2370,15 +2375,44 @@ static int vop2_create_crtc(struct vop2 *vop2) return 0; } -static void vop2_destroy_crtc(struct drm_crtc *crtc) +static void vop2_destroy_crtcs(struct vop2 *vop2) { - of_node_put(crtc->port); + struct drm_device *drm = vop2->drm; + struct list_head *crtc_list = &drm->mode_config.crtc_list; + struct list_head *plane_list = &drm->mode_config.plane_list; + struct drm_crtc *crtc, *tmpc; + struct drm_plane *plane, *tmpp; + + list_for_each_entry_safe(plane, tmpp, plane_list, head) + drm_plane_cleanup(plane); /* * Destroy CRTC after vop2_plane_destroy() since vop2_disable_plane() * references the CRTC. 
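vop2_destroy_crtcs() (continuing below) centralizes teardown that used to be split across unbind, and the retained comment gives the ordering rule: planes first, CRTCs second, because vop2_disable_plane() still dereferences the CRTC. The shape of that ordering as a generic sketch:

#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_plane.h>
#include <linux/of.h>

/* Destroy dependents before the objects they point at. */
static void example_destroy_all(struct drm_device *drm)
{
	struct drm_plane *plane, *tmpp;
	struct drm_crtc *crtc, *tmpc;

	list_for_each_entry_safe(plane, tmpp,
				 &drm->mode_config.plane_list, head)
		drm_plane_cleanup(plane);

	list_for_each_entry_safe(crtc, tmpc,
				 &drm->mode_config.crtc_list, head) {
		of_node_put(crtc->port);
		drm_crtc_cleanup(crtc);
	}
}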
*/ - drm_crtc_cleanup(crtc); + list_for_each_entry_safe(crtc, tmpc, crtc_list, head) { + of_node_put(crtc->port); + drm_crtc_cleanup(crtc); + } +} + +static int vop2_find_rgb_encoder(struct vop2 *vop2) +{ + struct device_node *node = vop2->dev->of_node; + struct device_node *endpoint; + int i; + + for (i = 0; i < vop2->data->nr_vps; i++) { + endpoint = of_graph_get_endpoint_by_regs(node, i, + ROCKCHIP_VOP2_EP_RGB0); + if (!endpoint) + continue; + + of_node_put(endpoint); + return i; + } + + return -ENOENT; } static struct reg_field vop2_cluster_regs[VOP2_WIN_MAX_REG] = { @@ -2682,33 +2716,45 @@ static int vop2_bind(struct device *dev, struct device *master, void *data) if (ret) return ret; - ret = vop2_create_crtc(vop2); + ret = vop2_create_crtcs(vop2); if (ret) return ret; + ret = vop2_find_rgb_encoder(vop2); + if (ret >= 0) { + vop2->rgb = rockchip_rgb_init(dev, &vop2->vps[ret].crtc, + vop2->drm, ret); + if (IS_ERR(vop2->rgb)) { + if (PTR_ERR(vop2->rgb) == -EPROBE_DEFER) { + ret = PTR_ERR(vop2->rgb); + goto err_crtcs; + } + vop2->rgb = NULL; + } + } + rockchip_drm_dma_init_device(vop2->drm, vop2->dev); pm_runtime_enable(&pdev->dev); return 0; + +err_crtcs: + vop2_destroy_crtcs(vop2); + + return ret; } static void vop2_unbind(struct device *dev, struct device *master, void *data) { struct vop2 *vop2 = dev_get_drvdata(dev); - struct drm_device *drm = vop2->drm; - struct list_head *plane_list = &drm->mode_config.plane_list; - struct list_head *crtc_list = &drm->mode_config.crtc_list; - struct drm_crtc *crtc, *tmpc; - struct drm_plane *plane, *tmpp; pm_runtime_disable(dev); - list_for_each_entry_safe(plane, tmpp, plane_list, head) - drm_plane_cleanup(plane); + if (vop2->rgb) + rockchip_rgb_fini(vop2->rgb); - list_for_each_entry_safe(crtc, tmpc, crtc_list, head) - vop2_destroy_crtc(crtc); + vop2_destroy_crtcs(vop2); } const struct component_ops vop2_component_ops = { diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c index 75eb7cca3d82..c677b71ae516 100644 --- a/drivers/gpu/drm/rockchip/rockchip_rgb.c +++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c @@ -22,13 +22,11 @@ #include "rockchip_drm_vop.h" #include "rockchip_rgb.h" -#define encoder_to_rgb(c) container_of(c, struct rockchip_rgb, encoder) - struct rockchip_rgb { struct device *dev; struct drm_device *drm_dev; struct drm_bridge *bridge; - struct drm_encoder encoder; + struct rockchip_encoder encoder; struct drm_connector connector; int output_mode; }; @@ -74,7 +72,8 @@ struct drm_encoder_helper_funcs rockchip_rgb_encoder_helper_funcs = { struct rockchip_rgb *rockchip_rgb_init(struct device *dev, struct drm_crtc *crtc, - struct drm_device *drm_dev) + struct drm_device *drm_dev, + int video_port) { struct rockchip_rgb *rgb; struct drm_encoder *encoder; @@ -92,7 +91,7 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev, rgb->dev = dev; rgb->drm_dev = drm_dev; - port = of_graph_get_port_by_id(dev->of_node, 0); + port = of_graph_get_port_by_id(dev->of_node, video_port); if (!port) return ERR_PTR(-EINVAL); @@ -105,8 +104,8 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev, continue; child_count++; - ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id, - &panel, &bridge); + ret = drm_of_find_panel_or_bridge(dev->of_node, video_port, + endpoint_id, &panel, &bridge); if (!ret) { of_node_put(endpoint); break; @@ -125,7 +124,7 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev, return ERR_PTR(ret); } - encoder = &rgb->encoder; + encoder = &rgb->encoder.encoder; 
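rockchip_rgb now embeds struct rockchip_encoder instead of a bare drm_encoder, which is why the statement above takes the inner &rgb->encoder.encoder and the driver can stash crtc_endpoint_id alongside the DRM object. The real wrapper lives in rockchip_drm_drv.h, outside this diff; the general pattern, with an illustrative struct:

#include <drm/drm_encoder.h>

struct example_encoder {
	int crtc_endpoint_id;		/* extra per-encoder state */
	struct drm_encoder encoder;	/* embedded DRM object */
};

/* Recover the wrapper from the drm_encoder the core hands back. */
static inline struct example_encoder *
to_example_encoder(struct drm_encoder *e)
{
	return container_of(e, struct example_encoder, encoder);
}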
encoder->possible_crtcs = drm_crtc_mask(crtc); ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_NONE); @@ -161,6 +160,8 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev, goto err_free_encoder; } + rgb->encoder.crtc_endpoint_id = endpoint_id; + ret = drm_connector_attach_encoder(connector, encoder); if (ret < 0) { DRM_DEV_ERROR(drm_dev->dev, @@ -182,6 +183,6 @@ void rockchip_rgb_fini(struct rockchip_rgb *rgb) { drm_panel_bridge_remove(rgb->bridge); drm_connector_cleanup(&rgb->connector); - drm_encoder_cleanup(&rgb->encoder); + drm_encoder_cleanup(&rgb->encoder.encoder); } EXPORT_SYMBOL_GPL(rockchip_rgb_fini); diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.h b/drivers/gpu/drm/rockchip/rockchip_rgb.h index 27b9635124bc..1bd4e20e91eb 100644 --- a/drivers/gpu/drm/rockchip/rockchip_rgb.h +++ b/drivers/gpu/drm/rockchip/rockchip_rgb.h @@ -8,12 +8,14 @@ #ifdef CONFIG_ROCKCHIP_RGB struct rockchip_rgb *rockchip_rgb_init(struct device *dev, struct drm_crtc *crtc, - struct drm_device *drm_dev); + struct drm_device *drm_dev, + int video_port); void rockchip_rgb_fini(struct rockchip_rgb *rgb); #else static inline struct rockchip_rgb *rockchip_rgb_init(struct device *dev, struct drm_crtc *crtc, - struct drm_device *drm_dev) + struct drm_device *drm_dev, + int video_port) { return NULL; } diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 007f98c48f8d..250c46cd9c34 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -53,6 +53,7 @@ #include <drm/drm_print.h> #include <drm/drm_gem.h> +#include <drm/drm_syncobj.h> #include <drm/gpu_scheduler.h> #include <drm/spsc_queue.h> @@ -719,6 +720,34 @@ int drm_sched_job_add_dependency(struct drm_sched_job *job, EXPORT_SYMBOL(drm_sched_job_add_dependency); /** + * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency + * @job: scheduler job to add the dependencies to + * @file: drm file private pointer + * @handle: syncobj handle to lookup + * @point: timeline point + * + * This adds the fence matching the given syncobj to @job. + * + * Returns: + * 0 on success, or an error on failing to expand the array.
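+ * -ENOENT is returned when the syncobj handle cannot be resolved; the
+ * v3d conversion further below filters that case out instead of failing
+ * the job.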
+ */ +int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job, + struct drm_file *file, + u32 handle, + u32 point) +{ + struct dma_fence *fence; + int ret; + + ret = drm_syncobj_find_fence(file, handle, point, 0, &fence); + if (ret) + return ret; + + return drm_sched_job_add_dependency(job, fence); +} +EXPORT_SYMBOL(drm_sched_job_add_syncobj_dependency); + +/** * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job * @job: scheduler job to add the dependencies to * @resv: the dma_resv object to get the fences from diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c index 34e80eb6d96e..9536829c6e3a 100644 --- a/drivers/gpu/drm/tests/drm_format_helper_test.c +++ b/drivers/gpu/drm/tests/drm_format_helper_test.c @@ -597,7 +597,7 @@ static void drm_test_fb_xrgb8888_to_xrgb1555(struct kunit *test) drm_fb_xrgb8888_to_xrgb1555(&dst, &result->dst_pitch, &src, &fb, ¶ms->clip); buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16)); - KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0); + KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); } static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test) @@ -628,7 +628,7 @@ static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test) drm_fb_xrgb8888_to_argb1555(&dst, &result->dst_pitch, &src, &fb, ¶ms->clip); buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16)); - KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0); + KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); } static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit *test) @@ -659,7 +659,7 @@ static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit *test) drm_fb_xrgb8888_to_rgba5551(&dst, &result->dst_pitch, &src, &fb, ¶ms->clip); buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16)); - KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0); + KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); } static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test) @@ -724,7 +724,7 @@ static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test) drm_fb_xrgb8888_to_argb8888(&dst, &result->dst_pitch, &src, &fb, ¶ms->clip); buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); - KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0); + KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); } static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test) @@ -786,7 +786,7 @@ static void drm_test_fb_xrgb8888_to_argb2101010(struct kunit *test) drm_fb_xrgb8888_to_argb2101010(&dst, &result->dst_pitch, &src, &fb, ¶ms->clip); buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); - KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0); + KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); } static struct kunit_case drm_format_helper_test_cases[] = { diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c index 165365b515e1..dca077411f77 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc.c +++ b/drivers/gpu/drm/tidss/tidss_dispc.c @@ -1985,9 +1985,9 @@ dma_addr_t dispc_plane_state_p_uv_addr(const struct drm_plane_state *state) (y * fb->pitches[1] / fb->format->vsub); } -int dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane, - const struct drm_plane_state *state, - u32 hw_videoport) +void dispc_plane_setup(struct dispc_device *dispc, u32 
hw_plane, + const struct drm_plane_state *state, + u32 hw_videoport) { bool lite = dispc->feat->vid_lite[hw_plane]; u32 fourcc = state->fb->format->format; @@ -2066,15 +2066,11 @@ int dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane, else VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0, 28, 28); - - return 0; } -int dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable) +void dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable) { VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 0, 0); - - return 0; } static u32 dispc_vid_get_fifo_size(struct dispc_device *dispc, u32 hw_plane) diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h index e49432f0abf5..946ed769caaf 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc.h +++ b/drivers/gpu/drm/tidss/tidss_dispc.h @@ -123,10 +123,10 @@ int dispc_runtime_resume(struct dispc_device *dispc); int dispc_plane_check(struct dispc_device *dispc, u32 hw_plane, const struct drm_plane_state *state, u32 hw_videoport); -int dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane, - const struct drm_plane_state *state, - u32 hw_videoport); -int dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable); +void dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane, + const struct drm_plane_state *state, + u32 hw_videoport); +void dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable); const u32 *dispc_plane_formats(struct dispc_device *dispc, unsigned int *len); int dispc_init(struct tidss_device *tidss); diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c index fe2c41f0cd4f..6bdd6e4a955a 100644 --- a/drivers/gpu/drm/tidss/tidss_plane.c +++ b/drivers/gpu/drm/tidss/tidss_plane.c @@ -113,7 +113,6 @@ static void tidss_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); u32 hw_videoport; - int ret; dev_dbg(ddev->dev, "%s\n", __func__); @@ -124,15 +123,17 @@ static void tidss_plane_atomic_update(struct drm_plane *plane, hw_videoport = to_tidss_crtc(new_state->crtc)->hw_videoport; - ret = dispc_plane_setup(tidss->dispc, tplane->hw_plane_id, - new_state, hw_videoport); + dispc_plane_setup(tidss->dispc, tplane->hw_plane_id, new_state, hw_videoport); +} - if (ret) { - dev_err(plane->dev->dev, "%s: Failed to setup plane %d\n", - __func__, tplane->hw_plane_id); - dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false); - return; - } +static void tidss_plane_atomic_enable(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_device *ddev = plane->dev; + struct tidss_device *tidss = to_tidss(ddev); + struct tidss_plane *tplane = to_tidss_plane(plane); + + dev_dbg(ddev->dev, "%s\n", __func__); dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, true); } @@ -160,6 +161,7 @@ static void drm_plane_destroy(struct drm_plane *plane) static const struct drm_plane_helper_funcs tidss_plane_helper_funcs = { .atomic_check = tidss_plane_atomic_check, .atomic_update = tidss_plane_atomic_update, + .atomic_enable = tidss_plane_atomic_enable, .atomic_disable = tidss_plane_atomic_disable, }; diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c index 63881a3754f8..c38d85848af8 100644 --- a/drivers/gpu/drm/tiny/simpledrm.c +++ b/drivers/gpu/drm/tiny/simpledrm.c @@ -606,16 +606,12 @@ static const struct drm_mode_config_funcs simpledrm_mode_config_funcs = { */ static struct drm_display_mode 
simpledrm_mode(unsigned int width, - unsigned int height) + unsigned int height, + unsigned int width_mm, + unsigned int height_mm) { - /* - * Assume a monitor resolution of 96 dpi to - * get a somewhat reasonable screen size. - */ const struct drm_display_mode mode = { - DRM_MODE_INIT(60, width, height, - DRM_MODE_RES_MM(width, 96ul), - DRM_MODE_RES_MM(height, 96ul)) + DRM_MODE_INIT(60, width, height, width_mm, height_mm) }; return mode; @@ -629,6 +625,8 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv, struct simpledrm_device *sdev; struct drm_device *dev; int width, height, stride; + int width_mm = 0, height_mm = 0; + struct device_node *panel_node; const struct drm_format_info *format; struct resource *res, *mem = NULL; struct drm_plane *primary_plane; @@ -685,6 +683,12 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv, mem = simplefb_get_memory_of(dev, of_node); if (IS_ERR(mem)) return ERR_CAST(mem); + panel_node = of_parse_phandle(of_node, "panel", 0); + if (panel_node) { + simplefb_read_u32_of(dev, panel_node, "width-mm", &width_mm); + simplefb_read_u32_of(dev, panel_node, "height-mm", &height_mm); + of_node_put(panel_node); + } } else { drm_err(dev, "no simplefb configuration found\n"); return ERR_PTR(-ENODEV); @@ -695,7 +699,16 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv, return ERR_PTR(-EINVAL); } - sdev->mode = simpledrm_mode(width, height); + /* + * Assume a monitor resolution of 96 dpi if physical dimensions + * are not specified to get a somewhat reasonable screen size. + */ + if (!width_mm) + width_mm = DRM_MODE_RES_MM(width, 96ul); + if (!height_mm) + height_mm = DRM_MODE_RES_MM(height, 96ul); + + sdev->mode = simpledrm_mode(width, height, width_mm, height_mm); sdev->format = format; sdev->pitch = stride; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 326a3d13a829..882c2fa346f3 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -120,8 +120,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, bool old_use_tt, new_use_tt; int ret; - old_use_tt = bo->resource && - ttm_manager_type(bdev, bo->resource->mem_type)->use_tt; + old_use_tt = !bo->resource || ttm_manager_type(bdev, bo->resource->mem_type)->use_tt; new_use_tt = ttm_manager_type(bdev, mem->mem_type)->use_tt; ttm_bo_unmap_virtual(bo); @@ -894,14 +893,18 @@ int ttm_bo_validate(struct ttm_buffer_object *bo, if (!placement->num_placement && !placement->num_busy_placement) return ttm_bo_pipeline_gutting(bo); - /* - * Check whether we need to move buffer. - */ - if (!bo->resource || !ttm_resource_compat(bo->resource, placement)) { - ret = ttm_bo_move_buffer(bo, placement, ctx); - if (ret) - return ret; - } + /* Check whether we need to move buffer. */ + if (bo->resource && ttm_resource_compat(bo->resource, placement)) + return 0; + + /* Moving of pinned BOs is forbidden */ + if (bo->pin_count) + return -EINVAL; + + ret = ttm_bo_move_buffer(bo, placement, ctx); + if (ret) + return ret; + /* * We might need to add a TTM. 
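* A BO created through ttm_bo_init_reserved() no longer starts out with a
* dummy system resource (see the hunk below), so its first validate always
* goes through ttm_bo_move_buffer() above and may have to create the TT
* backing here.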
*/ @@ -953,7 +956,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo, struct sg_table *sg, struct dma_resv *resv, void (*destroy) (struct ttm_buffer_object *)) { - static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM }; int ret; kref_init(&bo->kref); @@ -970,12 +972,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo, bo->base.resv = &bo->base._resv; atomic_inc(&ttm_glob.bo_count); - ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource); - if (unlikely(ret)) { - ttm_bo_put(bo); - return ret; - } - /* * For ttm_bo_type_device buffers, allocate * address space from the device. diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 7635d7d6b13b..fd9fd3d15101 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -157,8 +157,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, bool clear; int ret = 0; - if (!src_mem) - return 0; + if (WARN_ON(!src_mem)) + return -EINVAL; src_man = ttm_manager_type(bdev, src_mem->mem_type); if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) || @@ -704,30 +704,23 @@ EXPORT_SYMBOL(ttm_bo_move_sync_cleanup); */ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo) { - static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM }; struct ttm_buffer_object *ghost; - struct ttm_resource *sys_res; struct ttm_tt *ttm; int ret; - ret = ttm_resource_alloc(bo, &sys_mem, &sys_res); - if (ret) - return ret; - /* If already idle, no need for ghost object dance. */ if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP)) { if (!bo->ttm) { /* See comment below about clearing. */ ret = ttm_tt_create(bo, true); if (ret) - goto error_free_sys_mem; + return ret; } else { ttm_tt_unpopulate(bo->bdev, bo->ttm); if (bo->type == ttm_bo_type_device) ttm_tt_mark_for_clear(bo->ttm); } ttm_resource_free(bo, &bo->resource); - ttm_bo_assign_mem(bo, sys_res); return 0; } @@ -744,7 +737,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo) ret = ttm_tt_create(bo, true); swap(bo->ttm, ttm); if (ret) - goto error_free_sys_mem; + return ret; ret = ttm_buffer_object_transfer(bo, &ghost); if (ret) @@ -760,13 +753,9 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo) dma_resv_unlock(&ghost->base._resv); ttm_bo_put(ghost); bo->ttm = ttm; - ttm_bo_assign_mem(bo, sys_res); return 0; error_destroy_tt: ttm_tt_destroy(bo->bdev, ttm); - -error_free_sys_mem: - ttm_resource_free(bo, &sys_res); return ret; } diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c index b8a826a24fb2..7333f7a87a2f 100644 --- a/drivers/gpu/drm/ttm/ttm_resource.c +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -361,7 +361,6 @@ bool ttm_resource_compat(struct ttm_resource *res, return false; } -EXPORT_SYMBOL(ttm_resource_compat); void ttm_resource_set_bo(struct ttm_resource *res, struct ttm_buffer_object *bo) diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 5da1806f3969..2e94ce788c71 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -397,20 +397,6 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data, } static int -v3d_job_add_deps(struct drm_file *file_priv, struct v3d_job *job, - u32 in_sync, u32 point) -{ - struct dma_fence *in_fence = NULL; - int ret; - - ret = drm_syncobj_find_fence(file_priv, in_sync, point, 0, &in_fence); - if (ret == -EINVAL) - return ret; - - return drm_sched_job_add_dependency(&job->base, in_fence); -} - -static int 
v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv, void **container, size_t size, void (*free)(struct kref *ref), u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue) @@ -447,14 +433,18 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv, DRM_DEBUG("Failed to copy wait dep handle.\n"); goto fail_deps; } - ret = v3d_job_add_deps(file_priv, job, in.handle, 0); - if (ret) + ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in.handle, 0); + + // TODO: Investigate why this was filtered out for the IOCTL. + if (ret && ret != -ENOENT) goto fail_deps; } } } else { - ret = v3d_job_add_deps(file_priv, job, in_sync, 0); - if (ret) + ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in_sync, 0); + + // TODO: Investigate why this was filtered out for the IOCTL. + if (ret && ret != -ENOENT) goto fail_deps; } diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 95069bb16821..8768566c610b 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -690,7 +690,7 @@ struct vc4_exec_info { /* This is the array of BOs that were looked up at the start of exec. * Command validation will use indices into this array. */ - struct drm_gem_dma_object **bo; + struct drm_gem_object **bo; uint32_t bo_count; /* List of BOs that are being written by the RCL. Other than diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index 628d40ff3aa1..03648f954985 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -199,7 +199,7 @@ vc4_save_hang_state(struct drm_device *dev) continue; for (j = 0; j < exec[i]->bo_count; j++) { - bo = to_vc4_bo(&exec[i]->bo[j]->base); + bo = to_vc4_bo(exec[i]->bo[j]); /* Retain BOs just in case they were marked purgeable. 
* This prevents the BO from being purged before @@ -207,8 +207,8 @@ vc4_save_hang_state(struct drm_device *dev) */ WARN_ON(!refcount_read(&bo->usecnt)); refcount_inc(&bo->usecnt); - drm_gem_object_get(&exec[i]->bo[j]->base); - kernel_state->bo[k++] = &exec[i]->bo[j]->base; + drm_gem_object_get(exec[i]->bo[j]); + kernel_state->bo[k++] = exec[i]->bo[j]; } list_for_each_entry(bo, &exec[i]->unref_list, unref_head) { @@ -558,7 +558,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno) unsigned i; for (i = 0; i < exec->bo_count; i++) { - bo = to_vc4_bo(&exec->bo[i]->base); + bo = to_vc4_bo(exec->bo[i]); bo->seqno = seqno; dma_resv_add_fence(bo->base.base.resv, exec->fence, @@ -585,11 +585,8 @@ vc4_unlock_bo_reservations(struct drm_device *dev, { int i; - for (i = 0; i < exec->bo_count; i++) { - struct drm_gem_object *bo = &exec->bo[i]->base; - - dma_resv_unlock(bo->resv); - } + for (i = 0; i < exec->bo_count; i++) + dma_resv_unlock(exec->bo[i]->resv); ww_acquire_fini(acquire_ctx); } @@ -614,7 +611,7 @@ vc4_lock_bo_reservations(struct drm_device *dev, retry: if (contended_lock != -1) { - bo = &exec->bo[contended_lock]->base; + bo = exec->bo[contended_lock]; ret = dma_resv_lock_slow_interruptible(bo->resv, acquire_ctx); if (ret) { ww_acquire_done(acquire_ctx); @@ -626,19 +623,19 @@ retry: if (i == contended_lock) continue; - bo = &exec->bo[i]->base; + bo = exec->bo[i]; ret = dma_resv_lock_interruptible(bo->resv, acquire_ctx); if (ret) { int j; for (j = 0; j < i; j++) { - bo = &exec->bo[j]->base; + bo = exec->bo[j]; dma_resv_unlock(bo->resv); } if (contended_lock != -1 && contended_lock >= i) { - bo = &exec->bo[contended_lock]->base; + bo = exec->bo[contended_lock]; dma_resv_unlock(bo->resv); } @@ -659,7 +656,7 @@ retry: * before we commit the CL to the hardware. 
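* Reserving the slots up front guarantees that the later
* dma_resv_add_fence() calls in vc4_update_bo_seqnos() cannot fail.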
*/ for (i = 0; i < exec->bo_count; i++) { - bo = &exec->bo[i]->base; + bo = exec->bo[i]; ret = dma_resv_reserve_fences(bo->resv, 1); if (ret) { @@ -749,7 +746,6 @@ vc4_cl_lookup_bos(struct drm_device *dev, struct vc4_exec_info *exec) { struct drm_vc4_submit_cl *args = exec->args; - uint32_t *handles; int ret = 0; int i; @@ -763,54 +759,18 @@ vc4_cl_lookup_bos(struct drm_device *dev, return -EINVAL; } - exec->bo = kvmalloc_array(exec->bo_count, - sizeof(struct drm_gem_dma_object *), - GFP_KERNEL | __GFP_ZERO); - if (!exec->bo) { - DRM_ERROR("Failed to allocate validated BO pointers\n"); - return -ENOMEM; - } - - handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL); - if (!handles) { - ret = -ENOMEM; - DRM_ERROR("Failed to allocate incoming GEM handles\n"); - goto fail; - } - - if (copy_from_user(handles, u64_to_user_ptr(args->bo_handles), - exec->bo_count * sizeof(uint32_t))) { - ret = -EFAULT; - DRM_ERROR("Failed to copy in GEM handles\n"); - goto fail; - } - - spin_lock(&file_priv->table_lock); - for (i = 0; i < exec->bo_count; i++) { - struct drm_gem_object *bo = idr_find(&file_priv->object_idr, - handles[i]); - if (!bo) { - DRM_DEBUG("Failed to look up GEM BO %d: %d\n", - i, handles[i]); - ret = -EINVAL; - break; - } - - drm_gem_object_get(bo); - exec->bo[i] = (struct drm_gem_dma_object *)bo; - } - spin_unlock(&file_priv->table_lock); + ret = drm_gem_objects_lookup(file_priv, u64_to_user_ptr(args->bo_handles), + exec->bo_count, &exec->bo); if (ret) goto fail_put_bo; for (i = 0; i < exec->bo_count; i++) { - ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base)); + ret = vc4_bo_inc_usecnt(to_vc4_bo(exec->bo[i])); if (ret) goto fail_dec_usecnt; } - kvfree(handles); return 0; fail_dec_usecnt: @@ -823,15 +783,13 @@ fail_dec_usecnt: * step. */ for (i-- ; i >= 0; i--) - vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base)); + vc4_bo_dec_usecnt(to_vc4_bo(exec->bo[i])); fail_put_bo: /* Release any reference to acquired objects. */ for (i = 0; i < exec->bo_count && exec->bo[i]; i++) - drm_gem_object_put(&exec->bo[i]->base); + drm_gem_object_put(exec->bo[i]); -fail: - kvfree(handles); kvfree(exec->bo); exec->bo = NULL; return ret; @@ -974,10 +932,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) if (exec->bo) { for (i = 0; i < exec->bo_count; i++) { - struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base); + struct vc4_bo *bo = to_vc4_bo(exec->bo[i]); vc4_bo_dec_usecnt(bo); - drm_gem_object_put(&exec->bo[i]->base); + drm_gem_object_put(exec->bo[i]); } kvfree(exec->bo); } diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index ea22c9bf223a..d30e4547b4c5 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -1466,6 +1466,12 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, if (!drm_dev_enter(drm, &idx)) goto out; + ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev); + if (ret < 0) { + DRM_ERROR("Failed to retain power domain: %d\n", ret); + goto err_dev_exit; + } + /* * As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must * be faster than pixel clock, infinitesimally faster, tested in @@ -1482,17 +1488,13 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, * Additionally, the AXI clock needs to be at least 25% of * pixel clock, but HSM ends up being the limiting factor. 
*/ - hsm_rate = max_t(unsigned long, 120000000, (tmds_char_rate / 100) * 101); + hsm_rate = max_t(unsigned long, + HSM_MIN_CLOCK_FREQ, + (tmds_char_rate / 100) * 101); ret = clk_set_min_rate(vc4_hdmi->hsm_clock, hsm_rate); if (ret) { DRM_ERROR("Failed to set HSM clock rate: %d\n", ret); - goto err_dev_exit; - } - - ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev); - if (ret < 0) { - DRM_ERROR("Failed to retain power domain: %d\n", ret); - goto err_dev_exit; + goto err_put_runtime_pm; } ret = clk_set_rate(vc4_hdmi->pixel_clock, tmds_char_rate); @@ -3188,16 +3190,9 @@ static int vc4_hdmi_init_resources(struct drm_device *drm, DRM_ERROR("Failed to get HDMI state machine clock\n"); return PTR_ERR(vc4_hdmi->hsm_clock); } - vc4_hdmi->audio_clock = vc4_hdmi->hsm_clock; vc4_hdmi->cec_clock = vc4_hdmi->hsm_clock; - vc4_hdmi->hsm_rpm_clock = devm_clk_get(dev, "hdmi"); - if (IS_ERR(vc4_hdmi->hsm_rpm_clock)) { - DRM_ERROR("Failed to get HDMI state machine clock\n"); - return PTR_ERR(vc4_hdmi->hsm_rpm_clock); - } - return 0; } @@ -3280,12 +3275,6 @@ static int vc5_hdmi_init_resources(struct drm_device *drm, return PTR_ERR(vc4_hdmi->hsm_clock); } - vc4_hdmi->hsm_rpm_clock = devm_clk_get(dev, "hdmi"); - if (IS_ERR(vc4_hdmi->hsm_rpm_clock)) { - DRM_ERROR("Failed to get HDMI state machine clock\n"); - return PTR_ERR(vc4_hdmi->hsm_rpm_clock); - } - vc4_hdmi->pixel_bvb_clock = devm_clk_get(dev, "bvb"); if (IS_ERR(vc4_hdmi->pixel_bvb_clock)) { DRM_ERROR("Failed to get pixel bvb clock\n"); @@ -3349,7 +3338,7 @@ static int vc4_hdmi_runtime_suspend(struct device *dev) { struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); - clk_disable_unprepare(vc4_hdmi->hsm_rpm_clock); + clk_disable_unprepare(vc4_hdmi->hsm_clock); return 0; } @@ -3362,16 +3351,7 @@ static int vc4_hdmi_runtime_resume(struct device *dev) unsigned long rate; int ret; - /* - * The HSM clock is in the HDMI power domain, so we need to set - * its frequency while the power domain is active so that it - * keeps its rate. - */ - ret = clk_set_min_rate(vc4_hdmi->hsm_rpm_clock, HSM_MIN_CLOCK_FREQ); - if (ret) - return ret; - - ret = clk_prepare_enable(vc4_hdmi->hsm_rpm_clock); + ret = clk_prepare_enable(vc4_hdmi->hsm_clock); if (ret) return ret; @@ -3384,7 +3364,7 @@ static int vc4_hdmi_runtime_resume(struct device *dev) * case, it will lead to a silent CPU stall. Let's make sure we * prevent such a case. 
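* (Hence the zero-rate check right below, before any register access.)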
*/ - rate = clk_get_rate(vc4_hdmi->hsm_rpm_clock); + rate = clk_get_rate(vc4_hdmi->hsm_clock); if (!rate) { ret = -EINVAL; goto err_disable_clk; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h index dc3ccd8002a0..e3619836ca17 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.h +++ b/drivers/gpu/drm/vc4/vc4_hdmi.h @@ -164,7 +164,6 @@ struct vc4_hdmi { struct clk *cec_clock; struct clk *pixel_clock; struct clk *hsm_clock; - struct clk *hsm_rpm_clock; struct clk *audio_clock; struct clk *pixel_bvb_clock; diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c index 520231af4df9..7dff3ca5af6b 100644 --- a/drivers/gpu/drm/vc4/vc4_validate.c +++ b/drivers/gpu/drm/vc4/vc4_validate.c @@ -117,7 +117,7 @@ vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex) hindex, exec->bo_count); return NULL; } - obj = exec->bo[hindex]; + obj = to_drm_gem_dma_obj(exec->bo[hindex]); bo = to_vc4_bo(&obj->base); if (bo->validated_shader) { @@ -810,7 +810,7 @@ validate_gl_shader_rec(struct drm_device *dev, return -EINVAL; } - bo[i] = exec->bo[src_handles[i]]; + bo[i] = to_drm_gem_dma_obj(exec->bo[src_handles[i]]); if (!bo[i]) return -EINVAL; } diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c index c2a879734d40..e15754178395 100644 --- a/drivers/gpu/drm/vgem/vgem_fence.c +++ b/drivers/gpu/drm/vgem/vgem_fence.c @@ -249,4 +249,5 @@ void vgem_fence_close(struct vgem_file *vfile) { idr_for_each(&vfile->fence_idr, __vgem_fence_idr_fini, vfile); idr_destroy(&vfile->fence_idr); + mutex_destroy(&vfile->fence_mutex); } diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig index 51ec7c3240c9..ea06ff2aa4b4 100644 --- a/drivers/gpu/drm/virtio/Kconfig +++ b/drivers/gpu/drm/virtio/Kconfig @@ -11,3 +11,14 @@ config DRM_VIRTIO_GPU QEMU based VMMs (like KVM or Xen). If unsure say M. + +config DRM_VIRTIO_GPU_KMS + bool "Virtio GPU driver modesetting support" + depends on DRM_VIRTIO_GPU + default y + help + Enable modesetting support for virtio GPU driver. This can be + disabled in cases where only "headless" usage of the GPU is + required. + + If unsure, say Y. 
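/*
 * Editor's note: a minimal sketch, not part of the patch, of how a bool
 * Kconfig symbol such as DRM_VIRTIO_GPU_KMS is typically consumed at run
 * time. IS_ENABLED() keeps both branches compile-tested, which is the
 * pattern the virtgpu changes below use to mask out the KMS paths.
 */
static inline bool example_virtio_kms_active(unsigned int num_scanouts)
{
	/* headless when the option is off or the host exposes no scanouts */
	return IS_ENABLED(CONFIG_DRM_VIRTIO_GPU_KMS) && num_scanouts > 0;
}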
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index 9ea7611a9e0f..ad924a8502e9 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -336,6 +336,9 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev) { int i, ret; + if (!vgdev->num_scanouts) + return 0; + ret = drmm_mode_config_init(vgdev->ddev); if (ret) return ret; @@ -362,6 +365,9 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev) { int i; + if (!vgdev->num_scanouts) + return; + for (i = 0 ; i < vgdev->num_scanouts; ++i) kfree(vgdev->outputs[i].edid); } diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index ae97b98750b6..add075681e18 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -172,6 +172,10 @@ MODULE_AUTHOR("Alon Levy"); DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops); static const struct drm_driver driver = { + /* + * If KMS is disabled DRIVER_MODESET and DRIVER_ATOMIC are masked + * out via drm_device::driver_features: + */ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC, .open = virtio_gpu_driver_open, .postclose = virtio_gpu_driver_postclose, diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index 27b7f14dae89..5a3b5aaed1f3 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -43,11 +43,13 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work) virtio_cread_le(vgdev->vdev, struct virtio_gpu_config, events_read, &events_read); if (events_read & VIRTIO_GPU_EVENT_DISPLAY) { - if (vgdev->has_edid) - virtio_gpu_cmd_get_edids(vgdev); - virtio_gpu_cmd_get_display_info(vgdev); - virtio_gpu_notify(vgdev); - drm_helper_hpd_irq_event(vgdev->ddev); + if (vgdev->num_scanouts) { + if (vgdev->has_edid) + virtio_gpu_cmd_get_edids(vgdev); + virtio_gpu_cmd_get_display_info(vgdev); + virtio_gpu_notify(vgdev); + drm_helper_hpd_irq_event(vgdev->ddev); + } events_clear |= VIRTIO_GPU_EVENT_DISPLAY; } virtio_cwrite_le(vgdev->vdev, struct virtio_gpu_config, @@ -223,12 +225,15 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev) num_scanouts, &num_scanouts); vgdev->num_scanouts = min_t(uint32_t, num_scanouts, VIRTIO_GPU_MAX_SCANOUTS); - if (!vgdev->num_scanouts) { - DRM_ERROR("num_scanouts is zero\n"); - ret = -EINVAL; - goto err_scanouts; + + if (!IS_ENABLED(CONFIG_DRM_VIRTIO_GPU_KMS) || !vgdev->num_scanouts) { + DRM_INFO("KMS disabled\n"); + vgdev->num_scanouts = 0; + vgdev->has_edid = false; + dev->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC); + } else { + DRM_INFO("number of scanouts: %d\n", num_scanouts); } - DRM_INFO("number of scanouts: %d\n", num_scanouts); virtio_cread_le(vgdev->vdev, struct virtio_gpu_config, num_capsets, &num_capsets); @@ -244,12 +249,14 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev) if (num_capsets) virtio_gpu_get_capsets(vgdev, num_capsets); - if (vgdev->has_edid) - virtio_gpu_cmd_get_edids(vgdev); - virtio_gpu_cmd_get_display_info(vgdev); - virtio_gpu_notify(vgdev); - wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending, - 5 * HZ); + if (vgdev->num_scanouts) { + if (vgdev->has_edid) + virtio_gpu_cmd_get_edids(vgdev); + virtio_gpu_cmd_get_display_info(vgdev); + virtio_gpu_notify(vgdev); + wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending, + 5 * HZ); + } return 0; err_scanouts: diff --git 
a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index a04a9b20896d..e1accfc47edf 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -923,8 +923,7 @@ void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id, cmd_p->hdr.ctx_id = cpu_to_le32(id); cmd_p->nlen = cpu_to_le32(nlen); cmd_p->context_init = cpu_to_le32(context_init); - strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1); - cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0; + strscpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name)); virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); } diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index 2a644f035597..e94479d9cd5b 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \ - vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ + vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o \ vmwgfx_overlay.o vmwgfx_gmrid_manager.o vmwgfx_fence.o \ vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \ vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index 4dcf2eb7aa80..82094c137855 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2011-2023 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -26,40 +26,31 @@ * **************************************************************************/ -#include <drm/ttm/ttm_placement.h> - +#include "vmwgfx_bo.h" #include "vmwgfx_drv.h" -#include "ttm_object.h" -/** - * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct - * vmw_buffer_object. - * - * @bo: Pointer to the TTM buffer object. - * Return: Pointer to the struct vmw_buffer_object embedding the - * TTM buffer object. - */ -static struct vmw_buffer_object * -vmw_buffer_object(struct ttm_buffer_object *bo) +#include <drm/ttm/ttm_placement.h> + +static void vmw_bo_release(struct vmw_bo *vbo) { - return container_of(bo, struct vmw_buffer_object, base); + vmw_bo_unmap(vbo); + drm_gem_object_release(&vbo->tbo.base); } /** - * bo_is_vmw - check if the buffer object is a &vmw_buffer_object - * @bo: ttm buffer object to be checked + * vmw_bo_free - vmw_bo destructor * - * Uses destroy function associated with the object to determine if this is - * a &vmw_buffer_object. - * - * Returns: - * true if the object is of &vmw_buffer_object type, false if not. + * @bo: Pointer to the embedded struct ttm_buffer_object */ -static bool bo_is_vmw(struct ttm_buffer_object *bo) +static void vmw_bo_free(struct ttm_buffer_object *bo) { - return bo->destroy == &vmw_bo_bo_free || - bo->destroy == &vmw_gem_destroy; + struct vmw_bo *vbo = to_vmw_bo(&bo->base); + + WARN_ON(vbo->dirty); + WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree)); + vmw_bo_release(vbo); + kfree(vbo); } /** @@ -72,13 +63,13 @@ static bool bo_is_vmw(struct ttm_buffer_object *bo) * Return: Zero on success, Negative error code on failure. 
In particular * -ERESTARTSYS if interrupted by a signal */ -int vmw_bo_pin_in_placement(struct vmw_private *dev_priv, - struct vmw_buffer_object *buf, - struct ttm_placement *placement, - bool interruptible) +static int vmw_bo_pin_in_placement(struct vmw_private *dev_priv, + struct vmw_bo *buf, + struct ttm_placement *placement, + bool interruptible) { struct ttm_operation_ctx ctx = {interruptible, false }; - struct ttm_buffer_object *bo = &buf->base; + struct ttm_buffer_object *bo = &buf->tbo; int ret; vmw_execbuf_release_pinned_bo(dev_priv); @@ -87,12 +78,7 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv, if (unlikely(ret != 0)) goto err; - if (buf->base.pin_count > 0) - ret = ttm_resource_compat(bo->resource, placement) - ? 0 : -EINVAL; - else - ret = ttm_bo_validate(bo, placement, &ctx); - + ret = ttm_bo_validate(bo, placement, &ctx); if (!ret) vmw_bo_pin_reserved(buf, true); @@ -115,11 +101,11 @@ err: * -ERESTARTSYS if interrupted by a signal */ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv, - struct vmw_buffer_object *buf, + struct vmw_bo *buf, bool interruptible) { struct ttm_operation_ctx ctx = {interruptible, false }; - struct ttm_buffer_object *bo = &buf->base; + struct ttm_buffer_object *bo = &buf->tbo; int ret; vmw_execbuf_release_pinned_bo(dev_priv); @@ -128,17 +114,17 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv, if (unlikely(ret != 0)) goto err; - if (buf->base.pin_count > 0) { - ret = ttm_resource_compat(bo->resource, &vmw_vram_gmr_placement) - ? 0 : -EINVAL; - goto out_unreserve; - } - - ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx); + vmw_bo_placement_set(buf, + VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM, + VMW_BO_DOMAIN_GMR); + ret = ttm_bo_validate(bo, &buf->placement, &ctx); if (likely(ret == 0) || ret == -ERESTARTSYS) goto out_unreserve; - ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx); + vmw_bo_placement_set(buf, + VMW_BO_DOMAIN_VRAM, + VMW_BO_DOMAIN_VRAM); + ret = ttm_bo_validate(bo, &buf->placement, &ctx); out_unreserve: if (!ret) @@ -163,7 +149,7 @@ err: * -ERESTARTSYS if interrupted by a signal */ int vmw_bo_pin_in_vram(struct vmw_private *dev_priv, - struct vmw_buffer_object *buf, + struct vmw_bo *buf, bool interruptible) { return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement, @@ -184,22 +170,13 @@ int vmw_bo_pin_in_vram(struct vmw_private *dev_priv, * -ERESTARTSYS if interrupted by a signal */ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv, - struct vmw_buffer_object *buf, + struct vmw_bo *buf, bool interruptible) { struct ttm_operation_ctx ctx = {interruptible, false }; - struct ttm_buffer_object *bo = &buf->base; - struct ttm_placement placement; - struct ttm_place place; + struct ttm_buffer_object *bo = &buf->tbo; int ret = 0; - place = vmw_vram_placement.placement[0]; - place.lpfn = PFN_UP(bo->resource->size); - placement.num_placement = 1; - placement.placement = &place; - placement.num_busy_placement = 1; - placement.busy_placement = &place; - vmw_execbuf_release_pinned_bo(dev_priv); ret = ttm_bo_reserve(bo, interruptible, false, NULL); if (unlikely(ret != 0)) @@ -213,16 +190,19 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv, if (bo->resource->mem_type == TTM_PL_VRAM && bo->resource->start < PFN_UP(bo->resource->size) && bo->resource->start > 0 && - buf->base.pin_count == 0) { + buf->tbo.pin_count == 0) { ctx.interruptible = false; - (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx); + vmw_bo_placement_set(buf, + VMW_BO_DOMAIN_SYS, + VMW_BO_DOMAIN_SYS); + 
(void)ttm_bo_validate(bo, &buf->placement, &ctx); } - if (buf->base.pin_count > 0) - ret = ttm_resource_compat(bo->resource, &placement) - ? 0 : -EINVAL; - else - ret = ttm_bo_validate(bo, &placement, &ctx); + vmw_bo_placement_set(buf, + VMW_BO_DOMAIN_VRAM, + VMW_BO_DOMAIN_VRAM); + buf->places[0].lpfn = PFN_UP(bo->resource->size); + ret = ttm_bo_validate(bo, &buf->placement, &ctx); /* For some reason we didn't end up at the start of vram */ WARN_ON(ret == 0 && bo->resource->start != 0); @@ -248,10 +228,10 @@ err_unlock: * -ERESTARTSYS if interrupted by a signal */ int vmw_bo_unpin(struct vmw_private *dev_priv, - struct vmw_buffer_object *buf, + struct vmw_bo *buf, bool interruptible) { - struct ttm_buffer_object *bo = &buf->base; + struct ttm_buffer_object *bo = &buf->tbo; int ret; ret = ttm_bo_reserve(bo, interruptible, false, NULL); @@ -293,12 +273,12 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo, * @pin: Whether to pin or unpin. * */ -void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin) +void vmw_bo_pin_reserved(struct vmw_bo *vbo, bool pin) { struct ttm_operation_ctx ctx = { false, true }; struct ttm_place pl; struct ttm_placement placement; - struct ttm_buffer_object *bo = &vbo->base; + struct ttm_buffer_object *bo = &vbo->tbo; uint32_t old_mem_type = bo->resource->mem_type; int ret; @@ -341,9 +321,9 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin) * 3) Buffer object destruction * */ -void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo) +void *vmw_bo_map_and_cache(struct vmw_bo *vbo) { - struct ttm_buffer_object *bo = &vbo->base; + struct ttm_buffer_object *bo = &vbo->tbo; bool not_used; void *virtual; int ret; @@ -366,96 +346,70 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo) * @vbo: The buffer object whose map we are tearing down. * * This function tears down a cached map set up using - * vmw_buffer_object_map_and_cache(). + * vmw_bo_map_and_cache(). */ -void vmw_bo_unmap(struct vmw_buffer_object *vbo) +void vmw_bo_unmap(struct vmw_bo *vbo) { if (vbo->map.bo == NULL) return; ttm_bo_kunmap(&vbo->map); + vbo->map.bo = NULL; } /** - * vmw_bo_bo_free - vmw buffer object destructor - * - * @bo: Pointer to the embedded struct ttm_buffer_object - */ -void vmw_bo_bo_free(struct ttm_buffer_object *bo) -{ - struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo); - - WARN_ON(vmw_bo->dirty); - WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree)); - vmw_bo_unmap(vmw_bo); - drm_gem_object_release(&bo->base); - kfree(vmw_bo); -} - -/* default destructor */ -static void vmw_bo_default_destroy(struct ttm_buffer_object *bo) -{ - kfree(bo); -} - -/** - * vmw_bo_create_kernel - Create a pinned BO for internal kernel use. + * vmw_bo_init - Initialize a vmw buffer object * * @dev_priv: Pointer to the device private struct - * @size: size of the BO we need - * @placement: where to put it - * @p_bo: resulting BO + * @vmw_bo: Buffer object to initialize + * @params: Parameters used to initialize the buffer object + * @destroy: The function used to delete the buffer object + * Returns: Zero on success, negative error code on error. * - * Creates and pin a simple BO for in kernel use. 
*/ -int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size, - struct ttm_placement *placement, - struct ttm_buffer_object **p_bo) +static int vmw_bo_init(struct vmw_private *dev_priv, + struct vmw_bo *vmw_bo, + struct vmw_bo_params *params, + void (*destroy)(struct ttm_buffer_object *)) { struct ttm_operation_ctx ctx = { - .interruptible = false, + .interruptible = params->bo_type != ttm_bo_type_kernel, .no_wait_gpu = false }; - struct ttm_buffer_object *bo; + struct ttm_device *bdev = &dev_priv->bdev; struct drm_device *vdev = &dev_priv->drm; int ret; - bo = kzalloc(sizeof(*bo), GFP_KERNEL); - if (unlikely(!bo)) - return -ENOMEM; + memset(vmw_bo, 0, sizeof(*vmw_bo)); - size = ALIGN(size, PAGE_SIZE); + BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3); + vmw_bo->tbo.priority = 3; + vmw_bo->res_tree = RB_ROOT; - drm_gem_private_object_init(vdev, &bo->base, size); + params->size = ALIGN(params->size, PAGE_SIZE); + drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size); - ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, ttm_bo_type_kernel, - placement, 0, &ctx, NULL, NULL, - vmw_bo_default_destroy); + vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain); + ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type, + &vmw_bo->placement, 0, &ctx, NULL, + NULL, destroy); if (unlikely(ret)) - goto error_free; + return ret; - ttm_bo_pin(bo); - ttm_bo_unreserve(bo); - *p_bo = bo; + if (params->pin) + ttm_bo_pin(&vmw_bo->tbo); + ttm_bo_unreserve(&vmw_bo->tbo); return 0; - -error_free: - kfree(bo); - return ret; } int vmw_bo_create(struct vmw_private *vmw, - size_t size, struct ttm_placement *placement, - bool interruptible, bool pin, - void (*bo_free)(struct ttm_buffer_object *bo), - struct vmw_buffer_object **p_bo) + struct vmw_bo_params *params, + struct vmw_bo **p_bo) { int ret; - BUG_ON(!bo_free); - *p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL); if (unlikely(!*p_bo)) { DRM_ERROR("Failed to allocate a buffer.\n"); @@ -465,9 +419,7 @@ int vmw_bo_create(struct vmw_private *vmw, /* * vmw_bo_init will delete the *p_bo object if it fails */ - ret = vmw_bo_init(vmw, *p_bo, size, - placement, interruptible, pin, - bo_free); + ret = vmw_bo_init(vmw, *p_bo, params, vmw_bo_free); if (unlikely(ret != 0)) goto out_error; @@ -478,57 +430,7 @@ out_error: } /** - * vmw_bo_init - Initialize a vmw buffer object - * - * @dev_priv: Pointer to the device private struct - * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize. - * @size: Buffer object size in bytes. - * @placement: Initial placement. - * @interruptible: Whether waits should be performed interruptible. - * @pin: If the BO should be created pinned at a fixed location. - * @bo_free: The buffer object destructor. - * Returns: Zero on success, negative error code on error. - * - * Note that on error, the code will free the buffer object. 
- */ -int vmw_bo_init(struct vmw_private *dev_priv, - struct vmw_buffer_object *vmw_bo, - size_t size, struct ttm_placement *placement, - bool interruptible, bool pin, - void (*bo_free)(struct ttm_buffer_object *bo)) -{ - struct ttm_operation_ctx ctx = { - .interruptible = interruptible, - .no_wait_gpu = false - }; - struct ttm_device *bdev = &dev_priv->bdev; - struct drm_device *vdev = &dev_priv->drm; - int ret; - - WARN_ON_ONCE(!bo_free); - memset(vmw_bo, 0, sizeof(*vmw_bo)); - BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3); - vmw_bo->base.priority = 3; - vmw_bo->res_tree = RB_ROOT; - - size = ALIGN(size, PAGE_SIZE); - drm_gem_private_object_init(vdev, &vmw_bo->base.base, size); - - ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, ttm_bo_type_device, - placement, 0, &ctx, NULL, NULL, bo_free); - if (unlikely(ret)) { - return ret; - } - - if (pin) - ttm_bo_pin(&vmw_bo->base); - ttm_bo_unreserve(&vmw_bo->base); - - return 0; -} - -/** - * vmw_user_bo_synccpu_grab - Grab a struct vmw_buffer_object for cpu + * vmw_user_bo_synccpu_grab - Grab a struct vmw_bo for cpu * access, idling previous GPU operations on the buffer and optionally * blocking it for further command submissions. * @@ -541,11 +443,11 @@ int vmw_bo_init(struct vmw_private *dev_priv, * * A blocking grab will be automatically released when @tfile is closed. */ -static int vmw_user_bo_synccpu_grab(struct vmw_buffer_object *vmw_bo, +static int vmw_user_bo_synccpu_grab(struct vmw_bo *vmw_bo, uint32_t flags) { bool nonblock = !!(flags & drm_vmw_synccpu_dontblock); - struct ttm_buffer_object *bo = &vmw_bo->base; + struct ttm_buffer_object *bo = &vmw_bo->tbo; int ret; if (flags & drm_vmw_synccpu_allow_cs) { @@ -588,17 +490,17 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp, uint32_t handle, uint32_t flags) { - struct vmw_buffer_object *vmw_bo; + struct vmw_bo *vmw_bo; int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo); if (!ret) { if (!(flags & drm_vmw_synccpu_allow_cs)) { atomic_dec(&vmw_bo->cpu_writers); } - ttm_bo_put(&vmw_bo->base); + ttm_bo_put(&vmw_bo->tbo); } - drm_gem_object_put(&vmw_bo->base.base); + drm_gem_object_put(&vmw_bo->tbo.base); return ret; } @@ -620,7 +522,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data, { struct drm_vmw_synccpu_arg *arg = (struct drm_vmw_synccpu_arg *) data; - struct vmw_buffer_object *vbo; + struct vmw_bo *vbo; int ret; if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0 @@ -639,7 +541,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data, ret = vmw_user_bo_synccpu_grab(vbo, arg->flags); vmw_bo_unreference(&vbo); - drm_gem_object_put(&vbo->base.base); + drm_gem_object_put(&vbo->tbo.base); if (unlikely(ret != 0)) { if (ret == -ERESTARTSYS || ret == -EBUSY) return -EBUSY; @@ -683,8 +585,7 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data, struct drm_vmw_unref_dmabuf_arg *arg = (struct drm_vmw_unref_dmabuf_arg *)data; - drm_gem_handle_delete(file_priv, arg->handle); - return 0; + return drm_gem_handle_delete(file_priv, arg->handle); } @@ -694,14 +595,14 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data, * @filp: The file the handle is registered with. * @handle: The user buffer object handle * @out: Pointer to a where a pointer to the embedded - * struct vmw_buffer_object should be placed. + * struct vmw_bo should be placed. * Return: Zero on success, Negative error code on error. 
* * The vmw buffer object pointer will be refcounted (both ttm and gem) */ int vmw_user_bo_lookup(struct drm_file *filp, - uint32_t handle, - struct vmw_buffer_object **out) + u32 handle, + struct vmw_bo **out) { struct drm_gem_object *gobj; @@ -712,8 +613,8 @@ int vmw_user_bo_lookup(struct drm_file *filp, return -ESRCH; } - *out = gem_to_vmw_bo(gobj); - ttm_bo_get(&(*out)->base); + *out = to_vmw_bo(gobj); + ttm_bo_get(&(*out)->tbo); return 0; } @@ -734,8 +635,7 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo, struct vmw_fence_obj *fence) { struct ttm_device *bdev = bo->bdev; - struct vmw_private *dev_priv = - container_of(bdev, struct vmw_private, bdev); + struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev); int ret; if (fence == NULL) @@ -771,7 +671,7 @@ int vmw_dumb_create(struct drm_file *file_priv, struct drm_mode_create_dumb *args) { struct vmw_private *dev_priv = vmw_priv(dev); - struct vmw_buffer_object *vbo; + struct vmw_bo *vbo; int cpp = DIV_ROUND_UP(args->bpp, 8); int ret; @@ -795,7 +695,7 @@ int vmw_dumb_create(struct drm_file *file_priv, args->size, &args->handle, &vbo); /* drop reference from allocate - handle holds it now */ - drm_gem_object_put(&vbo->base.base); + drm_gem_object_put(&vbo->tbo.base); return ret; } @@ -806,12 +706,8 @@ int vmw_dumb_create(struct drm_file *file_priv, */ void vmw_bo_swap_notify(struct ttm_buffer_object *bo) { - /* Is @bo embedded in a struct vmw_buffer_object? */ - if (!bo_is_vmw(bo)) - return; - /* Kill any cached kernel maps before swapout */ - vmw_bo_unmap(vmw_buffer_object(bo)); + vmw_bo_unmap(to_vmw_bo(&bo->base)); } @@ -828,13 +724,7 @@ void vmw_bo_swap_notify(struct ttm_buffer_object *bo) void vmw_bo_move_notify(struct ttm_buffer_object *bo, struct ttm_resource *mem) { - struct vmw_buffer_object *vbo; - - /* Make sure @bo is embedded in a struct vmw_buffer_object? */ - if (!bo_is_vmw(bo)) - return; - - vbo = container_of(bo, struct vmw_buffer_object, base); + struct vmw_bo *vbo = to_vmw_bo(&bo->base); /* * Kill any cached kernel maps before move to or from VRAM. 
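/*
 * Editor's note: an illustrative caller, not part of the patch, of the
 * vmw_user_bo_lookup() declared in the new vmwgfx_bo.h below. As the
 * kernel-doc above says, a successful lookup leaves the buffer refcounted
 * both as a TTM and as a GEM object, so a caller drops both references,
 * mirroring vmw_user_bo_synccpu_release() earlier in this patch.
 */
static int example_use_user_bo(struct drm_file *filp, u32 handle)
{
	struct vmw_bo *vbo;
	int ret;

	ret = vmw_user_bo_lookup(filp, handle, &vbo);
	if (ret)
		return ret;

	/* ... operate on vbo here ... */

	ttm_bo_put(&vbo->tbo);			/* drop the TTM reference */
	drm_gem_object_put(&vbo->tbo.base);	/* drop the GEM reference */
	return 0;
}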
@@ -852,3 +742,98 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo, if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB) vmw_resource_unbind_list(vbo); } + +static u32 +set_placement_list(struct ttm_place *pl, u32 domain) +{ + u32 n = 0; + + /* + * The placements are ordered according to our preferences + */ + if (domain & VMW_BO_DOMAIN_MOB) { + pl[n].mem_type = VMW_PL_MOB; + pl[n].flags = 0; + pl[n].fpfn = 0; + pl[n].lpfn = 0; + n++; + } + if (domain & VMW_BO_DOMAIN_GMR) { + pl[n].mem_type = VMW_PL_GMR; + pl[n].flags = 0; + pl[n].fpfn = 0; + pl[n].lpfn = 0; + n++; + } + if (domain & VMW_BO_DOMAIN_VRAM) { + pl[n].mem_type = TTM_PL_VRAM; + pl[n].flags = 0; + pl[n].fpfn = 0; + pl[n].lpfn = 0; + n++; + } + if (domain & VMW_BO_DOMAIN_WAITABLE_SYS) { + pl[n].mem_type = VMW_PL_SYSTEM; + pl[n].flags = 0; + pl[n].fpfn = 0; + pl[n].lpfn = 0; + n++; + } + if (domain & VMW_BO_DOMAIN_SYS) { + pl[n].mem_type = TTM_PL_SYSTEM; + pl[n].flags = 0; + pl[n].fpfn = 0; + pl[n].lpfn = 0; + n++; + } + + WARN_ON(!n); + if (!n) { + pl[n].mem_type = TTM_PL_SYSTEM; + pl[n].flags = 0; + pl[n].fpfn = 0; + pl[n].lpfn = 0; + n++; + } + return n; +} + +void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain) +{ + struct ttm_device *bdev = bo->tbo.bdev; + struct vmw_private *vmw = vmw_priv_from_ttm(bdev); + struct ttm_placement *pl = &bo->placement; + bool mem_compatible = false; + u32 i; + + pl->placement = bo->places; + pl->num_placement = set_placement_list(bo->places, domain); + + if (drm_debug_enabled(DRM_UT_DRIVER) && bo->tbo.resource) { + for (i = 0; i < pl->num_placement; ++i) { + if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM || + bo->tbo.resource->mem_type == pl->placement[i].mem_type) + mem_compatible = true; + } + if (!mem_compatible) + drm_warn(&vmw->drm, + "%s: Incompatible transition from " + "bo->base.resource->mem_type = %u to domain = %u\n", + __func__, bo->tbo.resource->mem_type, domain); + } + + pl->busy_placement = bo->busy_places; + pl->num_busy_placement = set_placement_list(bo->busy_places, busy_domain); +} + +void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo) +{ + struct ttm_device *bdev = bo->tbo.bdev; + struct vmw_private *vmw = vmw_priv_from_ttm(bdev); + u32 domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM; + + if (vmw->has_mob) + domain = VMW_BO_DOMAIN_MOB; + + vmw_bo_placement_set(bo, domain, domain); +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h new file mode 100644 index 000000000000..50a836e70994 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h @@ -0,0 +1,203 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/************************************************************************** + * + * Copyright 2023 VMware, Inc., Palo Alto, CA., USA + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#ifndef VMWGFX_BO_H +#define VMWGFX_BO_H + +#include "device_include/svga_reg.h" + +#include <drm/ttm/ttm_bo.h> +#include <drm/ttm/ttm_placement.h> + +#include <linux/rbtree_types.h> +#include <linux/types.h> + +struct vmw_bo_dirty; +struct vmw_fence_obj; +struct vmw_private; +struct vmw_resource; + +enum vmw_bo_domain { + VMW_BO_DOMAIN_SYS = BIT(0), + VMW_BO_DOMAIN_WAITABLE_SYS = BIT(1), + VMW_BO_DOMAIN_VRAM = BIT(2), + VMW_BO_DOMAIN_GMR = BIT(3), + VMW_BO_DOMAIN_MOB = BIT(4), +}; + +struct vmw_bo_params { + u32 domain; + u32 busy_domain; + enum ttm_bo_type bo_type; + size_t size; + bool pin; +}; + +/** + * struct vmw_bo - TTM buffer object with vmwgfx additions + * @tbo: The TTM buffer object + * @placement: The preferred placement for this buffer object + * @places: The chosen places for the preferred placement. + * @busy_places: Chosen busy places for the preferred placement + * @map: Kmap object for semi-persistent mappings + * @res_tree: RB tree of resources using this buffer object as a backing MOB + * @res_prios: Eviction priority counts for attached resources + * @cpu_writers: Number of synccpu write grabs. Protected by reservation when + * increased. May be decreased without reservation. + * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB + * @dirty: structure for user-space dirty-tracking + */ +struct vmw_bo { + struct ttm_buffer_object tbo; + + struct ttm_placement placement; + struct ttm_place places[5]; + struct ttm_place busy_places[5]; + + /* Protected by reservation */ + struct ttm_bo_kmap_obj map; + + struct rb_root res_tree; + u32 res_prios[TTM_MAX_BO_PRIORITY]; + + atomic_t cpu_writers; + /* Not ref-counted. 
Protected by binding_mutex */ + struct vmw_resource *dx_query_ctx; + struct vmw_bo_dirty *dirty; +}; + +void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain); +void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo); + +int vmw_bo_create(struct vmw_private *dev_priv, + struct vmw_bo_params *params, + struct vmw_bo **p_bo); + +int vmw_bo_unref_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +int vmw_bo_pin_in_vram(struct vmw_private *dev_priv, + struct vmw_bo *buf, + bool interruptible); +int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv, + struct vmw_bo *buf, + bool interruptible); +int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv, + struct vmw_bo *bo, + bool interruptible); +void vmw_bo_pin_reserved(struct vmw_bo *bo, bool pin); +int vmw_bo_unpin(struct vmw_private *vmw_priv, + struct vmw_bo *bo, + bool interruptible); + +void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf, + SVGAGuestPtr *ptr); +int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +void vmw_bo_fence_single(struct ttm_buffer_object *bo, + struct vmw_fence_obj *fence); + +void *vmw_bo_map_and_cache(struct vmw_bo *vbo); +void vmw_bo_unmap(struct vmw_bo *vbo); + +void vmw_bo_move_notify(struct ttm_buffer_object *bo, + struct ttm_resource *mem); +void vmw_bo_swap_notify(struct ttm_buffer_object *bo); + +int vmw_user_bo_lookup(struct drm_file *filp, + u32 handle, + struct vmw_bo **out); +/** + * vmw_bo_prio_adjust - Adjust the buffer object eviction priority + * according to attached resources + * @vbo: The struct vmw_bo + */ +static inline void vmw_bo_prio_adjust(struct vmw_bo *vbo) +{ + int i = ARRAY_SIZE(vbo->res_prios); + + while (i--) { + if (vbo->res_prios[i]) { + vbo->tbo.priority = i; + return; + } + } + + vbo->tbo.priority = 3; +} + +/** + * vmw_bo_prio_add - Notify a buffer object of a newly attached resource + * eviction priority + * @vbo: The struct vmw_bo + * @prio: The resource priority + * + * After being notified, the code assigns the highest resource eviction priority + * to the backing buffer object (mob). + */ +static inline void vmw_bo_prio_add(struct vmw_bo *vbo, int prio) +{ + if (vbo->res_prios[prio]++ == 0) + vmw_bo_prio_adjust(vbo); +} + +/** + * vmw_bo_prio_del - Notify a buffer object of a resource with a certain + * priority being removed + * @vbo: The struct vmw_bo + * @prio: The resource priority + * + * After being notified, the code assigns the highest resource eviction priority + * to the backing buffer object (mob). 
+ */ +static inline void vmw_bo_prio_del(struct vmw_bo *vbo, int prio) +{ + if (--vbo->res_prios[prio] == 0) + vmw_bo_prio_adjust(vbo); +} + +static inline void vmw_bo_unreference(struct vmw_bo **buf) +{ + struct vmw_bo *tmp_buf = *buf; + + *buf = NULL; + if (tmp_buf) + ttm_bo_put(&tmp_buf->tbo); +} + +static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf) +{ + ttm_bo_get(&buf->tbo); + return buf; +} + +static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj) +{ + return container_of((gobj), struct vmw_bo, tbo.base); +} + +#endif // VMWGFX_BO_H diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c index 162dfeb1cc5a..195ff8792e5a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright 2009-2020 VMware, Inc., Palo Alto, CA., USA + * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -24,13 +24,13 @@ * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ - -#include <linux/sched/signal.h> +#include "vmwgfx_bo.h" +#include "vmwgfx_drv.h" +#include "vmwgfx_devcaps.h" #include <drm/ttm/ttm_placement.h> -#include "vmwgfx_drv.h" -#include "vmwgfx_devcaps.h" +#include <linux/sched/signal.h> bool vmw_supports_3d(struct vmw_private *dev_priv) { @@ -567,7 +567,7 @@ static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv, * without writing to the query result structure. */ - struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base; + struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->tbo; struct { SVGA3dCmdHeader header; SVGA3dCmdWaitForQuery body; @@ -613,7 +613,7 @@ static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv, * without writing to the query result structure. */ - struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base; + struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->tbo; struct { SVGA3dCmdHeader header; SVGA3dCmdWaitForGBQuery body; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c index 2b843ff4b437..94e8982f5616 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright 2015 VMware, Inc., Palo Alto, CA., USA + * Copyright 2015-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -25,12 +25,13 @@ * **************************************************************************/ -#include <linux/dmapool.h> -#include <linux/pci.h> +#include "vmwgfx_bo.h" +#include "vmwgfx_drv.h" #include <drm/ttm/ttm_bo.h> -#include "vmwgfx_drv.h" +#include <linux/dmapool.h> +#include <linux/pci.h> /* * Size of inline command buffers. Try to make sure that a page size is a @@ -79,7 +80,6 @@ struct vmw_cmdbuf_context { * frees are protected by @lock. * @cmd_space: Buffer object for the command buffer space, unless we were * able to make a contigous coherent DMA memory allocation, @handle. Immutable. - * @map_obj: Mapping state for @cmd_space. Immutable. 
* @map: Pointer to command buffer space. May be a mapped buffer object or + * a contiguous coherent DMA memory allocation. Immutable. * @cur: Command buffer for small kernel command submissions. Protected by @@ -116,8 +116,7 @@ struct vmw_cmdbuf_man { struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX]; struct list_head error; struct drm_mm mm; - struct ttm_buffer_object *cmd_space; - struct ttm_bo_kmap_obj map_obj; + struct vmw_bo *cmd_space; u8 *map; struct vmw_cmdbuf_header *cur; size_t cur_pos; @@ -888,7 +887,7 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man, header->cmd = man->map + offset; if (man->using_mob) { cb_hdr->flags = SVGA_CB_FLAG_MOB; - cb_hdr->ptr.mob.mobid = man->cmd_space->resource->start; + cb_hdr->ptr.mob.mobid = man->cmd_space->tbo.resource->start; cb_hdr->ptr.mob.mobOffset = offset; } else { cb_hdr->ptr.pa = (u64)man->handle + (u64)offset; @@ -1221,7 +1220,6 @@ static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context, int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size) { struct vmw_private *dev_priv = man->dev_priv; - bool dummy; int ret; if (man->has_pool) @@ -1234,6 +1232,13 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size) if (man->map) { man->using_mob = false; } else { + struct vmw_bo_params bo_params = { + .domain = VMW_BO_DOMAIN_MOB, + .busy_domain = VMW_BO_DOMAIN_MOB, + .bo_type = ttm_bo_type_kernel, + .size = size, + .pin = true + }; /* * DMA memory failed. If we can have command buffers in a * MOB, try to use that instead. Note that this will @@ -1244,19 +1249,12 @@ !dev_priv->has_mob) return -ENOMEM; - ret = vmw_bo_create_kernel(dev_priv, size, - &vmw_mob_placement, - &man->cmd_space); + ret = vmw_bo_create(dev_priv, &bo_params, &man->cmd_space); if (ret) return ret; - man->using_mob = true; - ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT, - &man->map_obj); - if (ret) - goto out_no_map; - - man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy); + man->map = vmw_bo_map_and_cache(man->cmd_space); + man->using_mob = man->map; } man->size = size; @@ -1276,14 +1274,6 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size) (man->using_mob) ? 
"MOB" : "DMA"); return 0; - -out_no_map: - if (man->using_mob) { - ttm_bo_put(man->cmd_space); - man->cmd_space = NULL; - } - - return ret; } /** @@ -1382,14 +1372,11 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man) man->has_pool = false; man->default_size = VMW_CMDBUF_INLINE_SIZE; (void) vmw_cmdbuf_idle(man, false, 10*HZ); - if (man->using_mob) { - (void) ttm_bo_kunmap(&man->map_obj); - ttm_bo_put(man->cmd_space); - man->cmd_space = NULL; - } else { + if (man->using_mob) + vmw_bo_unreference(&man->cmd_space); + else dma_free_coherent(man->dev_priv->drm.dev, man->size, man->map, man->handle); - } } /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c index e0f48cd9529b..ecc503e42790 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA + * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -27,9 +27,10 @@ #include <drm/ttm/ttm_placement.h> +#include "vmwgfx_binding.h" +#include "vmwgfx_bo.h" #include "vmwgfx_drv.h" #include "vmwgfx_resource_priv.h" -#include "vmwgfx_binding.h" struct vmw_user_context { struct ttm_base_object base; @@ -38,7 +39,7 @@ struct vmw_user_context { struct vmw_cmdbuf_res_manager *man; struct vmw_resource *cotables[SVGA_COTABLE_MAX]; spinlock_t cotable_lock; - struct vmw_buffer_object *dx_query_mob; + struct vmw_bo *dx_query_mob; }; static void vmw_user_context_free(struct vmw_resource *res); @@ -72,10 +73,11 @@ const struct vmw_user_resource_conv *user_context_converter = static const struct vmw_res_func vmw_legacy_context_func = { .res_type = vmw_res_context, - .needs_backup = false, + .needs_guest_memory = false, .may_evict = false, .type_name = "legacy contexts", - .backup_placement = NULL, + .domain = VMW_BO_DOMAIN_SYS, + .busy_domain = VMW_BO_DOMAIN_SYS, .create = NULL, .destroy = NULL, .bind = NULL, @@ -84,12 +86,13 @@ static const struct vmw_res_func vmw_legacy_context_func = { static const struct vmw_res_func vmw_gb_context_func = { .res_type = vmw_res_context, - .needs_backup = true, + .needs_guest_memory = true, .may_evict = true, .prio = 3, .dirty_prio = 3, .type_name = "guest backed contexts", - .backup_placement = &vmw_mob_placement, + .domain = VMW_BO_DOMAIN_MOB, + .busy_domain = VMW_BO_DOMAIN_MOB, .create = vmw_gb_context_create, .destroy = vmw_gb_context_destroy, .bind = vmw_gb_context_bind, @@ -98,12 +101,13 @@ static const struct vmw_res_func vmw_gb_context_func = { static const struct vmw_res_func vmw_dx_context_func = { .res_type = vmw_res_dx_context, - .needs_backup = true, + .needs_guest_memory = true, .may_evict = true, .prio = 3, .dirty_prio = 3, .type_name = "dx contexts", - .backup_placement = &vmw_mob_placement, + .domain = VMW_BO_DOMAIN_MOB, + .busy_domain = VMW_BO_DOMAIN_MOB, .create = vmw_dx_context_create, .destroy = vmw_dx_context_destroy, .bind = vmw_dx_context_bind, @@ -182,7 +186,7 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv, struct vmw_user_context *uctx = container_of(res, struct vmw_user_context, res); - res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) : + res->guest_memory_size = (dx ? 
sizeof(SVGADXContextMobFormat) : sizeof(SVGAGBContextData)); ret = vmw_resource_init(dev_priv, res, true, res_free, @@ -354,8 +358,8 @@ static int vmw_gb_context_bind(struct vmw_resource *res, cmd->header.size = sizeof(cmd->body); cmd->body.cid = res->id; cmd->body.mobid = bo->resource->start; - cmd->body.validContents = res->backup_dirty; - res->backup_dirty = false; + cmd->body.validContents = res->guest_memory_dirty; + res->guest_memory_dirty = false; vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; @@ -521,8 +525,8 @@ static int vmw_dx_context_bind(struct vmw_resource *res, cmd->header.size = sizeof(cmd->body); cmd->body.cid = res->id; cmd->body.mobid = bo->resource->start; - cmd->body.validContents = res->backup_dirty; - res->backup_dirty = false; + cmd->body.validContents = res->guest_memory_dirty; + res->guest_memory_dirty = false; vmw_cmd_commit(dev_priv, sizeof(*cmd)); @@ -853,7 +857,7 @@ vmw_context_binding_state(struct vmw_resource *ctx) * specified in the parameter. 0 otherwise. */ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res, - struct vmw_buffer_object *mob) + struct vmw_bo *mob) { struct vmw_user_context *uctx = container_of(ctx_res, struct vmw_user_context, res); @@ -885,7 +889,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res, * * @ctx_res: The context resource */ -struct vmw_buffer_object * +struct vmw_bo * vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res) { struct vmw_user_context *uctx = diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index b78a10312fad..c0b24d1cacbf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA + * Copyright 2014-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -30,13 +30,14 @@ * whenever the backing MOB is evicted. */ -#include <drm/ttm/ttm_placement.h> - +#include "vmwgfx_bo.h" #include "vmwgfx_drv.h" #include "vmwgfx_mksstat.h" #include "vmwgfx_resource_priv.h" #include "vmwgfx_so.h" +#include <drm/ttm/ttm_placement.h> + /** * struct vmw_cotable - Context Object Table resource * @@ -130,12 +131,13 @@ static int vmw_cotable_destroy(struct vmw_resource *res); static const struct vmw_res_func vmw_cotable_func = { .res_type = vmw_res_cotable, - .needs_backup = true, + .needs_guest_memory = true, .may_evict = true, .prio = 3, .dirty_prio = 3, .type_name = "context guest backed object tables", - .backup_placement = &vmw_mob_placement, + .domain = VMW_BO_DOMAIN_MOB, + .busy_domain = VMW_BO_DOMAIN_MOB, .create = vmw_cotable_create, .destroy = vmw_cotable_destroy, .bind = vmw_cotable_bind, @@ -180,7 +182,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res) { struct vmw_cotable *vcotbl = vmw_cotable(res); struct vmw_private *dev_priv = res->dev_priv; - struct ttm_buffer_object *bo = &res->backup->base; + struct ttm_buffer_object *bo = &res->guest_memory_bo->tbo; struct { SVGA3dCmdHeader header; SVGA3dCmdDXSetCOTable body; @@ -228,7 +230,7 @@ static int vmw_cotable_bind(struct vmw_resource *res, * take the opportunity to correct the value here so that it's not * misused in the future. 
*/ - val_buf->bo = &res->backup->base; + val_buf->bo = &res->guest_memory_bo->tbo; return vmw_cotable_unscrub(res); } @@ -289,7 +291,7 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback) cmd0->body.cid = vcotbl->ctx->id; cmd0->body.type = vcotbl->type; cmd1 = (void *) &cmd0[1]; - vcotbl->size_read_back = res->backup_size; + vcotbl->size_read_back = res->guest_memory_size; } cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE; cmd1->header.size = sizeof(cmd1->body); @@ -371,12 +373,12 @@ static int vmw_cotable_readback(struct vmw_resource *res) cmd->header.size = sizeof(cmd->body); cmd->body.cid = vcotbl->ctx->id; cmd->body.type = vcotbl->type; - vcotbl->size_read_back = res->backup_size; + vcotbl->size_read_back = res->guest_memory_size; vmw_cmd_commit(dev_priv, sizeof(*cmd)); } (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); - vmw_bo_fence_single(&res->backup->base, fence); + vmw_bo_fence_single(&res->guest_memory_bo->tbo, fence); vmw_fence_obj_unreference(&fence); return 0; @@ -399,14 +401,21 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) struct ttm_operation_ctx ctx = { false, false }; struct vmw_private *dev_priv = res->dev_priv; struct vmw_cotable *vcotbl = vmw_cotable(res); - struct vmw_buffer_object *buf, *old_buf = res->backup; - struct ttm_buffer_object *bo, *old_bo = &res->backup->base; - size_t old_size = res->backup_size; + struct vmw_bo *buf, *old_buf = res->guest_memory_bo; + struct ttm_buffer_object *bo, *old_bo = &res->guest_memory_bo->tbo; + size_t old_size = res->guest_memory_size; size_t old_size_read_back = vcotbl->size_read_back; size_t cur_size_read_back; struct ttm_bo_kmap_obj old_map, new_map; int ret; size_t i; + struct vmw_bo_params bo_params = { + .domain = VMW_BO_DOMAIN_MOB, + .busy_domain = VMW_BO_DOMAIN_MOB, + .bo_type = ttm_bo_type_device, + .size = new_size, + .pin = true + }; MKS_STAT_TIME_DECL(MKSSTAT_KERN_COTABLE_RESIZE); MKS_STAT_TIME_PUSH(MKSSTAT_KERN_COTABLE_RESIZE); @@ -423,14 +432,13 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) * for the new COTable. Initially pin the buffer object to make sure * we can use tryreserve without failure. */ - ret = vmw_bo_create(dev_priv, new_size, &vmw_mob_placement, - true, true, vmw_bo_bo_free, &buf); + ret = vmw_bo_create(dev_priv, &bo_params, &buf); if (ret) { DRM_ERROR("Failed initializing new cotable MOB.\n"); goto out_done; } - bo = &buf->base; + bo = &buf->tbo; WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL)); ret = ttm_bo_wait(old_bo, false, false); @@ -464,15 +472,18 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) } /* Unpin new buffer, and switch backup buffers. 
*/ - ret = ttm_bo_validate(bo, &vmw_mob_placement, &ctx); + vmw_bo_placement_set(buf, + VMW_BO_DOMAIN_MOB, + VMW_BO_DOMAIN_MOB); + ret = ttm_bo_validate(bo, &buf->placement, &ctx); if (unlikely(ret != 0)) { DRM_ERROR("Failed validating new COTable backup buffer.\n"); goto out_wait; } vmw_resource_mob_detach(res); - res->backup = buf; - res->backup_size = new_size; + res->guest_memory_bo = buf; + res->guest_memory_size = new_size; vcotbl->size_read_back = cur_size_read_back; /* @@ -482,8 +493,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) ret = vmw_cotable_unscrub(res); if (ret) { DRM_ERROR("Failed switching COTable backup buffer.\n"); - res->backup = old_buf; - res->backup_size = old_size; + res->guest_memory_bo = old_buf; + res->guest_memory_size = old_size; vcotbl->size_read_back = old_size_read_back; vmw_resource_mob_attach(res); goto out_wait; @@ -498,7 +509,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) if (unlikely(ret)) goto out_wait; - /* Release the pin acquired in vmw_bo_init */ + /* Release the pin acquired in vmw_bo_create */ ttm_bo_unpin(bo); MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE); @@ -533,7 +544,7 @@ out_done: static int vmw_cotable_create(struct vmw_resource *res) { struct vmw_cotable *vcotbl = vmw_cotable(res); - size_t new_size = res->backup_size; + size_t new_size = res->guest_memory_size; size_t needed_size; int ret; @@ -542,7 +553,7 @@ static int vmw_cotable_create(struct vmw_resource *res) while (needed_size > new_size) new_size *= 2; - if (likely(new_size <= res->backup_size)) { + if (likely(new_size <= res->guest_memory_size)) { if (vcotbl->scrubbed && vmw_resource_mob_attached(res)) { ret = vmw_cotable_unscrub(res); if (ret) @@ -606,12 +617,12 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv, INIT_LIST_HEAD(&vcotbl->resource_list); vcotbl->res.id = type; - vcotbl->res.backup_size = PAGE_SIZE; + vcotbl->res.guest_memory_size = PAGE_SIZE; num_entries = PAGE_SIZE / co_info[type].size; if (num_entries < co_info[type].min_initial_entries) { - vcotbl->res.backup_size = co_info[type].min_initial_entries * + vcotbl->res.guest_memory_size = co_info[type].min_initial_entries * co_info[type].size; - vcotbl->res.backup_size = PFN_ALIGN(vcotbl->res.backup_size); + vcotbl->res.guest_memory_size = PFN_ALIGN(vcotbl->res.guest_memory_size); } vcotbl->scrubbed = true; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 9ad28346aff7..2588615a2a38 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA + * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -28,9 +28,10 @@ #include "vmwgfx_drv.h" +#include "vmwgfx_bo.h" +#include "vmwgfx_binding.h" #include "vmwgfx_devcaps.h" #include "vmwgfx_mksstat.h" -#include "vmwgfx_binding.h" #include "ttm_object.h" #include <drm/drm_aperture.h> @@ -386,27 +387,32 @@ static void vmw_print_sm_type(struct vmw_private *dev_priv) static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) { int ret; - struct vmw_buffer_object *vbo; + struct vmw_bo *vbo; struct ttm_bo_kmap_obj map; volatile SVGA3dQueryResult *result; bool dummy; + struct 
vmw_bo_params bo_params = { + .domain = VMW_BO_DOMAIN_SYS, + .busy_domain = VMW_BO_DOMAIN_SYS, + .bo_type = ttm_bo_type_kernel, + .size = PAGE_SIZE, + .pin = true + }; /* * Create the vbo as pinned, so that a tryreserve will * immediately succeed. This is because we're the only * user of the bo currently. */ - ret = vmw_bo_create(dev_priv, PAGE_SIZE, - &vmw_sys_placement, false, true, - &vmw_bo_bo_free, &vbo); + ret = vmw_bo_create(dev_priv, &bo_params, &vbo); if (unlikely(ret != 0)) return ret; - ret = ttm_bo_reserve(&vbo->base, false, true, NULL); + ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL); BUG_ON(ret != 0); vmw_bo_pin_reserved(vbo, true); - ret = ttm_bo_kmap(&vbo->base, 0, 1, &map); + ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map); if (likely(ret == 0)) { result = ttm_kmap_obj_virtual(&map, &dummy); result->totalSize = sizeof(*result); @@ -415,7 +421,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) ttm_bo_kunmap(&map); } vmw_bo_pin_reserved(vbo, false); - ttm_bo_unreserve(&vbo->base); + ttm_bo_unreserve(&vbo->tbo); if (unlikely(ret != 0)) { DRM_ERROR("Dummy query buffer map failed.\n"); @@ -1565,7 +1571,7 @@ static const struct file_operations vmwgfx_driver_fops = { .open = drm_open, .release = drm_release, .unlocked_ioctl = vmw_unlocked_ioctl, - .mmap = vmw_mmap, + .mmap = drm_gem_mmap, .poll = drm_poll, .read = drm_read, #if defined(CONFIG_COMPAT) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 203fa32cd4c1..fb8f0c0642c0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 OR MIT */ /************************************************************************** * - * Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA + * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -117,32 +117,6 @@ struct vmwgfx_hash_item { unsigned long key; }; -/** - * struct vmw_buffer_object - TTM buffer object with vmwgfx additions - * @base: The TTM buffer object - * @res_tree: RB tree of resources using this buffer object as a backing MOB - * @base_mapped_count: ttm BO mapping count; used by KMS atomic helpers. - * @cpu_writers: Number of synccpu write grabs. Protected by reservation when - * increased. May be decreased without reservation. - * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB - * @map: Kmap object for semi-persistent mappings - * @res_prios: Eviction priority counts for attached resources - * @dirty: structure for user-space dirty-tracking - */ -struct vmw_buffer_object { - struct ttm_buffer_object base; - struct rb_root res_tree; - /* For KMS atomic helpers: ttm bo mapping count */ - atomic_t base_mapped_count; - - atomic_t cpu_writers; - /* Not ref-counted. Protected by binding_mutex */ - struct vmw_resource *dx_query_ctx; - /* Protected by reservation */ - struct ttm_bo_kmap_obj map; - u32 res_prios[TTM_MAX_BO_PRIORITY]; - struct vmw_bo_dirty *dirty; -}; /** * struct vmw_validate_buffer - Carries validation info about buffers. @@ -168,21 +142,23 @@ struct vmw_res_func; * @kref: For refcounting. * @dev_priv: Pointer to the device private for this resource. Immutable. * @id: Device id. Protected by @dev_priv::resource_lock. - * @backup_size: Backup buffer size. Immutable. - * @res_dirty: Resource contains data not yet in the backup buffer. 
Protected - * by resource reserved. - * @backup_dirty: Backup buffer contains data not yet in the HW resource. + * @guest_memory_size: Guest memory buffer size. Immutable. + * @res_dirty: Resource contains data not yet in the guest memory buffer. * Protected by resource reserved. + * @guest_memory_dirty: Guest memory buffer contains data not yet in the HW + * resource. Protected by resource reserved. * @coherent: Emulate coherency by tracking vm accesses. - * @backup: The backup buffer if any. Protected by resource reserved. - * @backup_offset: Offset into the backup buffer if any. Protected by resource - * reserved. Note that only a few resource types can have a @backup_offset - * different from zero. + * @guest_memory_bo: The guest memory buffer if any. Protected by resource + * reserved. + * @guest_memory_offset: Offset into the guest memory buffer if any. Protected + * by resource reserved. Note that only a few resource types can have a + * @guest_memory_offset different from zero. * @pin_count: The pin count for this resource. A pinned resource has a * pin-count greater than zero. It is not on the resource LRU lists and its - * backup buffer is pinned. Hence it can't be evicted. + * guest memory buffer is pinned. Hence it can't be evicted. * @func: Method vtable for this resource. Immutable. - * @mob_node; Node for the MOB backup rbtree. Protected by @backup reserved. + * @mob_node: Node for the MOB guest memory rbtree. Protected by + * @guest_memory_bo reserved. * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock. * @binding_head: List head for the context binding list. Protected by * the @dev_priv::binding_mutex @@ -190,18 +166,19 @@ struct vmw_res_func; * @hw_destroy: Callback to destroy the resource on the device, as part of * resource destruction. */ +struct vmw_bo; struct vmw_resource_dirty; struct vmw_resource { struct kref kref; struct vmw_private *dev_priv; int id; u32 used_prio; - unsigned long backup_size; + unsigned long guest_memory_size; u32 res_dirty : 1; - u32 backup_dirty : 1; + u32 guest_memory_dirty : 1; u32 coherent : 1; - struct vmw_buffer_object *backup; - unsigned long backup_offset; + struct vmw_bo *guest_memory_bo; + unsigned long guest_memory_offset; unsigned long pin_count; const struct vmw_res_func *func; struct rb_node mob_node; @@ -446,7 +424,7 @@ struct vmw_sw_context{ struct drm_file *filp; uint32_t *cmd_bounce; uint32_t cmd_bounce_size; - struct vmw_buffer_object *cur_query_bo; + struct vmw_bo *cur_query_bo; struct list_head bo_relocations; struct list_head res_relocations; uint32_t *buf_start; @@ -458,7 +436,7 @@ struct vmw_sw_context{ struct list_head staged_cmd_res; struct list_head ctx_list; struct vmw_ctx_validation_info *dx_ctx_node; - struct vmw_buffer_object *dx_query_mob; + struct vmw_bo *dx_query_mob; struct vmw_resource *dx_query_ctx; struct vmw_cmdbuf_res_manager *man; struct vmw_validation_context *ctx; @@ -492,7 +470,7 @@ struct vmw_otable_batch { unsigned num_otables; struct vmw_otable *otables; struct vmw_resource *context; - struct ttm_buffer_object *otable_bo; + struct vmw_bo *otable_bo; }; enum { @@ -632,8 +610,8 @@ struct vmw_private { * are protected by the cmdbuf mutex. 
*/ - struct vmw_buffer_object *dummy_query_bo; - struct vmw_buffer_object *pinned_bo; + struct vmw_bo *dummy_query_bo; + struct vmw_bo *pinned_bo; uint32_t query_cid; uint32_t query_cid_valid; bool dummy_query_bo_pinned; @@ -677,11 +655,6 @@ struct vmw_private { #endif }; -static inline struct vmw_buffer_object *gem_to_vmw_bo(struct drm_gem_object *gobj) -{ - return container_of((gobj), struct vmw_buffer_object, base.base); -} - static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) { return container_of(res, struct vmw_surface, res); @@ -692,6 +665,11 @@ static inline struct vmw_private *vmw_priv(struct drm_device *dev) return (struct vmw_private *)dev->dev_private; } +static inline struct vmw_private *vmw_priv_from_ttm(struct ttm_device *bdev) +{ + return container_of(bdev, struct vmw_private, bdev); +} + static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv) { return (struct vmw_fpriv *)file_priv->driver_priv; @@ -825,7 +803,7 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv, struct drm_file *filp, uint32_t handle, struct vmw_surface **out_surf, - struct vmw_buffer_object **out_buf); + struct vmw_bo **out_buf); extern int vmw_user_resource_lookup_handle( struct vmw_private *dev_priv, struct ttm_object_file *tfile, @@ -844,20 +822,20 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv, extern void vmw_resource_unreserve(struct vmw_resource *res, bool dirty_set, bool dirty, - bool switch_backup, - struct vmw_buffer_object *new_backup, - unsigned long new_backup_offset); + bool switch_guest_memory, + struct vmw_bo *new_guest_memory, + unsigned long new_guest_memory_offset); extern void vmw_query_move_notify(struct ttm_buffer_object *bo, struct ttm_resource *old_mem, struct ttm_resource *new_mem); -extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob); -extern void vmw_resource_evict_all(struct vmw_private *dev_priv); -extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo); +int vmw_query_readback_all(struct vmw_bo *dx_query_mob); +void vmw_resource_evict_all(struct vmw_private *dev_priv); +void vmw_resource_unbind_list(struct vmw_bo *vbo); void vmw_resource_mob_attach(struct vmw_resource *res); void vmw_resource_mob_detach(struct vmw_resource *res); void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start, pgoff_t end); -int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start, +int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start, pgoff_t end, pgoff_t *num_prefault); /** @@ -872,117 +850,15 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res) } /** - * Buffer object helper functions - vmwgfx_bo.c - */ -extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv, - struct vmw_buffer_object *bo, - struct ttm_placement *placement, - bool interruptible); -extern int vmw_bo_pin_in_vram(struct vmw_private *dev_priv, - struct vmw_buffer_object *buf, - bool interruptible); -extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv, - struct vmw_buffer_object *buf, - bool interruptible); -extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv, - struct vmw_buffer_object *bo, - bool interruptible); -extern int vmw_bo_unpin(struct vmw_private *vmw_priv, - struct vmw_buffer_object *bo, - bool interruptible); -extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf, - SVGAGuestPtr *ptr); -extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin); -extern void vmw_bo_bo_free(struct 
ttm_buffer_object *bo); -extern int vmw_bo_create_kernel(struct vmw_private *dev_priv, - unsigned long size, - struct ttm_placement *placement, - struct ttm_buffer_object **p_bo); -extern int vmw_bo_create(struct vmw_private *dev_priv, - size_t size, struct ttm_placement *placement, - bool interruptible, bool pin, - void (*bo_free)(struct ttm_buffer_object *bo), - struct vmw_buffer_object **p_bo); -extern int vmw_bo_init(struct vmw_private *dev_priv, - struct vmw_buffer_object *vmw_bo, - size_t size, struct ttm_placement *placement, - bool interruptible, bool pin, - void (*bo_free)(struct ttm_buffer_object *bo)); -extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -extern int vmw_user_bo_lookup(struct drm_file *filp, - uint32_t handle, - struct vmw_buffer_object **out); -extern void vmw_bo_fence_single(struct ttm_buffer_object *bo, - struct vmw_fence_obj *fence); -extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo); -extern void vmw_bo_unmap(struct vmw_buffer_object *vbo); -extern void vmw_bo_move_notify(struct ttm_buffer_object *bo, - struct ttm_resource *mem); -extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo); - -/** - * vmw_bo_adjust_prio - Adjust the buffer object eviction priority - * according to attached resources - * @vbo: The struct vmw_buffer_object - */ -static inline void vmw_bo_prio_adjust(struct vmw_buffer_object *vbo) -{ - int i = ARRAY_SIZE(vbo->res_prios); - - while (i--) { - if (vbo->res_prios[i]) { - vbo->base.priority = i; - return; - } - } - - vbo->base.priority = 3; -} - -/** - * vmw_bo_prio_add - Notify a buffer object of a newly attached resource - * eviction priority - * @vbo: The struct vmw_buffer_object - * @prio: The resource priority - * - * After being notified, the code assigns the highest resource eviction priority - * to the backing buffer object (mob). - */ -static inline void vmw_bo_prio_add(struct vmw_buffer_object *vbo, int prio) -{ - if (vbo->res_prios[prio]++ == 0) - vmw_bo_prio_adjust(vbo); -} - -/** - * vmw_bo_prio_del - Notify a buffer object of a resource with a certain - * priority being removed - * @vbo: The struct vmw_buffer_object - * @prio: The resource priority - * - * After being notified, the code assigns the highest resource eviction priority - * to the backing buffer object (mob). 
- */ -static inline void vmw_bo_prio_del(struct vmw_buffer_object *vbo, int prio) -{ - if (--vbo->res_prios[prio] == 0) - vmw_bo_prio_adjust(vbo); -} - -/** * GEM related functionality - vmwgfx_gem.c */ extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv, struct drm_file *filp, uint32_t size, uint32_t *handle, - struct vmw_buffer_object **p_vbo); + struct vmw_bo **p_vbo); extern int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); -extern void vmw_gem_destroy(struct ttm_buffer_object *bo); extern void vmw_debugfs_gem_init(struct vmw_private *vdev); /** @@ -1056,29 +932,20 @@ vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv) } /** - * TTM glue - vmwgfx_ttm_glue.c - */ - -extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma); - -/** * TTM buffer object driver - vmwgfx_ttm_buffer.c */ extern const size_t vmw_tt_size; extern struct ttm_placement vmw_vram_placement; -extern struct ttm_placement vmw_vram_sys_placement; extern struct ttm_placement vmw_vram_gmr_placement; extern struct ttm_placement vmw_sys_placement; -extern struct ttm_placement vmw_srf_placement; -extern struct ttm_placement vmw_mob_placement; -extern struct ttm_placement vmw_nonfixed_placement; extern struct ttm_device_funcs vmw_bo_driver; extern const struct vmw_sg_table * vmw_bo_sg_table(struct ttm_buffer_object *bo); -extern int vmw_bo_create_and_populate(struct vmw_private *dev_priv, - unsigned long bo_size, - struct ttm_buffer_object **bo_p); +int vmw_bo_create_and_populate(struct vmw_private *dev_priv, + size_t bo_size, + u32 domain, + struct vmw_bo **bo_p); extern void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt, @@ -1297,8 +1164,8 @@ vmw_context_binding_state(struct vmw_resource *ctx); extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx, bool readback); extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res, - struct vmw_buffer_object *mob); -extern struct vmw_buffer_object * + struct vmw_bo *mob); +extern struct vmw_bo * vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res); @@ -1523,12 +1390,12 @@ int vmw_mksstat_remove_all(struct vmw_private *dev_priv); DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__) /* Resource dirtying - vmwgfx_page_dirty.c */ -void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo); -int vmw_bo_dirty_add(struct vmw_buffer_object *vbo); +void vmw_bo_dirty_scan(struct vmw_bo *vbo); +int vmw_bo_dirty_add(struct vmw_bo *vbo); void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res); void vmw_bo_dirty_clear_res(struct vmw_resource *res); -void vmw_bo_dirty_release(struct vmw_buffer_object *vbo); -void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo, +void vmw_bo_dirty_release(struct vmw_bo *vbo); +void vmw_bo_dirty_unmap(struct vmw_bo *vbo, pgoff_t start, pgoff_t end); vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf); vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf); @@ -1561,22 +1428,6 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf) return srf; } -static inline void vmw_bo_unreference(struct vmw_buffer_object **buf) -{ - struct vmw_buffer_object *tmp_buf = *buf; - - *buf = NULL; - if (tmp_buf != NULL) - ttm_bo_put(&tmp_buf->base); -} - -static inline struct vmw_buffer_object * -vmw_bo_reference(struct vmw_buffer_object *buf) -{ - ttm_bo_get(&buf->base); - return buf; -} - static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv) { atomic_inc(&dev_priv->num_fifo_resources); diff --git 
a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 0590bb22c73a..6b9aa2b4ef54 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright 2009 - 2022 VMware, Inc., Palo Alto, CA., USA + * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -24,17 +24,17 @@ * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ -#include <linux/sync_file.h> -#include <linux/hashtable.h> - +#include "vmwgfx_binding.h" +#include "vmwgfx_bo.h" #include "vmwgfx_drv.h" -#include "vmwgfx_reg.h" +#include "vmwgfx_mksstat.h" +#include "vmwgfx_so.h" + #include <drm/ttm/ttm_bo.h> #include <drm/ttm/ttm_placement.h> -#include "vmwgfx_so.h" -#include "vmwgfx_binding.h" -#include "vmwgfx_mksstat.h" +#include <linux/sync_file.h> +#include <linux/hashtable.h> /* * Helper macro to get dx_ctx_node if available otherwise print an error @@ -65,7 +65,7 @@ */ struct vmw_relocation { struct list_head head; - struct vmw_buffer_object *vbo; + struct vmw_bo *vbo; union { SVGAMobId *mob_loc; SVGAGuestPtr *location; @@ -149,7 +149,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv, static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGAMobId *id, - struct vmw_buffer_object **vmw_bo_p); + struct vmw_bo **vmw_bo_p); /** * vmw_ptr_diff - Compute the offset from a to b in bytes * @@ -475,12 +475,16 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv, if (has_sm4_context(dev_priv) && vmw_res_type(ctx) == vmw_res_dx_context) { - struct vmw_buffer_object *dx_query_mob; + struct vmw_bo *dx_query_mob; dx_query_mob = vmw_context_get_dx_query_mob(ctx); - if (dx_query_mob) + if (dx_query_mob) { + vmw_bo_placement_set(dx_query_mob, + VMW_BO_DOMAIN_MOB, + VMW_BO_DOMAIN_MOB); ret = vmw_validation_add_bo(sw_context->ctx, - dx_query_mob, true, false); + dx_query_mob); + } } mutex_unlock(&dev_priv->binding_mutex); @@ -596,7 +600,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context) return ret; if (sw_context->dx_query_mob) { - struct vmw_buffer_object *expected_dx_query_mob; + struct vmw_bo *expected_dx_query_mob; expected_dx_query_mob = vmw_context_get_dx_query_mob(sw_context->dx_query_ctx); @@ -703,7 +707,7 @@ res_check_done: static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res) { struct vmw_private *dev_priv = ctx_res->dev_priv; - struct vmw_buffer_object *dx_query_mob; + struct vmw_bo *dx_query_mob; VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery); dx_query_mob = vmw_context_get_dx_query_mob(ctx_res); @@ -718,7 +722,7 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res) cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY; cmd->header.size = sizeof(cmd->body); cmd->body.cid = ctx_res->id; - cmd->body.mobid = dx_query_mob->base.resource->start; + cmd->body.mobid = dx_query_mob->tbo.resource->start; vmw_cmd_commit(dev_priv, sizeof(*cmd)); vmw_context_bind_dx_query(ctx_res, dx_query_mob); @@ -1017,7 +1021,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv, * after successful submission of the current command batch. 
*/ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, - struct vmw_buffer_object *new_query_bo, + struct vmw_bo *new_query_bo, struct vmw_sw_context *sw_context) { struct vmw_res_cache_entry *ctx_entry = @@ -1029,24 +1033,24 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, if (unlikely(new_query_bo != sw_context->cur_query_bo)) { - if (unlikely(PFN_UP(new_query_bo->base.resource->size) > 4)) { + if (unlikely(PFN_UP(new_query_bo->tbo.resource->size) > 4)) { VMW_DEBUG_USER("Query buffer too large.\n"); return -EINVAL; } if (unlikely(sw_context->cur_query_bo != NULL)) { sw_context->needs_post_query_barrier = true; + vmw_bo_placement_set_default_accelerated(sw_context->cur_query_bo); ret = vmw_validation_add_bo(sw_context->ctx, - sw_context->cur_query_bo, - dev_priv->has_mob, false); + sw_context->cur_query_bo); if (unlikely(ret != 0)) return ret; } sw_context->cur_query_bo = new_query_bo; + vmw_bo_placement_set_default_accelerated(dev_priv->dummy_query_bo); ret = vmw_validation_add_bo(sw_context->ctx, - dev_priv->dummy_query_bo, - dev_priv->has_mob, false); + dev_priv->dummy_query_bo); if (unlikely(ret != 0)) return ret; } @@ -1145,9 +1149,9 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGAMobId *id, - struct vmw_buffer_object **vmw_bo_p) + struct vmw_bo **vmw_bo_p) { - struct vmw_buffer_object *vmw_bo; + struct vmw_bo *vmw_bo; uint32_t handle = *id; struct vmw_relocation *reloc; int ret; @@ -1158,9 +1162,10 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n"); return PTR_ERR(vmw_bo); } - ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false); - ttm_bo_put(&vmw_bo->base); - drm_gem_object_put(&vmw_bo->base.base); + vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB); + ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo); + ttm_bo_put(&vmw_bo->tbo); + drm_gem_object_put(&vmw_bo->tbo.base); if (unlikely(ret != 0)) return ret; @@ -1200,9 +1205,9 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGAGuestPtr *ptr, - struct vmw_buffer_object **vmw_bo_p) + struct vmw_bo **vmw_bo_p) { - struct vmw_buffer_object *vmw_bo; + struct vmw_bo *vmw_bo; uint32_t handle = ptr->gmrId; struct vmw_relocation *reloc; int ret; @@ -1213,9 +1218,11 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n"); return PTR_ERR(vmw_bo); } - ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false); - ttm_bo_put(&vmw_bo->base); - drm_gem_object_put(&vmw_bo->base.base); + vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM, + VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM); + ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo); + ttm_bo_put(&vmw_bo->tbo); + drm_gem_object_put(&vmw_bo->tbo.base); if (unlikely(ret != 0)) return ret; @@ -1280,7 +1287,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery); - struct vmw_buffer_object *vmw_bo; + struct vmw_bo *vmw_bo; int ret; cmd = container_of(header, typeof(*cmd), header); @@ -1363,7 +1370,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { - struct 
vmw_buffer_object *vmw_bo; + struct vmw_bo *vmw_bo; VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery); int ret; @@ -1393,7 +1400,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { - struct vmw_buffer_object *vmw_bo; + struct vmw_bo *vmw_bo; VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery); int ret; @@ -1439,7 +1446,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { - struct vmw_buffer_object *vmw_bo; + struct vmw_bo *vmw_bo; VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery); int ret; @@ -1467,7 +1474,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { - struct vmw_buffer_object *vmw_bo; + struct vmw_bo *vmw_bo; VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery); int ret; @@ -1504,7 +1511,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { - struct vmw_buffer_object *vmw_bo = NULL; + struct vmw_bo *vmw_bo = NULL; struct vmw_surface *srf = NULL; VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA); int ret; @@ -1528,7 +1535,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, return ret; /* Make sure DMA doesn't cross BO boundaries. */ - bo_size = vmw_bo->base.base.size; + bo_size = vmw_bo->tbo.base.size; if (unlikely(cmd->body.guest.ptr.offset > bo_size)) { VMW_DEBUG_USER("Invalid DMA offset.\n"); return -EINVAL; @@ -1551,7 +1558,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); - vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header); + vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, header); return 0; } @@ -1670,7 +1677,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, void *buf) { - struct vmw_buffer_object *vmw_bo; + struct vmw_bo *vmw_bo; struct { uint32_t header; @@ -1701,7 +1708,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv, struct vmw_resource *res, uint32_t *buf_id, unsigned long backup_offset) { - struct vmw_buffer_object *vbo; + struct vmw_bo *vbo; void *info; int ret; @@ -3754,7 +3761,7 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context) struct ttm_buffer_object *bo; list_for_each_entry(reloc, &sw_context->bo_relocations, head) { - bo = &reloc->vbo->base; + bo = &reloc->vbo->tbo; switch (bo->resource->mem_type) { case TTM_PL_VRAM: reloc->location->offset += bo->resource->start << PAGE_SHIFT; @@ -4364,13 +4371,17 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, if (dev_priv->pinned_bo == NULL) goto out_unlock; - ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false, - false); + vmw_bo_placement_set(dev_priv->pinned_bo, + VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM, + VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM); + ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo); if (ret) goto out_no_reserve; - ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false, - false); + vmw_bo_placement_set(dev_priv->dummy_query_bo, + VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM, + VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM); + ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo); if (ret) goto out_no_reserve; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 66cc35dc223e..2a0cda324703 100644 --- 
a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA + * Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c index 4d2c28e39f4e..d6baf73a6458 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR MIT */ /* - * Copyright 2021 VMware, Inc. + * Copyright 2021-2023 VMware, Inc. * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -24,31 +24,17 @@ * */ +#include "vmwgfx_bo.h" #include "vmwgfx_drv.h" #include "drm/drm_prime.h" #include "drm/drm_gem_ttm_helper.h" -/** - * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct - * vmw_buffer_object. - * - * @bo: Pointer to the TTM buffer object. - * Return: Pointer to the struct vmw_buffer_object embedding the - * TTM buffer object. - */ -static struct vmw_buffer_object * -vmw_buffer_object(struct ttm_buffer_object *bo) -{ - return container_of(bo, struct vmw_buffer_object, base); -} - static void vmw_gem_object_free(struct drm_gem_object *gobj) { struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj); - if (bo) { + if (bo) ttm_bo_put(bo); - } } static int vmw_gem_object_open(struct drm_gem_object *obj, @@ -65,7 +51,7 @@ static void vmw_gem_object_close(struct drm_gem_object *obj, static int vmw_gem_pin_private(struct drm_gem_object *obj, bool do_pin) { struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj); - struct vmw_buffer_object *vbo = vmw_buffer_object(bo); + struct vmw_bo *vbo = to_vmw_bo(obj); int ret; ret = ttm_bo_reserve(bo, false, false, NULL); @@ -103,6 +89,13 @@ static struct sg_table *vmw_gem_object_get_sg_table(struct drm_gem_object *obj) return drm_prime_pages_to_sg(obj->dev, vmw_tt->dma_ttm.pages, vmw_tt->dma_ttm.num_pages); } +static const struct vm_operations_struct vmw_vm_ops = { + .pfn_mkwrite = vmw_bo_vm_mkwrite, + .page_mkwrite = vmw_bo_vm_mkwrite, + .fault = vmw_bo_vm_fault, + .open = ttm_bo_vm_open, + .close = ttm_bo_vm_close, +}; static const struct drm_gem_object_funcs vmw_gem_object_funcs = { .free = vmw_gem_object_free, @@ -115,43 +108,31 @@ static const struct drm_gem_object_funcs vmw_gem_object_funcs = { .vmap = drm_gem_ttm_vmap, .vunmap = drm_gem_ttm_vunmap, .mmap = drm_gem_ttm_mmap, + .vm_ops = &vmw_vm_ops, }; -/** - * vmw_gem_destroy - vmw buffer object destructor - * - * @bo: Pointer to the embedded struct ttm_buffer_object - */ -void vmw_gem_destroy(struct ttm_buffer_object *bo) -{ - struct vmw_buffer_object *vbo = vmw_buffer_object(bo); - - WARN_ON(vbo->dirty); - WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree)); - vmw_bo_unmap(vbo); - drm_gem_object_release(&vbo->base.base); - kfree(vbo); -} - int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv, struct drm_file *filp, uint32_t size, uint32_t *handle, - struct vmw_buffer_object **p_vbo) + struct vmw_bo **p_vbo) { int ret; - - ret = vmw_bo_create(dev_priv, size, - (dev_priv->has_mob) ? 
- &vmw_sys_placement : - &vmw_vram_sys_placement, - true, false, &vmw_gem_destroy, p_vbo); + struct vmw_bo_params params = { + .domain = (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM, + .busy_domain = VMW_BO_DOMAIN_SYS, + .bo_type = ttm_bo_type_device, + .size = size, + .pin = false + }; + + ret = vmw_bo_create(dev_priv, ¶ms, p_vbo); if (ret != 0) goto out_no_bo; - (*p_vbo)->base.base.funcs = &vmw_gem_object_funcs; + (*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs; - ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle); + ret = drm_gem_handle_create(filp, &(*p_vbo)->tbo.base, handle); out_no_bo: return ret; } @@ -165,7 +146,7 @@ int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data, (union drm_vmw_alloc_dmabuf_arg *)data; struct drm_vmw_alloc_dmabuf_req *req = &arg->req; struct drm_vmw_dmabuf_rep *rep = &arg->rep; - struct vmw_buffer_object *vbo; + struct vmw_bo *vbo; uint32_t handle; int ret; @@ -175,23 +156,23 @@ int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data, goto out_no_bo; rep->handle = handle; - rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node); + rep->map_handle = drm_vma_node_offset_addr(&vbo->tbo.base.vma_node); rep->cur_gmr_id = handle; rep->cur_gmr_offset = 0; /* drop reference from allocate - handle holds it now */ - drm_gem_object_put(&vbo->base.base); + drm_gem_object_put(&vbo->tbo.base); out_no_bo: return ret; } #if defined(CONFIG_DEBUG_FS) -static void vmw_bo_print_info(int id, struct vmw_buffer_object *bo, struct seq_file *m) +static void vmw_bo_print_info(int id, struct vmw_bo *bo, struct seq_file *m) { const char *placement; const char *type; - switch (bo->base.resource->mem_type) { + switch (bo->tbo.resource->mem_type) { case TTM_PL_SYSTEM: placement = " CPU"; break; @@ -212,7 +193,7 @@ static void vmw_bo_print_info(int id, struct vmw_buffer_object *bo, struct seq_f break; } - switch (bo->base.type) { + switch (bo->tbo.type) { case ttm_bo_type_device: type = "device"; break; @@ -228,12 +209,12 @@ static void vmw_bo_print_info(int id, struct vmw_buffer_object *bo, struct seq_f } seq_printf(m, "\t\t0x%08x: %12zu bytes %s, type = %s", - id, bo->base.base.size, placement, type); + id, bo->tbo.base.size, placement, type); seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d, TTM refs = %d", - bo->base.priority, - bo->base.pin_count, - kref_read(&bo->base.base.refcount), - kref_read(&bo->base.kref)); + bo->tbo.priority, + bo->tbo.pin_count, + kref_read(&bo->tbo.base.refcount), + kref_read(&bo->tbo.kref)); seq_puts(m, "\n"); } @@ -267,7 +248,7 @@ static int vmw_debugfs_gem_info_show(struct seq_file *m, void *unused) spin_lock(&file->table_lock); idr_for_each_entry(&file->object_idr, gobj, id) { - struct vmw_buffer_object *bo = gem_to_vmw_bo(gobj); + struct vmw_bo *bo = to_vmw_bo(gobj); vmw_bo_print_info(id, bo, m); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 445d619e1fdc..84d6380b9895 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA + * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -24,8 +24,9 @@ * USE OR OTHER DEALINGS IN THE SOFTWARE. 
* **************************************************************************/ - #include "vmwgfx_kms.h" + +#include "vmwgfx_bo.h" #include "vmw_surface_cache.h" #include <drm/drm_atomic.h> @@ -152,9 +153,8 @@ static void vmw_cursor_update_mob(struct vmw_private *dev_priv, SVGAGBCursorHeader *header; SVGAGBAlphaCursorHeader *alpha_header; const u32 image_size = width * height * sizeof(*image); - bool dummy; - header = ttm_kmap_obj_virtual(&vps->cursor.map, &dummy); + header = vmw_bo_map_and_cache(vps->cursor.bo); alpha_header = &header->header.alphaHeader; memset(header, 0, sizeof(*header)); @@ -169,7 +169,7 @@ static void vmw_cursor_update_mob(struct vmw_private *dev_priv, memcpy(header + 1, image, image_size); vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID, - vps->cursor.bo->resource->start); + vps->cursor.bo->tbo.resource->start); } @@ -184,13 +184,13 @@ static u32 vmw_du_cursor_mob_size(u32 w, u32 h) */ static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps) { - bool dummy; + bool is_iomem; if (vps->surf) { if (vps->surf_mapped) - return vmw_bo_map_and_cache(vps->surf->res.backup); + return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo); return vps->surf->snooper.image; } else if (vps->bo) - return ttm_kmap_obj_virtual(&vps->bo->map, &dummy); + return ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem); return NULL; } @@ -222,15 +222,13 @@ static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps, return changed; } -static void vmw_du_destroy_cursor_mob(struct ttm_buffer_object **bo) +static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo) { - if (!(*bo)) + if (!(*vbo)) return; - ttm_bo_unpin(*bo); - ttm_bo_put(*bo); - kfree(*bo); - *bo = NULL; + ttm_bo_unpin(&(*vbo)->tbo); + vmw_bo_unreference(vbo); } static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp, @@ -254,8 +252,8 @@ static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp, /* Cache is full: See if this mob is bigger than an existing mob. */ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) { - if (vcp->cursor_mobs[i]->base.size < - vps->cursor.bo->base.size) { + if (vcp->cursor_mobs[i]->tbo.base.size < + vps->cursor.bo->tbo.base.size) { vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]); vcp->cursor_mobs[i] = vps->cursor.bo; vps->cursor.bo = NULL; @@ -288,7 +286,7 @@ static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp, return -EINVAL; if (vps->cursor.bo) { - if (vps->cursor.bo->base.size >= size) + if (vps->cursor.bo->tbo.base.size >= size) return 0; vmw_du_put_cursor_mob(vcp, vps); } @@ -296,26 +294,27 @@ static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp, /* Look for an unused mob in the cache. */ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) { if (vcp->cursor_mobs[i] && - vcp->cursor_mobs[i]->base.size >= size) { + vcp->cursor_mobs[i]->tbo.base.size >= size) { vps->cursor.bo = vcp->cursor_mobs[i]; vcp->cursor_mobs[i] = NULL; return 0; } } /* Create a new mob if we can't find an existing one. 
*/ - ret = vmw_bo_create_kernel(dev_priv, size, &vmw_mob_placement, - &vps->cursor.bo); + ret = vmw_bo_create_and_populate(dev_priv, size, + VMW_BO_DOMAIN_MOB, + &vps->cursor.bo); if (ret != 0) return ret; /* Fence the mob creation so we are guarateed to have the mob */ - ret = ttm_bo_reserve(vps->cursor.bo, false, false, NULL); + ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL); if (ret != 0) goto teardown; - vmw_bo_fence_single(vps->cursor.bo, NULL); - ttm_bo_unreserve(vps->cursor.bo); + vmw_bo_fence_single(&vps->cursor.bo->tbo, NULL); + ttm_bo_unreserve(&vps->cursor.bo->tbo); return 0; teardown: @@ -363,7 +362,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, SVGA3dCopyBox *box; unsigned box_count; void *virtual; - bool dummy; + bool is_iomem; struct vmw_dma_cmd { SVGA3dCmdHeader header; SVGA3dCmdSurfaceDMA dma; @@ -423,7 +422,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, if (unlikely(ret != 0)) goto err_unreserve; - virtual = ttm_kmap_obj_virtual(&map, &dummy); + virtual = ttm_kmap_obj_virtual(&map, &is_iomem); if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) { memcpy(srf->snooper.image, virtual, @@ -573,39 +572,30 @@ vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps) { int ret; u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h); - struct ttm_buffer_object *bo = vps->cursor.bo; + struct ttm_buffer_object *bo; - if (!bo) + if (!vps->cursor.bo) return -EINVAL; + bo = &vps->cursor.bo->tbo; + if (bo->base.size < size) return -EINVAL; - if (vps->cursor.mapped) + if (vps->cursor.bo->map.virtual) return 0; ret = ttm_bo_reserve(bo, false, false, NULL); - if (unlikely(ret != 0)) return -ENOMEM; - ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vps->cursor.map); - - /* - * We just want to try to get mob bind to finish - * so that the first write to SVGA_REG_CURSOR_MOBID - * is done with a buffer that the device has already - * seen - */ - (void) ttm_bo_wait(bo, false, false); + vmw_bo_map_and_cache(vps->cursor.bo); ttm_bo_unreserve(bo); if (unlikely(ret != 0)) return -ENOMEM; - vps->cursor.mapped = true; - return 0; } @@ -622,19 +612,15 @@ static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps) { int ret = 0; - struct ttm_buffer_object *bo = vps->cursor.bo; - - if (!vps->cursor.mapped) - return 0; + struct vmw_bo *vbo = vps->cursor.bo; - if (!bo) + if (!vbo || !vbo->map.virtual) return 0; - ret = ttm_bo_reserve(bo, true, false, NULL); + ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL); if (likely(ret == 0)) { - ttm_bo_kunmap(&vps->cursor.map); - ttm_bo_unreserve(bo); - vps->cursor.mapped = false; + vmw_bo_unmap(vbo); + ttm_bo_unreserve(&vbo->tbo); } return ret; @@ -657,20 +643,19 @@ vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane, { struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane); struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); - bool dummy; + bool is_iomem; if (vps->surf_mapped) { - vmw_bo_unmap(vps->surf->res.backup); + vmw_bo_unmap(vps->surf->res.guest_memory_bo); vps->surf_mapped = false; } - if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &dummy)) { - const int ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL); + if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem)) { + const int ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL); if (likely(ret == 0)) { - if (atomic_read(&vps->bo->base_mapped_count) == 0) - ttm_bo_kunmap(&vps->bo->map); - ttm_bo_unreserve(&vps->bo->base); + ttm_bo_kunmap(&vps->bo->map); + ttm_bo_unreserve(&vps->bo->tbo); } } @@ 
-736,29 +721,26 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane, * reserve the ttm_buffer_object first which * vmw_bo_map_and_cache() omits. */ - ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL); + ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL); if (unlikely(ret != 0)) return -ENOMEM; - ret = ttm_bo_kmap(&vps->bo->base, 0, PFN_UP(size), &vps->bo->map); - - if (likely(ret == 0)) - atomic_inc(&vps->bo->base_mapped_count); + ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map); - ttm_bo_unreserve(&vps->bo->base); + ttm_bo_unreserve(&vps->bo->tbo); if (unlikely(ret != 0)) return -ENOMEM; - } else if (vps->surf && !vps->bo && vps->surf->res.backup) { + } else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) { WARN_ON(vps->surf->snooper.image); - ret = ttm_bo_reserve(&vps->surf->res.backup->base, true, false, + ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false, NULL); if (unlikely(ret != 0)) return -ENOMEM; - vmw_bo_map_and_cache(vps->surf->res.backup); - ttm_bo_unreserve(&vps->surf->res.backup->base); + vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo); + ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo); vps->surf_mapped = true; } @@ -785,7 +767,6 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state); s32 hotspot_x, hotspot_y; - bool dummy; hotspot_x = du->hotspot_x; hotspot_y = du->hotspot_y; @@ -827,11 +808,6 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, hotspot_x, hotspot_y); } - if (vps->bo) { - if (ttm_kmap_obj_virtual(&vps->bo->map, &dummy)) - atomic_dec(&vps->bo->base_mapped_count); - } - du->cursor_x = new_state->crtc_x + du->set_gui_x; du->cursor_y = new_state->crtc_y + du->set_gui_y; @@ -935,7 +911,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane, WARN_ON(!surface); if (!surface || - (!surface->snooper.image && !surface->res.backup)) { + (!surface->snooper.image && !surface->res.guest_memory_bo)) { DRM_ERROR("surface not suitable for cursor\n"); return -EINVAL; } @@ -1279,9 +1255,9 @@ int vmw_kms_readback(struct vmw_private *dev_priv, user_fence_rep, vclips, num_clips, NULL); case vmw_du_screen_target: - return vmw_kms_stdu_dma(dev_priv, file_priv, vfb, - user_fence_rep, NULL, vclips, num_clips, - 1, false, true, NULL); + return vmw_kms_stdu_readback(dev_priv, file_priv, vfb, + user_fence_rep, NULL, vclips, num_clips, + 1, NULL); default: WARN_ONCE(true, "Readback called with invalid display system.\n"); @@ -1406,7 +1382,7 @@ static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb, struct vmw_framebuffer_bo *vfbd = vmw_framebuffer_to_vfbd(fb); - return drm_gem_handle_create(file_priv, &vfbd->buffer->base.base, handle); + return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle); } static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer) @@ -1486,69 +1462,6 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = { .dirty = vmw_framebuffer_bo_dirty_ext, }; -/* - * Pin the bofer in a location suitable for access by the - * display system. - */ -static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb) -{ - struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); - struct vmw_buffer_object *buf; - struct ttm_placement *placement; - int ret; - - buf = vfb->bo ? 
vmw_framebuffer_to_vfbd(&vfb->base)->buffer : - vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup; - - if (!buf) - return 0; - - switch (dev_priv->active_display_unit) { - case vmw_du_legacy: - vmw_overlay_pause_all(dev_priv); - ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false); - vmw_overlay_resume_all(dev_priv); - break; - case vmw_du_screen_object: - case vmw_du_screen_target: - if (vfb->bo) { - if (dev_priv->capabilities & SVGA_CAP_3D) { - /* - * Use surface DMA to get content to - * sreen target surface. - */ - placement = &vmw_vram_gmr_placement; - } else { - /* Use CPU blit. */ - placement = &vmw_sys_placement; - } - } else { - /* Use surface / image update */ - placement = &vmw_mob_placement; - } - - return vmw_bo_pin_in_placement(dev_priv, buf, placement, false); - default: - return -EINVAL; - } - - return ret; -} - -static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb) -{ - struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); - struct vmw_buffer_object *buf; - - buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer : - vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup; - - if (WARN_ON(!buf)) - return 0; - - return vmw_bo_unpin(dev_priv, buf, false); -} - /** * vmw_create_bo_proxy - create a proxy surface for the buffer object * @@ -1566,7 +1479,7 @@ static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb) */ static int vmw_create_bo_proxy(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd, - struct vmw_buffer_object *bo_mob, + struct vmw_bo *bo_mob, struct vmw_surface **srf_out) { struct vmw_surface_metadata metadata = {0}; @@ -1618,9 +1531,9 @@ static int vmw_create_bo_proxy(struct drm_device *dev, /* Reserve and switch the backing mob. */ mutex_lock(&res->dev_priv->cmdbuf_mutex); (void) vmw_resource_reserve(res, false, true); - vmw_bo_unreference(&res->backup); - res->backup = vmw_bo_reference(bo_mob); - res->backup_offset = 0; + vmw_bo_unreference(&res->guest_memory_bo); + res->guest_memory_bo = vmw_bo_reference(bo_mob); + res->guest_memory_offset = 0; vmw_resource_unreserve(res, false, false, false, NULL, 0); mutex_unlock(&res->dev_priv->cmdbuf_mutex); @@ -1630,7 +1543,7 @@ static int vmw_create_bo_proxy(struct drm_device *dev, static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv, - struct vmw_buffer_object *bo, + struct vmw_bo *bo, struct vmw_framebuffer **out, const struct drm_mode_fb_cmd2 *mode_cmd) @@ -1642,7 +1555,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv, int ret; requested_size = mode_cmd->height * mode_cmd->pitches[0]; - if (unlikely(requested_size > bo->base.base.size)) { + if (unlikely(requested_size > bo->tbo.base.size)) { DRM_ERROR("Screen buffer object size is too small " "for requested mode.\n"); return -EINVAL; @@ -1663,7 +1576,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv, goto out_err1; } - vfbd->base.base.obj[0] = &bo->base.base; + vfbd->base.base.obj[0] = &bo->tbo.base; drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd); vfbd->base.bo = true; vfbd->buffer = vmw_bo_reference(bo); @@ -1718,7 +1631,7 @@ vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height) */ struct vmw_framebuffer * vmw_kms_new_framebuffer(struct vmw_private *dev_priv, - struct vmw_buffer_object *bo, + struct vmw_bo *bo, struct vmw_surface *surface, bool only_2d, const struct drm_mode_fb_cmd2 *mode_cmd) @@ -1765,9 +1678,6 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv, if (ret) return ERR_PTR(ret); - vfb->pin = 
vmw_framebuffer_pin; - vfb->unpin = vmw_framebuffer_unpin; - return vfb; } @@ -1782,7 +1692,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_framebuffer *vfb = NULL; struct vmw_surface *surface = NULL; - struct vmw_buffer_object *bo = NULL; + struct vmw_bo *bo = NULL; int ret; /* returns either a bo or surface */ @@ -1817,7 +1727,7 @@ err_out: /* vmw_user_lookup_handle takes one ref so does new_fb */ if (bo) { vmw_bo_unreference(&bo); - drm_gem_object_put(&bo->base.base); + drm_gem_object_put(&bo->tbo.base); } if (surface) vmw_surface_unreference(&surface); @@ -3076,8 +2986,20 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update) struct vmw_framebuffer_bo *vfbbo = container_of(update->vfb, typeof(*vfbbo), base); - ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false, - update->cpu_blit); + /* + * For screen targets we want a mappable bo, for everything else we want + * accelerated i.e. host backed (vram or gmr) bo. If the display unit + * is not screen target then mob's shouldn't be available. + */ + if (update->dev_priv->active_display_unit == vmw_du_screen_target) { + vmw_bo_placement_set(vfbbo->buffer, + VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR, + VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR); + } else { + WARN_ON(update->dev_priv->has_mob); + vmw_bo_placement_set_default_accelerated(vfbbo->buffer); + } + ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer); } else { struct vmw_framebuffer_surface *vfbs = container_of(update->vfb, typeof(*vfbs), base); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 4d6e7b555db7..3de7b4b6a230 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 OR MIT */ /************************************************************************** * - * Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA + * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -126,7 +126,6 @@ struct vmw_du_update_plane { struct vmw_framebuffer *vfb; struct vmw_fence_obj **out_fence; struct mutex *mutex; - bool cpu_blit; bool intr; }; @@ -217,8 +216,6 @@ struct vmw_kms_dirty { */ struct vmw_framebuffer { struct drm_framebuffer base; - int (*pin)(struct vmw_framebuffer *fb); - int (*unpin)(struct vmw_framebuffer *fb); bool bo; uint32_t user_handle; }; @@ -233,7 +230,7 @@ struct vmw_clip_rect { struct vmw_framebuffer_surface { struct vmw_framebuffer base; struct vmw_surface *surface; - struct vmw_buffer_object *buffer; + struct vmw_bo *buffer; struct list_head head; bool is_bo_proxy; /* true if this is proxy surface for DMA buf */ }; @@ -241,7 +238,7 @@ struct vmw_framebuffer_surface { struct vmw_framebuffer_bo { struct vmw_framebuffer base; - struct vmw_buffer_object *buffer; + struct vmw_bo *buffer; }; @@ -273,9 +270,7 @@ struct vmw_crtc_state { }; struct vmw_cursor_plane_state { - struct ttm_buffer_object *bo; - struct ttm_bo_kmap_obj map; - bool mapped; + struct vmw_bo *bo; s32 hotspot_x; s32 hotspot_y; }; @@ -293,7 +288,7 @@ struct vmw_cursor_plane_state { struct vmw_plane_state { struct drm_plane_state base; struct vmw_surface *surf; - struct vmw_buffer_object *bo; + struct vmw_bo *bo; int content_fb_type; unsigned long bo_size; @@ -346,7 +341,7 @@ struct 
vmw_connector_state { struct vmw_cursor_plane { struct drm_plane base; - struct ttm_buffer_object *cursor_mobs[3]; + struct vmw_bo *cursor_mobs[3]; }; /** @@ -364,7 +359,7 @@ struct vmw_display_unit { struct vmw_cursor_plane cursor; struct vmw_surface *cursor_surface; - struct vmw_buffer_object *cursor_bo; + struct vmw_bo *cursor_bo; size_t cursor_age; int cursor_x; @@ -397,7 +392,7 @@ struct vmw_display_unit { struct vmw_validation_ctx { struct vmw_resource *res; - struct vmw_buffer_object *buf; + struct vmw_bo *buf; }; #define vmw_crtc_to_du(x) \ @@ -458,7 +453,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv, uint32_t num_clips); struct vmw_framebuffer * vmw_kms_new_framebuffer(struct vmw_private *dev_priv, - struct vmw_buffer_object *bo, + struct vmw_bo *bo, struct vmw_surface *surface, bool only_2d, const struct drm_mode_fb_cmd2 *mode_cmd); @@ -566,17 +561,15 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv, unsigned num_clips, int inc, struct vmw_fence_obj **out_fence, struct drm_crtc *crtc); -int vmw_kms_stdu_dma(struct vmw_private *dev_priv, - struct drm_file *file_priv, - struct vmw_framebuffer *vfb, - struct drm_vmw_fence_rep __user *user_fence_rep, - struct drm_clip_rect *clips, - struct drm_vmw_rect *vclips, - uint32_t num_clips, - int increment, - bool to_surface, - bool interruptible, - struct drm_crtc *crtc); +int vmw_kms_stdu_readback(struct vmw_private *dev_priv, + struct drm_file *file_priv, + struct vmw_framebuffer *vfb, + struct drm_vmw_fence_rep __user *user_fence_rep, + struct drm_clip_rect *clips, + struct drm_vmw_rect *vclips, + uint32_t num_clips, + int increment, + struct drm_crtc *crtc); int vmw_du_helper_plane_update(struct vmw_du_update_plane *update); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index a56e5d0ca3c6..c0e42f2ed144 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA + * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -25,11 +25,13 @@ * **************************************************************************/ +#include "vmwgfx_bo.h" +#include "vmwgfx_kms.h" + #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_fourcc.h> -#include "vmwgfx_kms.h" #define vmw_crtc_to_ldu(x) \ container_of(x, struct vmw_legacy_display_unit, base.crtc) @@ -134,6 +136,47 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) return 0; } +/* + * Pin the buffer in a location suitable for access by the + * display system. + */ +static int vmw_ldu_fb_pin(struct vmw_framebuffer *vfb) +{ + struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); + struct vmw_bo *buf; + int ret; + + buf = vfb->bo ? 
vmw_framebuffer_to_vfbd(&vfb->base)->buffer : + vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.guest_memory_bo; + + if (!buf) + return 0; + WARN_ON(dev_priv->active_display_unit != vmw_du_legacy); + + if (dev_priv->active_display_unit == vmw_du_legacy) { + vmw_overlay_pause_all(dev_priv); + ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false); + vmw_overlay_resume_all(dev_priv); + } else + ret = -EINVAL; + + return ret; +} + +static int vmw_ldu_fb_unpin(struct vmw_framebuffer *vfb) +{ + struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); + struct vmw_bo *buf; + + buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer : + vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.guest_memory_bo; + + if (WARN_ON(!buf)) + return 0; + + return vmw_bo_unpin(dev_priv, buf, false); +} + static int vmw_ldu_del_active(struct vmw_private *vmw_priv, struct vmw_legacy_display_unit *ldu) { @@ -145,8 +188,7 @@ static int vmw_ldu_del_active(struct vmw_private *vmw_priv, list_del_init(&ldu->active); if (--(ld->num_active) == 0) { BUG_ON(!ld->fb); - if (ld->fb->unpin) - ld->fb->unpin(ld->fb); + WARN_ON(vmw_ldu_fb_unpin(ld->fb)); ld->fb = NULL; } @@ -163,11 +205,10 @@ static int vmw_ldu_add_active(struct vmw_private *vmw_priv, BUG_ON(!ld->num_active && ld->fb); if (vfb != ld->fb) { - if (ld->fb && ld->fb->unpin) - ld->fb->unpin(ld->fb); + if (ld->fb) + WARN_ON(vmw_ldu_fb_unpin(ld->fb)); vmw_svga_enable(vmw_priv); - if (vfb->pin) - vfb->pin(vfb); + WARN_ON(vmw_ldu_fb_pin(vfb)); ld->fb = vfb; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index 0a8cc28d6606..7055cbefc768 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright 2012-2021 VMware, Inc., Palo Alto, CA., USA + * Copyright 2012-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -25,10 +25,11 @@ * **************************************************************************/ -#include <linux/highmem.h> - +#include "vmwgfx_bo.h" #include "vmwgfx_drv.h" +#include <linux/highmem.h> + #ifdef CONFIG_64BIT #define VMW_PPN_SIZE 8 #define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PT64_0 @@ -50,7 +51,7 @@ * @pt_root_page DMA address of the level 0 page of the page table. 
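/*
 * A condensed sketch, not part of the patch, of how the now-LDU-local pin
 * helpers above are driven from vmw_ldu_add_active()/vmw_ldu_del_active():
 * pinning at the start of VRAM is purely a legacy-display concern, which is
 * why the generic vmw_framebuffer pin/unpin hooks could be dropped. All
 * identifiers are taken from the hunks above.
 */
	if (vfb != ld->fb) {
		if (ld->fb)
			WARN_ON(vmw_ldu_fb_unpin(ld->fb)); /* release old scanout */
		vmw_svga_enable(vmw_priv);
		WARN_ON(vmw_ldu_fb_pin(vfb)); /* pin new one at VRAM start */
		ld->fb = vfb;
	}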
*/ struct vmw_mob { - struct ttm_buffer_object *pt_bo; + struct vmw_bo *pt_bo; unsigned long num_pages; unsigned pt_level; dma_addr_t pt_root_page; @@ -203,7 +204,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv, if (otable->page_table == NULL) return; - bo = otable->page_table->pt_bo; + bo = &otable->page_table->pt_bo->tbo; cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return; @@ -251,7 +252,9 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv, bo_size += otables[i].size; } - ret = vmw_bo_create_and_populate(dev_priv, bo_size, &batch->otable_bo); + ret = vmw_bo_create_and_populate(dev_priv, bo_size, + VMW_BO_DOMAIN_WAITABLE_SYS, + &batch->otable_bo); if (unlikely(ret != 0)) return ret; @@ -260,7 +263,8 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv, if (!batch->otables[i].enabled) continue; - ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo, + ret = vmw_setup_otable_base(dev_priv, i, + &batch->otable_bo->tbo, offset, &otables[i]); if (unlikely(ret != 0)) @@ -277,8 +281,8 @@ out_no_setup: &batch->otables[i]); } - vmw_bo_unpin_unlocked(batch->otable_bo); - ttm_bo_put(batch->otable_bo); + vmw_bo_unpin_unlocked(&batch->otable_bo->tbo); + ttm_bo_put(&batch->otable_bo->tbo); batch->otable_bo = NULL; return ret; } @@ -329,7 +333,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv, struct vmw_otable_batch *batch) { SVGAOTableType i; - struct ttm_buffer_object *bo = batch->otable_bo; + struct ttm_buffer_object *bo = &batch->otable_bo->tbo; int ret; for (i = 0; i < batch->num_otables; ++i) @@ -344,8 +348,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv, ttm_bo_unpin(bo); ttm_bo_unreserve(bo); - ttm_bo_put(batch->otable_bo); - batch->otable_bo = NULL; + vmw_bo_unreference(&batch->otable_bo); } /* @@ -413,7 +416,9 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv, { BUG_ON(mob->pt_bo != NULL); - return vmw_bo_create_and_populate(dev_priv, mob->num_pages * PAGE_SIZE, &mob->pt_bo); + return vmw_bo_create_and_populate(dev_priv, mob->num_pages * PAGE_SIZE, + VMW_BO_DOMAIN_WAITABLE_SYS, + &mob->pt_bo); } /** @@ -494,7 +499,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob, unsigned long num_data_pages) { unsigned long num_pt_pages = 0; - struct ttm_buffer_object *bo = mob->pt_bo; + struct ttm_buffer_object *bo = &mob->pt_bo->tbo; struct vmw_piter save_pt_iter = {0}; struct vmw_piter pt_iter; const struct vmw_sg_table *vsgt; @@ -531,9 +536,8 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob, void vmw_mob_destroy(struct vmw_mob *mob) { if (mob->pt_bo) { - vmw_bo_unpin_unlocked(mob->pt_bo); - ttm_bo_put(mob->pt_bo); - mob->pt_bo = NULL; + vmw_bo_unpin_unlocked(&mob->pt_bo->tbo); + vmw_bo_unreference(&mob->pt_bo); } kfree(mob); } @@ -552,7 +556,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv, SVGA3dCmdDestroyGBMob body; } *cmd; int ret; - struct ttm_buffer_object *bo = mob->pt_bo; + struct ttm_buffer_object *bo = &mob->pt_bo->tbo; if (bo) { ret = ttm_bo_reserve(bo, false, true, NULL); @@ -644,9 +648,8 @@ int vmw_mob_bind(struct vmw_private *dev_priv, out_no_cmd_space: vmw_fifo_resource_dec(dev_priv); if (pt_set_up) { - vmw_bo_unpin_unlocked(mob->pt_bo); - ttm_bo_put(mob->pt_bo); - mob->pt_bo = NULL; + vmw_bo_unpin_unlocked(&mob->pt_bo->tbo); + vmw_bo_unreference(&mob->pt_bo); } return -ENOMEM; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index b5b311f2a91a..8d171d71cb8a 100644 --- 
a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright 2009-2014 VMware, Inc., Palo Alto, CA., USA + * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -24,19 +24,19 @@ * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ - -#include <drm/ttm/ttm_placement.h> +#include "vmwgfx_bo.h" +#include "vmwgfx_drv.h" #include "device_include/svga_overlay.h" #include "device_include/svga_escape.h" -#include "vmwgfx_drv.h" +#include <drm/ttm/ttm_placement.h> #define VMW_MAX_NUM_STREAMS 1 #define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE) struct vmw_stream { - struct vmw_buffer_object *buf; + struct vmw_bo *buf; bool claimed; bool paused; struct drm_vmw_control_stream_arg saved; @@ -92,7 +92,7 @@ static inline void fill_flush(struct vmw_escape_video_flush *cmd, * -ERESTARTSYS if interrupted by a signal. */ static int vmw_overlay_send_put(struct vmw_private *dev_priv, - struct vmw_buffer_object *buf, + struct vmw_bo *buf, struct drm_vmw_control_stream_arg *arg, bool interruptible) { @@ -140,7 +140,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv, for (i = 0; i < num_items; i++) items[i].registerId = i; - vmw_bo_get_guest_ptr(&buf->base, &ptr); + vmw_bo_get_guest_ptr(&buf->tbo, &ptr); ptr.offset += arg->offset; items[SVGA_VIDEO_ENABLED].value = true; @@ -223,7 +223,7 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv, * used with GMRs instead of being locked to vram. */ static int vmw_overlay_move_buffer(struct vmw_private *dev_priv, - struct vmw_buffer_object *buf, + struct vmw_bo *buf, bool pin, bool inter) { if (!pin) @@ -295,7 +295,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv, * -ERESTARTSYS if interrupted. */ static int vmw_overlay_update_stream(struct vmw_private *dev_priv, - struct vmw_buffer_object *buf, + struct vmw_bo *buf, struct drm_vmw_control_stream_arg *arg, bool interruptible) { @@ -433,7 +433,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data, struct vmw_overlay *overlay = dev_priv->overlay_priv; struct drm_vmw_control_stream_arg *arg = (struct drm_vmw_control_stream_arg *)data; - struct vmw_buffer_object *buf; + struct vmw_bo *buf; struct vmw_resource *res; int ret; @@ -458,7 +458,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data, ret = vmw_overlay_update_stream(dev_priv, buf, arg, true); vmw_bo_unreference(&buf); - drm_gem_object_put(&buf->base.base); + drm_gem_object_put(&buf->tbo.base); out_unlock: mutex_unlock(&overlay->mutex); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c index f41f041559f4..74ff2812d66a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright 2019 VMware, Inc., Palo Alto, CA., USA + * Copyright 2019-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -24,6 +24,7 @@ * USE OR OTHER DEALINGS IN THE SOFTWARE. 
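/*
 * The mechanical rule behind most one-line changes in this series, shown as
 * a sketch: struct vmw_bo embeds the TTM object as 'tbo' (formerly 'base'),
 * and the GEM object hangs off tbo.base, so call sites convert like this.
 */
	vmw_bo_get_guest_ptr(&buf->tbo, &ptr);        /* was: &buf->base      */
	ttm_bo_reserve(&buf->tbo, false, true, NULL); /* was: &buf->base      */
	drm_gem_object_put(&buf->tbo.base);           /* was: &buf->base.base */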
* **************************************************************************/ +#include "vmwgfx_bo.h" #include "vmwgfx_drv.h" /* @@ -78,11 +79,11 @@ struct vmw_bo_dirty { * dirty structure with the results. This function may change the * dirty-tracking method. */ -static void vmw_bo_dirty_scan_pagetable(struct vmw_buffer_object *vbo) +static void vmw_bo_dirty_scan_pagetable(struct vmw_bo *vbo) { struct vmw_bo_dirty *dirty = vbo->dirty; - pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node); - struct address_space *mapping = vbo->base.bdev->dev_mapping; + pgoff_t offset = drm_vma_node_start(&vbo->tbo.base.vma_node); + struct address_space *mapping = vbo->tbo.bdev->dev_mapping; pgoff_t num_marked; num_marked = clean_record_shared_mapping_range @@ -116,26 +117,25 @@ static void vmw_bo_dirty_scan_pagetable(struct vmw_buffer_object *vbo) * * This function may change the dirty-tracking method. */ -static void vmw_bo_dirty_scan_mkwrite(struct vmw_buffer_object *vbo) +static void vmw_bo_dirty_scan_mkwrite(struct vmw_bo *vbo) { struct vmw_bo_dirty *dirty = vbo->dirty; - unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node); - struct address_space *mapping = vbo->base.bdev->dev_mapping; + unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node); + struct address_space *mapping = vbo->tbo.bdev->dev_mapping; pgoff_t num_marked; if (dirty->end <= dirty->start) return; - num_marked = wp_shared_mapping_range(vbo->base.bdev->dev_mapping, - dirty->start + offset, - dirty->end - dirty->start); + num_marked = wp_shared_mapping_range(vbo->tbo.bdev->dev_mapping, + dirty->start + offset, + dirty->end - dirty->start); if (100UL * num_marked / dirty->bitmap_size > - VMW_DIRTY_PERCENTAGE) { + VMW_DIRTY_PERCENTAGE) dirty->change_count++; - } else { + else dirty->change_count = 0; - } if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) { pgoff_t start = 0; @@ -160,7 +160,7 @@ static void vmw_bo_dirty_scan_mkwrite(struct vmw_buffer_object *vbo) * * This function may change the dirty tracking method. */ -void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo) +void vmw_bo_dirty_scan(struct vmw_bo *vbo) { struct vmw_bo_dirty *dirty = vbo->dirty; @@ -181,12 +181,12 @@ void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo) * when calling unmap_mapping_range(). This function makes sure we pick * up all dirty pages. */ -static void vmw_bo_dirty_pre_unmap(struct vmw_buffer_object *vbo, +static void vmw_bo_dirty_pre_unmap(struct vmw_bo *vbo, pgoff_t start, pgoff_t end) { struct vmw_bo_dirty *dirty = vbo->dirty; - unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node); - struct address_space *mapping = vbo->base.bdev->dev_mapping; + unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node); + struct address_space *mapping = vbo->tbo.bdev->dev_mapping; if (dirty->method != VMW_BO_DIRTY_PAGETABLE || start >= end) return; @@ -206,11 +206,11 @@ static void vmw_bo_dirty_pre_unmap(struct vmw_buffer_object *vbo, * * This is similar to ttm_bo_unmap_virtual() except it takes a subrange. 
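/*
 * The scan-mkwrite heuristic above in isolation (sketch): when more than
 * VMW_DIRTY_PERCENTAGE percent of the tracked pages were re-marked in one
 * scan, count a "change"; enough consecutive changes switch the tracking
 * method. The switch to VMW_BO_DIRTY_PAGETABLE in the trigger branch is an
 * assumption -- the hunk above elides that branch's body.
 */
	if (100UL * num_marked / dirty->bitmap_size > VMW_DIRTY_PERCENTAGE)
		dirty->change_count++;
	else
		dirty->change_count = 0;

	if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
		dirty->method = VMW_BO_DIRTY_PAGETABLE; /* assumed */
		dirty->change_count = 0;
	}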
*/ -void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo, +void vmw_bo_dirty_unmap(struct vmw_bo *vbo, pgoff_t start, pgoff_t end) { - unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node); - struct address_space *mapping = vbo->base.bdev->dev_mapping; + unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node); + struct address_space *mapping = vbo->tbo.bdev->dev_mapping; vmw_bo_dirty_pre_unmap(vbo, start, end); unmap_shared_mapping_range(mapping, (offset + start) << PAGE_SHIFT, @@ -227,10 +227,10 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo, * * Return: Zero on success, -ENOMEM on memory allocation failure. */ -int vmw_bo_dirty_add(struct vmw_buffer_object *vbo) +int vmw_bo_dirty_add(struct vmw_bo *vbo) { struct vmw_bo_dirty *dirty = vbo->dirty; - pgoff_t num_pages = PFN_UP(vbo->base.resource->size); + pgoff_t num_pages = PFN_UP(vbo->tbo.resource->size); size_t size; int ret; @@ -253,8 +253,8 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo) if (num_pages < PAGE_SIZE / sizeof(pte_t)) { dirty->method = VMW_BO_DIRTY_PAGETABLE; } else { - struct address_space *mapping = vbo->base.bdev->dev_mapping; - pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node); + struct address_space *mapping = vbo->tbo.bdev->dev_mapping; + pgoff_t offset = drm_vma_node_start(&vbo->tbo.base.vma_node); dirty->method = VMW_BO_DIRTY_MKWRITE; @@ -284,7 +284,7 @@ out_no_dirty: * * Return: Zero on success, -ENOMEM on memory allocation failure. */ -void vmw_bo_dirty_release(struct vmw_buffer_object *vbo) +void vmw_bo_dirty_release(struct vmw_bo *vbo) { struct vmw_bo_dirty *dirty = vbo->dirty; @@ -306,11 +306,11 @@ void vmw_bo_dirty_release(struct vmw_buffer_object *vbo) */ void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res) { - struct vmw_buffer_object *vbo = res->backup; + struct vmw_bo *vbo = res->guest_memory_bo; struct vmw_bo_dirty *dirty = vbo->dirty; pgoff_t start, cur, end; - unsigned long res_start = res->backup_offset; - unsigned long res_end = res->backup_offset + res->backup_size; + unsigned long res_start = res->guest_memory_offset; + unsigned long res_end = res->guest_memory_offset + res->guest_memory_size; WARN_ON_ONCE(res_start & ~PAGE_MASK); res_start >>= PAGE_SHIFT; @@ -351,9 +351,9 @@ void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res) */ void vmw_bo_dirty_clear_res(struct vmw_resource *res) { - unsigned long res_start = res->backup_offset; - unsigned long res_end = res->backup_offset + res->backup_size; - struct vmw_buffer_object *vbo = res->backup; + unsigned long res_start = res->guest_memory_offset; + unsigned long res_end = res->guest_memory_offset + res->guest_memory_size; + struct vmw_bo *vbo = res->guest_memory_bo; struct vmw_bo_dirty *dirty = vbo->dirty; res_start >>= PAGE_SHIFT; @@ -380,8 +380,7 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf) vm_fault_t ret; unsigned long page_offset; unsigned int save_flags; - struct vmw_buffer_object *vbo = - container_of(bo, typeof(*vbo), base); + struct vmw_bo *vbo = to_vmw_bo(&bo->base); /* * mkwrite() doesn't handle the VM_FAULT_RETRY return value correctly. 
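/*
 * Method selection from vmw_bo_dirty_add() above, pulled out as a sketch:
 * one reading of the threshold is that a BO whose PTEs fit in roughly one
 * page is cheap to scan via the page table, while anything larger uses
 * write-protection plus mkwrite faults.
 */
	pgoff_t num_pages = PFN_UP(vbo->tbo.resource->size);

	if (num_pages < PAGE_SIZE / sizeof(pte_t))
		dirty->method = VMW_BO_DIRTY_PAGETABLE;
	else
		dirty->method = VMW_BO_DIRTY_MKWRITE;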
@@ -419,8 +418,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf) struct vm_area_struct *vma = vmf->vma; struct ttm_buffer_object *bo = (struct ttm_buffer_object *) vma->vm_private_data; - struct vmw_buffer_object *vbo = - container_of(bo, struct vmw_buffer_object, base); + struct vmw_bo *vbo = to_vmw_bo(&bo->base); pgoff_t num_prefault; pgprot_t prot; vm_fault_t ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index c7d645e5ec7b..71eeabf001c8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA + * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -27,9 +27,10 @@ #include <drm/ttm/ttm_placement.h> -#include "vmwgfx_resource_priv.h" #include "vmwgfx_binding.h" +#include "vmwgfx_bo.h" #include "vmwgfx_drv.h" +#include "vmwgfx_resource_priv.h" #define VMW_RES_EVICT_ERR_COUNT 10 @@ -39,10 +40,10 @@ */ void vmw_resource_mob_attach(struct vmw_resource *res) { - struct vmw_buffer_object *backup = res->backup; - struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL; + struct vmw_bo *gbo = res->guest_memory_bo; + struct rb_node **new = &gbo->res_tree.rb_node, *parent = NULL; - dma_resv_assert_held(res->backup->base.base.resv); + dma_resv_assert_held(gbo->tbo.base.resv); res->used_prio = (res->res_dirty) ? res->func->dirty_prio : res->func->prio; @@ -51,14 +52,14 @@ void vmw_resource_mob_attach(struct vmw_resource *res) container_of(*new, struct vmw_resource, mob_node); parent = *new; - new = (res->backup_offset < this->backup_offset) ? + new = (res->guest_memory_offset < this->guest_memory_offset) ? 
&((*new)->rb_left) : &((*new)->rb_right); } rb_link_node(&res->mob_node, parent, new); - rb_insert_color(&res->mob_node, &backup->res_tree); + rb_insert_color(&res->mob_node, &gbo->res_tree); - vmw_bo_prio_add(backup, res->used_prio); + vmw_bo_prio_add(gbo, res->used_prio); } /** @@ -67,13 +68,13 @@ void vmw_resource_mob_attach(struct vmw_resource *res) */ void vmw_resource_mob_detach(struct vmw_resource *res) { - struct vmw_buffer_object *backup = res->backup; + struct vmw_bo *gbo = res->guest_memory_bo; - dma_resv_assert_held(backup->base.base.resv); + dma_resv_assert_held(gbo->tbo.base.resv); if (vmw_resource_mob_attached(res)) { - rb_erase(&res->mob_node, &backup->res_tree); + rb_erase(&res->mob_node, &gbo->res_tree); RB_CLEAR_NODE(&res->mob_node); - vmw_bo_prio_del(backup, res->used_prio); + vmw_bo_prio_del(gbo, res->used_prio); } } @@ -120,8 +121,8 @@ static void vmw_resource_release(struct kref *kref) spin_lock(&dev_priv->resource_lock); list_del_init(&res->lru_head); spin_unlock(&dev_priv->resource_lock); - if (res->backup) { - struct ttm_buffer_object *bo = &res->backup->base; + if (res->guest_memory_bo) { + struct ttm_buffer_object *bo = &res->guest_memory_bo->tbo; ret = ttm_bo_reserve(bo, false, false, NULL); BUG_ON(ret); @@ -133,14 +134,14 @@ static void vmw_resource_release(struct kref *kref) val_buf.num_shared = 0; res->func->unbind(res, false, &val_buf); } - res->backup_dirty = false; + res->guest_memory_dirty = false; vmw_resource_mob_detach(res); if (res->dirty) res->func->dirty_free(res); if (res->coherent) - vmw_bo_dirty_release(res->backup); + vmw_bo_dirty_release(res->guest_memory_bo); ttm_bo_unreserve(bo); - vmw_bo_unreference(&res->backup); + vmw_bo_unreference(&res->guest_memory_bo); } if (likely(res->hw_destroy != NULL)) { @@ -223,9 +224,9 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res, INIT_LIST_HEAD(&res->lru_head); INIT_LIST_HEAD(&res->binding_head); res->id = -1; - res->backup = NULL; - res->backup_offset = 0; - res->backup_dirty = false; + res->guest_memory_bo = NULL; + res->guest_memory_offset = 0; + res->guest_memory_dirty = false; res->res_dirty = false; res->coherent = false; res->used_prio = 3; @@ -263,7 +264,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv, int ret = -EINVAL; base = ttm_base_object_lookup(tfile, handle); - if (unlikely(base == NULL)) + if (unlikely(!base)) return -EINVAL; if (unlikely(ttm_base_object_type(base) != converter->object_type)) @@ -290,7 +291,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv, struct drm_file *filp, uint32_t handle, struct vmw_surface **out_surf, - struct vmw_buffer_object **out_buf) + struct vmw_bo **out_buf) { struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile; struct vmw_resource *res; @@ -312,32 +313,36 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv, } /** - * vmw_resource_buf_alloc - Allocate a backup buffer for a resource. + * vmw_resource_buf_alloc - Allocate a guest memory buffer for a resource. * - * @res: The resource for which to allocate a backup buffer. + * @res: The resource for which to allocate a gbo buffer. * @interruptible: Whether any sleeps during allocation should be * performed while interruptible.
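/*
 * Quick reference for the rename this series applies throughout the
 * resource code, collected from the hunks in this file:
 *
 *   struct vmw_buffer_object  ->  struct vmw_bo
 *   vbo->base                 ->  vbo->tbo                 (TTM object)
 *   vbo->base.base            ->  vbo->tbo.base            (GEM object)
 *   res->backup               ->  res->guest_memory_bo
 *   res->backup_offset        ->  res->guest_memory_offset
 *   res->backup_size          ->  res->guest_memory_size
 *   res->backup_dirty         ->  res->guest_memory_dirty
 *   func->needs_backup        ->  func->needs_guest_memory
 *   func->backup_placement    ->  func->domain + func->busy_domain
 */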
*/ static int vmw_resource_buf_alloc(struct vmw_resource *res, bool interruptible) { - unsigned long size = PFN_ALIGN(res->backup_size); - struct vmw_buffer_object *backup; + unsigned long size = PFN_ALIGN(res->guest_memory_size); + struct vmw_bo *gbo; + struct vmw_bo_params bo_params = { + .domain = res->func->domain, + .busy_domain = res->func->busy_domain, + .bo_type = ttm_bo_type_device, + .size = res->guest_memory_size, + .pin = false + }; int ret; - if (likely(res->backup)) { - BUG_ON(res->backup->base.base.size < size); + if (likely(res->guest_memory_bo)) { + BUG_ON(res->guest_memory_bo->tbo.base.size < size); return 0; } - ret = vmw_bo_create(res->dev_priv, res->backup_size, - res->func->backup_placement, - interruptible, false, - &vmw_bo_bo_free, &backup); + ret = vmw_bo_create(res->dev_priv, &bo_params, &gbo); if (unlikely(ret != 0)) goto out_no_bo; - res->backup = backup; + res->guest_memory_bo = gbo; out_no_bo: return ret; @@ -369,13 +374,13 @@ static int vmw_resource_do_validate(struct vmw_resource *res, } if (func->bind && - ((func->needs_backup && !vmw_resource_mob_attached(res) && - val_buf->bo != NULL) || - (!func->needs_backup && val_buf->bo != NULL))) { + ((func->needs_guest_memory && !vmw_resource_mob_attached(res) && + val_buf->bo) || + (!func->needs_guest_memory && val_buf->bo))) { ret = func->bind(res, val_buf); if (unlikely(ret != 0)) goto out_bind_failed; - if (func->needs_backup) + if (func->needs_guest_memory) vmw_resource_mob_attach(res); } @@ -385,11 +390,11 @@ static int vmw_resource_do_validate(struct vmw_resource *res, */ if (func->dirty_alloc && vmw_resource_mob_attached(res) && !res->coherent) { - if (res->backup->dirty && !res->dirty) { + if (res->guest_memory_bo->dirty && !res->dirty) { ret = func->dirty_alloc(res); if (ret) return ret; - } else if (!res->backup->dirty && res->dirty) { + } else if (!res->guest_memory_bo->dirty && res->dirty) { func->dirty_free(res); } } @@ -400,12 +405,12 @@ static int vmw_resource_do_validate(struct vmw_resource *res, */ if (res->dirty) { if (dirtying && !res->res_dirty) { - pgoff_t start = res->backup_offset >> PAGE_SHIFT; + pgoff_t start = res->guest_memory_offset >> PAGE_SHIFT; pgoff_t end = __KERNEL_DIV_ROUND_UP - (res->backup_offset + res->backup_size, + (res->guest_memory_offset + res->guest_memory_size, PAGE_SIZE); - vmw_bo_dirty_unmap(res->backup, start, end); + vmw_bo_dirty_unmap(res->guest_memory_bo, start, end); } vmw_bo_dirty_transfer_to_res(res); @@ -427,10 +432,10 @@ out_bind_failed: * @res: Pointer to the struct vmw_resource to unreserve. * @dirty_set: Change dirty status of the resource. * @dirty: When changing dirty status indicates the new status. - * @switch_backup: Backup buffer has been switched. - * @new_backup: Pointer to new backup buffer if command submission + * @switch_guest_memory: Guest memory buffer has been switched. + * @new_guest_memory_bo: Pointer to new guest memory buffer if command submission * switched. May be NULL. - * @new_backup_offset: New backup offset if @switch_backup is true. + * @new_guest_memory_offset: New gbo offset if @switch_guest_memory is true. * * Currently unreserving a resource means putting it back on the device's * resource lru list, so that it can be evicted if necessary. 
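/*
 * Caller-side shape of the new allocation API, sketched with the fields
 * visible in vmw_resource_buf_alloc() above: vmw_bo_create() now takes a
 * parameter struct instead of a placement pointer and a list of bools.
 */
	struct vmw_bo_params bo_params = {
		.domain      = res->func->domain,
		.busy_domain = res->func->busy_domain,
		.bo_type     = ttm_bo_type_device,
		.size        = res->guest_memory_size,
		.pin         = false
	};
	struct vmw_bo *gbo;
	int ret = vmw_bo_create(res->dev_priv, &bo_params, &gbo);

	if (ret == 0)
		res->guest_memory_bo = gbo;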
@@ -438,42 +443,42 @@ out_bind_failed: void vmw_resource_unreserve(struct vmw_resource *res, bool dirty_set, bool dirty, - bool switch_backup, - struct vmw_buffer_object *new_backup, - unsigned long new_backup_offset) + bool switch_guest_memory, + struct vmw_bo *new_guest_memory_bo, + unsigned long new_guest_memory_offset) { struct vmw_private *dev_priv = res->dev_priv; if (!list_empty(&res->lru_head)) return; - if (switch_backup && new_backup != res->backup) { - if (res->backup) { + if (switch_guest_memory && new_guest_memory_bo != res->guest_memory_bo) { + if (res->guest_memory_bo) { vmw_resource_mob_detach(res); if (res->coherent) - vmw_bo_dirty_release(res->backup); - vmw_bo_unreference(&res->backup); + vmw_bo_dirty_release(res->guest_memory_bo); + vmw_bo_unreference(&res->guest_memory_bo); } - if (new_backup) { - res->backup = vmw_bo_reference(new_backup); + if (new_guest_memory_bo) { + res->guest_memory_bo = vmw_bo_reference(new_guest_memory_bo); /* * The validation code should already have added a * dirty tracker here. */ - WARN_ON(res->coherent && !new_backup->dirty); + WARN_ON(res->coherent && !new_guest_memory_bo->dirty); vmw_resource_mob_attach(res); } else { - res->backup = NULL; + res->guest_memory_bo = NULL; } - } else if (switch_backup && res->coherent) { - vmw_bo_dirty_release(res->backup); + } else if (switch_guest_memory && res->coherent) { + vmw_bo_dirty_release(res->guest_memory_bo); } - if (switch_backup) - res->backup_offset = new_backup_offset; + if (switch_guest_memory) + res->guest_memory_offset = new_guest_memory_offset; if (dirty_set) res->res_dirty = dirty; @@ -507,30 +512,32 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket, { struct ttm_operation_ctx ctx = { true, false }; struct list_head val_list; - bool backup_dirty = false; + bool guest_memory_dirty = false; int ret; - if (unlikely(res->backup == NULL)) { + if (unlikely(!res->guest_memory_bo)) { ret = vmw_resource_buf_alloc(res, interruptible); if (unlikely(ret != 0)) return ret; } INIT_LIST_HEAD(&val_list); - ttm_bo_get(&res->backup->base); - val_buf->bo = &res->backup->base; + ttm_bo_get(&res->guest_memory_bo->tbo); + val_buf->bo = &res->guest_memory_bo->tbo; val_buf->num_shared = 0; list_add_tail(&val_buf->head, &val_list); ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL); if (unlikely(ret != 0)) goto out_no_reserve; - if (res->func->needs_backup && !vmw_resource_mob_attached(res)) + if (res->func->needs_guest_memory && !vmw_resource_mob_attached(res)) return 0; - backup_dirty = res->backup_dirty; - ret = ttm_bo_validate(&res->backup->base, - res->func->backup_placement, + guest_memory_dirty = res->guest_memory_dirty; + vmw_bo_placement_set(res->guest_memory_bo, res->func->domain, + res->func->busy_domain); + ret = ttm_bo_validate(&res->guest_memory_bo->tbo, + &res->guest_memory_bo->placement, &ctx); if (unlikely(ret != 0)) @@ -543,8 +550,8 @@ out_no_validate: out_no_reserve: ttm_bo_put(val_buf->bo); val_buf->bo = NULL; - if (backup_dirty) - vmw_bo_unreference(&res->backup); + if (guest_memory_dirty) + vmw_bo_unreference(&res->guest_memory_bo); return ret; } @@ -555,12 +562,13 @@ out_no_reserve: * @res: The resource to reserve. * * This function takes the resource off the LRU list and make sure - * a backup buffer is present for guest-backed resources. However, - * the buffer may not be bound to the resource at this point. + * a guest memory buffer is present for guest-backed resources. + * However, the buffer may not be bound to the resource at this + * point. 
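/*
 * The guest-memory switch in vmw_resource_unreserve() above, condensed as a
 * sketch (the coherent dirty-tracking branches are dropped for brevity):
 * detach and drop the old buffer, reference and attach the new one, then
 * record the offset the resource now lives at.
 */
	if (switch_guest_memory && new_guest_memory_bo != res->guest_memory_bo) {
		if (res->guest_memory_bo) {
			vmw_resource_mob_detach(res);
			vmw_bo_unreference(&res->guest_memory_bo);
		}
		if (new_guest_memory_bo) {
			res->guest_memory_bo = vmw_bo_reference(new_guest_memory_bo);
			vmw_resource_mob_attach(res);
		} else {
			res->guest_memory_bo = NULL;
		}
	}
	if (switch_guest_memory)
		res->guest_memory_offset = new_guest_memory_offset;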
* */ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible, - bool no_backup) + bool no_guest_memory) { struct vmw_private *dev_priv = res->dev_priv; int ret; @@ -569,13 +577,13 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible, list_del_init(&res->lru_head); spin_unlock(&dev_priv->resource_lock); - if (res->func->needs_backup && res->backup == NULL && - !no_backup) { + if (res->func->needs_guest_memory && !res->guest_memory_bo && + !no_guest_memory) { ret = vmw_resource_buf_alloc(res, interruptible); if (unlikely(ret != 0)) { - DRM_ERROR("Failed to allocate a backup buffer " + DRM_ERROR("Failed to allocate a guest memory buffer " "of size %lu. bytes\n", - (unsigned long) res->backup_size); + (unsigned long) res->guest_memory_size); return ret; } } @@ -585,10 +593,10 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible, /** * vmw_resource_backoff_reservation - Unreserve and unreference a - * backup buffer + * guest memory buffer *. * @ticket: The ww acquire ctx used for reservation. - * @val_buf: Backup buffer information. + * @val_buf: Guest memory buffer information. */ static void vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket, @@ -630,14 +638,14 @@ static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket, return ret; if (unlikely(func->unbind != NULL && - (!func->needs_backup || vmw_resource_mob_attached(res)))) { + (!func->needs_guest_memory || vmw_resource_mob_attached(res)))) { ret = func->unbind(res, res->res_dirty, &val_buf); if (unlikely(ret != 0)) goto out_no_unbind; vmw_resource_mob_detach(res); } ret = func->destroy(res); - res->backup_dirty = true; + res->guest_memory_dirty = true; res->res_dirty = false; out_no_unbind: vmw_resource_backoff_reservation(ticket, &val_buf); @@ -676,8 +684,8 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr, val_buf.bo = NULL; val_buf.num_shared = 0; - if (res->backup) - val_buf.bo = &res->backup->base; + if (res->guest_memory_bo) + val_buf.bo = &res->guest_memory_bo->tbo; do { ret = vmw_resource_do_validate(res, &val_buf, dirtying); if (likely(ret != -EBUSY)) @@ -717,9 +725,9 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr, if (unlikely(ret != 0)) goto out_no_validate; - else if (!res->func->needs_backup && res->backup) { + else if (!res->func->needs_guest_memory && res->guest_memory_bo) { WARN_ON_ONCE(vmw_resource_mob_attached(res)); - vmw_bo_unreference(&res->backup); + vmw_bo_unreference(&res->guest_memory_bo); } return 0; @@ -740,14 +748,14 @@ out_no_validate: * validation code, since resource validation and eviction * both require the backup buffer to be reserved. 
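/*
 * The key API shift visible in vmw_resource_check_buffer() earlier in this
 * file, as a sketch: placement is no longer a ttm_placement pointer owned
 * by the resource type; it is computed onto the buffer from the type's
 * domain masks and then validated from the buffer itself.
 */
	vmw_bo_placement_set(res->guest_memory_bo,
			     res->func->domain,
			     res->func->busy_domain);
	ret = ttm_bo_validate(&res->guest_memory_bo->tbo,
			      &res->guest_memory_bo->placement,
			      &ctx);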
*/ -void vmw_resource_unbind_list(struct vmw_buffer_object *vbo) +void vmw_resource_unbind_list(struct vmw_bo *vbo) { struct ttm_validate_buffer val_buf = { - .bo = &vbo->base, + .bo = &vbo->tbo, .num_shared = 0 }; - dma_resv_assert_held(vbo->base.base.resv); + dma_resv_assert_held(vbo->tbo.base.resv); while (!RB_EMPTY_ROOT(&vbo->res_tree)) { struct rb_node *node = vbo->res_tree.rb_node; struct vmw_resource *res = @@ -756,12 +764,12 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo) if (!WARN_ON_ONCE(!res->func->unbind)) (void) res->func->unbind(res, res->res_dirty, &val_buf); - res->backup_dirty = true; + res->guest_memory_dirty = true; res->res_dirty = false; vmw_resource_mob_detach(res); } - (void) ttm_bo_wait(&vbo->base, false, false); + (void) ttm_bo_wait(&vbo->tbo, false, false); } @@ -773,7 +781,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo) * @dx_query_mob: Buffer containing the DX query MOB * * Read back cached states from the device if they exist. This function * assumes binding_mutex is held. */ -int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob) +int vmw_query_readback_all(struct vmw_bo *dx_query_mob) { struct vmw_resource *dx_query_ctx; struct vmw_private *dev_priv; @@ -822,20 +830,19 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo, struct ttm_resource *old_mem, struct ttm_resource *new_mem) { - struct vmw_buffer_object *dx_query_mob; + struct vmw_bo *dx_query_mob; struct ttm_device *bdev = bo->bdev; - struct vmw_private *dev_priv; - - dev_priv = container_of(bdev, struct vmw_private, bdev); + struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev); mutex_lock(&dev_priv->binding_mutex); /* If BO is being moved from MOB to system memory */ - if (new_mem->mem_type == TTM_PL_SYSTEM && + if (old_mem && + new_mem->mem_type == TTM_PL_SYSTEM && old_mem->mem_type == VMW_PL_MOB) { struct vmw_fence_obj *fence; - dx_query_mob = container_of(bo, struct vmw_buffer_object, base); + dx_query_mob = to_vmw_bo(&bo->base); if (!dx_query_mob || !dx_query_mob->dx_query_ctx) { mutex_unlock(&dev_priv->binding_mutex); return; @@ -863,7 +870,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo, */ bool vmw_resource_needs_backup(const struct vmw_resource *res) { - return res->func->needs_backup; + return res->func->needs_guest_memory; } /** @@ -959,21 +966,24 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible) goto out_no_reserve; if (res->pin_count == 0) { - struct vmw_buffer_object *vbo = NULL; + struct vmw_bo *vbo = NULL; - if (res->backup) { - vbo = res->backup; + if (res->guest_memory_bo) { + vbo = res->guest_memory_bo; - ret = ttm_bo_reserve(&vbo->base, interruptible, false, NULL); + ret = ttm_bo_reserve(&vbo->tbo, interruptible, false, NULL); if (ret) goto out_no_validate; - if (!vbo->base.pin_count) { + if (!vbo->tbo.pin_count) { + vmw_bo_placement_set(vbo, + res->func->domain, + res->func->busy_domain); ret = ttm_bo_validate - (&vbo->base, - res->func->backup_placement, + (&vbo->tbo, + &vbo->placement, &ctx); if (ret) { - ttm_bo_unreserve(&vbo->base); + ttm_bo_unreserve(&vbo->tbo); goto out_no_validate; } } @@ -983,7 +993,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible) } ret = vmw_resource_validate(res, interruptible, true); if (vbo) - ttm_bo_unreserve(&vbo->base); + ttm_bo_unreserve(&vbo->tbo); if (ret) goto out_no_validate; } @@ -1016,12 +1026,12 @@ void vmw_resource_unpin(struct vmw_resource *res) WARN_ON(ret); WARN_ON(res->pin_count == 0); - if (--res->pin_count == 0 && res->backup) { - struct vmw_buffer_object *vbo = res->backup; + if
(--res->pin_count == 0 && res->guest_memory_bo) { + struct vmw_bo *vbo = res->guest_memory_bo; - (void) ttm_bo_reserve(&vbo->base, false, false, NULL); + (void) ttm_bo_reserve(&vbo->tbo, false, false, NULL); vmw_bo_pin_reserved(vbo, false); - ttm_bo_unreserve(&vbo->base); + ttm_bo_unreserve(&vbo->tbo); } vmw_resource_unreserve(res, false, false, false, NULL, 0UL); @@ -1062,7 +1072,7 @@ void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start, * @num_prefault: Returns how many pages including the first have been * cleaned and are ok to prefault */ -int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start, +int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start, pgoff_t end, pgoff_t *num_prefault) { struct rb_node *cur = vbo->res_tree.rb_node; @@ -1079,9 +1089,9 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start, struct vmw_resource *cur_res = container_of(cur, struct vmw_resource, mob_node); - if (cur_res->backup_offset >= res_end) { + if (cur_res->guest_memory_offset >= res_end) { cur = cur->rb_left; - } else if (cur_res->backup_offset + cur_res->backup_size <= + } else if (cur_res->guest_memory_offset + cur_res->guest_memory_size <= res_start) { cur = cur->rb_right; } else { @@ -1092,7 +1102,7 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start, } /* - * In order of increasing backup_offset, clean dirty resources + * In order of increasing guest_memory_offset, clean dirty resources * intersecting the range. */ while (found) { @@ -1108,13 +1118,13 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start, found->res_dirty = false; } - last_cleaned = found->backup_offset + found->backup_size; + last_cleaned = found->guest_memory_offset + found->guest_memory_size; cur = rb_next(&found->mob_node); if (!cur) break; found = container_of(cur, struct vmw_resource, mob_node); - if (found->backup_offset >= res_end) + if (found->guest_memory_offset >= res_end) break; } @@ -1123,7 +1133,7 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start, */ *num_prefault = 1; if (last_cleaned > res_start) { - struct ttm_buffer_object *bo = &vbo->base; + struct ttm_buffer_object *bo = &vbo->tbo; *num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start, PAGE_SIZE); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h index 3b7438b2d289..aa7cbd396bea 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h @@ -58,10 +58,11 @@ struct vmw_user_resource_conv { * struct vmw_res_func - members and functions common for a resource type * * @res_type: Enum that identifies the lru list to use for eviction. - * @needs_backup: Whether the resource is guest-backed and needs + * @needs_guest_memory:Whether the resource is guest-backed and needs * persistent buffer storage. * @type_name: String that identifies the resource type. - * @backup_placement: TTM placement for backup buffers. + * @domain: TTM placement for guest memory buffers. + * @busy_domain: TTM busy placement for guest memory buffers. * @may_evict Whether the resource may be evicted. * @create: Create a hardware resource. * @destroy: Destroy a hardware resource. 
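/*
 * How vmw_resources_clean() above searches the offset-ordered tree
 * (sketch): a resource spans [guest_memory_offset, guest_memory_offset +
 * guest_memory_size); descend left when the node starts at or past the
 * range, right when it ends at or before it. The overlap branch's body is
 * elided by the hunk above, so remembering the match is an assumption.
 */
	while (cur) {
		struct vmw_resource *cur_res =
			container_of(cur, struct vmw_resource, mob_node);

		if (cur_res->guest_memory_offset >= res_end) {
			cur = cur->rb_left;
		} else if (cur_res->guest_memory_offset +
			   cur_res->guest_memory_size <= res_start) {
			cur = cur->rb_right;
		} else {
			found = cur_res; /* assumed: record the overlap */
			break;
		}
	}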
@@ -81,9 +82,10 @@ struct vmw_user_resource_conv { */ struct vmw_res_func { enum vmw_res_type res_type; - bool needs_backup; + bool needs_guest_memory; const char *type_name; - struct ttm_placement *backup_placement; + u32 domain; + u32 busy_domain; bool may_evict; u32 prio; u32 dirty_prio; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index e1f36a09c59c..556a403b7eb5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright 2011-2022 VMware, Inc., Palo Alto, CA., USA + * Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -25,13 +25,14 @@ * **************************************************************************/ +#include "vmwgfx_bo.h" +#include "vmwgfx_kms.h" + #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_fourcc.h> -#include "vmwgfx_kms.h" - #define vmw_crtc_to_sou(x) \ container_of(x, struct vmw_screen_object_unit, base.crtc) #define vmw_encoder_to_sou(x) \ @@ -89,7 +90,7 @@ struct vmw_screen_object_unit { struct vmw_display_unit base; unsigned long buffer_size; /**< Size of allocated buffer */ - struct vmw_buffer_object *buffer; /**< Backing store buffer */ + struct vmw_bo *buffer; /**< Backing store buffer */ bool defined; }; @@ -148,7 +149,7 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv, sou->base.set_gui_y = cmd->obj.root.y; /* Ok to assume that buffer is pinned in vram */ - vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr); + vmw_bo_get_guest_ptr(&sou->buffer->tbo, &cmd->obj.backingStore.ptr); cmd->obj.backingStore.pitch = mode->hdisplay * 4; vmw_cmd_commit(dev_priv, fifo_size); @@ -409,9 +410,13 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, struct drm_crtc *crtc = plane->state->crtc ?: new_state->crtc; struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); struct vmw_private *dev_priv; - size_t size; int ret; - + struct vmw_bo_params bo_params = { + .domain = VMW_BO_DOMAIN_VRAM, + .busy_domain = VMW_BO_DOMAIN_VRAM, + .bo_type = ttm_bo_type_device, + .pin = true + }; if (!new_fb) { vmw_bo_unreference(&vps->bo); @@ -420,11 +425,11 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, return 0; } - size = new_state->crtc_w * new_state->crtc_h * 4; + bo_params.size = new_state->crtc_w * new_state->crtc_h * 4; dev_priv = vmw_priv(crtc->dev); if (vps->bo) { - if (vps->bo_size == size) { + if (vps->bo_size == bo_params.size) { /* * Note that this might temporarily up the pin-count * to 2, until cleanup_fb() is called. @@ -443,16 +448,12 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, * resume the overlays, this is preferred to failing to alloc. 
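/*
 * What a resource-function table looks like after the struct vmw_res_func
 * change above (the shader tables below follow exactly this shape): the
 * ttm_placement pointer gives way to a pair of domain bitmasks. Sketch;
 * callbacks elided.
 */
static const struct vmw_res_func example_func = {
	.res_type           = vmw_res_shader,
	.needs_guest_memory = true,
	.may_evict          = true,
	.prio               = 3,
	.dirty_prio         = 3,
	.type_name          = "example resources",
	.domain             = VMW_BO_DOMAIN_MOB,
	.busy_domain        = VMW_BO_DOMAIN_MOB,
	/* .create, .destroy, .bind, .unbind as before */
};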
*/ vmw_overlay_pause_all(dev_priv); - ret = vmw_bo_create(dev_priv, size, - &vmw_vram_placement, - false, true, &vmw_bo_bo_free, &vps->bo); + ret = vmw_bo_create(dev_priv, &bo_params, &vps->bo); vmw_overlay_resume_all(dev_priv); - if (ret) { - vps->bo = NULL; /* vmw_bo_init frees on error */ + if (ret) return ret; - } - vps->bo_size = size; + vps->bo_size = bo_params.size; /* * TTM already thinks the buffer is pinned, but make sure the @@ -489,7 +490,7 @@ static uint32_t vmw_sou_bo_define_gmrfb(struct vmw_du_update_plane *update, gmr->body.format.colorDepth = depth; gmr->body.format.reserved = 0; gmr->body.bytesPerLine = update->vfb->base.pitches[0]; - vmw_bo_get_guest_ptr(&vfbbo->buffer->base, &gmr->body.ptr); + vmw_bo_get_guest_ptr(&vfbbo->buffer->tbo, &gmr->body.ptr); return sizeof(*gmr); } @@ -546,7 +547,6 @@ static int vmw_sou_plane_update_bo(struct vmw_private *dev_priv, bo_update.base.vfb = vfb; bo_update.base.out_fence = out_fence; bo_update.base.mutex = NULL; - bo_update.base.cpu_blit = false; bo_update.base.intr = true; bo_update.base.calc_fifo_size = vmw_sou_bo_fifo_size; @@ -707,7 +707,6 @@ static int vmw_sou_plane_update_surface(struct vmw_private *dev_priv, srf_update.base.vfb = vfb; srf_update.base.out_fence = out_fence; srf_update.base.mutex = &dev_priv->cmdbuf_mutex; - srf_update.base.cpu_blit = false; srf_update.base.intr = true; srf_update.base.calc_fifo_size = vmw_sou_surface_fifo_size; @@ -947,7 +946,7 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv) static int do_bo_define_gmrfb(struct vmw_private *dev_priv, struct vmw_framebuffer *framebuffer) { - struct vmw_buffer_object *buf = + struct vmw_bo *buf = container_of(framebuffer, struct vmw_framebuffer_bo, base)->buffer; int depth = framebuffer->base.format->depth; @@ -973,7 +972,7 @@ static int do_bo_define_gmrfb(struct vmw_private *dev_priv, cmd->body.format.reserved = 0; cmd->body.bytesPerLine = framebuffer->base.pitches[0]; /* Buffer is reserved in vram or GMR */ - vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr); + vmw_bo_get_guest_ptr(&buf->tbo, &cmd->body.ptr); vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; @@ -1216,14 +1215,16 @@ int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv, struct vmw_fence_obj **out_fence, struct drm_crtc *crtc) { - struct vmw_buffer_object *buf = + struct vmw_bo *buf = container_of(framebuffer, struct vmw_framebuffer_bo, base)->buffer; struct vmw_kms_dirty dirty; DECLARE_VAL_CONTEXT(val_ctx, NULL, 0); int ret; - ret = vmw_validation_add_bo(&val_ctx, buf, false, false); + vmw_bo_placement_set(buf, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM, + VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM); + ret = vmw_validation_add_bo(&val_ctx, buf); if (ret) return ret; @@ -1323,13 +1324,15 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv, uint32_t num_clips, struct drm_crtc *crtc) { - struct vmw_buffer_object *buf = + struct vmw_bo *buf = container_of(vfb, struct vmw_framebuffer_bo, base)->buffer; struct vmw_kms_dirty dirty; DECLARE_VAL_CONTEXT(val_ctx, NULL, 0); int ret; - ret = vmw_validation_add_bo(&val_ctx, buf, false, false); + vmw_bo_placement_set(buf, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM, + VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM); + ret = vmw_validation_add_bo(&val_ctx, buf); if (ret) return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index 51e83dfa1cac..e7226db8b242 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT 
/************************************************************************** * - * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA + * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -27,9 +27,10 @@ #include <drm/ttm/ttm_placement.h> +#include "vmwgfx_binding.h" +#include "vmwgfx_bo.h" #include "vmwgfx_drv.h" #include "vmwgfx_resource_priv.h" -#include "vmwgfx_binding.h" struct vmw_shader { struct vmw_resource res; @@ -88,12 +89,13 @@ const struct vmw_user_resource_conv *user_shader_converter = static const struct vmw_res_func vmw_gb_shader_func = { .res_type = vmw_res_shader, - .needs_backup = true, + .needs_guest_memory = true, .may_evict = true, .prio = 3, .dirty_prio = 3, .type_name = "guest backed shaders", - .backup_placement = &vmw_mob_placement, + .domain = VMW_BO_DOMAIN_MOB, + .busy_domain = VMW_BO_DOMAIN_MOB, .create = vmw_gb_shader_create, .destroy = vmw_gb_shader_destroy, .bind = vmw_gb_shader_bind, @@ -102,12 +104,13 @@ static const struct vmw_res_func vmw_gb_shader_func = { static const struct vmw_res_func vmw_dx_shader_func = { .res_type = vmw_res_shader, - .needs_backup = true, + .needs_guest_memory = true, .may_evict = true, .prio = 3, .dirty_prio = 3, .type_name = "dx shaders", - .backup_placement = &vmw_mob_placement, + .domain = VMW_BO_DOMAIN_MOB, + .busy_domain = VMW_BO_DOMAIN_MOB, .create = vmw_dx_shader_create, /* * The destroy callback is only called with a committed resource on @@ -158,7 +161,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv, SVGA3dShaderType type, uint8_t num_input_sig, uint8_t num_output_sig, - struct vmw_buffer_object *byte_code, + struct vmw_bo *byte_code, void (*res_free) (struct vmw_resource *res)) { struct vmw_shader *shader = vmw_res_to_shader(res); @@ -175,10 +178,10 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv, return ret; } - res->backup_size = size; + res->guest_memory_size = size; if (byte_code) { - res->backup = vmw_bo_reference(byte_code); - res->backup_offset = offset; + res->guest_memory_bo = vmw_bo_reference(byte_code); + res->guest_memory_offset = offset; } shader->size = size; shader->type = type; @@ -259,8 +262,8 @@ static int vmw_gb_shader_bind(struct vmw_resource *res, cmd->header.size = sizeof(cmd->body); cmd->body.shid = res->id; cmd->body.mobid = bo->resource->start; - cmd->body.offsetInBytes = res->backup_offset; - res->backup_dirty = false; + cmd->body.offsetInBytes = res->guest_memory_offset; + res->guest_memory_dirty = false; vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; @@ -277,7 +280,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res, } *cmd; struct vmw_fence_obj *fence; - BUG_ON(res->backup->base.resource->mem_type != VMW_PL_MOB); + BUG_ON(res->guest_memory_bo->tbo.resource->mem_type != VMW_PL_MOB); cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) @@ -397,8 +400,8 @@ static int vmw_dx_shader_unscrub(struct vmw_resource *res) cmd->header.size = sizeof(cmd->body); cmd->body.cid = shader->ctx->id; cmd->body.shid = shader->id; - cmd->body.mobid = res->backup->base.resource->start; - cmd->body.offsetInBytes = res->backup_offset; + cmd->body.mobid = res->guest_memory_bo->tbo.resource->start; + cmd->body.offsetInBytes = res->guest_memory_offset; vmw_cmd_commit(dev_priv, sizeof(*cmd)); vmw_cotable_add_resource(shader->cotable, &shader->cotable_head); @@ -508,7 +511,7 @@ static int 
vmw_dx_shader_unbind(struct vmw_resource *res, struct vmw_fence_obj *fence; int ret; - BUG_ON(res->backup->base.resource->mem_type != VMW_PL_MOB); + BUG_ON(res->guest_memory_bo->tbo.resource->mem_type != VMW_PL_MOB); mutex_lock(&dev_priv->binding_mutex); ret = vmw_dx_shader_scrub(res); @@ -680,7 +683,7 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, } static int vmw_user_shader_alloc(struct vmw_private *dev_priv, - struct vmw_buffer_object *buffer, + struct vmw_bo *buffer, size_t shader_size, size_t offset, SVGA3dShaderType shader_type, @@ -734,7 +737,7 @@ out: static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv, - struct vmw_buffer_object *buffer, + struct vmw_bo *buffer, size_t shader_size, size_t offset, SVGA3dShaderType shader_type) @@ -771,7 +774,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv, { struct vmw_private *dev_priv = vmw_priv(dev); struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct vmw_buffer_object *buffer = NULL; + struct vmw_bo *buffer = NULL; SVGA3dShaderType shader_type; int ret; @@ -782,7 +785,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv, return ret; } - if ((u64)buffer->base.base.size < (u64)size + (u64)offset) { + if ((u64)buffer->tbo.base.size < (u64)size + (u64)offset) { VMW_DEBUG_USER("Illegal buffer- or shader size.\n"); ret = -EINVAL; goto out_bad_arg; @@ -807,7 +810,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv, num_output_sig, tfile, shader_handle); out_bad_arg: vmw_bo_unreference(&buffer); - drm_gem_object_put(&buffer->base.base); + drm_gem_object_put(&buffer->tbo.base); return ret; } @@ -884,28 +887,34 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, struct list_head *list) { struct ttm_operation_ctx ctx = { false, true }; - struct vmw_buffer_object *buf; + struct vmw_bo *buf; struct ttm_bo_kmap_obj map; bool is_iomem; int ret; struct vmw_resource *res; + struct vmw_bo_params bo_params = { + .domain = VMW_BO_DOMAIN_SYS, + .busy_domain = VMW_BO_DOMAIN_SYS, + .bo_type = ttm_bo_type_device, + .size = size, + .pin = true + }; if (!vmw_shader_id_ok(user_key, shader_type)) return -EINVAL; - ret = vmw_bo_create(dev_priv, size, &vmw_sys_placement, - true, true, vmw_bo_bo_free, &buf); + ret = vmw_bo_create(dev_priv, &bo_params, &buf); if (unlikely(ret != 0)) goto out; - ret = ttm_bo_reserve(&buf->base, false, true, NULL); + ret = ttm_bo_reserve(&buf->tbo, false, true, NULL); if (unlikely(ret != 0)) goto no_reserve; /* Map and copy shader bytecode. 
*/ - ret = ttm_bo_kmap(&buf->base, 0, PFN_UP(size), &map); + ret = ttm_bo_kmap(&buf->tbo, 0, PFN_UP(size), &map); if (unlikely(ret != 0)) { - ttm_bo_unreserve(&buf->base); + ttm_bo_unreserve(&buf->tbo); goto no_reserve; } @@ -913,9 +922,9 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, WARN_ON(is_iomem); ttm_bo_kunmap(&map); - ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, &ctx); + ret = ttm_bo_validate(&buf->tbo, &buf->placement, &ctx); WARN_ON(ret != 0); - ttm_bo_unreserve(&buf->base); + ttm_bo_unreserve(&buf->tbo); res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type); if (unlikely(ret != 0)) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c index 4ea32b01efc0..5af4db6d1f18 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c @@ -24,6 +24,7 @@ * **************************************************************************/ +#include "vmwgfx_bo.h" #include "vmwgfx_drv.h" #include "vmwgfx_resource_priv.h" #include "vmwgfx_so.h" @@ -81,10 +82,11 @@ static void vmw_view_commit_notify(struct vmw_resource *res, static const struct vmw_res_func vmw_view_func = { .res_type = vmw_res_view, - .needs_backup = false, + .needs_guest_memory = false, .may_evict = false, .type_name = "DX view", - .backup_placement = NULL, + .domain = VMW_BO_DOMAIN_SYS, + .busy_domain = VMW_BO_DOMAIN_SYS, .create = vmw_view_create, .commit_notify = vmw_view_commit_notify, }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 0090abe89254..d79a6eccfaa4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /****************************************************************************** * - * COPYRIGHT (C) 2014-2022 VMware, Inc., Palo Alto, CA., USA + * COPYRIGHT (C) 2014-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -25,14 +25,15 @@ * ******************************************************************************/ +#include "vmwgfx_bo.h" +#include "vmwgfx_kms.h" +#include "vmw_surface_cache.h" + #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_fourcc.h> -#include "vmwgfx_kms.h" -#include "vmw_surface_cache.h" - #define vmw_crtc_to_stdu(x) \ container_of(x, struct vmw_screen_target_display_unit, base.crtc) #define vmw_encoder_to_stdu(x) \ @@ -65,12 +66,11 @@ enum stdu_content_type { */ struct vmw_stdu_dirty { struct vmw_kms_dirty base; - SVGA3dTransferType transfer; s32 left, right, top, bottom; s32 fb_left, fb_top; u32 pitch; union { - struct vmw_buffer_object *buf; + struct vmw_bo *buf; u32 sid; }; }; @@ -136,12 +136,6 @@ static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu); * Screen Target Display Unit CRTC Functions *****************************************************************************/ -static bool vmw_stdu_use_cpu_blit(const struct vmw_private *vmw) -{ - return !(vmw->capabilities & SVGA_CAP_3D) || vmw->vram_size < (32 * 1024 * 1024); -} - - /** * vmw_stdu_crtc_destroy - cleans up the STDU * @@ -451,93 +445,6 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc, } /** - * vmw_stdu_bo_clip - Callback to encode a suface DMA command cliprect - * - * @dirty: The closure structure. 
- * - * Encodes a surface DMA command cliprect and updates the bounding box - * for the DMA. - */ -static void vmw_stdu_bo_clip(struct vmw_kms_dirty *dirty) -{ - struct vmw_stdu_dirty *ddirty = - container_of(dirty, struct vmw_stdu_dirty, base); - struct vmw_stdu_dma *cmd = dirty->cmd; - struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1]; - - blit += dirty->num_hits; - blit->srcx = dirty->fb_x; - blit->srcy = dirty->fb_y; - blit->x = dirty->unit_x1; - blit->y = dirty->unit_y1; - blit->d = 1; - blit->w = dirty->unit_x2 - dirty->unit_x1; - blit->h = dirty->unit_y2 - dirty->unit_y1; - dirty->num_hits++; - - if (ddirty->transfer != SVGA3D_WRITE_HOST_VRAM) - return; - - /* Destination bounding box */ - ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1); - ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1); - ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2); - ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2); -} - -/** - * vmw_stdu_bo_fifo_commit - Callback to fill in and submit a DMA command. - * - * @dirty: The closure structure. - * - * Fills in the missing fields in a DMA command, and optionally encodes - * a screen target update command, depending on transfer direction. - */ -static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty) -{ - struct vmw_stdu_dirty *ddirty = - container_of(dirty, struct vmw_stdu_dirty, base); - struct vmw_screen_target_display_unit *stdu = - container_of(dirty->unit, typeof(*stdu), base); - struct vmw_stdu_dma *cmd = dirty->cmd; - struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1]; - SVGA3dCmdSurfaceDMASuffix *suffix = - (SVGA3dCmdSurfaceDMASuffix *) &blit[dirty->num_hits]; - size_t blit_size = sizeof(*blit) * dirty->num_hits + sizeof(*suffix); - - if (!dirty->num_hits) { - vmw_cmd_commit(dirty->dev_priv, 0); - return; - } - - cmd->header.id = SVGA_3D_CMD_SURFACE_DMA; - cmd->header.size = sizeof(cmd->body) + blit_size; - vmw_bo_get_guest_ptr(&ddirty->buf->base, &cmd->body.guest.ptr); - cmd->body.guest.pitch = ddirty->pitch; - cmd->body.host.sid = stdu->display_srf->res.id; - cmd->body.host.face = 0; - cmd->body.host.mipmap = 0; - cmd->body.transfer = ddirty->transfer; - suffix->suffixSize = sizeof(*suffix); - suffix->maximumOffset = ddirty->buf->base.base.size; - - if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) { - blit_size += sizeof(struct vmw_stdu_update); - - vmw_stdu_populate_update(&suffix[1], stdu->base.unit, - ddirty->left, ddirty->right, - ddirty->top, ddirty->bottom); - } - - vmw_cmd_commit(dirty->dev_priv, sizeof(*cmd) + blit_size); - - stdu->display_srf->res.res_dirty = true; - ddirty->left = ddirty->top = S32_MAX; - ddirty->right = ddirty->bottom = S32_MIN; -} - - -/** * vmw_stdu_bo_cpu_clip - Callback to encode a CPU blit * * @dirty: The closure structure. @@ -597,62 +504,21 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty) return; /* Assume we are blitting from Guest (bo) to Host (display_srf) */ - dst_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp; - dst_bo = &stdu->display_srf->res.backup->base; - dst_offset = ddirty->top * dst_pitch + ddirty->left * stdu->cpp; - - src_pitch = ddirty->pitch; - src_bo = &ddirty->buf->base; - src_offset = ddirty->fb_top * src_pitch + ddirty->fb_left * stdu->cpp; - - /* Swap src and dst if the assumption was wrong. 
*/ - if (ddirty->transfer != SVGA3D_WRITE_HOST_VRAM) { - swap(dst_pitch, src_pitch); - swap(dst_bo, src_bo); - swap(src_offset, dst_offset); - } + src_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp; + src_bo = &stdu->display_srf->res.guest_memory_bo->tbo; + src_offset = ddirty->top * dst_pitch + ddirty->left * stdu->cpp; + + dst_pitch = ddirty->pitch; + dst_bo = &ddirty->buf->tbo; + dst_offset = ddirty->fb_top * src_pitch + ddirty->fb_left * stdu->cpp; (void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch, src_bo, src_offset, src_pitch, width * stdu->cpp, height, &diff); - - if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM && - drm_rect_visible(&diff.rect)) { - struct vmw_private *dev_priv; - struct vmw_stdu_update *cmd; - struct drm_clip_rect region; - int ret; - - /* We are updating the actual surface, not a proxy */ - region.x1 = diff.rect.x1; - region.x2 = diff.rect.x2; - region.y1 = diff.rect.y1; - region.y2 = diff.rect.y2; - ret = vmw_kms_update_proxy(&stdu->display_srf->res, &region, - 1, 1); - if (ret) - goto out_cleanup; - - - dev_priv = vmw_priv(stdu->base.crtc.dev); - cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); - if (!cmd) - goto out_cleanup; - - vmw_stdu_populate_update(cmd, stdu->base.unit, - region.x1, region.x2, - region.y1, region.y2); - - vmw_cmd_commit(dev_priv, sizeof(*cmd)); - } - -out_cleanup: - ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX; - ddirty->right = ddirty->bottom = S32_MIN; } /** - * vmw_kms_stdu_dma - Perform a DMA transfer between a buffer-object backed + * vmw_kms_stdu_readback - Perform a readback from a buffer-object backed * framebuffer and the screen target system. * * @dev_priv: Pointer to the device private structure. @@ -665,9 +531,6 @@ out_cleanup: * be NULL. * @num_clips: Number of clip rects in @clips or @vclips. * @increment: Increment to use when looping over @clips or @vclips. - * @to_surface: Whether to DMA to the screen target system as opposed to - * from the screen target system. - * @interruptible: Whether to perform waits interruptible if possible. * @crtc: If crtc is passed, perform stdu dma on that crtc only. * * If DMA-ing till the screen target system, the function will also notify @@ -676,59 +539,49 @@ out_cleanup: * Returns 0 on success, negative error code on failure. -ERESTARTSYS if * interrupted. */ -int vmw_kms_stdu_dma(struct vmw_private *dev_priv, - struct drm_file *file_priv, - struct vmw_framebuffer *vfb, - struct drm_vmw_fence_rep __user *user_fence_rep, - struct drm_clip_rect *clips, - struct drm_vmw_rect *vclips, - uint32_t num_clips, - int increment, - bool to_surface, - bool interruptible, - struct drm_crtc *crtc) +int vmw_kms_stdu_readback(struct vmw_private *dev_priv, + struct drm_file *file_priv, + struct vmw_framebuffer *vfb, + struct drm_vmw_fence_rep __user *user_fence_rep, + struct drm_clip_rect *clips, + struct drm_vmw_rect *vclips, + uint32_t num_clips, + int increment, + struct drm_crtc *crtc) { - struct vmw_buffer_object *buf = + struct vmw_bo *buf = container_of(vfb, struct vmw_framebuffer_bo, base)->buffer; struct vmw_stdu_dirty ddirty; int ret; - bool cpu_blit = vmw_stdu_use_cpu_blit(dev_priv); DECLARE_VAL_CONTEXT(val_ctx, NULL, 0); /* - * VMs without 3D support don't have the surface DMA command and - * we'll be using a CPU blit, and the framebuffer should be moved out - * of VRAM. + * The GMR domain might seem confusing because it seemingly should + * never happen with screen targets, but e.g.
the xorg vmware driver issues + * CMD_SURFACE_DMA for various pixmap updates which might transition our bo to + * a GMR. Instead of forcing another transition we can optimize the readback + * by reading directly from the GMR. */ - ret = vmw_validation_add_bo(&val_ctx, buf, false, cpu_blit); + vmw_bo_placement_set(buf, + VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_GMR, + VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_GMR); + ret = vmw_validation_add_bo(&val_ctx, buf); if (ret) return ret; - ret = vmw_validation_prepare(&val_ctx, NULL, interruptible); + ret = vmw_validation_prepare(&val_ctx, NULL, true); if (ret) goto out_unref; - ddirty.transfer = (to_surface) ? SVGA3D_WRITE_HOST_VRAM : - SVGA3D_READ_HOST_VRAM; ddirty.left = ddirty.top = S32_MAX; ddirty.right = ddirty.bottom = S32_MIN; ddirty.fb_left = ddirty.fb_top = S32_MAX; ddirty.pitch = vfb->base.pitches[0]; ddirty.buf = buf; - ddirty.base.fifo_commit = vmw_stdu_bo_fifo_commit; - ddirty.base.clip = vmw_stdu_bo_clip; - ddirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) + - num_clips * sizeof(SVGA3dCopyBox) + - sizeof(SVGA3dCmdSurfaceDMASuffix); - if (to_surface) - ddirty.base.fifo_reserve_size += sizeof(struct vmw_stdu_update); - - - if (cpu_blit) { - ddirty.base.fifo_commit = vmw_stdu_bo_cpu_commit; - ddirty.base.clip = vmw_stdu_bo_cpu_clip; - ddirty.base.fifo_reserve_size = 0; - } + + ddirty.base.fifo_commit = vmw_stdu_bo_cpu_commit; + ddirty.base.clip = vmw_stdu_bo_cpu_clip; + ddirty.base.fifo_reserve_size = 0; ddirty.base.crtc = crtc; @@ -1160,11 +1013,8 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane, /* * This should only happen if the buffer object is too large to create a * proxy surface for. - * If we are a 2D VM with a buffer object then we have to use CPU blit - * so cache these mappings */ - if (vps->content_fb_type == SEPARATE_BO && - vmw_stdu_use_cpu_blit(dev_priv)) + if (vps->content_fb_type == SEPARATE_BO) vps->cpp = new_fb->pitches[0] / new_fb->width; return 0; @@ -1174,14 +1024,6 @@ out_srf_unref: return ret; } -static uint32_t vmw_stdu_bo_fifo_size(struct vmw_du_update_plane *update, - uint32_t num_hits) -{ - return sizeof(struct vmw_stdu_dma) + sizeof(SVGA3dCopyBox) * num_hits + - sizeof(SVGA3dCmdSurfaceDMASuffix) + - sizeof(struct vmw_stdu_update); -} - static uint32_t vmw_stdu_bo_fifo_size_cpu(struct vmw_du_update_plane *update, uint32_t num_hits) { @@ -1189,68 +1031,6 @@ static uint32_t vmw_stdu_bo_fifo_size_cpu(struct vmw_du_update_plane *update, sizeof(struct vmw_stdu_update); } -static uint32_t vmw_stdu_bo_populate_dma(struct vmw_du_update_plane *update, - void *cmd, uint32_t num_hits) -{ - struct vmw_screen_target_display_unit *stdu; - struct vmw_framebuffer_bo *vfbbo; - struct vmw_stdu_dma *cmd_dma = cmd; - - stdu = container_of(update->du, typeof(*stdu), base); - vfbbo = container_of(update->vfb, typeof(*vfbbo), base); - - cmd_dma->header.id = SVGA_3D_CMD_SURFACE_DMA; - cmd_dma->header.size = sizeof(cmd_dma->body) + - sizeof(struct SVGA3dCopyBox) * num_hits + - sizeof(SVGA3dCmdSurfaceDMASuffix); - vmw_bo_get_guest_ptr(&vfbbo->buffer->base, &cmd_dma->body.guest.ptr); - cmd_dma->body.guest.pitch = update->vfb->base.pitches[0]; - cmd_dma->body.host.sid = stdu->display_srf->res.id; - cmd_dma->body.host.face = 0; - cmd_dma->body.host.mipmap = 0; - cmd_dma->body.transfer = SVGA3D_WRITE_HOST_VRAM; - - return sizeof(*cmd_dma); -} - -static uint32_t vmw_stdu_bo_populate_clip(struct vmw_du_update_plane *update, - void *cmd, struct drm_rect *clip, - uint32_t fb_x, uint32_t fb_y) 
-{ - struct SVGA3dCopyBox *box = cmd; - - box->srcx = fb_x; - box->srcy = fb_y; - box->srcz = 0; - box->x = clip->x1; - box->y = clip->y1; - box->z = 0; - box->w = drm_rect_width(clip); - box->h = drm_rect_height(clip); - box->d = 1; - - return sizeof(*box); -} - -static uint32_t vmw_stdu_bo_populate_update(struct vmw_du_update_plane *update, - void *cmd, struct drm_rect *bb) -{ - struct vmw_screen_target_display_unit *stdu; - struct vmw_framebuffer_bo *vfbbo; - SVGA3dCmdSurfaceDMASuffix *suffix = cmd; - - stdu = container_of(update->du, typeof(*stdu), base); - vfbbo = container_of(update->vfb, typeof(*vfbbo), base); - - suffix->suffixSize = sizeof(*suffix); - suffix->maximumOffset = vfbbo->buffer->base.base.size; - - vmw_stdu_populate_update(&suffix[1], stdu->base.unit, bb->x1, bb->x2, - bb->y1, bb->y2); - - return sizeof(*suffix) + sizeof(struct vmw_stdu_update); -} - static uint32_t vmw_stdu_bo_pre_clip_cpu(struct vmw_du_update_plane *update, void *cmd, uint32_t num_hits) { @@ -1300,11 +1080,11 @@ vmw_stdu_bo_populate_update_cpu(struct vmw_du_update_plane *update, void *cmd, diff.cpp = stdu->cpp; - dst_bo = &stdu->display_srf->res.backup->base; + dst_bo = &stdu->display_srf->res.guest_memory_bo->tbo; dst_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp; dst_offset = bb->y1 * dst_pitch + bb->x1 * stdu->cpp; - src_bo = &vfbbo->buffer->base; + src_bo = &vfbbo->buffer->tbo; src_pitch = update->vfb->base.pitches[0]; src_offset = bo_update->fb_top * src_pitch + bo_update->fb_left * stdu->cpp; @@ -1368,24 +1148,12 @@ static int vmw_stdu_plane_update_bo(struct vmw_private *dev_priv, bo_update.base.vfb = vfb; bo_update.base.out_fence = out_fence; bo_update.base.mutex = NULL; - bo_update.base.cpu_blit = vmw_stdu_use_cpu_blit(dev_priv); bo_update.base.intr = false; - /* - * VM without 3D support don't have surface DMA command and framebuffer - * should be moved out of VRAM. - */ - if (bo_update.base.cpu_blit) { - bo_update.base.calc_fifo_size = vmw_stdu_bo_fifo_size_cpu; - bo_update.base.pre_clip = vmw_stdu_bo_pre_clip_cpu; - bo_update.base.clip = vmw_stdu_bo_clip_cpu; - bo_update.base.post_clip = vmw_stdu_bo_populate_update_cpu; - } else { - bo_update.base.calc_fifo_size = vmw_stdu_bo_fifo_size; - bo_update.base.pre_clip = vmw_stdu_bo_populate_dma; - bo_update.base.clip = vmw_stdu_bo_populate_clip; - bo_update.base.post_clip = vmw_stdu_bo_populate_update; - } + bo_update.base.calc_fifo_size = vmw_stdu_bo_fifo_size_cpu; + bo_update.base.pre_clip = vmw_stdu_bo_pre_clip_cpu; + bo_update.base.clip = vmw_stdu_bo_clip_cpu; + bo_update.base.post_clip = vmw_stdu_bo_populate_update_cpu; return vmw_du_helper_plane_update(&bo_update.base); } @@ -1548,7 +1316,6 @@ static int vmw_stdu_plane_update_surface(struct vmw_private *dev_priv, srf_update.vfb = vfb; srf_update.out_fence = out_fence; srf_update.mutex = &dev_priv->cmdbuf_mutex; - srf_update.cpu_blit = false; srf_update.intr = true; if (vfbs->is_bo_proxy) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c index 2de97419d5c9..edcc40659038 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2018-2019 VMware, Inc., Palo Alto, CA., USA + * Copyright © 2018-2023 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a @@ -26,11 +26,12 @@ * **************************************************************************/ -#include <drm/ttm/ttm_placement.h> - +#include "vmwgfx_binding.h" +#include "vmwgfx_bo.h" #include "vmwgfx_drv.h" #include "vmwgfx_resource_priv.h" -#include "vmwgfx_binding.h" + +#include <drm/ttm/ttm_placement.h> /** * struct vmw_dx_streamoutput - Streamoutput resource metadata. @@ -62,10 +63,11 @@ static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res, static const struct vmw_res_func vmw_dx_streamoutput_func = { .res_type = vmw_res_streamoutput, - .needs_backup = true, + .needs_guest_memory = true, .may_evict = false, .type_name = "DX streamoutput", - .backup_placement = &vmw_mob_placement, + .domain = VMW_BO_DOMAIN_MOB, + .busy_domain = VMW_BO_DOMAIN_MOB, .create = vmw_dx_streamoutput_create, .destroy = NULL, /* Command buffer managed resource. */ .bind = vmw_dx_streamoutput_bind, @@ -104,8 +106,8 @@ static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res) cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT; cmd->header.size = sizeof(cmd->body); cmd->body.soid = so->id; - cmd->body.mobid = res->backup->base.resource->start; - cmd->body.offsetInBytes = res->backup_offset; + cmd->body.mobid = res->guest_memory_bo->tbo.resource->start; + cmd->body.offsetInBytes = res->guest_memory_offset; cmd->body.sizeInBytes = so->size; vmw_cmd_commit(dev_priv, sizeof(*cmd)); @@ -195,7 +197,7 @@ static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback, struct vmw_fence_obj *fence; int ret; - if (WARN_ON(res->backup->base.resource->mem_type != VMW_PL_MOB)) + if (WARN_ON(res->guest_memory_bo->tbo.resource->mem_type != VMW_PL_MOB)) return -EINVAL; mutex_lock(&dev_priv->binding_mutex); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index dcfb003841b3..5db403ee8261 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA + * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -25,8 +25,7 @@ * **************************************************************************/ -#include <drm/ttm/ttm_placement.h> - +#include "vmwgfx_bo.h" #include "vmwgfx_drv.h" #include "vmwgfx_resource_priv.h" #include "vmwgfx_so.h" @@ -34,6 +33,8 @@ #include "vmw_surface_cache.h" #include "device_include/svga3d_surfacedefs.h" +#include <drm/ttm/ttm_placement.h> + #define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32) #define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32) #define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \ @@ -125,12 +126,13 @@ const struct vmw_user_resource_conv *user_surface_converter = static const struct vmw_res_func vmw_legacy_surface_func = { .res_type = vmw_res_surface, - .needs_backup = false, + .needs_guest_memory = false, .may_evict = true, .prio = 1, .dirty_prio = 1, .type_name = "legacy surfaces", - .backup_placement = &vmw_srf_placement, + .domain = VMW_BO_DOMAIN_GMR, + .busy_domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM, .create = &vmw_legacy_srf_create, .destroy = &vmw_legacy_srf_destroy, .bind = &vmw_legacy_srf_bind, @@ -139,12 
+141,13 @@ static const struct vmw_res_func vmw_legacy_surface_func = { static const struct vmw_res_func vmw_gb_surface_func = { .res_type = vmw_res_surface, - .needs_backup = true, + .needs_guest_memory = true, .may_evict = true, .prio = 1, .dirty_prio = 2, .type_name = "guest backed surfaces", - .backup_placement = &vmw_mob_placement, + .domain = VMW_BO_DOMAIN_MOB, + .busy_domain = VMW_BO_DOMAIN_MOB, .create = vmw_gb_surface_create, .destroy = vmw_gb_surface_destroy, .bind = vmw_gb_surface_bind, @@ -379,7 +382,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res) */ mutex_lock(&dev_priv->cmdbuf_mutex); - dev_priv->used_memory_size -= res->backup_size; + dev_priv->used_memory_size -= res->guest_memory_size; mutex_unlock(&dev_priv->cmdbuf_mutex); } } @@ -409,7 +412,7 @@ static int vmw_legacy_srf_create(struct vmw_resource *res) return 0; srf = vmw_res_to_srf(res); - if (unlikely(dev_priv->used_memory_size + res->backup_size >= + if (unlikely(dev_priv->used_memory_size + res->guest_memory_size >= dev_priv->memory_size)) return -EBUSY; @@ -447,7 +450,7 @@ static int vmw_legacy_srf_create(struct vmw_resource *res) * Surface memory usage accounting. */ - dev_priv->used_memory_size += res->backup_size; + dev_priv->used_memory_size += res->guest_memory_size; return 0; out_no_fifo: @@ -524,7 +527,7 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res, static int vmw_legacy_srf_bind(struct vmw_resource *res, struct ttm_validate_buffer *val_buf) { - if (!res->backup_dirty) + if (!res->guest_memory_dirty) return 0; return vmw_legacy_srf_dma(res, val_buf, true); @@ -583,7 +586,7 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res) * Surface memory usage accounting. */ - dev_priv->used_memory_size -= res->backup_size; + dev_priv->used_memory_size -= res->guest_memory_size; /* * Release the surface ID. 
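The churn in these hunks is one mechanical rename: a resource's "backup" buffer is now its guest memory, and the TTM object embedded in the old struct vmw_buffer_object as ->base becomes struct vmw_bo's ->tbo. A before/after sketch under those assumptions; example_res_func and example_mob_id are hypothetical illustrations, not driver code:

/*
 * Field renames applied throughout the series:
 *   res->backup         -> res->guest_memory_bo      (struct vmw_bo *)
 *   res->backup_size    -> res->guest_memory_size
 *   res->backup_offset  -> res->guest_memory_offset
 *   res->backup_dirty   -> res->guest_memory_dirty
 *   vbo->base           -> vbo->tbo                  (struct ttm_buffer_object)
 */
static const struct vmw_res_func example_res_func = {
	.res_type = vmw_res_shader,
	.needs_guest_memory = true,   /* was .needs_backup */
	.domain = VMW_BO_DOMAIN_MOB,  /* replaces .backup_placement = &vmw_mob_placement */
	.busy_domain = VMW_BO_DOMAIN_MOB,
	/* .create, .destroy, .bind, .unbind as before */
};

static unsigned long example_mob_id(const struct vmw_resource *res)
{
	/* was: res->backup->base.resource->start */
	return res->guest_memory_bo->tbo.resource->start;
}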
@@ -683,8 +686,8 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base) container_of(base, struct vmw_user_surface, prime.base); struct vmw_resource *res = &user_srf->srf.res; - if (res && res->backup) - drm_gem_object_put(&res->backup->base.base); + if (res->guest_memory_bo) + drm_gem_object_put(&res->guest_memory_bo->tbo.base); *p_base = NULL; vmw_resource_unreference(&res); @@ -812,7 +815,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, ++cur_size; } } - res->backup_size = cur_bo_offset; + res->guest_memory_size = cur_bo_offset; if (metadata->scanout && metadata->num_sizes == 1 && metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH && @@ -856,14 +859,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, ret = vmw_gem_object_create_with_handle(dev_priv, file_priv, - res->backup_size, + res->guest_memory_size, &backup_handle, - &res->backup); + &res->guest_memory_bo); if (unlikely(ret != 0)) { vmw_resource_unreference(&res); goto out_unlock; } - vmw_bo_reference(res->backup); + vmw_bo_reference(res->guest_memory_bo); /* * We don't expose the handle to the userspace and surface * already holds a gem reference @@ -872,7 +875,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, } tmp = vmw_resource_reference(&srf->res); - ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, + ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime, req->shareable, VMW_RES_SURFACE, &vmw_user_surface_base_release); @@ -1186,7 +1189,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res, BUG_ON(bo->resource->mem_type != VMW_PL_MOB); - submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0); + submit_size = sizeof(*cmd1) + (res->guest_memory_dirty ? sizeof(*cmd2) : 0); cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size); if (unlikely(!cmd1)) @@ -1196,7 +1199,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res, cmd1->header.size = sizeof(cmd1->body); cmd1->body.sid = res->id; cmd1->body.mobid = bo->resource->start; - if (res->backup_dirty) { + if (res->guest_memory_dirty) { cmd2 = (void *) &cmd1[1]; cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE; cmd2->header.size = sizeof(cmd2->body); @@ -1204,12 +1207,12 @@ static int vmw_gb_surface_bind(struct vmw_resource *res, } vmw_cmd_commit(dev_priv, submit_size); - if (res->backup->dirty && res->backup_dirty) { + if (res->guest_memory_bo->dirty && res->guest_memory_dirty) { /* We've just made a full upload. Cear dirty regions. 
*/ vmw_bo_dirty_clear_res(res); } - res->backup_dirty = false; + res->guest_memory_dirty = false; return 0; } @@ -1505,11 +1508,11 @@ vmw_gb_surface_define_internal(struct drm_device *dev, if (req->base.buffer_handle != SVGA3D_INVALID_ID) { ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle, - &res->backup); + &res->guest_memory_bo); if (ret == 0) { - if (res->backup->base.base.size < res->backup_size) { + if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) { VMW_DEBUG_USER("Surface backup buffer too small.\n"); - vmw_bo_unreference(&res->backup); + vmw_bo_unreference(&res->guest_memory_bo); ret = -EINVAL; goto out_unlock; } else { @@ -1520,11 +1523,11 @@ vmw_gb_surface_define_internal(struct drm_device *dev, (drm_vmw_surface_flag_create_buffer | drm_vmw_surface_flag_coherent)) { ret = vmw_gem_object_create_with_handle(dev_priv, file_priv, - res->backup_size, + res->guest_memory_size, &backup_handle, - &res->backup); + &res->guest_memory_bo); if (ret == 0) - vmw_bo_reference(res->backup); + vmw_bo_reference(res->guest_memory_bo); } if (unlikely(ret != 0)) { @@ -1533,9 +1536,9 @@ vmw_gb_surface_define_internal(struct drm_device *dev, } if (req->base.drm_surface_flags & drm_vmw_surface_flag_coherent) { - struct vmw_buffer_object *backup = res->backup; + struct vmw_bo *backup = res->guest_memory_bo; - ttm_bo_reserve(&backup->base, false, false, NULL); + ttm_bo_reserve(&backup->tbo, false, false, NULL); if (!res->func->dirty_alloc) ret = -EINVAL; if (!ret) @@ -1544,7 +1547,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev, res->coherent = true; ret = res->func->dirty_alloc(res); } - ttm_bo_unreserve(&backup->base); + ttm_bo_unreserve(&backup->tbo); if (ret) { vmw_resource_unreference(&res); goto out_unlock; @@ -1553,7 +1556,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev, } tmp = vmw_resource_reference(res); - ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, + ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime, req->base.drm_surface_flags & drm_vmw_surface_flag_shareable, VMW_RES_SURFACE, @@ -1566,11 +1569,11 @@ vmw_gb_surface_define_internal(struct drm_device *dev, } rep->handle = user_srf->prime.base.handle; - rep->backup_size = res->backup_size; - if (res->backup) { + rep->backup_size = res->guest_memory_size; + if (res->guest_memory_bo) { rep->buffer_map_handle = - drm_vma_node_offset_addr(&res->backup->base.base.vma_node); - rep->buffer_size = res->backup->base.base.size; + drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node); + rep->buffer_size = res->guest_memory_bo->tbo.base.size; rep->buffer_handle = backup_handle; } else { rep->buffer_map_handle = 0; @@ -1613,14 +1616,14 @@ vmw_gb_surface_reference_internal(struct drm_device *dev, user_srf = container_of(base, struct vmw_user_surface, prime.base); srf = &user_srf->srf; - if (!srf->res.backup) { + if (!srf->res.guest_memory_bo) { DRM_ERROR("Shared GB surface is missing a backup buffer.\n"); goto out_bad_resource; } metadata = &srf->metadata; mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */ - ret = drm_gem_handle_create(file_priv, &srf->res.backup->base.base, + ret = drm_gem_handle_create(file_priv, &srf->res.guest_memory_bo->tbo.base, &backup_handle); mutex_unlock(&dev_priv->cmdbuf_mutex); if (ret != 0) { @@ -1639,11 +1642,11 @@ vmw_gb_surface_reference_internal(struct drm_device *dev, rep->creq.base.buffer_handle = backup_handle; rep->creq.base.base_size = metadata->base_size; rep->crep.handle = 
user_srf->prime.base.handle; - rep->crep.backup_size = srf->res.backup_size; + rep->crep.backup_size = srf->res.guest_memory_size; rep->crep.buffer_handle = backup_handle; rep->crep.buffer_map_handle = - drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node); - rep->crep.buffer_size = srf->res.backup->base.base.size; + drm_vma_node_offset_addr(&srf->res.guest_memory_bo->tbo.base.vma_node); + rep->crep.buffer_size = srf->res.guest_memory_bo->tbo.base.size; rep->creq.version = drm_vmw_gb_surface_v1; rep->creq.svga3d_flags_upper_32_bits = @@ -1742,12 +1745,12 @@ static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res, { struct vmw_surface_dirty *dirty = (struct vmw_surface_dirty *) res->dirty; - size_t backup_end = res->backup_offset + res->backup_size; + size_t backup_end = res->guest_memory_offset + res->guest_memory_size; struct vmw_surface_loc loc1, loc2; const struct vmw_surface_cache *cache; - start = max_t(size_t, start, res->backup_offset) - res->backup_offset; - end = min(end, backup_end) - res->backup_offset; + start = max_t(size_t, start, res->guest_memory_offset) - res->guest_memory_offset; + end = min(end, backup_end) - res->guest_memory_offset; cache = &dirty->cache; vmw_surface_get_loc(cache, &loc1, start); vmw_surface_get_loc(cache, &loc2, end - 1); @@ -1794,13 +1797,13 @@ static void vmw_surface_buf_dirty_range_add(struct vmw_resource *res, struct vmw_surface_dirty *dirty = (struct vmw_surface_dirty *) res->dirty; const struct vmw_surface_cache *cache = &dirty->cache; - size_t backup_end = res->backup_offset + cache->mip_chain_bytes; + size_t backup_end = res->guest_memory_offset + cache->mip_chain_bytes; SVGA3dBox *box = &dirty->boxes[0]; u32 box_c2; box->h = box->d = 1; - start = max_t(size_t, start, res->backup_offset) - res->backup_offset; - end = min(end, backup_end) - res->backup_offset; + start = max_t(size_t, start, res->guest_memory_offset) - res->guest_memory_offset; + end = min(end, backup_end) - res->guest_memory_offset; box_c2 = box->x + box->w; if (box->w == 0 || box->x > start) box->x = start; @@ -1816,8 +1819,8 @@ static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start, { struct vmw_surface *srf = vmw_res_to_srf(res); - if (WARN_ON(end <= res->backup_offset || - start >= res->backup_offset + res->backup_size)) + if (WARN_ON(end <= res->guest_memory_offset || + start >= res->guest_memory_offset + res->guest_memory_size)) return; if (srf->metadata.format == SVGA3D_BUFFER) @@ -2074,7 +2077,7 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv, if (metadata->flags & SVGA3D_SURFACE_MULTISAMPLE) sample_count = metadata->multisample_count; - srf->res.backup_size = + srf->res.guest_memory_size = vmw_surface_get_serialized_size_extended( metadata->format, metadata->base_size, @@ -2083,7 +2086,7 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv, sample_count); if (metadata->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT) - srf->res.backup_size += sizeof(SVGA3dDXSOState); + srf->res.guest_memory_size += sizeof(SVGA3dDXSOState); /* * Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index 856a352a72a6..af8562c95cc3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright 2009-2015 VMware, Inc., Palo Alto, 
CA., USA + * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -25,6 +25,7 @@ * **************************************************************************/ +#include "vmwgfx_bo.h" #include "vmwgfx_drv.h" #include <drm/ttm/ttm_placement.h> @@ -49,13 +50,6 @@ static const struct ttm_place gmr_placement_flags = { .flags = 0 }; -static const struct ttm_place mob_placement_flags = { - .fpfn = 0, - .lpfn = 0, - .mem_type = VMW_PL_MOB, - .flags = 0 -}; - struct ttm_placement vmw_vram_placement = { .num_placement = 1, .placement = &vram_placement_flags, @@ -77,27 +71,6 @@ static const struct ttm_place vram_gmr_placement_flags[] = { } }; -static const struct ttm_place gmr_vram_placement_flags[] = { - { - .fpfn = 0, - .lpfn = 0, - .mem_type = VMW_PL_GMR, - .flags = 0 - }, { - .fpfn = 0, - .lpfn = 0, - .mem_type = TTM_PL_VRAM, - .flags = 0 - } -}; - -static const struct ttm_place vmw_sys_placement_flags = { - .fpfn = 0, - .lpfn = 0, - .mem_type = VMW_PL_SYSTEM, - .flags = 0 -}; - struct ttm_placement vmw_vram_gmr_placement = { .num_placement = 2, .placement = vram_gmr_placement_flags, @@ -105,13 +78,6 @@ struct ttm_placement vmw_vram_gmr_placement = { .busy_placement = &gmr_placement_flags }; -struct ttm_placement vmw_vram_sys_placement = { - .num_placement = 1, - .placement = &vram_placement_flags, - .num_busy_placement = 1, - .busy_placement = &sys_placement_flags -}; - struct ttm_placement vmw_sys_placement = { .num_placement = 1, .placement = &sys_placement_flags, @@ -119,53 +85,6 @@ struct ttm_placement vmw_sys_placement = { .busy_placement = &sys_placement_flags }; -struct ttm_placement vmw_pt_sys_placement = { - .num_placement = 1, - .placement = &vmw_sys_placement_flags, - .num_busy_placement = 1, - .busy_placement = &vmw_sys_placement_flags -}; - -static const struct ttm_place nonfixed_placement_flags[] = { - { - .fpfn = 0, - .lpfn = 0, - .mem_type = TTM_PL_SYSTEM, - .flags = 0 - }, { - .fpfn = 0, - .lpfn = 0, - .mem_type = VMW_PL_GMR, - .flags = 0 - }, { - .fpfn = 0, - .lpfn = 0, - .mem_type = VMW_PL_MOB, - .flags = 0 - } -}; - -struct ttm_placement vmw_srf_placement = { - .num_placement = 1, - .num_busy_placement = 2, - .placement = &gmr_placement_flags, - .busy_placement = gmr_vram_placement_flags -}; - -struct ttm_placement vmw_mob_placement = { - .num_placement = 1, - .num_busy_placement = 1, - .placement = &mob_placement_flags, - .busy_placement = &mob_placement_flags -}; - -struct ttm_placement vmw_nonfixed_placement = { - .num_placement = 3, - .placement = nonfixed_placement_flags, - .num_busy_placement = 1, - .busy_placement = &sys_placement_flags -}; - const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt); /** @@ -508,7 +427,7 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo, if (!vmw_be) return NULL; - vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev); + vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev); vmw_be->mob = NULL; if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent) @@ -534,7 +453,7 @@ static void vmw_evict_flags(struct ttm_buffer_object *bo, static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem) { - struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev); + struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev); switch (mem->mem_type) { case TTM_PL_SYSTEM: @@ -596,9 +515,13 @@ static int vmw_move(struct ttm_buffer_object *bo, struct 
ttm_resource *new_mem, struct ttm_place *hop) { - struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->resource->mem_type); - struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type); - int ret; + struct ttm_resource_manager *new_man; + struct ttm_resource_manager *old_man = NULL; + int ret = 0; + + new_man = ttm_manager_type(bo->bdev, new_mem->mem_type); + if (bo->resource) + old_man = ttm_manager_type(bo->bdev, bo->resource->mem_type); if (new_man->use_tt && !vmw_memtype_is_system(new_mem->mem_type)) { ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem); @@ -606,9 +529,15 @@ static int vmw_move(struct ttm_buffer_object *bo, return ret; } + if (!bo->resource || (bo->resource->mem_type == TTM_PL_SYSTEM && + bo->ttm == NULL)) { + ttm_bo_move_null(bo, new_mem); + return 0; + } + vmw_move_notify(bo, bo->resource, new_mem); - if (old_man->use_tt && new_man->use_tt) { + if (old_man && old_man->use_tt && new_man->use_tt) { if (vmw_memtype_is_system(bo->resource->mem_type)) { ttm_bo_move_null(bo, new_mem); return 0; @@ -645,34 +574,39 @@ struct ttm_device_funcs vmw_bo_driver = { }; int vmw_bo_create_and_populate(struct vmw_private *dev_priv, - unsigned long bo_size, - struct ttm_buffer_object **bo_p) + size_t bo_size, u32 domain, + struct vmw_bo **bo_p) { struct ttm_operation_ctx ctx = { .interruptible = false, .no_wait_gpu = false }; - struct ttm_buffer_object *bo; + struct vmw_bo *vbo; int ret; + struct vmw_bo_params bo_params = { + .domain = domain, + .busy_domain = domain, + .bo_type = ttm_bo_type_kernel, + .size = bo_size, + .pin = true + }; - ret = vmw_bo_create_kernel(dev_priv, bo_size, - &vmw_pt_sys_placement, - &bo); + ret = vmw_bo_create(dev_priv, &bo_params, &vbo); if (unlikely(ret != 0)) return ret; - ret = ttm_bo_reserve(bo, false, true, NULL); + ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL); BUG_ON(ret != 0); - ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx); + ret = vmw_ttm_populate(vbo->tbo.bdev, vbo->tbo.ttm, &ctx); if (likely(ret == 0)) { struct vmw_ttm_tt *vmw_tt = - container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm); + container_of(vbo->tbo.ttm, struct vmw_ttm_tt, dma_ttm); ret = vmw_ttm_map_dma(vmw_tt); } - ttm_bo_unreserve(bo); + ttm_bo_unreserve(&vbo->tbo); if (likely(ret == 0)) - *bo_p = bo; + *bo_p = vbo; return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_va.c b/drivers/gpu/drm/vmwgfx/vmwgfx_va.c index 6ad744ae07f5..d140089e53d4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_va.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_va.c @@ -25,6 +25,7 @@ * **************************************************************************/ +#include "vmwgfx_bo.h" #include "vmwgfx_drv.h" #include "vmwgfx_resource_priv.h" @@ -80,10 +81,11 @@ static void vmw_stream_set_arg_handle(void *data, u32 handle) static const struct vmw_simple_resource_func va_stream_func = { .res_func = { .res_type = vmw_res_stream, - .needs_backup = false, + .needs_guest_memory = false, .may_evict = false, .type_name = "overlay stream", - .backup_placement = NULL, + .domain = VMW_BO_DOMAIN_SYS, + .busy_domain = VMW_BO_DOMAIN_SYS, .create = NULL, .destroy = NULL, .bind = NULL, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c index f5c4a40fb16d..aaacbdcbd742 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright © 2018 - 2022 VMware, 
Inc., Palo Alto, CA., USA + * Copyright © 2018 - 2023 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -25,9 +25,12 @@ * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ -#include <linux/slab.h> -#include "vmwgfx_validation.h" +#include "vmwgfx_bo.h" #include "vmwgfx_drv.h" +#include "vmwgfx_resource_priv.h" +#include "vmwgfx_validation.h" + +#include <linux/slab.h> #define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE) @@ -38,8 +41,6 @@ * @hash: A hash entry used for the duplicate detection hash table. * @coherent_count: If switching backup buffers, number of new coherent * resources that will have this buffer as a backup buffer. - * @as_mob: Validate as mob. - * @cpu_blit: Validate for cpu blit access. * * Bit fields are used since these structures are allocated and freed in * large numbers and space conservation is desired. @@ -48,21 +49,19 @@ struct vmw_validation_bo_node { struct ttm_validate_buffer base; struct vmwgfx_hash_item hash; unsigned int coherent_count; - u32 as_mob : 1; - u32 cpu_blit : 1; }; /** * struct vmw_validation_res_node - Resource validation metadata. * @head: List head for the resource validation list. * @hash: A hash entry used for the duplicate detection hash table. * @res: Reference counted resource pointer. - * @new_backup: Non ref-counted pointer to new backup buffer to be assigned - * to a resource. - * @new_backup_offset: Offset into the new backup mob for resources that can - * share MOBs. + * @new_guest_memory_bo: Non ref-counted pointer to new guest memory buffer + * to be assigned to a resource. + * @new_guest_memory_offset: Offset into the new backup mob for resources + * that can share MOBs. * @no_buffer_needed: Kernel does not need to allocate a MOB during validation, * the command stream provides a mob bind operation. - * @switching_backup: The validation process is switching backup MOB. + * @switching_guest_memory_bo: The validation process is switching backup MOB. * @first_usage: True iff the resource has been seen only once in the current * validation batch. * @reserved: Whether the resource is currently reserved by this process. @@ -77,10 +76,10 @@ struct vmw_validation_res_node { struct list_head head; struct vmwgfx_hash_item hash; struct vmw_resource *res; - struct vmw_buffer_object *new_backup; - unsigned long new_backup_offset; + struct vmw_bo *new_guest_memory_bo; + unsigned long new_guest_memory_offset; u32 no_buffer_needed : 1; - u32 switching_backup : 1; + u32 switching_guest_memory_bo : 1; u32 first_usage : 1; u32 reserved : 1; u32 dirty : 1; @@ -173,7 +172,7 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx) */ static struct vmw_validation_bo_node * vmw_validation_find_bo_dup(struct vmw_validation_context *ctx, - struct vmw_buffer_object *vbo) + struct vmw_bo *vbo) { struct vmw_validation_bo_node *bo_node = NULL; @@ -194,7 +193,7 @@ vmw_validation_find_bo_dup(struct vmw_validation_context *ctx, struct vmw_validation_bo_node *entry; list_for_each_entry(entry, &ctx->bo_list, base.head) { - if (entry->base.bo == &vbo->base) { + if (entry->base.bo == &vbo->tbo) { bo_node = entry; break; } @@ -258,26 +257,16 @@ out: * vmw_validation_add_bo - Add a buffer object to the validation context. * @ctx: The validation context. * @vbo: The buffer object. - * @as_mob: Validate as mob, otherwise suitable for GMR operations. - * @cpu_blit: Validate in a page-mappable location. 
* * Return: Zero on success, negative error code otherwise. */ int vmw_validation_add_bo(struct vmw_validation_context *ctx, - struct vmw_buffer_object *vbo, - bool as_mob, - bool cpu_blit) + struct vmw_bo *vbo) { struct vmw_validation_bo_node *bo_node; bo_node = vmw_validation_find_bo_dup(ctx, vbo); - if (bo_node) { - if (bo_node->as_mob != as_mob || - bo_node->cpu_blit != cpu_blit) { - DRM_ERROR("Inconsistent buffer usage.\n"); - return -EINVAL; - } - } else { + if (!bo_node) { struct ttm_validate_buffer *val_buf; bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node)); @@ -290,13 +279,11 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx, bo_node->hash.key); } val_buf = &bo_node->base; - val_buf->bo = ttm_bo_get_unless_zero(&vbo->base); + val_buf->bo = ttm_bo_get_unless_zero(&vbo->tbo); if (!val_buf->bo) return -ESRCH; val_buf->num_shared = 0; list_add_tail(&val_buf->head, &ctx->bo_list); - bo_node->as_mob = as_mob; - bo_node->cpu_blit = cpu_blit; } return 0; @@ -406,23 +393,23 @@ void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx, * the resource. * @vbo: The new backup buffer object MOB. This buffer object needs to have * already been registered with the validation context. - * @backup_offset: Offset into the new backup MOB. + * @guest_memory_offset: Offset into the new backup MOB. */ void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx, void *val_private, - struct vmw_buffer_object *vbo, - unsigned long backup_offset) + struct vmw_bo *vbo, + unsigned long guest_memory_offset) { struct vmw_validation_res_node *val; val = container_of(val_private, typeof(*val), private); - val->switching_backup = 1; + val->switching_guest_memory_bo = 1; if (val->first_usage) val->no_buffer_needed = 1; - val->new_backup = vbo; - val->new_backup_offset = backup_offset; + val->new_guest_memory_bo = vbo; + val->new_guest_memory_offset = guest_memory_offset; } /** @@ -450,21 +437,22 @@ int vmw_validation_res_reserve(struct vmw_validation_context *ctx, goto out_unreserve; val->reserved = 1; - if (res->backup) { - struct vmw_buffer_object *vbo = res->backup; + if (res->guest_memory_bo) { + struct vmw_bo *vbo = res->guest_memory_bo; - ret = vmw_validation_add_bo - (ctx, vbo, vmw_resource_needs_backup(res), - false); + vmw_bo_placement_set(vbo, + res->func->domain, + res->func->busy_domain); + ret = vmw_validation_add_bo(ctx, vbo); if (ret) goto out_unreserve; } - if (val->switching_backup && val->new_backup && + if (val->switching_guest_memory_bo && val->new_guest_memory_bo && res->coherent) { struct vmw_validation_bo_node *bo_node = vmw_validation_find_bo_dup(ctx, - val->new_backup); + val->new_guest_memory_bo); if (WARN_ON(!bo_node)) { ret = -EINVAL; @@ -507,9 +495,9 @@ void vmw_validation_res_unreserve(struct vmw_validation_context *ctx, vmw_resource_unreserve(val->res, val->dirty_set, val->dirty, - val->switching_backup, - val->new_backup, - val->new_backup_offset); + val->switching_guest_memory_bo, + val->new_guest_memory_bo, + val->new_guest_memory_offset); } } @@ -517,17 +505,14 @@ void vmw_validation_res_unreserve(struct vmw_validation_context *ctx, * vmw_validation_bo_validate_single - Validate a single buffer object. * @bo: The TTM buffer object base. * @interruptible: Whether to perform waits interruptible if possible. - * @validate_as_mob: Whether to validate in MOB memory. * * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error * code on failure. 
*/ -int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo, - bool interruptible, - bool validate_as_mob) +static int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo, + bool interruptible) { - struct vmw_buffer_object *vbo = - container_of(bo, struct vmw_buffer_object, base); + struct vmw_bo *vbo = to_vmw_bo(&bo->base); struct ttm_operation_ctx ctx = { .interruptible = interruptible, .no_wait_gpu = false @@ -537,30 +522,20 @@ int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo, if (atomic_read(&vbo->cpu_writers)) return -EBUSY; - if (vbo->base.pin_count > 0) + if (vbo->tbo.pin_count > 0) return 0; - if (validate_as_mob) - return ttm_bo_validate(bo, &vmw_mob_placement, &ctx); - - /** - * Put BO in VRAM if there is space, otherwise as a GMR. - * If there is no space in VRAM and GMR ids are all used up, - * start evicting GMRs to make room. If the DMA buffer can't be - * used as a GMR, this will return -ENOMEM. - */ - - ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx); + ret = ttm_bo_validate(bo, &vbo->placement, &ctx); if (ret == 0 || ret == -ERESTARTSYS) return ret; - /** - * If that failed, try VRAM again, this time evicting + /* + * If that failed, try again, this time evicting * previous contents. */ + ctx.allow_res_evict = true; - ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx); - return ret; + return ttm_bo_validate(bo, &vbo->placement, &ctx); } /** @@ -578,21 +553,10 @@ int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr) int ret; list_for_each_entry(entry, &ctx->bo_list, base.head) { - struct vmw_buffer_object *vbo = - container_of(entry->base.bo, typeof(*vbo), base); - - if (entry->cpu_blit) { - struct ttm_operation_ctx ttm_ctx = { - .interruptible = intr, - .no_wait_gpu = false - }; - - ret = ttm_bo_validate(entry->base.bo, - &vmw_nonfixed_placement, &ttm_ctx); - } else { - ret = vmw_validation_bo_validate_single - (entry->base.bo, intr, entry->as_mob); - } + struct vmw_bo *vbo = to_vmw_bo(&entry->base.bo->base); + + ret = vmw_validation_bo_validate_single(entry->base.bo, intr); + if (ret) return ret; @@ -639,7 +603,7 @@ int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr) list_for_each_entry(val, &ctx->resource_list, head) { struct vmw_resource *res = val->res; - struct vmw_buffer_object *backup = res->backup; + struct vmw_bo *backup = res->guest_memory_bo; ret = vmw_resource_validate(res, intr, val->dirty_set && val->dirty); @@ -650,12 +614,12 @@ int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr) } /* Check if the resource switched backup buffer */ - if (backup && res->backup && (backup != res->backup)) { - struct vmw_buffer_object *vbo = res->backup; + if (backup && res->guest_memory_bo && backup != res->guest_memory_bo) { + struct vmw_bo *vbo = res->guest_memory_bo; - ret = vmw_validation_add_bo - (ctx, vbo, vmw_resource_needs_backup(res), - false); + vmw_bo_placement_set(vbo, res->func->domain, + res->func->busy_domain); + ret = vmw_validation_add_bo(ctx, vbo); if (ret) return ret; } @@ -889,9 +853,7 @@ void vmw_validation_bo_backoff(struct vmw_validation_context *ctx) list_for_each_entry(entry, &ctx->bo_list, base.head) { if (entry->coherent_count) { unsigned int coherent_count = entry->coherent_count; - struct vmw_buffer_object *vbo = - container_of(entry->base.bo, typeof(*vbo), - base); + struct vmw_bo *vbo = to_vmw_bo(&entry->base.bo->base); while (coherent_count--) vmw_bo_dirty_release(vbo); diff --git 
a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h index ab9ec226f433..240ee0c4ebfd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h @@ -73,7 +73,7 @@ struct vmw_validation_context { size_t total_mem; }; -struct vmw_buffer_object; +struct vmw_bo; struct vmw_resource; struct vmw_fence_obj; @@ -159,11 +159,7 @@ static inline unsigned int vmw_validation_align(unsigned int val) } int vmw_validation_add_bo(struct vmw_validation_context *ctx, - struct vmw_buffer_object *vbo, - bool as_mob, bool cpu_blit); -int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo, - bool interruptible, - bool validate_as_mob); + struct vmw_bo *vbo); int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr); void vmw_validation_unref_lists(struct vmw_validation_context *ctx); int vmw_validation_add_resource(struct vmw_validation_context *ctx, @@ -179,7 +175,7 @@ void vmw_validation_res_unreserve(struct vmw_validation_context *ctx, bool backoff); void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx, void *val_private, - struct vmw_buffer_object *vbo, + struct vmw_bo *vbo, unsigned long backup_offset); int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr); diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 9ddb854b8155..5c19097266fe 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -1343,7 +1343,9 @@ static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag) struct mei_cl_vtag *vtag_l; list_for_each_entry(vtag_l, &cl->vtag_map, list) { - if (vtag_l->vtag == vtag) { + /* The client on bus has one fixed vtag map */ + if ((cl->cldev && mei_cldev_enabled(cl->cldev)) || + vtag_l->vtag == vtag) { vtag_l->pending_read = false; break; } diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 5bf0d50d55a0..676d566f38dd 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -342,6 +342,12 @@ static void mei_me_remove(struct pci_dev *pdev) } #ifdef CONFIG_PM_SLEEP +static int mei_me_pci_prepare(struct device *device) +{ + pm_runtime_resume(device); + return 0; +} + static int mei_me_pci_suspend(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); @@ -398,7 +404,17 @@ static int mei_me_pci_resume(struct device *device) return 0; } -#endif /* CONFIG_PM_SLEEP */ + +static void mei_me_pci_complete(struct device *device) +{ + pm_runtime_suspend(device); +} +#else /* CONFIG_PM_SLEEP */ + +#define mei_me_pci_prepare NULL +#define mei_me_pci_complete NULL + +#endif /* !CONFIG_PM_SLEEP */ #ifdef CONFIG_PM static int mei_me_pm_runtime_idle(struct device *device) @@ -501,6 +517,8 @@ static inline void mei_me_unset_pm_domain(struct mei_device *dev) } static const struct dev_pm_ops mei_me_pm_ops = { + .prepare = mei_me_pci_prepare, + .complete = mei_me_pci_complete, SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend, mei_me_pci_resume) SET_RUNTIME_PM_OPS( diff --git a/drivers/ps3/ps3av.c b/drivers/ps3/ps3av.c index 516e6d14d32e..f6c9e56bdba7 100644 --- a/drivers/ps3/ps3av.c +++ b/drivers/ps3/ps3av.c @@ -11,13 +11,14 @@ #include <linux/delay.h> #include <linux/notifier.h> #include <linux/ioctl.h> -#include <linux/fb.h> #include <linux/slab.h> #include <asm/firmware.h> #include <asm/ps3av.h> #include <asm/ps3.h> +#include <video/cmdline.h> + #include "vuart.h" #define BUFSIZE 4096 /* vuart buf size */ @@ -921,6 +922,7 @@ EXPORT_SYMBOL_GPL(ps3av_audio_mute); static int 
ps3av_probe(struct ps3_system_bus_device *dev) { + const char *mode_option; int res; int id; @@ -968,10 +970,9 @@ static int ps3av_probe(struct ps3_system_bus_device *dev) ps3av_get_hw_conf(ps3av); -#ifdef CONFIG_FB - if (fb_mode_option && !strcmp(fb_mode_option, "safe")) + mode_option = video_get_options(NULL); + if (mode_option && !strcmp(mode_option, "safe")) safe_mode = 1; -#endif /* CONFIG_FB */ id = ps3av_auto_videomode(&ps3av->av_hw_conf); if (id < 0) { printk(KERN_ERR "%s: invalid id :%d\n", __func__, id); diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 6d2fde6c5d11..bf05363d8906 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -11,6 +11,9 @@ config APERTURE_HELPERS Support tracking and hand-over of aperture ownership. Required by graphics drivers for firmware-provided framebuffers. +config VIDEO_CMDLINE + bool + config VIDEO_NOMODESET bool default n diff --git a/drivers/video/Makefile b/drivers/video/Makefile index a50eb528ed3c..831c9fa57a6c 100644 --- a/drivers/video/Makefile +++ b/drivers/video/Makefile @@ -2,6 +2,7 @@ obj-$(CONFIG_APERTURE_HELPERS) += aperture.o obj-$(CONFIG_VGASTATE) += vgastate.o +obj-$(CONFIG_VIDEO_CMDLINE) += cmdline.o obj-$(CONFIG_VIDEO_NOMODESET) += nomodeset.o obj-$(CONFIG_HDMI) += hdmi.o diff --git a/drivers/video/cmdline.c b/drivers/video/cmdline.c new file mode 100644 index 000000000000..d3d257489c3d --- /dev/null +++ b/drivers/video/cmdline.c @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Based on the fbdev code in drivers/video/fbdev/core/fb_cmdline: + * + * Copyright (C) 2014 Intel Corp + * Copyright (C) 1994 Martin Schaller + * + * 2001 - Documented with DocBook + * - Brad Douglas <[email protected]> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + * + * Authors: + * Daniel Vetter <[email protected]> + */ + +#include <linux/fb.h> /* for FB_MAX */ +#include <linux/init.h> + +#include <video/cmdline.h> + +/* + * FB_MAX is the maximum number of framebuffer devices and also + * the maximum number of video= parameters. Although not directly + * related to each other, it makes sense to keep it that way. + */ +static const char *video_options[FB_MAX] __read_mostly; +static const char *video_option __read_mostly; +static int video_of_only __read_mostly; + +static const char *__video_get_option_string(const char *name) +{ + const char *options = NULL; + size_t name_len = 0; + + if (name) + name_len = strlen(name); + + if (name_len) { + unsigned int i; + const char *opt; + + for (i = 0; i < ARRAY_SIZE(video_options); ++i) { + if (!video_options[i]) + continue; + if (video_options[i][0] == '\0') + continue; + opt = video_options[i]; + if (!strncmp(opt, name, name_len) && opt[name_len] == ':') + options = opt + name_len + 1; + } + } + + /* No match, return global options */ + if (!options) + options = video_option; + + return options; +} + +/** + * video_get_options - get kernel boot parameters + * @name: name of the output as it would appear in the boot parameter + * line (video=<name>:<options>) + * + * Looks up the video= options for the given name. Names are connector + * names with DRM, or driver names with fbdev. If no video option for + * the name has been specified, the function returns the global video= + * setting. A @name of NULL always returns the global video setting. 
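As a usage illustration (not part of the patch itself): a caller can look up a connector- or driver-specific mode string through the new helper roughly as below. The connector name "HDMI-A-1" and the surrounding function are assumptions for the example.

#include <linux/printk.h>
#include <video/cmdline.h>

static void example_pick_cmdline_mode(void)
{
	/* video=HDMI-A-1:1920x1080@60 on the kernel command line yields
	 * "1920x1080@60" here; NULL means no video= option matched.
	 */
	const char *opts = video_get_options("HDMI-A-1");

	if (opts)
		pr_info("using mode option: %s\n", opts);
}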
+ * + * Returns: + * The string of video options for the given name, or NULL if no video + * option has been specified. + */ +const char *video_get_options(const char *name) +{ + return __video_get_option_string(name); +} +EXPORT_SYMBOL(video_get_options); + +bool __video_get_options(const char *name, const char **options, bool is_of) +{ + bool enabled = true; + const char *opt = NULL; + + if (video_of_only && !is_of) + enabled = false; + + opt = __video_get_option_string(name); + + if (options) + *options = opt; + + return enabled; +} +EXPORT_SYMBOL(__video_get_options); + +/* + * Process command line options for video adapters. This function is + * a __setup and __init function. It only stores the options. Drivers + * have to call video_get_options() as necessary. + */ +static int __init video_setup(char *options) +{ + if (!options || !*options) + goto out; + + if (!strncmp(options, "ofonly", 6)) { + video_of_only = true; + goto out; + } + + if (strchr(options, ':')) { + /* named */ + size_t i; + + for (i = 0; i < ARRAY_SIZE(video_options); i++) { + if (!video_options[i]) { + video_options[i] = options; + break; + } + } + } else { + /* global */ + video_option = options; + } + +out: + return 1; +} +__setup("video=", video_setup); diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index ff3646c30d0d..96e91570cdd3 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -3,16 +3,13 @@ # fbdev configuration # -config FB_CMDLINE - bool - config FB_NOTIFY bool menuconfig FB tristate "Support for frame buffer devices" - select FB_CMDLINE select FB_NOTIFY + select VIDEO_CMDLINE help The frame buffer device provides an abstraction for the graphics hardware. It represents the frame buffer of some video hardware and diff --git a/drivers/video/fbdev/core/Makefile b/drivers/video/fbdev/core/Makefile index 26cbc965497c..08fabce76b74 100644 --- a/drivers/video/fbdev/core/Makefile +++ b/drivers/video/fbdev/core/Makefile @@ -1,9 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_FB_CMDLINE) += fb_cmdline.o obj-$(CONFIG_FB_NOTIFY) += fb_notify.o obj-$(CONFIG_FB) += fb.o fb-y := fbmem.o fbmon.o fbcmap.o fbsysfs.o \ - modedb.o fbcvt.o + modedb.o fbcvt.o fb_cmdline.o fb-$(CONFIG_FB_DEFERRED_IO) += fb_defio.o ifeq ($(CONFIG_FRAMEBUFFER_CONSOLE),y) diff --git a/drivers/video/fbdev/core/fb_cmdline.c b/drivers/video/fbdev/core/fb_cmdline.c index 3b5bd666b952..4d1634c492ec 100644 --- a/drivers/video/fbdev/core/fb_cmdline.c +++ b/drivers/video/fbdev/core/fb_cmdline.c @@ -12,16 +12,14 @@ * for more details. * * Authors: - * Vetter <[email protected]> + * Daniel Vetter <[email protected]> */ -#include <linux/init.h> -#include <linux/fb.h> -static char *video_options[FB_MAX] __read_mostly; -static int ofonly __read_mostly; +#include <linux/export.h> +#include <linux/fb.h> +#include <linux/string.h> -const char *fb_mode_option; -EXPORT_SYMBOL_GPL(fb_mode_option); +#include <video/cmdline.h> /** * fb_get_options - get kernel boot parameters @@ -30,78 +28,34 @@ EXPORT_SYMBOL_GPL(fb_mode_option); * (video=<name>:<options>) * @option: the option will be stored here * + * The caller owns the string returned in @option and is + * responsible for releasing the memory. 
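A minimal caller-side sketch of the new ownership rule (the driver name "exampledrv" is made up for the example): the string returned in @option is now a kstrdup()'d copy, so the caller must kfree() it.

	char *option = NULL;

	if (fb_get_options("exampledrv", &option))
		return -ENODEV;	/* disabled via "off" or "ofonly" */

	if (option) {
		/* ... parse the mode string ... */
		kfree(option);	/* caller owns the copy */
	}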
+ * * NOTE: Needed to maintain backwards compatibility */ int fb_get_options(const char *name, char **option) { - char *opt, *options = NULL; - int retval = 0; - int name_len = strlen(name), i; - - if (name_len && ofonly && strncmp(name, "offb", 4)) - retval = 1; + const char *options = NULL; + bool is_of = false; + bool enabled; - if (name_len && !retval) { - for (i = 0; i < FB_MAX; i++) { - if (video_options[i] == NULL) - continue; - if (!video_options[i][0]) - continue; - opt = video_options[i]; - if (!strncmp(name, opt, name_len) && - opt[name_len] == ':') - options = opt + name_len + 1; - } - } - /* No match, pass global option */ - if (!options && option && fb_mode_option) - options = kstrdup(fb_mode_option, GFP_KERNEL); - if (options && !strncmp(options, "off", 3)) - retval = 1; - - if (option) - *option = options; - - return retval; -} -EXPORT_SYMBOL(fb_get_options); + if (name) + is_of = strncmp(name, "offb", 4); -/** - * video_setup - process command line options - * @options: string of options - * - * Process command line options for frame buffer subsystem. - * - * NOTE: This function is a __setup and __init function. - * It only stores the options. Drivers have to call - * fb_get_options() as necessary. - */ -static int __init video_setup(char *options) -{ - if (!options || !*options) - goto out; + enabled = __video_get_options(name, &options, is_of); - if (!strncmp(options, "ofonly", 6)) { - ofonly = 1; - goto out; + if (options) { + if (!strncmp(options, "off", 3)) + enabled = false; } - if (strchr(options, ':')) { - /* named */ - int i; - - for (i = 0; i < FB_MAX; i++) { - if (video_options[i] == NULL) { - video_options[i] = options; - break; - } - } - } else { - /* global */ - fb_mode_option = options; + if (option) { + if (options) + *option = kstrdup(options, GFP_KERNEL); + else + *option = NULL; } -out: - return 1; + return enabled ? 0 : 1; /* 0 on success, 1 otherwise */ } -__setup("video=", video_setup); +EXPORT_SYMBOL(fb_get_options); diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c index 6473e0dfe146..23cf8eba785d 100644 --- a/drivers/video/fbdev/core/modedb.c +++ b/drivers/video/fbdev/core/modedb.c @@ -620,6 +620,7 @@ int fb_find_mode(struct fb_var_screeninfo *var, const struct fb_videomode *default_mode, unsigned int default_bpp) { + char *mode_option_buf = NULL; int i; /* Set up defaults */ @@ -635,8 +636,10 @@ int fb_find_mode(struct fb_var_screeninfo *var, default_bpp = 8; /* Did the user specify a video mode?
*/ - if (!mode_option) - mode_option = fb_mode_option; + if (!mode_option) { + fb_get_options(NULL, &mode_option_buf); + mode_option = mode_option_buf; + } if (mode_option) { const char *name = mode_option; unsigned int namelen = strlen(name); @@ -715,6 +718,7 @@ int fb_find_mode(struct fb_var_screeninfo *var, res_specified = 1; } done: + kfree(mode_option_buf); if (cvt) { struct fb_videomode cvt_mode; int ret; diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 92586ab55ef5..9a022caacf93 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -528,6 +528,13 @@ struct drm_connector * drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state, struct drm_encoder *encoder); +struct drm_crtc * +drm_atomic_get_old_crtc_for_encoder(struct drm_atomic_state *state, + struct drm_encoder *encoder); +struct drm_crtc * +drm_atomic_get_new_crtc_for_encoder(struct drm_atomic_state *state, + struct drm_encoder *encoder); + /** * drm_atomic_get_existing_crtc_state - get CRTC state, if it exists * @state: global atomic state object diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index 33f982cd1a27..536a0b0091c3 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -210,6 +210,32 @@ int drm_atomic_helper_page_flip_target( plane))) /** + * drm_atomic_plane_enabling - check whether a plane is being enabled + * @old_plane_state: old atomic plane state + * @new_plane_state: new atomic plane state + * + * Checks the atomic state of a plane to determine whether it's being enabled + * or not. This also WARNs if it detects an invalid state (both CRTC and FB + * need to either both be NULL or both be non-NULL). + * + * RETURNS: + * True if the plane is being enabled, false otherwise. + */ +static inline bool drm_atomic_plane_enabling(struct drm_plane_state *old_plane_state, + struct drm_plane_state *new_plane_state) +{ + /* + * When enabling a plane, CRTC and FB should always be set together. + * Anything else should be considered a bug in the atomic core, so we + * gently warn about it. + */ + WARN_ON((!new_plane_state->crtc && new_plane_state->fb) || + (new_plane_state->crtc && !new_plane_state->fb)); + + return !old_plane_state->crtc && new_plane_state->crtc; +} + +/** * drm_atomic_plane_disabling - check whether a plane is being disabled * @old_plane_state: old atomic plane state * @new_plane_state: new atomic plane state diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h index 49649eb8447e..566497eeb3b8 100644 --- a/include/drm/drm_displayid.h +++ b/include/drm/drm_displayid.h @@ -139,7 +139,11 @@ struct displayid_vesa_vendor_specific_block { u8 mso; } __packed; -/* DisplayID iteration */ +/* + * DisplayID iteration. + * + * Do not access directly, this is private. 
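For illustration, a hypothetical atomic_update implementation showing how the new drm_atomic_plane_enabling() predicate is meant to be used alongside the existing drm_atomic_plane_disabling(); all example_* names are invented.

static void example_plane_atomic_update(struct drm_plane *plane,
					struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(state, plane);
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);

	if (drm_atomic_plane_enabling(old_state, new_state)) {
		/* no CRTC before, one now: power up the hardware plane */
	}

	/* ... program the new scanout buffer ... */
}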
+ */ struct displayid_iter { const struct drm_edid *drm_edid; @@ -147,6 +151,9 @@ struct displayid_iter { int length; int idx; int ext_index; + + u8 version; + u8 primary_use; }; void displayid_iter_edid_begin(const struct drm_edid *drm_edid, @@ -157,4 +164,7 @@ __displayid_iter_next(struct displayid_iter *iter); while (((__block) = __displayid_iter_next(__iter))) void displayid_iter_end(struct displayid_iter *iter); +u8 displayid_version(const struct displayid_iter *iter); +u8 displayid_primary_use(const struct displayid_iter *iter); + #endif diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 1d76d0686b03..5b86bb7603e7 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -400,25 +400,6 @@ struct drm_driver { int (*dumb_map_offset)(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle, uint64_t *offset); - /** - * @dumb_destroy: - * - * This destroys the userspace handle for the given dumb backing storage buffer. - * Since buffer objects must be reference counted in the kernel a buffer object - * won't be immediately freed if a framebuffer modeset object still uses it. - * - * Called by the user via ioctl. - * - * The default implementation is drm_gem_dumb_destroy(). GEM based drivers - * must not overwrite this. - * - * Returns: - * - * Zero on success, negative errno on failure. - */ - int (*dumb_destroy)(struct drm_file *file_priv, - struct drm_device *dev, - uint32_t handle); /** @major: driver major number */ int major; diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 70ae6c290bdc..571885d32907 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -61,9 +61,15 @@ struct std_timing { u8 vfreq_aspect; } __attribute__((packed)); -#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1) -#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2) -#define DRM_EDID_PT_SEPARATE_SYNC (3 << 3) +#define DRM_EDID_PT_SYNC_MASK (3 << 3) +# define DRM_EDID_PT_ANALOG_CSYNC (0 << 3) +# define DRM_EDID_PT_BIPOLAR_ANALOG_CSYNC (1 << 3) +# define DRM_EDID_PT_DIGITAL_CSYNC (2 << 3) +# define DRM_EDID_PT_CSYNC_ON_RGB (1 << 1) /* analog csync only */ +# define DRM_EDID_PT_CSYNC_SERRATE (1 << 2) +# define DRM_EDID_PT_DIGITAL_SEPARATE_SYNC (3 << 3) +# define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1) /* also digital csync */ +# define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2) #define DRM_EDID_PT_STEREO (1 << 5) #define DRM_EDID_PT_INTERLACED (1 << 7) diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 772a4adf5287..189fd618ca65 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -165,6 +165,16 @@ struct drm_gem_object_funcs { int (*mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma); /** + * @evict: + * + * Evicts gem object out from memory. Used by the drm_gem_object_evict() + * helper. Returns 0 on success, -errno otherwise. + * + * This callback is optional. + */ + int (*evict)(struct drm_gem_object *obj); + + /** * @vm_ops: * * Virtual memory operations used with mmap. 
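An illustrative wiring of the new @evict hook (the example_* names are hypothetical); drm_gem_evict() calls into it when the driver provides one:

#include <drm/drm_gem.h>

static int example_gem_evict(struct drm_gem_object *obj)
{
	/* Drop the backing storage; the object is expected to be idle. */
	return 0;
}

static const struct drm_gem_object_funcs example_gem_funcs = {
	.evict = example_gem_evict,
};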
@@ -475,8 +485,11 @@ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock); void drm_gem_lru_remove(struct drm_gem_object *obj); +void drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj); void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj); unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan, bool (*shrink)(struct drm_gem_object *obj)); +int drm_gem_evict(struct drm_gem_object *obj); + #endif /* __DRM_GEM_H__ */ diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index a2201b2488c5..5994fed5e327 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -61,20 +61,6 @@ struct drm_gem_shmem_object { struct list_head madv_list; /** - * @pages_mark_dirty_on_put: - * - * Mark pages as dirty when they are put. - */ - unsigned int pages_mark_dirty_on_put : 1; - - /** - * @pages_mark_accessed_on_put: - * - * Mark pages as accessed when they are put. - */ - unsigned int pages_mark_accessed_on_put : 1; - - /** * @sgt: Scatter/gather table for imported PRIME buffers */ struct sg_table *sgt; @@ -98,9 +84,23 @@ struct drm_gem_shmem_object { unsigned int vmap_use_count; /** + * @pages_mark_dirty_on_put: + * + * Mark pages as dirty when they are put. + */ + bool pages_mark_dirty_on_put : 1; + + /** + * @pages_mark_accessed_on_put: + * + * Mark pages as accessed when they are put. + */ + bool pages_mark_accessed_on_put : 1; + + /** * @map_wc: map object write-combined (instead of using shmem defaults). */ - bool map_wc; + bool map_wc : 1; }; #define to_drm_gem_shmem_obj(obj) \ diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index 206f495bbf06..965faf082a6d 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -1331,6 +1331,32 @@ struct drm_plane_helper_funcs { */ void (*atomic_update)(struct drm_plane *plane, struct drm_atomic_state *state); + + /** + * @atomic_enable: + * + * Drivers should use this function to unconditionally enable a plane. + * This hook is called in-between the &drm_crtc_helper_funcs.atomic_begin + * and drm_crtc_helper_funcs.atomic_flush callbacks. It is called after + * @atomic_update, which will be called for all enabled planes. Drivers + * that use @atomic_enable should set up a plane in @atomic_update and + * afterwards enable the plane in @atomic_enable. If a plane needs to be + * enabled before installing the scanout buffer, drivers can still do + * so in @atomic_update. + * + * Note that the power state of the display pipe when this function is + * called depends upon the exact helpers and calling sequence the driver + * has picked. See drm_atomic_helper_commit_planes() for a discussion of + * the tradeoffs and variants of plane commit helpers. + * + * This callback is used by the atomic modeset helpers, but it is + * optional. If implemented, @atomic_enable should be the inverse of + * @atomic_disable. Drivers that don't want to use either can still + * implement the complete plane update in @atomic_update. + */ + void (*atomic_enable)(struct drm_plane *plane, + struct drm_atomic_state *state); + /** * @atomic_disable: * @@ -1351,7 +1377,8 @@ struct drm_plane_helper_funcs { * the tradeoffs and variants of plane commit helpers. * * This callback is used by the atomic modeset helpers and by the - * transitional plane helpers, but it is optional. 
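Sketch of a plane-helper vtable wiring the symmetric @atomic_enable/@atomic_disable pair described here; every example_* function is hypothetical:

static const struct drm_plane_helper_funcs example_plane_helper_funcs = {
	.atomic_check	= example_plane_atomic_check,
	.atomic_update	= example_plane_atomic_update,	/* set up scanout */
	.atomic_enable	= example_plane_atomic_enable,	/* then switch on */
	.atomic_disable	= example_plane_atomic_disable,	/* inverse of enable */
};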
+ * transitional plane helpers, but it is optional. It's intended to + * reverse the effects of @atomic_enable. */ void (*atomic_disable)(struct drm_plane *plane, struct drm_atomic_state *state); diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h index 10ab58c40746..082a6e980d01 100644 --- a/include/drm/drm_of.h +++ b/include/drm/drm_of.h @@ -15,6 +15,8 @@ struct drm_encoder; struct drm_panel; struct drm_bridge; struct device_node; +struct mipi_dsi_device_info; +struct mipi_dsi_host; /** * enum drm_lvds_dual_link_pixels - Pixel order of an LVDS dual-link connection @@ -129,6 +131,16 @@ drm_of_get_data_lanes_count_ep(const struct device_node *port, } #endif +#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_MIPI_DSI) +struct mipi_dsi_host *drm_of_get_dsi_bus(struct device *dev); +#else +static inline struct +mipi_dsi_host *drm_of_get_dsi_bus(struct device *dev) +{ + return ERR_PTR(-EINVAL); +} +#endif /* CONFIG_OF && CONFIG_DRM_MIPI_DSI */ + /* * drm_of_panel_bridge_remove - remove panel bridge * @np: device tree node containing panel bridge output ports diff --git a/include/drm/drm_suballoc.h b/include/drm/drm_suballoc.h new file mode 100644 index 000000000000..c2188bb0b157 --- /dev/null +++ b/include/drm/drm_suballoc.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright 2011 Red Hat Inc. + * Copyright © 2022 Intel Corporation + */ +#ifndef _DRM_SUBALLOC_H_ +#define _DRM_SUBALLOC_H_ + +#include <drm/drm_mm.h> + +#include <linux/dma-fence.h> +#include <linux/types.h> + +#define DRM_SUBALLOC_MAX_QUEUES 32 +/** + * struct drm_suballoc_manager - fenced range allocations + * @wq: Wait queue for sleeping allocations on contention. + * @hole: Pointer to first hole node. + * @olist: List of allocated ranges. + * @flist: Array[fence context hash] of queues of fenced allocated ranges. + * @size: Size of the managed range. + * @align: Default alignment for the managed range. + */ +struct drm_suballoc_manager { + wait_queue_head_t wq; + struct list_head *hole; + struct list_head olist; + struct list_head flist[DRM_SUBALLOC_MAX_QUEUES]; + size_t size; + size_t align; +}; + +/** + * struct drm_suballoc - Sub-allocated range + * @olist: List link for list of allocated ranges. + * @flist: List link for the manager fenced allocated ranges queues. + * @manager: The drm_suballoc_manager. + * @soffset: Start offset. + * @eoffset: End offset + 1 so that @eoffset - @soffset = size. + * @fence: The fence protecting the allocation. + */ +struct drm_suballoc { + struct list_head olist; + struct list_head flist; + struct drm_suballoc_manager *manager; + size_t soffset; + size_t eoffset; + struct dma_fence *fence; +}; + +void drm_suballoc_manager_init(struct drm_suballoc_manager *sa_manager, + size_t size, size_t align); + +void drm_suballoc_manager_fini(struct drm_suballoc_manager *sa_manager); + +struct drm_suballoc * +drm_suballoc_new(struct drm_suballoc_manager *sa_manager, size_t size, + gfp_t gfp, bool intr, size_t align); + +void drm_suballoc_free(struct drm_suballoc *sa, struct dma_fence *fence); + +/** + * drm_suballoc_soffset - Range start. + * @sa: The struct drm_suballoc. + * + * Return: The start of the allocated range. + */ +static inline size_t drm_suballoc_soffset(struct drm_suballoc *sa) +{ + return sa->soffset; +} + +/** + * drm_suballoc_eoffset - Range end. + * @sa: The struct drm_suballoc. + * + * Return: The end of the allocated range + 1.
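A rough usage sketch of the suballocator API declared in this header; the sizes are arbitrary, 'fence' stands for the dma_fence of the job that last used the range, and an align of 0 is assumed to select the manager default:

#include <drm/drm_suballoc.h>
#include <linux/sizes.h>

	struct drm_suballoc_manager mgr;
	struct drm_suballoc *sa;

	drm_suballoc_manager_init(&mgr, SZ_256K, SZ_4K);

	sa = drm_suballoc_new(&mgr, 1024, GFP_KERNEL, true, 0);
	if (!IS_ERR(sa)) {
		/* use [drm_suballoc_soffset(sa), drm_suballoc_eoffset(sa)) */
		drm_suballoc_free(sa, fence);	/* reuse gated on fence */
	}

	drm_suballoc_manager_fini(&mgr);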
+ */ +static inline size_t drm_suballoc_eoffset(struct drm_suballoc *sa) +{ + return sa->eoffset; +} + +/** + * drm_suballoc_size - Range size. + * @sa: The struct drm_suballoc. + * + * Return: The size of the allocated range. + */ +static inline size_t drm_suballoc_size(struct drm_suballoc *sa) +{ + return sa->eoffset - sa->soffset; +} + +#ifdef CONFIG_DEBUG_FS +void drm_suballoc_dump_debug_info(struct drm_suballoc_manager *sa_manager, + struct drm_printer *p, + unsigned long long suballoc_base); +#else +static inline void +drm_suballoc_dump_debug_info(struct drm_suballoc_manager *sa_manager, + struct drm_printer *p, + unsigned long long suballoc_base) +{ } + +#endif + +#endif /* _DRM_SUBALLOC_H_ */ diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 99584e457153..c0ec6719282a 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -57,6 +57,8 @@ struct drm_gem_object; struct drm_gpu_scheduler; struct drm_sched_rq; +struct drm_file; + /* These are often used as an (initial) index * to an array, and as such should start at 0. */ @@ -537,6 +539,10 @@ int drm_sched_job_init(struct drm_sched_job *job, void drm_sched_job_arm(struct drm_sched_job *job); int drm_sched_job_add_dependency(struct drm_sched_job *job, struct dma_fence *fence); +int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job, + struct drm_file *file, + u32 handle, + u32 point); int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job, struct dma_resv *resv, enum dma_resv_usage usage); diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 4a4c190f7698..e1e10dfbb661 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -588,6 +588,7 @@ INTEL_VGA_DEVICE(0x4551, info), \ INTEL_VGA_DEVICE(0x4555, info), \ INTEL_VGA_DEVICE(0x4557, info), \ + INTEL_VGA_DEVICE(0x4570, info), \ INTEL_VGA_DEVICE(0x4571, info) /* JSL */ @@ -684,14 +685,18 @@ INTEL_VGA_DEVICE(0xA78A, info), \ INTEL_VGA_DEVICE(0xA78B, info) +/* RPL-U */ +#define INTEL_RPLU_IDS(info) \ + INTEL_VGA_DEVICE(0xA721, info), \ + INTEL_VGA_DEVICE(0xA7A1, info), \ + INTEL_VGA_DEVICE(0xA7A9, info) + /* RPL-P */ #define INTEL_RPLP_IDS(info) \ + INTEL_RPLU_IDS(info), \ INTEL_VGA_DEVICE(0xA720, info), \ - INTEL_VGA_DEVICE(0xA721, info), \ INTEL_VGA_DEVICE(0xA7A0, info), \ - INTEL_VGA_DEVICE(0xA7A1, info), \ - INTEL_VGA_DEVICE(0xA7A8, info), \ - INTEL_VGA_DEVICE(0xA7A9, info) + INTEL_VGA_DEVICE(0xA7A8, info) /* DG2 */ #define INTEL_DG2_G10_IDS(info) \ @@ -706,7 +711,6 @@ INTEL_VGA_DEVICE(0x5693, info), \ INTEL_VGA_DEVICE(0x5694, info), \ INTEL_VGA_DEVICE(0x5695, info), \ - INTEL_VGA_DEVICE(0x5698, info), \ INTEL_VGA_DEVICE(0x56A5, info), \ INTEL_VGA_DEVICE(0x56A6, info), \ INTEL_VGA_DEVICE(0x56B0, info), \ diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h index 4f3e81eac6f3..56e82ba2d046 100644 --- a/include/drm/ttm/ttm_device.h +++ b/include/drm/ttm/ttm_device.h @@ -141,7 +141,7 @@ struct ttm_device_funcs { * the graphics address space * @ctx: context for this move with parameters * @new_mem: the new memory region receiving the buffer - @ @hop: placement for driver directed intermediate hop + * @hop: placement for driver directed intermediate hop * * Move a buffer between two memory regions. 
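Illustrative submit-path usage of the new drm_sched_job_add_syncobj_dependency() helper; 'args' and its fields are hypothetical ioctl parameters, and point 0 is used for a binary syncobj:

	ret = drm_sched_job_add_syncobj_dependency(job, file_priv,
						   args->in_syncobj, 0);
	if (ret)
		return ret;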
* Returns errno -EMULTIHOP if driver requests a hop diff --git a/include/linux/fb.h b/include/linux/fb.h index d8d20514ea05..d96529caa35e 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -765,7 +765,6 @@ struct dmt_videomode { const struct fb_videomode *mode; }; -extern const char *fb_mode_option; extern const struct fb_videomode vesa_modes[]; extern const struct dmt_videomode dmt_modes[]; diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 642808520d92..a87bbbbca2d4 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -972,6 +972,19 @@ extern "C" { #define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats) #define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version) #define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl) +/** + * DRM_IOCTL_GEM_CLOSE - Close a GEM handle. + * + * GEM handles are not reference-counted by the kernel. User-space is + * responsible for managing their lifetime. For example, if user-space imports + * the same memory object twice on the same DRM file description, the same GEM + * handle is returned by both imports, and user-space needs to ensure + * &DRM_IOCTL_GEM_CLOSE is performed once only. The same situation can happen + * when a memory object is allocated, then exported and imported again on the + * same DRM file description. The &DRM_IOCTL_MODE_GETFB2 IOCTL is an exception + * and always returns fresh new GEM handles even if an existing GEM handle + * already refers to the same memory object before the IOCTL is performed. + */ #define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close) #define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink) #define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open) @@ -1012,7 +1025,37 @@ extern "C" { #define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock) #define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock) +/** + * DRM_IOCTL_PRIME_HANDLE_TO_FD - Convert a GEM handle to a DMA-BUF FD. + * + * User-space sets &drm_prime_handle.handle with the GEM handle to export and + * &drm_prime_handle.flags, and gets back a DMA-BUF file descriptor in + * &drm_prime_handle.fd. + * + * The export can fail for any driver-specific reason, e.g. because export is + * not supported for this specific GEM handle (but might be for others). + * + * Support for exporting DMA-BUFs is advertised via &DRM_PRIME_CAP_EXPORT. + */ #define DRM_IOCTL_PRIME_HANDLE_TO_FD DRM_IOWR(0x2d, struct drm_prime_handle) +/** + * DRM_IOCTL_PRIME_FD_TO_HANDLE - Convert a DMA-BUF FD to a GEM handle. + * + * User-space sets &drm_prime_handle.fd with a DMA-BUF file descriptor to + * import, and gets back a GEM handle in &drm_prime_handle.handle. + * &drm_prime_handle.flags is unused. + * + * If an existing GEM handle refers to the memory object backing the DMA-BUF, + * that GEM handle is returned. Therefore user-space which needs to handle + * arbitrary DMA-BUFs must have a user-space lookup data structure to manually + * reference-count duplicated GEM handles. For more information see + * &DRM_IOCTL_GEM_CLOSE. + * + * The import can fail for any driver-specific reason, e.g. because import is + * only supported for DMA-BUFs allocated on this DRM device. + * + * Support for importing DMA-BUFs is advertised via &DRM_PRIME_CAP_IMPORT. + */ #define DRM_IOCTL_PRIME_FD_TO_HANDLE DRM_IOWR(0x2e, struct drm_prime_handle) #define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30) @@ -1104,8 +1147,13 @@ extern "C" { * struct as the output. 
* * If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles - * will be filled with GEM buffer handles. Planes are valid until one has a - * zero handle -- this can be used to compute the number of planes. + * will be filled with GEM buffer handles. Fresh new GEM handles are always + * returned, even if another GEM handle referring to the same memory object + * already exists on the DRM file description. The caller is responsible for + * removing the new handles, e.g. via the &DRM_IOCTL_GEM_CLOSE IOCTL. The same + * new handle will be returned for multiple planes in case they use the same + * memory object. Planes are valid until one has a zero handle -- this can be + * used to compute the number of planes. * * Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid * until one has a zero &drm_mode_fb_cmd2.pitches. @@ -1113,6 +1161,11 @@ extern "C" { * If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set * in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the * modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier. + * + * To obtain DMA-BUF FDs for each plane without leaking GEM handles, user-space + * can export each handle via &DRM_IOCTL_PRIME_HANDLE_TO_FD, then immediately + * close each unique handle via &DRM_IOCTL_GEM_CLOSE, making sure to not + * double-close handles which are specified multiple times in the array. */ #define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2) diff --git a/include/video/cmdline.h b/include/video/cmdline.h new file mode 100644 index 000000000000..26b80cdaef79 --- /dev/null +++ b/include/video/cmdline.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef VIDEO_CMDLINE_H +#define VIDEO_CMDLINE_H + +#include <linux/types.h> + +#if defined(CONFIG_VIDEO_CMDLINE) +const char *video_get_options(const char *name); + +/* exported for compatibility with fbdev; don't use in new code */ +bool __video_get_options(const char *name, const char **option, bool is_of); +#else +static inline const char *video_get_options(const char *name) +{ + return NULL; +} +#endif + +#endif
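To make the GETFB2/PRIME/GEM_CLOSE interplay documented above concrete, a user-space sketch (error handling elided; 'fd' and 'fb_id' are assumed to exist) that exports each plane and closes every unique handle exactly once:

#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

	struct drm_mode_fb_cmd2 fb = { .fb_id = fb_id };

	ioctl(fd, DRM_IOCTL_MODE_GETFB2, &fb);

	for (int i = 0; i < 4 && fb.handles[i]; i++) {
		int j;

		for (j = 0; j < i; j++)
			if (fb.handles[j] == fb.handles[i])
				break;
		if (j < i)
			continue;	/* plane reuses an earlier handle */

		struct drm_prime_handle prime = { .handle = fb.handles[i] };
		ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime);
		/* ... use prime.fd, then close(prime.fd) when done ... */

		struct drm_gem_close req = { .handle = fb.handles[i] };
		ioctl(fd, DRM_IOCTL_GEM_CLOSE, &req);
	}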