| author | Dave Airlie <airlied@redhat.com> | 2020-11-13 14:16:16 +1000 |
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2020-11-13 14:52:32 +1000 |
| commit | 14346c08fac465332f229fbfb0adb1d1a75de96c (patch) | |
| tree | 56875e6e322d9f6f1d99f258f80cfa5d51720021 | |
| parent | 512bce50a41c528fa15c4c014293e7bebf018658 (diff) | |
| parent | 05481f072787e96d08cc304cda0c10e0d02cdadc (diff) | |
Merge tag 'drm-misc-next-2020-11-12' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for 5.11:
UAPI Changes:
Cross-subsystem Changes:
Core Changes:
- Remove pgprot_decrypt() before calling io_remap_pfn_range()
- Revert "drm/dp_mst: Retrieve extended DPCD caps for topology manager"
- ttm: Add multihop infrastructure (see the sketch after the diffstat below)
- doc: Update dma-buf
Driver Changes:
- amdgpu: Use TTM multihop
- kmb: select DRM_MIPI_DSI and depend on ARCH_KEEMBAY; Fix build warning;
Fix typos
- nouveau: Use TTM multihop; Fix out-of-bounds access
- radeon: Use TTM multihop
- ingenic: Search for scaling coefficients up to 102% of screen size
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20201112080115.GA7954@linux-uq9g
249 files changed, 7023 insertions, 1412 deletions
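
The TTM multihop work in this merge ("ttm: Add multihop infrastructure", plus the amdgpu/nouveau/radeon conversions) replaces each driver's private VRAM-to-system bounce helpers with a core mechanism: the driver's bo_move callback fills in an intermediate placement and returns -EMULTIHOP, and TTM core then performs the move in two hops. Below is a minimal sketch of that callback contract, modelled on the amdgpu_bo_move() hunk further down; my_driver_bo_move() is an illustrative name, and ttm_bo_move_memcpy() stands in for a driver's own copy-engine blit.

```c
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

/*
 * Minimal sketch of a bo_move callback under the multihop scheme,
 * following the amdgpu_bo_move() hunk in this merge.
 */
static int my_driver_bo_move(struct ttm_buffer_object *bo, bool evict,
			     struct ttm_operation_ctx *ctx,
			     struct ttm_resource *new_mem,
			     struct ttm_place *hop)
{
	struct ttm_resource *old_mem = &bo->mem;

	/*
	 * Direct VRAM <-> SYSTEM transfers are no longer bounced through a
	 * driver-private GTT buffer. Instead, describe the intermediate
	 * placement and return -EMULTIHOP; TTM core re-invokes bo_move once
	 * per hop (SYSTEM -> TT -> VRAM, or the reverse).
	 */
	if ((old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_VRAM) ||
	    (old_mem->mem_type == TTM_PL_VRAM &&
	     new_mem->mem_type == TTM_PL_SYSTEM)) {
		hop->fpfn = 0;
		hop->lpfn = 0;
		hop->mem_type = TTM_PL_TT;
		hop->flags = 0;
		return -EMULTIHOP;
	}

	/* Every remaining combination can be handled in a single step. */
	return ttm_bo_move_memcpy(bo, ctx, new_mem);
}
```

Because the core now drives the intermediate hop, the near-identical move_vram_ram()/move_ram_vram() bounce helpers deleted from amdgpu (and the equivalents in nouveau and radeon) no longer have to be duplicated in every driver.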
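Several hunks below (the fbdev/TODO notes and the ast cursor conversion) move from raw mapping pointers to struct dma_buf_map, which records whether a mapping lives in system or I/O memory. The following is a minimal sketch of how a consumer is expected to branch on that flag; blit_to_mapping() is an illustrative helper, not an interface added by this merge.

```c
#include <linux/dma-buf-map.h>
#include <linux/io.h>
#include <linux/string.h>

/*
 * Minimal sketch of writing through a struct dma_buf_map, as returned by
 * vmap helpers such as drm_gem_vram_vmap() in the ast hunks below.
 */
static void blit_to_mapping(struct dma_buf_map *map, const void *src, size_t len)
{
	/* The mapping itself records whether it points at system or I/O memory. */
	if (map->is_iomem)
		memcpy_toio(map->vaddr_iomem, src, len);
	else
		memcpy(map->vaddr, src, len);
}
```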
diff --git a/Documentation/devicetree/bindings/display/bridge/intel,keembay-dsi.yaml b/Documentation/devicetree/bindings/display/bridge/intel,keembay-dsi.yaml new file mode 100644 index 000000000000..ab5be2625224 --- /dev/null +++ b/Documentation/devicetree/bindings/display/bridge/intel,keembay-dsi.yaml @@ -0,0 +1,101 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/bridge/intel,keembay-dsi.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Devicetree bindings for Intel Keem Bay mipi dsi controller + +maintainers: + - Anitha Chrisanthus <anitha.chrisanthus@intel.com> + - Edmond J Dea <edmund.j.dea@intel.com> + +properties: + compatible: + const: intel,keembay-dsi + + reg: + items: + - description: MIPI registers range + + reg-names: + items: + - const: mipi + + clocks: + items: + - description: MIPI DSI clock + - description: MIPI DSI econfig clock + - description: MIPI DSI config clock + + clock-names: + items: + - const: clk_mipi + - const: clk_mipi_ecfg + - const: clk_mipi_cfg + + ports: + type: object + + properties: + '#address-cells': + const: 1 + + '#size-cells': + const: 0 + + port@0: + type: object + description: MIPI DSI input port. + + port@1: + type: object + description: DSI output port. + + required: + - port@0 + - port@1 + + additionalProperties: false + +required: + - compatible + - reg + - reg-names + - clocks + - clock-names + - ports + +additionalProperties: false + +examples: + - | + mipi-dsi@20900000 { + compatible = "intel,keembay-dsi"; + reg = <0x20900000 0x4000>; + reg-names = "mipi"; + clocks = <&scmi_clk 0x86>, + <&scmi_clk 0x88>, + <&scmi_clk 0x89>; + clock-names = "clk_mipi", "clk_mipi_ecfg", + "clk_mipi_cfg"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + dsi_in: endpoint { + remote-endpoint = <&disp_out>; + }; + }; + + port@1 { + reg = <1>; + dsi_out: endpoint { + remote-endpoint = <&adv7535_input>; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/display/bridge/lontium,lt9611.yaml b/Documentation/devicetree/bindings/display/bridge/lontium,lt9611.yaml index d60208359234..7a1c89b995e2 100644 --- a/Documentation/devicetree/bindings/display/bridge/lontium,lt9611.yaml +++ b/Documentation/devicetree/bindings/display/bridge/lontium,lt9611.yaml @@ -4,18 +4,19 @@ $id: http://devicetree.org/schemas/display/bridge/lontium,lt9611.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Lontium LT9611 2 Port MIPI to HDMI Bridge +title: Lontium LT9611(UXC) 2 Port MIPI to HDMI Bridge maintainers: - Vinod Koul <vkoul@kernel.org> description: | - The LT9611 is a bridge device which converts DSI to HDMI + The LT9611 and LT9611UXC are bridge devices which convert DSI to HDMI properties: compatible: enum: - lontium,lt9611 + - lontium,lt9611uxc reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/display/bridge/sii902x.txt b/Documentation/devicetree/bindings/display/bridge/sii902x.txt index 0d1db3f9da84..02c21b584741 100644 --- a/Documentation/devicetree/bindings/display/bridge/sii902x.txt +++ b/Documentation/devicetree/bindings/display/bridge/sii902x.txt @@ -8,6 +8,8 @@ Optional properties: - interrupts: describe the interrupt line used to inform the host about hotplug events. - reset-gpios: OF device-tree gpio specification for RST_N pin. + - iovcc-supply: I/O Supply Voltage (1.8V or 3.3V) + - cvcc12-supply: Digital Core Supply Voltage (1.2V) HDMI audio properties: - #sound-dai-cells: <0> or <1>. 
<0> if only i2s or spdif pin @@ -54,6 +56,8 @@ Example: compatible = "sil,sii9022"; reg = <0x39>; reset-gpios = <&pioA 1 0>; + iovcc-supply = <&v3v3_hdmi>; + cvcc12-supply = <&v1v2_hdmi>; #sound-dai-cells = <0>; sil,i2s-data-lanes = < 0 1 2 >; diff --git a/Documentation/devicetree/bindings/display/intel,keembay-display.yaml b/Documentation/devicetree/bindings/display/intel,keembay-display.yaml new file mode 100644 index 000000000000..0a697d45c2ad --- /dev/null +++ b/Documentation/devicetree/bindings/display/intel,keembay-display.yaml @@ -0,0 +1,72 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/intel,keembay-display.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Devicetree bindings for Intel Keem Bay display controller + +maintainers: + - Anitha Chrisanthus <anitha.chrisanthus@intel.com> + - Edmond J Dea <edmund.j.dea@intel.com> + +properties: + compatible: + const: intel,keembay-display + + reg: + items: + - description: LCD registers range + + reg-names: + items: + - const: lcd + + clocks: + items: + - description: LCD controller clock + - description: pll0 clock + + clock-names: + items: + - const: clk_lcd + - const: clk_pll0 + + interrupts: + maxItems: 1 + + port: + type: object + description: Display output node to DSI. + +required: + - compatible + - reg + - reg-names + - clocks + - clock-names + - interrupts + - port + +additionalProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/irq.h> + #include <dt-bindings/interrupt-controller/arm-gic.h> + + display@20930000 { + compatible = "intel,keembay-display"; + reg = <0x20930000 0x3000>; + reg-names = "lcd"; + interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&scmi_clk 0x83>, + <&scmi_clk 0x0>; + clock-names = "clk_lcd", "clk_pll0"; + + port { + disp_out: endpoint { + remote-endpoint = <&dsi_in>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/display/intel,keembay-msscam.yaml b/Documentation/devicetree/bindings/display/intel,keembay-msscam.yaml new file mode 100644 index 000000000000..40caa6118809 --- /dev/null +++ b/Documentation/devicetree/bindings/display/intel,keembay-msscam.yaml @@ -0,0 +1,43 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/intel,keembay-msscam.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Devicetree bindings for Intel Keem Bay MSSCAM + +maintainers: + - Anitha Chrisanthus <anitha.chrisanthus@intel.com> + - Edmond J Dea <edmund.j.dea@intel.com> + +description: | + MSSCAM controls local clocks in the display subsystem namely LCD clocks and + MIPI DSI clocks. It also configures the interconnect between LCD and + MIPI DSI. + +properties: + compatible: + items: + - const: intel,keembay-msscam + - const: syscon + + reg: + maxItems: 1 + + reg-io-width: + const: 4 + +required: + - compatible + - reg + - reg-io-width + +additionalProperties: false + +examples: + - | + msscam:msscam@20910000 { + compatible = "intel,keembay-msscam", "syscon"; + reg = <0x20910000 0x30>; + reg-io-width = <4>; + }; diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index 6b224ef14455..acca232b025b 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -201,13 +201,28 @@ Convert drivers to use drm_fbdev_generic_setup() ------------------------------------------------ Most drivers can use drm_fbdev_generic_setup(). 
Driver have to implement -atomic modesetting and GEM vmap support. Current generic fbdev emulation -expects the framebuffer in system memory (or system-like memory). +atomic modesetting and GEM vmap support. Historically, generic fbdev emulation +expected the framebuffer in system memory or system-like memory. By employing +struct dma_buf_map, drivers with frambuffers in I/O memory can be supported +as well. Contact: Maintainer of the driver you plan to convert Level: Intermediate +Reimplement functions in drm_fbdev_fb_ops without fbdev +------------------------------------------------------- + +A number of callback functions in drm_fbdev_fb_ops could benefit from +being rewritten without dependencies on the fbdev module. Some of the +helpers could further benefit from using struct dma_buf_map instead of +raw pointers. + +Contact: Thomas Zimmermann <tzimmermann@suse.de>, Daniel Vetter + +Level: Advanced + + drm_framebuffer_funcs and drm_mode_config_funcs.fb_create cleanup ----------------------------------------------------------------- @@ -450,6 +465,24 @@ Contact: Ville Syrjälä, Daniel Vetter Level: Intermediate +Use struct dma_buf_map throughout codebase +------------------------------------------ + +Pointers to shared device memory are stored in struct dma_buf_map. Each +instance knows whether it refers to system or I/O memory. Most of the DRM-wide +interface have been converted to use struct dma_buf_map, but implementations +often still use raw pointers. + +The task is to use struct dma_buf_map where it makes sense. + +* Memory managers should use struct dma_buf_map for dma-buf-imported buffers. +* TTM might benefit from using struct dma_buf_map internally. +* Framebuffer copying and blitting helpers should operate on struct dma_buf_map. + +Contact: Thomas Zimmermann <tzimmermann@suse.de>, Christian König, Daniel Vetter + +Level: Intermediate + Core refactorings ================= diff --git a/MAINTAINERS b/MAINTAINERS index 0bd560eb5101..970d9cee509d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8961,6 +8961,13 @@ M: Deepak Saxena <dsaxena@plexity.net> S: Maintained F: drivers/char/hw_random/ixp4xx-rng.c +INTEL KEEMBAY DRM DRIVER +M: Anitha Chrisanthus <anitha.chrisanthus@intel.com> +M: Edmund Dea <edmund.j.dea@intel.com> +S: Maintained +F: Documentation/devicetree/bindings/display/intel,kmb_display.yaml +F: drivers/gpu/drm/kmb/ + INTEL MANAGEMENT ENGINE (mei) M: Tomas Winkler <tomas.winkler@intel.com> L: linux-kernel@vger.kernel.org diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 556f62e8b196..0eb80c1ecdab 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -908,7 +908,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, } #ifdef CONFIG_DMA_API_DEBUG - { + if (!IS_ERR(sg_table)) { struct scatterlist *sg; u64 addr; int len; diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 64376dd298ed..0973f408d75f 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -232,6 +232,7 @@ config DRM_RADEON select FW_LOADER select DRM_KMS_HELPER select DRM_TTM + select DRM_TTM_HELPER select POWER_SUPPLY select HWMON select BACKLIGHT_CLASS_DEVICE @@ -252,6 +253,7 @@ config DRM_AMDGPU select DRM_KMS_HELPER select DRM_SCHED select DRM_TTM + select DRM_TTM_HELPER select POWER_SUPPLY select HWMON select BACKLIGHT_CLASS_DEVICE @@ -268,6 +270,8 @@ source "drivers/gpu/drm/nouveau/Kconfig" source "drivers/gpu/drm/i915/Kconfig" +source "drivers/gpu/drm/kmb/Kconfig" + config DRM_VGEM tristate "Virtual GEM 
provider" depends on DRM diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 81569009f884..fefaff4c832d 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -71,6 +71,7 @@ obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/ obj-$(CONFIG_DRM_MGA) += mga/ obj-$(CONFIG_DRM_I810) += i810/ obj-$(CONFIG_DRM_I915) += i915/ +obj-$(CONFIG_DRM_KMB_DISPLAY) += kmb/ obj-$(CONFIG_DRM_MGAG200) += mgag200/ obj-$(CONFIG_DRM_V3D) += v3d/ obj-$(CONFIG_DRM_VC4) += vc4/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 5b465ab774d1..e5919efca870 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -42,42 +42,6 @@ #include <linux/pci-p2pdma.h> /** - * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation - * @obj: GEM BO - * - * Sets up an in-kernel virtual mapping of the BO's memory. - * - * Returns: - * The virtual address of the mapping or an error pointer. - */ -void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj) -{ - struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); - int ret; - - ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, - &bo->dma_buf_vmap); - if (ret) - return ERR_PTR(ret); - - return bo->dma_buf_vmap.virtual; -} - -/** - * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation - * @obj: GEM BO - * @vaddr: Virtual address (unused) - * - * Tears down the in-kernel virtual mapping of the BO's memory. - */ -void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) -{ - struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); - - ttm_bo_kunmap(&bo->dma_buf_vmap); -} - -/** * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation * @obj: GEM BO * @vma: Virtual memory area diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h index 2c5c84a06bb9..39b5b9616fd8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h @@ -31,8 +31,6 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf); bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev, struct amdgpu_bo *bo); -void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj); -void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 26cf369003e1..1dfea15bbec3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -40,6 +40,7 @@ #include "amdgpu.h" #include "amdgpu_irq.h" #include "amdgpu_dma_buf.h" +#include "amdgpu_sched.h" #include "amdgpu_amdkfd.h" @@ -1105,7 +1106,7 @@ static const struct pci_device_id pciidlist[] = { MODULE_DEVICE_TABLE(pci, pciidlist); -static struct drm_driver kms_driver; +static const struct drm_driver amdgpu_kms_driver; static int amdgpu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -1176,7 +1177,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, if (ret) return ret; - adev = devm_drm_dev_alloc(&pdev->dev, &kms_driver, typeof(*adev), ddev); + adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev); if (IS_ERR(adev)) return PTR_ERR(adev); @@ -1520,7 +1521,29 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv) return 0; } -static struct drm_driver kms_driver = { +int amdgpu_info_ioctl(struct drm_device *dev, void 
*data, struct drm_file *filp); + +const struct drm_ioctl_desc amdgpu_ioctls_kms[] = { + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER), + DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + /* KMS */ + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), +}; + +static const struct drm_driver amdgpu_kms_driver = { .driver_features = DRIVER_ATOMIC | DRIVER_GEM | @@ -1531,6 +1554,7 @@ static struct drm_driver kms_driver = { .lastclose = amdgpu_driver_lastclose_kms, .irq_handler = amdgpu_irq_handler, .ioctls = amdgpu_ioctls_kms, + .num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms), .dumb_create = amdgpu_mode_dumb_create, .dumb_map_offset = amdgpu_mode_dumb_mmap, .fops = &amdgpu_driver_kms_fops, @@ -1583,7 +1607,6 @@ static int __init amdgpu_init(void) goto error_fence; DRM_INFO("amdgpu kernel modesetting enabled.\n"); - kms_driver.num_ioctls = amdgpu_max_kms_ioctl; amdgpu_register_atpx_handler(); /* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index df94db199532..c9f94fbeb018 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -33,6 +33,7 @@ #include <drm/amdgpu_drm.h> #include <drm/drm_debugfs.h> +#include <drm/drm_gem_ttm_helper.h> #include "amdgpu.h" #include "amdgpu_display.h" @@ -220,8 +221,8 @@ static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = { .open = amdgpu_gem_object_open, .close = amdgpu_gem_object_close, .export = amdgpu_gem_prime_export, - .vmap = amdgpu_gem_prime_vmap, - .vunmap = amdgpu_gem_prime_vunmap, + .vmap = drm_gem_ttm_vmap, + .vunmap = drm_gem_ttm_vunmap, }; /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 918d4e9c6461..4ad6d801bc25 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -29,7 +29,6 @@ #include "amdgpu.h" #include <drm/drm_debugfs.h> #include <drm/amdgpu_drm.h> -#include "amdgpu_sched.h" #include "amdgpu_uvd.h" #include "amdgpu_vce.h" #include "atom.h" @@ -484,7 +483,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, * etc. (all asics). * Returns 0 on success, -EINVAL on failure. 
*/ -static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) +int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct amdgpu_device *adev = drm_to_adev(dev); struct drm_amdgpu_info *info = data; @@ -1247,27 +1246,6 @@ void amdgpu_disable_vblank_kms(struct drm_crtc *crtc) amdgpu_irq_put(adev, &adev->crtc_irq, idx); } -const struct drm_ioctl_desc amdgpu_ioctls_kms[] = { - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER), - DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - /* KMS */ - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW) -}; -const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms); - /* * Debugfs info */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index a5a7992b5773..ed47cbac4f75 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -100,7 +100,6 @@ struct amdgpu_bo { struct amdgpu_bo *parent; struct amdgpu_bo *shadow; - struct ttm_bo_kmap_obj dma_buf_vmap; struct amdgpu_mn *mn; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 12aa35ac0eb4..676fb520e044 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -513,119 +513,6 @@ error: } /** - * amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer - * - * Called by amdgpu_bo_move(). 
- */ -static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict, - struct ttm_operation_ctx *ctx, - struct ttm_resource *new_mem) -{ - struct ttm_resource *old_mem = &bo->mem; - struct ttm_resource tmp_mem; - struct ttm_place placements; - struct ttm_placement placement; - int r; - - /* create space/pages for new_mem in GTT space */ - tmp_mem = *new_mem; - tmp_mem.mm_node = NULL; - placement.num_placement = 1; - placement.placement = &placements; - placement.num_busy_placement = 1; - placement.busy_placement = &placements; - placements.fpfn = 0; - placements.lpfn = 0; - placements.mem_type = TTM_PL_TT; - placements.flags = 0; - r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); - if (unlikely(r)) { - pr_err("Failed to find GTT space for blit from VRAM\n"); - return r; - } - - r = ttm_tt_populate(bo->bdev, bo->ttm, ctx); - if (unlikely(r)) - goto out_cleanup; - - /* Bind the memory to the GTT space */ - r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, &tmp_mem); - if (unlikely(r)) { - goto out_cleanup; - } - - /* blit VRAM to GTT */ - r = amdgpu_move_blit(bo, evict, &tmp_mem, old_mem); - if (unlikely(r)) { - goto out_cleanup; - } - - r = ttm_bo_wait_ctx(bo, ctx); - if (unlikely(r)) - goto out_cleanup; - - amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm); - ttm_resource_free(bo, &bo->mem); - ttm_bo_assign_mem(bo, new_mem); -out_cleanup: - ttm_resource_free(bo, &tmp_mem); - return r; -} - -/** - * amdgpu_move_ram_vram - Copy buffer from RAM to VRAM - * - * Called by amdgpu_bo_move(). - */ -static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict, - struct ttm_operation_ctx *ctx, - struct ttm_resource *new_mem) -{ - struct ttm_resource *old_mem = &bo->mem; - struct ttm_resource tmp_mem; - struct ttm_placement placement; - struct ttm_place placements; - int r; - - /* make space in GTT for old_mem buffer */ - tmp_mem = *new_mem; - tmp_mem.mm_node = NULL; - placement.num_placement = 1; - placement.placement = &placements; - placement.num_busy_placement = 1; - placement.busy_placement = &placements; - placements.fpfn = 0; - placements.lpfn = 0; - placements.mem_type = TTM_PL_TT; - placements.flags = 0; - r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); - if (unlikely(r)) { - pr_err("Failed to find GTT space for blit to VRAM\n"); - return r; - } - - /* move/bind old memory to GTT space */ - r = ttm_tt_populate(bo->bdev, bo->ttm, ctx); - if (unlikely(r)) - return r; - - r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, &tmp_mem); - if (unlikely(r)) { - goto out_cleanup; - } - - ttm_bo_assign_mem(bo, &tmp_mem); - /* copy to VRAM */ - r = amdgpu_move_blit(bo, evict, new_mem, old_mem); - if (unlikely(r)) { - goto out_cleanup; - } -out_cleanup: - ttm_resource_free(bo, &tmp_mem); - return r; -} - -/** * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy * * Called by amdgpu_bo_move() @@ -656,13 +543,25 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev, */ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_resource *new_mem) + struct ttm_resource *new_mem, + struct ttm_place *hop) { struct amdgpu_device *adev; struct amdgpu_bo *abo; struct ttm_resource *old_mem = &bo->mem; int r; + if ((old_mem->mem_type == TTM_PL_SYSTEM && + new_mem->mem_type == TTM_PL_VRAM) || + (old_mem->mem_type == TTM_PL_VRAM && + new_mem->mem_type == TTM_PL_SYSTEM)) { + hop->fpfn = 0; + hop->lpfn = 0; + hop->mem_type = TTM_PL_TT; + hop->flags = 0; + return -EMULTIHOP; + } + if (new_mem->mem_type == TTM_PL_TT) 
{ r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem); if (r) @@ -716,17 +615,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, goto memcpy; } - if (old_mem->mem_type == TTM_PL_VRAM && - new_mem->mem_type == TTM_PL_SYSTEM) { - r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem); - } else if (old_mem->mem_type == TTM_PL_SYSTEM && - new_mem->mem_type == TTM_PL_VRAM) { - r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem); - } else { - r = amdgpu_move_blit(bo, evict, - new_mem, old_mem); - } - + r = amdgpu_move_blit(bo, evict, new_mem, old_mem); if (r) { memcpy: /* Check that all memory is CPU accessible */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index d0aea5e39531..8aff6ef50f91 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -47,11 +47,13 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev) void amdgpu_virt_init_setting(struct amdgpu_device *adev) { + struct drm_device *ddev = adev_to_drm(adev); + /* enable virtual display */ if (adev->mode_info.num_crtc == 0) adev->mode_info.num_crtc = 1; adev->enable_virtual_display = true; - adev_to_drm(adev)->driver->driver_features &= ~DRIVER_ATOMIC; + ddev->driver_features &= ~DRIVER_ATOMIC; adev->cg_flags = 0; adev->pg_flags = 0; } diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c index df0b9eeb8933..4b485eb512e2 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c @@ -387,10 +387,12 @@ static void komeda_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct drm_crtc_state *old = drm_atomic_get_old_crtc_state(state, crtc); /* commit with modeset will be handled in enable/disable */ - if (drm_atomic_crtc_needs_modeset(crtc->state)) + if (drm_atomic_crtc_needs_modeset(crtc_state)) return; komeda_crtc_do_flush(crtc, old); diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c index 1f6682032ca4..6b99df696384 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c @@ -58,7 +58,7 @@ static irqreturn_t komeda_kms_irq_handler(int irq, void *data) return status; } -static struct drm_driver komeda_kms_driver = { +static const struct drm_driver komeda_kms_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .lastclose = drm_fb_helper_lastclose, DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(komeda_gem_cma_dumb_create), diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index faa8a5a752da..81ae92390736 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -234,7 +234,7 @@ static void hdlcd_debugfs_init(struct drm_minor *minor) DEFINE_DRM_GEM_CMA_FOPS(fops); -static struct drm_driver hdlcd_driver = { +static const struct drm_driver hdlcd_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .irq_handler = hdlcd_irq, .irq_preinstall = hdlcd_irq_preinstall, diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 69fee05c256c..fceda010d65a 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -561,7 +561,7 @@ static void malidp_debugfs_init(struct drm_minor *minor) #endif //CONFIG_DEBUG_FS -static struct drm_driver 
malidp_driver = { +static const struct drm_driver malidp_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(malidp_dumb_create), #ifdef CONFIG_DEBUG_FS diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index ca643f4e2064..3ebcf5a52c8b 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -431,11 +431,13 @@ static int armada_drm_crtc_atomic_check(struct drm_crtc *crtc, static void armada_drm_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); - if (crtc->state->color_mgmt_changed) + if (crtc_state->color_mgmt_changed) armada_drm_update_gamma(crtc); dcrtc->regs_idx = 0; @@ -445,6 +447,8 @@ static void armada_drm_crtc_atomic_begin(struct drm_crtc *crtc, static void armada_drm_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); @@ -455,7 +459,7 @@ static void armada_drm_crtc_atomic_flush(struct drm_crtc *crtc, * If we aren't doing a full modeset, then we need to queue * the event here. */ - if (!drm_atomic_crtc_needs_modeset(crtc->state)) { + if (!drm_atomic_crtc_needs_modeset(crtc_state)) { dcrtc->update_pending = true; armada_drm_crtc_queue_state_event(crtc); spin_lock_irq(&dcrtc->irq_lock); diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 22247cfce80b..44fe9f994fc5 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -27,7 +27,7 @@ #include <drm/armada_drm.h> #include "armada_ioctlP.h" -static struct drm_ioctl_desc armada_ioctls[] = { +static const struct drm_ioctl_desc armada_ioctls[] = { DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,0), DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl, 0), DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl, 0), @@ -35,7 +35,7 @@ static struct drm_ioctl_desc armada_ioctls[] = { DEFINE_DRM_GEM_FOPS(armada_drm_fops); -static struct drm_driver armada_drm_driver = { +static const struct drm_driver armada_drm_driver = { .lastclose = drm_fb_helper_lastclose, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, @@ -48,6 +48,7 @@ static struct drm_driver armada_drm_driver = { .date = "20120730", .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .ioctls = armada_ioctls, + .num_ioctls = ARRAY_SIZE(armada_ioctls), .fops = &armada_drm_fops, }; @@ -275,8 +276,6 @@ static int __init armada_drm_init(void) { int ret; - armada_drm_driver.num_ioctls = ARRAY_SIZE(armada_ioctls); - ret = platform_driver_register(&armada_lcd_platform_driver); if (ret) return ret; diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index 771ad71cd340..457ec04950f7 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -191,7 +191,7 @@ static void aspeed_gfx_unload(struct drm_device *drm) DEFINE_DRM_GEM_CMA_FOPS(fops); -static struct drm_driver aspeed_gfx_driver = { +static const struct drm_driver aspeed_gfx_driver = { .driver_features = DRIVER_GEM | 
DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_CMA_DRIVER_OPS, .fops = &fops, diff --git a/drivers/gpu/drm/ast/ast_cursor.c b/drivers/gpu/drm/ast/ast_cursor.c index e0f4613918ad..742d43a7edf4 100644 --- a/drivers/gpu/drm/ast/ast_cursor.c +++ b/drivers/gpu/drm/ast/ast_cursor.c @@ -39,7 +39,7 @@ static void ast_cursor_fini(struct ast_private *ast) for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) { gbo = ast->cursor.gbo[i]; - drm_gem_vram_vunmap(gbo, ast->cursor.vaddr[i]); + drm_gem_vram_vunmap(gbo, &ast->cursor.map[i]); drm_gem_vram_unpin(gbo); drm_gem_vram_put(gbo); } @@ -60,7 +60,7 @@ int ast_cursor_init(struct ast_private *ast) struct drm_device *dev = &ast->base; size_t size, i; struct drm_gem_vram_object *gbo; - void __iomem *vaddr; + struct dma_buf_map map; int ret; size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE); @@ -77,16 +77,15 @@ int ast_cursor_init(struct ast_private *ast) drm_gem_vram_put(gbo); goto err_drm_gem_vram_put; } - vaddr = drm_gem_vram_vmap(gbo); - if (IS_ERR(vaddr)) { - ret = PTR_ERR(vaddr); + ret = drm_gem_vram_vmap(gbo, &map); + if (ret) { drm_gem_vram_unpin(gbo); drm_gem_vram_put(gbo); goto err_drm_gem_vram_put; } ast->cursor.gbo[i] = gbo; - ast->cursor.vaddr[i] = vaddr; + ast->cursor.map[i] = map; } return drmm_add_action_or_reset(dev, ast_cursor_release, NULL); @@ -95,7 +94,7 @@ err_drm_gem_vram_put: while (i) { --i; gbo = ast->cursor.gbo[i]; - drm_gem_vram_vunmap(gbo, ast->cursor.vaddr[i]); + drm_gem_vram_vunmap(gbo, &ast->cursor.map[i]); drm_gem_vram_unpin(gbo); drm_gem_vram_put(gbo); } @@ -170,6 +169,7 @@ int ast_cursor_blit(struct ast_private *ast, struct drm_framebuffer *fb) { struct drm_device *dev = &ast->base; struct drm_gem_vram_object *gbo; + struct dma_buf_map map; int ret; void *src; void __iomem *dst; @@ -183,18 +183,17 @@ int ast_cursor_blit(struct ast_private *ast, struct drm_framebuffer *fb) ret = drm_gem_vram_pin(gbo, 0); if (ret) return ret; - src = drm_gem_vram_vmap(gbo); - if (IS_ERR(src)) { - ret = PTR_ERR(src); + ret = drm_gem_vram_vmap(gbo, &map); + if (ret) goto err_drm_gem_vram_unpin; - } + src = map.vaddr; /* TODO: Use mapping abstraction properly */ - dst = ast->cursor.vaddr[ast->cursor.next_index]; + dst = ast->cursor.map[ast->cursor.next_index].vaddr_iomem; /* do data transfer to cursor BO */ update_cursor_image(dst, src, fb->width, fb->height); - drm_gem_vram_vunmap(gbo, src); + drm_gem_vram_vunmap(gbo, &map); drm_gem_vram_unpin(gbo); return 0; @@ -257,7 +256,7 @@ void ast_cursor_show(struct ast_private *ast, int x, int y, u8 __iomem *sig; u8 jreg; - dst = ast->cursor.vaddr[ast->cursor.next_index]; + dst = ast->cursor.map[ast->cursor.next_index].vaddr; sig = dst + AST_HWC_SIZE; writel(x, sig + AST_HWC_SIGNATURE_X); diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index f0b4af1c390a..667b450606ef 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -49,7 +49,7 @@ module_param_named(modeset, ast_modeset, int, 0400); DEFINE_DRM_GEM_FOPS(ast_fops); -static struct drm_driver ast_driver = { +static const struct drm_driver ast_driver = { .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET, diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 467049ca8430..ccaff81924ee 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -28,10 +28,11 @@ #ifndef __AST_DRV_H__ #define __AST_DRV_H__ -#include <linux/types.h> -#include <linux/io.h> +#include <linux/dma-buf-map.h> #include <linux/i2c.h> #include 
<linux/i2c-algo-bit.h> +#include <linux/io.h> +#include <linux/types.h> #include <drm/drm_connector.h> #include <drm/drm_crtc.h> @@ -63,6 +64,7 @@ enum ast_chip { AST2300, AST2400, AST2500, + AST2600, }; enum ast_tx_chip { @@ -131,7 +133,7 @@ struct ast_private { struct { struct drm_gem_vram_object *gbo[AST_DEFAULT_HWC_NUM]; - void __iomem *vaddr[AST_DEFAULT_HWC_NUM]; + struct dma_buf_map map[AST_DEFAULT_HWC_NUM]; unsigned int next_index; } cursor; @@ -159,7 +161,7 @@ static inline struct ast_private *to_ast_private(struct drm_device *dev) return container_of(dev, struct ast_private, base); } -struct ast_private *ast_device_create(struct drm_driver *drv, +struct ast_private *ast_device_create(const struct drm_driver *drv, struct pci_dev *pdev, unsigned long flags); diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 77066bca8793..1b13199858cb 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -143,7 +143,10 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) ast_detect_config_mode(dev, &scu_rev); /* Identify chipset */ - if (dev->pdev->revision >= 0x40) { + if (dev->pdev->revision >= 0x50) { + ast->chip = AST2600; + drm_info(dev, "AST 2600 detected\n"); + } else if (dev->pdev->revision >= 0x40) { ast->chip = AST2500; drm_info(dev, "AST 2500 detected\n"); } else if (dev->pdev->revision >= 0x30) { @@ -392,7 +395,7 @@ static void ast_device_release(void *data) ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04); } -struct ast_private *ast_device_create(struct drm_driver *drv, +struct ast_private *ast_device_create(const struct drm_driver *drv, struct pci_dev *pdev, unsigned long flags) { diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 22f0e65fbe9a..9db371f4054f 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -782,10 +782,12 @@ static void ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc); struct ast_private *ast = to_ast_private(crtc->dev); - struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc->state); + struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state); struct ast_crtc_state *old_ast_crtc_state = to_ast_crtc_state(old_crtc_state); /* diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h index 3d013946714e..d9eb353a4bf0 100644 --- a/drivers/gpu/drm/ast/ast_tables.h +++ b/drivers/gpu/drm/ast/ast_tables.h @@ -295,10 +295,10 @@ static const struct ast_vbios_enhtable res_1600x900[] = { static const struct ast_vbios_enhtable res_1920x1080[] = { {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */ - (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | AST2500PreCatchCRT), 60, 1, 0x38 }, {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */ - (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | AST2500PreCatchCRT), 0xFF, 1, 0x38 }, }; diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index 871293d1aeeb..98fb53b75f77 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -815,7 +815,7 @@ 
static void atmel_hlcdc_dc_irq_uninstall(struct drm_device *dev) DEFINE_DRM_GEM_CMA_FOPS(fops); -static struct drm_driver atmel_hlcdc_dc_driver = { +static const struct drm_driver atmel_hlcdc_dc_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .irq_handler = atmel_hlcdc_dc_irq_handler, .irq_preinstall = atmel_hlcdc_dc_irq_uninstall, diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c index e18c51de1196..fd454225fd19 100644 --- a/drivers/gpu/drm/bochs/bochs_drv.c +++ b/drivers/gpu/drm/bochs/bochs_drv.c @@ -57,7 +57,7 @@ err: DEFINE_DRM_GEM_FOPS(bochs_fops); -static struct drm_driver bochs_driver = { +static const struct drm_driver bochs_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &bochs_fops, .name = "bochs-drm", diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c index 13d0d04c4457..853081d186d5 100644 --- a/drivers/gpu/drm/bochs/bochs_kms.c +++ b/drivers/gpu/drm/bochs/bochs_kms.c @@ -151,7 +151,6 @@ int bochs_kms_init(struct bochs_device *bochs) bochs->dev->mode_config.preferred_depth = 24; bochs->dev->mode_config.prefer_shadow = 0; bochs->dev->mode_config.prefer_shadow_fbdev = 1; - bochs->dev->mode_config.fbdev_use_iomem = true; bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true; bochs->dev->mode_config.funcs = &bochs_mode_funcs; diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index ef91646441b1..e4110d6ca7b3 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -61,6 +61,19 @@ config DRM_LONTIUM_LT9611 HDMI signals Please say Y if you have such hardware. +config DRM_LONTIUM_LT9611UXC + tristate "Lontium LT9611UXC DSI/HDMI bridge" + select SND_SOC_HDMI_CODEC if SND_SOC + depends on OF + select DRM_PANEL_BRIDGE + select DRM_KMS_HELPER + select REGMAP_I2C + help + Driver for Lontium LT9611UXC DSI to HDMI bridge + chip driver that converts dual DSI and I2S to + HDMI signals + Please say Y if you have such hardware. + config DRM_LVDS_CODEC tristate "Transparent LVDS encoders and decoders support" depends on OF diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index 2b3aff104e46..86e7acc76f8d 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o obj-$(CONFIG_DRM_LONTIUM_LT9611) += lontium-lt9611.o +obj-$(CONFIG_DRM_LONTIUM_LT9611UXC) += lontium-lt9611uxc.o obj-$(CONFIG_DRM_LVDS_CODEC) += lvds-codec.o obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c index fafb4b492ea0..cab6c8b92efd 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c @@ -524,94 +524,6 @@ void analogix_dp_enable_sw_function(struct analogix_dp_device *dp) writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1); } -static int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp) -{ - int reg; - int retval = 0; - int timeout_loop = 0; - - /* Enable AUX CH operation */ - reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2); - reg |= AUX_EN; - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2); - - /* Is AUX CH command reply received? 
*/ - reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA); - while (!(reg & RPLY_RECEIV)) { - timeout_loop++; - if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) { - dev_err(dp->dev, "AUX CH command reply failed!\n"); - return -ETIMEDOUT; - } - reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA); - usleep_range(10, 11); - } - - /* Clear interrupt source for AUX CH command reply */ - writel(RPLY_RECEIV, dp->reg_base + ANALOGIX_DP_INT_STA); - - /* Clear interrupt source for AUX CH access error */ - reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA); - if (reg & AUX_ERR) { - writel(AUX_ERR, dp->reg_base + ANALOGIX_DP_INT_STA); - return -EREMOTEIO; - } - - /* Check AUX CH error access status */ - reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_STA); - if ((reg & AUX_STATUS_MASK) != 0) { - dev_err(dp->dev, "AUX CH error happens: %d\n\n", - reg & AUX_STATUS_MASK); - return -EREMOTEIO; - } - - return retval; -} - -int analogix_dp_write_byte_to_dpcd(struct analogix_dp_device *dp, - unsigned int reg_addr, - unsigned char data) -{ - u32 reg; - int i; - int retval; - - for (i = 0; i < 3; i++) { - /* Clear AUX CH data buffer */ - reg = BUF_CLR; - writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL); - - /* Select DPCD device address */ - reg = AUX_ADDR_7_0(reg_addr); - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0); - reg = AUX_ADDR_15_8(reg_addr); - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8); - reg = AUX_ADDR_19_16(reg_addr); - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16); - - /* Write data buffer */ - reg = (unsigned int)data; - writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0); - - /* - * Set DisplayPort transaction and write 1 byte - * If bit 3 is 1, DisplayPort transaction. - * If Bit 3 is 0, I2C transaction. - */ - reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE; - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1); - - /* Start AUX transaction */ - retval = analogix_dp_start_aux_transaction(dp); - if (retval == 0) - break; - - dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__); - } - - return retval; -} - void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype) { u32 reg; diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c new file mode 100644 index 000000000000..0c98d27f84ac --- /dev/null +++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c @@ -0,0 +1,1002 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2019-2020. Linaro Limited. 
+ */ + +#include <linux/firmware.h> +#include <linux/gpio/consumer.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of_graph.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> +#include <linux/wait.h> + +#include <sound/hdmi-codec.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_print.h> +#include <drm/drm_probe_helper.h> + +#define EDID_BLOCK_SIZE 128 +#define EDID_NUM_BLOCKS 2 + +struct lt9611uxc { + struct device *dev; + struct drm_bridge bridge; + struct drm_connector connector; + + struct regmap *regmap; + /* Protects all accesses to registers by stopping the on-chip MCU */ + struct mutex ocm_lock; + + struct wait_queue_head wq; + + struct device_node *dsi0_node; + struct device_node *dsi1_node; + struct mipi_dsi_device *dsi0; + struct mipi_dsi_device *dsi1; + struct platform_device *audio_pdev; + + struct gpio_desc *reset_gpio; + struct gpio_desc *enable_gpio; + + struct regulator_bulk_data supplies[2]; + + struct i2c_client *client; + + bool hpd_supported; + bool edid_read; + uint8_t fw_version; +}; + +#define LT9611_PAGE_CONTROL 0xff + +static const struct regmap_range_cfg lt9611uxc_ranges[] = { + { + .name = "register_range", + .range_min = 0, + .range_max = 0xd0ff, + .selector_reg = LT9611_PAGE_CONTROL, + .selector_mask = 0xff, + .selector_shift = 0, + .window_start = 0, + .window_len = 0x100, + }, +}; + +static const struct regmap_config lt9611uxc_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0xffff, + .ranges = lt9611uxc_ranges, + .num_ranges = ARRAY_SIZE(lt9611uxc_ranges), +}; + +struct lt9611uxc_mode { + u16 hdisplay; + u16 vdisplay; + u8 vrefresh; +}; + +/* + * This chip supports only a fixed set of modes. + * Enumerate them here to check whether the mode is supported. 
+ */ +static struct lt9611uxc_mode lt9611uxc_modes[] = { + { 1920, 1080, 60 }, + { 1920, 1080, 30 }, + { 1920, 1080, 25 }, + { 1366, 768, 60 }, + { 1360, 768, 60 }, + { 1280, 1024, 60 }, + { 1280, 800, 60 }, + { 1280, 720, 60 }, + { 1280, 720, 50 }, + { 1280, 720, 30 }, + { 1152, 864, 60 }, + { 1024, 768, 60 }, + { 800, 600, 60 }, + { 720, 576, 50 }, + { 720, 480, 60 }, + { 640, 480, 60 }, +}; + +static struct lt9611uxc *bridge_to_lt9611uxc(struct drm_bridge *bridge) +{ + return container_of(bridge, struct lt9611uxc, bridge); +} + +static struct lt9611uxc *connector_to_lt9611uxc(struct drm_connector *connector) +{ + return container_of(connector, struct lt9611uxc, connector); +} + +static void lt9611uxc_lock(struct lt9611uxc *lt9611uxc) +{ + mutex_lock(<9611uxc->ocm_lock); + regmap_write(lt9611uxc->regmap, 0x80ee, 0x01); +} + +static void lt9611uxc_unlock(struct lt9611uxc *lt9611uxc) +{ + regmap_write(lt9611uxc->regmap, 0x80ee, 0x00); + msleep(50); + mutex_unlock(<9611uxc->ocm_lock); +} + +static irqreturn_t lt9611uxc_irq_thread_handler(int irq, void *dev_id) +{ + struct lt9611uxc *lt9611uxc = dev_id; + unsigned int irq_status = 0; + unsigned int hpd_status = 0; + + lt9611uxc_lock(lt9611uxc); + + regmap_read(lt9611uxc->regmap, 0xb022, &irq_status); + regmap_read(lt9611uxc->regmap, 0xb023, &hpd_status); + if (irq_status) + regmap_write(lt9611uxc->regmap, 0xb022, 0); + + lt9611uxc_unlock(lt9611uxc); + + if (irq_status & BIT(0)) + lt9611uxc->edid_read = !!(hpd_status & BIT(0)); + + if (irq_status & BIT(1)) { + if (lt9611uxc->connector.dev) + drm_kms_helper_hotplug_event(lt9611uxc->connector.dev); + else + drm_bridge_hpd_notify(<9611uxc->bridge, !!(hpd_status & BIT(1))); + } + + return IRQ_HANDLED; +} + +static void lt9611uxc_reset(struct lt9611uxc *lt9611uxc) +{ + gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 1); + msleep(20); + + gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 0); + msleep(20); + + gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 1); + msleep(300); +} + +static void lt9611uxc_assert_5v(struct lt9611uxc *lt9611uxc) +{ + if (!lt9611uxc->enable_gpio) + return; + + gpiod_set_value_cansleep(lt9611uxc->enable_gpio, 1); + msleep(20); +} + +static int lt9611uxc_regulator_init(struct lt9611uxc *lt9611uxc) +{ + int ret; + + lt9611uxc->supplies[0].supply = "vdd"; + lt9611uxc->supplies[1].supply = "vcc"; + + ret = devm_regulator_bulk_get(lt9611uxc->dev, 2, lt9611uxc->supplies); + if (ret < 0) + return ret; + + return regulator_set_load(lt9611uxc->supplies[0].consumer, 200000); +} + +static int lt9611uxc_regulator_enable(struct lt9611uxc *lt9611uxc) +{ + int ret; + + ret = regulator_enable(lt9611uxc->supplies[0].consumer); + if (ret < 0) + return ret; + + usleep_range(1000, 10000); /* 50000 according to dtsi */ + + ret = regulator_enable(lt9611uxc->supplies[1].consumer); + if (ret < 0) { + regulator_disable(lt9611uxc->supplies[0].consumer); + return ret; + } + + return 0; +} + +static struct lt9611uxc_mode *lt9611uxc_find_mode(const struct drm_display_mode *mode) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(lt9611uxc_modes); i++) { + if (lt9611uxc_modes[i].hdisplay == mode->hdisplay && + lt9611uxc_modes[i].vdisplay == mode->vdisplay && + lt9611uxc_modes[i].vrefresh == drm_mode_vrefresh(mode)) { + return <9611uxc_modes[i]; + } + } + + return NULL; +} + +static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc, + struct device_node *dsi_node) +{ + const struct mipi_dsi_device_info info = { "lt9611uxc", 0, NULL }; + struct mipi_dsi_device *dsi; + struct mipi_dsi_host 
*host; + int ret; + + host = of_find_mipi_dsi_host_by_node(dsi_node); + if (!host) { + dev_err(lt9611uxc->dev, "failed to find dsi host\n"); + return ERR_PTR(-EPROBE_DEFER); + } + + dsi = mipi_dsi_device_register_full(host, &info); + if (IS_ERR(dsi)) { + dev_err(lt9611uxc->dev, "failed to create dsi device\n"); + return dsi; + } + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | + MIPI_DSI_MODE_VIDEO_HSE; + + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + dev_err(lt9611uxc->dev, "failed to attach dsi to host\n"); + mipi_dsi_device_unregister(dsi); + return ERR_PTR(ret); + } + + return dsi; +} + +static int lt9611uxc_connector_get_modes(struct drm_connector *connector) +{ + struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector); + unsigned int count; + struct edid *edid; + + edid = lt9611uxc->bridge.funcs->get_edid(<9611uxc->bridge, connector); + drm_connector_update_edid_property(connector, edid); + count = drm_add_edid_modes(connector, edid); + kfree(edid); + + return count; +} + +static enum drm_connector_status lt9611uxc_connector_detect(struct drm_connector *connector, + bool force) +{ + struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector); + + return lt9611uxc->bridge.funcs->detect(<9611uxc->bridge); +} + +static enum drm_mode_status lt9611uxc_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct lt9611uxc_mode *lt9611uxc_mode = lt9611uxc_find_mode(mode); + + return lt9611uxc_mode ? MODE_OK : MODE_BAD; +} + +static const struct drm_connector_helper_funcs lt9611uxc_bridge_connector_helper_funcs = { + .get_modes = lt9611uxc_connector_get_modes, + .mode_valid = lt9611uxc_connector_mode_valid, +}; + +static const struct drm_connector_funcs lt9611uxc_bridge_connector_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .detect = lt9611uxc_connector_detect, + .destroy = drm_connector_cleanup, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int lt9611uxc_connector_init(struct drm_bridge *bridge, struct lt9611uxc *lt9611uxc) +{ + int ret; + + if (!bridge->encoder) { + DRM_ERROR("Parent encoder object not found"); + return -ENODEV; + } + + drm_connector_helper_add(<9611uxc->connector, + <9611uxc_bridge_connector_helper_funcs); + ret = drm_connector_init(bridge->dev, <9611uxc->connector, + <9611uxc_bridge_connector_funcs, + DRM_MODE_CONNECTOR_HDMIA); + if (ret) { + DRM_ERROR("Failed to initialize connector with drm\n"); + return ret; + } + + return drm_connector_attach_encoder(<9611uxc->connector, bridge->encoder); +} + +static void lt9611uxc_bridge_detach(struct drm_bridge *bridge) +{ + struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); + + if (lt9611uxc->dsi1) { + mipi_dsi_detach(lt9611uxc->dsi1); + mipi_dsi_device_unregister(lt9611uxc->dsi1); + } + + mipi_dsi_detach(lt9611uxc->dsi0); + mipi_dsi_device_unregister(lt9611uxc->dsi0); +} + +static int lt9611uxc_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); + int ret; + + if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) { + ret = lt9611uxc_connector_init(bridge, lt9611uxc); + if (ret < 0) + return ret; + } + + /* Attach primary DSI */ + lt9611uxc->dsi0 = lt9611uxc_attach_dsi(lt9611uxc, lt9611uxc->dsi0_node); + if 
(IS_ERR(lt9611uxc->dsi0)) + return PTR_ERR(lt9611uxc->dsi0); + + /* Attach secondary DSI, if specified */ + if (lt9611uxc->dsi1_node) { + lt9611uxc->dsi1 = lt9611uxc_attach_dsi(lt9611uxc, lt9611uxc->dsi1_node); + if (IS_ERR(lt9611uxc->dsi1)) { + ret = PTR_ERR(lt9611uxc->dsi1); + goto err_unregister_dsi0; + } + } + + return 0; + +err_unregister_dsi0: + mipi_dsi_detach(lt9611uxc->dsi0); + mipi_dsi_device_unregister(lt9611uxc->dsi0); + + return ret; +} + +static enum drm_mode_status +lt9611uxc_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + struct lt9611uxc_mode *lt9611uxc_mode; + + lt9611uxc_mode = lt9611uxc_find_mode(mode); + + return lt9611uxc_mode ? MODE_OK : MODE_BAD; +} + +static void lt9611uxc_video_setup(struct lt9611uxc *lt9611uxc, + const struct drm_display_mode *mode) +{ + u32 h_total, hactive, hsync_len, hfront_porch; + u32 v_total, vactive, vsync_len, vfront_porch; + + h_total = mode->htotal; + v_total = mode->vtotal; + + hactive = mode->hdisplay; + hsync_len = mode->hsync_end - mode->hsync_start; + hfront_porch = mode->hsync_start - mode->hdisplay; + + vactive = mode->vdisplay; + vsync_len = mode->vsync_end - mode->vsync_start; + vfront_porch = mode->vsync_start - mode->vdisplay; + + regmap_write(lt9611uxc->regmap, 0xd00d, (u8)(v_total / 256)); + regmap_write(lt9611uxc->regmap, 0xd00e, (u8)(v_total % 256)); + + regmap_write(lt9611uxc->regmap, 0xd00f, (u8)(vactive / 256)); + regmap_write(lt9611uxc->regmap, 0xd010, (u8)(vactive % 256)); + + regmap_write(lt9611uxc->regmap, 0xd011, (u8)(h_total / 256)); + regmap_write(lt9611uxc->regmap, 0xd012, (u8)(h_total % 256)); + + regmap_write(lt9611uxc->regmap, 0xd013, (u8)(hactive / 256)); + regmap_write(lt9611uxc->regmap, 0xd014, (u8)(hactive % 256)); + + regmap_write(lt9611uxc->regmap, 0xd015, (u8)(vsync_len % 256)); + + regmap_update_bits(lt9611uxc->regmap, 0xd016, 0xf, (u8)(hsync_len / 256)); + regmap_write(lt9611uxc->regmap, 0xd017, (u8)(hsync_len % 256)); + + regmap_update_bits(lt9611uxc->regmap, 0xd018, 0xf, (u8)(vfront_porch / 256)); + regmap_write(lt9611uxc->regmap, 0xd019, (u8)(vfront_porch % 256)); + + regmap_update_bits(lt9611uxc->regmap, 0xd01a, 0xf, (u8)(hfront_porch / 256)); + regmap_write(lt9611uxc->regmap, 0xd01b, (u8)(hfront_porch % 256)); +} + +static void lt9611uxc_bridge_mode_set(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adj_mode) +{ + struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); + + lt9611uxc_lock(lt9611uxc); + lt9611uxc_video_setup(lt9611uxc, mode); + lt9611uxc_unlock(lt9611uxc); +} + +static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *bridge) +{ + struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); + unsigned int reg_val = 0; + int ret; + int connected = 1; + + if (lt9611uxc->hpd_supported) { + lt9611uxc_lock(lt9611uxc); + ret = regmap_read(lt9611uxc->regmap, 0xb023, ®_val); + lt9611uxc_unlock(lt9611uxc); + + if (ret) + dev_err(lt9611uxc->dev, "failed to read hpd status: %d\n", ret); + else + connected = reg_val & BIT(1); + } + + return connected ? 
connector_status_connected : + connector_status_disconnected; +} + +static int lt9611uxc_wait_for_edid(struct lt9611uxc *lt9611uxc) +{ + return wait_event_interruptible_timeout(lt9611uxc->wq, lt9611uxc->edid_read, + msecs_to_jiffies(100)); +} + +static int lt9611uxc_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len) +{ + struct lt9611uxc *lt9611uxc = data; + int ret; + + if (len > EDID_BLOCK_SIZE) + return -EINVAL; + + if (block >= EDID_NUM_BLOCKS) + return -EINVAL; + + lt9611uxc_lock(lt9611uxc); + + regmap_write(lt9611uxc->regmap, 0xb00b, 0x10); + + regmap_write(lt9611uxc->regmap, 0xb00a, block * EDID_BLOCK_SIZE); + + ret = regmap_noinc_read(lt9611uxc->regmap, 0xb0b0, buf, len); + if (ret) + dev_err(lt9611uxc->dev, "edid read failed: %d\n", ret); + + lt9611uxc_unlock(lt9611uxc); + + return 0; +}; + +static struct edid *lt9611uxc_bridge_get_edid(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); + int ret; + + ret = lt9611uxc_wait_for_edid(lt9611uxc); + if (ret < 0) { + dev_err(lt9611uxc->dev, "wait for EDID failed: %d\n", ret); + return ERR_PTR(ret); + } + + return drm_do_get_edid(connector, lt9611uxc_get_edid_block, lt9611uxc); +} + +static const struct drm_bridge_funcs lt9611uxc_bridge_funcs = { + .attach = lt9611uxc_bridge_attach, + .detach = lt9611uxc_bridge_detach, + .mode_valid = lt9611uxc_bridge_mode_valid, + .mode_set = lt9611uxc_bridge_mode_set, + .detect = lt9611uxc_bridge_detect, + .get_edid = lt9611uxc_bridge_get_edid, +}; + +static int lt9611uxc_parse_dt(struct device *dev, + struct lt9611uxc *lt9611uxc) +{ + lt9611uxc->dsi0_node = of_graph_get_remote_node(dev->of_node, 0, -1); + if (!lt9611uxc->dsi0_node) { + dev_err(lt9611uxc->dev, "failed to get remote node for primary dsi\n"); + return -ENODEV; + } + + lt9611uxc->dsi1_node = of_graph_get_remote_node(dev->of_node, 1, -1); + + return 0; +} + +static int lt9611uxc_gpio_init(struct lt9611uxc *lt9611uxc) +{ + struct device *dev = lt9611uxc->dev; + + lt9611uxc->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(lt9611uxc->reset_gpio)) { + dev_err(dev, "failed to acquire reset gpio\n"); + return PTR_ERR(lt9611uxc->reset_gpio); + } + + lt9611uxc->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW); + if (IS_ERR(lt9611uxc->enable_gpio)) { + dev_err(dev, "failed to acquire enable gpio\n"); + return PTR_ERR(lt9611uxc->enable_gpio); + } + + return 0; +} + +static int lt9611uxc_read_device_rev(struct lt9611uxc *lt9611uxc) +{ + unsigned int rev0, rev1, rev2; + int ret; + + lt9611uxc_lock(lt9611uxc); + + ret = regmap_read(lt9611uxc->regmap, 0x8100, &rev0); + ret |= regmap_read(lt9611uxc->regmap, 0x8101, &rev1); + ret |= regmap_read(lt9611uxc->regmap, 0x8102, &rev2); + if (ret) + dev_err(lt9611uxc->dev, "failed to read revision: %d\n", ret); + else + dev_info(lt9611uxc->dev, "LT9611 revision: 0x%02x.%02x.%02x\n", rev0, rev1, rev2); + + lt9611uxc_unlock(lt9611uxc); + + return ret; +} + +static int lt9611uxc_read_version(struct lt9611uxc *lt9611uxc) +{ + unsigned int rev; + int ret; + + lt9611uxc_lock(lt9611uxc); + + ret = regmap_read(lt9611uxc->regmap, 0xb021, &rev); + if (ret) + dev_err(lt9611uxc->dev, "failed to read revision: %d\n", ret); + else + dev_info(lt9611uxc->dev, "LT9611 version: 0x%02x\n", rev); + + lt9611uxc_unlock(lt9611uxc); + + return ret < 0 ? 
ret : rev; +} + +static int lt9611uxc_hdmi_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *fmt, + struct hdmi_codec_params *hparms) +{ + /* + * LT9611UXC will automatically detect rate and sample size, so no need + * to setup anything here. + */ + return 0; +} + +static void lt9611uxc_audio_shutdown(struct device *dev, void *data) +{ +} + +static int lt9611uxc_hdmi_i2s_get_dai_id(struct snd_soc_component *component, + struct device_node *endpoint) +{ + struct of_endpoint of_ep; + int ret; + + ret = of_graph_parse_endpoint(endpoint, &of_ep); + if (ret < 0) + return ret; + + /* + * HDMI sound should be located as reg = <2> + * Then, it is sound port 0 + */ + if (of_ep.port == 2) + return 0; + + return -EINVAL; +} + +static const struct hdmi_codec_ops lt9611uxc_codec_ops = { + .hw_params = lt9611uxc_hdmi_hw_params, + .audio_shutdown = lt9611uxc_audio_shutdown, + .get_dai_id = lt9611uxc_hdmi_i2s_get_dai_id, +}; + +static int lt9611uxc_audio_init(struct device *dev, struct lt9611uxc *lt9611uxc) +{ + struct hdmi_codec_pdata codec_data = { + .ops = &lt9611uxc_codec_ops, + .max_i2s_channels = 2, + .i2s = 1, + .data = lt9611uxc, + }; + + lt9611uxc->audio_pdev = + platform_device_register_data(dev, HDMI_CODEC_DRV_NAME, + PLATFORM_DEVID_AUTO, + &codec_data, sizeof(codec_data)); + + return PTR_ERR_OR_ZERO(lt9611uxc->audio_pdev); +} + +static void lt9611uxc_audio_exit(struct lt9611uxc *lt9611uxc) +{ + if (lt9611uxc->audio_pdev) { + platform_device_unregister(lt9611uxc->audio_pdev); + lt9611uxc->audio_pdev = NULL; + } +} + +#define LT9611UXC_FW_PAGE_SIZE 32 +static void lt9611uxc_firmware_write_page(struct lt9611uxc *lt9611uxc, u16 addr, const u8 *buf) +{ + struct reg_sequence seq_write_prepare[] = { + REG_SEQ0(0x805a, 0x04), + REG_SEQ0(0x805a, 0x00), + + REG_SEQ0(0x805e, 0xdf), + REG_SEQ0(0x805a, 0x20), + REG_SEQ0(0x805a, 0x00), + REG_SEQ0(0x8058, 0x21), + }; + + struct reg_sequence seq_write_addr[] = { + REG_SEQ0(0x805b, (addr >> 16) & 0xff), + REG_SEQ0(0x805c, (addr >> 8) & 0xff), + REG_SEQ0(0x805d, addr & 0xff), + REG_SEQ0(0x805a, 0x10), + REG_SEQ0(0x805a, 0x00), + }; + + regmap_write(lt9611uxc->regmap, 0x8108, 0xbf); + msleep(20); + regmap_write(lt9611uxc->regmap, 0x8108, 0xff); + msleep(20); + regmap_multi_reg_write(lt9611uxc->regmap, seq_write_prepare, ARRAY_SIZE(seq_write_prepare)); + regmap_noinc_write(lt9611uxc->regmap, 0x8059, buf, LT9611UXC_FW_PAGE_SIZE); + regmap_multi_reg_write(lt9611uxc->regmap, seq_write_addr, ARRAY_SIZE(seq_write_addr)); + msleep(20); +} + +static void lt9611uxc_firmware_read_page(struct lt9611uxc *lt9611uxc, u16 addr, char *buf) +{ + struct reg_sequence seq_read_page[] = { + REG_SEQ0(0x805a, 0xa0), + REG_SEQ0(0x805a, 0x80), + REG_SEQ0(0x805b, (addr >> 16) & 0xff), + REG_SEQ0(0x805c, (addr >> 8) & 0xff), + REG_SEQ0(0x805d, addr & 0xff), + REG_SEQ0(0x805a, 0x90), + REG_SEQ0(0x805a, 0x80), + REG_SEQ0(0x8058, 0x21), + }; + + regmap_multi_reg_write(lt9611uxc->regmap, seq_read_page, ARRAY_SIZE(seq_read_page)); + regmap_noinc_read(lt9611uxc->regmap, 0x805f, buf, LT9611UXC_FW_PAGE_SIZE); +} + +static char *lt9611uxc_firmware_read(struct lt9611uxc *lt9611uxc, size_t size) +{ + struct reg_sequence seq_read_setup[] = { + REG_SEQ0(0x805a, 0x84), + REG_SEQ0(0x805a, 0x80), + }; + + char *readbuf; + u16 offset; + + readbuf = kzalloc(ALIGN(size, 32), GFP_KERNEL); + if (!readbuf) + return NULL; + + regmap_multi_reg_write(lt9611uxc->regmap, seq_read_setup, ARRAY_SIZE(seq_read_setup)); + + for (offset = 0; + offset < size; + offset += LT9611UXC_FW_PAGE_SIZE) + 
lt9611uxc_firmware_read_page(lt9611uxc, offset, &readbuf[offset]); + + return readbuf; +} + +static int lt9611uxc_firmware_update(struct lt9611uxc *lt9611uxc) +{ + int ret; + u16 offset; + size_t remain; + char *readbuf; + const struct firmware *fw; + + struct reg_sequence seq_setup[] = { + REG_SEQ0(0x805e, 0xdf), + REG_SEQ0(0x8058, 0x00), + REG_SEQ0(0x8059, 0x50), + REG_SEQ0(0x805a, 0x10), + REG_SEQ0(0x805a, 0x00), + }; + + + struct reg_sequence seq_block_erase[] = { + REG_SEQ0(0x805a, 0x04), + REG_SEQ0(0x805a, 0x00), + REG_SEQ0(0x805b, 0x00), + REG_SEQ0(0x805c, 0x00), + REG_SEQ0(0x805d, 0x00), + REG_SEQ0(0x805a, 0x01), + REG_SEQ0(0x805a, 0x00), + }; + + ret = request_firmware(&fw, "lt9611uxc_fw.bin", lt9611uxc->dev); + if (ret < 0) + return ret; + + dev_info(lt9611uxc->dev, "Updating firmware\n"); + lt9611uxc_lock(lt9611uxc); + + regmap_multi_reg_write(lt9611uxc->regmap, seq_setup, ARRAY_SIZE(seq_setup)); + + /* + * Need to erase the block 2 times here. Sometimes, block erase can fail. + * This is a workaround. + */ + regmap_multi_reg_write(lt9611uxc->regmap, seq_block_erase, ARRAY_SIZE(seq_block_erase)); + msleep(3000); + regmap_multi_reg_write(lt9611uxc->regmap, seq_block_erase, ARRAY_SIZE(seq_block_erase)); + msleep(3000); + + for (offset = 0, remain = fw->size; + remain >= LT9611UXC_FW_PAGE_SIZE; + offset += LT9611UXC_FW_PAGE_SIZE, remain -= LT9611UXC_FW_PAGE_SIZE) + lt9611uxc_firmware_write_page(lt9611uxc, offset, fw->data + offset); + + if (remain > 0) { + char buf[LT9611UXC_FW_PAGE_SIZE]; + + memset(buf, 0xff, LT9611UXC_FW_PAGE_SIZE); + memcpy(buf, fw->data + offset, remain); + lt9611uxc_firmware_write_page(lt9611uxc, offset, buf); + } + msleep(20); + + readbuf = lt9611uxc_firmware_read(lt9611uxc, fw->size); + if (!readbuf) { + ret = -ENOMEM; + goto out; + } + + if (memcmp(readbuf, fw->data, fw->size)) { + dev_err(lt9611uxc->dev, "Firmware update failed\n"); + print_hex_dump(KERN_ERR, "fw: ", DUMP_PREFIX_OFFSET, 16, 1, readbuf, fw->size, false); + ret = -EINVAL; + } else { + dev_info(lt9611uxc->dev, "Firmware updated successfully\n"); + ret = 0; + } + kfree(readbuf); + +out: + lt9611uxc_unlock(lt9611uxc); + lt9611uxc_reset(lt9611uxc); + release_firmware(fw); + + return ret; +} + +static ssize_t lt9611uxc_firmware_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) +{ + struct lt9611uxc *lt9611uxc = dev_get_drvdata(dev); + int ret; + + ret = lt9611uxc_firmware_update(lt9611uxc); + if (ret < 0) + return ret; + return len; +} + +static ssize_t lt9611uxc_firmware_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct lt9611uxc *lt9611uxc = dev_get_drvdata(dev); + + return snprintf(buf, PAGE_SIZE, "%02x\n", lt9611uxc->fw_version); +} + +static DEVICE_ATTR_RW(lt9611uxc_firmware); + +static struct attribute *lt9611uxc_attrs[] = { + &dev_attr_lt9611uxc_firmware.attr, + NULL, +}; + +static const struct attribute_group lt9611uxc_attr_group = { + .attrs = lt9611uxc_attrs, +}; + +static const struct attribute_group *lt9611uxc_attr_groups[] = { + &lt9611uxc_attr_group, + NULL, +}; + +static int lt9611uxc_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct lt9611uxc *lt9611uxc; + struct device *dev = &client->dev; + int ret; + bool fw_updated = false; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + dev_err(dev, "device doesn't support I2C\n"); + return -ENODEV; + } + + lt9611uxc = devm_kzalloc(dev, sizeof(*lt9611uxc), GFP_KERNEL); + if (!lt9611uxc) + return -ENOMEM; + + lt9611uxc->dev = 
&client->dev; + lt9611uxc->client = client; + mutex_init(&lt9611uxc->ocm_lock); + + lt9611uxc->regmap = devm_regmap_init_i2c(client, &lt9611uxc_regmap_config); + if (IS_ERR(lt9611uxc->regmap)) { + dev_err(lt9611uxc->dev, "regmap i2c init failed\n"); + return PTR_ERR(lt9611uxc->regmap); + } + + ret = lt9611uxc_parse_dt(&client->dev, lt9611uxc); + if (ret) { + dev_err(dev, "failed to parse device tree\n"); + return ret; + } + + ret = lt9611uxc_gpio_init(lt9611uxc); + if (ret < 0) + goto err_of_put; + + ret = lt9611uxc_regulator_init(lt9611uxc); + if (ret < 0) + goto err_of_put; + + lt9611uxc_assert_5v(lt9611uxc); + + ret = lt9611uxc_regulator_enable(lt9611uxc); + if (ret) + goto err_of_put; + + lt9611uxc_reset(lt9611uxc); + + ret = lt9611uxc_read_device_rev(lt9611uxc); + if (ret) { + dev_err(dev, "failed to read chip rev\n"); + goto err_disable_regulators; + } + +retry: + ret = lt9611uxc_read_version(lt9611uxc); + if (ret < 0) { + dev_err(dev, "failed to read FW version\n"); + goto err_disable_regulators; + } else if (ret == 0) { + if (!fw_updated) { + fw_updated = true; + dev_err(dev, "FW version 0, enforcing firmware update\n"); + ret = lt9611uxc_firmware_update(lt9611uxc); + if (ret < 0) + goto err_disable_regulators; + else + goto retry; + } else { + dev_err(dev, "FW version 0, update failed\n"); + ret = -EOPNOTSUPP; + goto err_disable_regulators; + } + } else if (ret < 0x40) { + dev_info(dev, "FW version 0x%x, HPD not supported\n", ret); + } else { + lt9611uxc->hpd_supported = true; + } + lt9611uxc->fw_version = ret; + + init_waitqueue_head(&lt9611uxc->wq); + ret = devm_request_threaded_irq(dev, client->irq, NULL, + lt9611uxc_irq_thread_handler, + IRQF_ONESHOT, "lt9611uxc", lt9611uxc); + if (ret) { + dev_err(dev, "failed to request irq\n"); + goto err_disable_regulators; + } + + i2c_set_clientdata(client, lt9611uxc); + + lt9611uxc->bridge.funcs = &lt9611uxc_bridge_funcs; + lt9611uxc->bridge.of_node = client->dev.of_node; + lt9611uxc->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID; + if (lt9611uxc->hpd_supported) + lt9611uxc->bridge.ops |= DRM_BRIDGE_OP_HPD; + lt9611uxc->bridge.type = DRM_MODE_CONNECTOR_HDMIA; + + drm_bridge_add(&lt9611uxc->bridge); + + return lt9611uxc_audio_init(dev, lt9611uxc); + +err_disable_regulators: + regulator_bulk_disable(ARRAY_SIZE(lt9611uxc->supplies), lt9611uxc->supplies); + +err_of_put: + of_node_put(lt9611uxc->dsi1_node); + of_node_put(lt9611uxc->dsi0_node); + + return ret; +} + +static int lt9611uxc_remove(struct i2c_client *client) +{ + struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client); + + disable_irq(client->irq); + lt9611uxc_audio_exit(lt9611uxc); + drm_bridge_remove(&lt9611uxc->bridge); + + mutex_destroy(&lt9611uxc->ocm_lock); + + regulator_bulk_disable(ARRAY_SIZE(lt9611uxc->supplies), lt9611uxc->supplies); + + of_node_put(lt9611uxc->dsi1_node); + of_node_put(lt9611uxc->dsi0_node); + + return 0; +} + +static struct i2c_device_id lt9611uxc_id[] = { + { "lontium,lt9611uxc", 0 }, + { /* sentinel */ } +}; + +static const struct of_device_id lt9611uxc_match_table[] = { + { .compatible = "lontium,lt9611uxc" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, lt9611uxc_match_table); + +static struct i2c_driver lt9611uxc_driver = { + .driver = { + .name = "lt9611uxc", + .of_match_table = lt9611uxc_match_table, + .dev_groups = lt9611uxc_attr_groups, + }, + .probe = lt9611uxc_probe, + .remove = lt9611uxc_remove, + .id_table = lt9611uxc_id, +}; +module_i2c_driver(lt9611uxc_driver); + +MODULE_AUTHOR("Dmitry Baryshkov <dmitry.baryshkov@linaro.org>"); 
+MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c index f52ccffc1bd1..dcf579a4cf83 100644 --- a/drivers/gpu/drm/bridge/lvds-codec.c +++ b/drivers/gpu/drm/bridge/lvds-codec.c @@ -80,7 +80,6 @@ static int lvds_codec_probe(struct platform_device *pdev) struct device_node *panel_node; struct drm_panel *panel; struct lvds_codec *lvds_codec; - int ret; lvds_codec = devm_kzalloc(dev, sizeof(*lvds_codec), GFP_KERNEL); if (!lvds_codec) @@ -90,13 +89,9 @@ static int lvds_codec_probe(struct platform_device *pdev) lvds_codec->connector_type = (uintptr_t)of_device_get_match_data(dev); lvds_codec->vcc = devm_regulator_get(lvds_codec->dev, "power"); - if (IS_ERR(lvds_codec->vcc)) { - ret = PTR_ERR(lvds_codec->vcc); - if (ret != -EPROBE_DEFER) - dev_err(lvds_codec->dev, - "Unable to get \"vcc\" supply: %d\n", ret); - return ret; - } + if (IS_ERR(lvds_codec->vcc)) + return dev_err_probe(dev, PTR_ERR(lvds_codec->vcc), + "Unable to get \"vcc\" supply\n"); lvds_codec->powerdown_gpio = devm_gpiod_get_optional(dev, "powerdown", GPIOD_OUT_HIGH); diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c index 33fd33f953ec..89558e581530 100644 --- a/drivers/gpu/drm/bridge/sii902x.c +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -17,6 +17,7 @@ #include <linux/i2c.h> #include <linux/module.h> #include <linux/regmap.h> +#include <linux/regulator/consumer.h> #include <linux/clk.h> #include <drm/drm_atomic_helper.h> @@ -168,6 +169,7 @@ struct sii902x { struct drm_connector connector; struct gpio_desc *reset_gpio; struct i2c_mux_core *i2cmux; + struct regulator_bulk_data supplies[2]; /* * Mutex protects audio and video functions from interfering * each other, by keeping their i2c command sequences atomic. 
@@ -954,41 +956,13 @@ static const struct drm_bridge_timings default_sii902x_timings = { | DRM_BUS_FLAG_DE_HIGH, }; -static int sii902x_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int sii902x_init(struct sii902x *sii902x) { - struct device *dev = &client->dev; + struct device *dev = &sii902x->i2c->dev; unsigned int status = 0; - struct sii902x *sii902x; u8 chipid[4]; int ret; - ret = i2c_check_functionality(client->adapter, - I2C_FUNC_SMBUS_BYTE_DATA); - if (!ret) { - dev_err(dev, "I2C adapter not suitable\n"); - return -EIO; - } - - sii902x = devm_kzalloc(dev, sizeof(*sii902x), GFP_KERNEL); - if (!sii902x) - return -ENOMEM; - - sii902x->i2c = client; - sii902x->regmap = devm_regmap_init_i2c(client, &sii902x_regmap_config); - if (IS_ERR(sii902x->regmap)) - return PTR_ERR(sii902x->regmap); - - sii902x->reset_gpio = devm_gpiod_get_optional(dev, "reset", - GPIOD_OUT_LOW); - if (IS_ERR(sii902x->reset_gpio)) { - dev_err(dev, "Failed to retrieve/request reset gpio: %ld\n", - PTR_ERR(sii902x->reset_gpio)); - return PTR_ERR(sii902x->reset_gpio); - } - - mutex_init(&sii902x->mutex); - sii902x_reset(sii902x); ret = regmap_write(sii902x->regmap, SII902X_REG_TPI_RQB, 0x0); @@ -1012,11 +986,11 @@ static int sii902x_probe(struct i2c_client *client, regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status); regmap_write(sii902x->regmap, SII902X_INT_STATUS, status); - if (client->irq > 0) { + if (sii902x->i2c->irq > 0) { regmap_write(sii902x->regmap, SII902X_INT_ENABLE, SII902X_HOTPLUG_EVENT); - ret = devm_request_threaded_irq(dev, client->irq, NULL, + ret = devm_request_threaded_irq(dev, sii902x->i2c->irq, NULL, sii902x_interrupt, IRQF_ONESHOT, dev_name(dev), sii902x); @@ -1031,9 +1005,9 @@ static int sii902x_probe(struct i2c_client *client, sii902x_audio_codec_init(sii902x, dev); - i2c_set_clientdata(client, sii902x); + i2c_set_clientdata(sii902x->i2c, sii902x); - sii902x->i2cmux = i2c_mux_alloc(client->adapter, dev, + sii902x->i2cmux = i2c_mux_alloc(sii902x->i2c->adapter, dev, 1, 0, I2C_MUX_GATE, sii902x_i2c_bypass_select, sii902x_i2c_bypass_deselect); @@ -1044,6 +1018,62 @@ static int sii902x_probe(struct i2c_client *client, return i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0); } +static int sii902x_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct sii902x *sii902x; + int ret; + + ret = i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_BYTE_DATA); + if (!ret) { + dev_err(dev, "I2C adapter not suitable\n"); + return -EIO; + } + + sii902x = devm_kzalloc(dev, sizeof(*sii902x), GFP_KERNEL); + if (!sii902x) + return -ENOMEM; + + sii902x->i2c = client; + sii902x->regmap = devm_regmap_init_i2c(client, &sii902x_regmap_config); + if (IS_ERR(sii902x->regmap)) + return PTR_ERR(sii902x->regmap); + + sii902x->reset_gpio = devm_gpiod_get_optional(dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(sii902x->reset_gpio)) { + dev_err(dev, "Failed to retrieve/request reset gpio: %ld\n", + PTR_ERR(sii902x->reset_gpio)); + return PTR_ERR(sii902x->reset_gpio); + } + + mutex_init(&sii902x->mutex); + + sii902x->supplies[0].supply = "iovcc"; + sii902x->supplies[1].supply = "cvcc12"; + ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(sii902x->supplies), + sii902x->supplies); + if (ret < 0) + return ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(sii902x->supplies), + sii902x->supplies); + if (ret < 0) { + dev_err_probe(dev, ret, "Failed to enable supplies"); + return ret; + } + + ret = sii902x_init(sii902x); + if (ret < 0) { + 
regulator_bulk_disable(ARRAY_SIZE(sii902x->supplies), + sii902x->supplies); + } + + return ret; +} + static int sii902x_remove(struct i2c_client *client) { @@ -1051,6 +1081,8 @@ static int sii902x_remove(struct i2c_client *client) i2c_mux_del_adapters(sii902x->i2cmux); drm_bridge_remove(&sii902x->bridge); + regulator_bulk_disable(ARRAY_SIZE(sii902x->supplies), + sii902x->supplies); return 0; } diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 6ca1debd0f88..f27306c51e4d 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -17,6 +17,8 @@ #include <linux/regmap.h> #include <linux/regulator/consumer.h> +#include <asm/unaligned.h> + #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> @@ -72,6 +74,7 @@ #define SN_AUX_ADDR_19_16_REG 0x74 #define SN_AUX_ADDR_15_8_REG 0x75 #define SN_AUX_ADDR_7_0_REG 0x76 +#define SN_AUX_ADDR_MASK GENMASK(19, 0) #define SN_AUX_LENGTH_REG 0x77 #define SN_AUX_CMD_REG 0x78 #define AUX_CMD_SEND BIT(0) @@ -118,6 +121,7 @@ * @debugfs: Used for managing our debugfs. * @host_node: Remote DSI node. * @dsi: Our MIPI DSI source. + * @edid: Detected EDID of eDP panel. * @refclk: Our reference clock. * @panel: Our panel. * @enable_gpio: The GPIO we toggle to enable the bridge. @@ -143,6 +147,7 @@ struct ti_sn_bridge { struct drm_bridge bridge; struct drm_connector connector; struct dentry *debugfs; + struct edid *edid; struct device_node *host_node; struct mipi_dsi_device *dsi; struct clk *refclk; @@ -264,6 +269,23 @@ connector_to_ti_sn_bridge(struct drm_connector *connector) static int ti_sn_bridge_connector_get_modes(struct drm_connector *connector) { struct ti_sn_bridge *pdata = connector_to_ti_sn_bridge(connector); + struct edid *edid = pdata->edid; + int num, ret; + + if (!edid) { + pm_runtime_get_sync(pdata->dev); + edid = pdata->edid = drm_get_edid(connector, &pdata->aux.ddc); + pm_runtime_put(pdata->dev); + } + + if (edid && drm_edid_is_valid(edid)) { + ret = drm_connector_update_edid_property(connector, edid); + if (!ret) { + num = drm_add_edid_modes(connector, edid); + if (num) + return num; + } + } return drm_panel_get_modes(pdata->panel, connector); } @@ -856,13 +878,15 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { struct ti_sn_bridge *pdata = aux_to_ti_sn_bridge(aux); - u32 request = msg->request & ~DP_AUX_I2C_MOT; + u32 request = msg->request & ~(DP_AUX_I2C_MOT | DP_AUX_I2C_WRITE_STATUS_UPDATE); u32 request_val = AUX_CMD_REQ(msg->request); - u8 *buf = (u8 *)msg->buffer; + u8 *buf = msg->buffer; + unsigned int len = msg->size; unsigned int val; - int ret, i; + int ret; + u8 addr_len[SN_AUX_LENGTH_REG + 1 - SN_AUX_ADDR_19_16_REG]; - if (msg->size > SN_AUX_MAX_PAYLOAD_BYTES) + if (len > SN_AUX_MAX_PAYLOAD_BYTES) return -EINVAL; switch (request) { @@ -871,24 +895,21 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux, case DP_AUX_NATIVE_READ: case DP_AUX_I2C_READ: regmap_write(pdata->regmap, SN_AUX_CMD_REG, request_val); + /* Assume it's good */ + msg->reply = 0; break; default: return -EINVAL; } - regmap_write(pdata->regmap, SN_AUX_ADDR_19_16_REG, - (msg->address >> 16) & 0xF); - regmap_write(pdata->regmap, SN_AUX_ADDR_15_8_REG, - (msg->address >> 8) & 0xFF); - regmap_write(pdata->regmap, SN_AUX_ADDR_7_0_REG, msg->address & 0xFF); - - regmap_write(pdata->regmap, SN_AUX_LENGTH_REG, msg->size); + BUILD_BUG_ON(sizeof(addr_len) != sizeof(__be32)); + put_unaligned_be32((msg->address & 
SN_AUX_ADDR_MASK) << 8 | len, + addr_len); + regmap_bulk_write(pdata->regmap, SN_AUX_ADDR_19_16_REG, addr_len, + ARRAY_SIZE(addr_len)); - if (request == DP_AUX_NATIVE_WRITE || request == DP_AUX_I2C_WRITE) { - for (i = 0; i < msg->size; i++) - regmap_write(pdata->regmap, SN_AUX_WDATA_REG(i), - buf[i]); - } + if (request == DP_AUX_NATIVE_WRITE || request == DP_AUX_I2C_WRITE) + regmap_bulk_write(pdata->regmap, SN_AUX_WDATA_REG(0), buf, len); /* Clear old status bits before start so we don't get confused */ regmap_write(pdata->regmap, SN_AUX_CMD_STATUS_REG, @@ -898,35 +919,52 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux, regmap_write(pdata->regmap, SN_AUX_CMD_REG, request_val | AUX_CMD_SEND); + /* Zero delay loop because i2c transactions are slow already */ ret = regmap_read_poll_timeout(pdata->regmap, SN_AUX_CMD_REG, val, - !(val & AUX_CMD_SEND), 200, - 50 * 1000); + !(val & AUX_CMD_SEND), 0, 50 * 1000); if (ret) return ret; ret = regmap_read(pdata->regmap, SN_AUX_CMD_STATUS_REG, &val); if (ret) return ret; - else if ((val & AUX_IRQ_STATUS_NAT_I2C_FAIL) - || (val & AUX_IRQ_STATUS_AUX_RPLY_TOUT) - || (val & AUX_IRQ_STATUS_AUX_SHORT)) - return -ENXIO; - if (request == DP_AUX_NATIVE_WRITE || request == DP_AUX_I2C_WRITE) - return msg->size; + if (val & AUX_IRQ_STATUS_AUX_RPLY_TOUT) { + /* + * The hardware tried the message seven times per the DP spec + * but it hit a timeout. We ignore defers here because they're + * handled in hardware. + */ + return -ETIMEDOUT; + } - for (i = 0; i < msg->size; i++) { - unsigned int val; - ret = regmap_read(pdata->regmap, SN_AUX_RDATA_REG(i), - &val); + if (val & AUX_IRQ_STATUS_AUX_SHORT) { + ret = regmap_read(pdata->regmap, SN_AUX_LENGTH_REG, &len); if (ret) return ret; - - WARN_ON(val & ~0xFF); - buf[i] = (u8)(val & 0xFF); + } else if (val & AUX_IRQ_STATUS_NAT_I2C_FAIL) { + switch (request) { + case DP_AUX_I2C_WRITE: + case DP_AUX_I2C_READ: + msg->reply |= DP_AUX_I2C_REPLY_NACK; + break; + case DP_AUX_NATIVE_READ: + case DP_AUX_NATIVE_WRITE: + msg->reply |= DP_AUX_NATIVE_REPLY_NACK; + break; + } + return 0; } - return msg->size; + if (request == DP_AUX_NATIVE_WRITE || request == DP_AUX_I2C_WRITE || + len == 0) + return len; + + ret = regmap_bulk_read(pdata->regmap, SN_AUX_RDATA_REG(0), buf, len); + if (ret) + return ret; + + return len; } static int ti_sn_bridge_parse_dsi_host(struct ti_sn_bridge *pdata) @@ -1268,6 +1306,7 @@ static int ti_sn_bridge_remove(struct i2c_client *client) if (!pdata) return -EINVAL; + kfree(pdata->edid); ti_sn_debugfs_remove(pdata); of_node_put(pdata->host_node); diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c index 514cbf0eac75..e0e015243a60 100644 --- a/drivers/gpu/drm/bridge/ti-tpd12s015.c +++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c @@ -160,7 +160,7 @@ static int tpd12s015_probe(struct platform_device *pdev) /* Register the IRQ if the HPD GPIO is IRQ-capable. 
*/ tpd->hpd_irq = gpiod_to_irq(tpd->hpd_gpio); - if (tpd->hpd_irq) { + if (tpd->hpd_irq >= 0) { ret = devm_request_threaded_irq(&pdev->dev, tpd->hpd_irq, NULL, tpd12s015_hpd_isr, IRQF_TRIGGER_RISING | diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c index 495f47d23d87..fe573acf1067 100644 --- a/drivers/gpu/drm/drm_client.c +++ b/drivers/gpu/drm/drm_client.c @@ -3,6 +3,7 @@ * Copyright 2018 Noralf Trønnes */ +#include <linux/dma-buf-map.h> #include <linux/list.h> #include <linux/module.h> #include <linux/mutex.h> @@ -234,7 +235,7 @@ static void drm_client_buffer_delete(struct drm_client_buffer *buffer) { struct drm_device *dev = buffer->client->dev; - drm_gem_vunmap(buffer->gem, buffer->vaddr); + drm_gem_vunmap(buffer->gem, &buffer->map); if (buffer->gem) drm_gem_object_put(buffer->gem); @@ -290,24 +291,31 @@ err_delete: /** * drm_client_buffer_vmap - Map DRM client buffer into address space * @buffer: DRM client buffer + * @map_copy: Returns the mapped memory's address * * This function maps a client buffer into kernel address space. If the - * buffer is already mapped, it returns the mapping's address. + * buffer is already mapped, it returns the existing mapping's address. * * Client buffer mappings are not ref'counted. Each call to * drm_client_buffer_vmap() should be followed by a call to * drm_client_buffer_vunmap(); or the client buffer should be mapped * throughout its lifetime. * + * The returned address is a copy of the internal value. In contrast to + * other vmap interfaces, you don't need it for the client's vunmap + * function. So you can modify it at will during blit and draw operations. + * * Returns: - * The mapped memory's address + * 0 on success, or a negative errno code otherwise. */ -void *drm_client_buffer_vmap(struct drm_client_buffer *buffer) +int +drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct dma_buf_map *map_copy) { - void *vaddr; + struct dma_buf_map *map = &buffer->map; + int ret; - if (buffer->vaddr) - return buffer->vaddr; + if (dma_buf_map_is_set(map)) + goto out; /* * FIXME: The dependency on GEM here isn't required, we could @@ -317,13 +325,14 @@ void *drm_client_buffer_vmap(struct drm_client_buffer *buffer) * fd_install step out of the driver backend hooks, to make that * final step optional for internal users. 
*/ - vaddr = drm_gem_vmap(buffer->gem); - if (IS_ERR(vaddr)) - return vaddr; + ret = drm_gem_vmap(buffer->gem, map); + if (ret) + return ret; - buffer->vaddr = vaddr; +out: + *map_copy = *map; - return vaddr; + return 0; } EXPORT_SYMBOL(drm_client_buffer_vmap); @@ -337,8 +346,9 @@ EXPORT_SYMBOL(drm_client_buffer_vmap); */ void drm_client_buffer_vunmap(struct drm_client_buffer *buffer) { - drm_gem_vunmap(buffer->gem, buffer->vaddr); - buffer->vaddr = NULL; + struct dma_buf_map *map = &buffer->map; + + drm_gem_vunmap(buffer->gem, map); } EXPORT_SYMBOL(drm_client_buffer_vunmap); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 153b6065ba29..e87542533640 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -3686,10 +3686,9 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms WARN_ON(mgr->mst_primary); /* get dpcd info */ - ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd); - if (ret < 0) { - drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n", - mgr->aux->name, ret); + ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); + if (ret != DP_RECEIVER_CAP_SIZE) { + DRM_DEBUG_KMS("failed to read DPCD\n"); goto out_unlock; } diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index cd162d406078..734303802bc3 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -284,7 +284,7 @@ void drm_minor_release(struct drm_minor *minor) * struct clk *pclk; * }; * - * static struct drm_driver driver_drm_driver = { + * static const struct drm_driver driver_drm_driver = { * [...] * }; * @@ -574,7 +574,7 @@ static void drm_dev_init_release(struct drm_device *dev, void *res) } static int drm_dev_init(struct drm_device *dev, - struct drm_driver *driver, + const struct drm_driver *driver, struct device *parent) { int ret; @@ -589,7 +589,11 @@ static int drm_dev_init(struct drm_device *dev, kref_init(&dev->ref); dev->dev = get_device(parent); +#ifdef CONFIG_DRM_LEGACY + dev->driver = (struct drm_driver *)driver; +#else dev->driver = driver; +#endif INIT_LIST_HEAD(&dev->managed.resources); spin_lock_init(&dev->managed.lock); @@ -663,7 +667,7 @@ static void devm_drm_dev_init_release(void *data) static int devm_drm_dev_init(struct device *parent, struct drm_device *dev, - struct drm_driver *driver) + const struct drm_driver *driver) { int ret; @@ -678,7 +682,8 @@ static int devm_drm_dev_init(struct device *parent, return ret; } -void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver, +void *__devm_drm_dev_alloc(struct device *parent, + const struct drm_driver *driver, size_t size, size_t offset) { void *container; @@ -713,7 +718,7 @@ EXPORT_SYMBOL(__devm_drm_dev_alloc); * RETURNS: * Pointer to new DRM device, or ERR_PTR on failure. 
*/ -struct drm_device *drm_dev_alloc(struct drm_driver *driver, +struct drm_device *drm_dev_alloc(const struct drm_driver *driver, struct device *parent) { struct drm_device *dev; @@ -858,7 +863,7 @@ static void remove_compat_control_link(struct drm_device *dev) */ int drm_dev_register(struct drm_device *dev, unsigned long flags) { - struct drm_driver *driver = dev->driver; + const struct drm_driver *driver = dev->driver; int ret; if (!driver->load) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index c7363af731b4..74f5a3197214 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -3114,6 +3114,8 @@ static int drm_cvt_modes(struct drm_connector *connector, case 0x0c: width = height * 15 / 9; break; + default: + unreachable(); } for (j = 1; j < 5; j++) { diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 92e0db30fdf7..25edf670867c 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -372,24 +372,22 @@ static void drm_fb_helper_resume_worker(struct work_struct *work) } static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper, - struct drm_clip_rect *clip) + struct drm_clip_rect *clip, + struct dma_buf_map *dst) { struct drm_framebuffer *fb = fb_helper->fb; unsigned int cpp = fb->format->cpp[0]; size_t offset = clip->y1 * fb->pitches[0] + clip->x1 * cpp; void *src = fb_helper->fbdev->screen_buffer + offset; - void *dst = fb_helper->buffer->vaddr + offset; size_t len = (clip->x2 - clip->x1) * cpp; unsigned int y; - for (y = clip->y1; y < clip->y2; y++) { - if (!fb_helper->dev->mode_config.fbdev_use_iomem) - memcpy(dst, src, len); - else - memcpy_toio((void __iomem *)dst, src, len); + dma_buf_map_incr(dst, offset); /* go to first pixel within clip rect */ + for (y = clip->y1; y < clip->y2; y++) { + dma_buf_map_memcpy_to(dst, src, len); + dma_buf_map_incr(dst, fb->pitches[0]); src += fb->pitches[0]; - dst += fb->pitches[0]; } } @@ -400,7 +398,8 @@ static void drm_fb_helper_dirty_work(struct work_struct *work) struct drm_clip_rect *clip = &helper->dirty_clip; struct drm_clip_rect clip_copy; unsigned long flags; - void *vaddr; + struct dma_buf_map map; + int ret; spin_lock_irqsave(&helper->dirty_lock, flags); clip_copy = *clip; @@ -413,11 +412,12 @@ static void drm_fb_helper_dirty_work(struct work_struct *work) /* Generic fbdev uses a shadow buffer */ if (helper->buffer) { - vaddr = drm_client_buffer_vmap(helper->buffer); - if (IS_ERR(vaddr)) + ret = drm_client_buffer_vmap(helper->buffer, &map); + if (ret) return; - drm_fb_helper_dirty_blit_real(helper, &clip_copy); + drm_fb_helper_dirty_blit_real(helper, &clip_copy, &map); } + if (helper->fb->funcs->dirty) helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1); @@ -2026,6 +2026,199 @@ static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) return -ENODEV; } +static bool drm_fbdev_use_iomem(struct fb_info *info) +{ + struct drm_fb_helper *fb_helper = info->par; + struct drm_client_buffer *buffer = fb_helper->buffer; + + return !drm_fbdev_use_shadow_fb(fb_helper) && buffer->map.is_iomem; +} + +static ssize_t fb_read_screen_base(struct fb_info *info, char __user *buf, size_t count, + loff_t pos) +{ + const char __iomem *src = info->screen_base + pos; + size_t alloc_size = min_t(size_t, count, PAGE_SIZE); + ssize_t ret = 0; + int err = 0; + char *tmp; + + tmp = kmalloc(alloc_size, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + while (count) { + size_t c = min_t(size_t, count, alloc_size); + + 
memcpy_fromio(tmp, src, c); + if (copy_to_user(buf, tmp, c)) { + err = -EFAULT; + break; + } + + src += c; + buf += c; + ret += c; + count -= c; + } + + kfree(tmp); + + return ret ? ret : err; +} + +static ssize_t fb_read_screen_buffer(struct fb_info *info, char __user *buf, size_t count, + loff_t pos) +{ + const char *src = info->screen_buffer + pos; + + if (copy_to_user(buf, src, count)) + return -EFAULT; + + return count; +} + +static ssize_t drm_fbdev_fb_read(struct fb_info *info, char __user *buf, + size_t count, loff_t *ppos) +{ + loff_t pos = *ppos; + size_t total_size; + ssize_t ret; + + if (info->screen_size) + total_size = info->screen_size; + else + total_size = info->fix.smem_len; + + if (pos >= total_size) + return 0; + if (count >= total_size) + count = total_size; + if (total_size - count < pos) + count = total_size - pos; + + if (drm_fbdev_use_iomem(info)) + ret = fb_read_screen_base(info, buf, count, pos); + else + ret = fb_read_screen_buffer(info, buf, count, pos); + + if (ret > 0) + *ppos += ret; + + return ret; +} + +static ssize_t fb_write_screen_base(struct fb_info *info, const char __user *buf, size_t count, + loff_t pos) +{ + char __iomem *dst = info->screen_base + pos; + size_t alloc_size = min_t(size_t, count, PAGE_SIZE); + ssize_t ret = 0; + int err = 0; + u8 *tmp; + + tmp = kmalloc(alloc_size, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + while (count) { + size_t c = min_t(size_t, count, alloc_size); + + if (copy_from_user(tmp, buf, c)) { + err = -EFAULT; + break; + } + memcpy_toio(dst, tmp, c); + + dst += c; + buf += c; + ret += c; + count -= c; + } + + kfree(tmp); + + return ret ? ret : err; +} + +static ssize_t fb_write_screen_buffer(struct fb_info *info, const char __user *buf, size_t count, + loff_t pos) +{ + char *dst = info->screen_buffer + pos; + + if (copy_from_user(dst, buf, count)) + return -EFAULT; + + return count; +} + +static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf, + size_t count, loff_t *ppos) +{ + loff_t pos = *ppos; + size_t total_size; + ssize_t ret; + int err = 0; + + if (info->screen_size) + total_size = info->screen_size; + else + total_size = info->fix.smem_len; + + if (pos > total_size) + return -EFBIG; + if (count > total_size) { + err = -EFBIG; + count = total_size; + } + if (total_size - count < pos) { + if (!err) + err = -ENOSPC; + count = total_size - pos; + } + + /* + * Copy to framebuffer even if we already logged an error. Emulates + * the behavior of the original fbdev implementation. + */ + if (drm_fbdev_use_iomem(info)) + ret = fb_write_screen_base(info, buf, count, pos); + else + ret = fb_write_screen_buffer(info, buf, count, pos); + + if (ret > 0) + *ppos += ret; + + return ret ? 
ret : err; +} + +static void drm_fbdev_fb_fillrect(struct fb_info *info, + const struct fb_fillrect *rect) +{ + if (drm_fbdev_use_iomem(info)) + drm_fb_helper_cfb_fillrect(info, rect); + else + drm_fb_helper_sys_fillrect(info, rect); +} + +static void drm_fbdev_fb_copyarea(struct fb_info *info, + const struct fb_copyarea *area) +{ + if (drm_fbdev_use_iomem(info)) + drm_fb_helper_cfb_copyarea(info, area); + else + drm_fb_helper_sys_copyarea(info, area); +} + +static void drm_fbdev_fb_imageblit(struct fb_info *info, + const struct fb_image *image) +{ + if (drm_fbdev_use_iomem(info)) + drm_fb_helper_cfb_imageblit(info, image); + else + drm_fb_helper_sys_imageblit(info, image); +} + static const struct fb_ops drm_fbdev_fb_ops = { .owner = THIS_MODULE, DRM_FB_HELPER_DEFAULT_OPS, @@ -2033,11 +2226,11 @@ static const struct fb_ops drm_fbdev_fb_ops = { .fb_release = drm_fbdev_fb_release, .fb_destroy = drm_fbdev_fb_destroy, .fb_mmap = drm_fbdev_fb_mmap, - .fb_read = drm_fb_helper_sys_read, - .fb_write = drm_fb_helper_sys_write, - .fb_fillrect = drm_fb_helper_sys_fillrect, - .fb_copyarea = drm_fb_helper_sys_copyarea, - .fb_imageblit = drm_fb_helper_sys_imageblit, + .fb_read = drm_fbdev_fb_read, + .fb_write = drm_fbdev_fb_write, + .fb_fillrect = drm_fbdev_fb_fillrect, + .fb_copyarea = drm_fbdev_fb_copyarea, + .fb_imageblit = drm_fbdev_fb_imageblit, }; static struct fb_deferred_io drm_fbdev_defio = { @@ -2060,7 +2253,8 @@ static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, struct drm_framebuffer *fb; struct fb_info *fbi; u32 format; - void *vaddr; + struct dma_buf_map map; + int ret; drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n", sizes->surface_width, sizes->surface_height, @@ -2096,14 +2290,22 @@ static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, fb_deferred_io_init(fbi); } else { /* buffer is mapped for HW framebuffer */ - vaddr = drm_client_buffer_vmap(fb_helper->buffer); - if (IS_ERR(vaddr)) - return PTR_ERR(vaddr); + ret = drm_client_buffer_vmap(fb_helper->buffer, &map); + if (ret) + return ret; + if (map.is_iomem) + fbi->screen_base = map.vaddr_iomem; + else + fbi->screen_buffer = map.vaddr; - fbi->screen_buffer = vaddr; - /* Shamelessly leak the physical address to user-space */ + /* + * Shamelessly leak the physical address to user-space. As + * page_to_phys() is undefined for I/O memory, warn in this + * case. 
+ */ #if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) - if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0) + if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0 && + !drm_WARN_ON_ONCE(dev, map.is_iomem)) fbi->fix.smem_start = page_to_phys(virt_to_page(fbi->screen_buffer)); #endif diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 0ac4566ae3f4..b50380fa80ce 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -258,9 +258,11 @@ void drm_file_free(struct drm_file *file) (long)old_encode_dev(file->minor->kdev->devt), atomic_read(&dev->open_count)); +#ifdef CONFIG_DRM_LEGACY if (drm_core_check_feature(dev, DRIVER_LEGACY) && dev->driver->preclose) dev->driver->preclose(dev, file); +#endif if (drm_core_check_feature(dev, DRIVER_LEGACY)) drm_legacy_lock_release(dev, file->filp); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index d586068f5509..eb2d23e04be9 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -36,6 +36,7 @@ #include <linux/pagemap.h> #include <linux/shmem_fs.h> #include <linux/dma-buf.h> +#include <linux/dma-buf-map.h> #include <linux/mem_encrypt.h> #include <linux/pagevec.h> @@ -1205,28 +1206,32 @@ void drm_gem_unpin(struct drm_gem_object *obj) obj->funcs->unpin(obj); } -void *drm_gem_vmap(struct drm_gem_object *obj) +int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) { - void *vaddr; + int ret; - if (obj->funcs->vmap) - vaddr = obj->funcs->vmap(obj); - else - vaddr = ERR_PTR(-EOPNOTSUPP); + if (!obj->funcs->vmap) + return -EOPNOTSUPP; - if (!vaddr) - vaddr = ERR_PTR(-ENOMEM); + ret = obj->funcs->vmap(obj, map); + if (ret) + return ret; + else if (dma_buf_map_is_null(map)) + return -ENOMEM; - return vaddr; + return 0; } -void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr) +void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) { - if (!vaddr) + if (dma_buf_map_is_null(map)) return; if (obj->funcs->vunmap) - obj->funcs->vunmap(obj, vaddr); + obj->funcs->vunmap(obj, map); + + /* Always set the mapping to NULL. Callers may rely on this. */ + dma_buf_map_clear(map); } /** diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index 2165633c9b9e..4d5c1d86b022 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -33,6 +33,14 @@ * display drivers that are unable to map scattered buffers via an IOMMU. */ +static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = { + .free = drm_gem_cma_free_object, + .print_info = drm_gem_cma_print_info, + .get_sg_table = drm_gem_cma_prime_get_sg_table, + .vmap = drm_gem_cma_prime_vmap, + .vm_ops = &drm_gem_cma_vm_ops, +}; + /** * __drm_gem_cma_create - Create a GEM CMA object without allocating memory * @drm: DRM device @@ -58,6 +66,10 @@ __drm_gem_cma_create(struct drm_device *drm, size_t size) gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL); if (!gem_obj) return ERR_PTR(-ENOMEM); + + if (!gem_obj->funcs) + gem_obj->funcs = &drm_gem_cma_default_funcs; + cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base); ret = drm_gem_object_init(drm, gem_obj, size); @@ -519,6 +531,8 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap); * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual * address space * @obj: GEM object + * @map: Returns the kernel virtual address of the CMA GEM object's backing + * store. * * This function maps a buffer exported via DRM PRIME into the kernel's * virtual address space. 
Since the CMA buffers are already mapped into the @@ -527,67 +541,17 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap); * driver's &drm_gem_object_funcs.vmap callback. * * Returns: - * The kernel virtual address of the CMA GEM object's backing store. + * 0 on success, or a negative error code otherwise. */ -void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj) +int drm_gem_cma_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) { struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); - return cma_obj->vaddr; -} -EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap); - -/** - * drm_gem_cma_prime_vunmap - unmap a CMA GEM object from the kernel's virtual - * address space - * @obj: GEM object - * @vaddr: kernel virtual address where the CMA GEM object was mapped - * - * This function removes a buffer exported via DRM PRIME from the kernel's - * virtual address space. This is a no-op because CMA buffers cannot be - * unmapped from kernel space. Drivers using the CMA helpers should set this - * as their &drm_gem_object_funcs.vunmap callback. - */ -void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr) -{ - /* Nothing to do */ -} -EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap); - -static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = { - .free = drm_gem_cma_free_object, - .print_info = drm_gem_cma_print_info, - .get_sg_table = drm_gem_cma_prime_get_sg_table, - .vmap = drm_gem_cma_prime_vmap, - .vm_ops = &drm_gem_cma_vm_ops, -}; + dma_buf_map_set_vaddr(map, cma_obj->vaddr); -/** - * drm_gem_cma_create_object_default_funcs - Create a CMA GEM object with a - * default function table - * @dev: DRM device - * @size: Size of the object to allocate - * - * This sets the GEM object functions to the default CMA helper functions. - * This function can be used as the &drm_driver.gem_create_object callback. - * - * Returns: - * A pointer to a allocated GEM object or an error pointer on failure. 
- */ -struct drm_gem_object * -drm_gem_cma_create_object_default_funcs(struct drm_device *dev, size_t size) -{ - struct drm_gem_cma_object *cma_obj; - - cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL); - if (!cma_obj) - return NULL; - - cma_obj->base.funcs = &drm_gem_cma_default_funcs; - - return &cma_obj->base; + return 0; } -EXPORT_SYMBOL(drm_gem_cma_create_object_default_funcs); +EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap); /** * drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 8233bda4692f..499189c48f0b 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -258,19 +258,25 @@ void drm_gem_shmem_unpin(struct drm_gem_object *obj) } EXPORT_SYMBOL(drm_gem_shmem_unpin); -static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem) +static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map) { struct drm_gem_object *obj = &shmem->base; - struct dma_buf_map map; int ret = 0; - if (shmem->vmap_use_count++ > 0) - return shmem->vaddr; + if (shmem->vmap_use_count++ > 0) { + dma_buf_map_set_vaddr(map, shmem->vaddr); + return 0; + } if (obj->import_attach) { - ret = dma_buf_vmap(obj->import_attach->dmabuf, &map); - if (!ret) - shmem->vaddr = map.vaddr; + ret = dma_buf_vmap(obj->import_attach->dmabuf, map); + if (!ret) { + if (WARN_ON(map->is_iomem)) { + ret = -EIO; + goto err_put_pages; + } + shmem->vaddr = map->vaddr; + } } else { pgprot_t prot = PAGE_KERNEL; @@ -284,6 +290,8 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem) VM_MAP, prot); if (!shmem->vaddr) ret = -ENOMEM; + else + dma_buf_map_set_vaddr(map, shmem->vaddr); } if (ret) { @@ -291,7 +299,7 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem) goto err_put_pages; } - return shmem->vaddr; + return 0; err_put_pages: if (!obj->import_attach) @@ -299,12 +307,14 @@ err_put_pages: err_zero_use: shmem->vmap_use_count = 0; - return ERR_PTR(ret); + return ret; } /* * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object * @shmem: shmem GEM object + * @map: Returns the kernel virtual address of the SHMEM GEM object's backing + * store. * * This function makes sure that a contiguous kernel virtual address mapping * exists for the buffer backing the shmem GEM object. @@ -318,26 +328,25 @@ err_zero_use: * Returns: * 0 on success or a negative error code on failure. 
*/ -void *drm_gem_shmem_vmap(struct drm_gem_object *obj) +int drm_gem_shmem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - void *vaddr; int ret; ret = mutex_lock_interruptible(&shmem->vmap_lock); if (ret) - return ERR_PTR(ret); - vaddr = drm_gem_shmem_vmap_locked(shmem); + return ret; + ret = drm_gem_shmem_vmap_locked(shmem, map); mutex_unlock(&shmem->vmap_lock); - return vaddr; + return ret; } EXPORT_SYMBOL(drm_gem_shmem_vmap); -static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem) +static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, + struct dma_buf_map *map) { struct drm_gem_object *obj = &shmem->base; - struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(shmem->vaddr); if (WARN_ON_ONCE(!shmem->vmap_use_count)) return; @@ -346,7 +355,7 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem) return; if (obj->import_attach) - dma_buf_vunmap(obj->import_attach->dmabuf, &map); + dma_buf_vunmap(obj->import_attach->dmabuf, map); else vunmap(shmem->vaddr); @@ -357,6 +366,7 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem) /* * drm_gem_shmem_vunmap - Unmap a virtual mapping fo a shmem GEM object * @shmem: shmem GEM object + * @map: Kernel virtual address where the SHMEM GEM object was mapped * * This function cleans up a kernel virtual address mapping acquired by * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to @@ -366,12 +376,12 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem) * also be called by drivers directly, in which case it will hide the * differences between dma-buf imported and natively allocated objects. */ -void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr) +void drm_gem_shmem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); mutex_lock(&shmem->vmap_lock); - drm_gem_shmem_vunmap_locked(shmem); + drm_gem_shmem_vunmap_locked(shmem, map); mutex_unlock(&shmem->vmap_lock); } EXPORT_SYMBOL(drm_gem_shmem_vunmap); diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c index 0e4fb9ba43ad..de28720757af 100644 --- a/drivers/gpu/drm/drm_gem_ttm_helper.c +++ b/drivers/gpu/drm/drm_gem_ttm_helper.c @@ -50,6 +50,43 @@ void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent, EXPORT_SYMBOL(drm_gem_ttm_print_info); /** + * drm_gem_ttm_vmap() - vmap &ttm_buffer_object + * @gem: GEM object. + * @map: [out] returns the dma-buf mapping. + * + * Maps a GEM object with ttm_bo_vmap(). This function can be used as + * &drm_gem_object_funcs.vmap callback. + * + * Returns: + * 0 on success, or a negative errno code otherwise. + */ +int drm_gem_ttm_vmap(struct drm_gem_object *gem, + struct dma_buf_map *map) +{ + struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem); + + return ttm_bo_vmap(bo, map); +} +EXPORT_SYMBOL(drm_gem_ttm_vmap); + +/** + * drm_gem_ttm_vunmap() - vunmap &ttm_buffer_object + * @gem: GEM object. + * @map: dma-buf mapping. + * + * Unmaps a GEM object with ttm_bo_vunmap(). This function can be used as + * &drm_gem_object_funcs.vmap callback. + */ +void drm_gem_ttm_vunmap(struct drm_gem_object *gem, + struct dma_buf_map *map) +{ + struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem); + + ttm_bo_vunmap(bo, map); +} +EXPORT_SYMBOL(drm_gem_ttm_vunmap); + +/** * drm_gem_ttm_mmap() - mmap &ttm_buffer_object * @gem: GEM object. 
* @vma: vm area. diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 16d68c04ea5d..889a06696f7e 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later +#include <linux/dma-buf-map.h> #include <linux/module.h> #include <drm/drm_debugfs.h> @@ -112,8 +113,8 @@ static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo) * up; only release the GEM object. */ - WARN_ON(gbo->kmap_use_count); - WARN_ON(gbo->kmap.virtual); + WARN_ON(gbo->vmap_use_count); + WARN_ON(dma_buf_map_is_set(&gbo->map)); drm_gem_object_release(&gbo->bo.base); } @@ -378,39 +379,37 @@ int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo) } EXPORT_SYMBOL(drm_gem_vram_unpin); -static void *drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo, - bool map, bool *is_iomem) +static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo, + struct dma_buf_map *map) { int ret; - struct ttm_bo_kmap_obj *kmap = &gbo->kmap; - if (gbo->kmap_use_count > 0) + if (gbo->vmap_use_count > 0) goto out; - if (kmap->virtual || !map) - goto out; - - ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap); + ret = ttm_bo_vmap(&gbo->bo, &gbo->map); if (ret) - return ERR_PTR(ret); + return ret; out: - if (!kmap->virtual) { - if (is_iomem) - *is_iomem = false; - return NULL; /* not mapped; don't increment ref */ - } - ++gbo->kmap_use_count; - if (is_iomem) - return ttm_kmap_obj_virtual(kmap, is_iomem); - return kmap->virtual; + ++gbo->vmap_use_count; + *map = gbo->map; + + return 0; } -static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo) +static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo, + struct dma_buf_map *map) { - if (WARN_ON_ONCE(!gbo->kmap_use_count)) + struct drm_device *dev = gbo->bo.base.dev; + + if (drm_WARN_ON_ONCE(dev, !gbo->vmap_use_count)) return; - if (--gbo->kmap_use_count > 0) + + if (drm_WARN_ON_ONCE(dev, !dma_buf_map_is_equal(&gbo->map, map))) + return; /* BUG: map not mapped from this BO */ + + if (--gbo->vmap_use_count > 0) return; /* @@ -424,7 +423,9 @@ static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo) /** * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address * space - * @gbo: The GEM VRAM object to map + * @gbo: The GEM VRAM object to map + * @map: Returns the kernel virtual address of the VRAM GEM object's backing + * store. * * The vmap function pins a GEM VRAM object to its current location, either * system or video memory, and maps its buffer into kernel address space. @@ -433,48 +434,44 @@ static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo) * unmap and unpin the GEM VRAM object. * * Returns: - * The buffer's virtual address on success, or - * an ERR_PTR()-encoded error code otherwise. + * 0 on success, or a negative error code otherwise. 
*/ -void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo) +int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map) { int ret; - void *base; ret = ttm_bo_reserve(&gbo->bo, true, false, NULL); if (ret) - return ERR_PTR(ret); + return ret; ret = drm_gem_vram_pin_locked(gbo, 0); if (ret) goto err_ttm_bo_unreserve; - base = drm_gem_vram_kmap_locked(gbo, true, NULL); - if (IS_ERR(base)) { - ret = PTR_ERR(base); + ret = drm_gem_vram_kmap_locked(gbo, map); + if (ret) goto err_drm_gem_vram_unpin_locked; - } ttm_bo_unreserve(&gbo->bo); - return base; + return 0; err_drm_gem_vram_unpin_locked: drm_gem_vram_unpin_locked(gbo); err_ttm_bo_unreserve: ttm_bo_unreserve(&gbo->bo); - return ERR_PTR(ret); + return ret; } EXPORT_SYMBOL(drm_gem_vram_vmap); /** * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object - * @gbo: The GEM VRAM object to unmap - * @vaddr: The mapping's base address as returned by drm_gem_vram_vmap() + * @gbo: The GEM VRAM object to unmap + * @map: Kernel virtual address where the VRAM GEM object was mapped * * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See * the documentation for drm_gem_vram_vmap() for more information. */ -void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr) +void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map) { int ret; @@ -482,7 +479,7 @@ void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr) if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret)) return; - drm_gem_vram_kunmap_locked(gbo); + drm_gem_vram_kunmap_locked(gbo, map); drm_gem_vram_unpin_locked(gbo); ttm_bo_unreserve(&gbo->bo); @@ -573,15 +570,13 @@ static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo, bool evict, struct ttm_resource *new_mem) { - struct ttm_bo_kmap_obj *kmap = &gbo->kmap; + struct ttm_buffer_object *bo = &gbo->bo; + struct drm_device *dev = bo->base.dev; - if (WARN_ON_ONCE(gbo->kmap_use_count)) + if (drm_WARN_ON_ONCE(dev, gbo->vmap_use_count)) return; - if (!kmap->virtual) - return; - ttm_bo_kunmap(kmap); - kmap->virtual = NULL; + ttm_bo_vunmap(bo, &gbo->map); } static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo, @@ -847,37 +842,33 @@ static void drm_gem_vram_object_unpin(struct drm_gem_object *gem) } /** - * drm_gem_vram_object_vmap() - \ - Implements &struct drm_gem_object_funcs.vmap - * @gem: The GEM object to map + * drm_gem_vram_object_vmap() - + * Implements &struct drm_gem_object_funcs.vmap + * @gem: The GEM object to map + * @map: Returns the kernel virtual address of the VRAM GEM object's backing + * store. * * Returns: - * The buffers virtual address on success, or - * NULL otherwise. + * 0 on success, or a negative error code otherwise. 
*/ -static void *drm_gem_vram_object_vmap(struct drm_gem_object *gem) +static int drm_gem_vram_object_vmap(struct drm_gem_object *gem, struct dma_buf_map *map) { struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); - void *base; - base = drm_gem_vram_vmap(gbo); - if (IS_ERR(base)) - return NULL; - return base; + return drm_gem_vram_vmap(gbo, map); } /** - * drm_gem_vram_object_vunmap() - \ - Implements &struct drm_gem_object_funcs.vunmap - * @gem: The GEM object to unmap - * @vaddr: The mapping's base address + * drm_gem_vram_object_vunmap() - + * Implements &struct drm_gem_object_funcs.vunmap + * @gem: The GEM object to unmap + * @map: Kernel virtual address where the VRAM GEM object was mapped */ -static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem, - void *vaddr) +static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem, struct dma_buf_map *map) { struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); - drm_gem_vram_vunmap(gbo, vaddr); + drm_gem_vram_vunmap(gbo, map); } /* @@ -964,7 +955,8 @@ static void bo_driver_delete_mem_notify(struct ttm_buffer_object *bo) static int bo_driver_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_resource *new_mem) + struct ttm_resource *new_mem, + struct ttm_place *hop) { struct drm_gem_vram_object *gbo; diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 2bdac3557765..81d386b5b92a 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -33,6 +33,7 @@ struct dentry; struct dma_buf; +struct dma_buf_map; struct drm_connector; struct drm_crtc; struct drm_framebuffer; @@ -187,8 +188,8 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent, int drm_gem_pin(struct drm_gem_object *obj); void drm_gem_unpin(struct drm_gem_object *obj); -void *drm_gem_vmap(struct drm_gem_object *obj); -void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr); +int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); +void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); /* drm_debugfs.c drm_debugfs_crc.c */ #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index a7b61c2d9190..7db55fce35d8 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -667,21 +667,15 @@ EXPORT_SYMBOL(drm_gem_unmap_dma_buf); * * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling. + * The kernel virtual address is returned in map. * - * Returns the kernel virtual address or NULL on failure. + * Returns 0 on success or a negative errno code otherwise. 
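 *
 * The mapping may point at I/O memory (for example VRAM), so importers
 * should go through the union in &struct dma_buf_map rather than assuming
 * a plain pointer. An illustrative sketch, with dst and len as placeholder
 * names:
 *
 *	if (map->is_iomem)
 *		memcpy_fromio(dst, map->vaddr_iomem, len);
 *	else
 *		memcpy(dst, map->vaddr, len);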
*/ int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map) { struct drm_gem_object *obj = dma_buf->priv; - void *vaddr; - vaddr = drm_gem_vmap(obj); - if (IS_ERR(vaddr)) - return PTR_ERR(vaddr); - - dma_buf_map_set_vaddr(map, vaddr); - - return 0; + return drm_gem_vmap(obj, map); } EXPORT_SYMBOL(drm_gem_dmabuf_vmap); @@ -697,7 +691,7 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map) { struct drm_gem_object *obj = dma_buf->priv; - drm_gem_vunmap(obj, map->vaddr); + drm_gem_vunmap(obj, map); } EXPORT_SYMBOL(drm_gem_dmabuf_vunmap); diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index f135b79593dd..d30e2f2b8f3c 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -209,9 +209,12 @@ static u32 __get_vblank_counter(struct drm_device *dev, unsigned int pipe) if (crtc->funcs->get_vblank_counter) return crtc->funcs->get_vblank_counter(crtc); - } else if (dev->driver->get_vblank_counter) { + } +#ifdef CONFIG_DRM_LEGACY + else if (dev->driver->get_vblank_counter) { return dev->driver->get_vblank_counter(dev, pipe); } +#endif return drm_vblank_no_hw_counter(dev, pipe); } @@ -429,9 +432,12 @@ static void __disable_vblank(struct drm_device *dev, unsigned int pipe) if (crtc->funcs->disable_vblank) crtc->funcs->disable_vblank(crtc); - } else { + } +#ifdef CONFIG_DRM_LEGACY + else { dev->driver->disable_vblank(dev, pipe); } +#endif } /* @@ -1096,9 +1102,12 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe) if (crtc->funcs->enable_vblank) return crtc->funcs->enable_vblank(crtc); - } else if (dev->driver->enable_vblank) { + } +#ifdef CONFIG_DRM_LEGACY + else if (dev->driver->enable_vblank) { return dev->driver->enable_vblank(dev, pipe); } +#endif return -EINVAL; } diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index 1a6369633789..6d5a03b32238 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -70,9 +70,6 @@ static pgprot_t drm_io_prot(struct drm_local_map *map, { pgprot_t tmp = vm_get_page_prot(vma->vm_flags); - /* We don't want graphics memory to be mapped encrypted */ - tmp = pgprot_decrypted(tmp); - #if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \ defined(__mips__) if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING)) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index aa270b79e585..7604e3c07973 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -480,7 +480,7 @@ static const struct file_operations fops = { .mmap = etnaviv_gem_mmap, }; -static struct drm_driver etnaviv_drm_driver = { +static const struct drm_driver etnaviv_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_RENDER, .open = etnaviv_open, .postclose = etnaviv_postclose, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h index 914f0867ff71..f5be627e1de0 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h @@ -51,8 +51,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma); int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset); struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj); -void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj); -void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); +int etnaviv_gem_prime_vmap(struct 
drm_gem_object *obj, struct dma_buf_map *map); int etnaviv_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 67d9a2b9ea6a..bbd235473645 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -571,7 +571,6 @@ static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = { .unpin = etnaviv_gem_prime_unpin, .get_sg_table = etnaviv_gem_prime_get_sg_table, .vmap = etnaviv_gem_prime_vmap, - .vunmap = etnaviv_gem_prime_vunmap, .vm_ops = &vm_ops, }; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index 135fbff6fecf..d9bd83203a15 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -22,14 +22,16 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj) return drm_prime_pages_to_sg(obj->dev, etnaviv_obj->pages, npages); } -void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj) +int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) { - return etnaviv_gem_vmap(obj); -} + void *vaddr; -void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) -{ - /* TODO msm_gem_vunmap() */ + vaddr = etnaviv_gem_vmap(obj); + if (!vaddr) + return -ENOMEM; + dma_buf_map_set_vaddr(map, vaddr); + + return 0; } int etnaviv_gem_prime_mmap(struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index fe46680ca208..e60257f1f24b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -113,7 +113,7 @@ static const struct file_operations exynos_drm_driver_fops = { .release = drm_release, }; -static struct drm_driver exynos_drm_driver = { +static const struct drm_driver exynos_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC | DRIVER_RENDER, .open = exynos_drm_open, diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 4afbf5109cbf..4396224227d1 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -135,8 +135,6 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = { static const struct drm_gem_object_funcs exynos_drm_gem_object_funcs = { .free = exynos_drm_gem_free_object, .get_sg_table = exynos_drm_gem_prime_get_sg_table, - .vmap = exynos_drm_gem_prime_vmap, - .vunmap = exynos_drm_gem_prime_vunmap, .vm_ops = &exynos_drm_gem_vm_ops, }; @@ -469,16 +467,6 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, return &exynos_gem->base; } -void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj) -{ - return NULL; -} - -void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) -{ - /* Nothing to do */ -} - int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index 74e926abeff0..a23272fb96fb 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -107,8 +107,6 @@ struct drm_gem_object * exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); -void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj); -void 
exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index abbc1ddbf27f..7528e8a2d359 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -134,7 +134,7 @@ static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg) DEFINE_DRM_GEM_CMA_FOPS(fsl_dcu_drm_fops); -static struct drm_driver fsl_dcu_drm_driver = { +static const struct drm_driver fsl_dcu_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .load = fsl_dcu_load, .unload = fsl_dcu_unload, @@ -234,7 +234,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct resource *res; void __iomem *base; - struct drm_driver *driver = &fsl_dcu_drm_driver; struct clk *pix_clk_in; char pix_clk_name[32]; const char *pix_clk_in_name; @@ -304,7 +303,7 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev) fsl_dev->tcon = fsl_tcon_init(dev); - drm = drm_dev_alloc(driver, dev); + drm = drm_dev_alloc(&fsl_dcu_drm_driver, dev); if (IS_ERR(drm)) { ret = PTR_ERR(drm); goto unregister_pix_clk; diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c index 8f07de83b6fb..db827e591403 100644 --- a/drivers/gpu/drm/gma500/gem.c +++ b/drivers/gpu/drm/gma500/gem.c @@ -32,12 +32,6 @@ static void psb_gem_free_object(struct drm_gem_object *obj) psb_gtt_free_range(obj->dev, gtt); } -int psb_gem_get_aperture(struct drm_device *dev, void *data, - struct drm_file *file) -{ - return -EINVAL; -} - static const struct vm_operations_struct psb_gem_vm_ops = { .fault = psb_gem_fault, .open = drm_gem_vm_open, diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index b13376a6fb91..6e7197d89463 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -34,7 +34,7 @@ #include "psb_intel_reg.h" #include "psb_reg.h" -static struct drm_driver driver; +static const struct drm_driver driver; static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent); /* @@ -491,7 +491,7 @@ static const struct file_operations psb_gem_fops = { .read = drm_read, }; -static struct drm_driver driver = { +static const struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM, .lastclose = drm_fb_helper_lastclose, diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index c71a5a4e912c..ce6aae4b1bb2 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h @@ -735,8 +735,6 @@ extern const struct drm_connector_helper_funcs extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs; /* gem.c */ -extern int psb_gem_get_aperture(struct drm_device *dev, void *data, - struct drm_file *file); extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args); diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index fee6fe810e74..d845657fd99c 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -43,7 +43,7 @@ static irqreturn_t hibmc_drm_interrupt(int irq, void *arg) return IRQ_HANDLED; } -static struct drm_driver hibmc_driver = { +static const struct drm_driver hibmc_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops 
= &hibmc_fops, .name = "hibmc", diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index d84d41f3e78f..aa6c53f88f7c 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -918,7 +918,7 @@ static const struct drm_mode_config_funcs ade_mode_config_funcs = { DEFINE_DRM_GEM_CMA_FOPS(ade_fops); -static struct drm_driver ade_driver = { +static const struct drm_driver ade_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &ade_fops, DRM_GEM_CMA_DRIVER_OPS, diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h index dee8ec2f7f2e..386d137f29e5 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h @@ -40,7 +40,7 @@ struct kirin_drm_data { u32 num_planes; u32 prim_plane; - struct drm_driver *driver; + const struct drm_driver *driver; const struct drm_crtc_helper_funcs *crtc_helper_funcs; const struct drm_crtc_funcs *crtc_funcs; const struct drm_plane_helper_funcs *plane_helper_funcs; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index d6e25212d5c0..f2389ba49c69 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -87,7 +87,7 @@ #include "intel_sideband.h" #include "vlv_suspend.h" -static struct drm_driver driver; +static const struct drm_driver driver; static int i915_get_bridge_dev(struct drm_i915_private *dev_priv) { @@ -1759,7 +1759,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW), }; -static struct drm_driver driver = { +static const struct drm_driver driver = { /* Don't use MTRRs here; the Xserver or userspace app should * deal with them for Intel hardware. 
*/ diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 9220c9d1a4b7..e946bd2087d8 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -81,7 +81,7 @@ out: i915_params_free(&i915->params); } -static struct drm_driver mock_driver = { +static const struct drm_driver mock_driver = { .name = "mock", .driver_features = DRIVER_GEM, .release = mock_device_release, diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c index b72e5cef7e40..b549ce5e7607 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-kms.c +++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c @@ -26,7 +26,7 @@ static const struct drm_mode_config_funcs dcss_drm_mode_config_funcs = { .atomic_commit = drm_atomic_helper_commit, }; -static struct drm_driver dcss_kms_driver = { +static const struct drm_driver dcss_kms_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, DRM_GEM_CMA_DRIVER_OPS, .fops = &dcss_cma_fops, diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 9bf5ad6d18a2..d1a9841adeed 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -145,7 +145,7 @@ static const struct drm_ioctl_desc imx_drm_ioctls[] = { /* none so far */ }; -static struct drm_driver imx_drm_driver = { +static const struct drm_driver imx_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, DRM_GEM_CMA_DRIVER_OPS, .ioctls = imx_drm_ioctls, diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index b9c156e13156..368bfef8b340 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -305,11 +305,13 @@ ingenic_drm_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode static void ingenic_drm_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct ingenic_drm *priv = drm_crtc_get_priv(crtc); u32 ctrl = 0; if (priv->soc_info->has_osd && - drm_atomic_crtc_needs_modeset(crtc->state)) { + drm_atomic_crtc_needs_modeset(crtc_state)) { /* * If IPU plane is enabled, enable IPU as source for the F1 * plane; otherwise use regular DMA. 
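The ingenic_drm_crtc_atomic_begin() hunk above (and the atomic_flush hunk that follows) looks up the CRTC state from the drm_atomic_state passed into the hook instead of dereferencing crtc->state. A minimal sketch of that pattern, with foo_* as a placeholder driver name:

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>

static void foo_crtc_atomic_begin(struct drm_crtc *crtc,
				  struct drm_atomic_state *state)
{
	/* The new state being committed, as tracked by this atomic commit. */
	struct drm_crtc_state *crtc_state =
		drm_atomic_get_new_crtc_state(state, crtc);

	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
		/* programming that only applies on a full modeset */
	}
}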
@@ -326,7 +328,8 @@ static void ingenic_drm_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct ingenic_drm *priv = drm_crtc_get_priv(crtc); - struct drm_crtc_state *crtc_state = crtc->state; + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct drm_pending_vblank_event *event = crtc_state->event; if (drm_atomic_crtc_needs_modeset(crtc_state)) { @@ -716,7 +719,7 @@ static void ingenic_drm_disable_vblank(struct drm_crtc *crtc) DEFINE_DRM_GEM_CMA_FOPS(ingenic_drm_fops); -static struct drm_driver ingenic_drm_driver_data = { +static const struct drm_driver ingenic_drm_driver_data = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .name = "ingenic-drm", .desc = "DRM module for Ingenic SoCs", diff --git a/drivers/gpu/drm/ingenic/ingenic-ipu.c b/drivers/gpu/drm/ingenic/ingenic-ipu.c index fc8c6e970ee3..e52777ef85fd 100644 --- a/drivers/gpu/drm/ingenic/ingenic-ipu.c +++ b/drivers/gpu/drm/ingenic/ingenic-ipu.c @@ -516,7 +516,7 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane, static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) { - unsigned int num_w, denom_w, num_h, denom_h, xres, yres; + unsigned int num_w, denom_w, num_h, denom_h, xres, yres, max_w, max_h; struct ingenic_ipu *ipu = plane_to_ingenic_ipu(plane); struct drm_crtc *crtc = state->crtc ?: plane->state->crtc; struct drm_crtc_state *crtc_state; @@ -558,19 +558,26 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane, xres = state->src_w >> 16; yres = state->src_h >> 16; - /* Adjust the coefficients until we find a valid configuration */ - for (denom_w = xres, num_w = state->crtc_w; - num_w <= crtc_state->mode.hdisplay; num_w++) + /* + * Increase the scaled image's theoretical width/height until we find a + * configuration that has valid scaling coefficients, up to 102% of the + * screen's resolution. This makes sure that we can scale from almost + * every resolution possible at the cost of a very small distortion. + * The CRTC_W / CRTC_H are not modified. + */ + max_w = crtc_state->mode.hdisplay * 102 / 100; + max_h = crtc_state->mode.vdisplay * 102 / 100; + + for (denom_w = xres, num_w = state->crtc_w; num_w <= max_w; num_w++) if (!reduce_fraction(&num_w, &denom_w)) break; - if (num_w > crtc_state->mode.hdisplay) + if (num_w > max_w) return -EINVAL; - for (denom_h = yres, num_h = state->crtc_h; - num_h <= crtc_state->mode.vdisplay; num_h++) + for (denom_h = yres, num_h = state->crtc_h; num_h <= max_h; num_h++) if (!reduce_fraction(&num_h, &denom_h)) break; - if (num_h > crtc_state->mode.vdisplay) + if (num_h > max_h) return -EINVAL; ipu->num_w = num_w; diff --git a/drivers/gpu/drm/kmb/Kconfig b/drivers/gpu/drm/kmb/Kconfig new file mode 100644 index 000000000000..3921c57ac511 --- /dev/null +++ b/drivers/gpu/drm/kmb/Kconfig @@ -0,0 +1,13 @@ +config DRM_KMB_DISPLAY + tristate "INTEL KEEMBAY DISPLAY" + depends on DRM + depends on ARCH_KEEMBAY || COMPILE_TEST + select DRM_KMS_HELPER + select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER + select DRM_MIPI_DSI + help + Choose this option if you have Intel's KeemBay SOC which integrates + an ARM Cortex A53 CPU with an Intel Movidius VPU. + + If M is selected the module will be called kmb-drm.
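Stepping back to the ingenic-ipu hunk above: the new comment describes a search that grows the theoretical output size one pixel at a time, up to 102% of the display, until the scaling fraction reduces to coefficients the IPU can program. A self-contained sketch of that search; reduce_fraction() here is a simplified, gcd-based stand-in with an assumed 31-entry coefficient table, not the driver's exact helper:

#include <linux/errno.h>
#include <linux/gcd.h>

static int example_reduce_fraction(unsigned int *num, unsigned int *denom)
{
	unsigned long d = gcd(*num, *denom);

	/* Assumed hardware limit: coefficients must fit a 31-entry table. */
	if (*num > 31 * d)
		return -EINVAL;

	*num /= d;
	*denom /= d;
	return 0;
}

static int example_find_scaling(unsigned int src, unsigned int dst,
				unsigned int screen,
				unsigned int *num, unsigned int *denom)
{
	unsigned int max = screen * 102 / 100;	/* allow 2% overshoot */
	unsigned int n, d;

	/* Grow the theoretical output size until the fraction reduces. */
	for (d = src, n = dst; n <= max; n++)
		if (!example_reduce_fraction(&n, &d))
			break;

	if (n > max)
		return -EINVAL;

	*num = n;
	*denom = d;
	return 0;
}

For instance, scaling an 800-pixel-wide source onto 1920 CRTC pixels reduces to 12/5 on the first pass; awkward ratios get up to 2% of extra theoretical width before -EINVAL is returned.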
diff --git a/drivers/gpu/drm/kmb/Makefile b/drivers/gpu/drm/kmb/Makefile new file mode 100644 index 000000000000..527d737a0539 --- /dev/null +++ b/drivers/gpu/drm/kmb/Makefile @@ -0,0 +1,2 @@ +kmb-drm-y := kmb_crtc.o kmb_drv.o kmb_plane.o kmb_dsi.o +obj-$(CONFIG_DRM_KMB_DISPLAY) += kmb-drm.o diff --git a/drivers/gpu/drm/kmb/kmb_crtc.c b/drivers/gpu/drm/kmb/kmb_crtc.c new file mode 100644 index 000000000000..44327bc629ca --- /dev/null +++ b/drivers/gpu/drm/kmb/kmb_crtc.c @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright © 2018-2020 Intel Corporation + */ + +#include <linux/clk.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_print.h> +#include <drm/drm_vblank.h> +#include <drm/drm_modeset_helper_vtables.h> + +#include "kmb_drv.h" +#include "kmb_dsi.h" +#include "kmb_plane.h" +#include "kmb_regs.h" + +struct kmb_crtc_timing { + u32 vfront_porch; + u32 vback_porch; + u32 vsync_len; + u32 hfront_porch; + u32 hback_porch; + u32 hsync_len; +}; + +static int kmb_crtc_enable_vblank(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct kmb_drm_private *kmb = to_kmb(dev); + + /* Clear interrupt */ + kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_VERT_COMP); + /* Set which interval to generate vertical interrupt */ + kmb_write_lcd(kmb, LCD_VSTATUS_COMPARE, + LCD_VSTATUS_COMPARE_VSYNC); + /* Enable vertical interrupt */ + kmb_set_bitmask_lcd(kmb, LCD_INT_ENABLE, + LCD_INT_VERT_COMP); + return 0; +} + +static void kmb_crtc_disable_vblank(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct kmb_drm_private *kmb = to_kmb(dev); + + /* Clear interrupt */ + kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_VERT_COMP); + /* Disable vertical interrupt */ + kmb_clr_bitmask_lcd(kmb, LCD_INT_ENABLE, + LCD_INT_VERT_COMP); +} + +static const struct drm_crtc_funcs kmb_crtc_funcs = { + .destroy = drm_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .enable_vblank = kmb_crtc_enable_vblank, + .disable_vblank = kmb_crtc_disable_vblank, +}; + +static void kmb_crtc_set_mode(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_display_mode *m = &crtc->state->adjusted_mode; + struct kmb_crtc_timing vm; + struct kmb_drm_private *kmb = to_kmb(dev); + unsigned int val = 0; + + /* Initialize mipi */ + kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz); + drm_info(dev, + "vfp= %d vbp= %d vsync_len=%d hfp=%d hbp=%d hsync_len=%d\n", + m->crtc_vsync_start - m->crtc_vdisplay, + m->crtc_vtotal - m->crtc_vsync_end, + m->crtc_vsync_end - m->crtc_vsync_start, + m->crtc_hsync_start - m->crtc_hdisplay, + m->crtc_htotal - m->crtc_hsync_end, + m->crtc_hsync_end - m->crtc_hsync_start); + val = kmb_read_lcd(kmb, LCD_INT_ENABLE); + kmb_clr_bitmask_lcd(kmb, LCD_INT_ENABLE, val); + kmb_set_bitmask_lcd(kmb, LCD_INT_CLEAR, ~0x0); + vm.vfront_porch = 2; + vm.vback_porch = 2; + vm.vsync_len = 8; + vm.hfront_porch = 0; + vm.hback_porch = 0; + vm.hsync_len = 28; + + drm_dbg(dev, "%s : %dactive height= %d vbp=%d vfp=%d vsync-w=%d h-active=%d h-bp=%d h-fp=%d hsync-l=%d", + __func__, __LINE__, + m->crtc_vdisplay, vm.vback_porch, vm.vfront_porch, + vm.vsync_len, m->crtc_hdisplay, vm.hback_porch, + vm.hfront_porch, vm.hsync_len); + kmb_write_lcd(kmb, 
LCD_V_ACTIVEHEIGHT, + m->crtc_vdisplay - 1); + kmb_write_lcd(kmb, LCD_V_BACKPORCH, vm.vback_porch); + kmb_write_lcd(kmb, LCD_V_FRONTPORCH, vm.vfront_porch); + kmb_write_lcd(kmb, LCD_VSYNC_WIDTH, vm.vsync_len - 1); + kmb_write_lcd(kmb, LCD_H_ACTIVEWIDTH, + m->crtc_hdisplay - 1); + kmb_write_lcd(kmb, LCD_H_BACKPORCH, vm.hback_porch); + kmb_write_lcd(kmb, LCD_H_FRONTPORCH, vm.hfront_porch); + kmb_write_lcd(kmb, LCD_HSYNC_WIDTH, vm.hsync_len - 1); + /* This is hardcoded as 0 in the Myriadx code */ + kmb_write_lcd(kmb, LCD_VSYNC_START, 0); + kmb_write_lcd(kmb, LCD_VSYNC_END, 0); + /* Back ground color */ + kmb_write_lcd(kmb, LCD_BG_COLOUR_LS, 0x4); + if (m->flags == DRM_MODE_FLAG_INTERLACE) { + kmb_write_lcd(kmb, + LCD_VSYNC_WIDTH_EVEN, vm.vsync_len - 1); + kmb_write_lcd(kmb, + LCD_V_BACKPORCH_EVEN, vm.vback_porch); + kmb_write_lcd(kmb, + LCD_V_FRONTPORCH_EVEN, vm.vfront_porch); + kmb_write_lcd(kmb, LCD_V_ACTIVEHEIGHT_EVEN, + m->crtc_vdisplay - 1); + /* This is hardcoded as 10 in the Myriadx code */ + kmb_write_lcd(kmb, LCD_VSYNC_START_EVEN, 10); + kmb_write_lcd(kmb, LCD_VSYNC_END_EVEN, 10); + } + kmb_write_lcd(kmb, LCD_TIMING_GEN_TRIG, 1); + kmb_set_bitmask_lcd(kmb, LCD_CONTROL, LCD_CTRL_ENABLE); + kmb_set_bitmask_lcd(kmb, LCD_INT_ENABLE, val); +} + +static void kmb_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct kmb_drm_private *kmb = crtc_to_kmb_priv(crtc); + + clk_prepare_enable(kmb->kmb_clk.clk_lcd); + kmb_crtc_set_mode(crtc); + drm_crtc_vblank_on(crtc); +} + +static void kmb_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct kmb_drm_private *kmb = crtc_to_kmb_priv(crtc); + struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc); + + /* due to hw limitations, planes need to be off when crtc is off */ + drm_atomic_helper_disable_planes_on_crtc(old_state, false); + + drm_crtc_vblank_off(crtc); + clk_disable_unprepare(kmb->kmb_clk.clk_lcd); +} + +static void kmb_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_device *dev = crtc->dev; + struct kmb_drm_private *kmb = to_kmb(dev); + + kmb_clr_bitmask_lcd(kmb, LCD_INT_ENABLE, + LCD_INT_VERT_COMP); +} + +static void kmb_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_device *dev = crtc->dev; + struct kmb_drm_private *kmb = to_kmb(dev); + + kmb_set_bitmask_lcd(kmb, LCD_INT_ENABLE, + LCD_INT_VERT_COMP); + + spin_lock_irq(&crtc->dev->event_lock); + if (crtc->state->event) { + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, crtc->state->event); + else + drm_crtc_send_vblank_event(crtc, crtc->state->event); + } + crtc->state->event = NULL; + spin_unlock_irq(&crtc->dev->event_lock); +} + +static const struct drm_crtc_helper_funcs kmb_crtc_helper_funcs = { + .atomic_begin = kmb_crtc_atomic_begin, + .atomic_enable = kmb_crtc_atomic_enable, + .atomic_disable = kmb_crtc_atomic_disable, + .atomic_flush = kmb_crtc_atomic_flush, +}; + +int kmb_setup_crtc(struct drm_device *drm) +{ + struct kmb_drm_private *kmb = to_kmb(drm); + struct kmb_plane *primary; + int ret; + + primary = kmb_plane_init(drm); + if (IS_ERR(primary)) + return PTR_ERR(primary); + + ret = drm_crtc_init_with_planes(drm, &kmb->crtc, &primary->base_plane, + NULL, &kmb_crtc_funcs, NULL); + if (ret) { + kmb_plane_destroy(&primary->base_plane); + return ret; + } + + drm_crtc_helper_add(&kmb->crtc, &kmb_crtc_helper_funcs); + return 0; +} diff --git a/drivers/gpu/drm/kmb/kmb_drv.c 
b/drivers/gpu/drm/kmb/kmb_drv.c new file mode 100644 index 000000000000..a31a840ce634 --- /dev/null +++ b/drivers/gpu/drm/kmb/kmb_drv.c @@ -0,0 +1,602 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright © 2018-2020 Intel Corporation + */ + +#include <linux/clk.h> +#include <linux/module.h> +#include <linux/of_graph.h> +#include <linux/of_platform.h> +#include <linux/of_reserved_mem.h> +#include <linux/mfd/syscon.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_irq.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_vblank.h> + +#include "kmb_drv.h" +#include "kmb_dsi.h" +#include "kmb_regs.h" + +static int kmb_display_clk_enable(struct kmb_drm_private *kmb) +{ + int ret = 0; + + ret = clk_prepare_enable(kmb->kmb_clk.clk_lcd); + if (ret) { + drm_err(&kmb->drm, "Failed to enable LCD clock: %d\n", ret); + return ret; + } + DRM_INFO("SUCCESS : enabled LCD clocks\n"); + return 0; +} + +static int kmb_initialize_clocks(struct kmb_drm_private *kmb, struct device *dev) +{ + int ret = 0; + struct regmap *msscam; + + kmb->kmb_clk.clk_lcd = devm_clk_get(dev, "clk_lcd"); + if (IS_ERR(kmb->kmb_clk.clk_lcd)) { + drm_err(&kmb->drm, "clk_get() failed clk_lcd\n"); + return PTR_ERR(kmb->kmb_clk.clk_lcd); + } + + kmb->kmb_clk.clk_pll0 = devm_clk_get(dev, "clk_pll0"); + if (IS_ERR(kmb->kmb_clk.clk_pll0)) { + drm_err(&kmb->drm, "clk_get() failed clk_pll0 "); + return PTR_ERR(kmb->kmb_clk.clk_pll0); + } + kmb->sys_clk_mhz = clk_get_rate(kmb->kmb_clk.clk_pll0) / 1000000; + drm_info(&kmb->drm, "system clk = %d Mhz", kmb->sys_clk_mhz); + + ret = kmb_dsi_clk_init(kmb->kmb_dsi); + + /* Set LCD clock to 200 Mhz */ + clk_set_rate(kmb->kmb_clk.clk_lcd, KMB_LCD_DEFAULT_CLK); + if (clk_get_rate(kmb->kmb_clk.clk_lcd) != KMB_LCD_DEFAULT_CLK) { + drm_err(&kmb->drm, "failed to set to clk_lcd to %d\n", + KMB_LCD_DEFAULT_CLK); + return -1; + } + drm_dbg(&kmb->drm, "clk_lcd = %ld\n", clk_get_rate(kmb->kmb_clk.clk_lcd)); + + ret = kmb_display_clk_enable(kmb); + if (ret) + return ret; + + msscam = syscon_regmap_lookup_by_compatible("intel,keembay-msscam"); + if (IS_ERR(msscam)) { + drm_err(&kmb->drm, "failed to get msscam syscon"); + return -1; + } + + /* Enable MSS_CAM_CLK_CTRL for MIPI TX and LCD */ + regmap_update_bits(msscam, MSS_CAM_CLK_CTRL, 0x1fff, 0x1fff); + regmap_update_bits(msscam, MSS_CAM_RSTN_CTRL, 0xffffffff, 0xffffffff); + return 0; +} + +static void kmb_display_clk_disable(struct kmb_drm_private *kmb) +{ + clk_disable_unprepare(kmb->kmb_clk.clk_lcd); +} + +static void __iomem *kmb_map_mmio(struct drm_device *drm, + struct platform_device *pdev, + char *name) +{ + struct resource *res; + void __iomem *mem; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); + if (!res) { + drm_err(drm, "failed to get resource for %s", name); + return ERR_PTR(-ENOMEM); + } + mem = devm_ioremap_resource(drm->dev, res); + if (IS_ERR(mem)) + drm_err(drm, "failed to ioremap %s registers", name); + return mem; +} + +static int kmb_hw_init(struct drm_device *drm, unsigned long flags) +{ + struct kmb_drm_private *kmb = to_kmb(drm); + struct platform_device *pdev = to_platform_device(drm->dev); + int irq_lcd; + int ret = 0; + + /* Map LCD MMIO registers */ + kmb->lcd_mmio = kmb_map_mmio(drm, pdev, "lcd"); + if (IS_ERR(kmb->lcd_mmio)) { + drm_err(&kmb->drm, "failed to map LCD registers\n"); + return 
-ENOMEM; + } + + /* Map MIPI MMIO registers */ + ret = kmb_dsi_map_mmio(kmb->kmb_dsi); + if (ret) + return ret; + + /* Enable display clocks */ + kmb_initialize_clocks(kmb, &pdev->dev); + + /* Register irqs here - section 17.3 in databook + * lists LCD at 79 and 82 for MIPI under MSS CPU - + * firmware has redirected 79 to A53 IRQ 33 + */ + + /* Allocate LCD interrupt resources */ + irq_lcd = platform_get_irq(pdev, 0); + if (irq_lcd < 0) { + drm_err(&kmb->drm, "irq_lcd not found"); + goto setup_fail; + } + + /* Get the optional framebuffer memory resource */ + ret = of_reserved_mem_device_init(drm->dev); + if (ret && ret != -ENODEV) + return ret; + + spin_lock_init(&kmb->irq_lock); + + kmb->irq_lcd = irq_lcd; + + return 0; + + setup_fail: + of_reserved_mem_device_release(drm->dev); + + return ret; +} + +static const struct drm_mode_config_funcs kmb_mode_config_funcs = { + .fb_create = drm_gem_fb_create, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static int kmb_setup_mode_config(struct drm_device *drm) +{ + int ret; + struct kmb_drm_private *kmb = to_kmb(drm); + + ret = drmm_mode_config_init(drm); + if (ret) + return ret; + drm->mode_config.min_width = KMB_MIN_WIDTH; + drm->mode_config.min_height = KMB_MIN_HEIGHT; + drm->mode_config.max_width = KMB_MAX_WIDTH; + drm->mode_config.max_height = KMB_MAX_HEIGHT; + drm->mode_config.funcs = &kmb_mode_config_funcs; + + ret = kmb_setup_crtc(drm); + if (ret < 0) { + drm_err(drm, "failed to create crtc\n"); + return ret; + } + ret = kmb_dsi_encoder_init(drm, kmb->kmb_dsi); + /* Set the CRTC's port so that the encoder component can find it */ + kmb->crtc.port = of_graph_get_port_by_id(drm->dev->of_node, 0); + ret = drm_vblank_init(drm, drm->mode_config.num_crtc); + if (ret < 0) { + drm_err(drm, "failed to initialize vblank\n"); + pm_runtime_disable(drm->dev); + return ret; + } + + drm_mode_config_reset(drm); + return 0; +} + +static irqreturn_t handle_lcd_irq(struct drm_device *dev) +{ + unsigned long status, val, val1; + int plane_id, dma0_state, dma1_state; + struct kmb_drm_private *kmb = to_kmb(dev); + + status = kmb_read_lcd(kmb, LCD_INT_STATUS); + + spin_lock(&kmb->irq_lock); + if (status & LCD_INT_EOF) { + kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_EOF); + + /* When disabling/enabling LCD layers, the change takes effect + * immediately and does not wait for EOF (end of frame). + * When kmb_plane_atomic_disable is called, mark the plane as + * disabled but actually disable the plane when EOF irq is + * being handled. + */ + for (plane_id = LAYER_0; + plane_id < KMB_MAX_PLANES; plane_id++) { + if (kmb->plane_status[plane_id].disable) { + kmb_clr_bitmask_lcd(kmb, + LCD_LAYERn_DMA_CFG + (plane_id), + LCD_DMA_LAYER_ENABLE); + + kmb_clr_bitmask_lcd(kmb, LCD_CONTROL, + kmb->plane_status[plane_id].ctrl); + + kmb->plane_status[plane_id].disable = false; + } + } + if (kmb->kmb_under_flow) { + /* DMA Recovery after underflow */ + dma0_state = (kmb->layer_no == 0) ? + LCD_VIDEO0_DMA0_STATE : LCD_VIDEO1_DMA0_STATE; + dma1_state = (kmb->layer_no == 0) ? 
+ LCD_VIDEO0_DMA1_STATE : LCD_VIDEO1_DMA1_STATE; + + do { + kmb_write_lcd(kmb, LCD_FIFO_FLUSH, 1); + val = kmb_read_lcd(kmb, dma0_state) + & LCD_DMA_STATE_ACTIVE; + val1 = kmb_read_lcd(kmb, dma1_state) + & LCD_DMA_STATE_ACTIVE; + } while ((val || val1)); + /* disable dma */ + kmb_clr_bitmask_lcd(kmb, + LCD_LAYERn_DMA_CFG(kmb->layer_no), + LCD_DMA_LAYER_ENABLE); + kmb_write_lcd(kmb, LCD_FIFO_FLUSH, 1); + kmb->kmb_flush_done = 1; + kmb->kmb_under_flow = 0; + } + } + + if (status & LCD_INT_LINE_CMP) { + /* clear line compare interrupt */ + kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_LINE_CMP); + } + + if (status & LCD_INT_VERT_COMP) { + /* Read VSTATUS */ + val = kmb_read_lcd(kmb, LCD_VSTATUS); + val = (val & LCD_VSTATUS_VERTICAL_STATUS_MASK); + switch (val) { + case LCD_VSTATUS_COMPARE_VSYNC: + /* Clear vertical compare interrupt */ + kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_VERT_COMP); + if (kmb->kmb_flush_done) { + kmb_set_bitmask_lcd(kmb, + LCD_LAYERn_DMA_CFG + (kmb->layer_no), + LCD_DMA_LAYER_ENABLE); + kmb->kmb_flush_done = 0; + } + drm_crtc_handle_vblank(&kmb->crtc); + break; + case LCD_VSTATUS_COMPARE_BACKPORCH: + case LCD_VSTATUS_COMPARE_ACTIVE: + case LCD_VSTATUS_COMPARE_FRONT_PORCH: + kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_VERT_COMP); + break; + } + } + if (status & LCD_INT_DMA_ERR) { + val = + (status & LCD_INT_DMA_ERR & + kmb_read_lcd(kmb, LCD_INT_ENABLE)); + /* LAYER0 - VL0 */ + if (val & (LAYER0_DMA_FIFO_UNDERFLOW | + LAYER0_DMA_CB_FIFO_UNDERFLOW | + LAYER0_DMA_CR_FIFO_UNDERFLOW)) { + kmb->kmb_under_flow++; + drm_info(&kmb->drm, + "!LAYER0:VL0 DMA UNDERFLOW val = 0x%lx,under_flow=%d", + val, kmb->kmb_under_flow); + /* disable underflow interrupt */ + kmb_clr_bitmask_lcd(kmb, LCD_INT_ENABLE, + LAYER0_DMA_FIFO_UNDERFLOW | + LAYER0_DMA_CB_FIFO_UNDERFLOW | + LAYER0_DMA_CR_FIFO_UNDERFLOW); + kmb_set_bitmask_lcd(kmb, LCD_INT_CLEAR, + LAYER0_DMA_CB_FIFO_UNDERFLOW | + LAYER0_DMA_FIFO_UNDERFLOW | + LAYER0_DMA_CR_FIFO_UNDERFLOW); + /* disable auto restart mode */ + kmb_clr_bitmask_lcd(kmb, LCD_LAYERn_DMA_CFG(0), + LCD_DMA_LAYER_CONT_PING_PONG_UPDATE); + + kmb->layer_no = 0; + } + + if (val & LAYER0_DMA_FIFO_OVERFLOW) + drm_dbg(&kmb->drm, + "LAYER0:VL0 DMA OVERFLOW val = 0x%lx", val); + if (val & LAYER0_DMA_CB_FIFO_OVERFLOW) + drm_dbg(&kmb->drm, + "LAYER0:VL0 DMA CB OVERFLOW val = 0x%lx", val); + if (val & LAYER0_DMA_CR_FIFO_OVERFLOW) + drm_dbg(&kmb->drm, + "LAYER0:VL0 DMA CR OVERFLOW val = 0x%lx", val); + + /* LAYER1 - VL1 */ + if (val & (LAYER1_DMA_FIFO_UNDERFLOW | + LAYER1_DMA_CB_FIFO_UNDERFLOW | + LAYER1_DMA_CR_FIFO_UNDERFLOW)) { + kmb->kmb_under_flow++; + drm_info(&kmb->drm, + "!LAYER1:VL1 DMA UNDERFLOW val = 0x%lx, under_flow=%d", + val, kmb->kmb_under_flow); + /* disable underflow interrupt */ + kmb_clr_bitmask_lcd(kmb, LCD_INT_ENABLE, + LAYER1_DMA_FIFO_UNDERFLOW | + LAYER1_DMA_CB_FIFO_UNDERFLOW | + LAYER1_DMA_CR_FIFO_UNDERFLOW); + kmb_set_bitmask_lcd(kmb, LCD_INT_CLEAR, + LAYER1_DMA_CB_FIFO_UNDERFLOW | + LAYER1_DMA_FIFO_UNDERFLOW | + LAYER1_DMA_CR_FIFO_UNDERFLOW); + /* disable auto restart mode */ + kmb_clr_bitmask_lcd(kmb, LCD_LAYERn_DMA_CFG(1), + LCD_DMA_LAYER_CONT_PING_PONG_UPDATE); + kmb->layer_no = 1; + } + + /* LAYER1 - VL1 */ + if (val & LAYER1_DMA_FIFO_OVERFLOW) + drm_dbg(&kmb->drm, + "LAYER1:VL1 DMA OVERFLOW val = 0x%lx", val); + if (val & LAYER1_DMA_CB_FIFO_OVERFLOW) + drm_dbg(&kmb->drm, + "LAYER1:VL1 DMA CB OVERFLOW val = 0x%lx", val); + if (val & LAYER1_DMA_CR_FIFO_OVERFLOW) + drm_dbg(&kmb->drm, + "LAYER1:VL1 DMA CR OVERFLOW val = 0x%lx", val); + + /* LAYER2 - GL0 */ 
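	/*
	 * Note: the graphics layers GL0/GL1 handled below only have their
	 * underflow/overflow conditions logged; the flush-and-re-enable
	 * recovery (FIFO flush at EOF, DMA re-enable at the next VSYNC
	 * compare) is wired up for the video layers VL0/VL1 above only.
	 */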
+ if (val & LAYER2_DMA_FIFO_UNDERFLOW) + drm_dbg(&kmb->drm, + "LAYER2:GL0 DMA UNDERFLOW val = 0x%lx", val); + if (val & LAYER2_DMA_FIFO_OVERFLOW) + drm_dbg(&kmb->drm, + "LAYER2:GL0 DMA OVERFLOW val = 0x%lx", val); + + /* LAYER3 - GL1 */ + if (val & LAYER3_DMA_FIFO_UNDERFLOW) + drm_dbg(&kmb->drm, + "LAYER3:GL1 DMA UNDERFLOW val = 0x%lx", val); + if (val & LAYER3_DMA_FIFO_UNDERFLOW) + drm_dbg(&kmb->drm, + "LAYER3:GL1 DMA OVERFLOW val = 0x%lx", val); + } + + spin_unlock(&kmb->irq_lock); + + if (status & LCD_INT_LAYER) { + /* Clear layer interrupts */ + kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_LAYER); + } + + /* Clear all interrupts */ + kmb_set_bitmask_lcd(kmb, LCD_INT_CLEAR, 1); + return IRQ_HANDLED; +} + +/* IRQ handler */ +static irqreturn_t kmb_isr(int irq, void *arg) +{ + struct drm_device *dev = (struct drm_device *)arg; + + handle_lcd_irq(dev); + return IRQ_HANDLED; +} + +static void kmb_irq_reset(struct drm_device *drm) +{ + kmb_write_lcd(to_kmb(drm), LCD_INT_CLEAR, 0xFFFF); + kmb_write_lcd(to_kmb(drm), LCD_INT_ENABLE, 0); +} + +DEFINE_DRM_GEM_CMA_FOPS(fops); + +static struct drm_driver kmb_driver = { + .driver_features = DRIVER_GEM | + DRIVER_MODESET | DRIVER_ATOMIC, + .irq_handler = kmb_isr, + .irq_preinstall = kmb_irq_reset, + .irq_uninstall = kmb_irq_reset, + /* GEM Operations */ + .fops = &fops, + DRM_GEM_CMA_DRIVER_OPS_VMAP, + .name = "kmb-drm", + .desc = "KEEMBAY DISPLAY DRIVER ", + .date = "20201008", + .major = 1, + .minor = 0, +}; + +static int kmb_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct drm_device *drm = dev_get_drvdata(dev); + struct kmb_drm_private *kmb = to_kmb(drm); + + drm_dev_unregister(drm); + drm_kms_helper_poll_fini(drm); + of_node_put(kmb->crtc.port); + kmb->crtc.port = NULL; + pm_runtime_get_sync(drm->dev); + drm_irq_uninstall(drm); + pm_runtime_put_sync(drm->dev); + pm_runtime_disable(drm->dev); + + of_reserved_mem_device_release(drm->dev); + + /* Release clks */ + kmb_display_clk_disable(kmb); + + dev_set_drvdata(dev, NULL); + + /* Unregister DSI host */ + kmb_dsi_host_unregister(kmb->kmb_dsi); + drm_atomic_helper_shutdown(drm); + return 0; +} + +static int kmb_probe(struct platform_device *pdev) +{ + struct device *dev = get_device(&pdev->dev); + struct kmb_drm_private *kmb; + int ret = 0; + struct device_node *dsi_in; + struct device_node *dsi_node; + struct platform_device *dsi_pdev; + + /* The bridge (ADV 7535) will return -EPROBE_DEFER until it + * has a mipi_dsi_host to register its device to. So, we + * first register the DSI host during probe time, and then return + * -EPROBE_DEFER until the bridge is loaded. Probe will be called again + * and then the rest of the driver initialization can proceed + * afterwards and the bridge can be successfully attached. 
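 * Concretely: kmb_dsi_host_bridge_init() registers the DSI host, then
 * keeps returning -EPROBE_DEFER for as long as of_drm_find_bridge() finds
 * no bridge, and only the re-probe that does find the ADV7535 goes on to
 * allocate the DRM device and attach it.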
+ */ + dsi_in = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0); + if (!dsi_in) { + DRM_ERROR("Failed to get dsi_in node info from DT"); + return -EINVAL; + } + dsi_node = of_graph_get_remote_port_parent(dsi_in); + if (!dsi_node) { + of_node_put(dsi_in); + DRM_ERROR("Failed to get dsi node from DT\n"); + return -EINVAL; + } + + dsi_pdev = of_find_device_by_node(dsi_node); + if (!dsi_pdev) { + of_node_put(dsi_in); + of_node_put(dsi_node); + DRM_ERROR("Failed to get dsi platform device\n"); + return -EINVAL; + } + + of_node_put(dsi_in); + of_node_put(dsi_node); + ret = kmb_dsi_host_bridge_init(get_device(&dsi_pdev->dev)); + + if (ret == -EPROBE_DEFER) { + return -EPROBE_DEFER; + } else if (ret) { + DRM_ERROR("probe failed to initialize DSI host bridge\n"); + return ret; + } + + /* Create DRM device */ + kmb = devm_drm_dev_alloc(dev, &kmb_driver, + struct kmb_drm_private, drm); + if (IS_ERR(kmb)) + return PTR_ERR(kmb); + + dev_set_drvdata(dev, &kmb->drm); + + /* Initialize MIPI DSI */ + kmb->kmb_dsi = kmb_dsi_init(dsi_pdev); + if (IS_ERR(kmb->kmb_dsi)) { + drm_err(&kmb->drm, "failed to initialize DSI\n"); + ret = PTR_ERR(kmb->kmb_dsi); + goto err_free1; + } + + kmb->kmb_dsi->dev = &dsi_pdev->dev; + kmb->kmb_dsi->pdev = dsi_pdev; + ret = kmb_hw_init(&kmb->drm, 0); + if (ret) + goto err_free1; + + ret = kmb_setup_mode_config(&kmb->drm); + if (ret) + goto err_free; + + ret = drm_irq_install(&kmb->drm, kmb->irq_lcd); + if (ret < 0) { + drm_err(&kmb->drm, "failed to install IRQ handler\n"); + goto err_irq; + } + + drm_kms_helper_poll_init(&kmb->drm); + + /* Register graphics device with the kernel */ + ret = drm_dev_register(&kmb->drm, 0); + if (ret) + goto err_register; + + return 0; + + err_register: + drm_kms_helper_poll_fini(&kmb->drm); + err_irq: + pm_runtime_disable(kmb->drm.dev); + err_free: + drm_crtc_cleanup(&kmb->crtc); + drm_mode_config_cleanup(&kmb->drm); + err_free1: + dev_set_drvdata(dev, NULL); + kmb_dsi_host_unregister(kmb->kmb_dsi); + + return ret; +} + +static const struct of_device_id kmb_of_match[] = { + {.compatible = "intel,keembay-display"}, + {}, +}; + +MODULE_DEVICE_TABLE(of, kmb_of_match); + +static int __maybe_unused kmb_pm_suspend(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct kmb_drm_private *kmb = drm ? to_kmb(drm) : NULL; + + drm_kms_helper_poll_disable(drm); + + kmb->state = drm_atomic_helper_suspend(drm); + if (IS_ERR(kmb->state)) { + drm_kms_helper_poll_enable(drm); + return PTR_ERR(kmb->state); + } + + return 0; +} + +static int __maybe_unused kmb_pm_resume(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct kmb_drm_private *kmb = drm ? 
to_kmb(drm) : NULL; + + if (!kmb) + return 0; + + drm_atomic_helper_resume(drm, kmb->state); + drm_kms_helper_poll_enable(drm); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(kmb_pm_ops, kmb_pm_suspend, kmb_pm_resume); + +static struct platform_driver kmb_platform_driver = { + .probe = kmb_probe, + .remove = kmb_remove, + .driver = { + .name = "kmb-drm", + .pm = &kmb_pm_ops, + .of_match_table = kmb_of_match, + }, +}; + +module_platform_driver(kmb_platform_driver); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Keembay Display driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/kmb/kmb_drv.h b/drivers/gpu/drm/kmb/kmb_drv.h new file mode 100644 index 000000000000..02e806712a64 --- /dev/null +++ b/drivers/gpu/drm/kmb/kmb_drv.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright © 2018-2020 Intel Corporation + */ + +#ifndef __KMB_DRV_H__ +#define __KMB_DRV_H__ + +#include <drm/drm_device.h> + +#include "kmb_plane.h" +#include "kmb_regs.h" + +#define KMB_MAX_WIDTH 1920 /*Max width in pixels */ +#define KMB_MAX_HEIGHT 1080 /*Max height in pixels */ +#define KMB_MIN_WIDTH 1920 /*Max width in pixels */ +#define KMB_MIN_HEIGHT 1080 /*Max height in pixels */ +#define KMB_LCD_DEFAULT_CLK 200000000 +#define KMB_SYS_CLK_MHZ 500 + +#define ICAM_MMIO 0x3b100000 +#define ICAM_LCD_OFFSET 0x1080 +#define ICAM_MMIO_SIZE 0x2000 + +struct kmb_dsi; + +struct kmb_clock { + struct clk *clk_lcd; + struct clk *clk_pll0; +}; + +struct kmb_drm_private { + struct drm_device drm; + struct kmb_dsi *kmb_dsi; + void __iomem *lcd_mmio; + struct kmb_clock kmb_clk; + struct drm_crtc crtc; + struct kmb_plane *plane; + struct drm_atomic_state *state; + spinlock_t irq_lock; + int irq_lcd; + int sys_clk_mhz; + struct layer_status plane_status[KMB_MAX_PLANES]; + int kmb_under_flow; + int kmb_flush_done; + int layer_no; +}; + +static inline struct kmb_drm_private *to_kmb(const struct drm_device *dev) +{ + return container_of(dev, struct kmb_drm_private, drm); +} + +static inline struct kmb_drm_private *crtc_to_kmb_priv(const struct drm_crtc *x) +{ + return container_of(x, struct kmb_drm_private, crtc); +} + +static inline void kmb_write_lcd(struct kmb_drm_private *dev_p, + unsigned int reg, u32 value) +{ + writel(value, (dev_p->lcd_mmio + reg)); +} + +static inline u32 kmb_read_lcd(struct kmb_drm_private *dev_p, unsigned int reg) +{ + return readl(dev_p->lcd_mmio + reg); +} + +static inline void kmb_set_bitmask_lcd(struct kmb_drm_private *dev_p, + unsigned int reg, u32 mask) +{ + u32 reg_val = kmb_read_lcd(dev_p, reg); + + kmb_write_lcd(dev_p, reg, (reg_val | mask)); +} + +static inline void kmb_clr_bitmask_lcd(struct kmb_drm_private *dev_p, + unsigned int reg, u32 mask) +{ + u32 reg_val = kmb_read_lcd(dev_p, reg); + + kmb_write_lcd(dev_p, reg, (reg_val & (~mask))); +} + +int kmb_setup_crtc(struct drm_device *dev); +void kmb_set_scanout(struct kmb_drm_private *lcd); +#endif /* __KMB_DRV_H__ */ diff --git a/drivers/gpu/drm/kmb/kmb_dsi.c b/drivers/gpu/drm/kmb/kmb_dsi.c new file mode 100644 index 000000000000..4b5d82af84b3 --- /dev/null +++ b/drivers/gpu/drm/kmb/kmb_dsi.c @@ -0,0 +1,1561 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright © 2019-2020 Intel Corporation + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/of_graph.h> +#include <linux/mfd/syscon.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_bridge_connector.h> +#include 
<drm/drm_mipi_dsi.h> +#include <drm/drm_simple_kms_helper.h> +#include <drm/drm_print.h> +#include <drm/drm_probe_helper.h> + +#include "kmb_dsi.h" +#include "kmb_regs.h" + +static struct mipi_dsi_host *dsi_host; +static struct mipi_dsi_device *dsi_device; +static struct drm_bridge *adv_bridge; + +/* Default setting is 1080p, 4 lanes */ +#define IMG_HEIGHT_LINES 1080 +#define IMG_WIDTH_PX 1920 +#define MIPI_TX_ACTIVE_LANES 4 + +static struct mipi_tx_frame_section_cfg mipi_tx_frame0_sect_cfg = { + .width_pixels = IMG_WIDTH_PX, + .height_lines = IMG_HEIGHT_LINES, + .data_type = DSI_LP_DT_PPS_RGB888_24B, + .data_mode = MIPI_DATA_MODE1, + .dma_packed = 0 +}; + +static struct mipi_tx_frame_cfg mipitx_frame0_cfg = { + .sections[0] = &mipi_tx_frame0_sect_cfg, + .sections[1] = NULL, + .sections[2] = NULL, + .sections[3] = NULL, + .vsync_width = 5, + .v_backporch = 36, + .v_frontporch = 4, + .hsync_width = 44, + .h_backporch = 148, + .h_frontporch = 88 +}; + +static const struct mipi_tx_dsi_cfg mipitx_dsi_cfg = { + .hfp_blank_en = 0, + .eotp_en = 0, + .lpm_last_vfp_line = 0, + .lpm_first_vsa_line = 0, + .sync_pulse_eventn = DSI_VIDEO_MODE_NO_BURST_EVENT, + .hfp_blanking = SEND_BLANK_PACKET, + .hbp_blanking = SEND_BLANK_PACKET, + .hsa_blanking = SEND_BLANK_PACKET, + .v_blanking = SEND_BLANK_PACKET, +}; + +static struct mipi_ctrl_cfg mipi_tx_init_cfg = { + .active_lanes = MIPI_TX_ACTIVE_LANES, + .lane_rate_mbps = MIPI_TX_LANE_DATA_RATE_MBPS, + .ref_clk_khz = MIPI_TX_REF_CLK_KHZ, + .cfg_clk_khz = MIPI_TX_CFG_CLK_KHZ, + .tx_ctrl_cfg = { + .frames[0] = &mipitx_frame0_cfg, + .frames[1] = NULL, + .frames[2] = NULL, + .frames[3] = NULL, + .tx_dsi_cfg = &mipitx_dsi_cfg, + .line_sync_pkt_en = 0, + .line_counter_active = 0, + .frame_counter_active = 0, + .tx_always_use_hact = 1, + .tx_hact_wait_stop = 1, + } +}; + +struct mipi_hs_freq_range_cfg { + u16 default_bit_rate_mbps; + u8 hsfreqrange_code; +}; + +struct vco_params { + u32 freq; + u32 range; + u32 divider; +}; + +static const struct vco_params vco_table[] = { + {52, 0x3f, 8}, + {80, 0x39, 8}, + {105, 0x2f, 4}, + {160, 0x29, 4}, + {210, 0x1f, 2}, + {320, 0x19, 2}, + {420, 0x0f, 1}, + {630, 0x09, 1}, + {1100, 0x03, 1}, + {0xffff, 0x01, 1}, +}; + +static const struct mipi_hs_freq_range_cfg +mipi_hs_freq_range[MIPI_DPHY_DEFAULT_BIT_RATES] = { + {.default_bit_rate_mbps = 80, .hsfreqrange_code = 0x00}, + {.default_bit_rate_mbps = 90, .hsfreqrange_code = 0x10}, + {.default_bit_rate_mbps = 100, .hsfreqrange_code = 0x20}, + {.default_bit_rate_mbps = 110, .hsfreqrange_code = 0x30}, + {.default_bit_rate_mbps = 120, .hsfreqrange_code = 0x01}, + {.default_bit_rate_mbps = 130, .hsfreqrange_code = 0x11}, + {.default_bit_rate_mbps = 140, .hsfreqrange_code = 0x21}, + {.default_bit_rate_mbps = 150, .hsfreqrange_code = 0x31}, + {.default_bit_rate_mbps = 160, .hsfreqrange_code = 0x02}, + {.default_bit_rate_mbps = 170, .hsfreqrange_code = 0x12}, + {.default_bit_rate_mbps = 180, .hsfreqrange_code = 0x22}, + {.default_bit_rate_mbps = 190, .hsfreqrange_code = 0x32}, + {.default_bit_rate_mbps = 205, .hsfreqrange_code = 0x03}, + {.default_bit_rate_mbps = 220, .hsfreqrange_code = 0x13}, + {.default_bit_rate_mbps = 235, .hsfreqrange_code = 0x23}, + {.default_bit_rate_mbps = 250, .hsfreqrange_code = 0x33}, + {.default_bit_rate_mbps = 275, .hsfreqrange_code = 0x04}, + {.default_bit_rate_mbps = 300, .hsfreqrange_code = 0x14}, + {.default_bit_rate_mbps = 325, .hsfreqrange_code = 0x25}, + {.default_bit_rate_mbps = 350, .hsfreqrange_code = 0x35}, + {.default_bit_rate_mbps = 400, 
.hsfreqrange_code = 0x05}, + {.default_bit_rate_mbps = 450, .hsfreqrange_code = 0x16}, + {.default_bit_rate_mbps = 500, .hsfreqrange_code = 0x26}, + {.default_bit_rate_mbps = 550, .hsfreqrange_code = 0x37}, + {.default_bit_rate_mbps = 600, .hsfreqrange_code = 0x07}, + {.default_bit_rate_mbps = 650, .hsfreqrange_code = 0x18}, + {.default_bit_rate_mbps = 700, .hsfreqrange_code = 0x28}, + {.default_bit_rate_mbps = 750, .hsfreqrange_code = 0x39}, + {.default_bit_rate_mbps = 800, .hsfreqrange_code = 0x09}, + {.default_bit_rate_mbps = 850, .hsfreqrange_code = 0x19}, + {.default_bit_rate_mbps = 900, .hsfreqrange_code = 0x29}, + {.default_bit_rate_mbps = 1000, .hsfreqrange_code = 0x0A}, + {.default_bit_rate_mbps = 1050, .hsfreqrange_code = 0x1A}, + {.default_bit_rate_mbps = 1100, .hsfreqrange_code = 0x2A}, + {.default_bit_rate_mbps = 1150, .hsfreqrange_code = 0x3B}, + {.default_bit_rate_mbps = 1200, .hsfreqrange_code = 0x0B}, + {.default_bit_rate_mbps = 1250, .hsfreqrange_code = 0x1B}, + {.default_bit_rate_mbps = 1300, .hsfreqrange_code = 0x2B}, + {.default_bit_rate_mbps = 1350, .hsfreqrange_code = 0x3C}, + {.default_bit_rate_mbps = 1400, .hsfreqrange_code = 0x0C}, + {.default_bit_rate_mbps = 1450, .hsfreqrange_code = 0x1C}, + {.default_bit_rate_mbps = 1500, .hsfreqrange_code = 0x2C}, + {.default_bit_rate_mbps = 1550, .hsfreqrange_code = 0x3D}, + {.default_bit_rate_mbps = 1600, .hsfreqrange_code = 0x0D}, + {.default_bit_rate_mbps = 1650, .hsfreqrange_code = 0x1D}, + {.default_bit_rate_mbps = 1700, .hsfreqrange_code = 0x2E}, + {.default_bit_rate_mbps = 1750, .hsfreqrange_code = 0x3E}, + {.default_bit_rate_mbps = 1800, .hsfreqrange_code = 0x0E}, + {.default_bit_rate_mbps = 1850, .hsfreqrange_code = 0x1E}, + {.default_bit_rate_mbps = 1900, .hsfreqrange_code = 0x2F}, + {.default_bit_rate_mbps = 1950, .hsfreqrange_code = 0x3F}, + {.default_bit_rate_mbps = 2000, .hsfreqrange_code = 0x0F}, + {.default_bit_rate_mbps = 2050, .hsfreqrange_code = 0x40}, + {.default_bit_rate_mbps = 2100, .hsfreqrange_code = 0x41}, + {.default_bit_rate_mbps = 2150, .hsfreqrange_code = 0x42}, + {.default_bit_rate_mbps = 2200, .hsfreqrange_code = 0x43}, + {.default_bit_rate_mbps = 2250, .hsfreqrange_code = 0x44}, + {.default_bit_rate_mbps = 2300, .hsfreqrange_code = 0x45}, + {.default_bit_rate_mbps = 2350, .hsfreqrange_code = 0x46}, + {.default_bit_rate_mbps = 2400, .hsfreqrange_code = 0x47}, + {.default_bit_rate_mbps = 2450, .hsfreqrange_code = 0x48}, + {.default_bit_rate_mbps = 2500, .hsfreqrange_code = 0x49} +}; + +static void kmb_dsi_clk_disable(struct kmb_dsi *kmb_dsi) +{ + clk_disable_unprepare(kmb_dsi->clk_mipi); + clk_disable_unprepare(kmb_dsi->clk_mipi_ecfg); + clk_disable_unprepare(kmb_dsi->clk_mipi_cfg); +} + +void kmb_dsi_host_unregister(struct kmb_dsi *kmb_dsi) +{ + kmb_dsi_clk_disable(kmb_dsi); + mipi_dsi_host_unregister(kmb_dsi->host); +} + +/* + * This DSI can only be paired with bridges that do config through i2c + * which is ADV 7535 in the KMB EVM + */ +static ssize_t kmb_dsi_host_transfer(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg) +{ + return 0; +} + +static int kmb_dsi_host_attach(struct mipi_dsi_host *host, + struct mipi_dsi_device *dev) +{ + return 0; +} + +static int kmb_dsi_host_detach(struct mipi_dsi_host *host, + struct mipi_dsi_device *dev) +{ + return 0; +} + +static const struct mipi_dsi_host_ops kmb_dsi_host_ops = { + .attach = kmb_dsi_host_attach, + .detach = kmb_dsi_host_detach, + .transfer = kmb_dsi_host_transfer, +}; + +int kmb_dsi_host_bridge_init(struct device *dev) +{ + 
struct device_node *encoder_node, *dsi_out; + + /* Create and register MIPI DSI host */ + if (!dsi_host) { + dsi_host = kzalloc(sizeof(*dsi_host), GFP_KERNEL); + if (!dsi_host) + return -ENOMEM; + + dsi_host->ops = &kmb_dsi_host_ops; + + if (!dsi_device) { + dsi_device = kzalloc(sizeof(*dsi_device), GFP_KERNEL); + if (!dsi_device) { + kfree(dsi_host); + return -ENOMEM; + } + } + + dsi_host->dev = dev; + mipi_dsi_host_register(dsi_host); + } + + /* Find ADV7535 node and initialize it */ + dsi_out = of_graph_get_endpoint_by_regs(dev->of_node, 0, 1); + if (!dsi_out) { + DRM_ERROR("Failed to get dsi_out node info from DT\n"); + return -EINVAL; + } + encoder_node = of_graph_get_remote_port_parent(dsi_out); + if (!encoder_node) { + of_node_put(dsi_out); + DRM_ERROR("Failed to get bridge info from DT\n"); + return -EINVAL; + } + /* Locate drm bridge from the hdmi encoder DT node */ + adv_bridge = of_drm_find_bridge(encoder_node); + of_node_put(dsi_out); + of_node_put(encoder_node); + if (!adv_bridge) { + DRM_DEBUG("Wait for external bridge driver DT\n"); + return -EPROBE_DEFER; + } + + return 0; +} + +static u32 mipi_get_datatype_params(u32 data_type, u32 data_mode, + struct mipi_data_type_params *params) +{ + struct mipi_data_type_params data_type_param; + + switch (data_type) { + case DSI_LP_DT_PPS_YCBCR420_12B: + data_type_param.size_constraint_pixels = 2; + data_type_param.size_constraint_bytes = 3; + switch (data_mode) { + /* Case 0 not supported according to MDK */ + case 1: + case 2: + case 3: + data_type_param.pixels_per_pclk = 2; + data_type_param.bits_per_pclk = 24; + break; + default: + DRM_ERROR("DSI: Invalid data_mode %d\n", data_mode); + return -EINVAL; + }; + break; + case DSI_LP_DT_PPS_YCBCR422_16B: + data_type_param.size_constraint_pixels = 2; + data_type_param.size_constraint_bytes = 4; + switch (data_mode) { + /* Case 0 and 1 not supported according + * to MDK + */ + case 2: + data_type_param.pixels_per_pclk = 1; + data_type_param.bits_per_pclk = 16; + break; + case 3: + data_type_param.pixels_per_pclk = 2; + data_type_param.bits_per_pclk = 32; + break; + default: + DRM_ERROR("DSI: Invalid data_mode %d\n", data_mode); + return -EINVAL; + }; + break; + case DSI_LP_DT_LPPS_YCBCR422_20B: + case DSI_LP_DT_PPS_YCBCR422_24B: + data_type_param.size_constraint_pixels = 2; + data_type_param.size_constraint_bytes = 6; + switch (data_mode) { + /* Case 0 not supported according to MDK */ + case 1: + case 2: + case 3: + data_type_param.pixels_per_pclk = 1; + data_type_param.bits_per_pclk = 24; + break; + default: + DRM_ERROR("DSI: Invalid data_mode %d\n", data_mode); + return -EINVAL; + }; + break; + case DSI_LP_DT_PPS_RGB565_16B: + data_type_param.size_constraint_pixels = 1; + data_type_param.size_constraint_bytes = 2; + switch (data_mode) { + case 0: + case 1: + data_type_param.pixels_per_pclk = 1; + data_type_param.bits_per_pclk = 16; + break; + case 2: + case 3: + data_type_param.pixels_per_pclk = 2; + data_type_param.bits_per_pclk = 32; + break; + default: + DRM_ERROR("DSI: Invalid data_mode %d\n", data_mode); + return -EINVAL; + }; + break; + case DSI_LP_DT_PPS_RGB666_18B: + data_type_param.size_constraint_pixels = 4; + data_type_param.size_constraint_bytes = 9; + data_type_param.bits_per_pclk = 18; + data_type_param.pixels_per_pclk = 1; + break; + case DSI_LP_DT_LPPS_RGB666_18B: + case DSI_LP_DT_PPS_RGB888_24B: + data_type_param.size_constraint_pixels = 1; + data_type_param.size_constraint_bytes = 3; + data_type_param.bits_per_pclk = 24; + data_type_param.pixels_per_pclk = 1; + 
break; + case DSI_LP_DT_PPS_RGB101010_30B: + data_type_param.size_constraint_pixels = 4; + data_type_param.size_constraint_bytes = 15; + data_type_param.bits_per_pclk = 30; + data_type_param.pixels_per_pclk = 1; + break; + default: + DRM_ERROR("DSI: Invalid data_type %d\n", data_type); + return -EINVAL; + }; + + *params = data_type_param; + return 0; +} + +static u32 compute_wc(u32 width_px, u8 size_constr_p, u8 size_constr_b) +{ + /* Calculate the word count for each long packet */ + return (((width_px / size_constr_p) * size_constr_b) & 0xffff); +} + +static u32 compute_unpacked_bytes(u32 wc, u8 bits_per_pclk) +{ + /* Number of PCLK cycles needed to transfer a line + * with each PCLK cycle, 4 Bytes are sent through the PPL module + */ + return ((wc * 8) / bits_per_pclk) * 4; +} + +static u32 mipi_tx_fg_section_cfg_regs(struct kmb_dsi *kmb_dsi, + u8 frame_id, u8 section, + u32 height_lines, u32 unpacked_bytes, + struct mipi_tx_frame_sect_phcfg *ph_cfg) +{ + u32 cfg = 0; + u32 ctrl_no = MIPI_CTRL6; + u32 reg_adr; + + /* Frame section packet header */ + /* Word count bits [15:0] */ + cfg = (ph_cfg->wc & MIPI_TX_SECT_WC_MASK) << 0; + + /* Data type (bits [21:16]) */ + cfg |= ((ph_cfg->data_type & MIPI_TX_SECT_DT_MASK) + << MIPI_TX_SECT_DT_SHIFT); + + /* Virtual channel (bits [23:22]) */ + cfg |= ((ph_cfg->vchannel & MIPI_TX_SECT_VC_MASK) + << MIPI_TX_SECT_VC_SHIFT); + + /* Data mode (bits [24:25]) */ + cfg |= ((ph_cfg->data_mode & MIPI_TX_SECT_DM_MASK) + << MIPI_TX_SECT_DM_SHIFT); + if (ph_cfg->dma_packed) + cfg |= MIPI_TX_SECT_DMA_PACKED; + + dev_dbg(kmb_dsi->dev, + "ctrl=%d frame_id=%d section=%d cfg=%x packed=%d\n", + ctrl_no, frame_id, section, cfg, ph_cfg->dma_packed); + kmb_write_mipi(kmb_dsi, + (MIPI_TXm_HS_FGn_SECTo_PH(ctrl_no, frame_id, section)), + cfg); + + /* Unpacked bytes */ + + /* There are 4 frame generators and each fg has 4 sections + * There are 2 registers for unpacked bytes (# bytes each + * section occupies in memory) + * REG_UNPACKED_BYTES0: [15:0]-BYTES0, [31:16]-BYTES1 + * REG_UNPACKED_BYTES1: [15:0]-BYTES2, [31:16]-BYTES3 + */ + reg_adr = + MIPI_TXm_HS_FGn_SECT_UNPACKED_BYTES0(ctrl_no, + frame_id) + (section / 2) * 4; + kmb_write_bits_mipi(kmb_dsi, reg_adr, (section % 2) * 16, 16, + unpacked_bytes); + dev_dbg(kmb_dsi->dev, + "unpacked_bytes = %d, wordcount = %d\n", unpacked_bytes, + ph_cfg->wc); + + /* Line config */ + reg_adr = MIPI_TXm_HS_FGn_SECTo_LINE_CFG(ctrl_no, frame_id, section); + kmb_write_mipi(kmb_dsi, reg_adr, height_lines); + return 0; +} + +static u32 mipi_tx_fg_section_cfg(struct kmb_dsi *kmb_dsi, + u8 frame_id, u8 section, + struct mipi_tx_frame_section_cfg *frame_scfg, + u32 *bits_per_pclk, u32 *wc) +{ + u32 ret = 0; + u32 unpacked_bytes; + struct mipi_data_type_params data_type_parameters; + struct mipi_tx_frame_sect_phcfg ph_cfg; + + ret = mipi_get_datatype_params(frame_scfg->data_type, + frame_scfg->data_mode, + &data_type_parameters); + if (ret) + return ret; + + /* Packet width has to be a multiple of the minimum packet width + * (in pixels) set for each data type + */ + if (frame_scfg->width_pixels % + data_type_parameters.size_constraint_pixels != 0) + return -EINVAL; + + *wc = compute_wc(frame_scfg->width_pixels, + data_type_parameters.size_constraint_pixels, + data_type_parameters.size_constraint_bytes); + unpacked_bytes = compute_unpacked_bytes(*wc, + data_type_parameters.bits_per_pclk); + ph_cfg.wc = *wc; + ph_cfg.data_mode = frame_scfg->data_mode; + ph_cfg.data_type = frame_scfg->data_type; + ph_cfg.dma_packed = frame_scfg->dma_packed; + 
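+	/*
+	 * Illustrative example (values assumed): for a 1920-pixel-wide RGB888
+	 * section the table above gives size_constraint_pixels = 1,
+	 * size_constraint_bytes = 3 and bits_per_pclk = 24, so
+	 *   wc             = (1920 / 1) * 3        = 5760 bytes per packet
+	 *   unpacked_bytes = ((5760 * 8) / 24) * 4 = 7680
+	 */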
ph_cfg.vchannel = frame_id; + + mipi_tx_fg_section_cfg_regs(kmb_dsi, frame_id, section, + frame_scfg->height_lines, + unpacked_bytes, &ph_cfg); + + /* Caller needs bits_per_clk for additional caluclations */ + *bits_per_pclk = data_type_parameters.bits_per_pclk; + + return 0; +} + +static void mipi_tx_fg_cfg_regs(struct kmb_dsi *kmb_dsi, u8 frame_gen, + struct mipi_tx_frame_timing_cfg *fg_cfg) +{ + u32 sysclk; + u32 ppl_llp_ratio; + u32 ctrl_no = MIPI_CTRL6, reg_adr, val, offset; + + /* 500 Mhz system clock minus 50 to account for the difference in + * MIPI clock speed in RTL tests + */ + sysclk = kmb_dsi->sys_clk_mhz - 50; + + /* PPL-Pixel Packing Layer, LLP-Low Level Protocol + * Frame genartor timing parameters are clocked on the system clock, + * whereas as the equivalent parameters in the LLP blocks are clocked + * on LLP Tx clock from the D-PHY - BYTE clock + */ + + /* Multiply by 1000 to maintain precision */ + ppl_llp_ratio = ((fg_cfg->bpp / 8) * sysclk * 1000) / + ((fg_cfg->lane_rate_mbps / 8) * fg_cfg->active_lanes); + + dev_dbg(kmb_dsi->dev, "ppl_llp_ratio=%d\n", ppl_llp_ratio); + dev_dbg(kmb_dsi->dev, "bpp=%d sysclk=%d lane-rate=%d active-lanes=%d\n", + fg_cfg->bpp, sysclk, fg_cfg->lane_rate_mbps, + fg_cfg->active_lanes); + + /* Frame generator number of lines */ + reg_adr = MIPI_TXm_HS_FGn_NUM_LINES(ctrl_no, frame_gen); + kmb_write_mipi(kmb_dsi, reg_adr, fg_cfg->v_active); + + /* vsync width + * There are 2 registers for vsync width (VSA in lines for + * channels 0-3) + * REG_VSYNC_WIDTH0: [15:0]-VSA for channel0, [31:16]-VSA for channel1 + * REG_VSYNC_WIDTH1: [15:0]-VSA for channel2, [31:16]-VSA for channel3 + */ + offset = (frame_gen % 2) * 16; + reg_adr = MIPI_TXm_HS_VSYNC_WIDTHn(ctrl_no, frame_gen / 2); + kmb_write_bits_mipi(kmb_dsi, reg_adr, offset, 16, fg_cfg->vsync_width); + + /* vertical backporch (vbp) */ + reg_adr = MIPI_TXm_HS_V_BACKPORCHESn(ctrl_no, frame_gen / 2); + kmb_write_bits_mipi(kmb_dsi, reg_adr, offset, 16, fg_cfg->v_backporch); + + /* vertical frontporch (vfp) */ + reg_adr = MIPI_TXm_HS_V_FRONTPORCHESn(ctrl_no, frame_gen / 2); + kmb_write_bits_mipi(kmb_dsi, reg_adr, offset, 16, fg_cfg->v_frontporch); + + /* vertical active (vactive) */ + reg_adr = MIPI_TXm_HS_V_ACTIVEn(ctrl_no, frame_gen / 2); + kmb_write_bits_mipi(kmb_dsi, reg_adr, offset, 16, fg_cfg->v_active); + + /* hsync width */ + reg_adr = MIPI_TXm_HS_HSYNC_WIDTHn(ctrl_no, frame_gen); + kmb_write_mipi(kmb_dsi, reg_adr, + (fg_cfg->hsync_width * ppl_llp_ratio) / 1000); + + /* horizontal backporch (hbp) */ + reg_adr = MIPI_TXm_HS_H_BACKPORCHn(ctrl_no, frame_gen); + kmb_write_mipi(kmb_dsi, reg_adr, + (fg_cfg->h_backporch * ppl_llp_ratio) / 1000); + + /* horizontal frontporch (hfp) */ + reg_adr = MIPI_TXm_HS_H_FRONTPORCHn(ctrl_no, frame_gen); + kmb_write_mipi(kmb_dsi, reg_adr, + (fg_cfg->h_frontporch * ppl_llp_ratio) / 1000); + + /* horizontal active (ha) */ + reg_adr = MIPI_TXm_HS_H_ACTIVEn(ctrl_no, frame_gen); + + /* convert h_active which is wc in bytes to cycles */ + val = (fg_cfg->h_active * sysclk * 1000) / + ((fg_cfg->lane_rate_mbps / 8) * fg_cfg->active_lanes); + val /= 1000; + kmb_write_mipi(kmb_dsi, reg_adr, val); + + /* llp hsync width */ + reg_adr = MIPI_TXm_HS_LLP_HSYNC_WIDTHn(ctrl_no, frame_gen); + kmb_write_mipi(kmb_dsi, reg_adr, fg_cfg->hsync_width * (fg_cfg->bpp / 8)); + + /* llp h backporch */ + reg_adr = MIPI_TXm_HS_LLP_H_BACKPORCHn(ctrl_no, frame_gen); + kmb_write_mipi(kmb_dsi, reg_adr, fg_cfg->h_backporch * (fg_cfg->bpp / 8)); + + /* llp h frontporch */ + reg_adr = 
MIPI_TXm_HS_LLP_H_FRONTPORCHn(ctrl_no, frame_gen); + kmb_write_mipi(kmb_dsi, reg_adr, + fg_cfg->h_frontporch * (fg_cfg->bpp / 8)); +} + +static void mipi_tx_fg_cfg(struct kmb_dsi *kmb_dsi, u8 frame_gen, + u8 active_lanes, u32 bpp, u32 wc, + u32 lane_rate_mbps, struct mipi_tx_frame_cfg *fg_cfg) +{ + u32 i, fg_num_lines = 0; + struct mipi_tx_frame_timing_cfg fg_t_cfg; + + /* Calculate the total frame generator number of + * lines based on it's active sections + */ + for (i = 0; i < MIPI_TX_FRAME_GEN_SECTIONS; i++) { + if (fg_cfg->sections[i]) + fg_num_lines += fg_cfg->sections[i]->height_lines; + } + + fg_t_cfg.bpp = bpp; + fg_t_cfg.lane_rate_mbps = lane_rate_mbps; + fg_t_cfg.hsync_width = fg_cfg->hsync_width; + fg_t_cfg.h_backporch = fg_cfg->h_backporch; + fg_t_cfg.h_frontporch = fg_cfg->h_frontporch; + fg_t_cfg.h_active = wc; + fg_t_cfg.vsync_width = fg_cfg->vsync_width; + fg_t_cfg.v_backporch = fg_cfg->v_backporch; + fg_t_cfg.v_frontporch = fg_cfg->v_frontporch; + fg_t_cfg.v_active = fg_num_lines; + fg_t_cfg.active_lanes = active_lanes; + + /* Apply frame generator timing setting */ + mipi_tx_fg_cfg_regs(kmb_dsi, frame_gen, &fg_t_cfg); +} + +static void mipi_tx_multichannel_fifo_cfg(struct kmb_dsi *kmb_dsi, + u8 active_lanes, u8 vchannel_id) +{ + u32 fifo_size, fifo_rthreshold; + u32 ctrl_no = MIPI_CTRL6; + + /* Clear all mc fifo channel sizes and thresholds */ + kmb_write_mipi(kmb_dsi, MIPI_TX_HS_MC_FIFO_CTRL_EN, 0); + kmb_write_mipi(kmb_dsi, MIPI_TX_HS_MC_FIFO_CHAN_ALLOC0, 0); + kmb_write_mipi(kmb_dsi, MIPI_TX_HS_MC_FIFO_CHAN_ALLOC1, 0); + kmb_write_mipi(kmb_dsi, MIPI_TX_HS_MC_FIFO_RTHRESHOLD0, 0); + kmb_write_mipi(kmb_dsi, MIPI_TX_HS_MC_FIFO_RTHRESHOLD1, 0); + + fifo_size = ((active_lanes > MIPI_D_LANES_PER_DPHY) ? + MIPI_CTRL_4LANE_MAX_MC_FIFO_LOC : + MIPI_CTRL_2LANE_MAX_MC_FIFO_LOC) - 1; + + /* MC fifo size for virtual channels 0-3 + * REG_MC_FIFO_CHAN_ALLOC0: [8:0]-channel0, [24:16]-channel1 + * REG_MC_FIFO_CHAN_ALLOC1: [8:0]-2, [24:16]-channel3 + */ + SET_MC_FIFO_CHAN_ALLOC(kmb_dsi, ctrl_no, vchannel_id, fifo_size); + + /* Set threshold to half the fifo size, actual size=size*16 */ + fifo_rthreshold = ((fifo_size) * 8) & BIT_MASK_16; + SET_MC_FIFO_RTHRESHOLD(kmb_dsi, ctrl_no, vchannel_id, fifo_rthreshold); + + /* Enable the MC FIFO channel corresponding to the Virtual Channel */ + kmb_set_bit_mipi(kmb_dsi, MIPI_TXm_HS_MC_FIFO_CTRL_EN(ctrl_no), + vchannel_id); +} + +static void mipi_tx_ctrl_cfg(struct kmb_dsi *kmb_dsi, u8 fg_id, + struct mipi_ctrl_cfg *ctrl_cfg) +{ + u32 sync_cfg = 0, ctrl = 0, fg_en; + u32 ctrl_no = MIPI_CTRL6; + + /* MIPI_TX_HS_SYNC_CFG */ + if (ctrl_cfg->tx_ctrl_cfg.line_sync_pkt_en) + sync_cfg |= LINE_SYNC_PKT_ENABLE; + if (ctrl_cfg->tx_ctrl_cfg.frame_counter_active) + sync_cfg |= FRAME_COUNTER_ACTIVE; + if (ctrl_cfg->tx_ctrl_cfg.line_counter_active) + sync_cfg |= LINE_COUNTER_ACTIVE; + if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->v_blanking) + sync_cfg |= DSI_V_BLANKING; + if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->hsa_blanking) + sync_cfg |= DSI_HSA_BLANKING; + if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->hbp_blanking) + sync_cfg |= DSI_HBP_BLANKING; + if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->hfp_blanking) + sync_cfg |= DSI_HFP_BLANKING; + if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->sync_pulse_eventn) + sync_cfg |= DSI_SYNC_PULSE_EVENTN; + if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->lpm_first_vsa_line) + sync_cfg |= DSI_LPM_FIRST_VSA_LINE; + if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->lpm_last_vfp_line) + sync_cfg |= DSI_LPM_LAST_VFP_LINE; + + /* Enable frame generator */ + fg_en = 1 << fg_id; + 
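+	/*
+	 * Illustrative note: fg_en is a one-hot per-frame-generator mask
+	 * (fg_id 0..3 -> 0x1, 0x2, 0x4, 0x8); the same mask is reused below
+	 * for ALWAYS_USE_HACT() and HACT_WAIT_STOP().
+	 */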
sync_cfg |= FRAME_GEN_EN(fg_en); + + if (ctrl_cfg->tx_ctrl_cfg.tx_always_use_hact) + sync_cfg |= ALWAYS_USE_HACT(fg_en); + if (ctrl_cfg->tx_ctrl_cfg.tx_hact_wait_stop) + sync_cfg |= HACT_WAIT_STOP(fg_en); + + dev_dbg(kmb_dsi->dev, "sync_cfg=%d fg_en=%d\n", sync_cfg, fg_en); + + /* MIPI_TX_HS_CTRL */ + + /* type:DSI, source:LCD */ + ctrl = HS_CTRL_EN | TX_SOURCE; + ctrl |= LCD_VC(fg_id); + ctrl |= ACTIVE_LANES(ctrl_cfg->active_lanes - 1); + if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->eotp_en) + ctrl |= DSI_EOTP_EN; + if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->hfp_blank_en) + ctrl |= DSI_CMD_HFP_EN; + + /*67 ns stop time */ + ctrl |= HSEXIT_CNT(0x43); + + kmb_write_mipi(kmb_dsi, MIPI_TXm_HS_SYNC_CFG(ctrl_no), sync_cfg); + kmb_write_mipi(kmb_dsi, MIPI_TXm_HS_CTRL(ctrl_no), ctrl); +} + +static u32 mipi_tx_init_cntrl(struct kmb_dsi *kmb_dsi, + struct mipi_ctrl_cfg *ctrl_cfg) +{ + u32 ret = 0; + u8 active_vchannels = 0; + u8 frame_id, sect; + u32 bits_per_pclk = 0; + u32 word_count = 0; + struct mipi_tx_frame_cfg *frame; + + /* This is the order to initialize MIPI TX: + * 1. set frame section parameters + * 2. set frame specific parameters + * 3. connect lcd to mipi + * 4. multi channel fifo cfg + * 5. set mipitxcctrlcfg + */ + + for (frame_id = 0; frame_id < 4; frame_id++) { + frame = ctrl_cfg->tx_ctrl_cfg.frames[frame_id]; + + /* Find valid frame, assume only one valid frame */ + if (!frame) + continue; + + /* Frame Section configuration */ + /* TODO - assume there is only one valid section in a frame, + * so bits_per_pclk and word_count are only set once + */ + for (sect = 0; sect < MIPI_CTRL_VIRTUAL_CHANNELS; sect++) { + if (!frame->sections[sect]) + continue; + + ret = mipi_tx_fg_section_cfg(kmb_dsi, frame_id, sect, + frame->sections[sect], + &bits_per_pclk, + &word_count); + if (ret) + return ret; + } + + /* Set frame specific parameters */ + mipi_tx_fg_cfg(kmb_dsi, frame_id, ctrl_cfg->active_lanes, + bits_per_pclk, word_count, + ctrl_cfg->lane_rate_mbps, frame); + + active_vchannels++; + + /* Stop iterating as only one virtual channel + * shall be used for LCD connection + */ + break; + } + + if (active_vchannels == 0) + return -EINVAL; + /* Multi-Channel FIFO Configuration */ + mipi_tx_multichannel_fifo_cfg(kmb_dsi, ctrl_cfg->active_lanes, frame_id); + + /* Frame Generator Enable */ + mipi_tx_ctrl_cfg(kmb_dsi, frame_id, ctrl_cfg); + + return ret; +} + +static void test_mode_send(struct kmb_dsi *kmb_dsi, u32 dphy_no, + u32 test_code, u32 test_data) +{ + /* Steps to send test code: + * - set testclk HIGH + * - set testdin with test code + * - set testen HIGH + * - set testclk LOW + * - set testen LOW + */ + + /* Set testclk high */ + SET_DPHY_TEST_CTRL1_CLK(kmb_dsi, dphy_no); + + /* Set testdin */ + SET_TEST_DIN0_3(kmb_dsi, dphy_no, test_code); + + /* Set testen high */ + SET_DPHY_TEST_CTRL1_EN(kmb_dsi, dphy_no); + + /* Set testclk low */ + CLR_DPHY_TEST_CTRL1_CLK(kmb_dsi, dphy_no); + + /* Set testen low */ + CLR_DPHY_TEST_CTRL1_EN(kmb_dsi, dphy_no); + + if (test_code) { + /* Steps to send test data: + * - set testen LOW + * - set testclk LOW + * - set testdin with data + * - set testclk HIGH + */ + + /* Set testen low */ + CLR_DPHY_TEST_CTRL1_EN(kmb_dsi, dphy_no); + + /* Set testclk low */ + CLR_DPHY_TEST_CTRL1_CLK(kmb_dsi, dphy_no); + + /* Set data in testdin */ + kmb_write_mipi(kmb_dsi, + DPHY_TEST_DIN0_3 + ((dphy_no / 0x4) * 0x4), + test_data << ((dphy_no % 4) * 8)); + + /* Set testclk high */ + SET_DPHY_TEST_CTRL1_CLK(kmb_dsi, dphy_no); + } +} + +static inline void + 
set_test_mode_src_osc_freq_target_low_bits(struct kmb_dsi *kmb_dsi, + u32 dphy_no, + u32 freq) +{ + /* Typical rise/fall time=166, refer Table 1207 databook, + * sr_osc_freq_target[7:0] + */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_SLEW_RATE_DDL_CYCLES, + (freq & 0x7f)); +} + +static inline void + set_test_mode_src_osc_freq_target_hi_bits(struct kmb_dsi *kmb_dsi, + u32 dphy_no, + u32 freq) +{ + u32 data; + + /* Flag this as high nibble */ + data = ((freq >> 6) & 0x1f) | (1 << 7); + + /* Typical rise/fall time=166, refer Table 1207 databook, + * sr_osc_freq_target[11:7] + */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_SLEW_RATE_DDL_CYCLES, data); +} + +static void mipi_tx_get_vco_params(struct vco_params *vco) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(vco_table); i++) { + if (vco->freq < vco_table[i].freq) { + *vco = vco_table[i]; + return; + } + } + + WARN_ONCE(1, "Invalid vco freq = %u for PLL setup\n", vco->freq); +} + +static void mipi_tx_pll_setup(struct kmb_dsi *kmb_dsi, u32 dphy_no, + u32 ref_clk_mhz, u32 target_freq_mhz) +{ + u32 best_n = 0, best_m = 0; + u32 n = 0, m = 0, div = 0, delta, freq = 0, t_freq; + u32 best_freq_delta = 3000; + + /* pll_ref_clk: - valid range: 2~64 MHz; Typically 24 MHz + * Fvco: - valid range: 320~1250 MHz (Gen3 D-PHY) + * Fout: - valid range: 40~1250 MHz (Gen3 D-PHY) + * n: - valid range [0 15] + * N: - N = n + 1 + * -valid range: [1 16] + * -conditions: - (pll_ref_clk / N) >= 2 MHz + * -(pll_ref_clk / N) <= 8 MHz + * m: valid range [62 623] + * M: - M = m + 2 + * -valid range [64 625] + * -Fvco = (M/N) * pll_ref_clk + */ + struct vco_params vco_p = { + .range = 0, + .divider = 1, + }; + + vco_p.freq = target_freq_mhz; + mipi_tx_get_vco_params(&vco_p); + + /* Search pll n parameter */ + for (n = PLL_N_MIN; n <= PLL_N_MAX; n++) { + /* Calculate the pll input frequency division ratio + * multiply by 1000 for precision - + * no floating point, add n for rounding + */ + div = ((ref_clk_mhz * 1000) + n) / (n + 1); + + /* Found a valid n parameter */ + if ((div < 2000 || div > 8000)) + continue; + + /* Search pll m parameter */ + for (m = PLL_M_MIN; m <= PLL_M_MAX; m++) { + /* Calculate the Fvco(DPHY PLL output frequency) + * using the current n,m params + */ + freq = div * (m + 2); + freq /= 1000; + + /* Trim the potential pll freq to max supported */ + if (freq > PLL_FVCO_MAX) + continue; + + delta = abs(freq - target_freq_mhz); + + /* Select the best (closest to target pll freq) + * n,m parameters so far + */ + if (delta < best_freq_delta) { + best_n = n; + best_m = m; + best_freq_delta = delta; + } + } + } + + /* Program vco_cntrl parameter + * PLL_VCO_Control[5:0] = pll_vco_cntrl_ovr, + * PLL_VCO_Control[6] = pll_vco_cntrl_ovr_en + */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_VCO_CTRL, (vco_p.range + | (1 << 6))); + + /* Program m, n pll parameters */ + dev_dbg(kmb_dsi->dev, "m = %d n = %d\n", best_m, best_n); + + /* PLL_Input_Divider_Ratio[3:0] = pll_n_ovr */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_INPUT_DIVIDER, + (best_n & 0x0f)); + + /* m - low nibble PLL_Loop_Divider_Ratio[4:0] + * pll_m_ovr[4:0] + */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_FEEDBACK_DIVIDER, + (best_m & 0x1f)); + + /* m - high nibble PLL_Loop_Divider_Ratio[4:0] + * pll_m_ovr[9:5] + */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_FEEDBACK_DIVIDER, + ((best_m >> 5) & 0x1f) | PLL_FEEDBACK_DIVIDER_HIGH); + + /* Enable overwrite of n,m parameters :pll_n_ovr_en, pll_m_ovr_en */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_OUTPUT_CLK_SEL, + 
(PLL_N_OVR_EN | PLL_M_OVR_EN)); + + /* Program Charge-Pump parameters */ + + /* pll_prop_cntrl-fixed values for prop_cntrl from DPHY doc */ + t_freq = target_freq_mhz * vco_p.divider; + test_mode_send(kmb_dsi, dphy_no, + TEST_CODE_PLL_PROPORTIONAL_CHARGE_PUMP_CTRL, + ((t_freq > 1150) ? 0x0C : 0x0B)); + + /* pll_int_cntrl-fixed value for int_cntrl from DPHY doc */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_INTEGRAL_CHARGE_PUMP_CTRL, + 0x00); + + /* pll_gmp_cntrl-fixed value for gmp_cntrl from DPHY doci */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_GMP_CTRL, 0x10); + + /* pll_cpbias_cntrl-fixed value for cpbias_cntrl from DPHY doc */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_CHARGE_PUMP_BIAS, 0x10); + + /* pll_th1 -Lock Detector Phase error threshold, + * document gives fixed value + */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_PHASE_ERR_CTRL, 0x02); + + /* PLL Lock Configuration */ + + /* pll_th2 - Lock Filter length, document gives fixed value */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_LOCK_FILTER, 0x60); + + /* pll_th3- PLL Unlocking filter, document gives fixed value */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_UNLOCK_FILTER, 0x03); + + /* pll_lock_sel-PLL Lock Detector Selection, + * document gives fixed value + */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_LOCK_DETECTOR, 0x02); +} + +static void set_slewrate_gt_1500(struct kmb_dsi *kmb_dsi, u32 dphy_no) +{ + u32 test_code = 0, test_data = 0; + /* Bypass slew rate calibration algorithm + * bits[1:0} srcal_en_ovr_en, srcal_en_ovr + */ + test_code = TEST_CODE_SLEW_RATE_OVERRIDE_CTRL; + test_data = 0x02; + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); + + /* Disable slew rate calibration */ + test_code = TEST_CODE_SLEW_RATE_DDL_LOOP_CTRL; + test_data = 0x00; + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); +} + +static void set_slewrate_gt_1000(struct kmb_dsi *kmb_dsi, u32 dphy_no) +{ + u32 test_code = 0, test_data = 0; + + /* BitRate: > 1 Gbps && <= 1.5 Gbps: - slew rate control ON + * typical rise/fall times: 166 ps + */ + + /* Do not bypass slew rate calibration algorithm + * bits[1:0}=srcal_en_ovr_en, srcal_en_ovr, bit[6]=sr_range + */ + test_code = TEST_CODE_SLEW_RATE_OVERRIDE_CTRL; + test_data = (0x03 | (1 << 6)); + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); + + /* Enable slew rate calibration */ + test_code = TEST_CODE_SLEW_RATE_DDL_LOOP_CTRL; + test_data = 0x01; + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); + + /* Set sr_osc_freq_target[6:0] low nibble + * typical rise/fall time=166, refer Table 1207 databook + */ + test_code = TEST_CODE_SLEW_RATE_DDL_CYCLES; + test_data = (0x72f & 0x7f); + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); + + /* Set sr_osc_freq_target[11:7] high nibble + * Typical rise/fall time=166, refer Table 1207 databook + */ + test_code = TEST_CODE_SLEW_RATE_DDL_CYCLES; + test_data = ((0x72f >> 6) & 0x1f) | (1 << 7); + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); +} + +static void set_slewrate_lt_1000(struct kmb_dsi *kmb_dsi, u32 dphy_no) +{ + u32 test_code = 0, test_data = 0; + + /* lane_rate_mbps <= 1000 Mbps + * BitRate: <= 1 Gbps: + * - slew rate control ON + * - typical rise/fall times: 225 ps + */ + + /* Do not bypass slew rate calibration algorithm */ + test_code = TEST_CODE_SLEW_RATE_OVERRIDE_CTRL; + test_data = (0x03 | (1 << 6)); + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); + + /* Enable slew rate calibration */ + test_code = TEST_CODE_SLEW_RATE_DDL_LOOP_CTRL; + test_data = 0x01; 
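+	/*
+	 * Illustrative note: the sr_osc_freq_target value 0x523 sent below is
+	 * split into a low part (0x523 & 0x7f = 0x23) and a bit-7-flagged
+	 * high part (((0x523 >> 6) & 0x1f) | 0x80 = 0x94).
+	 */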
+ test_mode_send(kmb_dsi, dphy_no, test_code, test_data); + + /* Typical rise/fall time=255, refer Table 1207 databook */ + test_code = TEST_CODE_SLEW_RATE_DDL_CYCLES; + test_data = (0x523 & 0x7f); + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); + + /* Set sr_osc_freq_target[11:7] high nibble */ + test_code = TEST_CODE_SLEW_RATE_DDL_CYCLES; + test_data = ((0x523 >> 6) & 0x1f) | (1 << 7); + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); +} + +static void setup_pll(struct kmb_dsi *kmb_dsi, u32 dphy_no, + struct mipi_ctrl_cfg *cfg) +{ + u32 test_code = 0, test_data = 0; + + /* Set PLL regulator in bypass */ + test_code = TEST_CODE_PLL_ANALOG_PROG; + test_data = 0x01; + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); + + /* PLL Parameters Setup */ + mipi_tx_pll_setup(kmb_dsi, dphy_no, cfg->ref_clk_khz / 1000, + cfg->lane_rate_mbps / 2); + + /* Set clksel */ + kmb_write_bits_mipi(kmb_dsi, DPHY_INIT_CTRL1, PLL_CLKSEL_0, 2, 0x01); + + /* Set pll_shadow_control */ + kmb_set_bit_mipi(kmb_dsi, DPHY_INIT_CTRL1, PLL_SHADOW_CTRL); +} + +static void set_lane_data_rate(struct kmb_dsi *kmb_dsi, u32 dphy_no, + struct mipi_ctrl_cfg *cfg) +{ + u32 i, test_code = 0, test_data = 0; + + for (i = 0; i < MIPI_DPHY_DEFAULT_BIT_RATES; i++) { + if (mipi_hs_freq_range[i].default_bit_rate_mbps < + cfg->lane_rate_mbps) + continue; + + /* Send the test code and data */ + /* bit[6:0] = hsfreqrange_ovr bit[7] = hsfreqrange_ovr_en */ + test_code = TEST_CODE_HS_FREQ_RANGE_CFG; + test_data = (mipi_hs_freq_range[i].hsfreqrange_code & 0x7f) | + (1 << 7); + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); + break; + } +} + +static void dphy_init_sequence(struct kmb_dsi *kmb_dsi, + struct mipi_ctrl_cfg *cfg, u32 dphy_no, + int active_lanes, enum dphy_mode mode) +{ + u32 test_code = 0, test_data = 0, val; + + /* Set D-PHY in shutdown mode */ + /* Assert RSTZ signal */ + CLR_DPHY_INIT_CTRL0(kmb_dsi, dphy_no, RESETZ); + + /* Assert SHUTDOWNZ signal */ + CLR_DPHY_INIT_CTRL0(kmb_dsi, dphy_no, SHUTDOWNZ); + val = kmb_read_mipi(kmb_dsi, DPHY_INIT_CTRL0); + + /* Init D-PHY_n + * Pulse testclear signal to make sure the d-phy configuration + * starts from a clean base + */ + CLR_DPHY_TEST_CTRL0(kmb_dsi, dphy_no); + ndelay(15); + SET_DPHY_TEST_CTRL0(kmb_dsi, dphy_no); + ndelay(15); + CLR_DPHY_TEST_CTRL0(kmb_dsi, dphy_no); + ndelay(15); + + /* Set mastermacro bit - Master or slave mode */ + test_code = TEST_CODE_MULTIPLE_PHY_CTRL; + + /* DPHY has its own clock lane enabled (master) */ + if (mode == MIPI_DPHY_MASTER) + test_data = 0x01; + else + test_data = 0x00; + + /* Send the test code and data */ + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); + + /* Set the lane data rate */ + set_lane_data_rate(kmb_dsi, dphy_no, cfg); + + /* High-Speed Tx Slew Rate Calibration + * BitRate: > 1.5 Gbps && <= 2.5 Gbps: slew rate control OFF + */ + if (cfg->lane_rate_mbps > 1500) + set_slewrate_gt_1500(kmb_dsi, dphy_no); + else if (cfg->lane_rate_mbps > 1000) + set_slewrate_gt_1000(kmb_dsi, dphy_no); + else + set_slewrate_lt_1000(kmb_dsi, dphy_no); + + /* Set cfgclkfreqrange */ + val = (((cfg->cfg_clk_khz / 1000) - 17) * 4) & 0x3f; + SET_DPHY_FREQ_CTRL0_3(kmb_dsi, dphy_no, val); + + /* Enable config clk for the corresponding d-phy */ + kmb_set_bit_mipi(kmb_dsi, DPHY_CFG_CLK_EN, dphy_no); + + /* PLL setup */ + if (mode == MIPI_DPHY_MASTER) + setup_pll(kmb_dsi, dphy_no, cfg); + + /* Send NORMAL OPERATION test code */ + test_code = 0x0; + test_data = 0x0; + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); + + 
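+	/*
+	 * Illustrative example: with the 24 MHz config clock this driver uses
+	 * (cfg_clk_khz = 24000), the cfgclkfreqrange value programmed above is
+	 * ((24 - 17) * 4) & 0x3f = 28 = 0x1c.
+	 */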
/* Configure BASEDIR for data lanes + * NOTE: basedir only applies to LANE_0 of each D-PHY. + * The other lanes keep their direction based on the D-PHY type, + * either Rx or Tx. + * bits[5:0] - BaseDir: 1 = Rx + * bits[9:6] - BaseDir: 0 = Tx + */ + kmb_write_bits_mipi(kmb_dsi, DPHY_INIT_CTRL2, 0, 9, 0x03f); + ndelay(15); + + /* Enable CLOCK LANE + * Clock lane should be enabled regardless of the direction + * set for the D-PHY (Rx/Tx) + */ + kmb_set_bit_mipi(kmb_dsi, DPHY_INIT_CTRL2, 12 + dphy_no); + + /* Enable DATA LANES */ + kmb_write_bits_mipi(kmb_dsi, DPHY_ENABLE, dphy_no * 2, 2, + ((1 << active_lanes) - 1)); + + ndelay(15); + + /* Take D-PHY out of shutdown mode */ + /* Deassert SHUTDOWNZ signal */ + SET_DPHY_INIT_CTRL0(kmb_dsi, dphy_no, SHUTDOWNZ); + ndelay(15); + + /* Deassert RSTZ signal */ + SET_DPHY_INIT_CTRL0(kmb_dsi, dphy_no, RESETZ); +} + +static void dphy_wait_fsm(struct kmb_dsi *kmb_dsi, u32 dphy_no, + enum dphy_tx_fsm fsm_state) +{ + enum dphy_tx_fsm val = DPHY_TX_POWERDWN; + int i = 0; + int status = 1; + + do { + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_FSM_CONTROL, 0x80); + + val = GET_TEST_DOUT4_7(kmb_dsi, dphy_no); + i++; + if (i > TIMEOUT) { + status = 0; + break; + } + } while (val != fsm_state); + + dev_dbg(kmb_dsi->dev, "%s: dphy %d val = %x", __func__, dphy_no, val); + dev_dbg(kmb_dsi->dev, "* DPHY %d WAIT_FSM %s *", + dphy_no, status ? "SUCCESS" : "FAILED"); +} + +static void wait_init_done(struct kmb_dsi *kmb_dsi, u32 dphy_no, + u32 active_lanes) +{ + u32 stopstatedata = 0; + u32 data_lanes = (1 << active_lanes) - 1; + int i = 0; + int status = 1; + + do { + stopstatedata = GET_STOPSTATE_DATA(kmb_dsi, dphy_no) + & data_lanes; + + /* TODO-need to add a time out and return failure */ + i++; + + if (i > TIMEOUT) { + status = 0; + dev_dbg(kmb_dsi->dev, + "! WAIT_INIT_DONE: TIMING OUT!(err_stat=%d)", + kmb_read_mipi(kmb_dsi, MIPI_DPHY_ERR_STAT6_7)); + break; + } + } while (stopstatedata != data_lanes); + + dev_dbg(kmb_dsi->dev, "* DPHY %d INIT - %s *", + dphy_no, status ? "SUCCESS" : "FAILED"); +} + +static void wait_pll_lock(struct kmb_dsi *kmb_dsi, u32 dphy_no) +{ + int i = 0; + int status = 1; + + do { + /* TODO-need to add a time out and return failure */ + i++; + if (i > TIMEOUT) { + status = 0; + dev_dbg(kmb_dsi->dev, "%s: timing out", __func__); + break; + } + } while (!GET_PLL_LOCK(kmb_dsi, dphy_no)); + + dev_dbg(kmb_dsi->dev, "* PLL Locked for DPHY %d - %s *", + dphy_no, status ? "SUCCESS" : "FAILED"); +} + +static u32 mipi_tx_init_dphy(struct kmb_dsi *kmb_dsi, + struct mipi_ctrl_cfg *cfg) +{ + u32 dphy_no = MIPI_DPHY6; + + /* Multiple D-PHYs needed */ + if (cfg->active_lanes > MIPI_DPHY_D_LANES) { + /* + *Initialization for Tx aggregation mode is done according to + *a. start init PHY1 + *b. poll for PHY1 FSM state LOCK + * b1. reg addr 0x03[3:0] - state_main[3:0] == 5 (LOCK) + *c. poll for PHY1 calibrations done : + * c1. termination calibration lower section: addr 0x22[5] + * - rescal_done + * c2. slewrate calibration (if data rate < = 1500 Mbps): + * addr 0xA7[3:2] - srcal_done, sr_finished + *d. start init PHY0 + *e. poll for PHY0 stopstate + *f. 
poll for PHY1 stopstate + */ + /* PHY #N+1 ('slave') */ + + dphy_init_sequence(kmb_dsi, cfg, dphy_no + 1, + (cfg->active_lanes - MIPI_DPHY_D_LANES), + MIPI_DPHY_SLAVE); + dphy_wait_fsm(kmb_dsi, dphy_no + 1, DPHY_TX_LOCK); + + /* PHY #N master */ + dphy_init_sequence(kmb_dsi, cfg, dphy_no, MIPI_DPHY_D_LANES, + MIPI_DPHY_MASTER); + + /* Wait for DPHY init to complete */ + wait_init_done(kmb_dsi, dphy_no, MIPI_DPHY_D_LANES); + wait_init_done(kmb_dsi, dphy_no + 1, + cfg->active_lanes - MIPI_DPHY_D_LANES); + wait_pll_lock(kmb_dsi, dphy_no); + wait_pll_lock(kmb_dsi, dphy_no + 1); + dphy_wait_fsm(kmb_dsi, dphy_no, DPHY_TX_IDLE); + } else { /* Single DPHY */ + dphy_init_sequence(kmb_dsi, cfg, dphy_no, cfg->active_lanes, + MIPI_DPHY_MASTER); + dphy_wait_fsm(kmb_dsi, dphy_no, DPHY_TX_IDLE); + wait_init_done(kmb_dsi, dphy_no, cfg->active_lanes); + wait_pll_lock(kmb_dsi, dphy_no); + } + + return 0; +} + +static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi) +{ + struct regmap *msscam; + + msscam = syscon_regmap_lookup_by_compatible("intel,keembay-msscam"); + if (IS_ERR(msscam)) { + dev_dbg(kmb_dsi->dev, "failed to get msscam syscon"); + return; + } + + /* DISABLE MIPI->CIF CONNECTION */ + regmap_write(msscam, MSS_MIPI_CIF_CFG, 0); + + /* ENABLE LCD->MIPI CONNECTION */ + regmap_write(msscam, MSS_LCD_MIPI_CFG, 1); + /* DISABLE LCD->CIF LOOPBACK */ + regmap_write(msscam, MSS_LOOPBACK_CFG, 1); +} + +int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode, + int sys_clk_mhz) +{ + u64 data_rate; + + kmb_dsi->sys_clk_mhz = sys_clk_mhz; + mipi_tx_init_cfg.active_lanes = MIPI_TX_ACTIVE_LANES; + + mipi_tx_frame0_sect_cfg.width_pixels = mode->crtc_hdisplay; + mipi_tx_frame0_sect_cfg.height_lines = mode->crtc_vdisplay; + mipitx_frame0_cfg.vsync_width = + mode->crtc_vsync_end - mode->crtc_vsync_start; + mipitx_frame0_cfg.v_backporch = + mode->crtc_vtotal - mode->crtc_vsync_end; + mipitx_frame0_cfg.v_frontporch = + mode->crtc_vsync_start - mode->crtc_vdisplay; + mipitx_frame0_cfg.hsync_width = + mode->crtc_hsync_end - mode->crtc_hsync_start; + mipitx_frame0_cfg.h_backporch = + mode->crtc_htotal - mode->crtc_hsync_end; + mipitx_frame0_cfg.h_frontporch = + mode->crtc_hsync_start - mode->crtc_hdisplay; + + /* Lane rate = (vtotal*htotal*fps*bpp)/4 / 1000000 + * to convert to Mbps + */ + data_rate = ((((u32)mode->crtc_vtotal * (u32)mode->crtc_htotal) * + (u32)(drm_mode_vrefresh(mode)) * + MIPI_TX_BPP) / mipi_tx_init_cfg.active_lanes) / 1000000; + + dev_dbg(kmb_dsi->dev, "data_rate=%u active_lanes=%d\n", + (u32)data_rate, mipi_tx_init_cfg.active_lanes); + + /* When late rate < 800, modeset fails with 4 lanes, + * so switch to 2 lanes + */ + if (data_rate < 800) { + mipi_tx_init_cfg.active_lanes = 2; + mipi_tx_init_cfg.lane_rate_mbps = data_rate * 2; + } else { + mipi_tx_init_cfg.lane_rate_mbps = data_rate; + } + + kmb_write_mipi(kmb_dsi, DPHY_ENABLE, 0); + kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL0, 0); + kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL1, 0); + kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL2, 0); + + /* Initialize mipi controller */ + mipi_tx_init_cntrl(kmb_dsi, &mipi_tx_init_cfg); + + /* Dphy initialization */ + mipi_tx_init_dphy(kmb_dsi, &mipi_tx_init_cfg); + + connect_lcd_to_mipi(kmb_dsi); + dev_info(kmb_dsi->dev, "mipi hw initialized"); + + return 0; +} + +struct kmb_dsi *kmb_dsi_init(struct platform_device *pdev) +{ + struct kmb_dsi *kmb_dsi; + struct device *dev = get_device(&pdev->dev); + + kmb_dsi = devm_kzalloc(dev, sizeof(*kmb_dsi), GFP_KERNEL); + if (!kmb_dsi) { + dev_err(dev, "failed to 
allocate kmb_dsi\n"); + return ERR_PTR(-ENOMEM); + } + + kmb_dsi->host = dsi_host; + kmb_dsi->host->ops = &kmb_dsi_host_ops; + + dsi_device->host = kmb_dsi->host; + kmb_dsi->device = dsi_device; + + return kmb_dsi; +} + +int kmb_dsi_encoder_init(struct drm_device *dev, struct kmb_dsi *kmb_dsi) +{ + struct drm_encoder *encoder; + struct drm_connector *connector; + int ret = 0; + + encoder = &kmb_dsi->base; + encoder->possible_crtcs = 1; + encoder->possible_clones = 0; + + ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DSI); + if (ret) { + dev_err(kmb_dsi->dev, "Failed to init encoder %d\n", ret); + return ret; + } + + /* Link drm_bridge to encoder */ + ret = drm_bridge_attach(encoder, adv_bridge, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) { + DRM_ERROR("failed to attach bridge to MIPI\n"); + drm_encoder_cleanup(encoder); + return ret; + } + drm_info(dev, "Bridge attached : SUCCESS"); + connector = drm_bridge_connector_init(dev, encoder); + if (IS_ERR(connector)) { + DRM_ERROR("Unable to create bridge connector"); + drm_encoder_cleanup(encoder); + return PTR_ERR(connector); + } + drm_connector_attach_encoder(connector, encoder); + return 0; +} + +int kmb_dsi_map_mmio(struct kmb_dsi *kmb_dsi) +{ + struct resource *res; + struct device *dev = kmb_dsi->dev; + + res = platform_get_resource_byname(kmb_dsi->pdev, IORESOURCE_MEM, + "mipi"); + if (!res) { + dev_err(dev, "failed to get resource for mipi"); + return -ENOMEM; + } + kmb_dsi->mipi_mmio = devm_ioremap_resource(dev, res); + if (IS_ERR(kmb_dsi->mipi_mmio)) { + dev_err(dev, "failed to ioremap mipi registers"); + return PTR_ERR(kmb_dsi->mipi_mmio); + } + return 0; +} + +static int kmb_dsi_clk_enable(struct kmb_dsi *kmb_dsi) +{ + int ret; + struct device *dev = kmb_dsi->dev; + + ret = clk_prepare_enable(kmb_dsi->clk_mipi); + if (ret) { + dev_err(dev, "Failed to enable MIPI clock: %d\n", ret); + return ret; + } + + ret = clk_prepare_enable(kmb_dsi->clk_mipi_ecfg); + if (ret) { + dev_err(dev, "Failed to enable MIPI_ECFG clock: %d\n", ret); + return ret; + } + + ret = clk_prepare_enable(kmb_dsi->clk_mipi_cfg); + if (ret) { + dev_err(dev, "Failed to enable MIPI_CFG clock: %d\n", ret); + return ret; + } + + dev_info(dev, "SUCCESS : enabled MIPI clocks\n"); + return 0; +} + +int kmb_dsi_clk_init(struct kmb_dsi *kmb_dsi) +{ + struct device *dev = kmb_dsi->dev; + unsigned long clk; + + kmb_dsi->clk_mipi = devm_clk_get(dev, "clk_mipi"); + if (IS_ERR(kmb_dsi->clk_mipi)) { + dev_err(dev, "devm_clk_get() failed clk_mipi\n"); + return PTR_ERR(kmb_dsi->clk_mipi); + } + + kmb_dsi->clk_mipi_ecfg = devm_clk_get(dev, "clk_mipi_ecfg"); + if (IS_ERR(kmb_dsi->clk_mipi_ecfg)) { + dev_err(dev, "devm_clk_get() failed clk_mipi_ecfg\n"); + return PTR_ERR(kmb_dsi->clk_mipi_ecfg); + } + + kmb_dsi->clk_mipi_cfg = devm_clk_get(dev, "clk_mipi_cfg"); + if (IS_ERR(kmb_dsi->clk_mipi_cfg)) { + dev_err(dev, "devm_clk_get() failed clk_mipi_cfg\n"); + return PTR_ERR(kmb_dsi->clk_mipi_cfg); + } + /* Set MIPI clock to 24 Mhz */ + clk_set_rate(kmb_dsi->clk_mipi, KMB_MIPI_DEFAULT_CLK); + if (clk_get_rate(kmb_dsi->clk_mipi) != KMB_MIPI_DEFAULT_CLK) { + dev_err(dev, "failed to set to clk_mipi to %d\n", + KMB_MIPI_DEFAULT_CLK); + return -1; + } + dev_dbg(dev, "clk_mipi = %ld\n", clk_get_rate(kmb_dsi->clk_mipi)); + + clk = clk_get_rate(kmb_dsi->clk_mipi_ecfg); + if (clk != KMB_MIPI_DEFAULT_CFG_CLK) { + /* Set MIPI_ECFG clock to 24 Mhz */ + clk_set_rate(kmb_dsi->clk_mipi_ecfg, KMB_MIPI_DEFAULT_CFG_CLK); + clk = clk_get_rate(kmb_dsi->clk_mipi_ecfg); + if (clk != 
KMB_MIPI_DEFAULT_CFG_CLK) { + dev_err(dev, "failed to set to clk_mipi_ecfg to %d\n", + KMB_MIPI_DEFAULT_CFG_CLK); + return -1; + } + } + + clk = clk_get_rate(kmb_dsi->clk_mipi_cfg); + if (clk != KMB_MIPI_DEFAULT_CFG_CLK) { + /* Set MIPI_CFG clock to 24 Mhz */ + clk_set_rate(kmb_dsi->clk_mipi_cfg, 24000000); + clk = clk_get_rate(kmb_dsi->clk_mipi_cfg); + if (clk != KMB_MIPI_DEFAULT_CFG_CLK) { + dev_err(dev, "failed to set clk_mipi_cfg to %d\n", + KMB_MIPI_DEFAULT_CFG_CLK); + return -1; + } + } + + return kmb_dsi_clk_enable(kmb_dsi); +} diff --git a/drivers/gpu/drm/kmb/kmb_dsi.h b/drivers/gpu/drm/kmb/kmb_dsi.h new file mode 100644 index 000000000000..66b7c500d9bc --- /dev/null +++ b/drivers/gpu/drm/kmb/kmb_dsi.h @@ -0,0 +1,387 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright © 2019-2020 Intel Corporation + */ + +#ifndef __KMB_DSI_H__ +#define __KMB_DSI_H__ + +#include <drm/drm_encoder.h> +#include <drm/drm_mipi_dsi.h> + +/* MIPI TX CFG */ +#define MIPI_TX_LANE_DATA_RATE_MBPS 891 +#define MIPI_TX_REF_CLK_KHZ 24000 +#define MIPI_TX_CFG_CLK_KHZ 24000 +#define MIPI_TX_BPP 24 + +/* DPHY Tx test codes*/ +#define TEST_CODE_FSM_CONTROL 0x03 +#define TEST_CODE_MULTIPLE_PHY_CTRL 0x0C +#define TEST_CODE_PLL_PROPORTIONAL_CHARGE_PUMP_CTRL 0x0E +#define TEST_CODE_PLL_INTEGRAL_CHARGE_PUMP_CTRL 0x0F +#define TEST_CODE_PLL_VCO_CTRL 0x12 +#define TEST_CODE_PLL_GMP_CTRL 0x13 +#define TEST_CODE_PLL_PHASE_ERR_CTRL 0x14 +#define TEST_CODE_PLL_LOCK_FILTER 0x15 +#define TEST_CODE_PLL_UNLOCK_FILTER 0x16 +#define TEST_CODE_PLL_INPUT_DIVIDER 0x17 +#define TEST_CODE_PLL_FEEDBACK_DIVIDER 0x18 +#define PLL_FEEDBACK_DIVIDER_HIGH BIT(7) +#define TEST_CODE_PLL_OUTPUT_CLK_SEL 0x19 +#define PLL_N_OVR_EN BIT(4) +#define PLL_M_OVR_EN BIT(5) +#define TEST_CODE_VOD_LEVEL 0x24 +#define TEST_CODE_PLL_CHARGE_PUMP_BIAS 0x1C +#define TEST_CODE_PLL_LOCK_DETECTOR 0x1D +#define TEST_CODE_HS_FREQ_RANGE_CFG 0x44 +#define TEST_CODE_PLL_ANALOG_PROG 0x1F +#define TEST_CODE_SLEW_RATE_OVERRIDE_CTRL 0xA0 +#define TEST_CODE_SLEW_RATE_DDL_LOOP_CTRL 0xA3 +#define TEST_CODE_SLEW_RATE_DDL_CYCLES 0xA4 + +/* DPHY params */ +#define PLL_N_MIN 0 +#define PLL_N_MAX 15 +#define PLL_M_MIN 62 +#define PLL_M_MAX 623 +#define PLL_FVCO_MAX 1250 + +#define TIMEOUT 600 + +#define MIPI_TX_FRAME_GEN 4 +#define MIPI_TX_FRAME_GEN_SECTIONS 4 +#define MIPI_CTRL_VIRTUAL_CHANNELS 4 +#define MIPI_D_LANES_PER_DPHY 2 +#define MIPI_CTRL_2LANE_MAX_MC_FIFO_LOC 255 +#define MIPI_CTRL_4LANE_MAX_MC_FIFO_LOC 511 +/* 2 Data Lanes per D-PHY */ +#define MIPI_DPHY_D_LANES 2 +#define MIPI_DPHY_DEFAULT_BIT_RATES 63 + +#define KMB_MIPI_DEFAULT_CLK 24000000 +#define KMB_MIPI_DEFAULT_CFG_CLK 24000000 + +#define to_kmb_dsi(x) container_of(x, struct kmb_dsi, base) + +struct kmb_dsi { + struct drm_encoder base; + struct device *dev; + struct platform_device *pdev; + struct mipi_dsi_host *host; + struct mipi_dsi_device *device; + struct drm_bridge *adv_bridge; + void __iomem *mipi_mmio; + struct clk *clk_mipi; + struct clk *clk_mipi_ecfg; + struct clk *clk_mipi_cfg; + int sys_clk_mhz; +}; + +/* DPHY Tx test codes */ + +enum mipi_ctrl_num { + MIPI_CTRL0 = 0, + MIPI_CTRL1, + MIPI_CTRL2, + MIPI_CTRL3, + MIPI_CTRL4, + MIPI_CTRL5, + MIPI_CTRL6, + MIPI_CTRL7, + MIPI_CTRL8, + MIPI_CTRL9, + MIPI_CTRL_NA +}; + +enum mipi_dphy_num { + MIPI_DPHY0 = 0, + MIPI_DPHY1, + MIPI_DPHY2, + MIPI_DPHY3, + MIPI_DPHY4, + MIPI_DPHY5, + MIPI_DPHY6, + MIPI_DPHY7, + MIPI_DPHY8, + MIPI_DPHY9, + MIPI_DPHY_NA +}; + +enum mipi_dir { + MIPI_RX, + MIPI_TX +}; + +enum mipi_ctrl_type { + MIPI_DSI, + MIPI_CSI +}; + 
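+/*
+ * Illustrative note (based on mipi_tx_init_dphy() above): each D-PHY drives
+ * MIPI_DPHY_D_LANES (2) data lanes, so a 4-lane configuration spans two
+ * D-PHYs, e.g. MIPI_DPHY6 as master plus MIPI_DPHY7 as slave for the
+ * remaining two lanes.
+ */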
+enum mipi_data_if { + MIPI_IF_DMA, + MIPI_IF_PARALLEL +}; + +enum mipi_data_mode { + MIPI_DATA_MODE0, + MIPI_DATA_MODE1, + MIPI_DATA_MODE2, + MIPI_DATA_MODE3 +}; + +enum mipi_dsi_video_mode { + DSI_VIDEO_MODE_NO_BURST_PULSE, + DSI_VIDEO_MODE_NO_BURST_EVENT, + DSI_VIDEO_MODE_BURST +}; + +enum mipi_dsi_blanking_mode { + TRANSITION_TO_LOW_POWER, + SEND_BLANK_PACKET +}; + +enum mipi_dsi_eotp { + DSI_EOTP_DISABLED, + DSI_EOTP_ENABLES +}; + +enum mipi_dsi_data_type { + DSI_SP_DT_RESERVED_00 = 0x00, + DSI_SP_DT_VSYNC_START = 0x01, + DSI_SP_DT_COLOR_MODE_OFF = 0x02, + DSI_SP_DT_GENERIC_SHORT_WR = 0x03, + DSI_SP_DT_GENERIC_RD = 0x04, + DSI_SP_DT_DCS_SHORT_WR = 0x05, + DSI_SP_DT_DCS_RD = 0x06, + DSI_SP_DT_EOTP = 0x08, + DSI_LP_DT_NULL = 0x09, + DSI_LP_DT_RESERVED_0A = 0x0a, + DSI_LP_DT_RESERVED_0B = 0x0b, + DSI_LP_DT_LPPS_YCBCR422_20B = 0x0c, + DSI_LP_DT_PPS_RGB101010_30B = 0x0d, + DSI_LP_DT_PPS_RGB565_16B = 0x0e, + DSI_LP_DT_RESERVED_0F = 0x0f, + + DSI_SP_DT_RESERVED_10 = 0x10, + DSI_SP_DT_VSYNC_END = 0x11, + DSI_SP_DT_COLOR_MODE_ON = 0x12, + DSI_SP_DT_GENERIC_SHORT_WR_1PAR = 0x13, + DSI_SP_DT_GENERIC_RD_1PAR = 0x14, + DSI_SP_DT_DCS_SHORT_WR_1PAR = 0x15, + DSI_SP_DT_RESERVED_16 = 0x16, + DSI_SP_DT_RESERVED_17 = 0x17, + DSI_SP_DT_RESERVED_18 = 0x18, + DSI_LP_DT_BLANK = 0x19, + DSI_LP_DT_RESERVED_1A = 0x1a, + DSI_LP_DT_RESERVED_1B = 0x1b, + DSI_LP_DT_PPS_YCBCR422_24B = 0x1c, + DSI_LP_DT_PPS_RGB121212_36B = 0x1d, + DSI_LP_DT_PPS_RGB666_18B = 0x1e, + DSI_LP_DT_RESERVED_1F = 0x1f, + + DSI_SP_DT_RESERVED_20 = 0x20, + DSI_SP_DT_HSYNC_START = 0x21, + DSI_SP_DT_SHUT_DOWN_PERIPH_CMD = 0x22, + DSI_SP_DT_GENERIC_SHORT_WR_2PAR = 0x23, + DSI_SP_DT_GENERIC_RD_2PAR = 0x24, + DSI_SP_DT_RESERVED_25 = 0x25, + DSI_SP_DT_RESERVED_26 = 0x26, + DSI_SP_DT_RESERVED_27 = 0x27, + DSI_SP_DT_RESERVED_28 = 0x28, + DSI_LP_DT_GENERIC_LONG_WR = 0x29, + DSI_LP_DT_RESERVED_2A = 0x2a, + DSI_LP_DT_RESERVED_2B = 0x2b, + DSI_LP_DT_PPS_YCBCR422_16B = 0x2c, + DSI_LP_DT_RESERVED_2D = 0x2d, + DSI_LP_DT_LPPS_RGB666_18B = 0x2e, + DSI_LP_DT_RESERVED_2F = 0x2f, + + DSI_SP_DT_RESERVED_30 = 0x30, + DSI_SP_DT_HSYNC_END = 0x31, + DSI_SP_DT_TURN_ON_PERIPH_CMD = 0x32, + DSI_SP_DT_RESERVED_33 = 0x33, + DSI_SP_DT_RESERVED_34 = 0x34, + DSI_SP_DT_RESERVED_35 = 0x35, + DSI_SP_DT_RESERVED_36 = 0x36, + DSI_SP_DT_SET_MAX_RETURN_PKT_SIZE = 0x37, + DSI_SP_DT_RESERVED_38 = 0x38, + DSI_LP_DT_DSC_LONG_WR = 0x39, + DSI_LP_DT_RESERVED_3A = 0x3a, + DSI_LP_DT_RESERVED_3B = 0x3b, + DSI_LP_DT_RESERVED_3C = 0x3c, + DSI_LP_DT_PPS_YCBCR420_12B = 0x3d, + DSI_LP_DT_PPS_RGB888_24B = 0x3e, + DSI_LP_DT_RESERVED_3F = 0x3f +}; + +enum mipi_tx_hs_tp_sel { + MIPI_TX_HS_TP_WHOLE_FRAME_COLOR0 = 0, + MIPI_TX_HS_TP_WHOLE_FRAME_COLOR1, + MIPI_TX_HS_TP_V_STRIPES, + MIPI_TX_HS_TP_H_STRIPES, +}; + +enum dphy_mode { + MIPI_DPHY_SLAVE = 0, + MIPI_DPHY_MASTER +}; + +enum dphy_tx_fsm { + DPHY_TX_POWERDWN = 0, + DPHY_TX_BGPON, + DPHY_TX_TERMCAL, + DPHY_TX_TERMCALUP, + DPHY_TX_OFFSETCAL, + DPHY_TX_LOCK, + DPHY_TX_SRCAL, + DPHY_TX_IDLE, + DPHY_TX_ULP, + DPHY_TX_LANESTART, + DPHY_TX_CLKALIGN, + DPHY_TX_DDLTUNNING, + DPHY_TX_ULP_FORCE_PLL, + DPHY_TX_LOCK_LOSS +}; + +struct mipi_data_type_params { + u8 size_constraint_pixels; + u8 size_constraint_bytes; + u8 pixels_per_pclk; + u8 bits_per_pclk; +}; + +struct mipi_tx_dsi_cfg { + u8 hfp_blank_en; /* Horizontal front porch blanking enable */ + u8 eotp_en; /* End of transmission packet enable */ + /* Last vertical front porch blanking mode */ + u8 lpm_last_vfp_line; + /* First vertical sync active blanking mode */ + u8 lpm_first_vsa_line; + u8 
sync_pulse_eventn; /* Sync type */ + u8 hfp_blanking; /* Horizontal front porch blanking mode */ + u8 hbp_blanking; /* Horizontal back porch blanking mode */ + u8 hsa_blanking; /* Horizontal sync active blanking mode */ + u8 v_blanking; /* Vertical timing blanking mode */ +}; + +struct mipi_tx_frame_section_cfg { + u32 dma_v_stride; + u16 dma_v_scale_cfg; + u16 width_pixels; + u16 height_lines; + u8 dma_packed; + u8 bpp; + u8 bpp_unpacked; + u8 dma_h_stride; + u8 data_type; + u8 data_mode; + u8 dma_flip_rotate_sel; +}; + +struct mipi_tx_frame_timing_cfg { + u32 bpp; + u32 lane_rate_mbps; + u32 hsync_width; + u32 h_backporch; + u32 h_frontporch; + u32 h_active; + u16 vsync_width; + u16 v_backporch; + u16 v_frontporch; + u16 v_active; + u8 active_lanes; +}; + +struct mipi_tx_frame_sect_phcfg { + u32 wc; + enum mipi_data_mode data_mode; + enum mipi_dsi_data_type data_type; + u8 vchannel; + u8 dma_packed; +}; + +struct mipi_tx_frame_cfg { + struct mipi_tx_frame_section_cfg *sections[MIPI_TX_FRAME_GEN_SECTIONS]; + u32 hsync_width; /* in pixels */ + u32 h_backporch; /* in pixels */ + u32 h_frontporch; /* in pixels */ + u16 vsync_width; /* in lines */ + u16 v_backporch; /* in lines */ + u16 v_frontporch; /* in lines */ +}; + +struct mipi_tx_ctrl_cfg { + struct mipi_tx_frame_cfg *frames[MIPI_TX_FRAME_GEN]; + const struct mipi_tx_dsi_cfg *tx_dsi_cfg; + u8 line_sync_pkt_en; + u8 line_counter_active; + u8 frame_counter_active; + u8 tx_hsclkkidle_cnt; + u8 tx_hsexit_cnt; + u8 tx_crc_en; + u8 tx_hact_wait_stop; + u8 tx_always_use_hact; + u8 tx_wait_trig; + u8 tx_wait_all_sect; +}; + +/* configuration structure for MIPI control */ +struct mipi_ctrl_cfg { + u8 active_lanes; /* # active lanes per controller 2/4 */ + u32 lane_rate_mbps; /* MBPS */ + u32 ref_clk_khz; + u32 cfg_clk_khz; + struct mipi_tx_ctrl_cfg tx_ctrl_cfg; +}; + +static inline void kmb_write_mipi(struct kmb_dsi *kmb_dsi, + unsigned int reg, u32 value) +{ + writel(value, (kmb_dsi->mipi_mmio + reg)); +} + +static inline u32 kmb_read_mipi(struct kmb_dsi *kmb_dsi, unsigned int reg) +{ + return readl(kmb_dsi->mipi_mmio + reg); +} + +static inline void kmb_write_bits_mipi(struct kmb_dsi *kmb_dsi, + unsigned int reg, u32 offset, + u32 num_bits, u32 value) +{ + u32 reg_val = kmb_read_mipi(kmb_dsi, reg); + u32 mask = (1 << num_bits) - 1; + + value &= mask; + mask <<= offset; + reg_val &= (~mask); + reg_val |= (value << offset); + kmb_write_mipi(kmb_dsi, reg, reg_val); +} + +static inline void kmb_set_bit_mipi(struct kmb_dsi *kmb_dsi, + unsigned int reg, u32 offset) +{ + u32 reg_val = kmb_read_mipi(kmb_dsi, reg); + + kmb_write_mipi(kmb_dsi, reg, reg_val | (1 << offset)); +} + +static inline void kmb_clr_bit_mipi(struct kmb_dsi *kmb_dsi, + unsigned int reg, u32 offset) +{ + u32 reg_val = kmb_read_mipi(kmb_dsi, reg); + + kmb_write_mipi(kmb_dsi, reg, reg_val & (~(1 << offset))); +} + +int kmb_dsi_host_bridge_init(struct device *dev); +struct kmb_dsi *kmb_dsi_init(struct platform_device *pdev); +void kmb_dsi_host_unregister(struct kmb_dsi *kmb_dsi); +int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode, + int sys_clk_mhz); +int kmb_dsi_map_mmio(struct kmb_dsi *kmb_dsi); +int kmb_dsi_clk_init(struct kmb_dsi *kmb_dsi); +int kmb_dsi_encoder_init(struct drm_device *dev, struct kmb_dsi *kmb_dsi); +#endif /* __KMB_DSI_H__ */ diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c new file mode 100644 index 000000000000..8448d1edb553 --- /dev/null +++ b/drivers/gpu/drm/kmb/kmb_plane.c @@ -0,0 +1,522 @@ +// 
SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright © 2018-2020 Intel Corporation + */ + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_managed.h> +#include <drm/drm_plane_helper.h> + +#include "kmb_drv.h" +#include "kmb_plane.h" +#include "kmb_regs.h" + +const u32 layer_irqs[] = { + LCD_INT_VL0, + LCD_INT_VL1, + LCD_INT_GL0, + LCD_INT_GL1 +}; + +/* Conversion (yuv->rgb) matrix from myriadx */ +static const u32 csc_coef_lcd[] = { + 1024, 0, 1436, + 1024, -352, -731, + 1024, 1814, 0, + -179, 125, -226 +}; + +/* Graphics layer (layers 2 & 3) formats, only packed formats are supported */ +static const u32 kmb_formats_g[] = { + DRM_FORMAT_RGB332, + DRM_FORMAT_XRGB4444, DRM_FORMAT_XBGR4444, + DRM_FORMAT_ARGB4444, DRM_FORMAT_ABGR4444, + DRM_FORMAT_XRGB1555, DRM_FORMAT_XBGR1555, + DRM_FORMAT_ARGB1555, DRM_FORMAT_ABGR1555, + DRM_FORMAT_RGB565, DRM_FORMAT_BGR565, + DRM_FORMAT_RGB888, DRM_FORMAT_BGR888, + DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888, +}; + +/* Video layer ( 0 & 1) formats, packed and planar formats are supported */ +static const u32 kmb_formats_v[] = { + /* packed formats */ + DRM_FORMAT_RGB332, + DRM_FORMAT_XRGB4444, DRM_FORMAT_XBGR4444, + DRM_FORMAT_ARGB4444, DRM_FORMAT_ABGR4444, + DRM_FORMAT_XRGB1555, DRM_FORMAT_XBGR1555, + DRM_FORMAT_ARGB1555, DRM_FORMAT_ABGR1555, + DRM_FORMAT_RGB565, DRM_FORMAT_BGR565, + DRM_FORMAT_RGB888, DRM_FORMAT_BGR888, + DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888, + /*planar formats */ + DRM_FORMAT_YUV420, DRM_FORMAT_YVU420, + DRM_FORMAT_YUV422, DRM_FORMAT_YVU422, + DRM_FORMAT_YUV444, DRM_FORMAT_YVU444, + DRM_FORMAT_NV12, DRM_FORMAT_NV21, +}; + +static unsigned int check_pixel_format(struct drm_plane *plane, u32 format) +{ + int i; + + for (i = 0; i < plane->format_count; i++) { + if (plane->format_types[i] == format) + return 0; + } + return -EINVAL; +} + +static int kmb_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct drm_framebuffer *fb; + int ret; + struct drm_crtc_state *crtc_state; + bool can_position; + + fb = state->fb; + if (!fb || !state->crtc) + return 0; + + ret = check_pixel_format(plane, fb->format->format); + if (ret) + return ret; + + if (state->crtc_w > KMB_MAX_WIDTH || state->crtc_h > KMB_MAX_HEIGHT) + return -EINVAL; + if (state->crtc_w < KMB_MIN_WIDTH || state->crtc_h < KMB_MIN_HEIGHT) + return -EINVAL; + can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY); + crtc_state = + drm_atomic_get_existing_crtc_state(state->state, state->crtc); + return drm_atomic_helper_check_plane_state(state, crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + can_position, true); +} + +static void kmb_plane_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct kmb_plane *kmb_plane = to_kmb_plane(plane); + int plane_id = kmb_plane->id; + struct kmb_drm_private *kmb; + + kmb = to_kmb(plane->dev); + + switch (plane_id) { + case LAYER_0: + kmb->plane_status[plane_id].ctrl = LCD_CTRL_VL1_ENABLE; + break; + case LAYER_1: + kmb->plane_status[plane_id].ctrl = LCD_CTRL_VL2_ENABLE; + break; + case LAYER_2: + kmb->plane_status[plane_id].ctrl = LCD_CTRL_GL1_ENABLE; + break; + case LAYER_3: + kmb->plane_status[plane_id].ctrl = LCD_CTRL_GL2_ENABLE; + break; + } + 
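+	/*
+	 * Note: this handler only latches the layer's control bit and a
+	 * disable flag in plane_status; the hardware disable itself is
+	 * presumably applied elsewhere in the driver at a safe point.
+	 */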
+ kmb->plane_status[plane_id].disable = true; +} + +static unsigned int get_pixel_format(u32 format) +{ + unsigned int val = 0; + + switch (format) { + /* planar formats */ + case DRM_FORMAT_YUV444: + val = LCD_LAYER_FORMAT_YCBCR444PLAN | LCD_LAYER_PLANAR_STORAGE; + break; + case DRM_FORMAT_YVU444: + val = LCD_LAYER_FORMAT_YCBCR444PLAN | LCD_LAYER_PLANAR_STORAGE + | LCD_LAYER_CRCB_ORDER; + break; + case DRM_FORMAT_YUV422: + val = LCD_LAYER_FORMAT_YCBCR422PLAN | LCD_LAYER_PLANAR_STORAGE; + break; + case DRM_FORMAT_YVU422: + val = LCD_LAYER_FORMAT_YCBCR422PLAN | LCD_LAYER_PLANAR_STORAGE + | LCD_LAYER_CRCB_ORDER; + break; + case DRM_FORMAT_YUV420: + val = LCD_LAYER_FORMAT_YCBCR420PLAN | LCD_LAYER_PLANAR_STORAGE; + break; + case DRM_FORMAT_YVU420: + val = LCD_LAYER_FORMAT_YCBCR420PLAN | LCD_LAYER_PLANAR_STORAGE + | LCD_LAYER_CRCB_ORDER; + break; + case DRM_FORMAT_NV12: + val = LCD_LAYER_FORMAT_NV12 | LCD_LAYER_PLANAR_STORAGE; + break; + case DRM_FORMAT_NV21: + val = LCD_LAYER_FORMAT_NV12 | LCD_LAYER_PLANAR_STORAGE + | LCD_LAYER_CRCB_ORDER; + break; + /* packed formats */ + /* looks hw requires B & G to be swapped when RGB */ + case DRM_FORMAT_RGB332: + val = LCD_LAYER_FORMAT_RGB332 | LCD_LAYER_BGR_ORDER; + break; + case DRM_FORMAT_XBGR4444: + val = LCD_LAYER_FORMAT_RGBX4444; + break; + case DRM_FORMAT_ARGB4444: + val = LCD_LAYER_FORMAT_RGBA4444 | LCD_LAYER_BGR_ORDER; + break; + case DRM_FORMAT_ABGR4444: + val = LCD_LAYER_FORMAT_RGBA4444; + break; + case DRM_FORMAT_XRGB1555: + val = LCD_LAYER_FORMAT_XRGB1555 | LCD_LAYER_BGR_ORDER; + break; + case DRM_FORMAT_XBGR1555: + val = LCD_LAYER_FORMAT_XRGB1555; + break; + case DRM_FORMAT_ARGB1555: + val = LCD_LAYER_FORMAT_RGBA1555 | LCD_LAYER_BGR_ORDER; + break; + case DRM_FORMAT_ABGR1555: + val = LCD_LAYER_FORMAT_RGBA1555; + break; + case DRM_FORMAT_RGB565: + val = LCD_LAYER_FORMAT_RGB565 | LCD_LAYER_BGR_ORDER; + break; + case DRM_FORMAT_BGR565: + val = LCD_LAYER_FORMAT_RGB565; + break; + case DRM_FORMAT_RGB888: + val = LCD_LAYER_FORMAT_RGB888 | LCD_LAYER_BGR_ORDER; + break; + case DRM_FORMAT_BGR888: + val = LCD_LAYER_FORMAT_RGB888; + break; + case DRM_FORMAT_XRGB8888: + val = LCD_LAYER_FORMAT_RGBX8888 | LCD_LAYER_BGR_ORDER; + break; + case DRM_FORMAT_XBGR8888: + val = LCD_LAYER_FORMAT_RGBX8888; + break; + case DRM_FORMAT_ARGB8888: + val = LCD_LAYER_FORMAT_RGBA8888 | LCD_LAYER_BGR_ORDER; + break; + case DRM_FORMAT_ABGR8888: + val = LCD_LAYER_FORMAT_RGBA8888; + break; + } + DRM_INFO_ONCE("%s : %d format=0x%x val=0x%x\n", + __func__, __LINE__, format, val); + return val; +} + +static unsigned int get_bits_per_pixel(const struct drm_format_info *format) +{ + u32 bpp = 0; + unsigned int val = 0; + + if (format->num_planes > 1) { + val = LCD_LAYER_8BPP; + return val; + } + + bpp += 8 * format->cpp[0]; + + switch (bpp) { + case 8: + val = LCD_LAYER_8BPP; + break; + case 16: + val = LCD_LAYER_16BPP; + break; + case 24: + val = LCD_LAYER_24BPP; + break; + case 32: + val = LCD_LAYER_32BPP; + break; + } + + DRM_DEBUG("bpp=%d val=0x%x\n", bpp, val); + return val; +} + +static void config_csc(struct kmb_drm_private *kmb, int plane_id) +{ + /* YUV to RGB conversion using the fixed matrix csc_coef_lcd */ + kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF11(plane_id), csc_coef_lcd[0]); + kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF12(plane_id), csc_coef_lcd[1]); + kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF13(plane_id), csc_coef_lcd[2]); + kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF21(plane_id), csc_coef_lcd[3]); + kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF22(plane_id), csc_coef_lcd[4]); + 
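+	/*
+	 * Illustrative note: csc_coef_lcd appears to hold BT.601 YCbCr->RGB
+	 * coefficients in Q10 fixed point (1024 == 1.0), e.g.
+	 * 1436/1024 ~= 1.402 (Cr->R) and 1814/1024 ~= 1.772 (Cb->B).
+	 */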
kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF23(plane_id), csc_coef_lcd[5]); + kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF31(plane_id), csc_coef_lcd[6]); + kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF32(plane_id), csc_coef_lcd[7]); + kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF33(plane_id), csc_coef_lcd[8]); + kmb_write_lcd(kmb, LCD_LAYERn_CSC_OFF1(plane_id), csc_coef_lcd[9]); + kmb_write_lcd(kmb, LCD_LAYERn_CSC_OFF2(plane_id), csc_coef_lcd[10]); + kmb_write_lcd(kmb, LCD_LAYERn_CSC_OFF3(plane_id), csc_coef_lcd[11]); +} + +static void kmb_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct drm_framebuffer *fb; + struct kmb_drm_private *kmb; + unsigned int width; + unsigned int height; + unsigned int dma_len; + struct kmb_plane *kmb_plane; + unsigned int dma_cfg; + unsigned int ctrl = 0, val = 0, out_format = 0; + unsigned int src_w, src_h, crtc_x, crtc_y; + unsigned char plane_id; + int num_planes; + static dma_addr_t addr[MAX_SUB_PLANES]; + + if (!plane || !plane->state || !state) + return; + + fb = plane->state->fb; + if (!fb) + return; + num_planes = fb->format->num_planes; + kmb_plane = to_kmb_plane(plane); + plane_id = kmb_plane->id; + + kmb = to_kmb(plane->dev); + + spin_lock_irq(&kmb->irq_lock); + if (kmb->kmb_under_flow || kmb->kmb_flush_done) { + spin_unlock_irq(&kmb->irq_lock); + drm_dbg(&kmb->drm, "plane_update:underflow!!!! returning"); + return; + } + spin_unlock_irq(&kmb->irq_lock); + + src_w = (plane->state->src_w >> 16); + src_h = plane->state->src_h >> 16; + crtc_x = plane->state->crtc_x; + crtc_y = plane->state->crtc_y; + + drm_dbg(&kmb->drm, + "src_w=%d src_h=%d, fb->format->format=0x%x fb->flags=0x%x\n", + src_w, src_h, fb->format->format, fb->flags); + + width = fb->width; + height = fb->height; + dma_len = (width * height * fb->format->cpp[0]); + drm_dbg(&kmb->drm, "dma_len=%d ", dma_len); + kmb_write_lcd(kmb, LCD_LAYERn_DMA_LEN(plane_id), dma_len); + kmb_write_lcd(kmb, LCD_LAYERn_DMA_LEN_SHADOW(plane_id), dma_len); + kmb_write_lcd(kmb, LCD_LAYERn_DMA_LINE_VSTRIDE(plane_id), + fb->pitches[0]); + kmb_write_lcd(kmb, LCD_LAYERn_DMA_LINE_WIDTH(plane_id), + (width * fb->format->cpp[0])); + + addr[Y_PLANE] = drm_fb_cma_get_gem_addr(fb, plane->state, 0); + kmb_write_lcd(kmb, LCD_LAYERn_DMA_START_ADDR(plane_id), + addr[Y_PLANE] + fb->offsets[0]); + val = get_pixel_format(fb->format->format); + val |= get_bits_per_pixel(fb->format); + /* Program Cb/Cr for planar formats */ + if (num_planes > 1) { + kmb_write_lcd(kmb, LCD_LAYERn_DMA_CB_LINE_VSTRIDE(plane_id), + width * fb->format->cpp[0]); + kmb_write_lcd(kmb, LCD_LAYERn_DMA_CB_LINE_WIDTH(plane_id), + (width * fb->format->cpp[0])); + + addr[U_PLANE] = drm_fb_cma_get_gem_addr(fb, plane->state, + U_PLANE); + /* check if Cb/Cr is swapped*/ + if (num_planes == 3 && (val & LCD_LAYER_CRCB_ORDER)) + kmb_write_lcd(kmb, + LCD_LAYERn_DMA_START_CR_ADR(plane_id), + addr[U_PLANE]); + else + kmb_write_lcd(kmb, + LCD_LAYERn_DMA_START_CB_ADR(plane_id), + addr[U_PLANE]); + + if (num_planes == 3) { + kmb_write_lcd(kmb, + LCD_LAYERn_DMA_CR_LINE_VSTRIDE(plane_id), + ((width) * fb->format->cpp[0])); + + kmb_write_lcd(kmb, + LCD_LAYERn_DMA_CR_LINE_WIDTH(plane_id), + ((width) * fb->format->cpp[0])); + + addr[V_PLANE] = drm_fb_cma_get_gem_addr(fb, + plane->state, + V_PLANE); + + /* check if Cb/Cr is swapped*/ + if (val & LCD_LAYER_CRCB_ORDER) + kmb_write_lcd(kmb, + LCD_LAYERn_DMA_START_CB_ADR(plane_id), + addr[V_PLANE]); + else + kmb_write_lcd(kmb, + LCD_LAYERn_DMA_START_CR_ADR(plane_id), + addr[V_PLANE]); + } + } + + 
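+	/*
+	 * Illustrative example (frame size assumed): for a 1920x1080 XRGB8888
+	 * framebuffer cpp[0] is 4, so the dma_len programmed above is
+	 * 1920 * 1080 * 4 = 8294400 bytes and LCD_LAYERn_DMA_LINE_WIDTH is
+	 * 1920 * 4 = 7680 bytes.
+	 */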
kmb_write_lcd(kmb, LCD_LAYERn_WIDTH(plane_id), src_w - 1); + kmb_write_lcd(kmb, LCD_LAYERn_HEIGHT(plane_id), src_h - 1); + kmb_write_lcd(kmb, LCD_LAYERn_COL_START(plane_id), crtc_x); + kmb_write_lcd(kmb, LCD_LAYERn_ROW_START(plane_id), crtc_y); + + val |= LCD_LAYER_FIFO_100; + + if (val & LCD_LAYER_PLANAR_STORAGE) { + val |= LCD_LAYER_CSC_EN; + + /* Enable CSC if input is planar and output is RGB */ + config_csc(kmb, plane_id); + } + + kmb_write_lcd(kmb, LCD_LAYERn_CFG(plane_id), val); + + switch (plane_id) { + case LAYER_0: + ctrl = LCD_CTRL_VL1_ENABLE; + break; + case LAYER_1: + ctrl = LCD_CTRL_VL2_ENABLE; + break; + case LAYER_2: + ctrl = LCD_CTRL_GL1_ENABLE; + break; + case LAYER_3: + ctrl = LCD_CTRL_GL2_ENABLE; + break; + } + + ctrl |= LCD_CTRL_PROGRESSIVE | LCD_CTRL_TIM_GEN_ENABLE + | LCD_CTRL_CONTINUOUS | LCD_CTRL_OUTPUT_ENABLED; + + /* LCD is connected to MIPI on kmb + * Therefore this bit is required for DSI Tx + */ + ctrl |= LCD_CTRL_VHSYNC_IDLE_LVL; + + kmb_set_bitmask_lcd(kmb, LCD_CONTROL, ctrl); + + /* FIXME no doc on how to set output format,these values are + * taken from the Myriadx tests + */ + out_format |= LCD_OUTF_FORMAT_RGB888; + + /* Leave RGB order,conversion mode and clip mode to default */ + /* do not interleave RGB channels for mipi Tx compatibility */ + out_format |= LCD_OUTF_MIPI_RGB_MODE; + kmb_write_lcd(kmb, LCD_OUT_FORMAT_CFG, out_format); + + dma_cfg = LCD_DMA_LAYER_ENABLE | LCD_DMA_LAYER_VSTRIDE_EN | + LCD_DMA_LAYER_CONT_UPDATE | LCD_DMA_LAYER_AXI_BURST_16; + + /* Enable DMA */ + kmb_write_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id), dma_cfg); + drm_dbg(&kmb->drm, "dma_cfg=0x%x LCD_DMA_CFG=0x%x\n", dma_cfg, + kmb_read_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id))); + + kmb_set_bitmask_lcd(kmb, LCD_INT_CLEAR, LCD_INT_EOF | + LCD_INT_DMA_ERR); + kmb_set_bitmask_lcd(kmb, LCD_INT_ENABLE, LCD_INT_EOF | + LCD_INT_DMA_ERR); +} + +static const struct drm_plane_helper_funcs kmb_plane_helper_funcs = { + .atomic_check = kmb_plane_atomic_check, + .atomic_update = kmb_plane_atomic_update, + .atomic_disable = kmb_plane_atomic_disable +}; + +void kmb_plane_destroy(struct drm_plane *plane) +{ + struct kmb_plane *kmb_plane = to_kmb_plane(plane); + + drm_plane_cleanup(plane); + kfree(kmb_plane); +} + +static const struct drm_plane_funcs kmb_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = kmb_plane_destroy, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +struct kmb_plane *kmb_plane_init(struct drm_device *drm) +{ + struct kmb_drm_private *kmb = to_kmb(drm); + struct kmb_plane *plane = NULL; + struct kmb_plane *primary = NULL; + int i = 0; + int ret = 0; + enum drm_plane_type plane_type; + const u32 *plane_formats; + int num_plane_formats; + + for (i = 0; i < KMB_MAX_PLANES; i++) { + plane = drmm_kzalloc(drm, sizeof(*plane), GFP_KERNEL); + + if (!plane) { + drm_err(drm, "Failed to allocate plane\n"); + return ERR_PTR(-ENOMEM); + } + + plane_type = (i == 0) ? 
DRM_PLANE_TYPE_PRIMARY : + DRM_PLANE_TYPE_OVERLAY; + if (i < 2) { + plane_formats = kmb_formats_v; + num_plane_formats = ARRAY_SIZE(kmb_formats_v); + } else { + plane_formats = kmb_formats_g; + num_plane_formats = ARRAY_SIZE(kmb_formats_g); + } + + ret = drm_universal_plane_init(drm, &plane->base_plane, + POSSIBLE_CRTCS, &kmb_plane_funcs, + plane_formats, num_plane_formats, + NULL, plane_type, "plane %d", i); + if (ret < 0) { + drm_err(drm, "drm_universal_plane_init failed (ret=%d)", + ret); + goto cleanup; + } + drm_dbg(drm, "%s : %d i=%d type=%d", + __func__, __LINE__, + i, plane_type); + drm_plane_helper_add(&plane->base_plane, + &kmb_plane_helper_funcs); + if (plane_type == DRM_PLANE_TYPE_PRIMARY) { + primary = plane; + kmb->plane = plane; + } + drm_dbg(drm, "%s : %d primary=%p\n", __func__, __LINE__, + &primary->base_plane); + plane->id = i; + } + + return primary; +cleanup: + drmm_kfree(drm, plane); + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/kmb/kmb_plane.h b/drivers/gpu/drm/kmb/kmb_plane.h new file mode 100644 index 000000000000..486490f7a3ec --- /dev/null +++ b/drivers/gpu/drm/kmb/kmb_plane.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright © 2018-2020 Intel Corporation + */ + +#ifndef __KMB_PLANE_H__ +#define __KMB_PLANE_H__ + +#include <drm/drm_fourcc.h> +#include <drm/drm_plane.h> + +#define LCD_INT_VL0_ERR ((LAYER0_DMA_FIFO_UNDERFLOW) | \ + (LAYER0_DMA_FIFO_OVERFLOW) | \ + (LAYER0_DMA_CB_FIFO_OVERFLOW) | \ + (LAYER0_DMA_CB_FIFO_UNDERFLOW) | \ + (LAYER0_DMA_CR_FIFO_OVERFLOW) | \ + (LAYER0_DMA_CR_FIFO_UNDERFLOW)) + +#define LCD_INT_VL1_ERR ((LAYER1_DMA_FIFO_UNDERFLOW) | \ + (LAYER1_DMA_FIFO_OVERFLOW) | \ + (LAYER1_DMA_CB_FIFO_OVERFLOW) | \ + (LAYER1_DMA_CB_FIFO_UNDERFLOW) | \ + (LAYER1_DMA_CR_FIFO_OVERFLOW) | \ + (LAYER1_DMA_CR_FIFO_UNDERFLOW)) + +#define LCD_INT_GL0_ERR (LAYER2_DMA_FIFO_OVERFLOW | LAYER2_DMA_FIFO_UNDERFLOW) +#define LCD_INT_GL1_ERR (LAYER3_DMA_FIFO_OVERFLOW | LAYER3_DMA_FIFO_UNDERFLOW) +#define LCD_INT_VL0 (LAYER0_DMA_DONE | LAYER0_DMA_IDLE | LCD_INT_VL0_ERR) +#define LCD_INT_VL1 (LAYER1_DMA_DONE | LAYER1_DMA_IDLE | LCD_INT_VL1_ERR) +#define LCD_INT_GL0 (LAYER2_DMA_DONE | LAYER2_DMA_IDLE | LCD_INT_GL0_ERR) +#define LCD_INT_GL1 (LAYER3_DMA_DONE | LAYER3_DMA_IDLE | LCD_INT_GL1_ERR) +#define LCD_INT_DMA_ERR (LCD_INT_VL0_ERR | LCD_INT_VL1_ERR \ + | LCD_INT_GL0_ERR | LCD_INT_GL1_ERR) + +#define POSSIBLE_CRTCS 1 +#define to_kmb_plane(x) container_of(x, struct kmb_plane, base_plane) + +enum layer_id { + LAYER_0, + LAYER_1, + LAYER_2, + LAYER_3, + /* KMB_MAX_PLANES */ +}; + +#define KMB_MAX_PLANES 1 + +enum sub_plane_id { + Y_PLANE, + U_PLANE, + V_PLANE, + MAX_SUB_PLANES, +}; + +struct kmb_plane { + struct drm_plane base_plane; + unsigned char id; +}; + +struct layer_status { + bool disable; + u32 ctrl; +}; + +struct kmb_plane *kmb_plane_init(struct drm_device *drm); +void kmb_plane_destroy(struct drm_plane *plane); +#endif /* __KMB_PLANE_H__ */ diff --git a/drivers/gpu/drm/kmb/kmb_regs.h b/drivers/gpu/drm/kmb/kmb_regs.h new file mode 100644 index 000000000000..48150569f702 --- /dev/null +++ b/drivers/gpu/drm/kmb/kmb_regs.h @@ -0,0 +1,725 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright © 2018-2020 Intel Corporation + */ + +#ifndef __KMB_REGS_H__ +#define __KMB_REGS_H__ + +/*************************************************************************** + * LCD controller control register defines + ***************************************************************************/ +#define LCD_CONTROL (0x4 * 0x000) +#define 
LCD_CTRL_PROGRESSIVE (0 << 0) +#define LCD_CTRL_INTERLACED BIT(0) +#define LCD_CTRL_ENABLE BIT(1) +#define LCD_CTRL_VL1_ENABLE BIT(2) +#define LCD_CTRL_VL2_ENABLE BIT(3) +#define LCD_CTRL_GL1_ENABLE BIT(4) +#define LCD_CTRL_GL2_ENABLE BIT(5) +#define LCD_CTRL_ALPHA_BLEND_VL1 (0 << 6) +#define LCD_CTRL_ALPHA_BLEND_VL2 BIT(6) +#define LCD_CTRL_ALPHA_BLEND_GL1 (2 << 6) +#define LCD_CTRL_ALPHA_BLEND_GL2 (3 << 6) +#define LCD_CTRL_ALPHA_TOP_VL1 (0 << 8) +#define LCD_CTRL_ALPHA_TOP_VL2 BIT(8) +#define LCD_CTRL_ALPHA_TOP_GL1 (2 << 8) +#define LCD_CTRL_ALPHA_TOP_GL2 (3 << 8) +#define LCD_CTRL_ALPHA_MIDDLE_VL1 (0 << 10) +#define LCD_CTRL_ALPHA_MIDDLE_VL2 BIT(10) +#define LCD_CTRL_ALPHA_MIDDLE_GL1 (2 << 10) +#define LCD_CTRL_ALPHA_MIDDLE_GL2 (3 << 10) +#define LCD_CTRL_ALPHA_BOTTOM_VL1 (0 << 12) +#define LCD_CTRL_ALPHA_BOTTOM_VL2 BIT(12) +#define LCD_CTRL_ALPHA_BOTTOM_GL1 (2 << 12) +#define LCD_CTRL_ALPHA_BOTTOM_GL2 (3 << 12) +#define LCD_CTRL_TIM_GEN_ENABLE BIT(14) +#define LCD_CTRL_CONTINUOUS (0 << 15) +#define LCD_CTRL_ONE_SHOT BIT(15) +#define LCD_CTRL_PWM0_EN BIT(16) +#define LCD_CTRL_PWM1_EN BIT(17) +#define LCD_CTRL_PWM2_EN BIT(18) +#define LCD_CTRL_OUTPUT_DISABLED (0 << 19) +#define LCD_CTRL_OUTPUT_ENABLED BIT(19) +#define LCD_CTRL_BPORCH_ENABLE BIT(21) +#define LCD_CTRL_FPORCH_ENABLE BIT(22) +#define LCD_CTRL_PIPELINE_DMA BIT(28) +#define LCD_CTRL_VHSYNC_IDLE_LVL BIT(31) + +/* interrupts */ +#define LCD_INT_STATUS (0x4 * 0x001) +#define LCD_INT_EOF BIT(0) +#define LCD_INT_LINE_CMP BIT(1) +#define LCD_INT_VERT_COMP BIT(2) +#define LAYER0_DMA_DONE BIT(3) +#define LAYER0_DMA_IDLE BIT(4) +#define LAYER0_DMA_FIFO_OVERFLOW BIT(5) +#define LAYER0_DMA_FIFO_UNDERFLOW BIT(6) +#define LAYER0_DMA_CB_FIFO_OVERFLOW BIT(7) +#define LAYER0_DMA_CB_FIFO_UNDERFLOW BIT(8) +#define LAYER0_DMA_CR_FIFO_OVERFLOW BIT(9) +#define LAYER0_DMA_CR_FIFO_UNDERFLOW BIT(10) +#define LAYER1_DMA_DONE BIT(11) +#define LAYER1_DMA_IDLE BIT(12) +#define LAYER1_DMA_FIFO_OVERFLOW BIT(13) +#define LAYER1_DMA_FIFO_UNDERFLOW BIT(14) +#define LAYER1_DMA_CB_FIFO_OVERFLOW BIT(15) +#define LAYER1_DMA_CB_FIFO_UNDERFLOW BIT(16) +#define LAYER1_DMA_CR_FIFO_OVERFLOW BIT(17) +#define LAYER1_DMA_CR_FIFO_UNDERFLOW BIT(18) +#define LAYER2_DMA_DONE BIT(19) +#define LAYER2_DMA_IDLE BIT(20) +#define LAYER2_DMA_FIFO_OVERFLOW BIT(21) +#define LAYER2_DMA_FIFO_UNDERFLOW BIT(22) +#define LAYER3_DMA_DONE BIT(23) +#define LAYER3_DMA_IDLE BIT(24) +#define LAYER3_DMA_FIFO_OVERFLOW BIT(25) +#define LAYER3_DMA_FIFO_UNDERFLOW BIT(26) +#define LCD_INT_LAYER (0x07fffff8) +#define LCD_INT_ENABLE (0x4 * 0x002) +#define LCD_INT_CLEAR (0x4 * 0x003) +#define LCD_LINE_COUNT (0x4 * 0x004) +#define LCD_LINE_COMPARE (0x4 * 0x005) +#define LCD_VSTATUS (0x4 * 0x006) + +/*LCD_VSTATUS_COMPARE Vertcal interval in which to generate vertcal + * interval interrupt + */ +/* BITS 13 and 14 */ +#define LCD_VSTATUS_COMPARE (0x4 * 0x007) +#define LCD_VSTATUS_VERTICAL_STATUS_MASK (3 << 13) +#define LCD_VSTATUS_COMPARE_VSYNC (0 << 13) +#define LCD_VSTATUS_COMPARE_BACKPORCH BIT(13) +#define LCD_VSTATUS_COMPARE_ACTIVE (2 << 13) +#define LCD_VSTATUS_COMPARE_FRONT_PORCH (3 << 13) + +#define LCD_SCREEN_WIDTH (0x4 * 0x008) +#define LCD_SCREEN_HEIGHT (0x4 * 0x009) +#define LCD_FIELD_INT_CFG (0x4 * 0x00a) +#define LCD_FIFO_FLUSH (0x4 * 0x00b) +#define LCD_BG_COLOUR_LS (0x4 * 0x00c) +#define LCD_BG_COLOUR_MS (0x4 * 0x00d) +#define LCD_RAM_CFG (0x4 * 0x00e) + +/**************************************************************************** + * LCD controller Layer config register + 
***************************************************************************/ +#define LCD_LAYER0_CFG (0x4 * 0x100) +#define LCD_LAYERn_CFG(N) (LCD_LAYER0_CFG + (0x400 * (N))) +#define LCD_LAYER_SCALE_H BIT(1) +#define LCD_LAYER_SCALE_V BIT(2) +#define LCD_LAYER_SCALE_H_V (LCD_LAYER_SCALE_H | \ + LCD_LAYER_SCALE_V) +#define LCD_LAYER_CSC_EN BIT(3) +#define LCD_LAYER_ALPHA_STATIC BIT(4) +#define LCD_LAYER_ALPHA_EMBED BIT(5) +#define LCD_LAYER_ALPHA_COMBI (LCD_LAYER_ALPHA_STATIC | \ + LCD_LAYER_ALPHA_EMBED) +/* RGB multiplied with alpha */ +#define LCD_LAYER_ALPHA_PREMULT BIT(6) +#define LCD_LAYER_INVERT_COL BIT(7) +#define LCD_LAYER_TRANSPARENT_EN BIT(8) +#define LCD_LAYER_FORMAT_YCBCR444PLAN (0 << 9) +#define LCD_LAYER_FORMAT_YCBCR422PLAN BIT(9) +#define LCD_LAYER_FORMAT_YCBCR420PLAN (2 << 9) +#define LCD_LAYER_FORMAT_RGB888PLAN (3 << 9) +#define LCD_LAYER_FORMAT_YCBCR444LIN (4 << 9) +#define LCD_LAYER_FORMAT_YCBCR422LIN (5 << 9) +#define LCD_LAYER_FORMAT_RGB888 (6 << 9) +#define LCD_LAYER_FORMAT_RGBA8888 (7 << 9) +#define LCD_LAYER_FORMAT_RGBX8888 (8 << 9) +#define LCD_LAYER_FORMAT_RGB565 (9 << 9) +#define LCD_LAYER_FORMAT_RGBA1555 (0xa << 9) +#define LCD_LAYER_FORMAT_XRGB1555 (0xb << 9) +#define LCD_LAYER_FORMAT_RGB444 (0xc << 9) +#define LCD_LAYER_FORMAT_RGBA4444 (0xd << 9) +#define LCD_LAYER_FORMAT_RGBX4444 (0xe << 9) +#define LCD_LAYER_FORMAT_RGB332 (0xf << 9) +#define LCD_LAYER_FORMAT_RGBA3328 (0x10 << 9) +#define LCD_LAYER_FORMAT_RGBX3328 (0x11 << 9) +#define LCD_LAYER_FORMAT_CLUT (0x12 << 9) +#define LCD_LAYER_FORMAT_NV12 (0x1c << 9) +#define LCD_LAYER_PLANAR_STORAGE BIT(14) +#define LCD_LAYER_8BPP (0 << 15) +#define LCD_LAYER_16BPP BIT(15) +#define LCD_LAYER_24BPP (2 << 15) +#define LCD_LAYER_32BPP (3 << 15) +#define LCD_LAYER_Y_ORDER BIT(17) +#define LCD_LAYER_CRCB_ORDER BIT(18) +#define LCD_LAYER_BGR_ORDER BIT(19) +#define LCD_LAYER_LUT_2ENT (0 << 20) +#define LCD_LAYER_LUT_4ENT BIT(20) +#define LCD_LAYER_LUT_16ENT (2 << 20) +#define LCD_LAYER_NO_FLIP (0 << 22) +#define LCD_LAYER_FLIP_V BIT(22) +#define LCD_LAYER_FLIP_H (2 << 22) +#define LCD_LAYER_ROT_R90 (3 << 22) +#define LCD_LAYER_ROT_L90 (4 << 22) +#define LCD_LAYER_ROT_180 (5 << 22) +#define LCD_LAYER_FIFO_00 (0 << 25) +#define LCD_LAYER_FIFO_25 BIT(25) +#define LCD_LAYER_FIFO_50 (2 << 25) +#define LCD_LAYER_FIFO_100 (3 << 25) +#define LCD_LAYER_INTERLEAVE_DIS (0 << 27) +#define LCD_LAYER_INTERLEAVE_V BIT(27) +#define LCD_LAYER_INTERLEAVE_H (2 << 27) +#define LCD_LAYER_INTERLEAVE_CH (3 << 27) +#define LCD_LAYER_INTERLEAVE_V_SUB (4 << 27) +#define LCD_LAYER_INTERLEAVE_H_SUB (5 << 27) +#define LCD_LAYER_INTERLEAVE_CH_SUB (6 << 27) +#define LCD_LAYER_INTER_POS_EVEN (0 << 30) +#define LCD_LAYER_INTER_POS_ODD BIT(30) + +#define LCD_LAYER0_COL_START (0x4 * 0x101) +#define LCD_LAYERn_COL_START(N) (LCD_LAYER0_COL_START + (0x400 * (N))) +#define LCD_LAYER0_ROW_START (0x4 * 0x102) +#define LCD_LAYERn_ROW_START(N) (LCD_LAYER0_ROW_START + (0x400 * (N))) +#define LCD_LAYER0_WIDTH (0x4 * 0x103) +#define LCD_LAYERn_WIDTH(N) (LCD_LAYER0_WIDTH + (0x400 * (N))) +#define LCD_LAYER0_HEIGHT (0x4 * 0x104) +#define LCD_LAYERn_HEIGHT(N) (LCD_LAYER0_HEIGHT + (0x400 * (N))) +#define LCD_LAYER0_SCALE_CFG (0x4 * 0x105) +#define LCD_LAYERn_SCALE_CFG(N) (LCD_LAYER0_SCALE_CFG + (0x400 * (N))) +#define LCD_LAYER0_ALPHA (0x4 * 0x106) +#define LCD_LAYERn_ALPHA(N) (LCD_LAYER0_ALPHA + (0x400 * (N))) +#define LCD_LAYER0_INV_COLOUR_LS (0x4 * 0x107) +#define LCD_LAYERn_INV_COLOUR_LS(N) (LCD_LAYER0_INV_COLOUR_LS + \ + (0x400 * (N))) +#define 
LCD_LAYER0_INV_COLOUR_MS (0x4 * 0x108) +#define LCD_LAYERn_INV_COLOUR_MS(N) (LCD_LAYER0_INV_COLOUR_MS + \ + (0x400 * (N))) +#define LCD_LAYER0_TRANS_COLOUR_LS (0x4 * 0x109) +#define LCD_LAYERn_TRANS_COLOUR_LS(N) (LCD_LAYER0_TRANS_COLOUR_LS + \ + (0x400 * (N))) +#define LCD_LAYER0_TRANS_COLOUR_MS (0x4 * 0x10a) +#define LCD_LAYERn_TRANS_COLOUR_MS(N) (LCD_LAYER0_TRANS_COLOUR_MS + \ + (0x400 * (N))) +#define LCD_LAYER0_CSC_COEFF11 (0x4 * 0x10b) +#define LCD_LAYERn_CSC_COEFF11(N) (LCD_LAYER0_CSC_COEFF11 + (0x400 * (N))) +#define LCD_LAYER0_CSC_COEFF12 (0x4 * 0x10c) +#define LCD_LAYERn_CSC_COEFF12(N) (LCD_LAYER0_CSC_COEFF12 + (0x400 * (N))) +#define LCD_LAYER0_CSC_COEFF13 (0x4 * 0x10d) +#define LCD_LAYERn_CSC_COEFF13(N) (LCD_LAYER0_CSC_COEFF13 + (0x400 * (N))) +#define LCD_LAYER0_CSC_COEFF21 (0x4 * 0x10e) +#define LCD_LAYERn_CSC_COEFF21(N) (LCD_LAYER0_CSC_COEFF21 + (0x400 * (N))) +#define LCD_LAYER0_CSC_COEFF22 (0x4 * 0x10f) +#define LCD_LAYERn_CSC_COEFF22(N) (LCD_LAYER0_CSC_COEFF22 + (0x400 * (N))) +#define LCD_LAYER0_CSC_COEFF23 (0x4 * 0x110) +#define LCD_LAYERn_CSC_COEFF23(N) (LCD_LAYER0_CSC_COEFF23 + (0x400 * (N))) +#define LCD_LAYER0_CSC_COEFF31 (0x4 * 0x111) +#define LCD_LAYERn_CSC_COEFF31(N) (LCD_LAYER0_CSC_COEFF31 + (0x400 * (N))) +#define LCD_LAYER0_CSC_COEFF32 (0x4 * 0x112) +#define LCD_LAYERn_CSC_COEFF32(N) (LCD_LAYER0_CSC_COEFF32 + (0x400 * (N))) +#define LCD_LAYER0_CSC_COEFF33 (0x4 * 0x113) +#define LCD_LAYERn_CSC_COEFF33(N) (LCD_LAYER0_CSC_COEFF33 + (0x400 * (N))) +#define LCD_LAYER0_CSC_OFF1 (0x4 * 0x114) +#define LCD_LAYERn_CSC_OFF1(N) (LCD_LAYER0_CSC_OFF1 + (0x400 * (N))) +#define LCD_LAYER0_CSC_OFF2 (0x4 * 0x115) +#define LCD_LAYERn_CSC_OFF2(N) (LCD_LAYER0_CSC_OFF2 + (0x400 * (N))) +#define LCD_LAYER0_CSC_OFF3 (0x4 * 0x116) +#define LCD_LAYERn_CSC_OFF3(N) (LCD_LAYER0_CSC_OFF3 + (0x400 * (N))) + +/* LCD controller Layer DMA config register */ +#define LCD_LAYER0_DMA_CFG (0x4 * 0x117) +#define LCD_LAYERn_DMA_CFG(N) (LCD_LAYER0_DMA_CFG + \ + (0x400 * (N))) +#define LCD_DMA_LAYER_ENABLE BIT(0) +#define LCD_DMA_LAYER_STATUS BIT(1) +#define LCD_DMA_LAYER_AUTO_UPDATE BIT(2) +#define LCD_DMA_LAYER_CONT_UPDATE BIT(3) +#define LCD_DMA_LAYER_CONT_PING_PONG_UPDATE (LCD_DMA_LAYER_AUTO_UPDATE \ + | LCD_DMA_LAYER_CONT_UPDATE) +#define LCD_DMA_LAYER_FIFO_ADR_MODE BIT(4) +#define LCD_DMA_LAYER_AXI_BURST_1 BIT(5) +#define LCD_DMA_LAYER_AXI_BURST_2 (2 << 5) +#define LCD_DMA_LAYER_AXI_BURST_3 (3 << 5) +#define LCD_DMA_LAYER_AXI_BURST_4 (4 << 5) +#define LCD_DMA_LAYER_AXI_BURST_5 (5 << 5) +#define LCD_DMA_LAYER_AXI_BURST_6 (6 << 5) +#define LCD_DMA_LAYER_AXI_BURST_7 (7 << 5) +#define LCD_DMA_LAYER_AXI_BURST_8 (8 << 5) +#define LCD_DMA_LAYER_AXI_BURST_9 (9 << 5) +#define LCD_DMA_LAYER_AXI_BURST_10 (0xa << 5) +#define LCD_DMA_LAYER_AXI_BURST_11 (0xb << 5) +#define LCD_DMA_LAYER_AXI_BURST_12 (0xc << 5) +#define LCD_DMA_LAYER_AXI_BURST_13 (0xd << 5) +#define LCD_DMA_LAYER_AXI_BURST_14 (0xe << 5) +#define LCD_DMA_LAYER_AXI_BURST_15 (0xf << 5) +#define LCD_DMA_LAYER_AXI_BURST_16 (0x10 << 5) +#define LCD_DMA_LAYER_VSTRIDE_EN BIT(10) + +#define LCD_LAYER0_DMA_START_ADR (0x4 * 0x118) +#define LCD_LAYERn_DMA_START_ADDR(N) (LCD_LAYER0_DMA_START_ADR \ + + (0x400 * (N))) +#define LCD_LAYER0_DMA_START_SHADOW (0x4 * 0x119) +#define LCD_LAYERn_DMA_START_SHADOW(N) (LCD_LAYER0_DMA_START_SHADOW \ + + (0x400 * (N))) +#define LCD_LAYER0_DMA_LEN (0x4 * 0x11a) +#define LCD_LAYERn_DMA_LEN(N) (LCD_LAYER0_DMA_LEN + \ + (0x400 * (N))) +#define LCD_LAYER0_DMA_LEN_SHADOW (0x4 * 0x11b) +#define LCD_LAYERn_DMA_LEN_SHADOW(N) 
(LCD_LAYER0_DMA_LEN_SHADOW + \ + (0x400 * (N))) +#define LCD_LAYER0_DMA_STATUS (0x4 * 0x11c) +#define LCD_LAYERn_DMA_STATUS(N) (LCD_LAYER0_DMA_STATUS + \ + (0x400 * (N))) +#define LCD_LAYER0_DMA_LINE_WIDTH (0x4 * 0x11d) +#define LCD_LAYERn_DMA_LINE_WIDTH(N) (LCD_LAYER0_DMA_LINE_WIDTH + \ + (0x400 * (N))) +#define LCD_LAYER0_DMA_LINE_VSTRIDE (0x4 * 0x11e) +#define LCD_LAYERn_DMA_LINE_VSTRIDE(N) (LCD_LAYER0_DMA_LINE_VSTRIDE +\ + (0x400 * (N))) +#define LCD_LAYER0_DMA_FIFO_STATUS (0x4 * 0x11f) +#define LCD_LAYERn_DMA_FIFO_STATUS(N) (LCD_LAYER0_DMA_FIFO_STATUS + \ + (0x400 * (N))) +#define LCD_LAYER0_CFG2 (0x4 * 0x120) +#define LCD_LAYERn_CFG2(N) (LCD_LAYER0_CFG2 + (0x400 * (N))) +#define LCD_LAYER0_DMA_START_CB_ADR (0x4 * 0x700) +#define LCD_LAYERn_DMA_START_CB_ADR(N) (LCD_LAYER0_DMA_START_CB_ADR + \ + (0x20 * (N))) +#define LCD_LAYER0_DMA_START_CB_SHADOW (0x4 * 0x701) +#define LCD_LAYERn_DMA_START_CB_SHADOW(N) (LCD_LAYER0_DMA_START_CB_SHADOW\ + + (0x20 * (N))) +#define LCD_LAYER0_DMA_CB_LINE_WIDTH (0x4 * 0x702) +#define LCD_LAYERn_DMA_CB_LINE_WIDTH(N) (LCD_LAYER0_DMA_CB_LINE_WIDTH +\ + (0x20 * (N))) +#define LCD_LAYER0_DMA_CB_LINE_VSTRIDE (0x4 * 0x703) +#define LCD_LAYERn_DMA_CB_LINE_VSTRIDE(N) (LCD_LAYER0_DMA_CB_LINE_VSTRIDE\ + + (0x20 * (N))) +#define LCD_LAYER0_DMA_START_CR_ADR (0x4 * 0x704) +#define LCD_LAYERn_DMA_START_CR_ADR(N) (LCD_LAYER0_DMA_START_CR_ADR + \ + (0x20 * (N))) +#define LCD_LAYER0_DMA_START_CR_SHADOW (0x4 * 0x705) +#define LCD_LAYERn_DMA_START_CR_SHADOW(N) \ + (LCD_LAYER0_DMA_START_CR_SHADOW\ + + (0x20 * (N))) +#define LCD_LAYER0_DMA_CR_LINE_WIDTH (0x4 * 0x706) +#define LCD_LAYERn_DMA_CR_LINE_WIDTH(N) (LCD_LAYER0_DMA_CR_LINE_WIDTH +\ + (0x20 * (N))) +#define LCD_LAYER0_DMA_CR_LINE_VSTRIDE (0x4 * 0x707) +#define LCD_LAYERn_DMA_CR_LINE_VSTRIDE(N) (LCD_LAYER0_DMA_CR_LINE_VSTRIDE\ + + (0x20 * (N))) +#define LCD_LAYER1_DMA_START_CB_ADR (0x4 * 0x708) +#define LCD_LAYER1_DMA_START_CB_SHADOW (0x4 * 0x709) +#define LCD_LAYER1_DMA_CB_LINE_WIDTH (0x4 * 0x70a) +#define LCD_LAYER1_DMA_CB_LINE_VSTRIDE (0x4 * 0x70b) +#define LCD_LAYER1_DMA_START_CR_ADR (0x4 * 0x70c) +#define LCD_LAYER1_DMA_START_CR_SHADOW (0x4 * 0x70d) +#define LCD_LAYER1_DMA_CR_LINE_WIDTH (0x4 * 0x70e) +#define LCD_LAYER1_DMA_CR_LINE_VSTRIDE (0x4 * 0x70f) + +/**************************************************************************** + * LCD controller output format register defines + ***************************************************************************/ +#define LCD_OUT_FORMAT_CFG (0x4 * 0x800) +#define LCD_OUTF_FORMAT_RGB121212 (0x00) +#define LCD_OUTF_FORMAT_RGB101010 (0x01) +#define LCD_OUTF_FORMAT_RGB888 (0x02) +#define LCD_OUTF_FORMAT_RGB666 (0x03) +#define LCD_OUTF_FORMAT_RGB565 (0x04) +#define LCD_OUTF_FORMAT_RGB444 (0x05) +#define LCD_OUTF_FORMAT_MRGB121212 (0x10) +#define LCD_OUTF_FORMAT_MRGB101010 (0x11) +#define LCD_OUTF_FORMAT_MRGB888 (0x12) +#define LCD_OUTF_FORMAT_MRGB666 (0x13) +#define LCD_OUTF_FORMAT_MRGB565 (0x14) +#define LCD_OUTF_FORMAT_YCBCR420_8B_LEGACY (0x08) +#define LCD_OUTF_FORMAT_YCBCR420_8B_DCI (0x09) +#define LCD_OUTF_FORMAT_YCBCR420_8B (0x0A) +#define LCD_OUTF_FORMAT_YCBCR420_10B (0x0B) +#define LCD_OUTF_FORMAT_YCBCR420_12B (0x0C) +#define LCD_OUTF_FORMAT_YCBCR422_8B (0x0D) +#define LCD_OUTF_FORMAT_YCBCR422_10B (0x0E) +#define LCD_OUTF_FORMAT_YCBCR444 (0x0F) +#define LCD_OUTF_FORMAT_MYCBCR420_8B_LEGACY (0x18) +#define LCD_OUTF_FORMAT_MYCBCR420_8B_DCI (0x19) +#define LCD_OUTF_FORMAT_MYCBCR420_8B (0x1A) +#define LCD_OUTF_FORMAT_MYCBCR420_10B (0x1B) +#define 
LCD_OUTF_FORMAT_MYCBCR420_12B (0x1C) +#define LCD_OUTF_FORMAT_MYCBCR422_8B (0x1D) +#define LCD_OUTF_FORMAT_MYCBCR422_10B (0x1E) +#define LCD_OUTF_FORMAT_MYCBCR444 (0x1F) +#define LCD_OUTF_BGR_ORDER BIT(5) +#define LCD_OUTF_Y_ORDER BIT(6) +#define LCD_OUTF_CRCB_ORDER BIT(7) +#define LCD_OUTF_SYNC_MODE BIT(11) +#define LCD_OUTF_RGB_CONV_MODE BIT(14) +#define LCD_OUTF_MIPI_RGB_MODE BIT(18) + +#define LCD_HSYNC_WIDTH (0x4 * 0x801) +#define LCD_H_BACKPORCH (0x4 * 0x802) +#define LCD_H_ACTIVEWIDTH (0x4 * 0x803) +#define LCD_H_FRONTPORCH (0x4 * 0x804) +#define LCD_VSYNC_WIDTH (0x4 * 0x805) +#define LCD_V_BACKPORCH (0x4 * 0x806) +#define LCD_V_ACTIVEHEIGHT (0x4 * 0x807) +#define LCD_V_FRONTPORCH (0x4 * 0x808) +#define LCD_VSYNC_START (0x4 * 0x809) +#define LCD_VSYNC_END (0x4 * 0x80a) +#define LCD_V_BACKPORCH_EVEN (0x4 * 0x80b) +#define LCD_VSYNC_WIDTH_EVEN (0x4 * 0x80c) +#define LCD_V_ACTIVEHEIGHT_EVEN (0x4 * 0x80d) +#define LCD_V_FRONTPORCH_EVEN (0x4 * 0x80e) +#define LCD_VSYNC_START_EVEN (0x4 * 0x80f) +#define LCD_VSYNC_END_EVEN (0x4 * 0x810) +#define LCD_TIMING_GEN_TRIG (0x4 * 0x811) +#define LCD_PWM0_CTRL (0x4 * 0x812) +#define LCD_PWM0_RPT_LEADIN (0x4 * 0x813) +#define LCD_PWM0_HIGH_LOW (0x4 * 0x814) +#define LCD_PWM1_CTRL (0x4 * 0x815) +#define LCD_PWM1_RPT_LEADIN (0x4 * 0x816) +#define LCD_PWM1_HIGH_LOW (0x4 * 0x817) +#define LCD_PWM2_CTRL (0x4 * 0x818) +#define LCD_PWM2_RPT_LEADIN (0x4 * 0x819) +#define LCD_PWM2_HIGH_LOW (0x4 * 0x81a) +#define LCD_VIDEO0_DMA0_BYTES (0x4 * 0xb00) +#define LCD_VIDEO0_DMA0_STATE (0x4 * 0xb01) +#define LCD_DMA_STATE_ACTIVE BIT(3) +#define LCD_VIDEO0_DMA1_BYTES (0x4 * 0xb02) +#define LCD_VIDEO0_DMA1_STATE (0x4 * 0xb03) +#define LCD_VIDEO0_DMA2_BYTES (0x4 * 0xb04) +#define LCD_VIDEO0_DMA2_STATE (0x4 * 0xb05) +#define LCD_VIDEO1_DMA0_BYTES (0x4 * 0xb06) +#define LCD_VIDEO1_DMA0_STATE (0x4 * 0xb07) +#define LCD_VIDEO1_DMA1_BYTES (0x4 * 0xb08) +#define LCD_VIDEO1_DMA1_STATE (0x4 * 0xb09) +#define LCD_VIDEO1_DMA2_BYTES (0x4 * 0xb0a) +#define LCD_VIDEO1_DMA2_STATE (0x4 * 0xb0b) +#define LCD_GRAPHIC0_DMA_BYTES (0x4 * 0xb0c) +#define LCD_GRAPHIC0_DMA_STATE (0x4 * 0xb0d) +#define LCD_GRAPHIC1_DMA_BYTES (0x4 * 0xb0e) +#define LCD_GRAPHIC1_DMA_STATE (0x4 * 0xb0f) + +/*************************************************************************** + * MIPI controller control register defines + *************************************************************************/ +#define MIPI0_HS_BASE_ADDR (MIPI_BASE_ADDR + 0x400) +#define HS_OFFSET(M) (((M) + 1) * 0x400) + +#define MIPI_TX_HS_CTRL (0x0) +#define MIPI_TXm_HS_CTRL(M) (MIPI_TX_HS_CTRL + HS_OFFSET(M)) +#define HS_CTRL_EN BIT(0) +/* 1:CSI 0:DSI */ +#define HS_CTRL_CSIDSIN BIT(2) +/* 1:LCD, 0:DMA */ +#define TX_SOURCE BIT(3) +#define ACTIVE_LANES(n) ((n) << 4) +#define LCD_VC(ch) ((ch) << 8) +#define DSI_EOTP_EN BIT(11) +#define DSI_CMD_HFP_EN BIT(12) +#define CRC_EN BIT(14) +#define HSEXIT_CNT(n) ((n) << 16) +#define HSCLKIDLE_CNT BIT(24) +#define MIPI_TX_HS_SYNC_CFG (0x8) +#define MIPI_TXm_HS_SYNC_CFG(M) (MIPI_TX_HS_SYNC_CFG \ + + HS_OFFSET(M)) +#define LINE_SYNC_PKT_ENABLE BIT(0) +#define FRAME_COUNTER_ACTIVE BIT(1) +#define LINE_COUNTER_ACTIVE BIT(2) +#define DSI_V_BLANKING BIT(4) +#define DSI_HSA_BLANKING BIT(5) +#define DSI_HBP_BLANKING BIT(6) +#define DSI_HFP_BLANKING BIT(7) +#define DSI_SYNC_PULSE_EVENTN BIT(8) +#define DSI_LPM_FIRST_VSA_LINE BIT(9) +#define DSI_LPM_LAST_VFP_LINE BIT(10) +#define WAIT_ALL_SECT BIT(11) +#define WAIT_TRIG_POS BIT(15) +#define ALWAYS_USE_HACT(f) ((f) << 19) +#define FRAME_GEN_EN(f) ((f) << 
23) +#define HACT_WAIT_STOP(f) ((f) << 28) +#define MIPI_TX0_HS_FG0_SECT0_PH (0x40) +#define MIPI_TXm_HS_FGn_SECTo_PH(M, N, O) (MIPI_TX0_HS_FG0_SECT0_PH + \ + HS_OFFSET(M) + (0x2C * (N)) \ + + (8 * (O))) +#define MIPI_TX_SECT_WC_MASK (0xffff) +#define MIPI_TX_SECT_VC_MASK (3) +#define MIPI_TX_SECT_VC_SHIFT (22) +#define MIPI_TX_SECT_DT_MASK (0x3f) +#define MIPI_TX_SECT_DT_SHIFT (16) +#define MIPI_TX_SECT_DM_MASK (3) +#define MIPI_TX_SECT_DM_SHIFT (24) +#define MIPI_TX_SECT_DMA_PACKED BIT(26) +#define MIPI_TX_HS_FG0_SECT_UNPACKED_BYTES0 (0x60) +#define MIPI_TX_HS_FG0_SECT_UNPACKED_BYTES1 (0x64) +#define MIPI_TXm_HS_FGn_SECT_UNPACKED_BYTES0(M, N) \ + (MIPI_TX_HS_FG0_SECT_UNPACKED_BYTES0 \ + + HS_OFFSET(M) + (0x2C * (N))) +#define MIPI_TX_HS_FG0_SECT0_LINE_CFG (0x44) +#define MIPI_TXm_HS_FGn_SECTo_LINE_CFG(M, N, O) \ + (MIPI_TX_HS_FG0_SECT0_LINE_CFG + HS_OFFSET(M) \ + + (0x2C * (N)) + (8 * (O))) + +#define MIPI_TX_HS_FG0_NUM_LINES (0x68) +#define MIPI_TXm_HS_FGn_NUM_LINES(M, N) \ + (MIPI_TX_HS_FG0_NUM_LINES + HS_OFFSET(M) \ + + (0x2C * (N))) +#define MIPI_TX_HS_VSYNC_WIDTHS0 (0x104) +#define MIPI_TXm_HS_VSYNC_WIDTHn(M, N) \ + (MIPI_TX_HS_VSYNC_WIDTHS0 + HS_OFFSET(M) \ + + (0x4 * (N))) +#define MIPI_TX_HS_V_BACKPORCHES0 (0x16c) +#define MIPI_TXm_HS_V_BACKPORCHESn(M, N) \ + (MIPI_TX_HS_V_BACKPORCHES0 + HS_OFFSET(M) \ + + (0x4 * (N))) +#define MIPI_TX_HS_V_FRONTPORCHES0 (0x174) +#define MIPI_TXm_HS_V_FRONTPORCHESn(M, N) \ + (MIPI_TX_HS_V_FRONTPORCHES0 + HS_OFFSET(M) \ + + (0x4 * (N))) +#define MIPI_TX_HS_V_ACTIVE0 (0x17c) +#define MIPI_TXm_HS_V_ACTIVEn(M, N) \ + (MIPI_TX_HS_V_ACTIVE0 + HS_OFFSET(M) \ + + (0x4 * (N))) +#define MIPI_TX_HS_HSYNC_WIDTH0 (0x10c) +#define MIPI_TXm_HS_HSYNC_WIDTHn(M, N) \ + (MIPI_TX_HS_HSYNC_WIDTH0 + HS_OFFSET(M) \ + + (0x4 * (N))) +#define MIPI_TX_HS_H_BACKPORCH0 (0x11c) +#define MIPI_TXm_HS_H_BACKPORCHn(M, N) \ + (MIPI_TX_HS_H_BACKPORCH0 + HS_OFFSET(M) \ + + (0x4 * (N))) +#define MIPI_TX_HS_H_FRONTPORCH0 (0x12c) +#define MIPI_TXm_HS_H_FRONTPORCHn(M, N) \ + (MIPI_TX_HS_H_FRONTPORCH0 + HS_OFFSET(M) \ + + (0x4 * (N))) +#define MIPI_TX_HS_H_ACTIVE0 (0x184) +#define MIPI_TXm_HS_H_ACTIVEn(M, N) \ + (MIPI_TX_HS_H_ACTIVE0 + HS_OFFSET(M) \ + + (0x4 * (N))) +#define MIPI_TX_HS_LLP_HSYNC_WIDTH0 (0x13c) +#define MIPI_TXm_HS_LLP_HSYNC_WIDTHn(M, N) \ + (MIPI_TX_HS_LLP_HSYNC_WIDTH0 + HS_OFFSET(M) \ + + (0x4 * (N))) +#define MIPI_TX_HS_LLP_H_BACKPORCH0 (0x14c) +#define MIPI_TXm_HS_LLP_H_BACKPORCHn(M, N) \ + (MIPI_TX_HS_LLP_H_BACKPORCH0 + HS_OFFSET(M) \ + + (0x4 * (N))) +#define MIPI_TX_HS_LLP_H_FRONTPORCH0 (0x15c) +#define MIPI_TXm_HS_LLP_H_FRONTPORCHn(M, N) \ + (MIPI_TX_HS_LLP_H_FRONTPORCH0 + HS_OFFSET(M) \ + + (0x4 * (N))) + +#define MIPI_TX_HS_MC_FIFO_CTRL_EN (0x194) +#define MIPI_TXm_HS_MC_FIFO_CTRL_EN(M) \ + (MIPI_TX_HS_MC_FIFO_CTRL_EN + HS_OFFSET(M)) + +#define MIPI_TX_HS_MC_FIFO_CHAN_ALLOC0 (0x198) +#define MIPI_TX_HS_MC_FIFO_CHAN_ALLOC1 (0x19c) +#define MIPI_TXm_HS_MC_FIFO_CHAN_ALLOCn(M, N) \ + (MIPI_TX_HS_MC_FIFO_CHAN_ALLOC0 + HS_OFFSET(M) \ + + (0x4 * (N))) +#define SET_MC_FIFO_CHAN_ALLOC(dev, ctrl, vc, sz) \ + kmb_write_bits_mipi(dev, \ + MIPI_TXm_HS_MC_FIFO_CHAN_ALLOCn(ctrl, \ + (vc) / 2), ((vc) % 2) * 16, 16, sz) +#define MIPI_TX_HS_MC_FIFO_RTHRESHOLD0 (0x1a0) +#define MIPI_TX_HS_MC_FIFO_RTHRESHOLD1 (0x1a4) +#define MIPI_TXm_HS_MC_FIFO_RTHRESHOLDn(M, N) \ + (MIPI_TX_HS_MC_FIFO_RTHRESHOLD0 + HS_OFFSET(M) \ + + (0x4 * (N))) +#define SET_MC_FIFO_RTHRESHOLD(dev, ctrl, vc, th) \ + kmb_write_bits_mipi(dev, MIPI_TXm_HS_MC_FIFO_RTHRESHOLDn(ctrl, \ + (vc) / 2), ((vc) % 2) * 16, 
16, th) +#define MIPI_TX_HS_DMA_CFG (0x1a8) +#define MIPI_TX_HS_DMA_START_ADR_CHAN0 (0x1ac) +#define MIPI_TX_HS_DMA_LEN_CHAN0 (0x1b4) + +/* MIPI IRQ */ +#define MIPI_CTRL_IRQ_STATUS0 (0x00) +#define MIPI_DPHY_ERR_IRQ 1 +#define MIPI_DPHY_ERR_MASK 0x7FE /*bits 1-10 */ +#define MIPI_HS_IRQ 13 +/* bits 13-22 */ +#define MIPI_HS_IRQ_MASK 0x7FE000 +#define MIPI_LP_EVENT_IRQ 25 +#define MIPI_GET_IRQ_STAT0(dev) kmb_read_mipi(dev, \ + MIPI_CTRL_IRQ_STATUS0) +#define MIPI_CTRL_IRQ_STATUS1 (0x04) +#define MIPI_HS_RX_EVENT_IRQ 0 +#define MIPI_GET_IRQ_STAT1(dev) kmb_read_mipi(dev, \ + MIPI_CTRL_IRQ_STATUS1) +#define MIPI_CTRL_IRQ_ENABLE0 (0x08) +#define SET_MIPI_CTRL_IRQ_ENABLE0(dev, M, N) kmb_set_bit_mipi(dev, \ + MIPI_CTRL_IRQ_ENABLE0, \ + (M) + (N)) +#define MIPI_GET_IRQ_ENABLED0(dev) kmb_read_mipi(dev, \ + MIPI_CTRL_IRQ_ENABLE0) +#define MIPI_CTRL_IRQ_ENABLE1 (0x0c) +#define MIPI_GET_IRQ_ENABLED1(dev) kmb_read_mipi(dev, \ + MIPI_CTRL_IRQ_ENABLE1) +#define MIPI_CTRL_IRQ_CLEAR0 (0x010) +#define SET_MIPI_CTRL_IRQ_CLEAR0(dev, M, N) \ + kmb_set_bit_mipi(dev, MIPI_CTRL_IRQ_CLEAR0, (M) + (N)) +#define MIPI_CTRL_IRQ_CLEAR1 (0x014) +#define SET_MIPI_CTRL_IRQ_CLEAR1(dev, M, N) \ + kmb_set_bit_mipi(dev, MIPI_CTRL_IRQ_CLEAR1, (M) + (N)) +#define MIPI_CTRL_DIG_LOOPBACK (0x018) +#define MIPI_TX_HS_IRQ_STATUS (0x01c) +#define MIPI_TX_HS_IRQ_STATUSm(M) (MIPI_TX_HS_IRQ_STATUS + \ + HS_OFFSET(M)) +#define GET_MIPI_TX_HS_IRQ_STATUS(dev, M) kmb_read_mipi(dev, \ + MIPI_TX_HS_IRQ_STATUSm(M)) +#define MIPI_TX_HS_IRQ_LINE_COMPARE BIT(1) +#define MIPI_TX_HS_IRQ_FRAME_DONE_0 BIT(2) +#define MIPI_TX_HS_IRQ_FRAME_DONE_1 BIT(3) +#define MIPI_TX_HS_IRQ_FRAME_DONE_2 BIT(4) +#define MIPI_TX_HS_IRQ_FRAME_DONE_3 BIT(5) +#define MIPI_TX_HS_IRQ_DMA_DONE_0 BIT(6) +#define MIPI_TX_HS_IRQ_DMA_IDLE_0 BIT(7) +#define MIPI_TX_HS_IRQ_DMA_DONE_1 BIT(8) +#define MIPI_TX_HS_IRQ_DMA_IDLE_1 BIT(9) +#define MIPI_TX_HS_IRQ_DMA_DONE_2 BIT(10) +#define MIPI_TX_HS_IRQ_DMA_IDLE_2 BIT(11) +#define MIPI_TX_HS_IRQ_DMA_DONE_3 BIT(12) +#define MIPI_TX_HS_IRQ_DMA_IDLE_3 BIT(13) +#define MIPI_TX_HS_IRQ_MC_FIFO_UNDERFLOW BIT(14) +#define MIPI_TX_HS_IRQ_MC_FIFO_OVERFLOW BIT(15) +#define MIPI_TX_HS_IRQ_LLP_FIFO_EMPTY BIT(16) +#define MIPI_TX_HS_IRQ_LLP_REQUEST_QUEUE_FULL BIT(17) +#define MIPI_TX_HS_IRQ_LLP_REQUEST_QUEUE_ERROR BIT(18) +#define MIPI_TX_HS_IRQ_LLP_WORD_COUNT_ERROR BIT(20) +#define MIPI_TX_HS_IRQ_FRAME_DONE \ + (MIPI_TX_HS_IRQ_FRAME_DONE_0 | \ + MIPI_TX_HS_IRQ_FRAME_DONE_1 | \ + MIPI_TX_HS_IRQ_FRAME_DONE_2 | \ + MIPI_TX_HS_IRQ_FRAME_DONE_3) + +#define MIPI_TX_HS_IRQ_DMA_DONE \ + (MIPI_TX_HS_IRQ_DMA_DONE_0 | \ + MIPI_TX_HS_IRQ_DMA_DONE_1 | \ + MIPI_TX_HS_IRQ_DMA_DONE_2 | \ + MIPI_TX_HS_IRQ_DMA_DONE_3) + +#define MIPI_TX_HS_IRQ_DMA_IDLE \ + (MIPI_TX_HS_IRQ_DMA_IDLE_0 | \ + MIPI_TX_HS_IRQ_DMA_IDLE_1 | \ + MIPI_TX_HS_IRQ_DMA_IDLE_2 | \ + MIPI_TX_HS_IRQ_DMA_IDLE_3) + +#define MIPI_TX_HS_IRQ_ERROR \ + (MIPI_TX_HS_IRQ_MC_FIFO_UNDERFLOW | \ + MIPI_TX_HS_IRQ_MC_FIFO_OVERFLOW | \ + MIPI_TX_HS_IRQ_LLP_FIFO_EMPTY | \ + MIPI_TX_HS_IRQ_LLP_REQUEST_QUEUE_FULL | \ + MIPI_TX_HS_IRQ_LLP_REQUEST_QUEUE_ERROR | \ + MIPI_TX_HS_IRQ_LLP_WORD_COUNT_ERROR) + +#define MIPI_TX_HS_IRQ_ALL \ + (MIPI_TX_HS_IRQ_FRAME_DONE | \ + MIPI_TX_HS_IRQ_DMA_DONE | \ + MIPI_TX_HS_IRQ_DMA_IDLE | \ + MIPI_TX_HS_IRQ_LINE_COMPARE | \ + MIPI_TX_HS_IRQ_ERROR) + +#define MIPI_TX_HS_IRQ_ENABLE (0x020) +#define GET_HS_IRQ_ENABLE(dev, M) kmb_read_mipi(dev, \ + MIPI_TX_HS_IRQ_ENABLE \ + + HS_OFFSET(M)) +#define MIPI_TX_HS_IRQ_CLEAR (0x024) + +/* MIPI Test Pattern Generation */ +#define 
MIPI_TX_HS_TEST_PAT_CTRL (0x230) +#define MIPI_TXm_HS_TEST_PAT_CTRL(M) \ + (MIPI_TX_HS_TEST_PAT_CTRL + HS_OFFSET(M)) +#define TP_EN_VCm(M) (1 << ((M) * 0x04)) +#define TP_SEL_VCm(M, N) \ + ((N) << (((M) * 0x04) + 1)) +#define TP_STRIPE_WIDTH(M) ((M) << 16) +#define MIPI_TX_HS_TEST_PAT_COLOR0 (0x234) +#define MIPI_TXm_HS_TEST_PAT_COLOR0(M) \ + (MIPI_TX_HS_TEST_PAT_COLOR0 + HS_OFFSET(M)) +#define MIPI_TX_HS_TEST_PAT_COLOR1 (0x238) +#define MIPI_TXm_HS_TEST_PAT_COLOR1(M) \ + (MIPI_TX_HS_TEST_PAT_COLOR1 + HS_OFFSET(M)) + +/* D-PHY regs */ +#define DPHY_ENABLE (0x100) +#define DPHY_INIT_CTRL0 (0x104) +#define SHUTDOWNZ 0 +#define RESETZ 12 +#define DPHY_INIT_CTRL1 (0x108) +#define PLL_CLKSEL_0 18 +#define PLL_SHADOW_CTRL 16 +#define DPHY_INIT_CTRL2 (0x10c) +#define SET_DPHY_INIT_CTRL0(dev, dphy, offset) \ + kmb_set_bit_mipi(dev, DPHY_INIT_CTRL0, \ + ((dphy) + (offset))) +#define CLR_DPHY_INIT_CTRL0(dev, dphy, offset) \ + kmb_clr_bit_mipi(dev, DPHY_INIT_CTRL0, \ + ((dphy) + (offset))) +#define DPHY_INIT_CTRL2 (0x10c) +#define DPHY_PLL_OBS0 (0x110) +#define DPHY_PLL_OBS1 (0x114) +#define DPHY_PLL_OBS2 (0x118) +#define DPHY_FREQ_CTRL0_3 (0x11c) +#define DPHY_FREQ_CTRL4_7 (0x120) +#define SET_DPHY_FREQ_CTRL0_3(dev, dphy, val) \ + kmb_write_bits_mipi(dev, DPHY_FREQ_CTRL0_3 \ + + (((dphy) / 4) * 4), (dphy % 4) * 8, 6, val) + +#define DPHY_FORCE_CTRL0 (0x128) +#define DPHY_FORCE_CTRL1 (0x12C) +#define MIPI_DPHY_STAT0_3 (0x134) +#define MIPI_DPHY_STAT4_7 (0x138) +#define GET_STOPSTATE_DATA(dev, dphy) \ + (((kmb_read_mipi(dev, MIPI_DPHY_STAT0_3 + \ + ((dphy) / 4) * 4)) >> \ + (((dphy % 4) * 8) + 4)) & 0x03) + +#define MIPI_DPHY_ERR_STAT6_7 (0x14C) + +#define DPHY_TEST_CTRL0 (0x154) +#define SET_DPHY_TEST_CTRL0(dev, dphy) \ + kmb_set_bit_mipi(dev, DPHY_TEST_CTRL0, (dphy)) +#define CLR_DPHY_TEST_CTRL0(dev, dphy) \ + kmb_clr_bit_mipi(dev, DPHY_TEST_CTRL0, \ + (dphy)) +#define DPHY_TEST_CTRL1 (0x158) +#define SET_DPHY_TEST_CTRL1_CLK(dev, dphy) \ + kmb_set_bit_mipi(dev, DPHY_TEST_CTRL1, (dphy)) +#define CLR_DPHY_TEST_CTRL1_CLK(dev, dphy) \ + kmb_clr_bit_mipi(dev, DPHY_TEST_CTRL1, (dphy)) +#define SET_DPHY_TEST_CTRL1_EN(dev, dphy) \ + kmb_set_bit_mipi(dev, DPHY_TEST_CTRL1, ((dphy) + 12)) +#define CLR_DPHY_TEST_CTRL1_EN(dev, dphy) \ + kmb_clr_bit_mipi(dev, DPHY_TEST_CTRL1, ((dphy) + 12)) +#define DPHY_TEST_DIN0_3 (0x15c) +#define SET_TEST_DIN0_3(dev, dphy, val) \ + kmb_write_mipi(dev, DPHY_TEST_DIN0_3 + \ + 4, ((val) << (((dphy) % 4) * 8))) +#define DPHY_TEST_DOUT0_3 (0x168) +#define GET_TEST_DOUT0_3(dev, dphy) \ + (kmb_read_mipi(dev, DPHY_TEST_DOUT0_3) \ + >> (((dphy) % 4) * 8) & 0xff) +#define DPHY_TEST_DOUT4_7 (0x16C) +#define GET_TEST_DOUT4_7(dev, dphy) \ + (kmb_read_mipi(dev, DPHY_TEST_DOUT4_7) \ + >> (((dphy) % 4) * 8) & 0xff) +#define DPHY_TEST_DOUT8_9 (0x170) +#define DPHY_TEST_DIN4_7 (0x160) +#define DPHY_TEST_DIN8_9 (0x164) +#define DPHY_PLL_LOCK (0x188) +#define GET_PLL_LOCK(dev, dphy) \ + (kmb_read_mipi(dev, DPHY_PLL_LOCK) \ + & (1 << ((dphy) - MIPI_DPHY6))) +#define DPHY_CFG_CLK_EN (0x18c) + +#define MSS_MIPI_CIF_CFG (0x00) +#define MSS_LCD_MIPI_CFG (0x04) +#define MSS_CAM_CLK_CTRL (0x10) +#define MSS_LOOPBACK_CFG (0x0C) +#define LCD BIT(1) +#define MIPI_COMMON BIT(2) +#define MIPI_TX0 BIT(9) +#define MSS_CAM_RSTN_CTRL (0x14) +#define MSS_CAM_RSTN_SET (0x20) +#define MSS_CAM_RSTN_CLR (0x24) + +#define MSSCPU_CPR_CLK_EN (0x0) +#define MSSCPU_CPR_RST_EN (0x10) +#define BIT_MASK_16 (0xffff) +/* icam lcd qos */ +#define LCD_QOS_PRIORITY (0x8) +#define LCD_QOS_MODE (0xC) +#define LCD_QOS_BW (0x10) 
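/*
 * Address-map convention, as read from the macros above: the per-layer
 * LCD register blocks repeat every 0x400 bytes, so for example
 *     LCD_LAYERn_CFG(2) == 0x400 + 2 * 0x400 == 0xc00,
 * while the Cb/Cr DMA block repeats every 0x20 bytes, which is why the
 * explicit LCD_LAYER1_DMA_START_CB_ADR (0x4 * 0x708 == 0x1c20) equals
 * LCD_LAYERn_DMA_START_CB_ADR(1) (0x1c00 + 0x20).  Similarly,
 * HS_OFFSET(M) == ((M) + 1) * 0x400 puts MIPI_TXm_HS_CTRL(0) at offset
 * 0x400, which lines up with MIPI0_HS_BASE_ADDR (MIPI_BASE_ADDR + 0x400).
 */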
+#endif /* __KMB_REGS_H__ */ diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c index ab460121fd52..d497af91d850 100644 --- a/drivers/gpu/drm/lima/lima_drv.c +++ b/drivers/gpu/drm/lima/lima_drv.c @@ -261,7 +261,7 @@ DEFINE_DRM_GEM_FOPS(lima_drm_driver_fops); * - 1.1.0 - add heap buffer support */ -static struct drm_driver lima_drm_driver = { +static const struct drm_driver lima_drm_driver = { .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ, .open = lima_drm_driver_open, .postclose = lima_drm_driver_postclose, diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index 11223fe348df..832e5280a6ed 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -182,14 +182,14 @@ static int lima_gem_pin(struct drm_gem_object *obj) return drm_gem_shmem_pin(obj); } -static void *lima_gem_vmap(struct drm_gem_object *obj) +static int lima_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) { struct lima_bo *bo = to_lima_bo(obj); if (bo->heap_size) - return ERR_PTR(-EINVAL); + return -EINVAL; - return drm_gem_shmem_vmap(obj); + return drm_gem_shmem_vmap(obj, map); } static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index dc6df9e9a40d..a070a85f8f36 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */ +#include <linux/dma-buf-map.h> #include <linux/kthread.h> #include <linux/slab.h> #include <linux/vmalloc.h> @@ -303,6 +304,8 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task) struct lima_dump_chunk_buffer *buffer_chunk; u32 size, task_size, mem_size; int i; + struct dma_buf_map map; + int ret; mutex_lock(&dev->error_task_list_lock); @@ -388,15 +391,15 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task) } else { buffer_chunk->size = lima_bo_size(bo); - data = drm_gem_shmem_vmap(&bo->base.base); - if (IS_ERR_OR_NULL(data)) { + ret = drm_gem_shmem_vmap(&bo->base.base, &map); + if (ret) { kvfree(et); goto out; } - memcpy(buffer_chunk + 1, data, buffer_chunk->size); + memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size); - drm_gem_shmem_vunmap(&bo->base.base, data); + drm_gem_shmem_vunmap(&bo->base.base, &map); } buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size; diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c index f9b5f450a9cb..870626e04ec0 100644 --- a/drivers/gpu/drm/mcde/mcde_drv.c +++ b/drivers/gpu/drm/mcde/mcde_drv.c @@ -178,7 +178,7 @@ static int mcde_modeset_init(struct drm_device *drm) DEFINE_DRM_GEM_CMA_FOPS(drm_fops); -static struct drm_driver mcde_drm_driver = { +static const struct drm_driver mcde_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .lastclose = drm_fb_helper_lastclose, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 23f5c10b0c67..193848fd7515 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -11,6 +11,7 @@ #include <asm/barrier.h> #include <soc/mediatek/smi.h> +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> @@ -577,17 +578,19 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, static void 
mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state) { - struct mtk_crtc_state *crtc_state = to_mtk_crtc_state(crtc->state); + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state); struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - if (mtk_crtc->event && crtc_state->base.event) + if (mtk_crtc->event && mtk_crtc_state->base.event) DRM_ERROR("new event while there is still a pending event\n"); - if (crtc_state->base.event) { - crtc_state->base.event->pipe = drm_crtc_index(crtc); + if (mtk_crtc_state->base.event) { + mtk_crtc_state->base.event->pipe = drm_crtc_index(crtc); WARN_ON(drm_crtc_vblank_get(crtc) != 0); - mtk_crtc->event = crtc_state->base.event; - crtc_state->base.event = NULL; + mtk_crtc->event = mtk_crtc_state->base.event; + mtk_crtc_state->base.event = NULL; } } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 7f3398a7c2b0..2e8065b1e2bb 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -321,7 +321,7 @@ struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev, return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev); } -static struct drm_driver mtk_drm_driver = { +static const struct drm_driver mtk_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .dumb_create = mtk_drm_gem_dumb_create, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c index cdd1a6e61564..28a2ee1336ef 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c @@ -240,23 +240,25 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, return &mtk_gem->base; } -void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj) +int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) { struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); - struct sg_table *sgt; + struct sg_table *sgt = NULL; unsigned int npages; if (mtk_gem->kvaddr) - return mtk_gem->kvaddr; + goto out; sgt = mtk_gem_prime_get_sg_table(obj); if (IS_ERR(sgt)) - return NULL; + return PTR_ERR(sgt); npages = obj->size >> PAGE_SHIFT; mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL); - if (!mtk_gem->pages) - goto out; + if (!mtk_gem->pages) { + kfree(sgt); + return -ENOMEM; + } drm_prime_sg_to_page_addr_arrays(sgt, mtk_gem->pages, NULL, npages); @@ -265,13 +267,15 @@ void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj) out: kfree(sgt); + dma_buf_map_set_vaddr(map, mtk_gem->kvaddr); - return mtk_gem->kvaddr; + return 0; } -void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) { struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); + void *vaddr = map->vaddr; if (!mtk_gem->pages) return; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.h b/drivers/gpu/drm/mediatek/mtk_drm_gem.h index ff9f976d9807..6da5ccb4b933 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.h @@ -45,7 +45,7 @@ int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj, struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); -void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj); 
-void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); +int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); +void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); #endif diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 8b9c8dd788c4..dc8509ceb787 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -90,7 +90,7 @@ static int meson_dumb_create(struct drm_file *file, struct drm_device *dev, DEFINE_DRM_GEM_CMA_FOPS(fops); -static struct drm_driver meson_driver = { +static const struct drm_driver meson_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, /* IRQ */ diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c index 85c74364ce24..1cb7d120d18f 100644 --- a/drivers/gpu/drm/mga/mga_dma.c +++ b/drivers/gpu/drm/mga/mga_dma.c @@ -25,7 +25,7 @@ * DEALINGS IN THE SOFTWARE. */ -/** +/* * \file mga_dma.c * DMA support for MGA G200 / G400. * @@ -435,7 +435,7 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags) } #if IS_ENABLED(CONFIG_AGP) -/** +/* * Bootstrap the driver for AGP DMA. * * \todo @@ -610,7 +610,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev, } #endif -/** +/* * Bootstrap the driver for PCI DMA. * * \todo @@ -1143,7 +1143,7 @@ int mga_dma_buffers(struct drm_device *dev, void *data, return ret; } -/** +/* * Called just before the module is unloaded. */ void mga_driver_unload(struct drm_device *dev) @@ -1152,7 +1152,7 @@ void mga_driver_unload(struct drm_device *dev) dev->dev_private = NULL; } -/** +/* * Called when the last opener of the device is closed. */ void mga_driver_lastclose(struct drm_device *dev) diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c index 77a0b006f066..0dec4062e5a2 100644 --- a/drivers/gpu/drm/mga/mga_state.c +++ b/drivers/gpu/drm/mga/mga_state.c @@ -942,7 +942,6 @@ static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *fi struct drm_device_dma *dma = dev->dma; drm_mga_private_t *dev_priv = dev->dev_private; struct drm_buf *buf; - drm_mga_buf_priv_t *buf_priv; drm_mga_iload_t *iload = data; DRM_DEBUG("\n"); @@ -959,7 +958,6 @@ static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *fi return -EINVAL; buf = dma->buflist[iload->idx]; - buf_priv = buf->dev_private; if (mga_verify_iload(dev_priv, iload->dstorg, iload->length)) { mga_freelist_put(dev, buf); diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 771b26aeee19..0f07f259503d 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -28,7 +28,7 @@ module_param_named(modeset, mgag200_modeset, int, 0400); DEFINE_DRM_GEM_FOPS(mgag200_driver_fops); -static struct drm_driver mgag200_driver = { +static const struct drm_driver mgag200_driver = { .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET, .fops = &mgag200_driver_fops, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 38672f9e5c4f..1dfc42170059 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -9,6 +9,7 @@ */ #include <linux/delay.h> +#include <linux/dma-buf-map.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_atomic_state_helper.h> @@ -794,21 +795,16 @@ static int mgag200_crtc_set_plls(struct mga_device *mdev, long clock) case G200_SE_A: case 
G200_SE_B: return mga_g200se_set_plls(mdev, clock); - break; case G200_WB: case G200_EW3: return mga_g200wb_set_plls(mdev, clock); - break; case G200_EV: return mga_g200ev_set_plls(mdev, clock); - break; case G200_EH: case G200_EH3: return mga_g200eh_set_plls(mdev, clock); - break; case G200_ER: return mga_g200er_set_plls(mdev, clock); - break; } misc = RREG8(MGA_MISC_IN); @@ -1556,15 +1552,18 @@ mgag200_handle_damage(struct mga_device *mdev, struct drm_framebuffer *fb, struct drm_rect *clip) { struct drm_device *dev = &mdev->base; + struct dma_buf_map map; void *vmap; + int ret; - vmap = drm_gem_shmem_vmap(fb->obj[0]); - if (drm_WARN_ON(dev, !vmap)) + ret = drm_gem_shmem_vmap(fb->obj[0], &map); + if (drm_WARN_ON(dev, ret)) return; /* BUG: SHMEM BO should always be vmapped */ + vmap = map.vaddr; /* TODO: Use mapping abstraction properly */ drm_fb_memcpy_dstclip(mdev->vram, vmap, fb, clip); - drm_gem_shmem_vunmap(fb->obj[0], vmap); + drm_gem_shmem_vunmap(fb->obj[0], &map); /* Always scanout image at VRAM offset 0 */ mgag200_set_startadd(mdev, (u32)0); diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index aa4509766d64..f8fd31e709bb 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -393,7 +393,7 @@ static int msm_init_vram(struct drm_device *dev) return ret; } -static int msm_drm_init(struct device *dev, struct drm_driver *drv) +static int msm_drm_init(struct device *dev, const struct drm_driver *drv) { struct platform_device *pdev = to_platform_device(dev); struct drm_device *ddev; @@ -984,7 +984,7 @@ static const struct file_operations fops = { .mmap = msm_gem_mmap, }; -static struct drm_driver msm_driver = { +static const struct drm_driver msm_driver = { .driver_features = DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC | diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index c45789f36e48..a6aef687bc6e 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -295,8 +295,8 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, uint32_t handle, uint64_t *offset); struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj); -void *msm_gem_prime_vmap(struct drm_gem_object *obj); -void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); +int msm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); +void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index 515ef80816a0..9880348a4dc7 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -22,12 +22,19 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj) return drm_prime_pages_to_sg(obj->dev, msm_obj->pages, npages); } -void *msm_gem_prime_vmap(struct drm_gem_object *obj) +int msm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) { - return msm_gem_get_vaddr(obj); + void *vaddr; + + vaddr = msm_gem_get_vaddr(obj); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); + dma_buf_map_set_vaddr(map, vaddr); + + return 0; } -void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +void msm_gem_prime_vunmap(struct 
drm_gem_object *obj, struct dma_buf_map *map) { msm_gem_put_vaddr(obj); } diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index 35122aef037b..6faf17b6408d 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -289,7 +289,7 @@ static irqreturn_t mxsfb_irq_handler(int irq, void *data) DEFINE_DRM_GEM_CMA_FOPS(fops); -static struct drm_driver mxsfb_driver = { +static const struct drm_driver mxsfb_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .irq_handler = mxsfb_irq_handler, .irq_preinstall = mxsfb_irq_disable, diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c index 9040835289a8..a6b3d6e84c52 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c @@ -486,6 +486,13 @@ static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane, writel(ctrl, mxsfb->base + LCDC_AS_CTRL); } +static bool mxsfb_format_mod_supported(struct drm_plane *plane, + uint32_t format, + uint64_t modifier) +{ + return modifier == DRM_FORMAT_MOD_LINEAR; +} + static const struct drm_plane_helper_funcs mxsfb_plane_primary_helper_funcs = { .atomic_check = mxsfb_plane_atomic_check, .atomic_update = mxsfb_plane_primary_atomic_update, @@ -497,6 +504,7 @@ static const struct drm_plane_helper_funcs mxsfb_plane_overlay_helper_funcs = { }; static const struct drm_plane_funcs mxsfb_plane_funcs = { + .format_mod_supported = mxsfb_format_mod_supported, .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = drm_plane_cleanup, diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index 5dec1e5694b7..9436310d0854 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig @@ -6,6 +6,7 @@ config DRM_NOUVEAU select FW_LOADER select DRM_KMS_HELPER select DRM_TTM + select DRM_TTM_HELPER select BACKLIGHT_CLASS_DEVICE if DRM_NOUVEAU_BACKLIGHT select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT select X86_PLATFORM_DEVICES if ACPI && X86 diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 8133377d865d..7aa4286784ae 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -861,96 +861,6 @@ nouveau_bo_move_init(struct nouveau_drm *drm) NV_INFO(drm, "MM: using %s for buffer copies\n", name); } -static int -nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, - struct ttm_operation_ctx *ctx, - struct ttm_resource *new_reg) -{ - struct ttm_place placement_memtype = { - .fpfn = 0, - .lpfn = 0, - .mem_type = TTM_PL_TT, - .flags = 0 - }; - struct ttm_placement placement; - struct ttm_resource tmp_reg; - int ret; - - placement.num_placement = placement.num_busy_placement = 1; - placement.placement = placement.busy_placement = &placement_memtype; - - tmp_reg = *new_reg; - tmp_reg.mm_node = NULL; - ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, ctx); - if (ret) - return ret; - - ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx); - if (ret) - goto out; - - ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg); - if (ret) - goto out; - - ret = nouveau_bo_move_m2mf(bo, true, ctx, &tmp_reg); - if (ret) - goto out; - - ret = ttm_bo_wait_ctx(bo, ctx); - if (ret) - goto out; - - nouveau_ttm_tt_unbind(bo->bdev, bo->ttm); - ttm_resource_free(bo, &bo->mem); - ttm_bo_assign_mem(bo, &tmp_reg); -out: - ttm_resource_free(bo, &tmp_reg); - return ret; -} - -static int -nouveau_bo_move_flips(struct 
ttm_buffer_object *bo, bool evict, - struct ttm_operation_ctx *ctx, - struct ttm_resource *new_reg) -{ - struct ttm_place placement_memtype = { - .fpfn = 0, - .lpfn = 0, - .mem_type = TTM_PL_TT, - .flags = 0 - }; - struct ttm_placement placement; - struct ttm_resource tmp_reg; - int ret; - - placement.num_placement = placement.num_busy_placement = 1; - placement.placement = placement.busy_placement = &placement_memtype; - - tmp_reg = *new_reg; - tmp_reg.mm_node = NULL; - ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, ctx); - if (ret) - return ret; - - ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx); - if (unlikely(ret != 0)) - return ret; - - ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg); - if (unlikely(ret != 0)) - return ret; - - ttm_bo_assign_mem(bo, &tmp_reg); - ret = nouveau_bo_move_m2mf(bo, true, ctx, new_reg); - if (ret) - goto out; - -out: - ttm_resource_free(bo, &tmp_reg); - return ret; -} - static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict, struct ttm_resource *new_reg) @@ -1023,7 +933,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, static int nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_resource *new_reg) + struct ttm_resource *new_reg, + struct ttm_place *hop) { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct nouveau_bo *nvbo = nouveau_bo(bo); @@ -1031,6 +942,17 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, struct nouveau_drm_tile *new_tile = NULL; int ret = 0; + if ((old_reg->mem_type == TTM_PL_SYSTEM && + new_reg->mem_type == TTM_PL_VRAM) || + (old_reg->mem_type == TTM_PL_VRAM && + new_reg->mem_type == TTM_PL_SYSTEM)) { + hop->fpfn = 0; + hop->lpfn = 0; + hop->mem_type = TTM_PL_TT; + hop->flags = 0; + return -EMULTIHOP; + } + if (new_reg->mem_type == TTM_PL_TT) { ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg); if (ret) @@ -1073,15 +995,8 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, /* Hardware assisted copy. 
*/ if (drm->ttm.move) { - if (new_reg->mem_type == TTM_PL_SYSTEM) - ret = nouveau_bo_move_flipd(bo, evict, ctx, - new_reg); - else if (old_reg->mem_type == TTM_PL_SYSTEM) - ret = nouveau_bo_move_flips(bo, evict, ctx, - new_reg); - else - ret = nouveau_bo_move_m2mf(bo, evict, ctx, - new_reg); + ret = nouveau_bo_move_m2mf(bo, evict, ctx, + new_reg); if (!ret) goto out; } @@ -1142,9 +1057,12 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg) struct nvkm_device *device = nvxx_device(&drm->client.device); struct nouveau_mem *mem = nouveau_mem(reg); struct nvif_mmu *mmu = &drm->client.mmu; - const u8 type = mmu->type[drm->ttm.type_vram].type; + u8 type = 0; int ret; + if (drm->ttm.type_vram >= 0) + type = mmu->type[drm->ttm.type_vram].type; + mutex_lock(&drm->ttm.io_reserve_mutex); retry: switch (reg->mem_type) { diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h index 641ef6298a0e..6045b85a762a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.h +++ b/drivers/gpu/drm/nouveau/nouveau_bo.h @@ -39,8 +39,6 @@ struct nouveau_bo { unsigned mode; struct nouveau_drm_tile *tile; - - struct ttm_bo_kmap_obj dma_buf_vmap; }; static inline struct nouveau_bo * diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index dd51cd0ae20c..787d05eefd9c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -24,6 +24,8 @@ * */ +#include <drm/drm_gem_ttm_helper.h> + #include "nouveau_drv.h" #include "nouveau_dma.h" #include "nouveau_fence.h" @@ -176,8 +178,8 @@ const struct drm_gem_object_funcs nouveau_gem_object_funcs = { .pin = nouveau_gem_prime_pin, .unpin = nouveau_gem_prime_unpin, .get_sg_table = nouveau_gem_prime_get_sg_table, - .vmap = nouveau_gem_prime_vmap, - .vunmap = nouveau_gem_prime_vunmap, + .vmap = drm_gem_ttm_vmap, + .vunmap = drm_gem_ttm_vunmap, }; int diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h index b35c180322e2..3b919c7c931c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.h +++ b/drivers/gpu/drm/nouveau/nouveau_gem.h @@ -37,7 +37,5 @@ extern void nouveau_gem_prime_unpin(struct drm_gem_object *); extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *); extern struct drm_gem_object *nouveau_gem_prime_import_sg_table( struct drm_device *, struct dma_buf_attachment *, struct sg_table *); -extern void *nouveau_gem_prime_vmap(struct drm_gem_object *); -extern void nouveau_gem_prime_vunmap(struct drm_gem_object *, void *); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c index a8264aebf3d4..2f16b5249283 100644 --- a/drivers/gpu/drm/nouveau/nouveau_prime.c +++ b/drivers/gpu/drm/nouveau/nouveau_prime.c @@ -35,26 +35,6 @@ struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj) return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages, npages); } -void *nouveau_gem_prime_vmap(struct drm_gem_object *obj) -{ - struct nouveau_bo *nvbo = nouveau_gem_object(obj); - int ret; - - ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages, - &nvbo->dma_buf_vmap); - if (ret) - return ERR_PTR(ret); - - return nvbo->dma_buf_vmap.virtual; -} - -void nouveau_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) -{ - struct nouveau_bo *nvbo = nouveau_gem_object(obj); - - ttm_bo_kunmap(&nvbo->dma_buf_vmap); -} - struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct 
sg_table *sg) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c index 350f10a3de37..2ec84b8a3b3a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c @@ -123,7 +123,6 @@ pll_map(struct nvkm_bios *bios) case NV_20: case NV_30: return nv04_pll_mapping; - break; case NV_40: return nv40_pll_mapping; case NV_50: diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c index efa50274df97..4884eb4a9221 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c @@ -140,17 +140,14 @@ mcp77_clk_read(struct nvkm_clk *base, enum nv_clk_src src) break; case nv_clk_src_mem: return 0; - break; case nv_clk_src_vdec: P = (read_div(clk) & 0x00000700) >> 8; switch (mast & 0x00400000) { case 0x00400000: return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P; - break; default: return 500000 >> P; - break; } break; default: diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c index 2ccb4b6be153..7b1eb44ff3da 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c @@ -171,7 +171,6 @@ nv50_ram_timing_read(struct nv50_ram *ram, u32 *timing) break; default: return -ENOSYS; - break; } T(WR) = ((timing[1] >> 24) & 0xff) - 1 - T(CWL); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c index e01746ce9fc4..1156634533f9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c @@ -90,7 +90,6 @@ gk104_top_oneinit(struct nvkm_top *top) case 0x00000010: B_(NVDEC ); break; case 0x00000013: B_(CE ); break; case 0x00000014: C_(GSP ); break; - break; default: break; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index faca5c873bde..e39ce0c0c9a9 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -2,7 +2,7 @@ /* * Generic DSI Command Mode panel driver * - * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/dss/Kconfig b/drivers/gpu/drm/omapdrm/dss/Kconfig index 2658c521b702..e11b258a2294 100644 --- a/drivers/gpu/drm/omapdrm/dss/Kconfig +++ b/drivers/gpu/drm/omapdrm/dss/Kconfig @@ -80,7 +80,7 @@ config OMAP5_DSS_HDMI select OMAP2_DSS_HDMI_COMMON help HDMI Interface for OMAP5 and similar cores. This adds the High - Definition Multimedia Interface. See http://www.hdmi.org/ for HDMI + Definition Multimedia Interface. See https://www.hdmi.org/ for HDMI specification. config OMAP2_DSS_SDI @@ -101,7 +101,7 @@ config OMAP2_DSS_DSI DSI is a high speed half-duplex serial interface between the host processor and a peripheral, such as a display or a framebuffer chip. - See http://www.mipi.org/ for DSI specifications. + See https://www.mipi.org/ for DSI specifications. 
config OMAP2_DSS_MIN_FCK_PER_PCK int "Minimum FCK/PCK ratio (for scaling)" diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index c7650a7c155d..cf50430e6363 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -2,7 +2,7 @@ /* * OMAP Display Subsystem Base * - * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015-2017 Texas Instruments Incorporated - https://www.ti.com/ */ #include <linux/kernel.h> diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c index 48593932bddf..599183879caf 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c @@ -653,8 +653,11 @@ int dispc_runtime_get(struct dispc_device *dispc) DSSDBG("dispc_runtime_get\n"); r = pm_runtime_get_sync(&dispc->pdev->dev); - WARN_ON(r < 0); - return r < 0 ? r : 0; + if (WARN_ON(r < 0)) { + pm_runtime_put_noidle(&dispc->pdev->dev); + return r; + } + return 0; } void dispc_runtime_put(struct dispc_device *dispc) diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.h b/drivers/gpu/drm/omapdrm/dss/dispc.h index 2348faf88768..3f842c1ff81a 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.h +++ b/drivers/gpu/drm/omapdrm/dss/dispc.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Archit Taneja <archit@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c index bccb28de5a59..d1f3a93b8efd 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Chandrabhanu Mahapatra <cmahapatra@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index eeccf40bae41..735a4e9027d0 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -1112,8 +1112,11 @@ static int dsi_runtime_get(struct dsi_data *dsi) DSSDBG("dsi_runtime_get\n"); r = pm_runtime_get_sync(dsi->dev); - WARN_ON(r < 0); - return r < 0 ? r : 0; + if (WARN_ON(r < 0)) { + pm_runtime_put_noidle(dsi->dev); + return r; + } + return 0; } static void dsi_runtime_put(struct dsi_data *dsi) @@ -1128,13 +1131,12 @@ static void dsi_runtime_put(struct dsi_data *dsi) static void _dsi_print_reset_status(struct dsi_data *dsi) { - u32 l; int b0, b1, b2; /* A dummy read using the SCP interface to any DSIPHY register is * required after DSIPHY reset to complete the reset of the DSI complex * I/O. 
*/ - l = dsi_read_reg(dsi, DSI_DSIPHY_CFG5); + dsi_read_reg(dsi, DSI_DSIPHY_CFG5); if (dsi->data->quirks & DSI_QUIRK_REVERSE_TXCLKESC) { b0 = 28; @@ -3940,7 +3942,6 @@ static int dsi_update(struct omap_dss_device *dssdev, int channel, void (*callback)(int, void *), void *data) { struct dsi_data *dsi = to_dsi_data(dssdev); - u16 dw, dh; dsi_perf_mark_setup(dsi); @@ -3949,11 +3950,8 @@ static int dsi_update(struct omap_dss_device *dssdev, int channel, dsi->framedone_callback = callback; dsi->framedone_data = data; - dw = dsi->vm.hactive; - dh = dsi->vm.vactive; - #ifdef DSI_PERF_MEASURE - dsi->update_bytes = dw * dh * + dsi->update_bytes = dsi->vm.hactive * dsi->vm.vactive * dsi_get_pixel_size(dsi->pix_fmt) / 8; #endif dsi_update_screen_dispc(dsi); diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index 6ccbc29c4ce4..d7b2f5bcac16 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -858,8 +858,11 @@ int dss_runtime_get(struct dss_device *dss) DSSDBG("dss_runtime_get\n"); r = pm_runtime_get_sync(&dss->pdev->dev); - WARN_ON(r < 0); - return r < 0 ? r : 0; + if (WARN_ON(r < 0)) { + pm_runtime_put_noidle(&dss->pdev->dev); + return r; + } + return 0; } void dss_runtime_put(struct dss_device *dss) diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h index 3a40833d3368..c4a4e07f0b99 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi.h +++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h @@ -2,7 +2,7 @@ /* * HDMI driver definition for TI OMAP4 Processor. * - * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef _HDMI_H diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index a14fbf06cb30..8de41e74e8f8 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -2,7 +2,7 @@ /* * HDMI interface DSS driver for TI's OMAP4 family of SoCs. * - * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com/ * Authors: Yong Zhi * Mythri pk <mythripk@ti.com> */ @@ -43,10 +43,10 @@ static int hdmi_runtime_get(struct omap_hdmi *hdmi) DSSDBG("hdmi_runtime_get\n"); r = pm_runtime_get_sync(&hdmi->pdev->dev); - WARN_ON(r < 0); - if (r < 0) + if (WARN_ON(r < 0)) { + pm_runtime_put_noidle(&hdmi->pdev->dev); return r; - + } return 0; } diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c index ebf9c96d43ee..43592c1cf081 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c @@ -3,7 +3,7 @@ * * Based on the CEC code from hdmi_ti_4xxx_ip.c from Android. 
* - * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com/ * Authors: Yong Zhi * Mythri pk <mythripk@ti.com> * diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c index 751985a2679a..35faa7f028c4 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c @@ -2,7 +2,7 @@ /* * HDMI TI81xx, TI38xx, TI OMAP4 etc IP driver Library * - * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com/ * Authors: Yong Zhi * Mythri pk <mythripk@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h index dc64ae2aa300..3c9e1f600fbe 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h @@ -2,7 +2,7 @@ /* * HDMI header definition for OMAP4 HDMI core IP * - * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef _HDMI4_CORE_H_ diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index b738d9750686..54e5cb5aa52d 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -2,7 +2,7 @@ /* * HDMI driver for OMAP5 * - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/ * * Authors: * Yong Zhi @@ -44,10 +44,10 @@ static int hdmi_runtime_get(struct omap_hdmi *hdmi) DSSDBG("hdmi_runtime_get\n"); r = pm_runtime_get_sync(&hdmi->pdev->dev); - WARN_ON(r < 0); - if (r < 0) + if (WARN_ON(r < 0)) { + pm_runtime_put_noidle(&hdmi->pdev->dev); return r; - + } return 0; } diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c index 7dd587035160..6cc2ad7a420c 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c @@ -2,7 +2,7 @@ /* * OMAP5 HDMI CORE IP driver library * - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/ * Authors: * Yong Zhi * Mythri pk diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h index 65eadefdb3f9..070cbf5fb57d 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h @@ -2,7 +2,7 @@ /* * HDMI driver definition for TI OMAP5 processors. 
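A fix repeated across the omapdrm runtime-PM helpers (dispc, dsi, dss, hdmi4 and hdmi5 above, venc below): pm_runtime_get_sync() increments the device's usage counter even when it returns an error, so bailing out without a matching put leaks a reference. A minimal sketch of the corrected pattern, with a hypothetical helper name:

#include <linux/pm_runtime.h>

static int mydrv_runtime_get(struct device *dev)
{
        int r = pm_runtime_get_sync(dev);

        if (WARN_ON(r < 0)) {
                /* get_sync bumped the usage count even on failure; drop it. */
                pm_runtime_put_noidle(dev);
                return r;
        }
        return 0;
}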
* - * Copyright (C) 2011-2012 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011-2012 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef _HDMI5_CORE_H_ diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c index 00bbf24488c1..5dc200f09c3c 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c @@ -2,7 +2,7 @@ /* * HDMI PHY * - * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/ */ #include <linux/kernel.h> diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c index cf2b000f397f..13bf649aba52 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c @@ -2,7 +2,7 @@ /* * HDMI PLL * - * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/ */ #define DSS_SUBSYS_NAME "HDMIPLL" diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c index 32f45f4f569d..9d830584a762 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c @@ -2,7 +2,7 @@ /* * HDMI wrapper * - * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/ */ #define DSS_SUBSYS_NAME "HDMIWP" diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c index 72a7da7bfff1..f21b5df31213 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c +++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index ab19d4af8de7..a48a9a254e33 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com/ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index ce21c798cca6..5affdf078134 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ * Author: Archit Taneja <archit@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c index 1212f3cc52d1..241a338ace29 100644 --- a/drivers/gpu/drm/omapdrm/dss/pll.c +++ b/drivers/gpu/drm/omapdrm/dss/pll.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/ */ #define DSS_SUBSYS_NAME "PLL" diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index 5c027c81760f..94cf50d837b0 
100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -361,8 +361,11 @@ static int venc_runtime_get(struct venc_device *venc) DSSDBG("venc_runtime_get\n"); r = pm_runtime_get_sync(&venc->pdev->dev); - WARN_ON(r < 0); - return r < 0 ? r : 0; + if (WARN_ON(r < 0)) { + pm_runtime_put_noidle(&venc->pdev->dev); + return r; + } + return 0; } static void venc_runtime_put(struct venc_device *venc) diff --git a/drivers/gpu/drm/omapdrm/dss/video-pll.c b/drivers/gpu/drm/omapdrm/dss/video-pll.c index a612e2696dbc..b72c3ffddc9a 100644 --- a/drivers/gpu/drm/omapdrm/dss/video-pll.c +++ b/drivers/gpu/drm/omapdrm/dss/video-pll.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/ */ #include <linux/clk.h> diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index de95dc1b861c..47719b92e22b 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 49621b2e1ab5..7d66269ad998 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c index b57fbe8a0ac2..2d3909a37f51 100644 --- a/drivers/gpu/drm/omapdrm/omap_debugfs.c +++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob.clark@linaro.org> */ diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h index 60bb3f9297bc..58a8239d3e69 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h +++ b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob@ti.com> * Andy Gross <andy.gross@ti.com> * diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index 42ec51bb7b1b..38a85a5bfb30 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -1,7 +1,7 @@ /* * DMM IOMMU driver support functions for TI OMAP processors. 
* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob@ti.com> * Andy Gross <andy.gross@ti.com> * @@ -306,7 +306,7 @@ static irqreturn_t omap_dmm_irq_handler(int irq, void *arg) return IRQ_HANDLED; } -/** +/* * Get a handle for a DMM transaction */ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm) @@ -344,7 +344,7 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm) return txn; } -/** +/* * Add region to DMM transaction. If pages or pages[i] is NULL, then the * corresponding slot is cleared (ie. dummy_pa is programmed) */ @@ -392,7 +392,7 @@ static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area, return; } -/** +/* * Commit the DMM transaction. */ static int dmm_txn_commit(struct dmm_txn *txn, bool wait) diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h index 43c1d096b021..2f8918fe06d5 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob@ti.com> * Andy Gross <andy.gross@ti.com> * diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 2e598b8b72af..42c2ed752095 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob@ti.com> */ @@ -533,7 +533,7 @@ static const struct file_operations omapdriver_fops = { .llseek = noop_llseek, }; -static struct drm_driver omap_drm_driver = { +static const struct drm_driver omap_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC | DRIVER_RENDER, .open = dev_open, diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index 8a1fac680138..ae57e7ada876 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index ae4b867a67a3..57e92a4d5937 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index 05f30e2618c9..190afc564914 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c 
b/drivers/gpu/drm/omapdrm/omap_fbdev.c index 3f6cfc24fb64..42eac6ad12bd 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index d8e09792793a..68c271f4250b 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob.clark@linaro.org> */ @@ -580,7 +580,7 @@ int omap_gem_mmap_obj(struct drm_gem_object *obj, /** * omap_gem_dumb_create - create a dumb buffer - * @drm_file: our client file + * @file: our client file * @dev: our device * @args: the requested arguments copied from userspace * @@ -610,6 +610,7 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev, * @file: our drm client file * @dev: drm device * @handle: GEM handle to the object (from dumb_create) + * @offset: memory map offset placeholder * * Do the necessary setup to allow the mapping of the frame buffer * into user memory. We don't have to do much here at the moment. diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c index b319fe7f2371..f4cde3a169d8 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c +++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob.clark@linaro.org> */ diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c index 382bcdc72ac0..97c83b959f7e 100644 --- a/drivers/gpu/drm/omapdrm/omap_irq.c +++ b/drivers/gpu/drm/omapdrm/omap_irq.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob.clark@linaro.org> */ @@ -100,8 +100,7 @@ int omap_irq_enable_framedone(struct drm_crtc *crtc, bool enable) /** * enable_vblank - enable vblank interrupt events - * @dev: DRM device - * @pipe: which irq to enable + * @crtc: DRM CRTC * * Enable vblank interrupts for @crtc. If the device doesn't have * a hardware vblank counter, this routine should be a no-op, since @@ -131,8 +130,7 @@ int omap_irq_enable_vblank(struct drm_crtc *crtc) /** * disable_vblank - disable vblank interrupt events - * @dev: DRM device - * @pipe: which irq to enable + * @crtc: DRM CRTC * * Disable vblank interrupts for @crtc. 
If the device doesn't have * a hardware vblank counter, this routine should be a no-op, since diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c index 73ec99819a3d..21e0b9785599 100644 --- a/drivers/gpu/drm/omapdrm/omap_plane.c +++ b/drivers/gpu/drm/omapdrm/omap_plane.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob.clark@linaro.org> */ diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c index 817be3c41863..9e1acbd2c7aa 100644 --- a/drivers/gpu/drm/omapdrm/tcm-sita.c +++ b/drivers/gpu/drm/omapdrm/tcm-sita.c @@ -5,7 +5,7 @@ * Lajos Molnar <molnar@ti.com> * Andy Gross <andy.gross@ti.com> * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c index 074e18559b9f..8e84df9a0033 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c @@ -152,7 +152,7 @@ #define ILI9322_GAMMA_7 0x16 #define ILI9322_GAMMA_8 0x17 -/** +/* * enum ili9322_input - the format of the incoming signal to the panel * * The panel can be connected to various input streams and four of them can diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 8b82ec33f08a..597f676a6591 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -39,6 +39,7 @@ #include <drm/drm_panel.h> /** + * struct panel_desc * @modes: Pointer to array of fixed modes appropriate for this panel. If * only one mode then this can just be the address of this the mode. * NOTE: cannot be used with "timings" and also if this is specified @@ -53,6 +54,7 @@ * @delay: Structure containing various delay values for this panel. * @bus_format: See MEDIA_BUS_FMT_... defines. * @bus_flags: See DRM_BUS_FLAG_... defines. + * @connector_type: LVDS, eDP, DSI, DPI, etc. 
*/ struct panel_desc { const struct drm_display_mode *modes; @@ -1327,6 +1329,7 @@ static const struct drm_display_mode boe_nv133fhm_n61_modes = { .vsync_start = 1080 + 3, .vsync_end = 1080 + 3 + 6, .vtotal = 1080 + 3 + 6 + 31, + .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, }; /* Also used for boe_nv133fhm_n62 */ @@ -1812,6 +1815,7 @@ static const struct panel_desc edt_etm0700g0dh6 = { }, .bus_format = MEDIA_BUS_FMT_RGB666_1X18, .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, + .connector_type = DRM_MODE_CONNECTOR_DPI, }; static const struct panel_desc edt_etm0700g0bdh6 = { @@ -4673,8 +4677,10 @@ static int __init panel_simple_init(void) if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) { err = mipi_dsi_driver_register(&panel_simple_dsi_driver); - if (err < 0) + if (err < 0) { + platform_driver_unregister(&panel_simple_platform_driver); return err; + } } return 0; diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 689be734ed20..83a461bdeea8 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -548,7 +548,7 @@ DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops); * - 1.0 - initial interface * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO */ -static struct drm_driver panfrost_drm_driver = { +static const struct drm_driver panfrost_drm_driver = { .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ, .open = panfrost_open, .postclose = panfrost_postclose, diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c index fdbc8d949135..5ab03d605f57 100644 --- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c @@ -5,6 +5,7 @@ #include <drm/drm_gem_shmem_helper.h> #include <drm/panfrost_drm.h> #include <linux/completion.h> +#include <linux/dma-buf-map.h> #include <linux/iopoll.h> #include <linux/pm_runtime.h> #include <linux/slab.h> @@ -72,6 +73,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, { struct panfrost_file_priv *user = file_priv->driver_priv; struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; + struct dma_buf_map map; struct drm_gem_shmem_object *bo; u32 cfg, as; int ret; @@ -103,11 +105,10 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, goto err_close_bo; } - perfcnt->buf = drm_gem_shmem_vmap(&bo->base); - if (IS_ERR(perfcnt->buf)) { - ret = PTR_ERR(perfcnt->buf); + ret = drm_gem_shmem_vmap(&bo->base, &map); + if (ret) goto err_put_mapping; - } + perfcnt->buf = map.vaddr; /* * Invalidate the cache and clear the counters to start from a fresh @@ -163,7 +164,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, return 0; err_vunmap: - drm_gem_shmem_vunmap(&bo->base, perfcnt->buf); + drm_gem_shmem_vunmap(&bo->base, &map); err_put_mapping: panfrost_gem_mapping_put(perfcnt->mapping); err_close_bo: @@ -180,6 +181,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, { struct panfrost_file_priv *user = file_priv->driver_priv; struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; + struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(perfcnt->buf); if (user != perfcnt->user) return -EINVAL; @@ -192,7 +194,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF)); perfcnt->user = NULL; - drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf); + drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, &map); 
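The panfrost hunks just above (and the qxl and rockchip ones that follow) are part of the same dma-buf-map conversion: vmap-style helpers now return an int and fill a struct dma_buf_map, and callers read map.vaddr — or map.vaddr_iomem when map.is_iomem is set, as in the qxl cursor code — instead of receiving a bare pointer. A minimal caller-side sketch modelled on the panfrost change, with placeholder names:

#include <linux/dma-buf-map.h>
#include <drm/drm_gem_shmem_helper.h>

static int mydrv_map_counters(struct drm_gem_shmem_object *bo, void **out)
{
        struct dma_buf_map map;
        int ret;

        ret = drm_gem_shmem_vmap(&bo->base, &map);   /* fills 'map' on success */
        if (ret)
                return ret;

        /* shmem mappings live in system memory; VRAM-backed drivers (see the
         * qxl hunks) check map.is_iomem and use map.vaddr_iomem instead. */
        *out = map.vaddr;
        return 0;
}

static void mydrv_unmap_counters(struct drm_gem_shmem_object *bo, void *vaddr)
{
        struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(vaddr);

        drm_gem_shmem_vunmap(&bo->base, &map);
}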
perfcnt->buf = NULL; panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv); panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu); diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c index ecef8a2383d2..40e6708fbbe2 100644 --- a/drivers/gpu/drm/pl111/pl111_drv.c +++ b/drivers/gpu/drm/pl111/pl111_drv.c @@ -213,7 +213,7 @@ pl111_gem_import_sg_table(struct drm_device *dev, DEFINE_DRM_GEM_CMA_FOPS(drm_fops); -static struct drm_driver pl111_drm_driver = { +static const struct drm_driver pl111_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .ioctls = NULL, @@ -224,7 +224,6 @@ static struct drm_driver pl111_drm_driver = { .major = 1, .minor = 0, .patchlevel = 0, - .gem_create_object = drm_gem_cma_create_object_default_funcs, .dumb_create = drm_gem_cma_dumb_create, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 07a3e3c23f09..012bce0cdb65 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -25,6 +25,7 @@ #include <linux/crc32.h> #include <linux/delay.h> +#include <linux/dma-buf-map.h> #include <drm/drm_drv.h> #include <drm/drm_atomic.h> @@ -581,6 +582,8 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, struct drm_gem_object *obj; struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL; int ret; + struct dma_buf_map user_map; + struct dma_buf_map cursor_map; void *user_ptr; int size = 64*64*4; @@ -595,9 +598,10 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, user_bo = gem_to_qxl_bo(obj); /* pinning is done in the prepare/cleanup framevbuffer */ - ret = qxl_bo_kmap(user_bo, &user_ptr); + ret = qxl_bo_kmap(user_bo, &user_map); if (ret) goto out_free_release; + user_ptr = user_map.vaddr; /* TODO: Use mapping abstraction properly */ ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_cursor) + size, @@ -613,9 +617,13 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, if (ret) goto out_unpin; - ret = qxl_bo_kmap(cursor_bo, (void **)&cursor); + ret = qxl_bo_kmap(cursor_bo, &cursor_map); if (ret) goto out_backoff; + if (cursor_map.is_iomem) /* TODO: Use mapping abstraction properly */ + cursor = (struct qxl_cursor __force *)cursor_map.vaddr_iomem; + else + cursor = (struct qxl_cursor *)cursor_map.vaddr; cursor->header.unique = 0; cursor->header.type = SPICE_CURSOR_TYPE_ALPHA; @@ -1133,6 +1141,7 @@ int qxl_create_monitors_object(struct qxl_device *qdev) { int ret; struct drm_gem_object *gobj; + struct dma_buf_map map; int monitors_config_size = sizeof(struct qxl_monitors_config) + qxl_num_crtc * sizeof(struct qxl_head); @@ -1149,7 +1158,7 @@ int qxl_create_monitors_object(struct qxl_device *qdev) if (ret) return ret; - qxl_bo_kmap(qdev->monitors_config_bo, NULL); + qxl_bo_kmap(qdev->monitors_config_bo, &map); qdev->monitors_config = qdev->monitors_config_bo->kptr; qdev->ram_header->monitors_config = diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c index 3599db096973..7b7acb910780 100644 --- a/drivers/gpu/drm/qxl/qxl_draw.c +++ b/drivers/gpu/drm/qxl/qxl_draw.c @@ -20,6 +20,8 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +#include <linux/dma-buf-map.h> + #include <drm/drm_fourcc.h> #include "qxl_drv.h" @@ -42,13 +44,15 @@ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, unsigned int num_clips, struct qxl_bo *clips_bo) { + struct dma_buf_map map; struct qxl_clip_rects *dev_clips; int ret; - ret = qxl_bo_kmap(clips_bo, (void **)&dev_clips); - if (ret) { + ret = qxl_bo_kmap(clips_bo, &map); + if (ret) return NULL; - } + dev_clips = map.vaddr; /* TODO: Use mapping abstraction properly */ + dev_clips->num_rects = num_clips; dev_clips->chunk.next_chunk = 0; dev_clips->chunk.prev_chunk = 0; @@ -142,6 +146,7 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, int stride = fb->pitches[0]; /* depth is not actually interesting, we don't mask with it */ int depth = fb->format->cpp[0] * 8; + struct dma_buf_map surface_map; uint8_t *surface_base; struct qxl_release *release; struct qxl_bo *clips_bo; @@ -197,9 +202,10 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, if (ret) goto out_release_backoff; - ret = qxl_bo_kmap(bo, (void **)&surface_base); + ret = qxl_bo_kmap(bo, &surface_map); if (ret) goto out_release_backoff; + surface_base = surface_map.vaddr; /* TODO: Use mapping abstraction properly */ ret = qxl_image_init(qdev, release, dimage, surface_base, left - dumb_shadow_offset, diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 3602e8b34189..eb437fea5d9e 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -30,6 +30,7 @@ * Definitions taken from spice-protocol, plus kernel driver specific bits. */ +#include <linux/dma-buf-map.h> #include <linux/dma-fence.h> #include <linux/firmware.h> #include <linux/platform_device.h> @@ -50,6 +51,8 @@ #include "qxl_dev.h" +struct dma_buf_map; + #define DRIVER_AUTHOR "Dave Airlie" #define DRIVER_NAME "qxl" @@ -79,7 +82,7 @@ struct qxl_bo { /* Protected by tbo.reserved */ struct ttm_place placements[3]; struct ttm_placement placement; - struct ttm_bo_kmap_obj kmap; + struct dma_buf_map map; void *kptr; unsigned int map_count; int type; @@ -335,7 +338,6 @@ int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv); void qxl_gem_object_close(struct drm_gem_object *obj, struct drm_file *file_priv); void qxl_bo_force_delete(struct qxl_device *qdev); -int qxl_bo_kmap(struct qxl_bo *bo, void **ptr); /* qxl_dumb.c */ int qxl_mode_dumb_create(struct drm_file *file_priv, @@ -445,8 +447,9 @@ struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object *qxl_gem_prime_import_sg_table( struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); -void *qxl_gem_prime_vmap(struct drm_gem_object *obj); -void qxl_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); +int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); +void qxl_gem_prime_vunmap(struct drm_gem_object *obj, + struct dma_buf_map *map); int qxl_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 0bab9ec6adc1..16e1e589508e 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -160,7 +160,6 @@ static int qxl_process_single_command(struct qxl_device *qdev, default: DRM_DEBUG("Only draw commands in execbuffers\n"); return -EINVAL; - break; } if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info)) diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c index dc5b3850a4d4..228e2b9198f1 
100644 --- a/drivers/gpu/drm/qxl/qxl_kms.c +++ b/drivers/gpu/drm/qxl/qxl_kms.c @@ -231,11 +231,11 @@ int qxl_device_init(struct qxl_device *qdev, goto cursor_ring_free; } - idr_init(&qdev->release_idr); + idr_init_base(&qdev->release_idr, 1); spin_lock_init(&qdev->release_idr_lock); spin_lock_init(&qdev->release_lock); - idr_init(&qdev->surf_id_idr); + idr_init_base(&qdev->surf_id_idr, 1); spin_lock_init(&qdev->surf_id_idr_lock); mutex_init(&qdev->async_io_mutex); diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c index 547d46c14d56..ceebc5881f68 100644 --- a/drivers/gpu/drm/qxl/qxl_object.c +++ b/drivers/gpu/drm/qxl/qxl_object.c @@ -23,10 +23,12 @@ * Alon Levy */ +#include <linux/dma-buf-map.h> +#include <linux/io-mapping.h> + #include "qxl_drv.h" #include "qxl_object.h" -#include <linux/io-mapping.h> static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo) { struct qxl_bo *bo; @@ -152,24 +154,27 @@ int qxl_bo_create(struct qxl_device *qdev, return 0; } -int qxl_bo_kmap(struct qxl_bo *bo, void **ptr) +int qxl_bo_kmap(struct qxl_bo *bo, struct dma_buf_map *map) { - bool is_iomem; int r; if (bo->kptr) { - if (ptr) - *ptr = bo->kptr; bo->map_count++; - return 0; + goto out; } - r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); + r = ttm_bo_vmap(&bo->tbo, &bo->map); if (r) return r; - bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); - if (ptr) - *ptr = bo->kptr; bo->map_count = 1; + + /* TODO: Remove kptr in favor of map everywhere. */ + if (bo->map.is_iomem) + bo->kptr = (void *)bo->map.vaddr_iomem; + else + bo->kptr = bo->map.vaddr; + +out: + *map = bo->map; return 0; } @@ -180,6 +185,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, void *rptr; int ret; struct io_mapping *map; + struct dma_buf_map bo_map; if (bo->tbo.mem.mem_type == TTM_PL_VRAM) map = qdev->vram_mapping; @@ -196,9 +202,10 @@ fallback: return rptr; } - ret = qxl_bo_kmap(bo, &rptr); + ret = qxl_bo_kmap(bo, &bo_map); if (ret) return NULL; + rptr = bo_map.vaddr; /* TODO: Use mapping abstraction properly */ rptr += page_offset * PAGE_SIZE; return rptr; @@ -212,7 +219,7 @@ void qxl_bo_kunmap(struct qxl_bo *bo) if (bo->map_count > 0) return; bo->kptr = NULL; - ttm_bo_kunmap(&bo->kmap); + ttm_bo_vunmap(&bo->tbo, &bo->map); } void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h index 09a5c818324d..ebf24c9d2bf2 100644 --- a/drivers/gpu/drm/qxl/qxl_object.h +++ b/drivers/gpu/drm/qxl/qxl_object.h @@ -63,7 +63,7 @@ extern int qxl_bo_create(struct qxl_device *qdev, bool kernel, bool pinned, u32 domain, struct qxl_surface *surf, struct qxl_bo **bo_ptr); -extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr); +extern int qxl_bo_kmap(struct qxl_bo *bo, struct dma_buf_map *map); extern void qxl_bo_kunmap(struct qxl_bo *bo); void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset); void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map); diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c index 7d3816fca5a8..4aa949799446 100644 --- a/drivers/gpu/drm/qxl/qxl_prime.c +++ b/drivers/gpu/drm/qxl/qxl_prime.c @@ -54,20 +54,20 @@ struct drm_gem_object *qxl_gem_prime_import_sg_table( return ERR_PTR(-ENOSYS); } -void *qxl_gem_prime_vmap(struct drm_gem_object *obj) +int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) { struct qxl_bo *bo = gem_to_qxl_bo(obj); - void *ptr; int ret; - ret = 
qxl_bo_kmap(bo, &ptr); + ret = qxl_bo_kmap(bo, map); if (ret < 0) - return ERR_PTR(ret); + return ret; - return ptr; + return 0; } -void qxl_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +void qxl_gem_prime_vunmap(struct drm_gem_object *obj, + struct dma_buf_map *map) { struct qxl_bo *bo = gem_to_qxl_bo(obj); diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index a80d59634143..128c38c8a837 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -140,7 +140,8 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo, static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_resource *new_mem) + struct ttm_resource *new_mem, + struct ttm_place *hop) { struct ttm_resource *old_mem = &bo->mem; int ret; diff --git a/drivers/gpu/drm/r128/ati_pcigart.c b/drivers/gpu/drm/r128/ati_pcigart.c index 3e76ae5a17ee..1234ec60c0af 100644 --- a/drivers/gpu/drm/r128/ati_pcigart.c +++ b/drivers/gpu/drm/r128/ati_pcigart.c @@ -1,4 +1,4 @@ -/** +/* * \file ati_pcigart.c * ATI PCI GART support * diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index e1132d86d250..28cb8ced91b9 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -509,7 +509,6 @@ struct radeon_bo { /* Constant after initialization */ struct radeon_device *rdev; - struct ttm_bo_kmap_obj dma_buf_vmap; pid_t pid; #ifdef CONFIG_MMU_NOTIFIER @@ -2245,6 +2244,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); +int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); /* VRAM scratch page for HDP bug, default vram page */ struct r600_vram_scratch { diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 65061c949aee..bfadb799d6a3 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -51,6 +51,7 @@ #include <drm/radeon_drm.h> #include "radeon_drv.h" +#include "radeon.h" /* * KMS wrapper. 
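The qxl move callback above, like the radeon one below, picks up the multihop infrastructure added in this series: the TTM .move() hook gains a struct ttm_place *hop argument, and instead of open-coding VRAM<->SYSTEM bounce copies (radeon_move_vram_ram()/radeon_move_ram_vram(), deleted further down) the driver describes the required intermediate placement and returns -EMULTIHOP so the core can route the move through it. Roughly, paraphrasing the radeon conversion with a placeholder function name:

static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
                         struct ttm_operation_ctx *ctx,
                         struct ttm_resource *new_mem,
                         struct ttm_place *hop)
{
        struct ttm_resource *old_mem = &bo->mem;

        if ((old_mem->mem_type == TTM_PL_SYSTEM &&
             new_mem->mem_type == TTM_PL_VRAM) ||
            (old_mem->mem_type == TTM_PL_VRAM &&
             new_mem->mem_type == TTM_PL_SYSTEM)) {
                /* No direct SYSTEM<->VRAM path: ask TTM to hop via GTT. */
                hop->fpfn = 0;
                hop->lpfn = 0;
                hop->mem_type = TTM_PL_TT;
                hop->flags = 0;
                return -EMULTIHOP;
        }

        /* ... handle the direct cases (blit, memcpy, null move, ...) ... */
        return 0;
}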
@@ -129,8 +130,6 @@ extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int crtc, ktime_t *stime, ktime_t *etime, const struct drm_display_mode *mode); extern bool radeon_is_px(struct drm_device *dev); -extern const struct drm_ioctl_desc radeon_ioctls_kms[]; -extern int radeon_max_kms_ioctl; int radeon_mmap(struct file *filp, struct vm_area_struct *vma); int radeon_mode_dumb_mmap(struct drm_file *filp, struct drm_device *dev, @@ -298,7 +297,7 @@ static struct pci_device_id pciidlist[] = { MODULE_DEVICE_TABLE(pci, pciidlist); -static struct drm_driver kms_driver; +static const struct drm_driver kms_driver; bool radeon_device_is_virtual(void); @@ -584,9 +583,55 @@ static const struct file_operations radeon_driver_kms_fops = { #endif }; -static struct drm_driver kms_driver = { +static const struct drm_ioctl_desc radeon_ioctls_kms[] = { + DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(RADEON_CP_START, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_RESET, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_SWAP, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_CLEAR, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_VERTEX, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_INDICES, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_FLIP, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_ALLOC, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_FREE, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, drm_invalid_op, DRM_AUTH), + /* KMS */ + DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, 
radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), +}; + +static const struct drm_driver kms_driver = { .driver_features = - DRIVER_GEM | DRIVER_RENDER, + DRIVER_GEM | DRIVER_RENDER | DRIVER_MODESET, .load = radeon_driver_load_kms, .open = radeon_driver_open_kms, .postclose = radeon_driver_postclose_kms, @@ -597,6 +642,7 @@ static struct drm_driver kms_driver = { .irq_uninstall = radeon_driver_irq_uninstall_kms, .irq_handler = radeon_driver_irq_handler_kms, .ioctls = radeon_ioctls_kms, + .num_ioctls = ARRAY_SIZE(radeon_ioctls_kms), .dumb_create = radeon_mode_dumb_create, .dumb_map_offset = radeon_mode_dumb_mmap, .fops = &radeon_driver_kms_fops, @@ -613,9 +659,6 @@ static struct drm_driver kms_driver = { .patchlevel = KMS_DRIVER_PATCHLEVEL, }; -static struct drm_driver *driver; -static struct pci_driver *pdriver; - static struct pci_driver radeon_kms_pci_driver = { .name = DRIVER_NAME, .id_table = pciidlist, @@ -625,41 +668,33 @@ static struct pci_driver radeon_kms_pci_driver = { .driver.pm = &radeon_pm_ops, }; -static int __init radeon_init(void) +static int __init radeon_module_init(void) { if (vgacon_text_force() && radeon_modeset == -1) { DRM_INFO("VGACON disable radeon kernel modesetting.\n"); radeon_modeset = 0; } - /* set to modesetting by default if not nomodeset */ - if (radeon_modeset == -1) - radeon_modeset = 1; - - if (radeon_modeset == 1) { - DRM_INFO("radeon kernel modesetting enabled.\n"); - driver = &kms_driver; - pdriver = &radeon_kms_pci_driver; - driver->driver_features |= DRIVER_MODESET; - driver->num_ioctls = radeon_max_kms_ioctl; - radeon_register_atpx_handler(); - - } else { + + if (radeon_modeset == 0) { DRM_ERROR("No UMS support in radeon module!\n"); return -EINVAL; } - return pci_register_driver(pdriver); + DRM_INFO("radeon kernel modesetting enabled.\n"); + radeon_register_atpx_handler(); + + return pci_register_driver(&radeon_kms_pci_driver); } -static void __exit radeon_exit(void) +static void __exit radeon_module_exit(void) { - pci_unregister_driver(pdriver); + pci_unregister_driver(&radeon_kms_pci_driver); radeon_unregister_atpx_handler(); mmu_notifier_synchronize(); } -module_init(radeon_init); -module_exit(radeon_exit); +module_init(radeon_module_init); +module_exit(radeon_module_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 0ccd7213e41f..d2876ce3bc9e 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -31,6 +31,7 @@ #include <drm/drm_debugfs.h> #include <drm/drm_device.h> #include <drm/drm_file.h> +#include <drm/drm_gem_ttm_helper.h> #include <drm/radeon_drm.h> #include "radeon.h" @@ -40,8 +41,6 @@ struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj, struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj); int radeon_gem_prime_pin(struct drm_gem_object *obj); void radeon_gem_prime_unpin(struct drm_gem_object *obj); -void *radeon_gem_prime_vmap(struct drm_gem_object *obj); -void radeon_gem_prime_vunmap(struct 
drm_gem_object *obj, void *vaddr); static const struct drm_gem_object_funcs radeon_gem_object_funcs; @@ -235,8 +234,8 @@ static const struct drm_gem_object_funcs radeon_gem_object_funcs = { .pin = radeon_gem_prime_pin, .unpin = radeon_gem_prime_unpin, .get_sg_table = radeon_gem_prime_get_sg_table, - .vmap = radeon_gem_prime_vmap, - .vunmap = radeon_gem_prime_vunmap, + .vmap = drm_gem_ttm_vmap, + .vunmap = drm_gem_ttm_vunmap, }; /* diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 99ee60f8b604..abb3bdd9ca25 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -221,7 +221,7 @@ static void radeon_set_filp_rights(struct drm_device *dev, * etc. (all asics). * Returns 0 on success, -EINVAL on failure. */ -static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) +int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct radeon_device *rdev = dev->dev_private; struct drm_radeon_info *info = data; @@ -866,50 +866,3 @@ void radeon_disable_vblank_kms(struct drm_crtc *crtc) radeon_irq_set(rdev); spin_unlock_irqrestore(&rdev->irq.lock, irqflags); } - -const struct drm_ioctl_desc radeon_ioctls_kms[] = { - DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(RADEON_CP_START, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_RESET, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_SWAP, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_CLEAR, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_VERTEX, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_INDICES, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_FLIP, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_ALLOC, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_FREE, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, drm_invalid_op, DRM_AUTH), - /* KMS */ - DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, 
DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), -}; -int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms); diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c index b9de0e51c0be..088d39a51c0d 100644 --- a/drivers/gpu/drm/radeon/radeon_prime.c +++ b/drivers/gpu/drm/radeon/radeon_prime.c @@ -39,26 +39,6 @@ struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj) return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages, npages); } -void *radeon_gem_prime_vmap(struct drm_gem_object *obj) -{ - struct radeon_bo *bo = gem_to_radeon_bo(obj); - int ret; - - ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, - &bo->dma_buf_vmap); - if (ret) - return ERR_PTR(ret); - - return bo->dma_buf_vmap.virtual; -} - -void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) -{ - struct radeon_bo *bo = gem_to_radeon_bo(obj); - - ttm_bo_kunmap(&bo->dma_buf_vmap); -} - struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg) diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 95038ac3382e..0ca381b95d3d 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -206,110 +206,27 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, return r; } -static int radeon_move_vram_ram(struct ttm_buffer_object *bo, - bool evict, - struct ttm_operation_ctx *ctx, - struct ttm_resource *new_mem) -{ - struct ttm_resource *old_mem = &bo->mem; - struct ttm_resource tmp_mem; - struct ttm_place placements; - struct ttm_placement placement; - int r; - - tmp_mem = *new_mem; - tmp_mem.mm_node = NULL; - placement.num_placement = 1; - placement.placement = &placements; - placement.num_busy_placement = 1; - placement.busy_placement = &placements; - placements.fpfn = 0; - placements.lpfn = 0; - placements.mem_type = TTM_PL_TT; - placements.flags = 0; - r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); - if (unlikely(r)) { - return r; - } - - r = ttm_tt_populate(bo->bdev, bo->ttm, ctx); - if (unlikely(r)) { - goto out_cleanup; - } - - r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_mem); - if (unlikely(r)) { - goto out_cleanup; - } - r = radeon_move_blit(bo, true, &tmp_mem, old_mem); - if (unlikely(r)) { - goto out_cleanup; - } - r = ttm_bo_wait_ctx(bo, ctx); - if (unlikely(r)) - goto out_cleanup; - - radeon_ttm_tt_unbind(bo->bdev, bo->ttm); - ttm_resource_free(bo, &bo->mem); - ttm_bo_assign_mem(bo, new_mem); -out_cleanup: - ttm_resource_free(bo, &tmp_mem); - return r; -} - -static int radeon_move_ram_vram(struct ttm_buffer_object *bo, - bool evict, - struct ttm_operation_ctx *ctx, - 
struct ttm_resource *new_mem) -{ - struct ttm_resource *old_mem = &bo->mem; - struct ttm_resource tmp_mem; - struct ttm_placement placement; - struct ttm_place placements; - int r; - - tmp_mem = *new_mem; - tmp_mem.mm_node = NULL; - placement.num_placement = 1; - placement.placement = &placements; - placement.num_busy_placement = 1; - placement.busy_placement = &placements; - placements.fpfn = 0; - placements.lpfn = 0; - placements.mem_type = TTM_PL_TT; - placements.flags = 0; - r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); - if (unlikely(r)) { - return r; - } - - r = ttm_tt_populate(bo->bdev, bo->ttm, ctx); - if (unlikely(r)) - goto out_cleanup; - - r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_mem); - if (unlikely(r)) - goto out_cleanup; - - ttm_bo_assign_mem(bo, &tmp_mem); - r = radeon_move_blit(bo, true, new_mem, old_mem); - if (unlikely(r)) { - goto out_cleanup; - } -out_cleanup: - ttm_resource_free(bo, &tmp_mem); - return r; -} - static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_resource *new_mem) + struct ttm_resource *new_mem, + struct ttm_place *hop) { struct radeon_device *rdev; struct radeon_bo *rbo; struct ttm_resource *old_mem = &bo->mem; int r; + if ((old_mem->mem_type == TTM_PL_SYSTEM && + new_mem->mem_type == TTM_PL_VRAM) || + (old_mem->mem_type == TTM_PL_VRAM && + new_mem->mem_type == TTM_PL_SYSTEM)) { + hop->fpfn = 0; + hop->lpfn = 0; + hop->mem_type = TTM_PL_TT; + hop->flags = 0; + return -EMULTIHOP; + } + if (new_mem->mem_type == TTM_PL_TT) { r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, new_mem); if (r) @@ -350,17 +267,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, goto memcpy; } - if (old_mem->mem_type == TTM_PL_VRAM && - new_mem->mem_type == TTM_PL_SYSTEM) { - r = radeon_move_vram_ram(bo, evict, ctx, new_mem); - } else if (old_mem->mem_type == TTM_PL_SYSTEM && - new_mem->mem_type == TTM_PL_VRAM) { - r = radeon_move_ram_vram(bo, evict, ctx, new_mem); - } else { - r = radeon_move_blit(bo, evict, - new_mem, old_mem); - } - + r = radeon_move_blit(bo, evict, new_mem, old_mem); if (r) { memcpy: r = ttm_bo_move_memcpy(bo, ctx, new_mem); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 447be991fa25..600056dff374 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -507,7 +507,7 @@ MODULE_DEVICE_TABLE(of, rcar_du_of_table); DEFINE_DRM_GEM_CMA_FOPS(rcar_du_fops); -static struct drm_driver rcar_du_driver = { +static const struct drm_driver rcar_du_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(rcar_du_dumb_create), .fops = &rcar_du_fops, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index b7654f5e4225..212bd87c0c4a 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -35,7 +35,7 @@ #define DRIVER_MINOR 0 static bool is_support_iommu = true; -static struct drm_driver rockchip_drm_driver; +static const struct drm_driver rockchip_drm_driver; /* * Attach a (component) device to the shared drm dma mapping from master drm @@ -209,7 +209,7 @@ static const struct file_operations rockchip_drm_driver_fops = { .release = drm_release, }; -static struct drm_driver rockchip_drm_driver = { +static const struct drm_driver rockchip_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .lastclose = 
drm_fb_helper_lastclose, .dumb_create = rockchip_gem_dumb_create, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 7d5ebb10323b..7971f57436dd 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -532,26 +532,32 @@ err_free_rk_obj: return ERR_PTR(ret); } -void *rockchip_gem_prime_vmap(struct drm_gem_object *obj) +int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) { struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); - if (rk_obj->pages) - return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP, - pgprot_writecombine(PAGE_KERNEL)); + if (rk_obj->pages) { + void *vaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP, + pgprot_writecombine(PAGE_KERNEL)); + if (!vaddr) + return -ENOMEM; + dma_buf_map_set_vaddr(map, vaddr); + return 0; + } if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING) - return NULL; + return -ENOMEM; + dma_buf_map_set_vaddr(map, rk_obj->kvaddr); - return rk_obj->kvaddr; + return 0; } -void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) { struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); if (rk_obj->pages) { - vunmap(vaddr); + vunmap(map->vaddr); return; } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h index 7ffc541bea07..5a70a56cd406 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h @@ -31,8 +31,8 @@ struct drm_gem_object * rockchip_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); -void *rockchip_gem_prime_vmap(struct drm_gem_object *obj); -void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); +int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); +void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); /* drm driver mmap file operations */ int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 8cd39fca81a3..d1e05482641b 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -1248,6 +1248,8 @@ static void vop_crtc_gamma_set(struct vop *vop, struct drm_crtc *crtc, static void vop_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc); struct vop *vop = to_vop(crtc); @@ -1256,8 +1258,8 @@ static void vop_crtc_atomic_begin(struct drm_crtc *crtc, * Only update GAMMA if the 'active' flag is not changed, * otherwise it's updated by .atomic_enable. 
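
The rockchip_gem_prime_vmap() conversion just above shows the new vmap contract used throughout this series: the callback fills a struct dma_buf_map and returns an errno instead of handing back a raw pointer. A minimal sketch of that pattern for a hypothetical pages-backed GEM object (names and struct layout are illustrative, not taken from this diff):

#include <linux/dma-buf-map.h>
#include <linux/vmalloc.h>

#include <drm/drm_gem.h>

/* Hypothetical pages-backed GEM object, for illustration only. */
struct foo_gem_object {
        struct drm_gem_object base;
        struct page **pages;
        unsigned int num_pages;
};

#define to_foo_obj(obj) container_of(obj, struct foo_gem_object, base)

/* New-style vmap: fill a struct dma_buf_map, report errors as an int. */
static int foo_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
        struct foo_gem_object *bo = to_foo_obj(obj);
        void *vaddr;

        vaddr = vmap(bo->pages, bo->num_pages, VM_MAP, PAGE_KERNEL);
        if (!vaddr)
                return -ENOMEM;

        dma_buf_map_set_vaddr(map, vaddr); /* system memory, not I/O memory */
        return 0;
}

static void foo_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
        vunmap(map->vaddr);
        dma_buf_map_clear(map);
}
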
*/ - if (crtc->state->color_mgmt_changed && - !crtc->state->active_changed) + if (crtc_state->color_mgmt_changed && + !crtc_state->active_changed) vop_crtc_gamma_set(vop, crtc, old_crtc_state); } diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c index 26a15c214bd3..0a02b7092c04 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c @@ -128,7 +128,7 @@ static irqreturn_t shmob_drm_irq(int irq, void *arg) DEFINE_DRM_GEM_CMA_FOPS(shmob_drm_fops); -static struct drm_driver shmob_drm_driver = { +static const struct drm_driver shmob_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET, .irq_handler = shmob_drm_irq, DRM_GEM_CMA_DRIVER_OPS, diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index 3f54efa36098..c7efb43b83ee 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c @@ -130,7 +130,7 @@ static void sti_mode_config_init(struct drm_device *dev) DEFINE_DRM_GEM_CMA_FOPS(sti_driver_fops); -static struct drm_driver sti_driver = { +static const struct drm_driver sti_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .fops = &sti_driver_fops, DRM_GEM_CMA_DRIVER_OPS, diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c index 411103f013e2..222869b232ae 100644 --- a/drivers/gpu/drm/stm/drv.c +++ b/drivers/gpu/drm/stm/drv.c @@ -53,7 +53,7 @@ static int stm_gem_cma_dumb_create(struct drm_file *file, DEFINE_DRM_GEM_CMA_FOPS(drv_driver_fops); -static struct drm_driver drv_driver = { +static const struct drm_driver drv_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .name = "stm", .desc = "STMicroelectronics SoC DRM", diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 29861fc81b35..91502937f26d 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -40,7 +40,7 @@ static int drm_sun4i_gem_dumb_create(struct drm_file *file_priv, DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops); -static struct drm_driver sun4i_drv_driver = { +static const struct drm_driver sun4i_drv_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, /* Generic Operations */ diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 2d86627b0d4e..85dd7131553a 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -1939,15 +1939,17 @@ static void tegra_crtc_atomic_begin(struct drm_crtc *crtc, static void tegra_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { - struct tegra_dc_state *crtc_state = to_dc_state(crtc->state); + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + struct tegra_dc_state *dc_state = to_dc_state(crtc_state); struct tegra_dc *dc = to_tegra_dc(crtc); u32 value; - value = crtc_state->planes << 8 | GENERAL_UPDATE; + value = dc_state->planes << 8 | GENERAL_UPDATE; tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL); value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL); - value = crtc_state->planes | GENERAL_ACT_REQ; + value = dc_state->planes | GENERAL_ACT_REQ; tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL); value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL); } diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index f0f581cd345e..19ffb0626505 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -847,7 +847,7 @@ static void tegra_debugfs_init(struct drm_minor *minor) } #endif -static struct 
drm_driver tegra_drm_driver = { +static const struct drm_driver tegra_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC | DRIVER_RENDER, .open = tegra_drm_open, @@ -1081,12 +1081,11 @@ static bool host1x_drm_wants_iommu(struct host1x_device *dev) static int host1x_drm_probe(struct host1x_device *dev) { - struct drm_driver *driver = &tegra_drm_driver; struct tegra_drm *tegra; struct drm_device *drm; int err; - drm = drm_dev_alloc(driver, &dev->dev); + drm = drm_dev_alloc(&tegra_drm_driver, &dev->dev); if (IS_ERR(drm)) return PTR_ERR(drm); diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c index b669168ae7cb..60b92df615aa 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc.c +++ b/drivers/gpu/drm/tidss/tidss_dispc.c @@ -2608,16 +2608,9 @@ void dispc_remove(struct tidss_device *tidss) static int dispc_iomap_resource(struct platform_device *pdev, const char *name, void __iomem **base) { - struct resource *res; void __iomem *b; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); - if (!res) { - dev_err(&pdev->dev, "cannot get mem resource '%s'\n", name); - return -EINVAL; - } - - b = devm_ioremap_resource(&pdev->dev, res); + b = devm_platform_ioremap_resource_byname(pdev, name); if (IS_ERR(b)) { dev_err(&pdev->dev, "cannot ioremap resource '%s'\n", name); return PTR_ERR(b); diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c index 9179ea18f625..66e3c86eb5c7 100644 --- a/drivers/gpu/drm/tidss/tidss_drv.c +++ b/drivers/gpu/drm/tidss/tidss_drv.c @@ -108,7 +108,7 @@ static void tidss_release(struct drm_device *ddev) DEFINE_DRM_GEM_CMA_FOPS(tidss_fops); -static struct drm_driver tidss_driver = { +static const struct drm_driver tidss_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &tidss_fops, .release = tidss_release, diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index 3d7e4db756b7..f1d3a9f919fd 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -166,7 +166,7 @@ static void tilcdc_fini(struct drm_device *dev) drm_dev_put(dev); } -static int tilcdc_init(struct drm_driver *ddrv, struct device *dev) +static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev) { struct drm_device *ddev; struct platform_device *pdev = to_platform_device(dev); @@ -452,7 +452,7 @@ static void tilcdc_debugfs_init(struct drm_minor *minor) DEFINE_DRM_GEM_CMA_FOPS(fops); -static struct drm_driver tilcdc_driver = { +static const struct drm_driver tilcdc_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .irq_handler = tilcdc_irq, DRM_GEM_CMA_DRIVER_OPS, diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c index 744a8e337e41..561c49d8657a 100644 --- a/drivers/gpu/drm/tiny/cirrus.c +++ b/drivers/gpu/drm/tiny/cirrus.c @@ -17,6 +17,7 @@ */ #include <linux/console.h> +#include <linux/dma-buf-map.h> #include <linux/module.h> #include <linux/pci.h> @@ -314,6 +315,7 @@ static int cirrus_fb_blit_rect(struct drm_framebuffer *fb, struct drm_rect *rect) { struct cirrus_device *cirrus = to_cirrus(fb->dev); + struct dma_buf_map map; void *vmap; int idx, ret; @@ -321,10 +323,10 @@ static int cirrus_fb_blit_rect(struct drm_framebuffer *fb, if (!drm_dev_enter(&cirrus->dev, &idx)) goto out; - ret = -ENOMEM; - vmap = drm_gem_shmem_vmap(fb->obj[0]); - if (!vmap) + ret = drm_gem_shmem_vmap(fb->obj[0], &map); + if (ret) goto out_dev_exit; + vmap = map.vaddr; /* TODO: Use 
mapping abstraction properly */ if (cirrus->cpp == fb->format->cpp[0]) drm_fb_memcpy_dstclip(cirrus->vram, @@ -343,7 +345,7 @@ static int cirrus_fb_blit_rect(struct drm_framebuffer *fb, else WARN_ON_ONCE("cpp mismatch"); - drm_gem_shmem_vunmap(fb->obj[0], vmap); + drm_gem_shmem_vunmap(fb->obj[0], &map); ret = 0; out_dev_exit: @@ -536,7 +538,7 @@ static int cirrus_mode_config_init(struct cirrus_device *cirrus) DEFINE_DRM_GEM_FOPS(cirrus_fops); -static struct drm_driver cirrus_driver = { +static const struct drm_driver cirrus_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c index cc397671f689..33f65f4626e5 100644 --- a/drivers/gpu/drm/tiny/gm12u320.c +++ b/drivers/gpu/drm/tiny/gm12u320.c @@ -45,7 +45,7 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)"); #define GM12U320_BLOCK_COUNT 20 #define GM12U320_ERR(fmt, ...) \ - DRM_DEV_ERROR(&gm12u320->udev->dev, fmt, ##__VA_ARGS__) + DRM_DEV_ERROR(gm12u320->dev.dev, fmt, ##__VA_ARGS__) #define MISC_RCV_EPT 1 #define DATA_RCV_EPT 2 @@ -85,7 +85,6 @@ struct gm12u320_device { struct drm_device dev; struct drm_simple_display_pipe pipe; struct drm_connector conn; - struct usb_device *udev; unsigned char *cmd_buf; unsigned char *data_buf[GM12U320_BLOCK_COUNT]; struct { @@ -155,6 +154,11 @@ static const char data_block_footer[DATA_BLOCK_FOOTER_SIZE] = { 0x80, 0x00, 0x00, 0x4f }; +static inline struct usb_device *gm12u320_to_usb_device(struct gm12u320_device *gm12u320) +{ + return interface_to_usbdev(to_usb_interface(gm12u320->dev.dev)); +} + static int gm12u320_usb_alloc(struct gm12u320_device *gm12u320) { int i, block_size; @@ -191,6 +195,7 @@ static int gm12u320_misc_request(struct gm12u320_device *gm12u320, u8 req_a, u8 req_b, u8 arg_a, u8 arg_b, u8 arg_c, u8 arg_d) { + struct usb_device *udev = gm12u320_to_usb_device(gm12u320); int ret, len; memcpy(gm12u320->cmd_buf, &cmd_misc, CMD_SIZE); @@ -202,8 +207,7 @@ static int gm12u320_misc_request(struct gm12u320_device *gm12u320, gm12u320->cmd_buf[25] = arg_d; /* Send request */ - ret = usb_bulk_msg(gm12u320->udev, - usb_sndbulkpipe(gm12u320->udev, MISC_SND_EPT), + ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, MISC_SND_EPT), gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT); if (ret || len != CMD_SIZE) { GM12U320_ERR("Misc. req. 
error %d\n", ret); @@ -211,8 +215,7 @@ static int gm12u320_misc_request(struct gm12u320_device *gm12u320, } /* Read value */ - ret = usb_bulk_msg(gm12u320->udev, - usb_rcvbulkpipe(gm12u320->udev, MISC_RCV_EPT), + ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, MISC_RCV_EPT), gm12u320->cmd_buf, MISC_VALUE_SIZE, &len, DATA_TIMEOUT); if (ret || len != MISC_VALUE_SIZE) { @@ -222,8 +225,7 @@ static int gm12u320_misc_request(struct gm12u320_device *gm12u320, /* cmd_buf[0] now contains the read value, which we don't use */ /* Read status */ - ret = usb_bulk_msg(gm12u320->udev, - usb_rcvbulkpipe(gm12u320->udev, MISC_RCV_EPT), + ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, MISC_RCV_EPT), gm12u320->cmd_buf, READ_STATUS_SIZE, &len, CMD_TIMEOUT); if (ret || len != READ_STATUS_SIZE) { @@ -248,6 +250,7 @@ static void gm12u320_copy_fb_to_blocks(struct gm12u320_device *gm12u320) { int block, dst_offset, len, remain, ret, x1, x2, y1, y2; struct drm_framebuffer *fb; + struct dma_buf_map map; void *vaddr; u8 *src; @@ -262,11 +265,12 @@ static void gm12u320_copy_fb_to_blocks(struct gm12u320_device *gm12u320) y1 = gm12u320->fb_update.rect.y1; y2 = gm12u320->fb_update.rect.y2; - vaddr = drm_gem_shmem_vmap(fb->obj[0]); - if (IS_ERR(vaddr)) { - GM12U320_ERR("failed to vmap fb: %ld\n", PTR_ERR(vaddr)); + ret = drm_gem_shmem_vmap(fb->obj[0], &map); + if (ret) { + GM12U320_ERR("failed to vmap fb: %d\n", ret); goto put_fb; } + vaddr = map.vaddr; /* TODO: Use mapping abstraction properly */ if (fb->obj[0]->import_attach) { ret = dma_buf_begin_cpu_access( @@ -318,7 +322,7 @@ static void gm12u320_copy_fb_to_blocks(struct gm12u320_device *gm12u320) GM12U320_ERR("dma_buf_end_cpu_access err: %d\n", ret); } vunmap: - drm_gem_shmem_vunmap(fb->obj[0], vaddr); + drm_gem_shmem_vunmap(fb->obj[0], &map); put_fb: drm_framebuffer_put(fb); gm12u320->fb_update.fb = NULL; @@ -331,6 +335,7 @@ static void gm12u320_fb_update_work(struct work_struct *work) struct gm12u320_device *gm12u320 = container_of(to_delayed_work(work), struct gm12u320_device, fb_update.work); + struct usb_device *udev = gm12u320_to_usb_device(gm12u320); int block, block_size, len; int ret = 0; @@ -350,43 +355,41 @@ static void gm12u320_fb_update_work(struct work_struct *work) gm12u320->cmd_buf[21] = block | (gm12u320->fb_update.frame << 7); - ret = usb_bulk_msg(gm12u320->udev, - usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT), - gm12u320->cmd_buf, CMD_SIZE, &len, - CMD_TIMEOUT); + ret = usb_bulk_msg(udev, + usb_sndbulkpipe(udev, DATA_SND_EPT), + gm12u320->cmd_buf, CMD_SIZE, &len, + CMD_TIMEOUT); if (ret || len != CMD_SIZE) goto err; /* Send data block to device */ - ret = usb_bulk_msg(gm12u320->udev, - usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT), - gm12u320->data_buf[block], block_size, - &len, DATA_TIMEOUT); + ret = usb_bulk_msg(udev, + usb_sndbulkpipe(udev, DATA_SND_EPT), + gm12u320->data_buf[block], block_size, + &len, DATA_TIMEOUT); if (ret || len != block_size) goto err; /* Read status */ - ret = usb_bulk_msg(gm12u320->udev, - usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT), - gm12u320->cmd_buf, READ_STATUS_SIZE, &len, - CMD_TIMEOUT); + ret = usb_bulk_msg(udev, + usb_rcvbulkpipe(udev, DATA_RCV_EPT), + gm12u320->cmd_buf, READ_STATUS_SIZE, &len, + CMD_TIMEOUT); if (ret || len != READ_STATUS_SIZE) goto err; } /* Send draw command to device */ memcpy(gm12u320->cmd_buf, cmd_draw, CMD_SIZE); - ret = usb_bulk_msg(gm12u320->udev, - usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT), - gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT); + ret = usb_bulk_msg(udev, 
usb_sndbulkpipe(udev, DATA_SND_EPT), + gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT); if (ret || len != CMD_SIZE) goto err; /* Read status */ - ret = usb_bulk_msg(gm12u320->udev, - usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT), - gm12u320->cmd_buf, READ_STATUS_SIZE, &len, - gm12u320->fb_update.draw_status_timeout); + ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, DATA_RCV_EPT), + gm12u320->cmd_buf, READ_STATUS_SIZE, &len, + gm12u320->fb_update.draw_status_timeout); if (ret || len != READ_STATUS_SIZE) goto err; @@ -600,7 +603,7 @@ static const uint64_t gm12u320_pipe_modifiers[] = { DEFINE_DRM_GEM_FOPS(gm12u320_fops); -static struct drm_driver gm12u320_drm_driver = { +static const struct drm_driver gm12u320_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .name = DRIVER_NAME, @@ -638,7 +641,6 @@ static int gm12u320_usb_probe(struct usb_interface *interface, if (IS_ERR(gm12u320)) return PTR_ERR(gm12u320); - gm12u320->udev = interface_to_usbdev(interface); INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work); mutex_init(&gm12u320->fb_update.lock); diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c index 0998309b0d95..c6525cd02bc2 100644 --- a/drivers/gpu/drm/tiny/hx8357d.c +++ b/drivers/gpu/drm/tiny/hx8357d.c @@ -193,7 +193,7 @@ static const struct drm_display_mode yx350hv15_mode = { DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops); -static struct drm_driver hx8357d_driver = { +static const struct drm_driver hx8357d_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &hx8357d_fops, DRM_GEM_CMA_DRIVER_OPS_VMAP, diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c index 97a77262d791..8e98962db5a2 100644 --- a/drivers/gpu/drm/tiny/ili9225.c +++ b/drivers/gpu/drm/tiny/ili9225.c @@ -337,7 +337,7 @@ static const struct drm_display_mode ili9225_mode = { DEFINE_DRM_GEM_CMA_FOPS(ili9225_fops); -static struct drm_driver ili9225_driver = { +static const struct drm_driver ili9225_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &ili9225_fops, DRM_GEM_CMA_DRIVER_OPS_VMAP, diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c index d39c39df56ad..6ce97f0698eb 100644 --- a/drivers/gpu/drm/tiny/ili9341.c +++ b/drivers/gpu/drm/tiny/ili9341.c @@ -149,7 +149,7 @@ static const struct drm_display_mode yx240qv29_mode = { DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops); -static struct drm_driver ili9341_driver = { +static const struct drm_driver ili9341_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &ili9341_fops, DRM_GEM_CMA_DRIVER_OPS_VMAP, diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c index 403af68fa440..d7ce40eb166a 100644 --- a/drivers/gpu/drm/tiny/ili9486.c +++ b/drivers/gpu/drm/tiny/ili9486.c @@ -162,7 +162,7 @@ static const struct drm_display_mode waveshare_mode = { DEFINE_DRM_GEM_CMA_FOPS(ili9486_fops); -static struct drm_driver ili9486_driver = { +static const struct drm_driver ili9486_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &ili9486_fops, DRM_GEM_CMA_DRIVER_OPS_VMAP, diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c index 2131b4268c00..ff77f983f803 100644 --- a/drivers/gpu/drm/tiny/mi0283qt.c +++ b/drivers/gpu/drm/tiny/mi0283qt.c @@ -153,7 +153,7 @@ static const struct drm_display_mode mi0283qt_mode = { DEFINE_DRM_GEM_CMA_FOPS(mi0283qt_fops); -static struct drm_driver mi0283qt_driver = { +static const 
struct drm_driver mi0283qt_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &mi0283qt_fops, DRM_GEM_CMA_DRIVER_OPS_VMAP, diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c index 2e01cf0a9876..11c602fc9897 100644 --- a/drivers/gpu/drm/tiny/repaper.c +++ b/drivers/gpu/drm/tiny/repaper.c @@ -936,7 +936,7 @@ static const u8 repaper_e2271cs021_cs[] = { 0x00, 0x00, 0x00, 0x7f, DEFINE_DRM_GEM_CMA_FOPS(repaper_fops); -static struct drm_driver repaper_driver = { +static const struct drm_driver repaper_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &repaper_fops, DRM_GEM_CMA_DRIVER_OPS_VMAP, diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c index d05de03891f8..ff5cf60f4bd7 100644 --- a/drivers/gpu/drm/tiny/st7586.c +++ b/drivers/gpu/drm/tiny/st7586.c @@ -277,7 +277,7 @@ static const struct drm_display_mode st7586_mode = { DEFINE_DRM_GEM_CMA_FOPS(st7586_fops); -static struct drm_driver st7586_driver = { +static const struct drm_driver st7586_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &st7586_fops, DRM_GEM_CMA_DRIVER_OPS_VMAP, diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c index c0bc2a18edde..faaba0a033ea 100644 --- a/drivers/gpu/drm/tiny/st7735r.c +++ b/drivers/gpu/drm/tiny/st7735r.c @@ -154,7 +154,7 @@ static const struct st7735r_cfg rh128128t_cfg = { DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops); -static struct drm_driver st7735r_driver = { +static const struct drm_driver st7735r_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &st7735r_fops, DRM_GEM_CMA_DRIVER_OPS_VMAP, diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index e2a124b3affb..e6bcbfe530ec 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -231,7 +231,8 @@ EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail); static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, struct ttm_resource *mem, bool evict, - struct ttm_operation_ctx *ctx) + struct ttm_operation_ctx *ctx, + struct ttm_place *hop) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type); @@ -259,9 +260,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, } } - ret = bdev->driver->move(bo, evict, ctx, mem); - if (ret) + ret = bdev->driver->move(bo, evict, ctx, mem, hop); + if (ret) { + if (ret == -EMULTIHOP) + return ret; goto out_err; + } ctx->bytes_moved += bo->num_pages << PAGE_SHIFT; return 0; @@ -566,8 +570,11 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, struct ttm_bo_device *bdev = bo->bdev; struct ttm_resource evict_mem; struct ttm_placement placement; + struct ttm_place hop; int ret = 0; + memset(&hop, 0, sizeof(hop)); + dma_resv_assert_held(bo->base.resv); placement.num_placement = 0; @@ -596,8 +603,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, goto out; } - ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx); + ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx, &hop); if (unlikely(ret)) { + WARN(ret == -EMULTIHOP, "Unexpected multihop in eviction - likely driver bug\n"); if (ret != -ERESTARTSYS) pr_err("Buffer eviction failed\n"); ttm_resource_free(bo, &evict_mem); @@ -936,15 +944,45 @@ error: } EXPORT_SYMBOL(ttm_bo_mem_space); +static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo, + struct ttm_resource *mem, + struct ttm_operation_ctx *ctx, + struct ttm_place *hop) +{ + struct 
ttm_placement hop_placement; + int ret; + struct ttm_resource hop_mem = *mem; + + hop_mem.mm_node = NULL; + hop_mem.mem_type = TTM_PL_SYSTEM; + hop_mem.placement = 0; + + hop_placement.num_placement = hop_placement.num_busy_placement = 1; + hop_placement.placement = hop_placement.busy_placement = hop; + + /* find space in the bounce domain */ + ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx); + if (ret) + return ret; + /* move to the bounce domain */ + ret = ttm_bo_handle_move_mem(bo, &hop_mem, false, ctx, NULL); + if (ret) + return ret; + return 0; +} + static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_operation_ctx *ctx) { int ret = 0; + struct ttm_place hop; struct ttm_resource mem; dma_resv_assert_held(bo->base.resv); + memset(&hop, 0, sizeof(hop)); + mem.num_pages = bo->num_pages; mem.size = mem.num_pages << PAGE_SHIFT; mem.page_alignment = bo->mem.page_alignment; @@ -954,12 +992,25 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, /* * Determine where to move the buffer. + * + * If driver determines move is going to need + * an extra step then it will return -EMULTIHOP + * and the buffer will be moved to the temporary + * stop and the driver will be called to make + * the second hop. */ +bounce: ret = ttm_bo_mem_space(bo, placement, &mem, ctx); if (ret) - goto out_unlock; - ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx); -out_unlock: + return ret; + ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop); + if (ret == -EMULTIHOP) { + ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop); + if (ret) + return ret; + /* try and move to final place now. */ + goto bounce; + } if (ret) ttm_resource_free(bo, &mem); return ret; @@ -1432,15 +1483,20 @@ int ttm_bo_swapout(struct ttm_operation_ctx *ctx) if (bo->mem.mem_type != TTM_PL_SYSTEM) { struct ttm_operation_ctx ctx = { false, false }; struct ttm_resource evict_mem; + struct ttm_place hop; + + memset(&hop, 0, sizeof(hop)); evict_mem = bo->mem; evict_mem.mm_node = NULL; evict_mem.placement = 0; evict_mem.mem_type = TTM_PL_SYSTEM; - ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx); - if (unlikely(ret != 0)) + ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx, &hop); + if (unlikely(ret != 0)) { + WARN(ret == -EMULTIHOP, "Unexpected multihop in swaput - likely driver bug.\n"); goto out; + } } /** diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index ecb54415d1ca..7ccb2295cac1 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -32,6 +32,7 @@ #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> #include <drm/drm_vma_manager.h> +#include <linux/dma-buf-map.h> #include <linux/io.h> #include <linux/highmem.h> #include <linux/wait.h> @@ -471,6 +472,77 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) } EXPORT_SYMBOL(ttm_bo_kunmap); +int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map) +{ + struct ttm_resource *mem = &bo->mem; + int ret; + + ret = ttm_mem_io_reserve(bo->bdev, mem); + if (ret) + return ret; + + if (mem->bus.is_iomem) { + void __iomem *vaddr_iomem; + size_t size = bo->num_pages << PAGE_SHIFT; + + if (mem->bus.addr) + vaddr_iomem = (void __iomem *)mem->bus.addr; + else if (mem->bus.caching == ttm_write_combined) + vaddr_iomem = ioremap_wc(mem->bus.offset, size); + else + vaddr_iomem = ioremap(mem->bus.offset, size); + + if (!vaddr_iomem) + return -ENOMEM; + + dma_buf_map_set_vaddr_iomem(map, vaddr_iomem); + + } else { + struct 
ttm_operation_ctx ctx = { + .interruptible = false, + .no_wait_gpu = false + }; + struct ttm_tt *ttm = bo->ttm; + pgprot_t prot; + void *vaddr; + + ret = ttm_tt_populate(bo->bdev, ttm, &ctx); + if (ret) + return ret; + + /* + * We need to use vmap to get the desired page protection + * or to make the buffer object look contiguous. + */ + prot = ttm_io_prot(bo, mem, PAGE_KERNEL); + vaddr = vmap(ttm->pages, bo->num_pages, 0, prot); + if (!vaddr) + return -ENOMEM; + + dma_buf_map_set_vaddr(map, vaddr); + } + + return 0; +} +EXPORT_SYMBOL(ttm_bo_vmap); + +void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map) +{ + struct ttm_resource *mem = &bo->mem; + + if (dma_buf_map_is_null(map)) + return; + + if (!map->is_iomem) + vunmap(map->vaddr); + else if (!mem->bus.addr) + iounmap(map->vaddr_iomem); + dma_buf_map_clear(map); + + ttm_mem_io_free(bo->bdev, &bo->mem); +} +EXPORT_SYMBOL(ttm_bo_vunmap); + static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo, bool dst_use_tt) { diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c index b5259cb1383f..07140e0b90a3 100644 --- a/drivers/gpu/drm/tve200/tve200_drv.c +++ b/drivers/gpu/drm/tve200/tve200_drv.c @@ -137,7 +137,7 @@ finish: DEFINE_DRM_GEM_CMA_FOPS(drm_fops); -static struct drm_driver tve200_drm_driver = { +static const struct drm_driver tve200_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .ioctls = NULL, .fops = &drm_fops, diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c index cdc1c42e1669..3750fd216131 100644 --- a/drivers/gpu/drm/udl/udl_connector.c +++ b/drivers/gpu/drm/udl/udl_connector.c @@ -20,6 +20,7 @@ static int udl_get_edid_block(void *data, u8 *buf, unsigned int block, int ret, i; u8 *read_buff; struct udl_device *udl = data; + struct usb_device *udev = udl_to_usb_device(udl); read_buff = kmalloc(2, GFP_KERNEL); if (!read_buff) @@ -27,10 +28,9 @@ static int udl_get_edid_block(void *data, u8 *buf, unsigned int block, for (i = 0; i < len; i++) { int bval = (i + block * EDID_LENGTH) << 8; - ret = usb_control_msg(udl->udev, - usb_rcvctrlpipe(udl->udev, 0), - (0x02), (0x80 | (0x02 << 5)), bval, - 0xA1, read_buff, 2, HZ); + ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), + 0x02, (0x80 | (0x02 << 5)), bval, + 0xA1, read_buff, 2, HZ); if (ret < 1) { DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret); kfree(read_buff); diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 96d4317a2c1b..b5a8dd9fdf02 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -34,7 +34,7 @@ static int udl_usb_resume(struct usb_interface *interface) DEFINE_DRM_GEM_FOPS(udl_driver_fops); -static struct drm_driver driver = { +static const struct drm_driver driver = { .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET, /* GEM hooks */ @@ -53,7 +53,6 @@ static struct drm_driver driver = { static struct udl_device *udl_driver_create(struct usb_interface *interface) { - struct usb_device *udev = interface_to_usbdev(interface); struct udl_device *udl; int r; @@ -62,8 +61,6 @@ static struct udl_device *udl_driver_create(struct usb_interface *interface) if (IS_ERR(udl)) return udl; - udl->udev = udev; - r = udl_init(udl); if (r) return ERR_PTR(r); diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h index b1461f30780b..875e73551ae9 100644 --- a/drivers/gpu/drm/udl/udl_drv.h +++ b/drivers/gpu/drm/udl/udl_drv.h @@ -50,7 +50,6 @@ struct urb_list { struct 
udl_device { struct drm_device drm; struct device *dev; - struct usb_device *udev; struct drm_simple_display_pipe display_pipe; @@ -66,6 +65,11 @@ struct udl_device { #define to_udl(x) container_of(x, struct udl_device, drm) +static inline struct usb_device *udl_to_usb_device(struct udl_device *udl) +{ + return interface_to_usbdev(to_usb_interface(udl->drm.dev)); +} + /* modeset */ int udl_modeset_init(struct drm_device *dev); struct drm_connector *udl_connector_init(struct drm_device *dev); diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index f5d27f2a5654..0e2a376cb075 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c @@ -26,10 +26,9 @@ #define GET_URB_TIMEOUT HZ #define FREE_URB_TIMEOUT (HZ*2) -static int udl_parse_vendor_descriptor(struct drm_device *dev, - struct usb_device *usbdev) +static int udl_parse_vendor_descriptor(struct udl_device *udl) { - struct udl_device *udl = to_udl(dev); + struct usb_device *udev = udl_to_usb_device(udl); char *desc; char *buf; char *desc_end; @@ -41,7 +40,7 @@ static int udl_parse_vendor_descriptor(struct drm_device *dev, return false; desc = buf; - total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */ + total_len = usb_get_descriptor(udev, 0x5f, /* vendor specific */ 0, desc, MAX_VENDOR_DESCRIPTOR_SIZE); if (total_len > 5) { DRM_INFO("vendor descriptor length:%x data:%11ph\n", @@ -98,19 +97,20 @@ success: */ static int udl_select_std_channel(struct udl_device *udl) { - int ret; static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7, 0x1C, 0x88, 0x5E, 0x15, 0x60, 0xFE, 0xC6, 0x97, 0x16, 0x3D, 0x47, 0xF2}; + void *sendbuf; + int ret; + struct usb_device *udev = udl_to_usb_device(udl); sendbuf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL); if (!sendbuf) return -ENOMEM; - ret = usb_control_msg(udl->udev, - usb_sndctrlpipe(udl->udev, 0), + ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), NR_USB_REQUEST_CHANNEL, (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0, sendbuf, sizeof(set_def_chn), @@ -202,6 +202,7 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size) struct urb_node *unode; char *buf; size_t wanted_size = count * size; + struct usb_device *udev = udl_to_usb_device(udl); spin_lock_init(&udl->urbs.lock); @@ -229,7 +230,7 @@ retry: } unode->urb = urb; - buf = usb_alloc_coherent(udl->udev, size, GFP_KERNEL, + buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &urb->transfer_dma); if (!buf) { kfree(unode); @@ -243,8 +244,8 @@ retry: } /* urb->transfer_buffer_length set to actual before submit */ - usb_fill_bulk_urb(urb, udl->udev, usb_sndbulkpipe(udl->udev, 1), - buf, size, udl_urb_completion, unode); + usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 1), + buf, size, udl_urb_completion, unode); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; list_add_tail(&unode->entry, &udl->urbs.list); @@ -316,7 +317,7 @@ int udl_init(struct udl_device *udl) mutex_init(&udl->gem_lock); - if (!udl_parse_vendor_descriptor(dev, udl->udev)) { + if (!udl_parse_vendor_descriptor(udl)) { ret = -ENODEV; DRM_ERROR("firmware not recognized. 
Assume incompatible device\n"); goto err; diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index fef43f4e3bac..42eeba1dfdbf 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -276,6 +276,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb, int x, int y, struct urb *urb; struct drm_rect clip; int log_bpp; + struct dma_buf_map map; void *vaddr; ret = udl_log_cpp(fb->format->cpp[0]); @@ -296,11 +297,12 @@ static int udl_handle_damage(struct drm_framebuffer *fb, int x, int y, return ret; } - vaddr = drm_gem_shmem_vmap(fb->obj[0]); - if (IS_ERR(vaddr)) { + ret = drm_gem_shmem_vmap(fb->obj[0], &map); + if (ret) { DRM_ERROR("failed to vmap fb\n"); goto out_dma_buf_end_cpu_access; } + vaddr = map.vaddr; /* TODO: Use mapping abstraction properly */ urb = udl_get_urb(dev); if (!urb) @@ -333,7 +335,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb, int x, int y, ret = 0; out_drm_gem_shmem_vunmap: - drm_gem_shmem_vunmap(fb->obj[0], vaddr); + drm_gem_shmem_vunmap(fb->obj[0], &map); out_dma_buf_end_cpu_access: if (import_attach) { tmp_ret = dma_buf_end_cpu_access(import_attach->dmabuf, diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index 9f7c26193831..2da0c1180bc6 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -194,7 +194,7 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = { DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CSD, v3d_submit_csd_ioctl, DRM_RENDER_ALLOW | DRM_AUTH), }; -static struct drm_driver v3d_drm_driver = { +static const struct drm_driver v3d_drm_driver = { .driver_features = (DRIVER_GEM | DRIVER_RENDER | DRIVER_SYNCOBJ), diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c index cf2e3e6a2388..f3eac72cb46e 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_drv.c +++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c @@ -26,7 +26,7 @@ static int vbox_modeset = -1; MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); module_param_named(modeset, vbox_modeset, int, 0400); -static struct drm_driver driver; +static const struct drm_driver driver; static const struct pci_device_id pciidlist[] = { { PCI_DEVICE(0x80ee, 0xbeef) }, @@ -175,7 +175,7 @@ static struct pci_driver vbox_pci_driver = { DEFINE_DRM_GEM_FOPS(vbox_fops); -static struct drm_driver driver = { +static const struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c index 322bf7133ba1..dbc0dd53c69e 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_mode.c +++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c @@ -9,6 +9,8 @@ * Michael Thayer <michael.thayer@oracle.com, * Hans de Goede <hdegoede@redhat.com> */ + +#include <linux/dma-buf-map.h> #include <linux/export.h> #include <drm/drm_atomic.h> @@ -384,6 +386,8 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane, u32 height = plane->state->crtc_h; size_t data_size, mask_size; u32 flags; + struct dma_buf_map map; + int ret; u8 *src; /* @@ -397,8 +401,8 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane, vbox_crtc->cursor_enabled = true; - src = drm_gem_vram_vmap(gbo); - if (IS_ERR(src)) { + ret = drm_gem_vram_vmap(gbo, &map); + if (ret) { /* * BUG: we should have pinned the BO in prepare_fb(). 
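
Like the cirrus and gm12u320 blit paths earlier in this diff, the vbox cursor code above still assumes a system-memory mapping, as its TODO comment notes. Where a buffer may instead be mapped as I/O memory, the is_iomem flag in struct dma_buf_map tells the two cases apart. A small hypothetical helper sketching that check (not part of this series):

#include <linux/dma-buf-map.h>
#include <linux/io.h>
#include <linux/string.h>

/* Copy out of a mapping that may be in I/O memory or in system memory. */
static void foo_copy_from_map(void *dst, const struct dma_buf_map *map, size_t len)
{
        if (map->is_iomem)
                memcpy_fromio(dst, map->vaddr_iomem, len);
        else
                memcpy(dst, map->vaddr, len);
}
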
*/ @@ -406,6 +410,7 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane, DRM_WARN("Could not map cursor bo, skipping update\n"); return; } + src = map.vaddr; /* TODO: Use mapping abstraction properly */ /* * The mask must be calculated based on the alpha @@ -416,7 +421,7 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane, data_size = width * height * 4 + mask_size; copy_cursor_image(src, vbox->cursor_data, width, height, mask_size); - drm_gem_vram_vunmap(gbo, src); + drm_gem_vram_vunmap(gbo, &map); flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE | VBOX_MOUSE_POINTER_ALPHA; diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index c2dead3b0733..a21a6c53ffcf 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -387,7 +387,6 @@ static const struct drm_gem_object_funcs vc4_gem_object_funcs = { .export = vc4_prime_export, .get_sg_table = drm_gem_cma_prime_get_sg_table, .vmap = vc4_prime_vmap, - .vunmap = drm_gem_cma_prime_vunmap, .vm_ops = &vc4_vm_ops, }; @@ -786,16 +785,16 @@ int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) return drm_gem_cma_prime_mmap(obj, vma); } -void *vc4_prime_vmap(struct drm_gem_object *obj) +int vc4_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) { struct vc4_bo *bo = to_vc4_bo(obj); if (bo->validated_shader) { DRM_DEBUG("mmaping of shader BOs not allowed.\n"); - return ERR_PTR(-EINVAL); + return -EINVAL; } - return drm_gem_cma_prime_vmap(obj); + return drm_gem_cma_prime_vmap(obj, map); } struct drm_gem_object * diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 40f1192fff02..137c382256d5 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -807,7 +807,7 @@ int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); -void *vc4_prime_vmap(struct drm_gem_object *obj); +int vc4_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); int vc4_bo_cache_init(struct drm_device *dev); int vc4_bo_inc_usecnt(struct vc4_bo *bo); void vc4_bo_dec_usecnt(struct vc4_bo *bo); diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c index f4aa75efd16b..18abc06335c1 100644 --- a/drivers/gpu/drm/vc4/vc4_perfmon.c +++ b/drivers/gpu/drm/vc4/vc4_perfmon.c @@ -77,7 +77,7 @@ struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id) void vc4_perfmon_open_file(struct vc4_file *vc4file) { mutex_init(&vc4file->perfmon.lock); - idr_init(&vc4file->perfmon.idr); + idr_init_base(&vc4file->perfmon.idr, VC4_PERFMONID_MIN); } static int vc4_perfmon_idr_del(int id, void *elem, void *data) diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index fa54a6d1403d..9a413091abb6 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -361,24 +361,30 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev, return &obj->base; } -static void *vgem_prime_vmap(struct drm_gem_object *obj) +static int vgem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) { struct drm_vgem_gem_object *bo = to_vgem_bo(obj); long n_pages = obj->size >> PAGE_SHIFT; struct page **pages; + void *vaddr; pages = vgem_pin_pages(bo); if (IS_ERR(pages)) - return NULL; + return PTR_ERR(pages); + + vaddr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL)); + if 
(!vaddr) + return -ENOMEM; + dma_buf_map_set_vaddr(map, vaddr); - return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL)); + return 0; } -static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +static void vgem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) { struct drm_vgem_gem_object *bo = to_vgem_bo(obj); - vunmap(vaddr); + vunmap(map->vaddr); vgem_unpin_pages(bo); } @@ -415,7 +421,7 @@ static const struct drm_gem_object_funcs vgem_gem_object_funcs = { .vm_ops = &vgem_gem_vm_ops, }; -static struct drm_driver vgem_driver = { +static const struct drm_driver vgem_driver = { .driver_features = DRIVER_GEM | DRIVER_RENDER, .open = vgem_open, .postclose = vgem_postclose, diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c index 17f32f550dd9..2902dc6e64fa 100644 --- a/drivers/gpu/drm/vgem/vgem_fence.c +++ b/drivers/gpu/drm/vgem/vgem_fence.c @@ -233,7 +233,7 @@ int vgem_fence_signal_ioctl(struct drm_device *dev, int vgem_fence_open(struct vgem_file *vfile) { mutex_init(&vfile->fence_mutex); - idr_init(&vfile->fence_idr); + idr_init_base(&vfile->fence_idr, 1); return 0; } diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index 4bf74836bd53..a6caebd4a0dd 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -119,6 +119,8 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc, static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc); /* @@ -127,7 +129,7 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc, * in the plane update callback, and here we just check * whenever we must force the modeset. 
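
The virtio change above follows the same idiom as the rockchip VOP and tegra CRTC hooks earlier in this diff: atomic callbacks that now take the full drm_atomic_state look up their own CRTC state instead of dereferencing crtc->state. A hypothetical sketch of the pattern:

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>

/* Look up this CRTC's new state from the passed-in atomic state. */
static void foo_crtc_atomic_flush(struct drm_crtc *crtc,
                                  struct drm_atomic_state *state)
{
        struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state, crtc);

        if (drm_atomic_crtc_needs_modeset(new_state)) {
                /* ... a full modeset is pending; defer incremental updates ... */
                return;
        }

        if (new_state->color_mgmt_changed && !new_state->active_changed) {
                /* ... update gamma/degamma LUTs from new_state ... */
        }
}
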
*/ - if (drm_atomic_crtc_needs_modeset(crtc->state)) { + if (drm_atomic_crtc_needs_modeset(crtc_state)) { output->needs_modeset = true; } } diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index 86330f1ade72..27f13bd29c13 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -37,7 +37,7 @@ #include "virtgpu_drv.h" -static struct drm_driver driver; +static const struct drm_driver driver; static int virtio_gpu_modeset = -1; @@ -190,7 +190,7 @@ MODULE_AUTHOR("Alon Levy"); DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops); -static struct drm_driver driver = { +static const struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC, .open = virtio_gpu_driver_open, .postclose = virtio_gpu_driver_postclose, diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index 2d3aa7baffe4..d9ad27e00905 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -184,8 +184,9 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, *nents = shmem->pages->orig_nents; } - *ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry), - GFP_KERNEL); + *ents = kvmalloc_array(*nents, + sizeof(struct virtio_gpu_mem_entry), + GFP_KERNEL); if (!(*ents)) { DRM_ERROR("failed to allocate ent list\n"); return -ENOMEM; diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 857f730747b6..cf84d382dd41 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -1211,10 +1211,8 @@ int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev, struct virtio_gpu_resp_map_info *resp_buf; resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL); - if (!resp_buf) { - virtio_gpu_array_put_free(objs); + if (!resp_buf) return -ENOMEM; - } cmd_p = virtio_gpu_alloc_cmd_resp (vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p), diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 25faba5aac08..1a1b5bc8e121 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -78,7 +78,7 @@ static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state) drm_atomic_helper_cleanup_planes(dev, old_state); } -static struct drm_driver vkms_driver = { +static const struct drm_driver vkms_driver = { .driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM, .release = vkms_release, .fops = &vkms_driver_fops, diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c index 9890137bcb8d..0824327cc860 100644 --- a/drivers/gpu/drm/vkms/vkms_plane.c +++ b/drivers/gpu/drm/vkms/vkms_plane.c @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-2.0+ +#include <linux/dma-buf-map.h> + #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_fourcc.h> @@ -146,15 +148,16 @@ static int vkms_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) { struct drm_gem_object *gem_obj; - void *vaddr; + struct dma_buf_map map; + int ret; if (!state->fb) return 0; gem_obj = drm_gem_fb_get_obj(state->fb, 0); - vaddr = drm_gem_shmem_vmap(gem_obj); - if (IS_ERR(vaddr)) - DRM_ERROR("vmap failed: %li\n", PTR_ERR(vaddr)); + ret = drm_gem_shmem_vmap(gem_obj, &map); + if (ret) + DRM_ERROR("vmap failed: %d\n", ret); return drm_gem_fb_prepare_fb(plane, state); } @@ -164,13 +167,15 @@ static void vkms_cleanup_fb(struct drm_plane *plane, { struct drm_gem_object *gem_obj; 
struct drm_gem_shmem_object *shmem_obj; + struct dma_buf_map map; if (!old_state->fb) return; gem_obj = drm_gem_fb_get_obj(old_state->fb, 0); shmem_obj = to_drm_gem_shmem_obj(drm_gem_fb_get_obj(old_state->fb, 0)); - drm_gem_shmem_vunmap(gem_obj, shmem_obj->vaddr); + dma_buf_map_set_vaddr(&map, shmem_obj->vaddr); + drm_gem_shmem_vunmap(gem_obj, &map); } static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = { diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c index 26b903926872..67f80ab1e85f 100644 --- a/drivers/gpu/drm/vkms/vkms_writeback.c +++ b/drivers/gpu/drm/vkms/vkms_writeback.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0+ -#include "vkms_drv.h" +#include <linux/dma-buf-map.h> + #include <drm/drm_fourcc.h> #include <drm/drm_writeback.h> #include <drm/drm_probe_helper.h> @@ -8,6 +9,8 @@ #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_gem_shmem_helper.h> +#include "vkms_drv.h" + static const u32 vkms_wb_formats[] = { DRM_FORMAT_XRGB8888, }; @@ -65,19 +68,20 @@ static int vkms_wb_prepare_job(struct drm_writeback_connector *wb_connector, struct drm_writeback_job *job) { struct drm_gem_object *gem_obj; - void *vaddr; + struct dma_buf_map map; + int ret; if (!job->fb) return 0; gem_obj = drm_gem_fb_get_obj(job->fb, 0); - vaddr = drm_gem_shmem_vmap(gem_obj); - if (IS_ERR(vaddr)) { - DRM_ERROR("vmap failed: %li\n", PTR_ERR(vaddr)); - return PTR_ERR(vaddr); + ret = drm_gem_shmem_vmap(gem_obj, &map); + if (ret) { + DRM_ERROR("vmap failed: %d\n", ret); + return ret; } - job->priv = vaddr; + job->priv = map.vaddr; return 0; } @@ -87,12 +91,14 @@ static void vkms_wb_cleanup_job(struct drm_writeback_connector *connector, { struct drm_gem_object *gem_obj; struct vkms_device *vkmsdev; + struct dma_buf_map map; if (!job->fb) return; gem_obj = drm_gem_fb_get_obj(job->fb, 0); - drm_gem_shmem_vunmap(gem_obj, job->priv); + dma_buf_map_set_vaddr(&map, job->priv); + drm_gem_shmem_vunmap(gem_obj, &map); vkmsdev = drm_device_to_vkms_device(gem_obj->dev); vkms_set_composer(&vkmsdev->output, false); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 0c42d2c05f43..216daf93022c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1469,7 +1469,7 @@ static const struct file_operations vmwgfx_driver_fops = { .get_unmapped_area = vmw_get_unmapped_area, }; -static struct drm_driver driver = { +static const struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC, .ioctls = vmw_ioctls, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index 51f70bea41cc..6a04261ce760 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -695,7 +695,8 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo) static int vmw_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_resource *new_mem) + struct ttm_resource *new_mem, + struct ttm_place *hop) { struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->mem.mem_type); struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type); diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index 98b6d2ba088a..30d9adf31c84 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -483,7 +483,7 @@ static const struct file_operations xen_drm_dev_fops 
= { .mmap = xen_drm_front_gem_mmap, }; -static struct drm_driver xen_drm_driver = { +static const struct drm_driver xen_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .release = xen_drm_drv_release, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c index 4f34ef34ba60..74db5a840bed 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.c +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c @@ -290,22 +290,28 @@ int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma) return gem_mmap_obj(xen_obj, vma); } -void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj) +int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj, struct dma_buf_map *map) { struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj); + void *vaddr; if (!xen_obj->pages) - return NULL; + return -ENOMEM; /* Please see comment in gem_mmap_obj on mapping and attributes. */ - return vmap(xen_obj->pages, xen_obj->num_pages, - VM_MAP, PAGE_KERNEL); + vaddr = vmap(xen_obj->pages, xen_obj->num_pages, + VM_MAP, PAGE_KERNEL); + if (!vaddr) + return -ENOMEM; + dma_buf_map_set_vaddr(map, vaddr); + + return 0; } void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj, - void *vaddr) + struct dma_buf_map *map) { - vunmap(vaddr); + vunmap(map->vaddr); } int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj, diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.h b/drivers/gpu/drm/xen/xen_drm_front_gem.h index a39675fa31b2..a4e67d0a149c 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.h +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.h @@ -12,6 +12,7 @@ #define __XEN_DRM_FRONT_GEM_H struct dma_buf_attachment; +struct dma_buf_map; struct drm_device; struct drm_gem_object; struct file; @@ -34,10 +35,11 @@ void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj); int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma); -void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj); +int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj, + struct dma_buf_map *map); void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj, - void *vaddr); + struct dma_buf_map *map); int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj, struct vm_area_struct *vma); diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c index f3ffc3703a0e..0c1c50271a88 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c @@ -76,7 +76,7 @@ static const struct drm_mode_config_funcs zynqmp_dpsub_mode_config_funcs = { DEFINE_DRM_GEM_CMA_FOPS(zynqmp_dpsub_drm_fops); -static struct drm_driver zynqmp_dpsub_drm_driver = { +static const struct drm_driver zynqmp_dpsub_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c index 31014a451f8b..5506336594e2 100644 --- a/drivers/gpu/drm/zte/zx_drm_drv.c +++ b/drivers/gpu/drm/zte/zx_drm_drv.c @@ -34,7 +34,7 @@ static const struct drm_mode_config_funcs zx_drm_mode_config_funcs = { DEFINE_DRM_GEM_CMA_FOPS(zx_drm_fops); -static struct drm_driver zx_drm_driver = { +static const struct drm_driver zx_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_CMA_DRIVER_OPS, .fops = &zx_drm_fops, diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index e201f62d62c0..347fb962b6c9 100644 --- 
a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c @@ -32,6 +32,7 @@ struct host1x_subdev { /** * host1x_subdev_add() - add a new subdevice with an associated device node * @device: host1x device to add the subdevice to + * @driver: host1x driver containing the subdevices * @np: device node */ static int host1x_subdev_add(struct host1x_device *device, diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 9e5c78e00995..06f5805de2de 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -1380,11 +1380,6 @@ fb_mmap(struct file *file, struct vm_area_struct * vma) mutex_unlock(&info->mm_lock); vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); - /* - * The framebuffer needs to be accessed decrypted, be sure - * SME protection is removed - */ - vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); fb_pgprotect(file, vma, start); return vm_iomap_memory(vma, start, len); diff --git a/drivers/video/fbdev/riva/riva_hw.c b/drivers/video/fbdev/riva/riva_hw.c index 4168ac464565..bcf9c4b4de31 100644 --- a/drivers/video/fbdev/riva/riva_hw.c +++ b/drivers/video/fbdev/riva/riva_hw.c @@ -247,14 +247,13 @@ static int nv3_iterate(nv3_fifo_info *res_info, nv3_sim_state * state, nv3_arb_i int mburst_size = 32; int mmisses, gmisses, vmisses; int misses; - int vlwm, glwm, mlwm; + int vlwm, glwm; int last, next, cur; int max_gfsize ; long ns; vlwm = 0; glwm = 0; - mlwm = 0; vfsize = 0; gfsize = 0; cur = ainfo->cur; @@ -656,13 +655,12 @@ static void nv4CalcArbitration nv4_sim_state *arb ) { - int data, pagemiss, cas,width, video_enable, color_key_enable, bpp, align; + int data, pagemiss, cas,width, video_enable, bpp; int nvclks, mclks, pclks, vpagemiss, crtpagemiss, vbs; int found, mclk_extra, mclk_loop, cbs, m1, p1; int mclk_freq, pclk_freq, nvclk_freq, mp_enable; int us_m, us_n, us_p, video_drain_rate, crtc_drain_rate; int vpm_us, us_video, vlwm, video_fill_us, cpm_us, us_crt,clwm; - int craw, vraw; fifo->valid = 1; pclk_freq = arb->pclk_khz; @@ -672,9 +670,7 @@ static void nv4CalcArbitration cas = arb->mem_latency; width = arb->memory_width >> 6; video_enable = arb->enable_video; - color_key_enable = arb->gr_during_vid; bpp = arb->pix_bpp; - align = arb->mem_aligned; mp_enable = arb->enable_mp; clwm = 0; vlwm = 0; @@ -782,8 +778,6 @@ static void nv4CalcArbitration mclk_extra--; } } - craw = clwm; - vraw = vlwm; if (clwm < 384) clwm = 384; if (vlwm < 128) vlwm = 128; data = (int)(clwm); @@ -842,7 +836,7 @@ static void nv10CalcArbitration nv10_sim_state *arb ) { - int data, pagemiss, cas,width, video_enable, color_key_enable, bpp, align; + int data, pagemiss, cas,width, video_enable, bpp; int nvclks, mclks, pclks, vpagemiss, crtpagemiss, vbs; int nvclk_fill, us_extra; int found, mclk_extra, mclk_loop, cbs, m1; @@ -863,9 +857,7 @@ static void nv10CalcArbitration cas = arb->mem_latency; width = arb->memory_width/64; video_enable = arb->enable_video; - color_key_enable = arb->gr_during_vid; bpp = arb->pix_bpp; - align = arb->mem_aligned; mp_enable = arb->enable_mp; clwm = 0; vlwm = 1024; diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h index 7aaea665bfc2..f07f2fb02e75 100644 --- a/include/drm/drm_client.h +++ b/include/drm/drm_client.h @@ -3,6 +3,7 @@ #ifndef _DRM_CLIENT_H_ #define _DRM_CLIENT_H_ +#include <linux/dma-buf-map.h> #include <linux/lockdep.h> #include <linux/mutex.h> #include <linux/types.h> @@ -141,9 +142,9 @@ struct drm_client_buffer { struct drm_gem_object *gem; /** - * @vaddr: Virtual address for the buffer + 
* @map: Virtual address for the buffer */ - void *vaddr; + struct dma_buf_map map; /** * @fb: DRM framebuffer @@ -155,7 +156,7 @@ struct drm_client_buffer * drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format); void drm_client_framebuffer_delete(struct drm_client_buffer *buffer); int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect); -void *drm_client_buffer_vmap(struct drm_client_buffer *buffer); +int drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct dma_buf_map *map); void drm_client_buffer_vunmap(struct drm_client_buffer *buffer); int drm_client_modeset_create(struct drm_client_dev *client); diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h index f4f68e7a9149..2c361964aee7 100644 --- a/include/drm/drm_device.h +++ b/include/drm/drm_device.h @@ -83,7 +83,11 @@ struct drm_device { } managed; /** @driver: DRM driver managing the device */ +#ifdef CONFIG_DRM_LEGACY struct drm_driver *driver; +#else + const struct drm_driver *driver; +#endif /** * @dev_private: diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 023076255a7f..02787319246a 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -495,6 +495,7 @@ struct drm_driver { */ const struct file_operations *fops; +#ifdef CONFIG_DRM_LEGACY /* Everything below here is for legacy driver, never use! */ /* private: */ @@ -509,9 +510,11 @@ struct drm_driver { int (*enable_vblank)(struct drm_device *dev, unsigned int pipe); void (*disable_vblank)(struct drm_device *dev, unsigned int pipe); int dev_priv_size; +#endif }; -void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver, +void *__devm_drm_dev_alloc(struct device *parent, + const struct drm_driver *driver, size_t size, size_t offset); /** @@ -544,7 +547,7 @@ void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver, ((type *) __devm_drm_dev_alloc(parent, driver, sizeof(type), \ offsetof(type, member))) -struct drm_device *drm_dev_alloc(struct drm_driver *driver, +struct drm_device *drm_dev_alloc(const struct drm_driver *driver, struct device *parent); int drm_dev_register(struct drm_device *dev, unsigned long flags); void drm_dev_unregister(struct drm_device *dev); diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h index a60f5f1555ac..5dfa5f7a80a7 100644 --- a/include/drm/drm_encoder.h +++ b/include/drm/drm_encoder.h @@ -89,7 +89,6 @@ struct drm_encoder_funcs { * @head: list management * @base: base KMS object * @name: human readable name, can be overwritten by the driver - * @bridge: bridge associated to the encoder * @funcs: control functions * @helper_private: mid-layer private data * diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index c38dd35da00b..5e6daa1c982f 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -39,6 +39,7 @@ #include <drm/drm_vma_manager.h> +struct dma_buf_map; struct drm_gem_object; /** @@ -138,7 +139,7 @@ struct drm_gem_object_funcs { * * This callback is optional. */ - void *(*vmap)(struct drm_gem_object *obj); + int (*vmap)(struct drm_gem_object *obj, struct dma_buf_map *map); /** * @vunmap: @@ -148,7 +149,7 @@ struct drm_gem_object_funcs { * * This callback is optional. 
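
The drm_drv.h hunk above constifies the driver structure accepted by drm_dev_alloc() and devm_drm_dev_alloc(), which is what permits the many 'static const struct drm_driver' conversions earlier in this diff. A hypothetical probe sketch using a const driver together with the CMA helpers (all 'foo' names are made up):

#include <linux/err.h>
#include <linux/platform_device.h>

#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>

/* Hypothetical device wrapper embedding struct drm_device. */
struct foo_device {
        struct drm_device drm;
        /* ... driver-private state ... */
};

DEFINE_DRM_GEM_CMA_FOPS(foo_fops);

static const struct drm_driver foo_drm_driver = {
        .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        DRM_GEM_CMA_DRIVER_OPS,
        .fops   = &foo_fops,
        .name   = "foo",
        .desc   = "Hypothetical example driver",
        .date   = "20201112",
        .major  = 1,
        .minor  = 0,
};

static int foo_probe(struct platform_device *pdev)
{
        struct foo_device *foo;

        foo = devm_drm_dev_alloc(&pdev->dev, &foo_drm_driver,
                                 struct foo_device, drm);
        if (IS_ERR(foo))
                return PTR_ERR(foo);

        /* ... set up modesetting here ... */

        return drm_dev_register(&foo->drm, 0);
}
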
*/ - void (*vunmap)(struct drm_gem_object *obj, void *vaddr); + void (*vunmap)(struct drm_gem_object *obj, struct dma_buf_map *map); /** * @mmap: diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index 2bfa2502607a..5605c1b8f779 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -103,11 +103,7 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev, struct sg_table *sgt); int drm_gem_cma_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); -void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj); -void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr); - -struct drm_gem_object * -drm_gem_cma_create_object_default_funcs(struct drm_device *dev, size_t size); +int drm_gem_cma_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); /** * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE - CMA GEM driver operations @@ -123,7 +119,6 @@ drm_gem_cma_create_object_default_funcs(struct drm_device *dev, size_t size); * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead. */ #define DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(dumb_create_func) \ - .gem_create_object = drm_gem_cma_create_object_default_funcs, \ .dumb_create = (dumb_create_func), \ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ @@ -162,7 +157,6 @@ drm_gem_cma_create_object_default_funcs(struct drm_device *dev, size_t size); * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE() instead. */ #define DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(dumb_create_func) \ - .gem_create_object = drm_gem_cma_create_object_default_funcs, \ .dumb_create = dumb_create_func, \ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index 5381f0c8cf6f..3449a0353fe0 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -113,8 +113,8 @@ int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem); int drm_gem_shmem_pin(struct drm_gem_object *obj); void drm_gem_shmem_unpin(struct drm_gem_object *obj); -void *drm_gem_shmem_vmap(struct drm_gem_object *obj); -void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr); +int drm_gem_shmem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); +void drm_gem_shmem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv); diff --git a/include/drm/drm_gem_ttm_helper.h b/include/drm/drm_gem_ttm_helper.h index 118cef76f84f..7c6d874910b8 100644 --- a/include/drm/drm_gem_ttm_helper.h +++ b/include/drm/drm_gem_ttm_helper.h @@ -10,11 +10,17 @@ #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> +struct dma_buf_map; + #define drm_gem_ttm_of_gem(gem_obj) \ container_of(gem_obj, struct ttm_buffer_object, base) void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent, const struct drm_gem_object *gem); +int drm_gem_ttm_vmap(struct drm_gem_object *gem, + struct dma_buf_map *map); +void drm_gem_ttm_vunmap(struct drm_gem_object *gem, + struct dma_buf_map *map); int drm_gem_ttm_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma); diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h index 128f88174d32..c0d28ba0f5c9 100644 --- a/include/drm/drm_gem_vram_helper.h +++ b/include/drm/drm_gem_vram_helper.h @@ 
-10,6 +10,7 @@ #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> +#include <linux/dma-buf-map.h> #include <linux/kernel.h> /* for container_of() */ struct drm_mode_create_dumb; @@ -29,9 +30,8 @@ struct vm_area_struct; /** * struct drm_gem_vram_object - GEM object backed by VRAM - * @gem: GEM object * @bo: TTM buffer object - * @kmap: Mapping information for @bo + * @map: Mapping information for @bo * @placement: TTM placement information. Supported placements are \ %TTM_PL_VRAM and %TTM_PL_SYSTEM * @placements: TTM placement information. @@ -50,15 +50,15 @@ struct vm_area_struct; */ struct drm_gem_vram_object { struct ttm_buffer_object bo; - struct ttm_bo_kmap_obj kmap; + struct dma_buf_map map; /** - * @kmap_use_count: + * @vmap_use_count: * * Reference count on the virtual address. * The address are un-mapped when the count reaches zero. */ - unsigned int kmap_use_count; + unsigned int vmap_use_count; /* Supported placements are %TTM_PL_VRAM and %TTM_PL_SYSTEM */ struct ttm_placement placement; @@ -97,8 +97,8 @@ u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo); s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo); int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag); int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo); -void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo); -void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr); +int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map); +void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map); int drm_gem_vram_fill_create_dumb(struct drm_file *file, struct drm_device *dev, diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index 5ffbb4ed5b35..ab424ddd7665 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -878,18 +878,6 @@ struct drm_mode_config { bool prefer_shadow_fbdev; /** - * @fbdev_use_iomem: - * - * Set to true if framebuffer reside in iomem. - * When set to true memcpy_toio() is used when copying the framebuffer in - * drm_fb_helper.drm_fb_helper_dirty_blit_real(). - * - * FIXME: This should be replaced with a per-mapping is_iomem - * flag (like ttm does), and then used everywhere in fbdev code. - */ - bool fbdev_use_iomem; - - /** * @quirk_addfb_prefer_xbgr_30bpp: * * Special hack for legacy ADDFB to keep nouveau userspace happy. Should diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 5ddad88ae6ed..2564e66e67d7 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -48,6 +48,8 @@ struct ttm_bo_global; struct ttm_bo_device; +struct dma_buf_map; + struct drm_mm_node; struct ttm_placement; @@ -496,6 +498,32 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map); /** + * ttm_bo_vmap + * + * @bo: The buffer object. + * @map: pointer to a struct dma_buf_map representing the map. + * + * Sets up a kernel virtual mapping, using ioremap or vmap to the + * data in the buffer object. The parameter @map returns the virtual + * address as struct dma_buf_map. Unmap the buffer with ttm_bo_vunmap(). + * + * Returns + * -ENOMEM: Out of memory. + * -EINVAL: Invalid range. + */ +int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map); + +/** + * ttm_bo_vunmap + * + * @bo: The buffer object. + * @map: Object describing the map to unmap. + * + * Unmaps a kernel map set up by ttm_bo_vmap(). 
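A rough usage sketch of ttm_bo_vmap()/ttm_bo_vunmap() (not taken from this merge; it assumes the buffer object is already pinned and reserved as required, and example_clear_bo() is a made-up name). The helpers added in this series cover memcpy and pointer increments; for other accesses a caller can still branch on the mapping type:

#include <drm/ttm/ttm_bo_api.h>
#include <linux/dma-buf-map.h>
#include <linux/io.h>
#include <linux/string.h>

/* Hypothetical helper that zeroes a mapped buffer object. */
static int example_clear_bo(struct ttm_buffer_object *bo, size_t len)
{
	struct dma_buf_map map;
	int ret;

	ret = ttm_bo_vmap(bo, &map);	/* ioremap or vmap, as appropriate */
	if (ret)			/* -ENOMEM or -EINVAL */
		return ret;

	if (map.is_iomem)
		memset_io(map.vaddr_iomem, 0, len);
	else
		memset(map.vaddr, 0, len);

	ttm_bo_vunmap(bo, &map);
	return 0;
}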
+ */ +void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map); + +/** * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object. * * @vma: vma as input from the fbdev mmap method. diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index da8208f43378..f02f7cf9ae90 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -121,6 +121,8 @@ struct ttm_bo_driver { * Return the bo flags for a buffer which is not mapped to the hardware. * These will be placed in proposed_flags so that when the move is * finished, they'll end up in bo->mem.flags + * This should not cause multihop evictions, and the core will warn + * if one is proposed. */ void (*evict_flags)(struct ttm_buffer_object *bo, @@ -134,12 +136,15 @@ struct ttm_bo_driver { * the graphics address space * @ctx: context for this move with parameters * @new_mem: the new memory region receiving the buffer + * @hop: placement for driver directed intermediate hop * * Move a buffer between two memory regions. + * Returns errno -EMULTIHOP if driver requests a hop */ int (*move)(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_resource *new_mem); + struct ttm_resource *new_mem, + struct ttm_place *hop); /** * struct ttm_bo_driver_member verify_access diff --git a/include/linux/dma-buf-map.h b/include/linux/dma-buf-map.h index fd1aba545fdf..583a3a1f9447 100644 --- a/include/linux/dma-buf-map.h +++ b/include/linux/dma-buf-map.h @@ -7,6 +7,7 @@ #define __DMA_BUF_MAP_H__ #include <linux/io.h> +#include <linux/string.h> /** * DOC: overview @@ -32,6 +33,14 @@ * accessing the buffer. Use the returned instance and the helper functions * to access the buffer's memory in the correct way. * + * The type :c:type:`struct dma_buf_map <dma_buf_map>` and its helpers are + * actually independent from the dma-buf infrastructure. When sharing buffers + * among devices, drivers have to know the location of the memory to access + * the buffers in a safe way. :c:type:`struct dma_buf_map <dma_buf_map>` + * solves this problem for dma-buf and its users. If other drivers or + * sub-systems require similar functionality, the type could be generalized + * and moved to a more prominent header file. + * * Open-coding access to :c:type:`struct dma_buf_map <dma_buf_map>` is * considered bad style. Rather then accessing its fields directly, use one * of the provided helper functions, or implement your own. For example, @@ -45,6 +54,20 @@ * * dma_buf_map_set_vaddr(&map. 0xdeadbeaf); * + * To set an address in I/O memory, use dma_buf_map_set_vaddr_iomem(). + * + * .. code-block:: c + * + * dma_buf_map_set_vaddr_iomem(&map. 0xdeadbeaf); + * + * Instances of struct dma_buf_map do not have to be cleaned up, but + * can be cleared to NULL with dma_buf_map_clear(). Cleared mappings + * always refer to system memory. + * + * .. code-block:: c + * + * dma_buf_map_clear(&map); + * * Test if a mapping is valid with either dma_buf_map_is_set() or * dma_buf_map_is_null(). * @@ -67,17 +90,19 @@ * if (dma_buf_map_is_equal(&sys_map, &io_map)) * // always false * - * Instances of struct dma_buf_map do not have to be cleaned up, but - * can be cleared to NULL with dma_buf_map_clear(). Cleared mappings - * always refer to system memory. + * A set up instance of struct dma_buf_map can be used to access or manipulate + * the buffer memory. Depending on the location of the memory, the provided + * helpers will pick the correct operations.
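Returning to the multihop change above: a minimal sketch of a driver ->move() callback using the new @hop argument (hypothetical, not taken from any driver in this merge; it assumes the generic ttm_bo_move_memcpy() fallback is usable for the remaining cases).

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

/* Hypothetical driver that cannot copy VRAM to VRAM directly. */
static int example_bo_move(struct ttm_buffer_object *bo, bool evict,
			   struct ttm_operation_ctx *ctx,
			   struct ttm_resource *new_mem,
			   struct ttm_place *hop)
{
	struct ttm_resource *old_mem = &bo->mem;

	/*
	 * Ask the core to bounce through system memory first: fill in the
	 * intermediate placement and return -EMULTIHOP. The core performs
	 * that hop and then calls ->move() again with the final target.
	 */
	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_VRAM) {
		hop->fpfn = 0;
		hop->lpfn = 0;
		hop->mem_type = TTM_PL_TT;
		hop->flags = 0;
		return -EMULTIHOP;
	}

	/* Everything else can go through the generic CPU copy path. */
	return ttm_bo_move_memcpy(bo, ctx, new_mem);
}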
Data can be copied into the memory + * with dma_buf_map_memcpy_to(). The address can be manipulated with + * dma_buf_map_incr(). * - * The type :c:type:`struct dma_buf_map <dma_buf_map>` and its helpers are - * actually independent from the dma-buf infrastructure. When sharing buffers - * among devices, drivers have to know the location of the memory to access - * the buffers in a safe way. :c:type:`struct dma_buf_map <dma_buf_map>` - * solves this problem for dma-buf and its users. If other drivers or - * sub-systems require similar functionality, the type could be generalized - * and moved to a more prominent header file. + * .. code-block:: c + * + * const void *src = ...; // source buffer + * size_t len = ...; // length of src + * + * dma_buf_map_memcpy_to(&map, src, len); + * dma_buf_map_incr(&map, len); // go to first byte after the memcpy */ /** @@ -119,6 +144,20 @@ static inline void dma_buf_map_set_vaddr(struct dma_buf_map *map, void *vaddr) } /** + * dma_buf_map_set_vaddr_iomem - Sets a dma-buf mapping structure to an address in I/O memory + * @map: The dma-buf mapping structure + * @vaddr_iomem: An I/O-memory address + * + * Sets the address and the I/O-memory flag. + */ +static inline void dma_buf_map_set_vaddr_iomem(struct dma_buf_map *map, + void __iomem *vaddr_iomem) +{ + map->vaddr_iomem = vaddr_iomem; + map->is_iomem = true; +} + +/** * dma_buf_map_is_equal - Compares two dma-buf mapping structures for equality * @lhs: The dma-buf mapping structure * @rhs: A dma-buf mapping structure to compare with @@ -190,4 +229,38 @@ static inline void dma_buf_map_clear(struct dma_buf_map *map) } } +/** + * dma_buf_map_memcpy_to - Memcpy into dma-buf mapping + * @dst: The dma-buf mapping structure + * @src: The source buffer + * @len: The number of byte in src + * + * Copies data into a dma-buf mapping. The source buffer is in system + * memory. Depending on the buffer's location, the helper picks the correct + * method of accessing the memory. + */ +static inline void dma_buf_map_memcpy_to(struct dma_buf_map *dst, const void *src, size_t len) +{ + if (dst->is_iomem) + memcpy_toio(dst->vaddr_iomem, src, len); + else + memcpy(dst->vaddr, src, len); +} + +/** + * dma_buf_map_incr - Increments the address stored in a dma-buf mapping + * @map: The dma-buf mapping structure + * @incr: The number of bytes to increment + * + * Increments the address stored in a dma-buf mapping. Depending on the + * buffer's location, the correct value will be updated. + */ +static inline void dma_buf_map_incr(struct dma_buf_map *map, size_t incr) +{ + if (map->is_iomem) + map->vaddr_iomem += incr; + else + map->vaddr += incr; +} + #endif /* __DMA_BUF_MAP_H__ */ diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 03875eaed51a..cf72699cb2bc 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -273,7 +273,7 @@ struct dma_buf_ops { /** * struct dma_buf - shared buffer object - * @size: size of the buffer + * @size: size of the buffer; invariant over the lifetime of the buffer. * @file: file pointer used for sharing buffers across, and for refcounting. * @attachments: list of dma_buf_attachment that denotes all devices attached, * protected by dma_resv lock. @@ -405,7 +405,7 @@ struct dma_buf_attachment { * @exp_name: name of the exporter - useful for debugging. 
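Tying the new dma-buf-map helpers together (a hedged sketch, not from this merge; example_fill_fb() and its parameters are invented): a caller can describe a framebuffer in I/O memory with dma_buf_map_set_vaddr_iomem() and then copy into it scanline by scanline with dma_buf_map_memcpy_to() and dma_buf_map_incr().

#include <linux/dma-buf-map.h>

/* Hypothetical scanline copy into an I/O-memory framebuffer. */
static void example_fill_fb(void __iomem *vram, const void *src,
			    size_t pitch, unsigned int lines)
{
	struct dma_buf_map map;
	unsigned int i;

	dma_buf_map_set_vaddr_iomem(&map, vram);	/* marks is_iomem */

	for (i = 0; i < lines; i++) {
		dma_buf_map_memcpy_to(&map, src, pitch);	/* memcpy_toio() here */
		dma_buf_map_incr(&map, pitch);			/* next scanline */
		src += pitch;
	}
}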
* @owner: pointer to exporter module - used for refcounting kernel module * @ops: Attach allocator-defined dma buf ops to the new buffer - * @size: Size of the buffer + * @size: Size of the buffer - invariant over the lifetime of the buffer * @flags: mode flags for the file * @resv: reservation-object, NULL to allocate default one * @priv: Attach private data of allocator to this buffer diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index 863eda048265..5ad10ab2a577 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -924,6 +924,12 @@ struct drm_mode_create_blob { * struct drm_mode_destroy_blob - Destroy user blob * @blob_id: blob_id to destroy * Destroy a user-created blob property. + * + * User-space can release blobs as soon as they do not need to refer to them by + * their blob object ID. For instance, if you are using a MODE_ID blob in an + * atomic commit and you will not make another commit re-using the same ID, you + * can destroy the blob as soon as the commit has been issued, without waiting + * for it to complete. */ struct drm_mode_destroy_blob { __u32 blob_id;
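To make the blob lifetime rule above concrete, here is a minimal user-space sketch using the standard libdrm wrappers (the function name example_set_mode() and its parameters are invented; the libdrm calls themselves are not part of this merge):

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Hypothetical: push a new mode via a MODE_ID blob, then drop the blob. */
static int example_set_mode(int fd, drmModeAtomicReq *req, uint32_t crtc_id,
			    uint32_t mode_id_prop,
			    const struct drm_mode_modeinfo *mode)
{
	uint32_t blob_id;
	int ret;

	ret = drmModeCreatePropertyBlob(fd, mode, sizeof(*mode), &blob_id);
	if (ret)
		return ret;

	drmModeAtomicAddProperty(req, crtc_id, mode_id_prop, blob_id);
	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);

	/* Safe even before the commit completes, per the comment above. */
	drmModeDestroyPropertyBlob(fd, blob_id);

	return ret;
}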