89 files changed, 2819 insertions, 817 deletions
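The drm_vblank.c changes in this series add two new ioctls, DRM_IOCTL_CRTC_GET_SEQUENCE and DRM_IOCTL_CRTC_QUEUE_SEQUENCE, exposing 64-bit per-CRTC vblank sequence numbers with monotonic nanosecond timestamps. A minimal userspace sketch of how they might be driven follows; it assumes the matching uapi additions from the same series (struct drm_crtc_get_sequence, struct drm_crtc_queue_sequence, DRM_EVENT_CRTC_SEQUENCE and the sequence flags) are present in the installed libdrm/kernel headers, and the crtc_id value is a placeholder that would normally be looked up via drmModeGetResources().

/* Sketch only: query the current CRTC sequence, then queue an event. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <xf86drm.h>            /* drmIoctl(); pulls in the drm.h uapi definitions */

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
	if (fd < 0)
		return 1;

	uint32_t crtc_id = 42;  /* placeholder: use an id from drmModeGetResources() */

	/* Read the current 64-bit sequence and its timestamp for this CRTC. */
	struct drm_crtc_get_sequence get = { .crtc_id = crtc_id };
	if (drmIoctl(fd, DRM_IOCTL_CRTC_GET_SEQUENCE, &get) == 0)
		printf("crtc %u: active=%u seq=%llu ns=%lld\n", crtc_id, get.active,
		       (unsigned long long)get.sequence, (long long)get.sequence_ns);

	/* Ask for an event ten vblanks from now; NEXT_ON_MISS makes the kernel
	 * round an already-passed target up to the next vblank. */
	struct drm_crtc_queue_sequence queue = {
		.crtc_id   = crtc_id,
		.flags     = DRM_CRTC_SEQUENCE_RELATIVE | DRM_CRTC_SEQUENCE_NEXT_ON_MISS,
		.sequence  = 10,
		.user_data = 0x1234,   /* arbitrary cookie, echoed back in the event */
	};
	if (drmIoctl(fd, DRM_IOCTL_CRTC_QUEUE_SEQUENCE, &queue) != 0) {
		close(fd);
		return 1;
	}

	/* The completion arrives as a DRM_EVENT_CRTC_SEQUENCE on the same fd;
	 * this read blocks until the target vblank has passed. */
	char buf[128];
	ssize_t len = read(fd, buf, sizeof(buf));
	if (len >= (ssize_t)sizeof(struct drm_event_crtc_sequence)) {
		struct drm_event_crtc_sequence ev;
		memcpy(&ev, buf, sizeof(ev));
		if (ev.base.type == DRM_EVENT_CRTC_SEQUENCE)
			printf("seq %llu at %lld ns (user_data %#llx)\n",
			       (unsigned long long)ev.sequence, (long long)ev.time_ns,
			       (unsigned long long)ev.user_data);
	}

	close(fd);
	return 0;
}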
diff --git a/Documentation/devicetree/bindings/display/panel/toshiba,lt089ac29000.txt b/Documentation/devicetree/bindings/display/panel/toshiba,lt089ac29000.txt new file mode 100644 index 000000000000..4c0caaf246c9 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/toshiba,lt089ac29000.txt @@ -0,0 +1,8 @@ +Toshiba 8.9" WXGA (1280x768) TFT LCD panel + +Required properties: +- compatible: should be "toshiba,lt089ac29000.txt" +- power-supply: as specified in the base binding + +This binding is compatible with the simple-panel binding, which is specified +in simple-panel.txt in this directory. diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt index 46df3b78ae9e..013e76b348ba 100644 --- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt +++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt @@ -40,6 +40,7 @@ CEC. It is one end of the pipeline. Required properties: - compatible: value must be one of: + * allwinner,sun4i-a10-hdmi * allwinner,sun5i-a10s-hdmi * allwinner,sun6i-a31-hdmi - reg: base address and size of memory-mapped region @@ -86,9 +87,11 @@ The TCON acts as a timing controller for RGB, LVDS and TV interfaces. Required properties: - compatible: value must be either: + * allwinner,sun4i-a10-tcon * allwinner,sun5i-a13-tcon * allwinner,sun6i-a31-tcon * allwinner,sun6i-a31s-tcon + * allwinner,sun7i-a20-tcon * allwinner,sun8i-a33-tcon * allwinner,sun8i-v3s-tcon - reg: base address and size of memory-mapped region @@ -153,8 +156,10 @@ system. Required properties: - compatible: value must be one of: + * allwinner,sun4i-a10-display-backend * allwinner,sun5i-a13-display-backend * allwinner,sun6i-a31-display-backend + * allwinner,sun7i-a20-display-backend * allwinner,sun8i-a33-display-backend - reg: base address and size of the memory-mapped region. - interrupts: interrupt associated to this IP @@ -185,8 +190,10 @@ deinterlacing and color space conversion. Required properties: - compatible: value must be one of: + * allwinner,sun4i-a10-display-frontend * allwinner,sun5i-a13-display-frontend * allwinner,sun6i-a31-display-frontend + * allwinner,sun7i-a20-display-frontend * allwinner,sun8i-a33-display-frontend - reg: base address and size of the memory-mapped region. - interrupts: interrupt associated to this IP @@ -231,10 +238,12 @@ extra node. Required properties: - compatible: value must be one of: + * allwinner,sun4i-a10-display-engine * allwinner,sun5i-a10s-display-engine * allwinner,sun5i-a13-display-engine * allwinner,sun6i-a31-display-engine * allwinner,sun6i-a31s-display-engine + * allwinner,sun7i-a20-display-engine * allwinner,sun8i-a33-display-engine * allwinner,sun8i-v3s-display-engine diff --git a/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt b/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt index 74e1e8add5a1..844e0103fb0d 100644 --- a/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt +++ b/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt @@ -3,6 +3,10 @@ NVIDIA Tegra host1x Required properties: - compatible: "nvidia,tegra<chip>-host1x" - reg: Physical base address and length of the controller's registers. + For pre-Tegra186, one entry describing the whole register area. 
+ For Tegra186, one entry for each entry in reg-names: + "vm" - VM region assigned to Linux + "hypervisor" - Hypervisor region (only if Linux acts as hypervisor) - interrupts: The interrupt outputs from the controller. - #address-cells: The number of cells used to represent physical base addresses in the host1x address space. Should be 1. diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index 92ee2f982572..96f8ec7dbe4e 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -304,6 +304,18 @@ There's a bunch of issues with it: Contact: Daniel Vetter +KMS cleanups +------------ + +Some of these date from the very introduction of KMS in 2008 ... + +- drm_mode_config.crtc_idr is misnamed, since it contains all KMS object. Should + be renamed to drm_mode_config.object_idr. + +- drm_display_mode doesn't need to be derived from drm_mode_object. That's + leftovers from older (never merged into upstream) KMS designs where modes + where set using their ID, including support to add/remove modes. + Better Testing ============== diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 02831a396419..0ac7aa346c69 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -1740,15 +1740,3 @@ static void fixup_hide_host_resource_fsl(struct pci_dev *dev) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl); - -static void fixup_vga(struct pci_dev *pdev) -{ - u16 cmd; - - pci_read_config_word(pdev, PCI_COMMAND, &cmd); - if ((cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) || !vga_default_device()) - vga_set_default_device(pdev); - -} -DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, - PCI_CLASS_DISPLAY_VGA, 8, fixup_vga); diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 2a4d163ac76f..2e065facdce7 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -298,7 +298,7 @@ static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc, if (force) { /* Display is disabled, so just drop the old fb */ - drm_framebuffer_unreference(fb); + drm_framebuffer_put(fb); return; } @@ -321,7 +321,7 @@ static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc, * the best. The worst that will happen is the buffer gets * reused before it has finished being displayed. */ - drm_framebuffer_unreference(fb); + drm_framebuffer_put(fb); } static void armada_drm_vblank_off(struct armada_crtc *dcrtc) @@ -577,7 +577,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc, unsigned i; bool interlaced; - drm_framebuffer_reference(crtc->primary->fb); + drm_framebuffer_get(crtc->primary->fb); interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE); @@ -718,7 +718,7 @@ static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, MAX_SCHEDULE_TIMEOUT); /* Take a reference to the new fb as we're using it */ - drm_framebuffer_reference(crtc->primary->fb); + drm_framebuffer_get(crtc->primary->fb); /* Update the base in the CRTC */ armada_drm_crtc_update_regs(dcrtc, regs); @@ -742,7 +742,7 @@ void armada_drm_crtc_plane_disable(struct armada_crtc *dcrtc, * primary plane. 
*/ if (plane->fb) - drm_framebuffer_unreference(plane->fb); + drm_framebuffer_put(plane->fb); /* Power down the Y/U/V FIFOs */ sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66; @@ -947,13 +947,13 @@ static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc, /* Must be a kernel-mapped object */ if (!obj->addr) { - drm_gem_object_unreference_unlocked(&obj->obj); + drm_gem_object_put_unlocked(&obj->obj); return -EINVAL; } if (obj->obj.size < w * h * 4) { DRM_ERROR("buffer is too small\n"); - drm_gem_object_unreference_unlocked(&obj->obj); + drm_gem_object_put_unlocked(&obj->obj); return -ENOMEM; } } @@ -961,7 +961,7 @@ static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc, if (dcrtc->cursor_obj) { dcrtc->cursor_obj->update = NULL; dcrtc->cursor_obj->update_data = NULL; - drm_gem_object_unreference_unlocked(&dcrtc->cursor_obj->obj); + drm_gem_object_put_unlocked(&dcrtc->cursor_obj->obj); } dcrtc->cursor_obj = obj; dcrtc->cursor_w = w; @@ -997,7 +997,7 @@ static void armada_drm_crtc_destroy(struct drm_crtc *crtc) struct armada_private *priv = crtc->dev->dev_private; if (dcrtc->cursor_obj) - drm_gem_object_unreference_unlocked(&dcrtc->cursor_obj->obj); + drm_gem_object_put_unlocked(&dcrtc->cursor_obj->obj); priv->dcrtc[dcrtc->num] = NULL; drm_crtc_cleanup(&dcrtc->crtc); @@ -1045,12 +1045,12 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc, * Ensure that we hold a reference on the new framebuffer. * This has to match the behaviour in mode_set. */ - drm_framebuffer_reference(fb); + drm_framebuffer_get(fb); ret = armada_drm_crtc_queue_frame_work(dcrtc, work); if (ret) { /* Undo our reference above */ - drm_framebuffer_unreference(fb); + drm_framebuffer_put(fb); kfree(work); return ret; } diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 2d45103d06cb..e857b88a9799 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -25,7 +25,7 @@ static void armada_drm_unref_work(struct work_struct *work) struct drm_framebuffer *fb; while (kfifo_get(&priv->fb_unref, &fb)) - drm_framebuffer_unreference(fb); + drm_framebuffer_put(fb); } /* Must be called with dev->event_lock held */ diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c index b9e1637cc4cf..a38d5a0892a9 100644 --- a/drivers/gpu/drm/armada/armada_fb.c +++ b/drivers/gpu/drm/armada/armada_fb.c @@ -17,7 +17,7 @@ static void armada_fb_destroy(struct drm_framebuffer *fb) struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb); drm_framebuffer_cleanup(&dfb->fb); - drm_gem_object_unreference_unlocked(&dfb->obj->obj); + drm_gem_object_put_unlocked(&dfb->obj->obj); kfree(dfb); } @@ -94,7 +94,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev, * the above call, but the caller will drop their reference * to it. Hence we need to take our own reference. 
*/ - drm_gem_object_reference(&obj->obj); + drm_gem_object_get(&obj->obj); return dfb; } @@ -143,12 +143,12 @@ static struct drm_framebuffer *armada_fb_create(struct drm_device *dev, goto err; } - drm_gem_object_unreference_unlocked(&obj->obj); + drm_gem_object_put_unlocked(&obj->obj); return &dfb->fb; err_unref: - drm_gem_object_unreference_unlocked(&obj->obj); + drm_gem_object_put_unlocked(&obj->obj); err: DRM_ERROR("failed to initialize framebuffer: %d\n", ret); return ERR_PTR(ret); diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c index 10e3fd87a83b..a2ce83f84800 100644 --- a/drivers/gpu/drm/armada/armada_fbdev.c +++ b/drivers/gpu/drm/armada/armada_fbdev.c @@ -51,13 +51,13 @@ static int armada_fb_create(struct drm_fb_helper *fbh, ret = armada_gem_linear_back(dev, obj); if (ret) { - drm_gem_object_unreference_unlocked(&obj->obj); + drm_gem_object_put_unlocked(&obj->obj); return ret; } ptr = armada_gem_map_object(dev, obj); if (!ptr) { - drm_gem_object_unreference_unlocked(&obj->obj); + drm_gem_object_put_unlocked(&obj->obj); return -ENOMEM; } @@ -67,7 +67,7 @@ static int armada_fb_create(struct drm_fb_helper *fbh, * A reference is now held by the framebuffer object if * successful, otherwise this drops the ref for the error path. */ - drm_gem_object_unreference_unlocked(&obj->obj); + drm_gem_object_put_unlocked(&obj->obj); if (IS_ERR(dfb)) return PTR_ERR(dfb); diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 7837e6adb16f..a97f509743a5 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -265,7 +265,7 @@ int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev, /* drop reference from allocate - handle holds it now */ DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle); err: - drm_gem_object_unreference_unlocked(&dobj->obj); + drm_gem_object_put_unlocked(&dobj->obj); return ret; } @@ -297,7 +297,7 @@ int armada_gem_create_ioctl(struct drm_device *dev, void *data, /* drop reference from allocate - handle holds it now */ DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle); err: - drm_gem_object_unreference_unlocked(&dobj->obj); + drm_gem_object_put_unlocked(&dobj->obj); return ret; } @@ -314,13 +314,13 @@ int armada_gem_mmap_ioctl(struct drm_device *dev, void *data, return -ENOENT; if (!dobj->obj.filp) { - drm_gem_object_unreference_unlocked(&dobj->obj); + drm_gem_object_put_unlocked(&dobj->obj); return -EINVAL; } addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE, MAP_SHARED, args->offset); - drm_gem_object_unreference_unlocked(&dobj->obj); + drm_gem_object_put_unlocked(&dobj->obj); if (IS_ERR_VALUE(addr)) return addr; @@ -375,7 +375,7 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data, } unref: - drm_gem_object_unreference_unlocked(&dobj->obj); + drm_gem_object_put_unlocked(&dobj->obj); return ret; } @@ -524,7 +524,7 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf) * Importing our own dmabuf(s) increases the * refcount on the gem object itself. 
*/ - drm_gem_object_reference(obj); + drm_gem_object_get(obj); return obj; } } diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c index edc44910d79f..b411b608821a 100644 --- a/drivers/gpu/drm/armada/armada_overlay.c +++ b/drivers/gpu/drm/armada/armada_overlay.c @@ -177,7 +177,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, * Take a reference on the new framebuffer - we want to * hold on to it while the hardware is displaying it. */ - drm_framebuffer_reference(fb); + drm_framebuffer_get(fb); if (plane->fb) armada_ovl_retire_fb(dplane, plane->fb); @@ -278,7 +278,7 @@ static int armada_ovl_plane_disable(struct drm_plane *plane, fb = xchg(&dplane->old_fb, NULL); if (fb) - drm_framebuffer_unreference(fb); + drm_framebuffer_put(fb); return 0; } diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 31ca883bda83..0e14f1572d05 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -607,10 +607,10 @@ static int adv7511_get_modes(struct adv7511 *adv7511, adv7511_set_config_csc(adv7511, connector, adv7511->rgb, drm_detect_hdmi_monitor(edid)); - kfree(edid); - cec_s_phys_addr_from_edid(adv7511->cec_adap, edid); + kfree(edid); + return count; } diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 562494873ca5..c2da5585e201 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1812,7 +1812,7 @@ int drm_atomic_debugfs_init(struct drm_minor *minor) */ static struct drm_pending_vblank_event *create_vblank_event( - struct drm_device *dev, uint64_t user_data) + struct drm_crtc *crtc, uint64_t user_data) { struct drm_pending_vblank_event *e = NULL; @@ -1822,7 +1822,8 @@ static struct drm_pending_vblank_event *create_vblank_event( e->event.base.type = DRM_EVENT_FLIP_COMPLETE; e->event.base.length = sizeof(e->event); - e->event.user_data = user_data; + e->event.vbl.crtc_id = crtc->base.id; + e->event.vbl.user_data = user_data; return e; } @@ -2076,7 +2077,7 @@ static int prepare_crtc_signaling(struct drm_device *dev, if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) { struct drm_pending_vblank_event *e; - e = create_vblank_event(dev, arg->user_data); + e = create_vblank_event(crtc, arg->user_data); if (!e) return -ENOMEM; diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index ae56d91433ff..71d712f1b56a 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -860,6 +860,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) { const struct drm_crtc_helper_funcs *funcs; + int ret; /* Shut down everything that needs a full modeset. 
*/ if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) @@ -883,6 +884,14 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) funcs->disable(crtc); else funcs->dpms(crtc, DRM_MODE_DPMS_OFF); + + if (!(dev->irq_enabled && dev->num_crtcs)) + continue; + + ret = drm_crtc_vblank_get(crtc); + WARN_ONCE(ret != -EINVAL, "driver forgot to call drm_crtc_vblank_off()\n"); + if (ret == 0) + drm_crtc_vblank_put(crtc); } } @@ -1772,16 +1781,16 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, } for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) { - /* commit tracked through new_crtc_state->commit, no need to do it explicitly */ - if (new_conn_state->crtc) - continue; - /* Userspace is not allowed to get ahead of the previous * commit with nonblocking ones. */ if (nonblock && old_conn_state->commit && !try_wait_for_completion(&old_conn_state->commit->flip_done)) return -EBUSY; + /* commit tracked through new_crtc_state->commit, no need to do it explicitly */ + if (new_conn_state->crtc) + continue; + commit = crtc_or_fake_commit(state, old_conn_state->crtc); if (!commit) return -ENOMEM; @@ -1790,18 +1799,17 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, } for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { - /* - * Unlike connectors, always track planes explicitly for - * async pageflip support. - */ - /* Userspace is not allowed to get ahead of the previous * commit with nonblocking ones. */ if (nonblock && old_plane_state->commit && !try_wait_for_completion(&old_plane_state->commit->flip_done)) return -EBUSY; - commit = crtc_or_fake_commit(state, old_plane_state->crtc); + /* + * Unlike connectors, always track planes explicitly for + * async pageflip support. 
+ */ + commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc); if (!commit) return -ENOMEM; diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index 373e33f22be4..020e7668dfab 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -112,7 +112,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN); if (!cma_obj->vaddr) { - dev_err(drm->dev, "failed to allocate buffer with size %zu\n", + dev_dbg(drm->dev, "failed to allocate buffer with size %zu\n", size); ret = -ENOMEM; goto error; diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index edd921adcf33..c9d5a6cd4d41 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -70,6 +70,12 @@ int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data, int drm_legacy_irq_control(struct drm_device *dev, void *data, struct drm_file *file_priv); +int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); + +int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); + /* drm_auth.c */ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index a78f03155466..9c435a4c0c82 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -663,6 +663,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_CRTC_QUEUE_SEQUENCE, drm_crtc_queue_sequence_ioctl, DRM_UNLOCKED), }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index 8090e50607fa..8d9824804b0c 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -1025,7 +1025,7 @@ retry: } e->event.base.type = DRM_EVENT_FLIP_COMPLETE; e->event.base.length = sizeof(e->event); - e->event.user_data = page_flip->user_data; + e->event.vbl.user_data = page_flip->user_data; ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base); if (ret) { kfree(e); diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index 3af6c20ba03b..13722c373a6a 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -251,7 +251,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe, } DRM_DEBUG_VBL("updating vblank count on crtc %u:" - " current=%u, diff=%u, hw=%u hw_last=%u\n", + " current=%llu, diff=%u, hw=%u hw_last=%u\n", pipe, vblank->count, diff, cur_vblank, vblank->last); if (diff == 0) { @@ -740,17 +740,31 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe, * Returns: * The software vblank counter. */ -u32 drm_crtc_vblank_count(struct drm_crtc *crtc) +u64 drm_crtc_vblank_count(struct drm_crtc *crtc) { return drm_vblank_count(crtc->dev, drm_crtc_index(crtc)); } EXPORT_SYMBOL(drm_crtc_vblank_count); -static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, +/** + * drm_vblank_count_and_time - retrieve "cooked" vblank counter value and the + * system timestamp corresponding to that vblank counter value. 
+ * @dev: DRM device + * @pipe: index of CRTC whose counter to retrieve + * @vblanktime: Pointer to ktime_t to receive the vblank timestamp. + * + * Fetches the "cooked" vblank count value that represents the number of + * vblank events since the system was booted, including lost events due to + * modesetting activity. Returns corresponding system timestamp of the time + * of the vblank interval that corresponds to the current vblank counter value. + * + * This is the legacy version of drm_crtc_vblank_count_and_time(). + */ +static u64 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, ktime_t *vblanktime) { struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; - u32 vblank_count; + u64 vblank_count; unsigned int seq; if (WARN_ON(pipe >= dev->num_crtcs)) { @@ -778,7 +792,7 @@ static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, * modesetting activity. Returns corresponding system timestamp of the time * of the vblank interval that corresponds to the current vblank counter value. */ -u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, +u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, ktime_t *vblanktime) { return drm_vblank_count_and_time(crtc->dev, drm_crtc_index(crtc), @@ -788,22 +802,30 @@ EXPORT_SYMBOL(drm_crtc_vblank_count_and_time); static void send_vblank_event(struct drm_device *dev, struct drm_pending_vblank_event *e, - unsigned long seq, ktime_t now) + u64 seq, ktime_t now) { - struct timespec64 tv = ktime_to_timespec64(now); - - e->event.sequence = seq; - /* - * e->event is a user space structure, with hardcoded unsigned - * 32-bit seconds/microseconds. This is safe as we always use - * monotonic timestamps since linux-4.15 - */ - e->event.tv_sec = tv.tv_sec; - e->event.tv_usec = tv.tv_nsec / 1000; - - trace_drm_vblank_event_delivered(e->base.file_priv, e->pipe, - e->event.sequence); + struct timespec64 tv; + switch (e->event.base.type) { + case DRM_EVENT_VBLANK: + case DRM_EVENT_FLIP_COMPLETE: + tv = ktime_to_timespec64(now); + e->event.vbl.sequence = seq; + /* + * e->event is a user space structure, with hardcoded unsigned + * 32-bit seconds/microseconds. 
This is safe as we always use + * monotonic timestamps since linux-4.15 + */ + e->event.vbl.tv_sec = tv.tv_sec; + e->event.vbl.tv_usec = tv.tv_nsec / 1000; + break; + case DRM_EVENT_CRTC_SEQUENCE: + if (seq) + e->event.seq.sequence = seq; + e->event.seq.time_ns = ktime_to_ns(now); + break; + } + trace_drm_vblank_event_delivered(e->base.file_priv, e->pipe, seq); drm_send_event_locked(dev, &e->base); } @@ -854,8 +876,7 @@ void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, assert_spin_locked(&dev->event_lock); e->pipe = pipe; - e->event.sequence = drm_crtc_accurate_vblank_count(crtc) + 1; - e->event.crtc_id = crtc->base.id; + e->sequence = drm_crtc_accurate_vblank_count(crtc) + 1; list_add_tail(&e->base.link, &dev->vblank_event_list); } EXPORT_SYMBOL(drm_crtc_arm_vblank_event); @@ -875,7 +896,8 @@ void drm_crtc_send_vblank_event(struct drm_crtc *crtc, struct drm_pending_vblank_event *e) { struct drm_device *dev = crtc->dev; - unsigned int seq, pipe = drm_crtc_index(crtc); + u64 seq; + unsigned int pipe = drm_crtc_index(crtc); ktime_t now; if (dev->num_crtcs > 0) { @@ -886,7 +908,6 @@ void drm_crtc_send_vblank_event(struct drm_crtc *crtc, now = ktime_get(); } e->pipe = pipe; - e->event.crtc_id = crtc->base.id; send_vblank_event(dev, e, seq, now); } EXPORT_SYMBOL(drm_crtc_send_vblank_event); @@ -1088,7 +1109,7 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc) ktime_t now; unsigned long irqflags; - unsigned int seq; + u64 seq; if (WARN_ON(pipe >= dev->num_crtcs)) return; @@ -1123,8 +1144,8 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc) if (e->pipe != pipe) continue; DRM_DEBUG("Sending premature vblank event on disable: " - "wanted %u, current %u\n", - e->event.sequence, seq); + "wanted %llu, current %llu\n", + e->sequence, seq); list_del(&e->base.link); drm_vblank_put(dev, pipe); send_vblank_event(dev, e, seq, now); @@ -1296,12 +1317,13 @@ int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data, return 0; } -static inline bool vblank_passed(u32 seq, u32 ref) +static inline bool vblank_passed(u64 seq, u64 ref) { return (seq - ref) <= (1 << 23); } static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe, + u64 req_seq, union drm_wait_vblank *vblwait, struct drm_file *file_priv) { @@ -1309,7 +1331,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe, struct drm_pending_vblank_event *e; ktime_t now; unsigned long flags; - unsigned int seq; + u64 seq; int ret; e = kzalloc(sizeof(*e), GFP_KERNEL); @@ -1320,8 +1342,14 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe, e->pipe = pipe; e->event.base.type = DRM_EVENT_VBLANK; - e->event.base.length = sizeof(e->event); - e->event.user_data = vblwait->request.signal; + e->event.base.length = sizeof(e->event.vbl); + e->event.vbl.user_data = vblwait->request.signal; + e->event.vbl.crtc_id = 0; + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe); + if (crtc) + e->event.vbl.crtc_id = crtc->base.id; + } spin_lock_irqsave(&dev->event_lock, flags); @@ -1344,21 +1372,20 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe, seq = drm_vblank_count_and_time(dev, pipe, &now); - DRM_DEBUG("event on vblank count %u, current %u, crtc %u\n", - vblwait->request.sequence, seq, pipe); + DRM_DEBUG("event on vblank count %llu, current %llu, crtc %u\n", + req_seq, seq, pipe); - trace_drm_vblank_event_queued(file_priv, pipe, - vblwait->request.sequence); + trace_drm_vblank_event_queued(file_priv, pipe, 
req_seq); - e->event.sequence = vblwait->request.sequence; - if (vblank_passed(seq, vblwait->request.sequence)) { + e->sequence = req_seq; + if (vblank_passed(seq, req_seq)) { drm_vblank_put(dev, pipe); send_vblank_event(dev, e, seq, now); vblwait->reply.sequence = seq; } else { /* drm_handle_vblank_events will call drm_vblank_put */ list_add_tail(&e->base.link, &dev->vblank_event_list); - vblwait->reply.sequence = vblwait->request.sequence; + vblwait->reply.sequence = req_seq; } spin_unlock_irqrestore(&dev->event_lock, flags); @@ -1384,6 +1411,22 @@ static bool drm_wait_vblank_is_query(union drm_wait_vblank *vblwait) _DRM_VBLANK_NEXTONMISS)); } +/* + * Widen a 32-bit param to 64-bits. + * + * \param narrow 32-bit value (missing upper 32 bits) + * \param near 64-bit value that should be 'close' to near + * + * This function returns a 64-bit value using the lower 32-bits from + * 'narrow' and constructing the upper 32-bits so that the result is + * as close as possible to 'near'. + */ + +static u64 widen_32_to_64(u32 narrow, u64 near) +{ + return near + (s32) (narrow - near); +} + static void drm_wait_vblank_reply(struct drm_device *dev, unsigned int pipe, struct drm_wait_vblank_reply *reply) { @@ -1407,7 +1450,8 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data, struct drm_vblank_crtc *vblank; union drm_wait_vblank *vblwait = data; int ret; - unsigned int flags, seq, pipe, high_pipe; + u64 req_seq, seq; + unsigned int flags, pipe, high_pipe; if (!dev->irq_enabled) return -EINVAL; @@ -1455,9 +1499,12 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data, switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) { case _DRM_VBLANK_RELATIVE: - vblwait->request.sequence += seq; + req_seq = seq + vblwait->request.sequence; + vblwait->request.sequence = req_seq; vblwait->request.type &= ~_DRM_VBLANK_RELATIVE; + break; case _DRM_VBLANK_ABSOLUTE: + req_seq = widen_32_to_64(vblwait->request.sequence, seq); break; default: ret = -EINVAL; @@ -1465,22 +1512,25 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data, } if ((flags & _DRM_VBLANK_NEXTONMISS) && - vblank_passed(seq, vblwait->request.sequence)) - vblwait->request.sequence = seq + 1; + vblank_passed(seq, req_seq)) { + req_seq = seq + 1; + vblwait->request.type &= ~_DRM_VBLANK_NEXTONMISS; + vblwait->request.sequence = req_seq; + } if (flags & _DRM_VBLANK_EVENT) { /* must hold on to the vblank ref until the event fires * drm_vblank_put will be called asynchronously */ - return drm_queue_vblank_event(dev, pipe, vblwait, file_priv); + return drm_queue_vblank_event(dev, pipe, req_seq, vblwait, file_priv); } - if (vblwait->request.sequence != seq) { - DRM_DEBUG("waiting on vblank count %u, crtc %u\n", - vblwait->request.sequence, pipe); + if (req_seq != seq) { + DRM_DEBUG("waiting on vblank count %llu, crtc %u\n", + req_seq, pipe); DRM_WAIT_ON(ret, vblank->queue, 3 * HZ, vblank_passed(drm_vblank_count(dev, pipe), - vblwait->request.sequence) || + req_seq) || !READ_ONCE(vblank->enabled)); } @@ -1502,7 +1552,7 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe) { struct drm_pending_vblank_event *e, *t; ktime_t now; - unsigned int seq; + u64 seq; assert_spin_locked(&dev->event_lock); @@ -1511,11 +1561,11 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe) list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { if (e->pipe != pipe) continue; - if (!vblank_passed(seq, e->event.sequence)) + if (!vblank_passed(seq, e->sequence)) continue; - 
DRM_DEBUG("vblank event on %u, current %u\n", - e->event.sequence, seq); + DRM_DEBUG("vblank event on %llu, current %llu\n", + e->sequence, seq); list_del(&e->base.link); drm_vblank_put(dev, pipe); @@ -1605,3 +1655,166 @@ bool drm_crtc_handle_vblank(struct drm_crtc *crtc) return drm_handle_vblank(crtc->dev, drm_crtc_index(crtc)); } EXPORT_SYMBOL(drm_crtc_handle_vblank); + +/* + * Get crtc VBLANK count. + * + * \param dev DRM device + * \param data user arguement, pointing to a drm_crtc_get_sequence structure. + * \param file_priv drm file private for the user's open file descriptor + */ + +int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_crtc *crtc; + struct drm_vblank_crtc *vblank; + int pipe; + struct drm_crtc_get_sequence *get_seq = data; + ktime_t now; + bool vblank_enabled; + int ret; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + + if (!dev->irq_enabled) + return -EINVAL; + + crtc = drm_crtc_find(dev, file_priv, get_seq->crtc_id); + if (!crtc) + return -ENOENT; + + pipe = drm_crtc_index(crtc); + + vblank = &dev->vblank[pipe]; + vblank_enabled = dev->vblank_disable_immediate && READ_ONCE(vblank->enabled); + + if (!vblank_enabled) { + ret = drm_crtc_vblank_get(crtc); + if (ret) { + DRM_DEBUG("crtc %d failed to acquire vblank counter, %d\n", pipe, ret); + return ret; + } + } + drm_modeset_lock(&crtc->mutex, NULL); + if (crtc->state) + get_seq->active = crtc->state->enable; + else + get_seq->active = crtc->enabled; + drm_modeset_unlock(&crtc->mutex); + get_seq->sequence = drm_vblank_count_and_time(dev, pipe, &now); + get_seq->sequence_ns = ktime_to_ns(now); + if (!vblank_enabled) + drm_crtc_vblank_put(crtc); + return 0; +} + +/* + * Queue a event for VBLANK sequence + * + * \param dev DRM device + * \param data user arguement, pointing to a drm_crtc_queue_sequence structure. 
+ * \param file_priv drm file private for the user's open file descriptor + */ + +int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_crtc *crtc; + struct drm_vblank_crtc *vblank; + int pipe; + struct drm_crtc_queue_sequence *queue_seq = data; + ktime_t now; + struct drm_pending_vblank_event *e; + u32 flags; + u64 seq; + u64 req_seq; + int ret; + unsigned long spin_flags; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + + if (!dev->irq_enabled) + return -EINVAL; + + crtc = drm_crtc_find(dev, file_priv, queue_seq->crtc_id); + if (!crtc) + return -ENOENT; + + flags = queue_seq->flags; + /* Check valid flag bits */ + if (flags & ~(DRM_CRTC_SEQUENCE_RELATIVE| + DRM_CRTC_SEQUENCE_NEXT_ON_MISS)) + return -EINVAL; + + pipe = drm_crtc_index(crtc); + + vblank = &dev->vblank[pipe]; + + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (e == NULL) + return -ENOMEM; + + ret = drm_crtc_vblank_get(crtc); + if (ret) { + DRM_DEBUG("crtc %d failed to acquire vblank counter, %d\n", pipe, ret); + goto err_free; + } + + seq = drm_vblank_count_and_time(dev, pipe, &now); + req_seq = queue_seq->sequence; + + if (flags & DRM_CRTC_SEQUENCE_RELATIVE) + req_seq += seq; + + if ((flags & DRM_CRTC_SEQUENCE_NEXT_ON_MISS) && vblank_passed(seq, req_seq)) + req_seq = seq + 1; + + e->pipe = pipe; + e->event.base.type = DRM_EVENT_CRTC_SEQUENCE; + e->event.base.length = sizeof(e->event.seq); + e->event.seq.user_data = queue_seq->user_data; + + spin_lock_irqsave(&dev->event_lock, spin_flags); + + /* + * drm_crtc_vblank_off() might have been called after we called + * drm_crtc_vblank_get(). drm_crtc_vblank_off() holds event_lock around the + * vblank disable, so no need for further locking. The reference from + * drm_crtc_vblank_get() protects against vblank disable from another source. 
+ */ + if (!READ_ONCE(vblank->enabled)) { + ret = -EINVAL; + goto err_unlock; + } + + ret = drm_event_reserve_init_locked(dev, file_priv, &e->base, + &e->event.base); + + if (ret) + goto err_unlock; + + e->sequence = req_seq; + + if (vblank_passed(seq, req_seq)) { + drm_crtc_vblank_put(crtc); + send_vblank_event(dev, e, seq, now); + queue_seq->sequence = seq; + } else { + /* drm_handle_vblank_events will call drm_vblank_put */ + list_add_tail(&e->base.link, &dev->vblank_event_list); + queue_seq->sequence = req_seq; + } + + spin_unlock_irqrestore(&dev->event_lock, spin_flags); + return 0; + +err_unlock: + spin_unlock_irqrestore(&dev->event_lock, spin_flags); + drm_crtc_vblank_put(crtc); +err_free: + kfree(e); + return ret; +} diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index a3c96d2ea41c..b7c4709f7b34 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -1008,6 +1008,10 @@ static const struct panel_desc hitachi_tx23d38vm0caa = { .width = 195, .height = 117, }, + .delay = { + .enable = 160, + .disable = 160, + }, }; static const struct drm_display_mode innolux_at043tn24_mode = { @@ -1018,8 +1022,8 @@ static const struct drm_display_mode innolux_at043tn24_mode = { .htotal = 480 + 2 + 41 + 2, .vdisplay = 272, .vsync_start = 272 + 2, - .vsync_end = 272 + 2 + 11, - .vtotal = 272 + 2 + 11 + 2, + .vsync_end = 272 + 2 + 10, + .vtotal = 272 + 2 + 10 + 2, .vrefresh = 60, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, }; @@ -1033,6 +1037,7 @@ static const struct panel_desc innolux_at043tn24 = { .height = 54, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X24, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE, }; static const struct drm_display_mode innolux_at070tn92_mode = { @@ -1832,6 +1837,30 @@ static const struct panel_desc tianma_tm070jdhg30 = { .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, }; +static const struct drm_display_mode toshiba_lt089ac29000_mode = { + .clock = 79500, + .hdisplay = 1280, + .hsync_start = 1280 + 192, + .hsync_end = 1280 + 192 + 128, + .htotal = 1280 + 192 + 128 + 64, + .vdisplay = 768, + .vsync_start = 768 + 20, + .vsync_end = 768 + 20 + 7, + .vtotal = 768 + 20 + 7 + 3, + .vrefresh = 60, +}; + +static const struct panel_desc toshiba_lt089ac29000 = { + .modes = &toshiba_lt089ac29000_mode, + .num_modes = 1, + .size = { + .width = 194, + .height = 116, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE, +}; + static const struct drm_display_mode tpk_f07a_0102_mode = { .clock = 33260, .hdisplay = 800, @@ -2114,6 +2143,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "tianma,tm070jdhg30", .data = &tianma_tm070jdhg30, }, { + .compatible = "toshiba,lt089ac29000", + .data = &toshiba_lt089ac29000, + }, { .compatible = "tpk,f07a-0102", .data = &tpk_f07a_0102, }, { diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile index 43c753cafc88..301b5b1452db 100644 --- a/drivers/gpu/drm/sun4i/Makefile +++ b/drivers/gpu/drm/sun4i/Makefile @@ -1,24 +1,25 @@ -sun4i-drm-y += sun4i_drv.o -sun4i-drm-y += sun4i_framebuffer.o +sun4i-backend-y += sun4i_backend.o sun4i_layer.o -sun4i-drm-hdmi-y += sun4i_hdmi_enc.o -sun4i-drm-hdmi-y += sun4i_hdmi_i2c.o -sun4i-drm-hdmi-y += sun4i_hdmi_ddc_clk.o -sun4i-drm-hdmi-y += sun4i_hdmi_tmds_clk.o +sun4i-drm-y += sun4i_drv.o +sun4i-drm-y += sun4i_framebuffer.o -sun4i-tcon-y += sun4i_tcon.o -sun4i-tcon-y += sun4i_rgb.o -sun4i-tcon-y += 
sun4i_dotclock.o -sun4i-tcon-y += sun4i_crtc.o +sun4i-drm-hdmi-y += sun4i_hdmi_ddc_clk.o +sun4i-drm-hdmi-y += sun4i_hdmi_enc.o +sun4i-drm-hdmi-y += sun4i_hdmi_i2c.o +sun4i-drm-hdmi-y += sun4i_hdmi_tmds_clk.o -sun4i-backend-y += sun4i_backend.o sun4i_layer.o +sun8i-mixer-y += sun8i_mixer.o sun8i_layer.o -sun8i-mixer-y += sun8i_mixer.o sun8i_layer.o +sun4i-tcon-y += sun4i_crtc.o +sun4i-tcon-y += sun4i_dotclock.o +sun4i-tcon-y += sun4i_tcon.o +sun4i-tcon-y += sun4i_rgb.o -obj-$(CONFIG_DRM_SUN4I) += sun4i-drm.o sun4i-tcon.o -obj-$(CONFIG_DRM_SUN4I) += sun6i_drc.o +obj-$(CONFIG_DRM_SUN4I) += sun4i-drm.o +obj-$(CONFIG_DRM_SUN4I) += sun4i-tcon.o obj-$(CONFIG_DRM_SUN4I) += sun4i_tv.o +obj-$(CONFIG_DRM_SUN4I) += sun6i_drc.o -obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o +obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o obj-$(CONFIG_DRM_SUN4I_HDMI) += sun4i-drm-hdmi.o -obj-$(CONFIG_DRM_SUN8I_MIXER) += sun8i-mixer.o +obj-$(CONFIG_DRM_SUN8I_MIXER) += sun8i-mixer.o diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index 4fefd8add714..847eecbe4d14 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c @@ -20,6 +20,7 @@ #include <linux/component.h> #include <linux/list.h> +#include <linux/of_device.h> #include <linux/of_graph.h> #include <linux/reset.h> @@ -28,6 +29,11 @@ #include "sun4i_layer.h" #include "sunxi_engine.h" +struct sun4i_backend_quirks { + /* backend <-> TCON muxing selection done in backend */ + bool needs_output_muxing; +}; + static const u32 sunxi_rgb2yuv_coef[12] = { 0x00000107, 0x00000204, 0x00000064, 0x00000108, 0x00003f69, 0x00003ed6, 0x000001c1, 0x00000808, @@ -216,6 +222,13 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend, paddr = drm_fb_cma_get_gem_addr(fb, state, 0); DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr); + /* + * backend DMA accesses DRAM directly, bypassing the system + * bus. As such, the address range is different and the buffer + * address needs to be corrected. + */ + paddr -= PHYS_OFFSET; + /* Write the 32 lower bits of the address (in bits) */ lo_paddr = paddr << 3; DRM_DEBUG_DRIVER("Setting address lower bits to 0x%x\n", lo_paddr); @@ -338,6 +351,7 @@ static int sun4i_backend_bind(struct device *dev, struct device *master, struct drm_device *drm = data; struct sun4i_drv *drv = drm->dev_private; struct sun4i_backend *backend; + const struct sun4i_backend_quirks *quirks; struct resource *res; void __iomem *regs; int i, ret; @@ -432,6 +446,27 @@ static int sun4i_backend_bind(struct device *dev, struct device *master, SUN4I_BACKEND_MODCTL_DEBE_EN | SUN4I_BACKEND_MODCTL_START_CTL); + /* Set output selection if needed */ + quirks = of_device_get_match_data(dev); + if (quirks->needs_output_muxing) { + /* + * We assume there is no dynamic muxing of backends + * and TCONs, so we select the backend with same ID. + * + * While dynamic selection might be interesting, since + * the CRTC is tied to the TCON, while the layers are + * tied to the backends, this means, we will need to + * switch between groups of layers. There might not be + * a way to represent this constraint in DRM. + */ + regmap_update_bits(backend->engine.regs, + SUN4I_BACKEND_MODCTL_REG, + SUN4I_BACKEND_MODCTL_OUT_SEL, + (backend->engine.id + ? 
SUN4I_BACKEND_MODCTL_OUT_LCD1 + : SUN4I_BACKEND_MODCTL_OUT_LCD0)); + } + return 0; err_disable_ram_clk: @@ -479,10 +514,44 @@ static int sun4i_backend_remove(struct platform_device *pdev) return 0; } +static const struct sun4i_backend_quirks sun4i_backend_quirks = { + .needs_output_muxing = true, +}; + +static const struct sun4i_backend_quirks sun5i_backend_quirks = { +}; + +static const struct sun4i_backend_quirks sun6i_backend_quirks = { +}; + +static const struct sun4i_backend_quirks sun7i_backend_quirks = { + .needs_output_muxing = true, +}; + +static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = { +}; + static const struct of_device_id sun4i_backend_of_table[] = { - { .compatible = "allwinner,sun5i-a13-display-backend" }, - { .compatible = "allwinner,sun6i-a31-display-backend" }, - { .compatible = "allwinner,sun8i-a33-display-backend" }, + { + .compatible = "allwinner,sun4i-a10-display-backend", + .data = &sun4i_backend_quirks, + }, + { + .compatible = "allwinner,sun5i-a13-display-backend", + .data = &sun5i_backend_quirks, + }, + { + .compatible = "allwinner,sun6i-a31-display-backend", + .data = &sun6i_backend_quirks, + }, + { + .compatible = "allwinner,sun7i-a20-display-backend", + .data = &sun7i_backend_quirks, + }, + { + .compatible = "allwinner,sun8i-a33-display-backend", + .data = &sun8i_a33_backend_quirks, + }, { } }; MODULE_DEVICE_TABLE(of, sun4i_backend_of_table); diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.h b/drivers/gpu/drm/sun4i/sun4i_backend.h index 21945af67a9d..ac3cc029f5cd 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.h +++ b/drivers/gpu/drm/sun4i/sun4i_backend.h @@ -25,7 +25,8 @@ #define SUN4I_BACKEND_MODCTL_LINE_SEL BIT(29) #define SUN4I_BACKEND_MODCTL_ITLMOD_EN BIT(28) #define SUN4I_BACKEND_MODCTL_OUT_SEL GENMASK(22, 20) -#define SUN4I_BACKEND_MODCTL_OUT_LCD (0 << 20) +#define SUN4I_BACKEND_MODCTL_OUT_LCD0 (0 << 20) +#define SUN4I_BACKEND_MODCTL_OUT_LCD1 (1 << 20) #define SUN4I_BACKEND_MODCTL_OUT_FE0 (6 << 20) #define SUN4I_BACKEND_MODCTL_OUT_FE1 (7 << 20) #define SUN4I_BACKEND_MODCTL_HWC_EN BIT(16) diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c index d097c6f93ad0..5decae0069d0 100644 --- a/drivers/gpu/drm/sun4i/sun4i_crtc.c +++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c @@ -30,6 +30,22 @@ #include "sunxi_engine.h" #include "sun4i_tcon.h" +/* + * While this isn't really working in the DRM theory, in practice we + * can only ever have one encoder per TCON since we have a mux in our + * TCON. 
+ */ +static struct drm_encoder *sun4i_crtc_get_encoder(struct drm_crtc *crtc) +{ + struct drm_encoder *encoder; + + drm_for_each_encoder(encoder, crtc->dev) + if (encoder->crtc == crtc) + return encoder; + + return NULL; +} + static void sun4i_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { @@ -72,11 +88,12 @@ static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc, static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { + struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc); struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); DRM_DEBUG_DRIVER("Disabling the CRTC\n"); - sun4i_tcon_disable(scrtc->tcon); + sun4i_tcon_set_status(scrtc->tcon, encoder, false); if (crtc->state->event && !crtc->state->active) { spin_lock_irq(&crtc->dev->event_lock); @@ -90,11 +107,21 @@ static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc, static void sun4i_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { + struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc); struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); DRM_DEBUG_DRIVER("Enabling the CRTC\n"); - sun4i_tcon_enable(scrtc->tcon); + sun4i_tcon_set_status(scrtc->tcon, encoder, true); +} + +static void sun4i_crtc_mode_set_nofb(struct drm_crtc *crtc) +{ + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc); + struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); + + sun4i_tcon_mode_set(scrtc->tcon, encoder, mode); } static const struct drm_crtc_helper_funcs sun4i_crtc_helper_funcs = { @@ -102,6 +129,7 @@ static const struct drm_crtc_helper_funcs sun4i_crtc_helper_funcs = { .atomic_flush = sun4i_crtc_atomic_flush, .atomic_enable = sun4i_crtc_atomic_enable, .atomic_disable = sun4i_crtc_atomic_disable, + .mode_set_nofb = sun4i_crtc_mode_set_nofb, }; static int sun4i_crtc_enable_vblank(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index b5879d4620d8..75c76cdd82bc 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -11,6 +11,7 @@ */ #include <linux/component.h> +#include <linux/kfifo.h> #include <linux/of_graph.h> #include <linux/of_reserved_mem.h> @@ -177,16 +178,20 @@ static bool sun4i_drv_node_is_connector(struct device_node *node) static bool sun4i_drv_node_is_frontend(struct device_node *node) { - return of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") || + return of_device_is_compatible(node, "allwinner,sun4i-a10-display-frontend") || + of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") || of_device_is_compatible(node, "allwinner,sun6i-a31-display-frontend") || + of_device_is_compatible(node, "allwinner,sun7i-a20-display-frontend") || of_device_is_compatible(node, "allwinner,sun8i-a33-display-frontend"); } static bool sun4i_drv_node_is_tcon(struct device_node *node) { - return of_device_is_compatible(node, "allwinner,sun5i-a13-tcon") || + return of_device_is_compatible(node, "allwinner,sun4i-a10-tcon") || + of_device_is_compatible(node, "allwinner,sun5i-a13-tcon") || of_device_is_compatible(node, "allwinner,sun6i-a31-tcon") || of_device_is_compatible(node, "allwinner,sun6i-a31s-tcon") || + of_device_is_compatible(node, "allwinner,sun7i-a20-tcon") || of_device_is_compatible(node, "allwinner,sun8i-a33-tcon") || of_device_is_compatible(node, "allwinner,sun8i-v3s-tcon"); } @@ -222,29 +227,15 @@ static int compare_of(struct device 
*dev, void *data) * matching system handles this for us. */ struct endpoint_list { - struct device_node *node; - struct list_head list; + DECLARE_KFIFO(fifo, struct device_node *, 16); }; -static bool node_is_in_list(struct list_head *endpoints, - struct device_node *node) -{ - struct endpoint_list *endpoint; - - list_for_each_entry(endpoint, endpoints, list) - if (endpoint->node == node) - return true; - - return false; -} - static int sun4i_drv_add_endpoints(struct device *dev, - struct list_head *endpoints, + struct endpoint_list *list, struct component_match **match, struct device_node *node) { struct device_node *port, *ep, *remote; - struct endpoint_list *endpoint; int count = 0; /* @@ -304,19 +295,7 @@ static int sun4i_drv_add_endpoints(struct device *dev, } } - /* skip downstream node if it is already in the queue */ - if (node_is_in_list(endpoints, remote)) - continue; - - /* Add downstream nodes to the queue */ - endpoint = kzalloc(sizeof(*endpoint), GFP_KERNEL); - if (!endpoint) { - of_node_put(remote); - return -ENOMEM; - } - - endpoint->node = remote; - list_add_tail(&endpoint->list, endpoints); + kfifo_put(&list->fifo, remote); } return count; @@ -325,10 +304,11 @@ static int sun4i_drv_add_endpoints(struct device *dev, static int sun4i_drv_probe(struct platform_device *pdev) { struct component_match *match = NULL; - struct device_node *np = pdev->dev.of_node; - struct endpoint_list *endpoint, *endpoint_temp; + struct device_node *np = pdev->dev.of_node, *endpoint; + struct endpoint_list list; int i, ret, count = 0; - LIST_HEAD(endpoints); + + INIT_KFIFO(list.fifo); for (i = 0;; i++) { struct device_node *pipeline = of_parse_phandle(np, @@ -337,31 +317,19 @@ static int sun4i_drv_probe(struct platform_device *pdev) if (!pipeline) break; - endpoint = kzalloc(sizeof(*endpoint), GFP_KERNEL); - if (!endpoint) { - ret = -ENOMEM; - goto err_free_endpoints; - } - - endpoint->node = pipeline; - list_add_tail(&endpoint->list, &endpoints); + kfifo_put(&list.fifo, pipeline); } - list_for_each_entry_safe(endpoint, endpoint_temp, &endpoints, list) { + while (kfifo_get(&list.fifo, &endpoint)) { /* process this endpoint */ - ret = sun4i_drv_add_endpoints(&pdev->dev, &endpoints, &match, - endpoint->node); + ret = sun4i_drv_add_endpoints(&pdev->dev, &list, &match, + endpoint); /* sun4i_drv_add_endpoints can fail to allocate memory */ if (ret < 0) - goto err_free_endpoints; + return ret; count += ret; - - /* delete and cleanup the current entry */ - list_del(&endpoint->list); - of_node_put(endpoint->node); - kfree(endpoint); } if (count) @@ -370,15 +338,6 @@ static int sun4i_drv_probe(struct platform_device *pdev) match); else return 0; - -err_free_endpoints: - list_for_each_entry_safe(endpoint, endpoint_temp, &endpoints, list) { - list_del(&endpoint->list); - of_node_put(endpoint->node); - kfree(endpoint); - } - - return ret; } static int sun4i_drv_remove(struct platform_device *pdev) @@ -387,10 +346,12 @@ static int sun4i_drv_remove(struct platform_device *pdev) } static const struct of_device_id sun4i_drv_of_table[] = { + { .compatible = "allwinner,sun4i-a10-display-engine" }, { .compatible = "allwinner,sun5i-a10s-display-engine" }, { .compatible = "allwinner,sun5i-a13-display-engine" }, { .compatible = "allwinner,sun6i-a31-display-engine" }, { .compatible = "allwinner,sun6i-a31s-display-engine" }, + { .compatible = "allwinner,sun7i-a20-display-engine" }, { .compatible = "allwinner,sun8i-a33-display-engine" }, { .compatible = "allwinner,sun8i-v3s-display-engine" }, { } diff --git 
a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c index 04f85b1cf922..e826da34e919 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c @@ -13,7 +13,6 @@ #include <linux/clk-provider.h> #include <linux/regmap.h> -#include "sun4i_tcon.h" #include "sun4i_hdmi.h" struct sun4i_ddc { diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index 6ca6e6a74c4a..dda904ec0534 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c @@ -30,7 +30,6 @@ #include "sun4i_crtc.h" #include "sun4i_drv.h" #include "sun4i_hdmi.h" -#include "sun4i_tcon.h" static inline struct sun4i_hdmi * drm_encoder_to_sun4i_hdmi(struct drm_encoder *encoder) @@ -86,8 +85,6 @@ static int sun4i_hdmi_atomic_check(struct drm_encoder *encoder, static void sun4i_hdmi_disable(struct drm_encoder *encoder) { struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder); - struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc); - struct sun4i_tcon *tcon = crtc->tcon; u32 val; DRM_DEBUG_DRIVER("Disabling the HDMI Output\n"); @@ -95,22 +92,16 @@ static void sun4i_hdmi_disable(struct drm_encoder *encoder) val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG); val &= ~SUN4I_HDMI_VID_CTRL_ENABLE; writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG); - - sun4i_tcon_channel_disable(tcon, 1); } static void sun4i_hdmi_enable(struct drm_encoder *encoder) { struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder); - struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc); - struct sun4i_tcon *tcon = crtc->tcon; u32 val = 0; DRM_DEBUG_DRIVER("Enabling the HDMI Output\n"); - sun4i_tcon_channel_enable(tcon, 1); - sun4i_hdmi_setup_avi_infoframes(hdmi, mode); val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI); val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END); @@ -128,15 +119,9 @@ static void sun4i_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder); - struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc); - struct sun4i_tcon *tcon = crtc->tcon; unsigned int x, y; u32 val; - sun4i_tcon1_mode_set(tcon, mode); - sun4i_tcon_set_mux(tcon, 1, encoder); - - clk_set_rate(tcon->sclk1, mode->crtc_clock * 1000); clk_set_rate(hdmi->mod_clk, mode->crtc_clock * 1000); clk_set_rate(hdmi->tmds_clk, mode->crtc_clock * 1000); @@ -289,6 +274,58 @@ static const struct cec_pin_ops sun4i_hdmi_cec_pin_ops = { #define SUN4I_HDMI_PAD_CTRL1_MASK (GENMASK(24, 7) | GENMASK(5, 0)) #define SUN4I_HDMI_PLL_CTRL_MASK (GENMASK(31, 8) | GENMASK(3, 0)) +/* Only difference from sun5i is AMP is 4 instead of 6 */ +static const struct sun4i_hdmi_variant sun4i_variant = { + .pad_ctrl0_init_val = SUN4I_HDMI_PAD_CTRL0_TXEN | + SUN4I_HDMI_PAD_CTRL0_CKEN | + SUN4I_HDMI_PAD_CTRL0_PWENG | + SUN4I_HDMI_PAD_CTRL0_PWEND | + SUN4I_HDMI_PAD_CTRL0_PWENC | + SUN4I_HDMI_PAD_CTRL0_LDODEN | + SUN4I_HDMI_PAD_CTRL0_LDOCEN | + SUN4I_HDMI_PAD_CTRL0_BIASEN, + .pad_ctrl1_init_val = SUN4I_HDMI_PAD_CTRL1_REG_AMP(4) | + SUN4I_HDMI_PAD_CTRL1_REG_EMP(2) | + SUN4I_HDMI_PAD_CTRL1_REG_DENCK | + SUN4I_HDMI_PAD_CTRL1_REG_DEN | + SUN4I_HDMI_PAD_CTRL1_EMPCK_OPT | + SUN4I_HDMI_PAD_CTRL1_EMP_OPT | + SUN4I_HDMI_PAD_CTRL1_AMPCK_OPT | + SUN4I_HDMI_PAD_CTRL1_AMP_OPT, + .pll_ctrl_init_val = SUN4I_HDMI_PLL_CTRL_VCO_S(8) | + SUN4I_HDMI_PLL_CTRL_CS(7) | + 
SUN4I_HDMI_PLL_CTRL_CP_S(15) | + SUN4I_HDMI_PLL_CTRL_S(7) | + SUN4I_HDMI_PLL_CTRL_VCO_GAIN(4) | + SUN4I_HDMI_PLL_CTRL_SDIV2 | + SUN4I_HDMI_PLL_CTRL_LDO2_EN | + SUN4I_HDMI_PLL_CTRL_LDO1_EN | + SUN4I_HDMI_PLL_CTRL_HV_IS_33 | + SUN4I_HDMI_PLL_CTRL_BWS | + SUN4I_HDMI_PLL_CTRL_PLL_EN, + + .ddc_clk_reg = REG_FIELD(SUN4I_HDMI_DDC_CLK_REG, 0, 6), + .ddc_clk_pre_divider = 2, + .ddc_clk_m_offset = 1, + + .field_ddc_en = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 31, 31), + .field_ddc_start = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 30, 30), + .field_ddc_reset = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 0, 0), + .field_ddc_addr_reg = REG_FIELD(SUN4I_HDMI_DDC_ADDR_REG, 0, 31), + .field_ddc_slave_addr = REG_FIELD(SUN4I_HDMI_DDC_ADDR_REG, 0, 6), + .field_ddc_int_status = REG_FIELD(SUN4I_HDMI_DDC_INT_STATUS_REG, 0, 8), + .field_ddc_fifo_clear = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 31, 31), + .field_ddc_fifo_rx_thres = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 4, 7), + .field_ddc_fifo_tx_thres = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 0, 3), + .field_ddc_byte_count = REG_FIELD(SUN4I_HDMI_DDC_BYTE_COUNT_REG, 0, 9), + .field_ddc_cmd = REG_FIELD(SUN4I_HDMI_DDC_CMD_REG, 0, 2), + .field_ddc_sda_en = REG_FIELD(SUN4I_HDMI_DDC_LINE_CTRL_REG, 9, 9), + .field_ddc_sck_en = REG_FIELD(SUN4I_HDMI_DDC_LINE_CTRL_REG, 8, 8), + + .ddc_fifo_reg = SUN4I_HDMI_DDC_FIFO_DATA_REG, + .ddc_fifo_has_dir = true, +}; + static const struct sun4i_hdmi_variant sun5i_variant = { .pad_ctrl0_init_val = SUN4I_HDMI_PAD_CTRL0_TXEN | SUN4I_HDMI_PAD_CTRL0_CKEN | @@ -613,6 +650,7 @@ static int sun4i_hdmi_remove(struct platform_device *pdev) } static const struct of_device_id sun4i_hdmi_of_table[] = { + { .compatible = "allwinner,sun4i-a10-hdmi", .data = &sun4i_variant, }, { .compatible = "allwinner,sun5i-a10s-hdmi", .data = &sun5i_variant, }, { .compatible = "allwinner,sun6i-a31-hdmi", .data = &sun6i_variant, }, { } diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c index 1b6b37aefc38..dc332ea56f6c 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c @@ -12,7 +12,6 @@ #include <linux/clk-provider.h> -#include "sun4i_tcon.h" #include "sun4i_hdmi.h" struct sun4i_tmds { diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c index 7cd7090ad63a..832f8f9bc47f 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.c +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c @@ -134,13 +134,10 @@ static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder) DRM_DEBUG_DRIVER("Enabling RGB output\n"); - if (!IS_ERR(tcon->panel)) + if (!IS_ERR(tcon->panel)) { drm_panel_prepare(tcon->panel); - - sun4i_tcon_channel_enable(tcon, 0); - - if (!IS_ERR(tcon->panel)) drm_panel_enable(tcon->panel); + } } static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder) @@ -150,31 +147,13 @@ static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder) DRM_DEBUG_DRIVER("Disabling RGB output\n"); - if (!IS_ERR(tcon->panel)) + if (!IS_ERR(tcon->panel)) { drm_panel_disable(tcon->panel); - - sun4i_tcon_channel_disable(tcon, 0); - - if (!IS_ERR(tcon->panel)) drm_panel_unprepare(tcon->panel); -} - -static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder); - struct sun4i_tcon *tcon = rgb->tcon; - - sun4i_tcon0_mode_set(tcon, mode); - sun4i_tcon_set_mux(tcon, 0, encoder); - - /* FIXME: This seems to be board specific */ - 
clk_set_phase(tcon->dclk, 120); + } } static struct drm_encoder_helper_funcs sun4i_rgb_enc_helper_funcs = { - .mode_set = sun4i_rgb_encoder_mode_set, .disable = sun4i_rgb_encoder_disable, .enable = sun4i_rgb_encoder_enable, }; diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 68751b999877..e122f5b2a395 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -35,66 +35,61 @@ #include "sun4i_tcon.h" #include "sunxi_engine.h" -void sun4i_tcon_disable(struct sun4i_tcon *tcon) +static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel, + bool enabled) { - DRM_DEBUG_DRIVER("Disabling TCON\n"); + struct clk *clk; - /* Disable the TCON */ - regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG, - SUN4I_TCON_GCTL_TCON_ENABLE, 0); -} -EXPORT_SYMBOL(sun4i_tcon_disable); - -void sun4i_tcon_enable(struct sun4i_tcon *tcon) -{ - DRM_DEBUG_DRIVER("Enabling TCON\n"); - - /* Enable the TCON */ - regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG, - SUN4I_TCON_GCTL_TCON_ENABLE, - SUN4I_TCON_GCTL_TCON_ENABLE); -} -EXPORT_SYMBOL(sun4i_tcon_enable); - -void sun4i_tcon_channel_disable(struct sun4i_tcon *tcon, int channel) -{ - DRM_DEBUG_DRIVER("Disabling TCON channel %d\n", channel); - - /* Disable the TCON's channel */ - if (channel == 0) { + switch (channel) { + case 0: regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG, - SUN4I_TCON0_CTL_TCON_ENABLE, 0); - clk_disable_unprepare(tcon->dclk); + SUN4I_TCON0_CTL_TCON_ENABLE, + enabled ? SUN4I_TCON0_CTL_TCON_ENABLE : 0); + clk = tcon->dclk; + break; + case 1: + WARN_ON(!tcon->quirks->has_channel_1); + regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG, + SUN4I_TCON1_CTL_TCON_ENABLE, + enabled ? SUN4I_TCON1_CTL_TCON_ENABLE : 0); + clk = tcon->sclk1; + break; + default: + DRM_WARN("Unknown channel... doing nothing\n"); return; } - WARN_ON(!tcon->quirks->has_channel_1); - regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG, - SUN4I_TCON1_CTL_TCON_ENABLE, 0); - clk_disable_unprepare(tcon->sclk1); + if (enabled) + clk_prepare_enable(clk); + else + clk_disable_unprepare(clk); } -EXPORT_SYMBOL(sun4i_tcon_channel_disable); -void sun4i_tcon_channel_enable(struct sun4i_tcon *tcon, int channel) +void sun4i_tcon_set_status(struct sun4i_tcon *tcon, + const struct drm_encoder *encoder, + bool enabled) { - DRM_DEBUG_DRIVER("Enabling TCON channel %d\n", channel); + int channel; - /* Enable the TCON's channel */ - if (channel == 0) { - regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG, - SUN4I_TCON0_CTL_TCON_ENABLE, - SUN4I_TCON0_CTL_TCON_ENABLE); - clk_prepare_enable(tcon->dclk); + switch (encoder->encoder_type) { + case DRM_MODE_ENCODER_NONE: + channel = 0; + break; + case DRM_MODE_ENCODER_TMDS: + case DRM_MODE_ENCODER_TVDAC: + channel = 1; + break; + default: + DRM_DEBUG_DRIVER("Unknown encoder type, doing nothing...\n"); return; } - WARN_ON(!tcon->quirks->has_channel_1); - regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG, - SUN4I_TCON1_CTL_TCON_ENABLE, - SUN4I_TCON1_CTL_TCON_ENABLE); - clk_prepare_enable(tcon->sclk1); + regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG, + SUN4I_TCON_GCTL_TCON_ENABLE, + enabled ? 
SUN4I_TCON_GCTL_TCON_ENABLE : 0); + + sun4i_tcon_channel_set_status(tcon, channel, enabled); } -EXPORT_SYMBOL(sun4i_tcon_channel_enable); void sun4i_tcon_enable_vblank(struct sun4i_tcon *tcon, bool enable) { @@ -134,7 +129,7 @@ static struct sun4i_tcon *sun4i_get_tcon0(struct drm_device *drm) } void sun4i_tcon_set_mux(struct sun4i_tcon *tcon, int channel, - struct drm_encoder *encoder) + const struct drm_encoder *encoder) { int ret = -ENOTSUPP; @@ -144,9 +139,8 @@ void sun4i_tcon_set_mux(struct sun4i_tcon *tcon, int channel, DRM_DEBUG_DRIVER("Muxing encoder %s to CRTC %s: %d\n", encoder->name, encoder->crtc->name, ret); } -EXPORT_SYMBOL(sun4i_tcon_set_mux); -static int sun4i_tcon_get_clk_delay(struct drm_display_mode *mode, +static int sun4i_tcon_get_clk_delay(const struct drm_display_mode *mode, int channel) { int delay = mode->vtotal - mode->vdisplay; @@ -164,15 +158,26 @@ static int sun4i_tcon_get_clk_delay(struct drm_display_mode *mode, return delay; } -void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon, - struct drm_display_mode *mode) +static void sun4i_tcon0_mode_set_common(struct sun4i_tcon *tcon, + const struct drm_display_mode *mode) +{ + /* Configure the dot clock */ + clk_set_rate(tcon->dclk, mode->crtc_clock * 1000); + + /* Set the resolution */ + regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG, + SUN4I_TCON0_BASIC0_X(mode->crtc_hdisplay) | + SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay)); +} + +static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, + const struct drm_display_mode *mode) { unsigned int bp, hsync, vsync; u8 clk_delay; u32 val = 0; - /* Configure the dot clock */ - clk_set_rate(tcon->dclk, mode->crtc_clock * 1000); + sun4i_tcon0_mode_set_common(tcon, mode); /* Adjust clock delay */ clk_delay = sun4i_tcon_get_clk_delay(mode, 0); @@ -180,11 +185,6 @@ void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon, SUN4I_TCON0_CTL_CLK_DELAY_MASK, SUN4I_TCON0_CTL_CLK_DELAY(clk_delay)); - /* Set the resolution */ - regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG, - SUN4I_TCON0_BASIC0_X(mode->crtc_hdisplay) | - SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay)); - /* * This is called a backporch in the register documentation, * but it really is the back porch + hsync @@ -238,10 +238,9 @@ void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon, /* Enable the output on the pins */ regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG, 0); } -EXPORT_SYMBOL(sun4i_tcon0_mode_set); -void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon, - struct drm_display_mode *mode) +static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon, + const struct drm_display_mode *mode) { unsigned int bp, hsync, vsync, vtotal; u8 clk_delay; @@ -329,7 +328,26 @@ void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon, SUN4I_TCON_GCTL_IOMAP_MASK, SUN4I_TCON_GCTL_IOMAP_TCON1); } -EXPORT_SYMBOL(sun4i_tcon1_mode_set); + +void sun4i_tcon_mode_set(struct sun4i_tcon *tcon, + const struct drm_encoder *encoder, + const struct drm_display_mode *mode) +{ + switch (encoder->encoder_type) { + case DRM_MODE_ENCODER_NONE: + sun4i_tcon0_mode_set_rgb(tcon, mode); + sun4i_tcon_set_mux(tcon, 0, encoder); + break; + case DRM_MODE_ENCODER_TVDAC: + case DRM_MODE_ENCODER_TMDS: + sun4i_tcon1_mode_set(tcon, mode); + sun4i_tcon_set_mux(tcon, 1, encoder); + break; + default: + DRM_DEBUG_DRIVER("Unknown encoder type, doing nothing...\n"); + } +} +EXPORT_SYMBOL(sun4i_tcon_mode_set); static void sun4i_tcon_finish_page_flip(struct drm_device *dev, struct sun4i_crtc *scrtc) @@ -782,8 +800,32 @@ static int sun4i_tcon_remove(struct platform_device *pdev) } /* platform 
specific TCON muxing callbacks */ +static int sun4i_a10_tcon_set_mux(struct sun4i_tcon *tcon, + const struct drm_encoder *encoder) +{ + struct sun4i_tcon *tcon0 = sun4i_get_tcon0(encoder->dev); + u32 shift; + + if (!tcon0) + return -EINVAL; + + switch (encoder->encoder_type) { + case DRM_MODE_ENCODER_TMDS: + /* HDMI */ + shift = 8; + break; + default: + return -EINVAL; + } + + regmap_update_bits(tcon0->regs, SUN4I_TCON_MUX_CTRL_REG, + 0x3 << shift, tcon->id << shift); + + return 0; +} + static int sun5i_a13_tcon_set_mux(struct sun4i_tcon *tcon, - struct drm_encoder *encoder) + const struct drm_encoder *encoder) { u32 val; @@ -799,7 +841,7 @@ static int sun5i_a13_tcon_set_mux(struct sun4i_tcon *tcon, } static int sun6i_tcon_set_mux(struct sun4i_tcon *tcon, - struct drm_encoder *encoder) + const struct drm_encoder *encoder) { struct sun4i_tcon *tcon0 = sun4i_get_tcon0(encoder->dev); u32 shift; @@ -823,6 +865,11 @@ static int sun6i_tcon_set_mux(struct sun4i_tcon *tcon, return 0; } +static const struct sun4i_tcon_quirks sun4i_a10_quirks = { + .has_channel_1 = true, + .set_mux = sun4i_a10_tcon_set_mux, +}; + static const struct sun4i_tcon_quirks sun5i_a13_quirks = { .has_channel_1 = true, .set_mux = sun5i_a13_tcon_set_mux, @@ -839,6 +886,12 @@ static const struct sun4i_tcon_quirks sun6i_a31s_quirks = { .needs_de_be_mux = true, }; +static const struct sun4i_tcon_quirks sun7i_a20_quirks = { + .has_channel_1 = true, + /* Same display pipeline structure as A10 */ + .set_mux = sun4i_a10_tcon_set_mux, +}; + static const struct sun4i_tcon_quirks sun8i_a33_quirks = { /* nothing is supported */ }; @@ -848,9 +901,11 @@ static const struct sun4i_tcon_quirks sun8i_v3s_quirks = { }; static const struct of_device_id sun4i_tcon_of_table[] = { + { .compatible = "allwinner,sun4i-a10-tcon", .data = &sun4i_a10_quirks }, { .compatible = "allwinner,sun5i-a13-tcon", .data = &sun5i_a13_quirks }, { .compatible = "allwinner,sun6i-a31-tcon", .data = &sun6i_a31_quirks }, { .compatible = "allwinner,sun6i-a31s-tcon", .data = &sun6i_a31s_quirks }, + { .compatible = "allwinner,sun7i-a20-tcon", .data = &sun7i_a20_quirks }, { .compatible = "allwinner,sun8i-a33-tcon", .data = &sun8i_a33_quirks }, { .compatible = "allwinner,sun8i-v3s-tcon", .data = &sun8i_v3s_quirks }, { } diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h index d9e1357cc8ae..f61bf6d83b4a 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.h +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h @@ -152,7 +152,7 @@ struct sun4i_tcon_quirks { bool needs_de_be_mux; /* sun6i needs mux to select backend */ /* callback to handle tcon muxing options */ - int (*set_mux)(struct sun4i_tcon *, struct drm_encoder *); + int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *); }; struct sun4i_tcon { @@ -190,22 +190,11 @@ struct sun4i_tcon { struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node); struct drm_panel *sun4i_tcon_find_panel(struct device_node *node); -/* Global Control */ -void sun4i_tcon_disable(struct sun4i_tcon *tcon); -void sun4i_tcon_enable(struct sun4i_tcon *tcon); - -/* Channel Control */ -void sun4i_tcon_channel_disable(struct sun4i_tcon *tcon, int channel); -void sun4i_tcon_channel_enable(struct sun4i_tcon *tcon, int channel); - void sun4i_tcon_enable_vblank(struct sun4i_tcon *tcon, bool enable); - -/* Mode Related Controls */ -void sun4i_tcon_set_mux(struct sun4i_tcon *tcon, int channel, - struct drm_encoder *encoder); -void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon, - struct drm_display_mode *mode); -void 
sun4i_tcon1_mode_set(struct sun4i_tcon *tcon, - struct drm_display_mode *mode); +void sun4i_tcon_mode_set(struct sun4i_tcon *tcon, + const struct drm_encoder *encoder, + const struct drm_display_mode *mode); +void sun4i_tcon_set_status(struct sun4i_tcon *crtc, + const struct drm_encoder *encoder, bool enable); #endif /* __SUN4I_TCON_H__ */ diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c index 050cfd43c7a0..b070d522ed8d 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tv.c +++ b/drivers/gpu/drm/sun4i/sun4i_tv.c @@ -24,7 +24,6 @@ #include "sun4i_crtc.h" #include "sun4i_drv.h" -#include "sun4i_tcon.h" #include "sunxi_engine.h" #define SUN4I_TVE_EN_REG 0x000 @@ -345,12 +344,9 @@ static void sun4i_tv_disable(struct drm_encoder *encoder) { struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder); struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc); - struct sun4i_tcon *tcon = crtc->tcon; DRM_DEBUG_DRIVER("Disabling the TV Output\n"); - sun4i_tcon_channel_disable(tcon, 1); - regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG, SUN4I_TVE_EN_ENABLE, 0); @@ -362,7 +358,6 @@ static void sun4i_tv_enable(struct drm_encoder *encoder) { struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder); struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc); - struct sun4i_tcon *tcon = crtc->tcon; DRM_DEBUG_DRIVER("Enabling the TV Output\n"); @@ -371,8 +366,6 @@ static void sun4i_tv_enable(struct drm_encoder *encoder) regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG, SUN4I_TVE_EN_ENABLE, SUN4I_TVE_EN_ENABLE); - - sun4i_tcon_channel_enable(tcon, 1); } static void sun4i_tv_mode_set(struct drm_encoder *encoder, @@ -380,13 +373,8 @@ static void sun4i_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder); - struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc); - struct sun4i_tcon *tcon = crtc->tcon; const struct tv_mode *tv_mode = sun4i_tv_find_tv_by_mode(mode); - sun4i_tcon1_mode_set(tcon, mode); - sun4i_tcon_set_mux(tcon, 1, encoder); - /* Enable and map the DAC to the output */ regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG, SUN4I_TVE_EN_DAC_MAP_MASK, diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig index dc58ab140151..cf54847a8bd1 100644 --- a/drivers/gpu/drm/tegra/Kconfig +++ b/drivers/gpu/drm/tegra/Kconfig @@ -9,6 +9,7 @@ config DRM_TEGRA select DRM_PANEL select TEGRA_HOST1X select IOMMU_IOVA if IOMMU_SUPPORT + select CEC_CORE if CEC_NOTIFIER help Choose this option if you have an NVIDIA Tegra SoC. 
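
[Editor's note: the sun4i hunks above fold the per-channel sun4i_tcon_channel_enable/disable and sun4i_tcon0/1_mode_set helpers into two encoder-aware entry points, sun4i_tcon_set_status() and sun4i_tcon_mode_set(), which pick TCON channel 0 or 1 from encoder->encoder_type. The following is a minimal sketch of how a caller is expected to drive them. Only the two TCON prototypes, drm_crtc_to_sun4i_crtc() and the crtc->tcon field are taken from the hunks above; the sketch_* functions, the encoder lookup, and the assumption that the CRTC code (not part of the hunks shown here) is the caller are illustrative assumptions, not part of this series.]

#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>

#include "sun4i_crtc.h"
#include "sun4i_tcon.h"

/* Hypothetical helper: find the encoder currently routed to this CRTC. */
static struct drm_encoder *sketch_get_encoder(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	drm_for_each_encoder(encoder, crtc->dev)
		if (encoder->crtc == crtc)
			return encoder;

	return NULL;
}

static void sketch_crtc_enable(struct drm_crtc *crtc)
{
	struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
	struct drm_encoder *encoder = sketch_get_encoder(crtc);

	if (!encoder)
		return;

	/* Channel selection (0 for DRM_MODE_ENCODER_NONE, 1 for TMDS/TVDAC)
	 * now happens inside the TCON driver, keyed on encoder->encoder_type,
	 * instead of in each encoder's enable/disable/mode_set hooks. */
	sun4i_tcon_mode_set(scrtc->tcon, encoder, &crtc->state->adjusted_mode);
	sun4i_tcon_set_status(scrtc->tcon, encoder, true);
}

static void sketch_crtc_disable(struct drm_crtc *crtc)
{
	struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
	struct drm_encoder *encoder = sketch_get_encoder(crtc);

	if (encoder)
		sun4i_tcon_set_status(scrtc->tcon, encoder, false);
}

[End of editor's sketch; the diff resumes below.]
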
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 4df39112e38e..24a5ef4f5bb8 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -10,6 +10,7 @@ #include <linux/clk.h> #include <linux/debugfs.h> #include <linux/iommu.h> +#include <linux/of_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> @@ -23,16 +24,6 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_plane_helper.h> -struct tegra_dc_soc_info { - bool supports_border_color; - bool supports_interlacing; - bool supports_cursor; - bool supports_block_linear; - unsigned int pitch_align; - bool has_powergate; - bool broken_reset; -}; - struct tegra_plane { struct drm_plane base; unsigned int index; @@ -559,14 +550,21 @@ static int tegra_plane_atomic_check(struct drm_plane *plane, return 0; } -static void tegra_dc_disable_window(struct tegra_dc *dc, int index) +static void tegra_plane_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) { + struct tegra_dc *dc = to_tegra_dc(old_state->crtc); + struct tegra_plane *p = to_tegra_plane(plane); unsigned long flags; u32 value; + /* rien ne va plus */ + if (!old_state || !old_state->crtc) + return; + spin_lock_irqsave(&dc->lock, flags); - value = WINDOW_A_SELECT << index; + value = WINDOW_A_SELECT << p->index; tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER); value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS); @@ -591,7 +589,7 @@ static void tegra_plane_atomic_update(struct drm_plane *plane, return; if (!plane->state->visible) - return tegra_dc_disable_window(dc, p->index); + return tegra_plane_atomic_disable(plane, old_state); memset(&window, 0, sizeof(window)); window.src.x = plane->state->src.x1 >> 16; @@ -627,25 +625,10 @@ static void tegra_plane_atomic_update(struct drm_plane *plane, tegra_dc_setup_window(dc, p->index, &window); } -static void tegra_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) -{ - struct tegra_plane *p = to_tegra_plane(plane); - struct tegra_dc *dc; - - /* rien ne va plus */ - if (!old_state || !old_state->crtc) - return; - - dc = to_tegra_dc(old_state->crtc); - - tegra_dc_disable_window(dc, p->index); -} - -static const struct drm_plane_helper_funcs tegra_primary_plane_helper_funcs = { +static const struct drm_plane_helper_funcs tegra_plane_helper_funcs = { .atomic_check = tegra_plane_atomic_check, - .atomic_update = tegra_plane_atomic_update, .atomic_disable = tegra_plane_atomic_disable, + .atomic_update = tegra_plane_atomic_update, }; static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm, @@ -685,7 +668,7 @@ static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm, return ERR_PTR(err); } - drm_plane_helper_add(&plane->base, &tegra_primary_plane_helper_funcs); + drm_plane_helper_add(&plane->base, &tegra_plane_helper_funcs); return &plane->base; } @@ -880,12 +863,6 @@ static const uint32_t tegra_overlay_plane_formats[] = { DRM_FORMAT_YUV422, }; -static const struct drm_plane_helper_funcs tegra_overlay_plane_helper_funcs = { - .atomic_check = tegra_plane_atomic_check, - .atomic_update = tegra_plane_atomic_update, - .atomic_disable = tegra_plane_atomic_disable, -}; - static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm, struct tegra_dc *dc, unsigned int index) @@ -913,7 +890,7 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm, return ERR_PTR(err); } - drm_plane_helper_add(&plane->base, &tegra_overlay_plane_helper_funcs); + 
drm_plane_helper_add(&plane->base, &tegra_plane_helper_funcs); return &plane->base; } @@ -1161,6 +1138,11 @@ static void tegra_dc_commit_state(struct tegra_dc *dc, value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1; tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL); + + err = clk_set_rate(dc->clk, state->pclk); + if (err < 0) + dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n", + dc->clk, state->pclk, err); } static void tegra_dc_stop(struct tegra_dc *dc) @@ -1756,7 +1738,7 @@ static int tegra_dc_init(struct host1x_client *client) struct drm_plane *cursor = NULL; int err; - dc->syncpt = host1x_syncpt_request(dc->dev, flags); + dc->syncpt = host1x_syncpt_request(client, flags); if (!dc->syncpt) dev_warn(dc->dev, "failed to allocate syncpoint\n"); @@ -1985,7 +1967,6 @@ static int tegra_dc_parse_dt(struct tegra_dc *dc) static int tegra_dc_probe(struct platform_device *pdev) { - const struct of_device_id *id; struct resource *regs; struct tegra_dc *dc; int err; @@ -1994,14 +1975,11 @@ static int tegra_dc_probe(struct platform_device *pdev) if (!dc) return -ENOMEM; - id = of_match_node(tegra_dc_of_match, pdev->dev.of_node); - if (!id) - return -ENODEV; + dc->soc = of_device_get_match_data(&pdev->dev); spin_lock_init(&dc->lock); INIT_LIST_HEAD(&dc->list); dc->dev = &pdev->dev; - dc->soc = id->data; err = tegra_dc_parse_dt(dc); if (err < 0) @@ -2019,8 +1997,22 @@ static int tegra_dc_probe(struct platform_device *pdev) return PTR_ERR(dc->rst); } - if (!dc->soc->broken_reset) - reset_control_assert(dc->rst); + /* assert reset and disable clock */ + if (!dc->soc->broken_reset) { + err = clk_prepare_enable(dc->clk); + if (err < 0) + return err; + + usleep_range(2000, 4000); + + err = reset_control_assert(dc->rst); + if (err < 0) + return err; + + usleep_range(2000, 4000); + + clk_disable_unprepare(dc->clk); + } if (dc->soc->has_powergate) { if (dc->pipe == 0) diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h index 4a268635749b..cb100b6e3282 100644 --- a/drivers/gpu/drm/tegra/dc.h +++ b/drivers/gpu/drm/tegra/dc.h @@ -10,6 +10,126 @@ #ifndef TEGRA_DC_H #define TEGRA_DC_H 1 +#include <linux/host1x.h> + +#include <drm/drm_crtc.h> + +#include "drm.h" + +struct tegra_output; + +struct tegra_dc_stats { + unsigned long frames; + unsigned long vblank; + unsigned long underflow; + unsigned long overflow; +}; + +struct tegra_dc_soc_info { + bool supports_border_color; + bool supports_interlacing; + bool supports_cursor; + bool supports_block_linear; + unsigned int pitch_align; + bool has_powergate; + bool broken_reset; +}; + +struct tegra_dc { + struct host1x_client client; + struct host1x_syncpt *syncpt; + struct device *dev; + spinlock_t lock; + + struct drm_crtc base; + unsigned int powergate; + int pipe; + + struct clk *clk; + struct reset_control *rst; + void __iomem *regs; + int irq; + + struct tegra_output *rgb; + + struct tegra_dc_stats stats; + struct list_head list; + + struct drm_info_list *debugfs_files; + struct drm_minor *minor; + struct dentry *debugfs; + + /* page-flip handling */ + struct drm_pending_vblank_event *event; + + const struct tegra_dc_soc_info *soc; + + struct iommu_domain *domain; +}; + +static inline struct tegra_dc * +host1x_client_to_dc(struct host1x_client *client) +{ + return container_of(client, struct tegra_dc, client); +} + +static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc) +{ + return crtc ? 
container_of(crtc, struct tegra_dc, base) : NULL; +} + +static inline void tegra_dc_writel(struct tegra_dc *dc, u32 value, + unsigned int offset) +{ + trace_dc_writel(dc->dev, offset, value); + writel(value, dc->regs + (offset << 2)); +} + +static inline u32 tegra_dc_readl(struct tegra_dc *dc, unsigned int offset) +{ + u32 value = readl(dc->regs + (offset << 2)); + + trace_dc_readl(dc->dev, offset, value); + + return value; +} + +struct tegra_dc_window { + struct { + unsigned int x; + unsigned int y; + unsigned int w; + unsigned int h; + } src; + struct { + unsigned int x; + unsigned int y; + unsigned int w; + unsigned int h; + } dst; + unsigned int bits_per_pixel; + unsigned int stride[2]; + unsigned long base[3]; + bool bottom_up; + + struct tegra_bo_tiling tiling; + u32 format; + u32 swap; +}; + +/* from dc.c */ +void tegra_dc_commit(struct tegra_dc *dc); +int tegra_dc_state_setup_clock(struct tegra_dc *dc, + struct drm_crtc_state *crtc_state, + struct clk *clk, unsigned long pclk, + unsigned int div); + +/* from rgb.c */ +int tegra_dc_rgb_probe(struct tegra_dc *dc); +int tegra_dc_rgb_remove(struct tegra_dc *dc); +int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc); +int tegra_dc_rgb_exit(struct tegra_dc *dc); + #define DC_CMD_GENERAL_INCR_SYNCPT 0x000 #define DC_CMD_GENERAL_INCR_SYNCPT_CNTRL 0x001 #define SYNCPT_CNTRL_NO_STALL (1 << 8) diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 597d563d636a..943bdf88c4a2 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -386,12 +386,10 @@ int tegra_drm_submit(struct tegra_drm_context *context, unsigned int num_cmdbufs = args->num_cmdbufs; unsigned int num_relocs = args->num_relocs; unsigned int num_waitchks = args->num_waitchks; - struct drm_tegra_cmdbuf __user *cmdbufs = - (void __user *)(uintptr_t)args->cmdbufs; - struct drm_tegra_reloc __user *relocs = - (void __user *)(uintptr_t)args->relocs; - struct drm_tegra_waitchk __user *waitchks = - (void __user *)(uintptr_t)args->waitchks; + struct drm_tegra_cmdbuf __user *user_cmdbufs; + struct drm_tegra_reloc __user *user_relocs; + struct drm_tegra_waitchk __user *user_waitchks; + struct drm_tegra_syncpt __user *user_syncpt; struct drm_tegra_syncpt syncpt; struct host1x *host1x = dev_get_drvdata(drm->dev->parent); struct drm_gem_object **refs; @@ -400,6 +398,11 @@ int tegra_drm_submit(struct tegra_drm_context *context, unsigned int num_refs; int err; + user_cmdbufs = u64_to_user_ptr(args->cmdbufs); + user_relocs = u64_to_user_ptr(args->relocs); + user_waitchks = u64_to_user_ptr(args->waitchks); + user_syncpt = u64_to_user_ptr(args->syncpts); + /* We don't yet support other than one syncpt_incr struct per submit */ if (args->num_syncpts != 1) return -EINVAL; @@ -440,7 +443,7 @@ int tegra_drm_submit(struct tegra_drm_context *context, struct tegra_bo *obj; u64 offset; - if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) { + if (copy_from_user(&cmdbuf, user_cmdbufs, sizeof(cmdbuf))) { err = -EFAULT; goto fail; } @@ -476,7 +479,7 @@ int tegra_drm_submit(struct tegra_drm_context *context, host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset); num_cmdbufs--; - cmdbufs++; + user_cmdbufs++; } /* copy and resolve relocations from submit */ @@ -485,7 +488,7 @@ int tegra_drm_submit(struct tegra_drm_context *context, struct tegra_bo *obj; err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs], - &relocs[num_relocs], drm, + &user_relocs[num_relocs], drm, file); if (err < 0) goto fail; @@ -519,9 +522,8 @@ int 
tegra_drm_submit(struct tegra_drm_context *context, struct host1x_waitchk *wait = &job->waitchk[num_waitchks]; struct tegra_bo *obj; - err = host1x_waitchk_copy_from_user(wait, - &waitchks[num_waitchks], - file); + err = host1x_waitchk_copy_from_user( + wait, &user_waitchks[num_waitchks], file); if (err < 0) goto fail; @@ -539,8 +541,7 @@ int tegra_drm_submit(struct tegra_drm_context *context, } } - if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts, - sizeof(syncpt))) { + if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) { err = -EFAULT; goto fail; } @@ -1317,6 +1318,7 @@ static const struct of_device_id host1x_drm_subdevs[] = { { .compatible = "nvidia,tegra210-sor", }, { .compatible = "nvidia,tegra210-sor1", }, { .compatible = "nvidia,tegra210-vic", }, + { .compatible = "nvidia,tegra186-vic", }, { /* sentinel */ } }; diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index 063f5d397526..ddae331ad8b6 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -119,104 +119,7 @@ void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *iova); void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt, dma_addr_t iova); -struct tegra_dc_soc_info; -struct tegra_output; - -struct tegra_dc_stats { - unsigned long frames; - unsigned long vblank; - unsigned long underflow; - unsigned long overflow; -}; - -struct tegra_dc { - struct host1x_client client; - struct host1x_syncpt *syncpt; - struct device *dev; - spinlock_t lock; - - struct drm_crtc base; - unsigned int powergate; - int pipe; - - struct clk *clk; - struct reset_control *rst; - void __iomem *regs; - int irq; - - struct tegra_output *rgb; - - struct tegra_dc_stats stats; - struct list_head list; - - struct drm_info_list *debugfs_files; - struct drm_minor *minor; - struct dentry *debugfs; - - /* page-flip handling */ - struct drm_pending_vblank_event *event; - - const struct tegra_dc_soc_info *soc; - - struct iommu_domain *domain; -}; - -static inline struct tegra_dc * -host1x_client_to_dc(struct host1x_client *client) -{ - return container_of(client, struct tegra_dc, client); -} - -static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc) -{ - return crtc ? 
container_of(crtc, struct tegra_dc, base) : NULL; -} - -static inline void tegra_dc_writel(struct tegra_dc *dc, u32 value, - unsigned int offset) -{ - trace_dc_writel(dc->dev, offset, value); - writel(value, dc->regs + (offset << 2)); -} - -static inline u32 tegra_dc_readl(struct tegra_dc *dc, unsigned int offset) -{ - u32 value = readl(dc->regs + (offset << 2)); - - trace_dc_readl(dc->dev, offset, value); - - return value; -} - -struct tegra_dc_window { - struct { - unsigned int x; - unsigned int y; - unsigned int w; - unsigned int h; - } src; - struct { - unsigned int x; - unsigned int y; - unsigned int w; - unsigned int h; - } dst; - unsigned int bits_per_pixel; - unsigned int stride[2]; - unsigned long base[3]; - bool bottom_up; - - struct tegra_bo_tiling tiling; - u32 format; - u32 swap; -}; - -/* from dc.c */ -void tegra_dc_commit(struct tegra_dc *dc); -int tegra_dc_state_setup_clock(struct tegra_dc *dc, - struct drm_crtc_state *crtc_state, - struct clk *clk, unsigned long pclk, - unsigned int div); +struct cec_notifier; struct tegra_output { struct device_node *of_node; @@ -225,6 +128,7 @@ struct tegra_output { struct drm_panel *panel; struct i2c_adapter *ddc; const struct edid *edid; + struct cec_notifier *notifier; unsigned int hpd_irq; int hpd_gpio; enum of_gpio_flags hpd_gpio_flags; @@ -243,12 +147,6 @@ static inline struct tegra_output *connector_to_output(struct drm_connector *c) return container_of(c, struct tegra_output, connector); } -/* from rgb.c */ -int tegra_dc_rgb_probe(struct tegra_dc *dc); -int tegra_dc_rgb_remove(struct tegra_dc *dc); -int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc); -int tegra_dc_rgb_exit(struct tegra_dc *dc); - /* from output.c */ int tegra_output_probe(struct tegra_output *output); void tegra_output_remove(struct tegra_output *output); diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c index 6ea070da7718..9a8ea93016a9 100644 --- a/drivers/gpu/drm/tegra/gr2d.c +++ b/drivers/gpu/drm/tegra/gr2d.c @@ -36,7 +36,7 @@ static int gr2d_init(struct host1x_client *client) if (!gr2d->channel) return -ENOMEM; - client->syncpts[0] = host1x_syncpt_request(client->dev, flags); + client->syncpts[0] = host1x_syncpt_request(client, flags); if (!client->syncpts[0]) { host1x_channel_put(gr2d->channel); return -ENOMEM; diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c index cee2ab645cde..28c4ef63065b 100644 --- a/drivers/gpu/drm/tegra/gr3d.c +++ b/drivers/gpu/drm/tegra/gr3d.c @@ -46,7 +46,7 @@ static int gr3d_init(struct host1x_client *client) if (!gr3d->channel) return -ENOMEM; - client->syncpts[0] = host1x_syncpt_request(client->dev, flags); + client->syncpts[0] = host1x_syncpt_request(client, flags); if (!client->syncpts[0]) { host1x_channel_put(gr3d->channel); return -ENOMEM; diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index 5b9d83b71943..6434b3d3d1ba 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -11,6 +11,7 @@ #include <linux/debugfs.h> #include <linux/gpio.h> #include <linux/hdmi.h> +#include <linux/of_device.h> #include <linux/pm_runtime.h> #include <linux/regulator/consumer.h> #include <linux/reset.h> @@ -21,6 +22,8 @@ #include <sound/hda_verbs.h> +#include <media/cec-notifier.h> + #include "hdmi.h" #include "drm.h" #include "dc.h" @@ -1663,20 +1666,15 @@ static irqreturn_t tegra_hdmi_irq(int irq, void *data) static int tegra_hdmi_probe(struct platform_device *pdev) { - const struct of_device_id *match; struct tegra_hdmi *hdmi; struct 
resource *regs; int err; - match = of_match_node(tegra_hdmi_of_match, pdev->dev.of_node); - if (!match) - return -ENODEV; - hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); if (!hdmi) return -ENOMEM; - hdmi->config = match->data; + hdmi->config = of_device_get_match_data(&pdev->dev); hdmi->dev = &pdev->dev; hdmi->audio_source = AUTO; @@ -1725,6 +1723,10 @@ static int tegra_hdmi_probe(struct platform_device *pdev) return PTR_ERR(hdmi->vdd); } + hdmi->output.notifier = cec_notifier_get(&pdev->dev); + if (hdmi->output.notifier == NULL) + return -ENOMEM; + hdmi->output.dev = &pdev->dev; err = tegra_output_probe(&hdmi->output); @@ -1783,6 +1785,9 @@ static int tegra_hdmi_remove(struct platform_device *pdev) tegra_output_remove(&hdmi->output); + if (hdmi->output.notifier) + cec_notifier_put(hdmi->output.notifier); + return 0; } diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index 595d1ec3e02e..1cfbacea8113 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c @@ -11,6 +11,8 @@ #include <drm/drm_panel.h> #include "drm.h" +#include <media/cec-notifier.h> + int tegra_output_connector_get_modes(struct drm_connector *connector) { struct tegra_output *output = connector_to_output(connector); @@ -32,6 +34,7 @@ int tegra_output_connector_get_modes(struct drm_connector *connector) else if (output->ddc) edid = drm_get_edid(connector, output->ddc); + cec_notifier_set_phys_addr_from_edid(output->notifier, edid); drm_mode_connector_update_edid_property(connector, edid); if (edid) { @@ -68,6 +71,9 @@ tegra_output_connector_detect(struct drm_connector *connector, bool force) status = connector_status_connected; } + if (status != connector_status_connected) + cec_notifier_phys_addr_invalidate(output->notifier); + return status; } diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index 7ab1d1dc7cd7..4bcacd3f4861 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -2536,20 +2536,17 @@ MODULE_DEVICE_TABLE(of, tegra_sor_of_match); static int tegra_sor_probe(struct platform_device *pdev) { - const struct of_device_id *match; struct device_node *np; struct tegra_sor *sor; struct resource *regs; int err; - match = of_match_device(tegra_sor_of_match, &pdev->dev); - sor = devm_kzalloc(&pdev->dev, sizeof(*sor), GFP_KERNEL); if (!sor) return -ENOMEM; + sor->soc = of_device_get_match_data(&pdev->dev); sor->output.dev = sor->dev = &pdev->dev; - sor->soc = match->data; sor->settings = devm_kmemdup(&pdev->dev, sor->soc->settings, sor->soc->num_settings * diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c index 2448229fa653..18024183aa2b 100644 --- a/drivers/gpu/drm/tegra/vic.c +++ b/drivers/gpu/drm/tegra/vic.c @@ -167,7 +167,7 @@ static int vic_init(struct host1x_client *client) goto detach_device; } - client->syncpts[0] = host1x_syncpt_request(client->dev, 0); + client->syncpts[0] = host1x_syncpt_request(client, 0); if (!client->syncpts[0]) { err = -ENOMEM; goto free_channel; @@ -270,29 +270,33 @@ static const struct vic_config vic_t210_config = { .firmware = NVIDIA_TEGRA_210_VIC_FIRMWARE, }; +#define NVIDIA_TEGRA_186_VIC_FIRMWARE "nvidia/tegra186/vic04_ucode.bin" + +static const struct vic_config vic_t186_config = { + .firmware = NVIDIA_TEGRA_186_VIC_FIRMWARE, +}; + static const struct of_device_id vic_match[] = { { .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config }, { .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config }, + { .compatible = "nvidia,tegra186-vic", .data 
= &vic_t186_config }, { }, }; static int vic_probe(struct platform_device *pdev) { - struct vic_config *vic_config = NULL; struct device *dev = &pdev->dev; struct host1x_syncpt **syncpts; struct resource *regs; - const struct of_device_id *match; struct vic *vic; int err; - match = of_match_device(vic_match, dev); - vic_config = (struct vic_config *)match->data; - vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL); if (!vic) return -ENOMEM; + vic->config = of_device_get_match_data(dev); + syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL); if (!syncpts) return -ENOMEM; @@ -321,7 +325,7 @@ static int vic_probe(struct platform_device *pdev) if (err < 0) return err; - err = falcon_read_firmware(&vic->falcon, vic_config->firmware); + err = falcon_read_firmware(&vic->falcon, vic->config->firmware); if (err < 0) goto exit_falcon; @@ -334,7 +338,6 @@ static int vic_probe(struct platform_device *pdev) vic->client.base.syncpts = syncpts; vic->client.base.num_syncpts = 1; vic->dev = dev; - vic->config = vic_config; INIT_LIST_HEAD(&vic->client.list); vic->client.ops = &vic_ops; @@ -405,3 +408,6 @@ MODULE_FIRMWARE(NVIDIA_TEGRA_124_VIC_FIRMWARE); #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) MODULE_FIRMWARE(NVIDIA_TEGRA_210_VIC_FIRMWARE); #endif +#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) +MODULE_FIRMWARE(NVIDIA_TEGRA_186_VIC_FIRMWARE); +#endif diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c index 091ca81658eb..c3dc1fd20cb4 100644 --- a/drivers/gpu/drm/udl/udl_connector.c +++ b/drivers/gpu/drm/udl/udl_connector.c @@ -14,70 +14,95 @@ #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <drm/drm_crtc_helper.h> +#include "udl_connector.h" #include "udl_drv.h" -/* dummy connector to just get EDID, - all UDL appear to have a DVI-D */ - -static u8 *udl_get_edid(struct udl_device *udl) +static bool udl_get_edid_block(struct udl_device *udl, int block_idx, + u8 *buff) { - u8 *block; - char *rbuf; int ret, i; + u8 *read_buff; - block = kmalloc(EDID_LENGTH, GFP_KERNEL); - if (block == NULL) - return NULL; - - rbuf = kmalloc(2, GFP_KERNEL); - if (rbuf == NULL) - goto error; + read_buff = kmalloc(2, GFP_KERNEL); + if (!read_buff) + return false; for (i = 0; i < EDID_LENGTH; i++) { + int bval = (i + block_idx * EDID_LENGTH) << 8; ret = usb_control_msg(udl->udev, - usb_rcvctrlpipe(udl->udev, 0), (0x02), - (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2, - HZ); + usb_rcvctrlpipe(udl->udev, 0), + (0x02), (0x80 | (0x02 << 5)), bval, + 0xA1, read_buff, 2, HZ); if (ret < 1) { DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret); - goto error; + kfree(read_buff); + return false; } - block[i] = rbuf[1]; + buff[i] = read_buff[1]; } - kfree(rbuf); - return block; - -error: - kfree(block); - kfree(rbuf); - return NULL; + kfree(read_buff); + return true; } -static int udl_get_modes(struct drm_connector *connector) +static bool udl_get_edid(struct udl_device *udl, u8 **result_buff, + int *result_buff_size) { - struct udl_device *udl = connector->dev->dev_private; - struct edid *edid; - int ret; - - edid = (struct edid *)udl_get_edid(udl); - if (!edid) { - drm_mode_connector_update_edid_property(connector, NULL); - return 0; + int i, extensions; + u8 *block_buff = NULL, *buff_ptr; + + block_buff = kmalloc(EDID_LENGTH, GFP_KERNEL); + if (block_buff == NULL) + return false; + + if (udl_get_edid_block(udl, 0, block_buff) && + memchr_inv(block_buff, 0, EDID_LENGTH)) { + extensions = ((struct edid *)block_buff)->extensions; + if (extensions > 0) { + /* we have to read all extensions one 
by one */ + *result_buff_size = EDID_LENGTH * (extensions + 1); + *result_buff = kmalloc(*result_buff_size, GFP_KERNEL); + buff_ptr = *result_buff; + if (buff_ptr == NULL) { + kfree(block_buff); + return false; + } + memcpy(buff_ptr, block_buff, EDID_LENGTH); + kfree(block_buff); + buff_ptr += EDID_LENGTH; + for (i = 1; i < extensions; ++i) { + if (udl_get_edid_block(udl, i, buff_ptr)) { + buff_ptr += EDID_LENGTH; + } else { + kfree(*result_buff); + *result_buff = NULL; + return false; + } + } + return true; + } + /* we have only base edid block */ + *result_buff = block_buff; + *result_buff_size = EDID_LENGTH; + return true; } - /* - * We only read the main block, but if the monitor reports extension - * blocks then the drm edid code expects them to be present, so patch - * the extension count to 0. - */ - edid->checksum += edid->extensions; - edid->extensions = 0; - - drm_mode_connector_update_edid_property(connector, edid); - ret = drm_add_edid_modes(connector, edid); - kfree(edid); - return ret; + kfree(block_buff); + + return false; +} + +static int udl_get_modes(struct drm_connector *connector) +{ + struct udl_drm_connector *udl_connector = + container_of(connector, + struct udl_drm_connector, + connector); + + drm_mode_connector_update_edid_property(connector, udl_connector->edid); + if (udl_connector->edid) + return drm_add_edid_modes(connector, udl_connector->edid); + return 0; } static int udl_mode_valid(struct drm_connector *connector, @@ -96,8 +121,26 @@ static int udl_mode_valid(struct drm_connector *connector, static enum drm_connector_status udl_detect(struct drm_connector *connector, bool force) { - if (drm_dev_is_unplugged(connector->dev)) + u8 *edid_buff = NULL; + int edid_buff_size = 0; + struct udl_device *udl = connector->dev->dev_private; + struct udl_drm_connector *udl_connector = + container_of(connector, + struct udl_drm_connector, + connector); + + /* cleanup previous edid */ + if (udl_connector->edid != NULL) { + kfree(udl_connector->edid); + udl_connector->edid = NULL; + } + + + if (!udl_get_edid(udl, &edid_buff, &edid_buff_size)) return connector_status_disconnected; + + udl_connector->edid = (struct edid *)edid_buff; + return connector_status_connected; } @@ -117,8 +160,14 @@ static int udl_connector_set_property(struct drm_connector *connector, static void udl_connector_destroy(struct drm_connector *connector) { + struct udl_drm_connector *udl_connector = + container_of(connector, + struct udl_drm_connector, + connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); + kfree(udl_connector->edid); kfree(connector); } @@ -138,17 +187,22 @@ static const struct drm_connector_funcs udl_connector_funcs = { int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder) { + struct udl_drm_connector *udl_connector; struct drm_connector *connector; - connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL); - if (!connector) + udl_connector = kzalloc(sizeof(struct udl_drm_connector), GFP_KERNEL); + if (!udl_connector) return -ENOMEM; - drm_connector_init(dev, connector, &udl_connector_funcs, DRM_MODE_CONNECTOR_DVII); + connector = &udl_connector->connector; + drm_connector_init(dev, connector, &udl_connector_funcs, + DRM_MODE_CONNECTOR_DVII); drm_connector_helper_add(connector, &udl_connector_helper_funcs); drm_connector_register(connector); drm_mode_connector_attach_encoder(connector, encoder); + connector->polled = DRM_CONNECTOR_POLL_HPD | + DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; return 0; } diff --git 
a/drivers/gpu/drm/udl/udl_connector.h b/drivers/gpu/drm/udl/udl_connector.h new file mode 100644 index 000000000000..0fb0db5c4612 --- /dev/null +++ b/drivers/gpu/drm/udl/udl_connector.h @@ -0,0 +1,13 @@ +#ifndef __UDL_CONNECTOR_H__ +#define __UDL_CONNECTOR_H__ + +#include <drm/drm_crtc.h> + +struct udl_drm_connector { + struct drm_connector connector; + /* last udl_detect edid */ + struct edid *edid; +}; + + +#endif //__UDL_CONNECTOR_H__ diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 31421b6b586e..3c45a3064726 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -14,6 +14,9 @@ static int udl_usb_suspend(struct usb_interface *interface, pm_message_t message) { + struct drm_device *dev = usb_get_intfdata(interface); + + drm_kms_helper_poll_disable(dev); return 0; } @@ -21,6 +24,7 @@ static int udl_usb_resume(struct usb_interface *interface) { struct drm_device *dev = usb_get_intfdata(interface); + drm_kms_helper_poll_enable(dev); udl_modeset_restore(dev); return 0; } diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index 0328b2c7b210..f1ec4528a73e 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c @@ -11,6 +11,7 @@ * more details. */ #include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> #include "udl_drv.h" /* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */ @@ -350,6 +351,8 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags) if (ret) goto err_fb; + drm_kms_helper_poll_init(dev); + return 0; err_fb: udl_fbdev_cleanup(dev); @@ -371,6 +374,8 @@ void udl_driver_unload(struct drm_device *dev) { struct udl_device *udl = dev->dev_private; + drm_kms_helper_poll_fini(dev); + if (udl->urbs.count) udl_free_urb_list(dev); diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index 3afdbf4bc10b..01a53ba304f8 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -53,6 +53,17 @@ static void vc4_bo_stats_dump(struct vc4_dev *vc4) vc4->bo_labels[i].size_allocated / 1024, vc4->bo_labels[i].num_allocated); } + + mutex_lock(&vc4->purgeable.lock); + if (vc4->purgeable.num) + DRM_INFO("%30s: %6zdkb BOs (%d)\n", "userspace BO cache", + vc4->purgeable.size / 1024, vc4->purgeable.num); + + if (vc4->purgeable.purged_num) + DRM_INFO("%30s: %6zdkb BOs (%d)\n", "total purged BO", + vc4->purgeable.purged_size / 1024, + vc4->purgeable.purged_num); + mutex_unlock(&vc4->purgeable.lock); } #ifdef CONFIG_DEBUG_FS @@ -75,6 +86,17 @@ int vc4_bo_stats_debugfs(struct seq_file *m, void *unused) } mutex_unlock(&vc4->bo_lock); + mutex_lock(&vc4->purgeable.lock); + if (vc4->purgeable.num) + seq_printf(m, "%30s: %6dkb BOs (%d)\n", "userspace BO cache", + vc4->purgeable.size / 1024, vc4->purgeable.num); + + if (vc4->purgeable.purged_num) + seq_printf(m, "%30s: %6dkb BOs (%d)\n", "total purged BO", + vc4->purgeable.purged_size / 1024, + vc4->purgeable.purged_num); + mutex_unlock(&vc4->purgeable.lock); + return 0; } #endif @@ -247,6 +269,109 @@ static void vc4_bo_cache_purge(struct drm_device *dev) mutex_unlock(&vc4->bo_lock); } +void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo) +{ + struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); + + mutex_lock(&vc4->purgeable.lock); + list_add_tail(&bo->size_head, &vc4->purgeable.list); + vc4->purgeable.num++; + vc4->purgeable.size += bo->base.base.size; + mutex_unlock(&vc4->purgeable.lock); +} + +static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo) +{ + struct 
vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); + + /* list_del_init() is used here because the caller might release + * the purgeable lock in order to acquire the madv one and update the + * madv status. + * During this short period of time a user might decide to mark + * the BO as unpurgeable, and if bo->madv is set to + * VC4_MADV_DONTNEED it will try to remove the BO from the + * purgeable list which will fail if the ->next/prev fields + * are set to LIST_POISON1/LIST_POISON2 (which is what + * list_del() does). + * Re-initializing the list element guarantees that list_del() + * will work correctly even if it's a NOP. + */ + list_del_init(&bo->size_head); + vc4->purgeable.num--; + vc4->purgeable.size -= bo->base.base.size; +} + +void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo) +{ + struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); + + mutex_lock(&vc4->purgeable.lock); + vc4_bo_remove_from_purgeable_pool_locked(bo); + mutex_unlock(&vc4->purgeable.lock); +} + +static void vc4_bo_purge(struct drm_gem_object *obj) +{ + struct vc4_bo *bo = to_vc4_bo(obj); + struct drm_device *dev = obj->dev; + + WARN_ON(!mutex_is_locked(&bo->madv_lock)); + WARN_ON(bo->madv != VC4_MADV_DONTNEED); + + drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping); + + dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.paddr); + bo->base.vaddr = NULL; + bo->madv = __VC4_MADV_PURGED; +} + +static void vc4_bo_userspace_cache_purge(struct drm_device *dev) +{ + struct vc4_dev *vc4 = to_vc4_dev(dev); + + mutex_lock(&vc4->purgeable.lock); + while (!list_empty(&vc4->purgeable.list)) { + struct vc4_bo *bo = list_first_entry(&vc4->purgeable.list, + struct vc4_bo, size_head); + struct drm_gem_object *obj = &bo->base.base; + size_t purged_size = 0; + + vc4_bo_remove_from_purgeable_pool_locked(bo); + + /* Release the purgeable lock while we're purging the BO so + * that other people can continue inserting things in the + * purgeable pool without having to wait for all BOs to be + * purged. + */ + mutex_unlock(&vc4->purgeable.lock); + mutex_lock(&bo->madv_lock); + + /* Since we released the purgeable pool lock before acquiring + * the BO madv one, the user may have marked the BO as WILLNEED + * and re-used it in the meantime. + * Before purging the BO we need to make sure + * - it is still marked as DONTNEED + * - it has not been re-inserted in the purgeable list + * - it is not used by HW blocks + * If one of these conditions is not met, just skip the entry. + */ + if (bo->madv == VC4_MADV_DONTNEED && + list_empty(&bo->size_head) && + !refcount_read(&bo->usecnt)) { + purged_size = bo->base.base.size; + vc4_bo_purge(obj); + } + mutex_unlock(&bo->madv_lock); + mutex_lock(&vc4->purgeable.lock); + + if (purged_size) { + vc4->purgeable.purged_size += purged_size; + vc4->purgeable.purged_num++; + } + } + mutex_unlock(&vc4->purgeable.lock); +} + static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev, uint32_t size, enum vc4_kernel_bo_type type) @@ -293,6 +418,9 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size) if (!bo) return ERR_PTR(-ENOMEM); + bo->madv = VC4_MADV_WILLNEED; + refcount_set(&bo->usecnt, 0); + mutex_init(&bo->madv_lock); mutex_lock(&vc4->bo_lock); bo->label = VC4_BO_TYPE_KERNEL; vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++; @@ -330,16 +458,38 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size, * CMA allocations we've got laying around and try again. 
*/ vc4_bo_cache_purge(dev); + cma_obj = drm_gem_cma_create(dev, size); + } + if (IS_ERR(cma_obj)) { + /* + * Still not enough CMA memory, purge the userspace BO + * cache and retry. + * This is sub-optimal since we purge the whole userspace + * BO cache which forces user that want to re-use the BO to + * restore its initial content. + * Ideally, we should purge entries one by one and retry + * after each to see if CMA allocation succeeds. Or even + * better, try to find an entry with at least the same + * size. + */ + vc4_bo_userspace_cache_purge(dev); cma_obj = drm_gem_cma_create(dev, size); - if (IS_ERR(cma_obj)) { - DRM_ERROR("Failed to allocate from CMA:\n"); - vc4_bo_stats_dump(vc4); - return ERR_PTR(-ENOMEM); - } + } + + if (IS_ERR(cma_obj)) { + DRM_ERROR("Failed to allocate from CMA:\n"); + vc4_bo_stats_dump(vc4); + return ERR_PTR(-ENOMEM); } bo = to_vc4_bo(&cma_obj->base); + /* By default, BOs do not support the MADV ioctl. This will be enabled + * only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB + * BOs). + */ + bo->madv = __VC4_MADV_NOTSUPP; + mutex_lock(&vc4->bo_lock); vc4_bo_set_label(&cma_obj->base, type); mutex_unlock(&vc4->bo_lock); @@ -365,6 +515,8 @@ int vc4_dumb_create(struct drm_file *file_priv, if (IS_ERR(bo)) return PTR_ERR(bo); + bo->madv = VC4_MADV_WILLNEED; + ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); drm_gem_object_put_unlocked(&bo->base.base); @@ -403,6 +555,12 @@ void vc4_free_object(struct drm_gem_object *gem_bo) struct vc4_bo *bo = to_vc4_bo(gem_bo); struct list_head *cache_list; + /* Remove the BO from the purgeable list. */ + mutex_lock(&bo->madv_lock); + if (bo->madv == VC4_MADV_DONTNEED && !refcount_read(&bo->usecnt)) + vc4_bo_remove_from_purgeable_pool(bo); + mutex_unlock(&bo->madv_lock); + mutex_lock(&vc4->bo_lock); /* If the object references someone else's memory, we can't cache it. */ @@ -418,7 +576,8 @@ void vc4_free_object(struct drm_gem_object *gem_bo) } /* If this object was partially constructed but CMA allocation - * had failed, just free it. + * had failed, just free it. Can also happen when the BO has been + * purged. */ if (!bo->base.vaddr) { vc4_bo_destroy(bo); @@ -437,6 +596,10 @@ void vc4_free_object(struct drm_gem_object *gem_bo) bo->validated_shader = NULL; } + /* Reset madv and usecnt before adding the BO to the cache. */ + bo->madv = __VC4_MADV_NOTSUPP; + refcount_set(&bo->usecnt, 0); + bo->t_format = false; bo->free_time = jiffies; list_add(&bo->size_head, cache_list); @@ -461,6 +624,56 @@ static void vc4_bo_cache_time_work(struct work_struct *work) mutex_unlock(&vc4->bo_lock); } +int vc4_bo_inc_usecnt(struct vc4_bo *bo) +{ + int ret; + + /* Fast path: if the BO is already retained by someone, no need to + * check the madv status. + */ + if (refcount_inc_not_zero(&bo->usecnt)) + return 0; + + mutex_lock(&bo->madv_lock); + switch (bo->madv) { + case VC4_MADV_WILLNEED: + refcount_inc(&bo->usecnt); + ret = 0; + break; + case VC4_MADV_DONTNEED: + /* We shouldn't use a BO marked as purgeable if at least + * someone else retained its content by incrementing usecnt. + * Luckily the BO hasn't been purged yet, but something wrong + * is happening here. Just throw an error instead of + * authorizing this use case. + */ + case __VC4_MADV_PURGED: + /* We can't use a purged BO. */ + default: + /* Invalid madv value. 
*/ + ret = -EINVAL; + break; + } + mutex_unlock(&bo->madv_lock); + + return ret; +} + +void vc4_bo_dec_usecnt(struct vc4_bo *bo) +{ + /* Fast path: if the BO is still retained by someone, no need to test + * the madv value. + */ + if (refcount_dec_not_one(&bo->usecnt)) + return; + + mutex_lock(&bo->madv_lock); + if (refcount_dec_and_test(&bo->usecnt) && + bo->madv == VC4_MADV_DONTNEED) + vc4_bo_add_to_purgeable_pool(bo); + mutex_unlock(&bo->madv_lock); +} + static void vc4_bo_cache_time_timer(unsigned long data) { struct drm_device *dev = (struct drm_device *)data; @@ -480,18 +693,52 @@ struct dma_buf * vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags) { struct vc4_bo *bo = to_vc4_bo(obj); + struct dma_buf *dmabuf; + int ret; if (bo->validated_shader) { DRM_DEBUG("Attempting to export shader BO\n"); return ERR_PTR(-EINVAL); } - return drm_gem_prime_export(dev, obj, flags); + /* Note: as soon as the BO is exported it becomes unpurgeable, because + * noone ever decrements the usecnt even if the reference held by the + * exported BO is released. This shouldn't be a problem since we don't + * expect exported BOs to be marked as purgeable. + */ + ret = vc4_bo_inc_usecnt(bo); + if (ret) { + DRM_ERROR("Failed to increment BO usecnt\n"); + return ERR_PTR(ret); + } + + dmabuf = drm_gem_prime_export(dev, obj, flags); + if (IS_ERR(dmabuf)) + vc4_bo_dec_usecnt(bo); + + return dmabuf; +} + +int vc4_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct drm_gem_object *obj = vma->vm_private_data; + struct vc4_bo *bo = to_vc4_bo(obj); + + /* The only reason we would end up here is when user-space accesses + * BO's memory after it's been purged. + */ + mutex_lock(&bo->madv_lock); + WARN_ON(bo->madv != __VC4_MADV_PURGED); + mutex_unlock(&bo->madv_lock); + + return VM_FAULT_SIGBUS; } int vc4_mmap(struct file *filp, struct vm_area_struct *vma) { struct drm_gem_object *gem_obj; + unsigned long vm_pgoff; struct vc4_bo *bo; int ret; @@ -507,16 +754,36 @@ int vc4_mmap(struct file *filp, struct vm_area_struct *vma) return -EINVAL; } + if (bo->madv != VC4_MADV_WILLNEED) { + DRM_DEBUG("mmaping of %s BO not allowed\n", + bo->madv == VC4_MADV_DONTNEED ? + "purgeable" : "purged"); + return -EINVAL; + } + /* * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map * the whole buffer. */ vma->vm_flags &= ~VM_PFNMAP; - vma->vm_pgoff = 0; + /* This ->vm_pgoff dance is needed to make all parties happy: + * - dma_mmap_wc() uses ->vm_pgoff as an offset within the allocated + * mem-region, hence the need to set it to zero (the value set by + * the DRM core is a virtual offset encoding the GEM object-id) + * - the mmap() core logic needs ->vm_pgoff to be restored to its + * initial value before returning from this function because it + * encodes the offset of this GEM in the dev->anon_inode pseudo-file + * and this information will be used when we invalidate userspace + * mappings with drm_vma_node_unmap() (called from vc4_gem_purge()). 
+ */ + vm_pgoff = vma->vm_pgoff; + vma->vm_pgoff = 0; ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr, bo->base.paddr, vma->vm_end - vma->vm_start); + vma->vm_pgoff = vm_pgoff; + if (ret) drm_gem_vm_close(vma); @@ -580,6 +847,8 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data, if (IS_ERR(bo)) return PTR_ERR(bo); + bo->madv = VC4_MADV_WILLNEED; + ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); drm_gem_object_put_unlocked(&bo->base.base); @@ -633,6 +902,8 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data, if (IS_ERR(bo)) return PTR_ERR(bo); + bo->madv = VC4_MADV_WILLNEED; + if (copy_from_user(bo->base.vaddr, (void __user *)(uintptr_t)args->data, args->size)) { diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 1c96edcb302b..e3c29729da2e 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -100,6 +100,7 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data, case DRM_VC4_PARAM_SUPPORTS_ETC1: case DRM_VC4_PARAM_SUPPORTS_THREADED_FS: case DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER: + case DRM_VC4_PARAM_SUPPORTS_MADVISE: args->value = true; break; default: @@ -117,6 +118,12 @@ static void vc4_lastclose(struct drm_device *dev) drm_fbdev_cma_restore_mode(vc4->fbdev); } +static const struct vm_operations_struct vc4_vm_ops = { + .fault = vc4_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + static const struct file_operations vc4_drm_fops = { .owner = THIS_MODULE, .open = drm_open, @@ -142,6 +149,7 @@ static const struct drm_ioctl_desc vc4_drm_ioctls[] = { DRM_IOCTL_DEF_DRV(VC4_SET_TILING, vc4_set_tiling_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(VC4_GET_TILING, vc4_get_tiling_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(VC4_LABEL_BO, vc4_label_bo_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VC4_GEM_MADVISE, vc4_gem_madvise_ioctl, DRM_RENDER_ALLOW), }; static struct drm_driver vc4_drm_driver = { @@ -166,7 +174,7 @@ static struct drm_driver vc4_drm_driver = { .gem_create_object = vc4_create_object, .gem_free_object_unlocked = vc4_free_object, - .gem_vm_ops = &drm_gem_cma_vm_ops, + .gem_vm_ops = &vc4_vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 87f2d8e5c134..9c0d380c96f2 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -74,6 +74,19 @@ struct vc4_dev { /* Protects bo_cache and bo_labels. */ struct mutex bo_lock; + /* Purgeable BO pool. All BOs in this pool can have their memory + * reclaimed if the driver is unable to allocate new BOs. We also + * keep stats related to the purge mechanism here. + */ + struct { + struct list_head list; + unsigned int num; + size_t size; + unsigned int purged_num; + size_t purged_size; + struct mutex lock; + } purgeable; + uint64_t dma_fence_context; /* Sequence number for the last job queued in bin_job_list. @@ -192,6 +205,16 @@ struct vc4_bo { * for user-allocated labels. */ int label; + + /* Count the number of active users. This is needed to determine + * whether we can move the BO to the purgeable list or not (when the BO + * is used by the GPU or the display engine we can't purge it). 
+ */ + refcount_t usecnt; + + /* Store purgeable/purged state here */ + u32 madv; + struct mutex madv_lock; }; static inline struct vc4_bo * @@ -503,6 +526,7 @@ int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int vc4_label_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int vc4_fault(struct vm_fault *vmf); int vc4_mmap(struct file *filp, struct vm_area_struct *vma); struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj); int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); @@ -513,6 +537,10 @@ void *vc4_prime_vmap(struct drm_gem_object *obj); int vc4_bo_cache_init(struct drm_device *dev); void vc4_bo_cache_destroy(struct drm_device *dev); int vc4_bo_stats_debugfs(struct seq_file *m, void *arg); +int vc4_bo_inc_usecnt(struct vc4_bo *bo); +void vc4_bo_dec_usecnt(struct vc4_bo *bo); +void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo); +void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo); /* vc4_crtc.c */ extern struct platform_driver vc4_crtc_driver; @@ -557,6 +585,8 @@ void vc4_job_handle_completed(struct vc4_dev *vc4); int vc4_queue_seqno_cb(struct drm_device *dev, struct vc4_seqno_cb *cb, uint64_t seqno, void (*func)(struct vc4_seqno_cb *cb)); +int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); /* vc4_hdmi.c */ extern struct platform_driver vc4_hdmi_driver; diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c index 554605af344e..94085f8bcd68 100644 --- a/drivers/gpu/drm/vc4/vc4_dsi.c +++ b/drivers/gpu/drm/vc4/vc4_dsi.c @@ -1360,6 +1360,27 @@ static void dsi_handle_error(struct vc4_dsi *dsi, *ret = IRQ_HANDLED; } +/* + * Initial handler for port 1 where we need the reg_dma workaround. + * The register DMA writes sleep, so we can't do it in the top half. + * Instead we use IRQF_ONESHOT so that the IRQ gets disabled in the + * parent interrupt contrller until our interrupt thread is done. + */ +static irqreturn_t vc4_dsi_irq_defer_to_thread_handler(int irq, void *data) +{ + struct vc4_dsi *dsi = data; + u32 stat = DSI_PORT_READ(INT_STAT); + + if (!stat) + return IRQ_NONE; + + return IRQ_WAKE_THREAD; +} + +/* + * Normal IRQ handler for port 0, or the threaded IRQ handler for port + * 1 where we need the reg_dma workaround. + */ static irqreturn_t vc4_dsi_irq_handler(int irq, void *data) { struct vc4_dsi *dsi = data; @@ -1539,8 +1560,15 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data) /* Clear any existing interrupt state. */ DSI_PORT_WRITE(INT_STAT, DSI_PORT_READ(INT_STAT)); - ret = devm_request_irq(dev, platform_get_irq(pdev, 0), - vc4_dsi_irq_handler, 0, "vc4 dsi", dsi); + if (dsi->reg_dma_mem) + ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0), + vc4_dsi_irq_defer_to_thread_handler, + vc4_dsi_irq_handler, + IRQF_ONESHOT, + "vc4 dsi", dsi); + else + ret = devm_request_irq(dev, platform_get_irq(pdev, 0), + vc4_dsi_irq_handler, 0, "vc4 dsi", dsi); if (ret) { if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to get interrupt: %d\n", ret); diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index d0c6bfb68c4e..e00ac2f3a264 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -188,11 +188,22 @@ vc4_save_hang_state(struct drm_device *dev) continue; for (j = 0; j < exec[i]->bo_count; j++) { + bo = to_vc4_bo(&exec[i]->bo[j]->base); + + /* Retain BOs just in case they were marked purgeable. 
+ * This prevents the BO from being purged before + * someone had a chance to dump the hang state. + */ + WARN_ON(!refcount_read(&bo->usecnt)); + refcount_inc(&bo->usecnt); drm_gem_object_get(&exec[i]->bo[j]->base); kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base; } list_for_each_entry(bo, &exec[i]->unref_list, unref_head) { + /* No need to retain BOs coming from the ->unref_list + * because they are naturally unpurgeable. + */ drm_gem_object_get(&bo->base.base); kernel_state->bo[j + prev_idx] = &bo->base.base; j++; @@ -233,6 +244,26 @@ vc4_save_hang_state(struct drm_device *dev) state->fdbgs = V3D_READ(V3D_FDBGS); state->errstat = V3D_READ(V3D_ERRSTAT); + /* We need to turn purgeable BOs into unpurgeable ones so that + * userspace has a chance to dump the hang state before the kernel + * decides to purge those BOs. + * Note that BO consistency at dump time cannot be guaranteed. For + * example, if the owner of these BOs decides to re-use them or mark + * them purgeable again there's nothing we can do to prevent it. + */ + for (i = 0; i < kernel_state->user_state.bo_count; i++) { + struct vc4_bo *bo = to_vc4_bo(kernel_state->bo[i]); + + if (bo->madv == __VC4_MADV_NOTSUPP) + continue; + + mutex_lock(&bo->madv_lock); + if (!WARN_ON(bo->madv == __VC4_MADV_PURGED)) + bo->madv = VC4_MADV_WILLNEED; + refcount_dec(&bo->usecnt); + mutex_unlock(&bo->madv_lock); + } + spin_lock_irqsave(&vc4->job_lock, irqflags); if (vc4->hang_state) { spin_unlock_irqrestore(&vc4->job_lock, irqflags); @@ -639,9 +670,6 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec, * The command validator needs to reference BOs by their index within * the submitted job's BO list. This does the validation of the job's * BO list and reference counting for the lifetime of the job. - * - * Note that this function doesn't need to unreference the BOs on - * failure, because that will happen at vc4_complete_exec() time. */ static int vc4_cl_lookup_bos(struct drm_device *dev, @@ -693,16 +721,47 @@ vc4_cl_lookup_bos(struct drm_device *dev, DRM_DEBUG("Failed to look up GEM BO %d: %d\n", i, handles[i]); ret = -EINVAL; - spin_unlock(&file_priv->table_lock); - goto fail; + break; } + drm_gem_object_get(bo); exec->bo[i] = (struct drm_gem_cma_object *)bo; } spin_unlock(&file_priv->table_lock); + if (ret) + goto fail_put_bo; + + for (i = 0; i < exec->bo_count; i++) { + ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base)); + if (ret) + goto fail_dec_usecnt; + } + + kvfree(handles); + return 0; + +fail_dec_usecnt: + /* Decrease usecnt on acquired objects. + * We cannot rely on vc4_complete_exec() to release resources here, + * because vc4_complete_exec() has no information about which BO has + * had its ->usecnt incremented. + * To make things easier we just free everything explicitly and set + * exec->bo to NULL so that vc4_complete_exec() skips the 'BO release' + * step. + */ + for (i-- ; i >= 0; i--) + vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base)); + +fail_put_bo: + /* Release any reference to acquired objects. 
*/ + for (i = 0; i < exec->bo_count && exec->bo[i]; i++) + drm_gem_object_put_unlocked(&exec->bo[i]->base); + fail: kvfree(handles); + kvfree(exec->bo); + exec->bo = NULL; return ret; } @@ -833,8 +892,12 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) dma_fence_signal(exec->fence); if (exec->bo) { - for (i = 0; i < exec->bo_count; i++) + for (i = 0; i < exec->bo_count; i++) { + struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base); + + vc4_bo_dec_usecnt(bo); drm_gem_object_put_unlocked(&exec->bo[i]->base); + } kvfree(exec->bo); } @@ -1098,6 +1161,9 @@ vc4_gem_init(struct drm_device *dev) INIT_WORK(&vc4->job_done_work, vc4_job_done_work); mutex_init(&vc4->power_lock); + + INIT_LIST_HEAD(&vc4->purgeable.list); + mutex_init(&vc4->purgeable.lock); } void @@ -1121,3 +1187,81 @@ vc4_gem_destroy(struct drm_device *dev) if (vc4->hang_state) vc4_free_hang_state(dev, vc4->hang_state); } + +int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_vc4_gem_madvise *args = data; + struct drm_gem_object *gem_obj; + struct vc4_bo *bo; + int ret; + + switch (args->madv) { + case VC4_MADV_DONTNEED: + case VC4_MADV_WILLNEED: + break; + default: + return -EINVAL; + } + + if (args->pad != 0) + return -EINVAL; + + gem_obj = drm_gem_object_lookup(file_priv, args->handle); + if (!gem_obj) { + DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); + return -ENOENT; + } + + bo = to_vc4_bo(gem_obj); + + /* Only BOs exposed to userspace can be purged. */ + if (bo->madv == __VC4_MADV_NOTSUPP) { + DRM_DEBUG("madvise not supported on this BO\n"); + ret = -EINVAL; + goto out_put_gem; + } + + /* Not sure it's safe to purge imported BOs. Let's just assume it's + * not until proven otherwise. + */ + if (gem_obj->import_attach) { + DRM_DEBUG("madvise not supported on imported BOs\n"); + ret = -EINVAL; + goto out_put_gem; + } + + mutex_lock(&bo->madv_lock); + + if (args->madv == VC4_MADV_DONTNEED && bo->madv == VC4_MADV_WILLNEED && + !refcount_read(&bo->usecnt)) { + /* If the BO is about to be marked as purgeable, is not used + * and is not already purgeable or purged, add it to the + * purgeable list. + */ + vc4_bo_add_to_purgeable_pool(bo); + } else if (args->madv == VC4_MADV_WILLNEED && + bo->madv == VC4_MADV_DONTNEED && + !refcount_read(&bo->usecnt)) { + /* The BO has not been purged yet, just remove it from + * the purgeable list. + */ + vc4_bo_remove_from_purgeable_pool(bo); + } + + /* Save the purged state. */ + args->retained = bo->madv != __VC4_MADV_PURGED; + + /* Update internal madv state only if the bo was not purged. 
*/ + if (bo->madv != __VC4_MADV_PURGED) + bo->madv = args->madv; + + mutex_unlock(&bo->madv_lock); + + ret = 0; + +out_put_gem: + drm_gem_object_put_unlocked(gem_obj); + + return ret; +} diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 3a767a038f72..423a23ed8fc2 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -23,6 +23,7 @@ #include <drm/drm_fb_cma_helper.h> #include <drm/drm_plane_helper.h> +#include "uapi/drm/vc4_drm.h" #include "vc4_drv.h" #include "vc4_regs.h" @@ -774,21 +775,40 @@ static int vc4_prepare_fb(struct drm_plane *plane, { struct vc4_bo *bo; struct dma_fence *fence; + int ret; if ((plane->state->fb == state->fb) || !state->fb) return 0; bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base); + + ret = vc4_bo_inc_usecnt(bo); + if (ret) + return ret; + fence = reservation_object_get_excl_rcu(bo->resv); drm_atomic_set_fence_for_plane(state, fence); return 0; } +static void vc4_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct vc4_bo *bo; + + if (plane->state->fb == state->fb || !state->fb) + return; + + bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base); + vc4_bo_dec_usecnt(bo); +} + static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = { .atomic_check = vc4_plane_atomic_check, .atomic_update = vc4_plane_atomic_update, .prepare_fb = vc4_prepare_fb, + .cleanup_fb = vc4_cleanup_fb, }; static void vc4_plane_destroy(struct drm_plane *plane) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index d1552d3e0652..bc5f6026573d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -360,8 +360,8 @@ static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc, ret = vmw_event_fence_action_queue(file_priv, fence, &event->base, - &event->event.tv_sec, - &event->event.tv_usec, + &event->event.vbl.tv_sec, + &event->event.vbl.tv_usec, true); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index ca3afae2db1f..90b5437fd787 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -549,8 +549,8 @@ static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc, ret = vmw_event_fence_action_queue(file_priv, fence, &event->base, - &event->event.tv_sec, - &event->event.tv_usec, + &event->event.vbl.tv_sec, + &event->event.vbl.tv_usec, true); vmw_fence_obj_unreference(&fence); } else { diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile index a1d9974cfcb5..4fb61bd57aee 100644 --- a/drivers/gpu/host1x/Makefile +++ b/drivers/gpu/host1x/Makefile @@ -11,6 +11,7 @@ host1x-y = \ hw/host1x01.o \ hw/host1x02.o \ hw/host1x04.o \ - hw/host1x05.o + hw/host1x05.o \ + hw/host1x06.o obj-$(CONFIG_TEGRA_HOST1X) += host1x.o diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index f9cde03030fd..66ea5acee820 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c @@ -403,12 +403,13 @@ static int host1x_device_add(struct host1x *host1x, device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask; device->dev.dma_mask = &device->dev.coherent_dma_mask; dev_set_name(&device->dev, "%s", driver->driver.name); - of_dma_configure(&device->dev, host1x->dev->of_node); device->dev.release = host1x_device_release; device->dev.of_node = host1x->dev->of_node; device->dev.bus = &host1x_bus_type; device->dev.parent = host1x->dev; + of_dma_configure(&device->dev, host1x->dev->of_node); + err = 
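From userspace, the madvise ioctl added above would be driven roughly as in the sketch below (uapi names taken from this series; error handling trimmed, helper name hypothetical). A client re-marking a cached BO as VC4_MADV_WILLNEED must check retained and reallocate or reinitialize the buffer if the kernel already purged it; support can be probed beforehand via DRM_VC4_PARAM_SUPPORTS_MADVISE:

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include "vc4_drm.h"	/* uapi header providing DRM_IOCTL_VC4_GEM_MADVISE */

static int vc4_bo_madvise(int drm_fd, uint32_t handle, uint32_t madv,
			  uint32_t *retained)
{
	struct drm_vc4_gem_madvise args = {
		.handle = handle,
		.madv = madv,	/* VC4_MADV_WILLNEED or VC4_MADV_DONTNEED */
	};

	if (ioctl(drm_fd, DRM_IOCTL_VC4_GEM_MADVISE, &args))
		return -errno;

	if (retained)
		*retained = args.retained;	/* 0: backing storage was purged */

	return 0;
}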
host1x_device_parse_dt(device, driver); if (err < 0) { kfree(device); diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c index db9b91d1384c..2fb93c27c1d9 100644 --- a/drivers/gpu/host1x/channel.c +++ b/drivers/gpu/host1x/channel.c @@ -128,8 +128,7 @@ static struct host1x_channel *acquire_unused_channel(struct host1x *host) * host1x_channel_request() - Allocate a channel * @device: Host1x unit this channel will be used to send commands to * - * Allocates a new host1x channel for @device. If there are no free channels, - * this will sleep until one becomes available. May return NULL if CDMA + * Allocates a new host1x channel for @device. May return NULL if CDMA * initialization fails. */ struct host1x_channel *host1x_channel_request(struct device *dev) diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c index 2aae0e63214c..dc77ec452ffc 100644 --- a/drivers/gpu/host1x/debug.c +++ b/drivers/gpu/host1x/debug.c @@ -40,7 +40,19 @@ void host1x_debug_output(struct output *o, const char *fmt, ...) len = vsnprintf(o->buf, sizeof(o->buf), fmt, args); va_end(args); - o->fn(o->ctx, o->buf, len); + o->fn(o->ctx, o->buf, len, false); +} + +void host1x_debug_cont(struct output *o, const char *fmt, ...) +{ + va_list args; + int len; + + va_start(args, fmt); + len = vsnprintf(o->buf, sizeof(o->buf), fmt, args); + va_end(args); + + o->fn(o->ctx, o->buf, len, true); } static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo) diff --git a/drivers/gpu/host1x/debug.h b/drivers/gpu/host1x/debug.h index 4595b2e0799f..990cce47e737 100644 --- a/drivers/gpu/host1x/debug.h +++ b/drivers/gpu/host1x/debug.h @@ -24,22 +24,28 @@ struct host1x; struct output { - void (*fn)(void *ctx, const char *str, size_t len); + void (*fn)(void *ctx, const char *str, size_t len, bool cont); void *ctx; char buf[256]; }; -static inline void write_to_seqfile(void *ctx, const char *str, size_t len) +static inline void write_to_seqfile(void *ctx, const char *str, size_t len, + bool cont) { seq_write((struct seq_file *)ctx, str, len); } -static inline void write_to_printk(void *ctx, const char *str, size_t len) +static inline void write_to_printk(void *ctx, const char *str, size_t len, + bool cont) { - pr_info("%s", str); + if (cont) + pr_cont("%s", str); + else + pr_info("%s", str); } void __printf(2, 3) host1x_debug_output(struct output *o, const char *fmt, ...); +void __printf(2, 3) host1x_debug_cont(struct output *o, const char *fmt, ...); extern unsigned int host1x_debug_trace_cmdbuf; diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 7f22c5c37660..773d6337aa30 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -39,6 +39,17 @@ #include "hw/host1x02.h" #include "hw/host1x04.h" #include "hw/host1x05.h" +#include "hw/host1x06.h" + +void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r) +{ + writel(v, host1x->hv_regs + r); +} + +u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r) +{ + return readl(host1x->hv_regs + r); +} void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r) { @@ -104,7 +115,19 @@ static const struct host1x_info host1x05_info = { .dma_mask = DMA_BIT_MASK(34), }; +static const struct host1x_info host1x06_info = { + .nb_channels = 63, + .nb_pts = 576, + .nb_mlocks = 24, + .nb_bases = 16, + .init = host1x06_init, + .sync_offset = 0x0, + .dma_mask = DMA_BIT_MASK(34), + .has_hypervisor = true, +}; + static const struct of_device_id host1x_of_match[] = { + { .compatible = "nvidia,tegra186-host1x", .data = 
&host1x06_info, }, { .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, }, { .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, }, { .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, }, @@ -116,20 +139,37 @@ MODULE_DEVICE_TABLE(of, host1x_of_match); static int host1x_probe(struct platform_device *pdev) { - const struct of_device_id *id; struct host1x *host; - struct resource *regs; + struct resource *regs, *hv_regs = NULL; int syncpt_irq; int err; - id = of_match_device(host1x_of_match, &pdev->dev); - if (!id) - return -EINVAL; + host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); + if (!host) + return -ENOMEM; - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!regs) { - dev_err(&pdev->dev, "failed to get registers\n"); - return -ENXIO; + host->info = of_device_get_match_data(&pdev->dev); + + if (host->info->has_hypervisor) { + regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vm"); + if (!regs) { + dev_err(&pdev->dev, "failed to get vm registers\n"); + return -ENXIO; + } + + hv_regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "hypervisor"); + if (!hv_regs) { + dev_err(&pdev->dev, + "failed to get hypervisor registers\n"); + return -ENXIO; + } + } else { + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) { + dev_err(&pdev->dev, "failed to get registers\n"); + return -ENXIO; + } } syncpt_irq = platform_get_irq(pdev, 0); @@ -138,15 +178,10 @@ static int host1x_probe(struct platform_device *pdev) return syncpt_irq; } - host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); - if (!host) - return -ENOMEM; - mutex_init(&host->devices_lock); INIT_LIST_HEAD(&host->devices); INIT_LIST_HEAD(&host->list); host->dev = &pdev->dev; - host->info = id->data; /* set common host1x device data */ platform_set_drvdata(pdev, host); @@ -155,6 +190,12 @@ static int host1x_probe(struct platform_device *pdev) if (IS_ERR(host->regs)) return PTR_ERR(host->regs); + if (host->info->has_hypervisor) { + host->hv_regs = devm_ioremap_resource(&pdev->dev, hv_regs); + if (IS_ERR(host->hv_regs)) + return PTR_ERR(host->hv_regs); + } + dma_set_mask_and_coherent(host->dev, host->info->dma_mask); if (host->info->init) { diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h index ffdbc15b749b..502769726480 100644 --- a/drivers/gpu/host1x/dev.h +++ b/drivers/gpu/host1x/dev.h @@ -79,6 +79,9 @@ struct host1x_syncpt_ops { u32 (*load)(struct host1x_syncpt *syncpt); int (*cpu_incr)(struct host1x_syncpt *syncpt); int (*patch_wait)(struct host1x_syncpt *syncpt, void *patch_addr); + void (*assign_to_channel)(struct host1x_syncpt *syncpt, + struct host1x_channel *channel); + void (*enable_protection)(struct host1x *host); }; struct host1x_intr_ops { @@ -100,12 +103,14 @@ struct host1x_info { int (*init)(struct host1x *host1x); /* initialize per SoC ops */ unsigned int sync_offset; /* offset of syncpoint registers */ u64 dma_mask; /* mask of addressable memory */ + bool has_hypervisor; /* has hypervisor registers */ }; struct host1x { const struct host1x_info *info; void __iomem *regs; + void __iomem *hv_regs; /* hypervisor region */ struct host1x_syncpt *syncpt; struct host1x_syncpt_base *bases; struct device *dev; @@ -140,6 +145,8 @@ struct host1x { struct list_head list; }; +void host1x_hypervisor_writel(struct host1x *host1x, u32 r, u32 v); +u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r); void host1x_sync_writel(struct host1x *host1x, u32 r, u32 v); u32 host1x_sync_readl(struct host1x *host1x, u32 r); void 
host1x_ch_writel(struct host1x_channel *ch, u32 r, u32 v); @@ -182,6 +189,18 @@ static inline int host1x_hw_syncpt_patch_wait(struct host1x *host, return host->syncpt_op->patch_wait(sp, patch_addr); } +static inline void host1x_hw_syncpt_assign_to_channel( + struct host1x *host, struct host1x_syncpt *sp, + struct host1x_channel *ch) +{ + return host->syncpt_op->assign_to_channel(sp, ch); +} + +static inline void host1x_hw_syncpt_enable_protection(struct host1x *host) +{ + return host->syncpt_op->enable_protection(host); +} + static inline int host1x_hw_intr_init_host_sync(struct host1x *host, u32 cpm, void (*syncpt_thresh_work)(struct work_struct *)) { diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c index 6b231119193e..ce320534cbed 100644 --- a/drivers/gpu/host1x/hw/cdma_hw.c +++ b/drivers/gpu/host1x/hw/cdma_hw.c @@ -172,6 +172,30 @@ static void cdma_stop(struct host1x_cdma *cdma) mutex_unlock(&cdma->lock); } +static void cdma_hw_cmdproc_stop(struct host1x *host, struct host1x_channel *ch, + bool stop) +{ +#if HOST1X_HW >= 6 + host1x_ch_writel(ch, stop ? 0x1 : 0x0, HOST1X_CHANNEL_CMDPROC_STOP); +#else + u32 cmdproc_stop = host1x_sync_readl(host, HOST1X_SYNC_CMDPROC_STOP); + if (stop) + cmdproc_stop |= BIT(ch->id); + else + cmdproc_stop &= ~BIT(ch->id); + host1x_sync_writel(host, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP); +#endif +} + +static void cdma_hw_teardown(struct host1x *host, struct host1x_channel *ch) +{ +#if HOST1X_HW >= 6 + host1x_ch_writel(ch, 0x1, HOST1X_CHANNEL_TEARDOWN); +#else + host1x_sync_writel(host, BIT(ch->id), HOST1X_SYNC_CH_TEARDOWN); +#endif +} + /* * Stops both channel's command processor and CDMA immediately. * Also, tears down the channel and resets corresponding module. @@ -180,7 +204,6 @@ static void cdma_freeze(struct host1x_cdma *cdma) { struct host1x *host = cdma_to_host1x(cdma); struct host1x_channel *ch = cdma_to_channel(cdma); - u32 cmdproc_stop; if (cdma->torndown && !cdma->running) { dev_warn(host->dev, "Already torn down\n"); @@ -189,9 +212,7 @@ static void cdma_freeze(struct host1x_cdma *cdma) dev_dbg(host->dev, "freezing channel (id %d)\n", ch->id); - cmdproc_stop = host1x_sync_readl(host, HOST1X_SYNC_CMDPROC_STOP); - cmdproc_stop |= BIT(ch->id); - host1x_sync_writel(host, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP); + cdma_hw_cmdproc_stop(host, ch, true); dev_dbg(host->dev, "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n", __func__, host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET), @@ -201,7 +222,7 @@ static void cdma_freeze(struct host1x_cdma *cdma) host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP, HOST1X_CHANNEL_DMACTRL); - host1x_sync_writel(host, BIT(ch->id), HOST1X_SYNC_CH_TEARDOWN); + cdma_hw_teardown(host, ch); cdma->running = false; cdma->torndown = true; @@ -211,15 +232,12 @@ static void cdma_resume(struct host1x_cdma *cdma, u32 getptr) { struct host1x *host1x = cdma_to_host1x(cdma); struct host1x_channel *ch = cdma_to_channel(cdma); - u32 cmdproc_stop; dev_dbg(host1x->dev, "resuming channel (id %u, DMAGET restart = 0x%x)\n", ch->id, getptr); - cmdproc_stop = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP); - cmdproc_stop &= ~BIT(ch->id); - host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP); + cdma_hw_cmdproc_stop(host1x, ch, false); cdma->torndown = false; cdma_timeout_restart(cdma, getptr); @@ -232,7 +250,7 @@ static void cdma_resume(struct host1x_cdma *cdma, u32 getptr) */ static void cdma_timeout_handler(struct work_struct *work) { - u32 prev_cmdproc, cmdproc_stop, syncpt_val; + u32 syncpt_val; 
struct host1x_cdma *cdma; struct host1x *host1x; struct host1x_channel *ch; @@ -254,12 +272,7 @@ static void cdma_timeout_handler(struct work_struct *work) } /* stop processing to get a clean snapshot */ - prev_cmdproc = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP); - cmdproc_stop = prev_cmdproc | BIT(ch->id); - host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP); - - dev_dbg(host1x->dev, "cdma_timeout: cmdproc was 0x%x is 0x%x\n", - prev_cmdproc, cmdproc_stop); + cdma_hw_cmdproc_stop(host1x, ch, true); syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt); @@ -268,9 +281,7 @@ static void cdma_timeout_handler(struct work_struct *work) dev_dbg(host1x->dev, "cdma_timeout: expired, but buffer had completed\n"); /* restore */ - cmdproc_stop = prev_cmdproc & ~(BIT(ch->id)); - host1x_sync_writel(host1x, cmdproc_stop, - HOST1X_SYNC_CMDPROC_STOP); + cdma_hw_cmdproc_stop(host1x, ch, false); mutex_unlock(&cdma->lock); return; } diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c index 8447a56c41ca..9af758785a11 100644 --- a/drivers/gpu/host1x/hw/channel_hw.c +++ b/drivers/gpu/host1x/hw/channel_hw.c @@ -147,6 +147,8 @@ static int channel_submit(struct host1x_job *job) syncval = host1x_syncpt_incr_max(sp, user_syncpt_incrs); + host1x_hw_syncpt_assign_to_channel(host, sp, ch); + job->syncpt_end = syncval; /* add a setclass for modules that require it */ @@ -178,10 +180,32 @@ error: return err; } +static void enable_gather_filter(struct host1x *host, + struct host1x_channel *ch) +{ +#if HOST1X_HW >= 6 + u32 val; + + if (!host->hv_regs) + return; + + val = host1x_hypervisor_readl( + host, HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(ch->id / 32)); + val |= BIT(ch->id % 32); + host1x_hypervisor_writel( + host, val, HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(ch->id / 32)); +#elif HOST1X_HW >= 4 + host1x_ch_writel(ch, + HOST1X_CHANNEL_CHANNELCTRL_KERNEL_FILTER_GBUFFER(1), + HOST1X_CHANNEL_CHANNELCTRL); +#endif +} + static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev, unsigned int index) { ch->regs = dev->regs + index * HOST1X_CHANNEL_SIZE; + enable_gather_filter(dev, ch); return 0; } diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c index 7a4a3286e4a7..989476801f9d 100644 --- a/drivers/gpu/host1x/hw/debug_hw.c +++ b/drivers/gpu/host1x/hw/debug_hw.c @@ -30,6 +30,13 @@ enum { HOST1X_OPCODE_IMM = 0x04, HOST1X_OPCODE_RESTART = 0x05, HOST1X_OPCODE_GATHER = 0x06, + HOST1X_OPCODE_SETSTRMID = 0x07, + HOST1X_OPCODE_SETAPPID = 0x08, + HOST1X_OPCODE_SETPYLD = 0x09, + HOST1X_OPCODE_INCR_W = 0x0a, + HOST1X_OPCODE_NONINCR_W = 0x0b, + HOST1X_OPCODE_GATHER_W = 0x0c, + HOST1X_OPCODE_RESTART_W = 0x0d, HOST1X_OPCODE_EXTEND = 0x0e, }; @@ -38,67 +45,122 @@ enum { HOST1X_OPCODE_EXTEND_RELEASE_MLOCK = 0x01, }; -static unsigned int show_channel_command(struct output *o, u32 val) +#define INVALID_PAYLOAD 0xffffffff + +static unsigned int show_channel_command(struct output *o, u32 val, + u32 *payload) { - unsigned int mask, subop; + unsigned int mask, subop, num, opcode; + + opcode = val >> 28; - switch (val >> 28) { + switch (opcode) { case HOST1X_OPCODE_SETCLASS: mask = val & 0x3f; if (mask) { - host1x_debug_output(o, "SETCL(class=%03x, offset=%03x, mask=%02x, [", + host1x_debug_cont(o, "SETCL(class=%03x, offset=%03x, mask=%02x, [", val >> 6 & 0x3ff, val >> 16 & 0xfff, mask); return hweight8(mask); } - host1x_debug_output(o, "SETCL(class=%03x)\n", val >> 6 & 0x3ff); + host1x_debug_cont(o, "SETCL(class=%03x)\n", val >> 6 & 0x3ff); return 0; 
case HOST1X_OPCODE_INCR: - host1x_debug_output(o, "INCR(offset=%03x, [", + num = val & 0xffff; + host1x_debug_cont(o, "INCR(offset=%03x, [", val >> 16 & 0xfff); - return val & 0xffff; + if (!num) + host1x_debug_cont(o, "])\n"); + + return num; case HOST1X_OPCODE_NONINCR: - host1x_debug_output(o, "NONINCR(offset=%03x, [", + num = val & 0xffff; + host1x_debug_cont(o, "NONINCR(offset=%03x, [", val >> 16 & 0xfff); - return val & 0xffff; + if (!num) + host1x_debug_cont(o, "])\n"); + + return num; case HOST1X_OPCODE_MASK: mask = val & 0xffff; - host1x_debug_output(o, "MASK(offset=%03x, mask=%03x, [", + host1x_debug_cont(o, "MASK(offset=%03x, mask=%03x, [", val >> 16 & 0xfff, mask); + if (!mask) + host1x_debug_cont(o, "])\n"); + return hweight16(mask); case HOST1X_OPCODE_IMM: - host1x_debug_output(o, "IMM(offset=%03x, data=%03x)\n", + host1x_debug_cont(o, "IMM(offset=%03x, data=%03x)\n", val >> 16 & 0xfff, val & 0xffff); return 0; case HOST1X_OPCODE_RESTART: - host1x_debug_output(o, "RESTART(offset=%08x)\n", val << 4); + host1x_debug_cont(o, "RESTART(offset=%08x)\n", val << 4); return 0; case HOST1X_OPCODE_GATHER: - host1x_debug_output(o, "GATHER(offset=%03x, insert=%d, type=%d, count=%04x, addr=[", + host1x_debug_cont(o, "GATHER(offset=%03x, insert=%d, type=%d, count=%04x, addr=[", val >> 16 & 0xfff, val >> 15 & 0x1, val >> 14 & 0x1, val & 0x3fff); return 1; +#if HOST1X_HW >= 6 + case HOST1X_OPCODE_SETSTRMID: + host1x_debug_cont(o, "SETSTRMID(offset=%06x)\n", + val & 0x3fffff); + return 0; + + case HOST1X_OPCODE_SETAPPID: + host1x_debug_cont(o, "SETAPPID(appid=%02x)\n", val & 0xff); + return 0; + + case HOST1X_OPCODE_SETPYLD: + *payload = val & 0xffff; + host1x_debug_cont(o, "SETPYLD(data=%04x)\n", *payload); + return 0; + + case HOST1X_OPCODE_INCR_W: + case HOST1X_OPCODE_NONINCR_W: + host1x_debug_cont(o, "%s(offset=%06x, ", + opcode == HOST1X_OPCODE_INCR_W ? 
+ "INCR_W" : "NONINCR_W", + val & 0x3fffff); + if (*payload == 0) { + host1x_debug_cont(o, "[])\n"); + return 0; + } else if (*payload == INVALID_PAYLOAD) { + host1x_debug_cont(o, "unknown)\n"); + return 0; + } else { + host1x_debug_cont(o, "["); + return *payload; + } + + case HOST1X_OPCODE_GATHER_W: + host1x_debug_cont(o, "GATHER_W(count=%04x, addr=[", + val & 0x3fff); + return 2; +#endif + case HOST1X_OPCODE_EXTEND: subop = val >> 24 & 0xf; if (subop == HOST1X_OPCODE_EXTEND_ACQUIRE_MLOCK) - host1x_debug_output(o, "ACQUIRE_MLOCK(index=%d)\n", + host1x_debug_cont(o, "ACQUIRE_MLOCK(index=%d)\n", val & 0xff); else if (subop == HOST1X_OPCODE_EXTEND_RELEASE_MLOCK) - host1x_debug_output(o, "RELEASE_MLOCK(index=%d)\n", + host1x_debug_cont(o, "RELEASE_MLOCK(index=%d)\n", val & 0xff); else - host1x_debug_output(o, "EXTEND_UNKNOWN(%08x)\n", val); + host1x_debug_cont(o, "EXTEND_UNKNOWN(%08x)\n", val); return 0; default: + host1x_debug_cont(o, "UNKNOWN\n"); return 0; } } @@ -110,6 +172,7 @@ static void show_gather(struct output *o, phys_addr_t phys_addr, /* Map dmaget cursor to corresponding mem handle */ u32 offset = phys_addr - pin_addr; unsigned int data_count = 0, i; + u32 payload = INVALID_PAYLOAD; /* * Sometimes we're given different hardware address to the same @@ -126,11 +189,11 @@ static void show_gather(struct output *o, phys_addr_t phys_addr, u32 val = *(map_addr + offset / 4 + i); if (!data_count) { - host1x_debug_output(o, "%08x: %08x:", addr, val); - data_count = show_channel_command(o, val); + host1x_debug_output(o, "%08x: %08x: ", addr, val); + data_count = show_channel_command(o, val, &payload); } else { - host1x_debug_output(o, "%08x%s", val, - data_count > 0 ? ", " : "])\n"); + host1x_debug_cont(o, "%08x%s", val, + data_count > 1 ? ", " : "])\n"); data_count--; } } @@ -174,138 +237,11 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma) } } -static void host1x_debug_show_channel_cdma(struct host1x *host, - struct host1x_channel *ch, - struct output *o) -{ - struct host1x_cdma *cdma = &ch->cdma; - u32 dmaput, dmaget, dmactrl; - u32 cbstat, cbread; - u32 val, base, baseval; - - dmaput = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT); - dmaget = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET); - dmactrl = host1x_ch_readl(ch, HOST1X_CHANNEL_DMACTRL); - cbread = host1x_sync_readl(host, HOST1X_SYNC_CBREAD(ch->id)); - cbstat = host1x_sync_readl(host, HOST1X_SYNC_CBSTAT(ch->id)); - - host1x_debug_output(o, "%u-%s: ", ch->id, dev_name(ch->dev)); - - if (HOST1X_CHANNEL_DMACTRL_DMASTOP_V(dmactrl) || - !ch->cdma.push_buffer.mapped) { - host1x_debug_output(o, "inactive\n\n"); - return; - } - - if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == HOST1X_CLASS_HOST1X && - HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) == - HOST1X_UCLASS_WAIT_SYNCPT) - host1x_debug_output(o, "waiting on syncpt %d val %d\n", - cbread >> 24, cbread & 0xffffff); - else if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == - HOST1X_CLASS_HOST1X && - HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) == - HOST1X_UCLASS_WAIT_SYNCPT_BASE) { - base = (cbread >> 16) & 0xff; - baseval = - host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(base)); - val = cbread & 0xffff; - host1x_debug_output(o, "waiting on syncpt %d val %d (base %d = %d; offset = %d)\n", - cbread >> 24, baseval + val, base, - baseval, val); - } else - host1x_debug_output(o, "active class %02x, offset %04x, val %08x\n", - HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat), - HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat), - cbread); - - host1x_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n", - 
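As a decoding illustration for the new wide opcodes (made-up words, not a real dump): the stream 0x90000002 0xa0001234 0xaaaaaaaa 0xbbbbbbbb would be printed by the code above roughly as

SETPYLD(data=0002)
INCR_W(offset=001234, [aaaaaaaa, bbbbbbbb])

because SETPYLD latches a payload count of two that the following INCR_W consumes as data words; without a preceding SETPYLD the payload stays at INVALID_PAYLOAD and the contents are reported as unknown.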
dmaput, dmaget, dmactrl); - host1x_debug_output(o, "CBREAD %08x, CBSTAT %08x\n", cbread, cbstat); - - show_channel_gathers(o, cdma); - host1x_debug_output(o, "\n"); -} - -static void host1x_debug_show_channel_fifo(struct host1x *host, - struct host1x_channel *ch, - struct output *o) -{ - u32 val, rd_ptr, wr_ptr, start, end; - unsigned int data_count = 0; - - host1x_debug_output(o, "%u: fifo:\n", ch->id); - - val = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT); - host1x_debug_output(o, "FIFOSTAT %08x\n", val); - if (HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(val)) { - host1x_debug_output(o, "[empty]\n"); - return; - } - - host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL); - host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) | - HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id), - HOST1X_SYNC_CFPEEK_CTRL); - - val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_PTRS); - rd_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(val); - wr_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(val); - - val = host1x_sync_readl(host, HOST1X_SYNC_CF_SETUP(ch->id)); - start = HOST1X_SYNC_CF_SETUP_BASE_V(val); - end = HOST1X_SYNC_CF_SETUP_LIMIT_V(val); - - do { - host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL); - host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) | - HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id) | - HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(rd_ptr), - HOST1X_SYNC_CFPEEK_CTRL); - val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_READ); - - if (!data_count) { - host1x_debug_output(o, "%08x:", val); - data_count = show_channel_command(o, val); - } else { - host1x_debug_output(o, "%08x%s", val, - data_count > 0 ? ", " : "])\n"); - data_count--; - } - - if (rd_ptr == end) - rd_ptr = start; - else - rd_ptr++; - } while (rd_ptr != wr_ptr); - - if (data_count) - host1x_debug_output(o, ", ...])\n"); - host1x_debug_output(o, "\n"); - - host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL); -} - -static void host1x_debug_show_mlocks(struct host1x *host, struct output *o) -{ - unsigned int i; - - host1x_debug_output(o, "---- mlocks ----\n"); - - for (i = 0; i < host1x_syncpt_nb_mlocks(host); i++) { - u32 owner = - host1x_sync_readl(host, HOST1X_SYNC_MLOCK_OWNER(i)); - if (HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(owner)) - host1x_debug_output(o, "%u: locked by channel %u\n", - i, HOST1X_SYNC_MLOCK_OWNER_CHID_V(owner)); - else if (HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(owner)) - host1x_debug_output(o, "%u: locked by cpu\n", i); - else - host1x_debug_output(o, "%u: unlocked\n", i); - } - - host1x_debug_output(o, "\n"); -} +#if HOST1X_HW >= 6 +#include "debug_hw_1x06.c" +#else +#include "debug_hw_1x01.c" +#endif static const struct host1x_debug_ops host1x_debug_ops = { .show_channel_cdma = host1x_debug_show_channel_cdma, diff --git a/drivers/gpu/host1x/hw/debug_hw_1x01.c b/drivers/gpu/host1x/hw/debug_hw_1x01.c new file mode 100644 index 000000000000..8790d5fd5f20 --- /dev/null +++ b/drivers/gpu/host1x/hw/debug_hw_1x01.c @@ -0,0 +1,154 @@ +/* + * Copyright (C) 2010 Google, Inc. + * Author: Erik Gilling <konkers@android.com> + * + * Copyright (C) 2011-2013 NVIDIA Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include "../dev.h" +#include "../debug.h" +#include "../cdma.h" +#include "../channel.h" + +static void host1x_debug_show_channel_cdma(struct host1x *host, + struct host1x_channel *ch, + struct output *o) +{ + struct host1x_cdma *cdma = &ch->cdma; + u32 dmaput, dmaget, dmactrl; + u32 cbstat, cbread; + u32 val, base, baseval; + + dmaput = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT); + dmaget = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET); + dmactrl = host1x_ch_readl(ch, HOST1X_CHANNEL_DMACTRL); + cbread = host1x_sync_readl(host, HOST1X_SYNC_CBREAD(ch->id)); + cbstat = host1x_sync_readl(host, HOST1X_SYNC_CBSTAT(ch->id)); + + host1x_debug_output(o, "%u-%s: ", ch->id, dev_name(ch->dev)); + + if (HOST1X_CHANNEL_DMACTRL_DMASTOP_V(dmactrl) || + !ch->cdma.push_buffer.mapped) { + host1x_debug_output(o, "inactive\n\n"); + return; + } + + if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == HOST1X_CLASS_HOST1X && + HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) == + HOST1X_UCLASS_WAIT_SYNCPT) + host1x_debug_output(o, "waiting on syncpt %d val %d\n", + cbread >> 24, cbread & 0xffffff); + else if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == + HOST1X_CLASS_HOST1X && + HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) == + HOST1X_UCLASS_WAIT_SYNCPT_BASE) { + base = (cbread >> 16) & 0xff; + baseval = + host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(base)); + val = cbread & 0xffff; + host1x_debug_output(o, "waiting on syncpt %d val %d (base %d = %d; offset = %d)\n", + cbread >> 24, baseval + val, base, + baseval, val); + } else + host1x_debug_output(o, "active class %02x, offset %04x, val %08x\n", + HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat), + HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat), + cbread); + + host1x_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n", + dmaput, dmaget, dmactrl); + host1x_debug_output(o, "CBREAD %08x, CBSTAT %08x\n", cbread, cbstat); + + show_channel_gathers(o, cdma); + host1x_debug_output(o, "\n"); +} + +static void host1x_debug_show_channel_fifo(struct host1x *host, + struct host1x_channel *ch, + struct output *o) +{ + u32 val, rd_ptr, wr_ptr, start, end; + unsigned int data_count = 0; + + host1x_debug_output(o, "%u: fifo:\n", ch->id); + + val = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT); + host1x_debug_output(o, "FIFOSTAT %08x\n", val); + if (HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(val)) { + host1x_debug_output(o, "[empty]\n"); + return; + } + + host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL); + host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) | + HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id), + HOST1X_SYNC_CFPEEK_CTRL); + + val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_PTRS); + rd_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(val); + wr_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(val); + + val = host1x_sync_readl(host, HOST1X_SYNC_CF_SETUP(ch->id)); + start = HOST1X_SYNC_CF_SETUP_BASE_V(val); + end = HOST1X_SYNC_CF_SETUP_LIMIT_V(val); + + do { + host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL); + host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) | + HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id) | + HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(rd_ptr), + HOST1X_SYNC_CFPEEK_CTRL); + val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_READ); + + if (!data_count) { + host1x_debug_output(o, "%08x: ", val); + data_count = show_channel_command(o, val, NULL); + } else { + host1x_debug_cont(o, "%08x%s", val, + data_count > 1 ? 
", " : "])\n"); + data_count--; + } + + if (rd_ptr == end) + rd_ptr = start; + else + rd_ptr++; + } while (rd_ptr != wr_ptr); + + if (data_count) + host1x_debug_cont(o, ", ...])\n"); + host1x_debug_output(o, "\n"); + + host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL); +} + +static void host1x_debug_show_mlocks(struct host1x *host, struct output *o) +{ + unsigned int i; + + host1x_debug_output(o, "---- mlocks ----\n"); + + for (i = 0; i < host1x_syncpt_nb_mlocks(host); i++) { + u32 owner = + host1x_sync_readl(host, HOST1X_SYNC_MLOCK_OWNER(i)); + if (HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(owner)) + host1x_debug_output(o, "%u: locked by channel %u\n", + i, HOST1X_SYNC_MLOCK_OWNER_CHID_V(owner)); + else if (HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(owner)) + host1x_debug_output(o, "%u: locked by cpu\n", i); + else + host1x_debug_output(o, "%u: unlocked\n", i); + } + + host1x_debug_output(o, "\n"); +} diff --git a/drivers/gpu/host1x/hw/debug_hw_1x06.c b/drivers/gpu/host1x/hw/debug_hw_1x06.c new file mode 100644 index 000000000000..b503c740c022 --- /dev/null +++ b/drivers/gpu/host1x/hw/debug_hw_1x06.c @@ -0,0 +1,135 @@ +/* + * Copyright (C) 2010 Google, Inc. + * Author: Erik Gilling <konkers@android.com> + * + * Copyright (C) 2011-2017 NVIDIA Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include "../dev.h" +#include "../debug.h" +#include "../cdma.h" +#include "../channel.h" + +static void host1x_debug_show_channel_cdma(struct host1x *host, + struct host1x_channel *ch, + struct output *o) +{ + struct host1x_cdma *cdma = &ch->cdma; + u32 dmaput, dmaget, dmactrl; + u32 offset, class; + u32 ch_stat; + + dmaput = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT); + dmaget = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET); + dmactrl = host1x_ch_readl(ch, HOST1X_CHANNEL_DMACTRL); + offset = host1x_ch_readl(ch, HOST1X_CHANNEL_CMDP_OFFSET); + class = host1x_ch_readl(ch, HOST1X_CHANNEL_CMDP_CLASS); + ch_stat = host1x_ch_readl(ch, HOST1X_CHANNEL_CHANNELSTAT); + + host1x_debug_output(o, "%u-%s: ", ch->id, dev_name(ch->dev)); + + if (dmactrl & HOST1X_CHANNEL_DMACTRL_DMASTOP || + !ch->cdma.push_buffer.mapped) { + host1x_debug_output(o, "inactive\n\n"); + return; + } + + if (class == HOST1X_CLASS_HOST1X && offset == HOST1X_UCLASS_WAIT_SYNCPT) + host1x_debug_output(o, "waiting on syncpt\n"); + else + host1x_debug_output(o, "active class %02x, offset %04x\n", + class, offset); + + host1x_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n", + dmaput, dmaget, dmactrl); + host1x_debug_output(o, "CHANNELSTAT %02x\n", ch_stat); + + show_channel_gathers(o, cdma); + host1x_debug_output(o, "\n"); +} + +static void host1x_debug_show_channel_fifo(struct host1x *host, + struct host1x_channel *ch, + struct output *o) +{ + u32 val, rd_ptr, wr_ptr, start, end; + u32 payload = INVALID_PAYLOAD; + unsigned int data_count = 0; + + host1x_debug_output(o, "%u: fifo:\n", ch->id); + + val = host1x_ch_readl(ch, HOST1X_CHANNEL_CMDFIFO_STAT); + host1x_debug_output(o, "CMDFIFO_STAT %08x\n", val); + if (val & HOST1X_CHANNEL_CMDFIFO_STAT_EMPTY) { + host1x_debug_output(o, "[empty]\n"); + return; + } + + val = 
host1x_ch_readl(ch, HOST1X_CHANNEL_CMDFIFO_RDATA); + host1x_debug_output(o, "CMDFIFO_RDATA %08x\n", val); + + /* Peek pointer values are invalid during SLCG, so disable it */ + host1x_hypervisor_writel(host, 0x1, HOST1X_HV_ICG_EN_OVERRIDE); + + val = 0; + val |= HOST1X_HV_CMDFIFO_PEEK_CTRL_ENABLE; + val |= HOST1X_HV_CMDFIFO_PEEK_CTRL_CHANNEL(ch->id); + host1x_hypervisor_writel(host, val, HOST1X_HV_CMDFIFO_PEEK_CTRL); + + val = host1x_hypervisor_readl(host, HOST1X_HV_CMDFIFO_PEEK_PTRS); + rd_ptr = HOST1X_HV_CMDFIFO_PEEK_PTRS_RD_PTR_V(val); + wr_ptr = HOST1X_HV_CMDFIFO_PEEK_PTRS_WR_PTR_V(val); + + val = host1x_hypervisor_readl(host, HOST1X_HV_CMDFIFO_SETUP(ch->id)); + start = HOST1X_HV_CMDFIFO_SETUP_BASE_V(val); + end = HOST1X_HV_CMDFIFO_SETUP_LIMIT_V(val); + + do { + val = 0; + val |= HOST1X_HV_CMDFIFO_PEEK_CTRL_ENABLE; + val |= HOST1X_HV_CMDFIFO_PEEK_CTRL_CHANNEL(ch->id); + val |= HOST1X_HV_CMDFIFO_PEEK_CTRL_ADDR(rd_ptr); + host1x_hypervisor_writel(host, val, + HOST1X_HV_CMDFIFO_PEEK_CTRL); + + val = host1x_hypervisor_readl(host, + HOST1X_HV_CMDFIFO_PEEK_READ); + + if (!data_count) { + host1x_debug_output(o, "%03x 0x%08x: ", + rd_ptr - start, val); + data_count = show_channel_command(o, val, &payload); + } else { + host1x_debug_cont(o, "%08x%s", val, + data_count > 1 ? ", " : "])\n"); + data_count--; + } + + if (rd_ptr == end) + rd_ptr = start; + else + rd_ptr++; + } while (rd_ptr != wr_ptr); + + if (data_count) + host1x_debug_cont(o, ", ...])\n"); + host1x_debug_output(o, "\n"); + + host1x_hypervisor_writel(host, 0x0, HOST1X_HV_CMDFIFO_PEEK_CTRL); + host1x_hypervisor_writel(host, 0x0, HOST1X_HV_ICG_EN_OVERRIDE); +} + +static void host1x_debug_show_mlocks(struct host1x *host, struct output *o) +{ + /* TODO */ +} diff --git a/drivers/gpu/host1x/hw/host1x01.c b/drivers/gpu/host1x/hw/host1x01.c index 859b73beb4d0..bb124f8b4af8 100644 --- a/drivers/gpu/host1x/hw/host1x01.c +++ b/drivers/gpu/host1x/hw/host1x01.c @@ -21,6 +21,8 @@ #include "host1x01_hardware.h" /* include code */ +#define HOST1X_HW 1 + #include "cdma_hw.c" #include "channel_hw.c" #include "debug_hw.c" diff --git a/drivers/gpu/host1x/hw/host1x02.c b/drivers/gpu/host1x/hw/host1x02.c index 928946c2144b..c5f85dbedb98 100644 --- a/drivers/gpu/host1x/hw/host1x02.c +++ b/drivers/gpu/host1x/hw/host1x02.c @@ -21,6 +21,8 @@ #include "host1x02_hardware.h" /* include code */ +#define HOST1X_HW 2 + #include "cdma_hw.c" #include "channel_hw.c" #include "debug_hw.c" diff --git a/drivers/gpu/host1x/hw/host1x04.c b/drivers/gpu/host1x/hw/host1x04.c index 8007c70fa9c4..f102a1a7743f 100644 --- a/drivers/gpu/host1x/hw/host1x04.c +++ b/drivers/gpu/host1x/hw/host1x04.c @@ -21,6 +21,8 @@ #include "host1x04_hardware.h" /* include code */ +#define HOST1X_HW 4 + #include "cdma_hw.c" #include "channel_hw.c" #include "debug_hw.c" diff --git a/drivers/gpu/host1x/hw/host1x05.c b/drivers/gpu/host1x/hw/host1x05.c index 047097ce3bad..2b1239d6ec67 100644 --- a/drivers/gpu/host1x/hw/host1x05.c +++ b/drivers/gpu/host1x/hw/host1x05.c @@ -21,6 +21,8 @@ #include "host1x05_hardware.h" /* include code */ +#define HOST1X_HW 5 + #include "cdma_hw.c" #include "channel_hw.c" #include "debug_hw.c" diff --git a/drivers/gpu/host1x/hw/host1x06.c b/drivers/gpu/host1x/hw/host1x06.c new file mode 100644 index 000000000000..a66230827c59 --- /dev/null +++ b/drivers/gpu/host1x/hw/host1x06.c @@ -0,0 +1,44 @@ +/* + * Host1x init for Tegra186 SoCs + * + * Copyright (c) 2017 NVIDIA Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +/* include hw specification */ +#include "host1x06.h" +#include "host1x06_hardware.h" + +/* include code */ +#define HOST1X_HW 6 + +#include "cdma_hw.c" +#include "channel_hw.c" +#include "debug_hw.c" +#include "intr_hw.c" +#include "syncpt_hw.c" + +#include "../dev.h" + +int host1x06_init(struct host1x *host) +{ + host->channel_op = &host1x_channel_ops; + host->cdma_op = &host1x_cdma_ops; + host->cdma_pb_op = &host1x_pushbuffer_ops; + host->syncpt_op = &host1x_syncpt_ops; + host->intr_op = &host1x_intr_ops; + host->debug_op = &host1x_debug_ops; + + return 0; +} diff --git a/drivers/gpu/host1x/hw/host1x06.h b/drivers/gpu/host1x/hw/host1x06.h new file mode 100644 index 000000000000..d9abe1489241 --- /dev/null +++ b/drivers/gpu/host1x/hw/host1x06.h @@ -0,0 +1,26 @@ +/* + * Host1x init for Tegra186 SoCs + * + * Copyright (c) 2017 NVIDIA Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef HOST1X_HOST1X06_H +#define HOST1X_HOST1X06_H + +struct host1x; + +int host1x06_init(struct host1x *host); + +#endif diff --git a/drivers/gpu/host1x/hw/host1x06_hardware.h b/drivers/gpu/host1x/hw/host1x06_hardware.h new file mode 100644 index 000000000000..3039c92ea605 --- /dev/null +++ b/drivers/gpu/host1x/hw/host1x06_hardware.h @@ -0,0 +1,142 @@ +/* + * Tegra host1x Register Offsets for Tegra186 + * + * Copyright (c) 2017 NVIDIA Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef __HOST1X_HOST1X06_HARDWARE_H +#define __HOST1X_HOST1X06_HARDWARE_H + +#include <linux/types.h> +#include <linux/bitops.h> + +#include "hw_host1x06_uclass.h" +#include "hw_host1x06_vm.h" +#include "hw_host1x06_hypervisor.h" + +static inline u32 host1x_class_host_wait_syncpt( + unsigned indx, unsigned threshold) +{ + return host1x_uclass_wait_syncpt_indx_f(indx) + | host1x_uclass_wait_syncpt_thresh_f(threshold); +} + +static inline u32 host1x_class_host_load_syncpt_base( + unsigned indx, unsigned threshold) +{ + return host1x_uclass_load_syncpt_base_base_indx_f(indx) + | host1x_uclass_load_syncpt_base_value_f(threshold); +} + +static inline u32 host1x_class_host_wait_syncpt_base( + unsigned indx, unsigned base_indx, unsigned offset) +{ + return host1x_uclass_wait_syncpt_base_indx_f(indx) + | host1x_uclass_wait_syncpt_base_base_indx_f(base_indx) + | host1x_uclass_wait_syncpt_base_offset_f(offset); +} + +static inline u32 host1x_class_host_incr_syncpt_base( + unsigned base_indx, unsigned offset) +{ + return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx) + | host1x_uclass_incr_syncpt_base_offset_f(offset); +} + +static inline u32 host1x_class_host_incr_syncpt( + unsigned cond, unsigned indx) +{ + return host1x_uclass_incr_syncpt_cond_f(cond) + | host1x_uclass_incr_syncpt_indx_f(indx); +} + +static inline u32 host1x_class_host_indoff_reg_write( + unsigned mod_id, unsigned offset, bool auto_inc) +{ + u32 v = host1x_uclass_indoff_indbe_f(0xf) + | host1x_uclass_indoff_indmodid_f(mod_id) + | host1x_uclass_indoff_indroffset_f(offset); + if (auto_inc) + v |= host1x_uclass_indoff_autoinc_f(1); + return v; +} + +static inline u32 host1x_class_host_indoff_reg_read( + unsigned mod_id, unsigned offset, bool auto_inc) +{ + u32 v = host1x_uclass_indoff_indmodid_f(mod_id) + | host1x_uclass_indoff_indroffset_f(offset) + | host1x_uclass_indoff_rwn_read_v(); + if (auto_inc) + v |= host1x_uclass_indoff_autoinc_f(1); + return v; +} + +/* cdma opcodes */ +static inline u32 host1x_opcode_setclass( + unsigned class_id, unsigned offset, unsigned mask) +{ + return (0 << 28) | (offset << 16) | (class_id << 6) | mask; +} + +static inline u32 host1x_opcode_incr(unsigned offset, unsigned count) +{ + return (1 << 28) | (offset << 16) | count; +} + +static inline u32 host1x_opcode_nonincr(unsigned offset, unsigned count) +{ + return (2 << 28) | (offset << 16) | count; +} + +static inline u32 host1x_opcode_mask(unsigned offset, unsigned mask) +{ + return (3 << 28) | (offset << 16) | mask; +} + +static inline u32 host1x_opcode_imm(unsigned offset, unsigned value) +{ + return (4 << 28) | (offset << 16) | value; +} + +static inline u32 host1x_opcode_imm_incr_syncpt(unsigned cond, unsigned indx) +{ + return host1x_opcode_imm(host1x_uclass_incr_syncpt_r(), + host1x_class_host_incr_syncpt(cond, indx)); +} + +static inline u32 host1x_opcode_restart(unsigned address) +{ + return (5 << 28) | (address >> 4); +} + +static inline u32 host1x_opcode_gather(unsigned count) +{ + return (6 << 28) | count; +} + +static inline u32 host1x_opcode_gather_nonincr(unsigned offset, unsigned count) +{ + return (6 << 28) | (offset << 16) | BIT(15) | count; +} + +static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count) +{ + return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count; +} + +#define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0) + +#endif diff --git a/drivers/gpu/host1x/hw/hw_host1x04_channel.h b/drivers/gpu/host1x/hw/hw_host1x04_channel.h index 95e6f96142b9..2e8b635aa660 100644 --- 
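To make the opcode encoding concrete, a hypothetical stream built with the helpers above (class constant and register offset are arbitrary choices for illustration, not values used by this series):

u32 stream[] = {
	/* select the host1x class, zero mask so no register writes yet */
	host1x_opcode_setclass(HOST1X_CLASS_HOST1X, 0, 0),
	/* write two consecutive registers starting at offset 0x500 */
	host1x_opcode_incr(0x500, 2),
	0xdeadbeef,		/* data for offset 0x500 */
	0xcafef00d,		/* data for offset 0x501 */
	HOST1X_OPCODE_NOP,	/* nonincr(0, 0) padding */
};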
a/drivers/gpu/host1x/hw/hw_host1x04_channel.h +++ b/drivers/gpu/host1x/hw/hw_host1x04_channel.h @@ -117,5 +117,17 @@ static inline u32 host1x_channel_dmactrl_dmainitget(void) } #define HOST1X_CHANNEL_DMACTRL_DMAINITGET \ host1x_channel_dmactrl_dmainitget() +static inline u32 host1x_channel_channelctrl_r(void) +{ + return 0x98; +} +#define HOST1X_CHANNEL_CHANNELCTRL \ + host1x_channel_channelctrl_r() +static inline u32 host1x_channel_channelctrl_kernel_filter_gbuffer_f(u32 v) +{ + return (v & 0x1) << 2; +} +#define HOST1X_CHANNEL_CHANNELCTRL_KERNEL_FILTER_GBUFFER(v) \ + host1x_channel_channelctrl_kernel_filter_gbuffer_f(v) #endif diff --git a/drivers/gpu/host1x/hw/hw_host1x05_channel.h b/drivers/gpu/host1x/hw/hw_host1x05_channel.h index fce6e2c1ff4c..abbbc2641ce6 100644 --- a/drivers/gpu/host1x/hw/hw_host1x05_channel.h +++ b/drivers/gpu/host1x/hw/hw_host1x05_channel.h @@ -117,5 +117,17 @@ static inline u32 host1x_channel_dmactrl_dmainitget(void) } #define HOST1X_CHANNEL_DMACTRL_DMAINITGET \ host1x_channel_dmactrl_dmainitget() +static inline u32 host1x_channel_channelctrl_r(void) +{ + return 0x98; +} +#define HOST1X_CHANNEL_CHANNELCTRL \ + host1x_channel_channelctrl_r() +static inline u32 host1x_channel_channelctrl_kernel_filter_gbuffer_f(u32 v) +{ + return (v & 0x1) << 2; +} +#define HOST1X_CHANNEL_CHANNELCTRL_KERNEL_FILTER_GBUFFER(v) \ + host1x_channel_channelctrl_kernel_filter_gbuffer_f(v) #endif diff --git a/drivers/gpu/host1x/hw/hw_host1x06_hypervisor.h b/drivers/gpu/host1x/hw/hw_host1x06_hypervisor.h new file mode 100644 index 000000000000..c05dab8a178b --- /dev/null +++ b/drivers/gpu/host1x/hw/hw_host1x06_hypervisor.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2017 NVIDIA Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + */ + +#define HOST1X_HV_SYNCPT_PROT_EN 0x1ac4 +#define HOST1X_HV_SYNCPT_PROT_EN_CH_EN BIT(1) +#define HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(x) (0x2020 + (x * 4)) +#define HOST1X_HV_CMDFIFO_PEEK_CTRL 0x233c +#define HOST1X_HV_CMDFIFO_PEEK_CTRL_ADDR(x) (x) +#define HOST1X_HV_CMDFIFO_PEEK_CTRL_CHANNEL(x) ((x) << 16) +#define HOST1X_HV_CMDFIFO_PEEK_CTRL_ENABLE BIT(31) +#define HOST1X_HV_CMDFIFO_PEEK_READ 0x2340 +#define HOST1X_HV_CMDFIFO_PEEK_PTRS 0x2344 +#define HOST1X_HV_CMDFIFO_PEEK_PTRS_WR_PTR_V(x) (((x) >> 16) & 0xfff) +#define HOST1X_HV_CMDFIFO_PEEK_PTRS_RD_PTR_V(x) ((x) & 0xfff) +#define HOST1X_HV_CMDFIFO_SETUP(x) (0x2588 + (x * 4)) +#define HOST1X_HV_CMDFIFO_SETUP_LIMIT_V(x) (((x) >> 16) & 0xfff) +#define HOST1X_HV_CMDFIFO_SETUP_BASE_V(x) ((x) & 0xfff) +#define HOST1X_HV_ICG_EN_OVERRIDE 0x2aa8 diff --git a/drivers/gpu/host1x/hw/hw_host1x06_uclass.h b/drivers/gpu/host1x/hw/hw_host1x06_uclass.h new file mode 100644 index 000000000000..4457486c72b0 --- /dev/null +++ b/drivers/gpu/host1x/hw/hw_host1x06_uclass.h @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2017 NVIDIA Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + */ + + /* + * Function naming determines intended use: + * + * <x>_r(void) : Returns the offset for register <x>. + * + * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. + * + * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. + * + * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field <y> of register <x>. This value + * can be |'d with others to produce a full register value for + * register <x>. + * + * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This + * value can be ~'d and then &'d to clear the value of field <y> for + * register <x>. + * + * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted + * to place it at field <y> of register <x>. This value can be |'d + * with others to produce a full register value for <x>. + * + * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register + * <x> value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field <y> of register <x>. + * + * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for + * field <y> of register <x>. This value is suitable for direct + * comparison with unshifted values appropriate for use in field <y> + * of register <x>. 
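A short illustration of that naming scheme (arbitrary values, using the WAIT_SYNCPT accessors defined just below):

	/* _f() helpers shift each value into its field so they can be OR'd */
	u32 v = host1x_uclass_wait_syncpt_indx_f(12) |
		host1x_uclass_wait_syncpt_thresh_f(0x100);

The matching _v() accessors go the other way, shifting a field back down to bit 0 for direct comparison, as HOST1X_HV_CMDFIFO_PEEK_PTRS_RD_PTR_V() does elsewhere in this series.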
+ */ + +#ifndef HOST1X_HW_HOST1X06_UCLASS_H +#define HOST1X_HW_HOST1X06_UCLASS_H + +static inline u32 host1x_uclass_incr_syncpt_r(void) +{ + return 0x0; +} +#define HOST1X_UCLASS_INCR_SYNCPT \ + host1x_uclass_incr_syncpt_r() +static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v) +{ + return (v & 0xff) << 8; +} +#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \ + host1x_uclass_incr_syncpt_cond_f(v) +static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v) +{ + return (v & 0xff) << 0; +} +#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \ + host1x_uclass_incr_syncpt_indx_f(v) +static inline u32 host1x_uclass_wait_syncpt_r(void) +{ + return 0x8; +} +#define HOST1X_UCLASS_WAIT_SYNCPT \ + host1x_uclass_wait_syncpt_r() +static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v) +{ + return (v & 0xff) << 24; +} +#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \ + host1x_uclass_wait_syncpt_indx_f(v) +static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v) +{ + return (v & 0xffffff) << 0; +} +#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \ + host1x_uclass_wait_syncpt_thresh_f(v) +static inline u32 host1x_uclass_wait_syncpt_base_r(void) +{ + return 0x9; +} +#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \ + host1x_uclass_wait_syncpt_base_r() +static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v) +{ + return (v & 0xff) << 24; +} +#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \ + host1x_uclass_wait_syncpt_base_indx_f(v) +static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v) +{ + return (v & 0xff) << 16; +} +#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \ + host1x_uclass_wait_syncpt_base_base_indx_f(v) +static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v) +{ + return (v & 0xffff) << 0; +} +#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \ + host1x_uclass_wait_syncpt_base_offset_f(v) +static inline u32 host1x_uclass_load_syncpt_base_r(void) +{ + return 0xb; +} +#define HOST1X_UCLASS_LOAD_SYNCPT_BASE \ + host1x_uclass_load_syncpt_base_r() +static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v) +{ + return (v & 0xff) << 24; +} +#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \ + host1x_uclass_load_syncpt_base_base_indx_f(v) +static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v) +{ + return (v & 0xffffff) << 0; +} +#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \ + host1x_uclass_load_syncpt_base_value_f(v) +static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v) +{ + return (v & 0xff) << 24; +} +#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \ + host1x_uclass_incr_syncpt_base_base_indx_f(v) +static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v) +{ + return (v & 0xffffff) << 0; +} +#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \ + host1x_uclass_incr_syncpt_base_offset_f(v) +static inline u32 host1x_uclass_indoff_r(void) +{ + return 0x2d; +} +#define HOST1X_UCLASS_INDOFF \ + host1x_uclass_indoff_r() +static inline u32 host1x_uclass_indoff_indbe_f(u32 v) +{ + return (v & 0xf) << 28; +} +#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \ + host1x_uclass_indoff_indbe_f(v) +static inline u32 host1x_uclass_indoff_autoinc_f(u32 v) +{ + return (v & 0x1) << 27; +} +#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \ + host1x_uclass_indoff_autoinc_f(v) +static inline u32 host1x_uclass_indoff_indmodid_f(u32 v) +{ + return (v & 0xff) << 18; +} +#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \ + host1x_uclass_indoff_indmodid_f(v) +static inline u32 host1x_uclass_indoff_indroffset_f(u32 v) +{ + return (v & 0xffff) << 2; +} +#define 
HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \ + host1x_uclass_indoff_indroffset_f(v) +static inline u32 host1x_uclass_indoff_rwn_read_v(void) +{ + return 1; +} +#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \ + host1x_uclass_indoff_indroffset_f(v) + +#endif diff --git a/drivers/gpu/host1x/hw/hw_host1x06_vm.h b/drivers/gpu/host1x/hw/hw_host1x06_vm.h new file mode 100644 index 000000000000..e54b33902332 --- /dev/null +++ b/drivers/gpu/host1x/hw/hw_host1x06_vm.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2017 NVIDIA Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + */ + +#define HOST1X_CHANNEL_DMASTART 0x0000 +#define HOST1X_CHANNEL_DMASTART_HI 0x0004 +#define HOST1X_CHANNEL_DMAPUT 0x0008 +#define HOST1X_CHANNEL_DMAPUT_HI 0x000c +#define HOST1X_CHANNEL_DMAGET 0x0010 +#define HOST1X_CHANNEL_DMAGET_HI 0x0014 +#define HOST1X_CHANNEL_DMAEND 0x0018 +#define HOST1X_CHANNEL_DMAEND_HI 0x001c +#define HOST1X_CHANNEL_DMACTRL 0x0020 +#define HOST1X_CHANNEL_DMACTRL_DMASTOP BIT(0) +#define HOST1X_CHANNEL_DMACTRL_DMAGETRST BIT(1) +#define HOST1X_CHANNEL_DMACTRL_DMAINITGET BIT(2) +#define HOST1X_CHANNEL_CMDFIFO_STAT 0x0024 +#define HOST1X_CHANNEL_CMDFIFO_STAT_EMPTY BIT(13) +#define HOST1X_CHANNEL_CMDFIFO_RDATA 0x0028 +#define HOST1X_CHANNEL_CMDP_OFFSET 0x0030 +#define HOST1X_CHANNEL_CMDP_CLASS 0x0034 +#define HOST1X_CHANNEL_CHANNELSTAT 0x0038 +#define HOST1X_CHANNEL_CMDPROC_STOP 0x0048 +#define HOST1X_CHANNEL_TEARDOWN 0x004c + +#define HOST1X_SYNC_SYNCPT_CPU_INCR(x) (0x6400 + 4*(x)) +#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(x) (0x6464 + 4*(x)) +#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(x) (0x652c + 4*(x)) +#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(x) (0x6590 + 4*(x)) +#define HOST1X_SYNC_SYNCPT_BASE(x) (0x8000 + 4*(x)) +#define HOST1X_SYNC_SYNCPT(x) (0x8080 + 4*(x)) +#define HOST1X_SYNC_SYNCPT_INT_THRESH(x) (0x8a00 + 4*(x)) +#define HOST1X_SYNC_SYNCPT_CH_APP(x) (0x9384 + 4*(x)) +#define HOST1X_SYNC_SYNCPT_CH_APP_CH(v) (((v) & 0x3f) << 8) diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c index 37ebb51703fa..329239237090 100644 --- a/drivers/gpu/host1x/hw/intr_hw.c +++ b/drivers/gpu/host1x/hw/intr_hw.c @@ -72,6 +72,23 @@ static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host) } } +static void intr_hw_init(struct host1x *host, u32 cpm) +{ +#if HOST1X_HW < 6 + /* disable the ip_busy_timeout. this prevents write drops */ + host1x_sync_writel(host, 0, HOST1X_SYNC_IP_BUSY_TIMEOUT); + + /* + * increase the auto-ack timout to the maximum value. 2d will hang + * otherwise on Tegra2. + */ + host1x_sync_writel(host, 0xff, HOST1X_SYNC_CTXSW_TIMEOUT_CFG); + + /* update host clocks per usec */ + host1x_sync_writel(host, cpm, HOST1X_SYNC_USEC_CLK); +#endif +} + static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm, void (*syncpt_thresh_work)(struct work_struct *)) @@ -92,17 +109,7 @@ _host1x_intr_init_host_sync(struct host1x *host, u32 cpm, return err; } - /* disable the ip_busy_timeout. 
this prevents write drops */ - host1x_sync_writel(host, 0, HOST1X_SYNC_IP_BUSY_TIMEOUT); - - /* - * increase the auto-ack timout to the maximum value. 2d will hang - * otherwise on Tegra2. - */ - host1x_sync_writel(host, 0xff, HOST1X_SYNC_CTXSW_TIMEOUT_CFG); - - /* update host clocks per usec */ - host1x_sync_writel(host, cpm, HOST1X_SYNC_USEC_CLK); + intr_hw_init(host, cpm); return 0; } diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c index 7b0270d60742..7dfd47d74f89 100644 --- a/drivers/gpu/host1x/hw/syncpt_hw.c +++ b/drivers/gpu/host1x/hw/syncpt_hw.c @@ -106,6 +106,50 @@ static int syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr) return 0; } +/** + * syncpt_assign_to_channel() - Assign syncpoint to channel + * @sp: syncpoint + * @ch: channel + * + * On chips with the syncpoint protection feature (Tegra186+), assign @sp to + * @ch, preventing other channels from incrementing the syncpoints. If @ch is + * NULL, unassigns the syncpoint. + * + * On older chips, do nothing. + */ +static void syncpt_assign_to_channel(struct host1x_syncpt *sp, + struct host1x_channel *ch) +{ +#if HOST1X_HW >= 6 + struct host1x *host = sp->host; + + if (!host->hv_regs) + return; + + host1x_sync_writel(host, + HOST1X_SYNC_SYNCPT_CH_APP_CH(ch ? ch->id : 0xff), + HOST1X_SYNC_SYNCPT_CH_APP(sp->id)); +#endif +} + +/** + * syncpt_enable_protection() - Enable syncpoint protection + * @host: host1x instance + * + * On chips with the syncpoint protection feature (Tegra186+), enable this + * feature. On older chips, do nothing. + */ +static void syncpt_enable_protection(struct host1x *host) +{ +#if HOST1X_HW >= 6 + if (!host->hv_regs) + return; + + host1x_hypervisor_writel(host, HOST1X_HV_SYNCPT_PROT_EN_CH_EN, + HOST1X_HV_SYNCPT_PROT_EN); +#endif +} + static const struct host1x_syncpt_ops host1x_syncpt_ops = { .restore = syncpt_restore, .restore_wait_base = syncpt_restore_wait_base, @@ -113,4 +157,6 @@ static const struct host1x_syncpt_ops host1x_syncpt_ops = { .load = syncpt_load, .cpu_incr = syncpt_cpu_incr, .patch_wait = syncpt_patch_wait, + .assign_to_channel = syncpt_assign_to_channel, + .enable_protection = syncpt_enable_protection, }; diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c index 048ac9e344ce..a2a952adc136 100644 --- a/drivers/gpu/host1x/syncpt.c +++ b/drivers/gpu/host1x/syncpt.c @@ -54,7 +54,7 @@ static void host1x_syncpt_base_free(struct host1x_syncpt_base *base) } static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host, - struct device *dev, + struct host1x_client *client, unsigned long flags) { int i; @@ -76,11 +76,11 @@ static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host, } name = kasprintf(GFP_KERNEL, "%02u-%s", sp->id, - dev ? dev_name(dev) : NULL); + client ? dev_name(client->dev) : NULL); if (!name) goto free_base; - sp->dev = dev; + sp->client = client; sp->name = name; if (flags & HOST1X_SYNCPT_CLIENT_MANAGED) @@ -398,6 +398,13 @@ int host1x_syncpt_init(struct host1x *host) for (i = 0; i < host->info->nb_pts; i++) { syncpt[i].id = i; syncpt[i].host = host; + + /* + * Unassign syncpt from channels for purposes of Tegra186 + * syncpoint protection. This prevents any channel from + * accessing it until it is reassigned. 
+ */ + host1x_hw_syncpt_assign_to_channel(host, &syncpt[i], NULL); } for (i = 0; i < host->info->nb_bases; i++) @@ -408,6 +415,7 @@ int host1x_syncpt_init(struct host1x *host) host->bases = bases; host1x_syncpt_restore(host); + host1x_hw_syncpt_enable_protection(host); /* Allocate sync point to use for clearing waits for expired fences */ host->nop_sp = host1x_syncpt_alloc(host, NULL, 0); @@ -419,7 +427,7 @@ int host1x_syncpt_init(struct host1x *host) /** * host1x_syncpt_request() - request a syncpoint - * @dev: device requesting the syncpoint + * @client: client requesting the syncpoint * @flags: flags * * host1x client drivers can use this function to allocate a syncpoint for @@ -427,12 +435,12 @@ int host1x_syncpt_init(struct host1x *host) * use by the client exclusively. When no longer using a syncpoint, a host1x * client driver needs to release it using host1x_syncpt_free(). */ -struct host1x_syncpt *host1x_syncpt_request(struct device *dev, +struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client, unsigned long flags) { - struct host1x *host = dev_get_drvdata(dev->parent); + struct host1x *host = dev_get_drvdata(client->parent->parent); - return host1x_syncpt_alloc(host, dev, flags); + return host1x_syncpt_alloc(host, client, flags); } EXPORT_SYMBOL(host1x_syncpt_request); @@ -456,7 +464,7 @@ void host1x_syncpt_free(struct host1x_syncpt *sp) host1x_syncpt_base_free(sp->base); kfree(sp->name); sp->base = NULL; - sp->dev = NULL; + sp->client = NULL; sp->name = NULL; sp->client_managed = false; diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h index f719205105ac..9d88d37c2397 100644 --- a/drivers/gpu/host1x/syncpt.h +++ b/drivers/gpu/host1x/syncpt.h @@ -44,7 +44,7 @@ struct host1x_syncpt { const char *name; bool client_managed; struct host1x *host; - struct device *dev; + struct host1x_client *client; struct host1x_syncpt_base *base; /* interrupt data */ diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index 76875f6299b8..d35d6d271f3f 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -1402,29 +1402,14 @@ static struct miscdevice vga_arb_device = { MISC_DYNAMIC_MINOR, "vga_arbiter", &vga_arb_device_fops }; -static int __init vga_arb_device_init(void) +static void __init vga_arb_select_default_device(void) { - int rc; struct pci_dev *pdev; struct vga_device *vgadev; - rc = misc_register(&vga_arb_device); - if (rc < 0) - pr_err("error %d registering device\n", rc); - - bus_register_notifier(&pci_bus_type, &pci_notifier); - - /* We add all pci devices satisfying vga class in the arbiter by - * default */ - pdev = NULL; - while ((pdev = - pci_get_subsys(PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, - PCI_ANY_ID, pdev)) != NULL) - vga_arbiter_add_pci_device(pdev); - +#if defined(CONFIG_X86) || defined(CONFIG_IA64) list_for_each_entry(vgadev, &vga_list, list) { struct device *dev = &vgadev->pdev->dev; -#if defined(CONFIG_X86) || defined(CONFIG_IA64) /* * Override vga_arbiter_add_pci_device()'s I/O based detection * as it may take the wrong device (e.g. 
on Apple system under @@ -1461,13 +1446,66 @@ static int __init vga_arb_device_init(void) vgaarb_info(dev, "overriding boot device\n"); vga_set_default_device(vgadev->pdev); } + } #endif + + if (!vga_default_device()) { + list_for_each_entry(vgadev, &vga_list, list) { + struct device *dev = &vgadev->pdev->dev; + u16 cmd; + + pdev = vgadev->pdev; + pci_read_config_word(pdev, PCI_COMMAND, &cmd); + if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { + vgaarb_info(dev, "setting as boot device (VGA legacy resources not available)\n"); + vga_set_default_device(pdev); + break; + } + } + } + + if (!vga_default_device()) { + vgadev = list_first_entry_or_null(&vga_list, + struct vga_device, list); + if (vgadev) { + struct device *dev = &vgadev->pdev->dev; + vgaarb_info(dev, "setting as boot device (VGA legacy resources not available)\n"); + vga_set_default_device(vgadev->pdev); + } + } +} + +static int __init vga_arb_device_init(void) +{ + int rc; + struct pci_dev *pdev; + struct vga_device *vgadev; + + rc = misc_register(&vga_arb_device); + if (rc < 0) + pr_err("error %d registering device\n", rc); + + bus_register_notifier(&pci_bus_type, &pci_notifier); + + /* We add all PCI devices satisfying VGA class in the arbiter by + * default */ + pdev = NULL; + while ((pdev = + pci_get_subsys(PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, + PCI_ANY_ID, pdev)) != NULL) + vga_arbiter_add_pci_device(pdev); + + list_for_each_entry(vgadev, &vga_list, list) { + struct device *dev = &vgadev->pdev->dev; + if (vgadev->bridge_has_one_vga) vgaarb_info(dev, "bridge control possible\n"); else vgaarb_info(dev, "no bridge control possible\n"); } + vga_arb_select_default_device(); + pr_info("loaded\n"); return rc; } diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h index 6a58e2e91a0f..848b463a0af5 100644 --- a/include/drm/drm_vblank.h +++ b/include/drm/drm_vblank.h @@ -48,9 +48,17 @@ struct drm_pending_vblank_event { */ unsigned int pipe; /** + * @sequence: frame event should be triggered at + */ + u64 sequence; + /** * @event: Actual event which will be sent to userspace. */ - struct drm_event_vblank event; + union { + struct drm_event base; + struct drm_event_vblank vbl; + struct drm_event_crtc_sequence seq; + } event; }; /** @@ -88,7 +96,7 @@ struct drm_vblank_crtc { /** * @count: Current software vblank counter. */ - u32 count; + u64 count; /** * @time: Vblank timestamp corresponding to @count. */ @@ -152,13 +160,16 @@ struct drm_vblank_crtc { }; int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs); -u32 drm_crtc_vblank_count(struct drm_crtc *crtc); -u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, +u64 drm_crtc_vblank_count(struct drm_crtc *crtc); +u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, ktime_t *vblanktime); void drm_crtc_send_vblank_event(struct drm_crtc *crtc, struct drm_pending_vblank_event *e); void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, struct drm_pending_vblank_event *e); +void drm_vblank_set_event(struct drm_pending_vblank_event *e, + u64 *seq, + ktime_t *now); bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe); bool drm_crtc_handle_vblank(struct drm_crtc *crtc); int drm_crtc_vblank_get(struct drm_crtc *crtc); diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index ca974224d92e..efdabbb64e3c 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -128,7 +128,7 @@ struct dma_fence_cb { * implementation know that there is another driver waiting on * the signal (ie. hw->sw case). 
* - * This function can be called called from atomic context, but not + * This function can be called from atomic context, but not * from irq context, so normal spinlocks can be used. * * A return value of false indicates the fence already passed, diff --git a/include/linux/host1x.h b/include/linux/host1x.h index 630b1a98ab58..ddf7f9ca86cc 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -157,7 +157,7 @@ int host1x_syncpt_incr(struct host1x_syncpt *sp); u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs); int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout, u32 *value); -struct host1x_syncpt *host1x_syncpt_request(struct device *dev, +struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client, unsigned long flags); void host1x_syncpt_free(struct host1x_syncpt *sp); diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 97677cd6964d..91d83c1747c0 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -737,6 +737,28 @@ struct drm_syncobj_array { __u32 pad; }; +/* Query current scanout sequence number */ +struct drm_crtc_get_sequence { + __u32 crtc_id; /* requested crtc_id */ + __u32 active; /* return: crtc output is active */ + __u64 sequence; /* return: most recent vblank sequence */ + __s64 sequence_ns; /* return: most recent time of first pixel out */ +}; + +/* Queue event to be delivered at specified sequence. Time stamp marks + * when the first pixel of the refresh cycle leaves the display engine + * for the display + */ +#define DRM_CRTC_SEQUENCE_RELATIVE 0x00000001 /* sequence is relative to current */ +#define DRM_CRTC_SEQUENCE_NEXT_ON_MISS 0x00000002 /* Use next sequence if we've missed */ + +struct drm_crtc_queue_sequence { + __u32 crtc_id; + __u32 flags; + __u64 sequence; /* on input, target sequence. on output, actual sequence */ + __u64 user_data; /* user data passed to event */ +}; + #if defined(__cplusplus) } #endif @@ -819,6 +841,9 @@ extern "C" { #define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank) +#define DRM_IOCTL_CRTC_GET_SEQUENCE DRM_IOWR(0x3b, struct drm_crtc_get_sequence) +#define DRM_IOCTL_CRTC_QUEUE_SEQUENCE DRM_IOWR(0x3c, struct drm_crtc_queue_sequence) + #define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw) #define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res) @@ -893,6 +918,7 @@ struct drm_event { #define DRM_EVENT_VBLANK 0x01 #define DRM_EVENT_FLIP_COMPLETE 0x02 +#define DRM_EVENT_CRTC_SEQUENCE 0x03 struct drm_event_vblank { struct drm_event base; @@ -903,6 +929,16 @@ struct drm_event_vblank { __u32 crtc_id; /* 0 on older kernels that do not support this */ }; +/* Event delivered at sequence. 
Time stamp marks when the first pixel + * of the refresh cycle leaves the display engine for the display + */ +struct drm_event_crtc_sequence { + struct drm_event base; + __u64 user_data; + __s64 time_ns; + __u64 sequence; +}; + /* typedef area */ #ifndef __KERNEL__ typedef struct drm_clip_rect drm_clip_rect_t; diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h index afae87004963..52263b575bdc 100644 --- a/include/uapi/drm/vc4_drm.h +++ b/include/uapi/drm/vc4_drm.h @@ -41,6 +41,7 @@ extern "C" { #define DRM_VC4_SET_TILING 0x08 #define DRM_VC4_GET_TILING 0x09 #define DRM_VC4_LABEL_BO 0x0a +#define DRM_VC4_GEM_MADVISE 0x0b #define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl) #define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno) @@ -53,6 +54,7 @@ extern "C" { #define DRM_IOCTL_VC4_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SET_TILING, struct drm_vc4_set_tiling) #define DRM_IOCTL_VC4_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling) #define DRM_IOCTL_VC4_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_LABEL_BO, struct drm_vc4_label_bo) +#define DRM_IOCTL_VC4_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GEM_MADVISE, struct drm_vc4_gem_madvise) struct drm_vc4_submit_rcl_surface { __u32 hindex; /* Handle index, or ~0 if not present. */ @@ -305,6 +307,7 @@ struct drm_vc4_get_hang_state { #define DRM_VC4_PARAM_SUPPORTS_ETC1 4 #define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5 #define DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER 6 +#define DRM_VC4_PARAM_SUPPORTS_MADVISE 7 struct drm_vc4_get_param { __u32 param; @@ -333,6 +336,22 @@ struct drm_vc4_label_bo { __u64 name; }; +/* + * States prefixed with '__' are internal states and cannot be passed to the + * DRM_IOCTL_VC4_GEM_MADVISE ioctl. + */ +#define VC4_MADV_WILLNEED 0 +#define VC4_MADV_DONTNEED 1 +#define __VC4_MADV_PURGED 2 +#define __VC4_MADV_NOTSUPP 3 + +struct drm_vc4_gem_madvise { + __u32 handle; + __u32 madv; + __u32 retained; + __u32 pad; +}; + #if defined(__cplusplus) } #endif |
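
Editorial aside: the function-naming comment at the top of the new hw_host1x06_uclass.h header describes how the generated accessors compose register values: an <x>_<y>_f() helper shifts a value into its field so results can be OR'd together, and decoding reverses the pattern. A minimal standalone sketch of that pattern, re-declaring two of the INDOFF field helpers from the hunk above so it compiles on its own; the example values are invented:

/* Standalone illustration of the <x>_<y>_f() / mask-and-shift pattern from
 * the generated header above; helpers re-declared here, values made up. */
#include <stdint.h>
#include <stdio.h>

static inline uint32_t host1x_uclass_indoff_indmodid_f(uint32_t v)
{
	return (v & 0xff) << 18;
}

static inline uint32_t host1x_uclass_indoff_indroffset_f(uint32_t v)
{
	return (v & 0xffff) << 2;
}

int main(void)
{
	/* _f() helpers shift each value into its field; OR them together to
	 * build the full INDOFF register value. */
	uint32_t indoff = host1x_uclass_indoff_indmodid_f(0x2a) |
			  host1x_uclass_indoff_indroffset_f(0x100);

	/* Decoding reverses the pattern: shift the field's LSB down to bit 0,
	 * then mask (this is what a generated _v() accessor would do). */
	uint32_t modid = (indoff >> 18) & 0xff;

	printf("INDOFF = 0x%08x, INDMODID = 0x%02x\n", indoff, modid);
	return 0;
}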
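
Editorial aside: the drm_crtc_get_sequence / drm_crtc_queue_sequence structures and DRM_IOCTL_CRTC_* numbers added to include/uapi/drm/drm.h above are driven from userspace with plain ioctl(). A hedged sketch of that flow, assuming kernel and uapi headers carrying this patch, that /dev/dri/card0 exists, and that CRTC_ID is a valid CRTC object id looked up elsewhere (e.g. via drmModeGetResources); error handling is minimal:

/* Sketch of the new CRTC sequence uapi: query the current vblank sequence,
 * queue an event two vblanks ahead, then read it back from the DRM fd.
 * CRTC_ID is a placeholder; a real program looks it up via KMS resources. */
#include <drm/drm.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define CRTC_ID 42	/* placeholder, not a real object id */

int main(void)
{
	struct drm_crtc_get_sequence gs = { .crtc_id = CRTC_ID };
	struct drm_crtc_queue_sequence qs = {
		.crtc_id = CRTC_ID,
		.flags = DRM_CRTC_SEQUENCE_RELATIVE,
		.sequence = 2,		/* two vblanks from now */
		.user_data = 0x1234,
	};
	struct drm_event_crtc_sequence ev;
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return 1;

	if (ioctl(fd, DRM_IOCTL_CRTC_GET_SEQUENCE, &gs) == 0)
		printf("active=%u sequence=%llu time=%lld ns\n", gs.active,
		       (unsigned long long)gs.sequence,
		       (long long)gs.sequence_ns);

	if (ioctl(fd, DRM_IOCTL_CRTC_QUEUE_SEQUENCE, &qs) == 0 &&
	    read(fd, &ev, sizeof(ev)) == sizeof(ev) &&
	    ev.base.type == DRM_EVENT_CRTC_SEQUENCE)
		/* Assumes the queued sequence event is the only event pending. */
		printf("seq=%llu time=%lld ns user_data=%#llx\n",
		       (unsigned long long)ev.sequence,
		       (long long)ev.time_ns,
		       (unsigned long long)ev.user_data);

	close(fd);
	return 0;
}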
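
Editorial aside: the DRM_VC4_GEM_MADVISE ioctl added to include/uapi/drm/vc4_drm.h above follows the usual purgeable-BO pattern: userspace marks idle buffers DONTNEED so the kernel may drop their pages under memory pressure, then marks them WILLNEED before reuse and checks 'retained' to see whether the contents survived. A sketch under that assumption (the exact meaning of 'retained' is inferred from similar drivers, the header path may differ, and BO_HANDLE is a placeholder GEM handle):

/* Sketch of the purgeable-BO protocol suggested by the madvise uapi above.
 * Assumption: retained == 0 means the backing pages were purged and the
 * buffer contents must be regenerated before use. */
#include <drm/vc4_drm.h>
#include <sys/ioctl.h>

#define BO_HANDLE 1	/* placeholder GEM handle */

/* Called when the buffer is idle and may be reclaimed under memory pressure. */
static int bo_mark_purgeable(int fd)
{
	struct drm_vc4_gem_madvise arg = {
		.handle = BO_HANDLE,
		.madv = VC4_MADV_DONTNEED,
	};

	return ioctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &arg);
}

/* Called before reusing the buffer; returns 1 if the contents must be
 * re-uploaded because the kernel purged them in the meantime. */
static int bo_reclaim(int fd)
{
	struct drm_vc4_gem_madvise arg = {
		.handle = BO_HANDLE,
		.madv = VC4_MADV_WILLNEED,
	};

	if (ioctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &arg))
		return -1;

	return arg.retained ? 0 : 1;
}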