48 files changed, 1378 insertions, 339 deletions
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index 798fc718fffe..b3f6eb5d04a2 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c @@ -172,6 +172,7 @@ static int __init omapdss_init_fbdev(void) .set_min_bus_tput = omap_dss_set_min_bus_tput, }; struct device_node *node; + int r; board_data.version = omap_display_get_version(); if (board_data.version == OMAPDSS_VER_UNKNOWN) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 15d4a28d73bb..269b835571eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1035,7 +1035,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p, { int r; struct dma_fence *fence; - r = drm_syncobj_fence_get(p->filp, handle, &fence); + r = drm_syncobj_find_fence(p->filp, handle, &fence); if (r) return r; diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 4e906b82a170..fbc3f308fa19 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -167,3 +167,9 @@ int drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_private); int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_private); +int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private); +int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private); +int drm_syncobj_signal_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index d920b2118a39..a9ae6dd2d593 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -657,6 +657,12 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl, + DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_RESET, drm_syncobj_reset_ioctl, + DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl, + DRM_UNLOCKED|DRM_RENDER_ALLOW), }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index a5b38a80a99a..0422b8c2c2e7 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -1,5 +1,7 @@ /* * Copyright 2017 Red Hat + * Parts ported from amdgpu (fence wait code). + * Copyright 2016 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -31,6 +33,9 @@ * that contain an optional fence. The fence can be updated with a new * fence, or be NULL. * + * syncobj's can be waited upon, where the waiter blocks until the + * underlying fence signals. + * * syncobj's can be exported to fd's and back; these fd's are opaque and * have no other use except passing the syncobj between processes. 
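As background for the new ioctls (an editor sketch, not part of the patch): a minimal userspace wait might look like the following. It assumes only the uapi this series adds (struct drm_syncobj_wait, DRM_IOCTL_SYNCOBJ_WAIT, the DRM_SYNCOBJ_WAIT_FLAGS_* flags, here taken from libdrm's drm.h) and an already-open DRM fd; note that timeout_nsec is an absolute CLOCK_MONOTONIC time, which drm_timeout_abs_to_jiffies() further down converts to jiffies.

    #include <stdint.h>
    #include <time.h>
    #include <sys/ioctl.h>
    #include <libdrm/drm.h>  /* struct drm_syncobj_wait, DRM_IOCTL_SYNCOBJ_WAIT */

    /* Wait until any one of @count syncobj handles has a signaled fence,
     * or until @rel_ns nanoseconds have passed; on success the index of
     * the first signaled handle is stored in @first. */
    static int syncobj_wait_any(int fd, const uint32_t *handles,
                                uint32_t count, int64_t rel_ns,
                                uint32_t *first)
    {
        struct drm_syncobj_wait wait = {0};
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        wait.handles = (uintptr_t)handles;
        wait.count_handles = count;
        /* the ioctl takes an absolute deadline, not a relative timeout */
        wait.timeout_nsec = (int64_t)ts.tv_sec * 1000000000LL +
                            ts.tv_nsec + rel_ns;
        /* also wait for syncobjs whose fence has not been submitted yet */
        wait.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;

        if (ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait))
            return -1;  /* a timeout surfaces as errno == ETIME */
        if (first)
            *first = wait.first_signaled;
        return 0;
    }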
* @@ -46,6 +51,7 @@ #include <linux/fs.h> #include <linux/anon_inodes.h> #include <linux/sync_file.h> +#include <linux/sched/signal.h> #include "drm_internal.h" #include <drm/drm_syncobj.h> @@ -75,6 +81,75 @@ struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private, } EXPORT_SYMBOL(drm_syncobj_find); +static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj, + struct drm_syncobj_cb *cb, + drm_syncobj_func_t func) +{ + cb->func = func; + list_add_tail(&cb->node, &syncobj->cb_list); +} + +static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj, + struct dma_fence **fence, + struct drm_syncobj_cb *cb, + drm_syncobj_func_t func) +{ + int ret; + + *fence = drm_syncobj_fence_get(syncobj); + if (*fence) + return 1; + + spin_lock(&syncobj->lock); + /* We've already tried once to get a fence and failed. Now that we + * have the lock, try one more time just to be sure we don't add a + * callback when a fence has already been set. + */ + if (syncobj->fence) { + *fence = dma_fence_get(syncobj->fence); + ret = 1; + } else { + *fence = NULL; + drm_syncobj_add_callback_locked(syncobj, cb, func); + ret = 0; + } + spin_unlock(&syncobj->lock); + + return ret; +} + +/** + * drm_syncobj_add_callback - adds a callback to syncobj::cb_list + * @syncobj: Sync object to which to add the callback + * @cb: Callback to add + * @func: Func to use when initializing the drm_syncobj_cb struct + * + * This adds a callback to be called next time the fence is replaced. + */ +void drm_syncobj_add_callback(struct drm_syncobj *syncobj, + struct drm_syncobj_cb *cb, + drm_syncobj_func_t func) +{ + spin_lock(&syncobj->lock); + drm_syncobj_add_callback_locked(syncobj, cb, func); + spin_unlock(&syncobj->lock); +} +EXPORT_SYMBOL(drm_syncobj_add_callback); + +/** + * drm_syncobj_remove_callback - removes a callback from syncobj::cb_list + * @syncobj: Sync object from which to remove the callback + * @cb: Callback to remove + */ +void drm_syncobj_remove_callback(struct drm_syncobj *syncobj, + struct drm_syncobj_cb *cb) +{ + spin_lock(&syncobj->lock); + list_del_init(&cb->node); + spin_unlock(&syncobj->lock); +} +EXPORT_SYMBOL(drm_syncobj_remove_callback); + /** * drm_syncobj_replace_fence - replace fence in a sync object. 
* @syncobj: Sync object to replace fence in @@ -86,18 +161,75 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, struct dma_fence *fence) { struct dma_fence *old_fence; + struct drm_syncobj_cb *cur, *tmp; if (fence) dma_fence_get(fence); - old_fence = xchg(&syncobj->fence, fence); + + spin_lock(&syncobj->lock); + + old_fence = syncobj->fence; + syncobj->fence = fence; + + if (fence != old_fence) { + list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) { + list_del_init(&cur->node); + cur->func(syncobj, cur); + } + } + + spin_unlock(&syncobj->lock); dma_fence_put(old_fence); } EXPORT_SYMBOL(drm_syncobj_replace_fence); -int drm_syncobj_fence_get(struct drm_file *file_private, - u32 handle, - struct dma_fence **fence) +struct drm_syncobj_null_fence { + struct dma_fence base; + spinlock_t lock; +}; + +static const char *drm_syncobj_null_fence_get_name(struct dma_fence *fence) +{ + return "syncobjnull"; +} + +static bool drm_syncobj_null_fence_enable_signaling(struct dma_fence *fence) +{ + dma_fence_enable_sw_signaling(fence); + return !dma_fence_is_signaled(fence); +} + +static const struct dma_fence_ops drm_syncobj_null_fence_ops = { + .get_driver_name = drm_syncobj_null_fence_get_name, + .get_timeline_name = drm_syncobj_null_fence_get_name, + .enable_signaling = drm_syncobj_null_fence_enable_signaling, + .wait = dma_fence_default_wait, + .release = NULL, +}; + +static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj) +{ + struct drm_syncobj_null_fence *fence; + fence = kzalloc(sizeof(*fence), GFP_KERNEL); + if (fence == NULL) + return -ENOMEM; + + spin_lock_init(&fence->lock); + dma_fence_init(&fence->base, &drm_syncobj_null_fence_ops, + &fence->lock, 0, 0); + dma_fence_signal(&fence->base); + + drm_syncobj_replace_fence(syncobj, &fence->base); + + dma_fence_put(&fence->base); + + return 0; +} + +int drm_syncobj_find_fence(struct drm_file *file_private, + u32 handle, + struct dma_fence **fence) { struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle); int ret = 0; @@ -105,14 +237,14 @@ int drm_syncobj_fence_get(struct drm_file *file_private, if (!syncobj) return -ENOENT; - *fence = dma_fence_get(syncobj->fence); + *fence = drm_syncobj_fence_get(syncobj); if (!*fence) { ret = -EINVAL; } drm_syncobj_put(syncobj); return ret; } -EXPORT_SYMBOL(drm_syncobj_fence_get); +EXPORT_SYMBOL(drm_syncobj_find_fence); /** * drm_syncobj_free - free a sync object. 
@@ -125,13 +257,13 @@ void drm_syncobj_free(struct kref *kref) struct drm_syncobj *syncobj = container_of(kref, struct drm_syncobj, refcount); - dma_fence_put(syncobj->fence); + drm_syncobj_replace_fence(syncobj, NULL); kfree(syncobj); } EXPORT_SYMBOL(drm_syncobj_free); static int drm_syncobj_create(struct drm_file *file_private, - u32 *handle) + u32 *handle, uint32_t flags) { int ret; struct drm_syncobj *syncobj; @@ -141,6 +273,16 @@ static int drm_syncobj_create(struct drm_file *file_private, return -ENOMEM; kref_init(&syncobj->refcount); + INIT_LIST_HEAD(&syncobj->cb_list); + spin_lock_init(&syncobj->lock); + + if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) { + ret = drm_syncobj_assign_null_handle(syncobj); + if (ret < 0) { + drm_syncobj_put(syncobj); + return ret; + } + } idr_preload(GFP_KERNEL); spin_lock(&file_private->syncobj_table_lock); @@ -307,7 +449,7 @@ int drm_syncobj_export_sync_file(struct drm_file *file_private, if (fd < 0) return fd; - ret = drm_syncobj_fence_get(file_private, handle, &fence); + ret = drm_syncobj_find_fence(file_private, handle, &fence); if (ret) goto err_put_fd; @@ -377,11 +519,11 @@ drm_syncobj_create_ioctl(struct drm_device *dev, void *data, return -ENODEV; /* no valid flags yet */ - if (args->flags) + if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED) return -EINVAL; return drm_syncobj_create(file_private, - &args->handle); + &args->handle, args->flags); } int @@ -447,3 +589,368 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, return drm_syncobj_fd_to_handle(file_private, args->fd, &args->handle); } + +struct syncobj_wait_entry { + struct task_struct *task; + struct dma_fence *fence; + struct dma_fence_cb fence_cb; + struct drm_syncobj_cb syncobj_cb; +}; + +static void syncobj_wait_fence_func(struct dma_fence *fence, + struct dma_fence_cb *cb) +{ + struct syncobj_wait_entry *wait = + container_of(cb, struct syncobj_wait_entry, fence_cb); + + wake_up_process(wait->task); +} + +static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj, + struct drm_syncobj_cb *cb) +{ + struct syncobj_wait_entry *wait = + container_of(cb, struct syncobj_wait_entry, syncobj_cb); + + /* This happens inside the syncobj lock */ + wait->fence = dma_fence_get(syncobj->fence); + wake_up_process(wait->task); +} + +static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs, + uint32_t count, + uint32_t flags, + signed long timeout, + uint32_t *idx) +{ + struct syncobj_wait_entry *entries; + struct dma_fence *fence; + signed long ret; + uint32_t signaled_count, i; + + entries = kcalloc(count, sizeof(*entries), GFP_KERNEL); + if (!entries) + return -ENOMEM; + + /* Walk the list of sync objects and initialize entries. We do + * this up-front so that we can properly return -EINVAL if there is + * a syncobj with a missing fence and then never have the chance of + * returning -EINVAL again. + */ + signaled_count = 0; + for (i = 0; i < count; ++i) { + entries[i].task = current; + entries[i].fence = drm_syncobj_fence_get(syncobjs[i]); + if (!entries[i].fence) { + if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) { + continue; + } else { + ret = -EINVAL; + goto cleanup_entries; + } + } + + if (dma_fence_is_signaled(entries[i].fence)) { + if (signaled_count == 0 && idx) + *idx = i; + signaled_count++; + } + } + + /* Initialize ret to the max of timeout and 1. That way, the + * default return value indicates a successful wait and not a + * timeout. 
+ */ + ret = max_t(signed long, timeout, 1); + + if (signaled_count == count || + (signaled_count > 0 && + !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL))) + goto cleanup_entries; + + /* There's a very annoying laxness in the dma_fence API here, in + * that backends are not required to automatically report when a + * fence is signaled prior to fence->ops->enable_signaling() being + * called. So here if we fail to match signaled_count, we need to + * fall through and try a 0 timeout wait! + */ + + if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) { + for (i = 0; i < count; ++i) { + drm_syncobj_fence_get_or_add_callback(syncobjs[i], + &entries[i].fence, + &entries[i].syncobj_cb, + syncobj_wait_syncobj_func); + } + } + + do { + set_current_state(TASK_INTERRUPTIBLE); + + signaled_count = 0; + for (i = 0; i < count; ++i) { + fence = entries[i].fence; + if (!fence) + continue; + + if (dma_fence_is_signaled(fence) || + (!entries[i].fence_cb.func && + dma_fence_add_callback(fence, + &entries[i].fence_cb, + syncobj_wait_fence_func))) { + /* The fence has been signaled */ + if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) { + signaled_count++; + } else { + if (idx) + *idx = i; + goto done_waiting; + } + } + } + + if (signaled_count == count) + goto done_waiting; + + if (timeout == 0) { + /* If we are doing a 0 timeout wait and we got + * here, then we just timed out. + */ + ret = 0; + goto done_waiting; + } + + ret = schedule_timeout(ret); + + if (ret > 0 && signal_pending(current)) + ret = -ERESTARTSYS; + } while (ret > 0); + +done_waiting: + __set_current_state(TASK_RUNNING); + +cleanup_entries: + for (i = 0; i < count; ++i) { + if (entries[i].syncobj_cb.func) + drm_syncobj_remove_callback(syncobjs[i], + &entries[i].syncobj_cb); + if (entries[i].fence_cb.func) + dma_fence_remove_callback(entries[i].fence, + &entries[i].fence_cb); + dma_fence_put(entries[i].fence); + } + kfree(entries); + + return ret; +} + +/** + * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value + * + * @timeout_nsec: absolute timeout in ns, 0 for poll + * + * Calculate the timeout in jiffies from an absolute time in ns. 
+ */ +static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec) +{ + ktime_t abs_timeout, now; + u64 timeout_ns, timeout_jiffies64; + + /* make 0 timeout means poll - absolute 0 doesn't seem valid */ + if (timeout_nsec == 0) + return 0; + + abs_timeout = ns_to_ktime(timeout_nsec); + now = ktime_get(); + + if (!ktime_after(abs_timeout, now)) + return 0; + + timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now)); + + timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns); + /* clamp timeout to avoid infinite timeout */ + if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1) + return MAX_SCHEDULE_TIMEOUT - 1; + + return timeout_jiffies64 + 1; +} + +static int drm_syncobj_array_wait(struct drm_device *dev, + struct drm_file *file_private, + struct drm_syncobj_wait *wait, + struct drm_syncobj **syncobjs) +{ + signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec); + signed long ret = 0; + uint32_t first = ~0; + + ret = drm_syncobj_array_wait_timeout(syncobjs, + wait->count_handles, + wait->flags, + timeout, &first); + if (ret < 0) + return ret; + + wait->first_signaled = first; + if (ret == 0) + return -ETIME; + return 0; +} + +static int drm_syncobj_array_find(struct drm_file *file_private, + void *user_handles, uint32_t count_handles, + struct drm_syncobj ***syncobjs_out) +{ + uint32_t i, *handles; + struct drm_syncobj **syncobjs; + int ret; + + handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL); + if (handles == NULL) + return -ENOMEM; + + if (copy_from_user(handles, user_handles, + sizeof(uint32_t) * count_handles)) { + ret = -EFAULT; + goto err_free_handles; + } + + syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL); + if (syncobjs == NULL) { + ret = -ENOMEM; + goto err_free_handles; + } + + for (i = 0; i < count_handles; i++) { + syncobjs[i] = drm_syncobj_find(file_private, handles[i]); + if (!syncobjs[i]) { + ret = -ENOENT; + goto err_put_syncobjs; + } + } + + kfree(handles); + *syncobjs_out = syncobjs; + return 0; + +err_put_syncobjs: + while (i-- > 0) + drm_syncobj_put(syncobjs[i]); + kfree(syncobjs); +err_free_handles: + kfree(handles); + + return ret; +} + +static void drm_syncobj_array_free(struct drm_syncobj **syncobjs, + uint32_t count) +{ + uint32_t i; + for (i = 0; i < count; i++) + drm_syncobj_put(syncobjs[i]); + kfree(syncobjs); +} + +int +drm_syncobj_wait_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private) +{ + struct drm_syncobj_wait *args = data; + struct drm_syncobj **syncobjs; + int ret = 0; + + if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) + return -ENODEV; + + if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)) + return -EINVAL; + + if (args->count_handles == 0) + return -EINVAL; + + ret = drm_syncobj_array_find(file_private, + u64_to_user_ptr(args->handles), + args->count_handles, + &syncobjs); + if (ret < 0) + return ret; + + ret = drm_syncobj_array_wait(dev, file_private, + args, syncobjs); + + drm_syncobj_array_free(syncobjs, args->count_handles); + + return ret; +} + +int +drm_syncobj_reset_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private) +{ + struct drm_syncobj_array *args = data; + struct drm_syncobj **syncobjs; + uint32_t i; + int ret; + + if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) + return -ENODEV; + + if (args->pad != 0) + return -EINVAL; + + if (args->count_handles == 0) + return -EINVAL; + + ret = drm_syncobj_array_find(file_private, + u64_to_user_ptr(args->handles), + args->count_handles, + 
&syncobjs); + if (ret < 0) + return ret; + + for (i = 0; i < args->count_handles; i++) + drm_syncobj_replace_fence(syncobjs[i], NULL); + + drm_syncobj_array_free(syncobjs, args->count_handles); + + return 0; +} + +int +drm_syncobj_signal_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private) +{ + struct drm_syncobj_array *args = data; + struct drm_syncobj **syncobjs; + uint32_t i; + int ret; + + if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) + return -ENODEV; + + if (args->pad != 0) + return -EINVAL; + + if (args->count_handles == 0) + return -EINVAL; + + ret = drm_syncobj_array_find(file_private, + u64_to_user_ptr(args->handles), + args->count_handles, + &syncobjs); + if (ret < 0) + return ret; + + for (i = 0; i < args->count_handles; i++) { + ret = drm_syncobj_assign_null_handle(syncobjs[i]); + if (ret < 0) + break; + } + + drm_syncobj_array_free(syncobjs, args->count_handles); + + return ret; +} diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c index f77dcfaade6c..b4c7af3ab6ae 100644 --- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c +++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c @@ -603,6 +603,72 @@ static void dsi_encoder_enable(struct drm_encoder *encoder) dsi->enable = true; } +static enum drm_mode_status dsi_encoder_phy_mode_valid( + struct drm_encoder *encoder, + const struct drm_display_mode *mode) +{ + struct dw_dsi *dsi = encoder_to_dsi(encoder); + struct mipi_phy_params phy; + u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format); + u32 req_kHz, act_kHz, lane_byte_clk_kHz; + + /* Calculate the lane byte clk using the adjusted mode clk */ + memset(&phy, 0, sizeof(phy)); + req_kHz = mode->clock * bpp / dsi->lanes; + act_kHz = dsi_calc_phy_rate(req_kHz, &phy); + lane_byte_clk_kHz = act_kHz / 8; + + DRM_DEBUG_DRIVER("Checking mode %ix%i-%i@%i clock: %i...", + mode->hdisplay, mode->vdisplay, bpp, + drm_mode_vrefresh(mode), mode->clock); + + /* + * Make sure the adjusted mode clock and the lane byte clk + * have a common denominator base frequency + */ + if (mode->clock/dsi->lanes == lane_byte_clk_kHz/3) { + DRM_DEBUG_DRIVER("OK!\n"); + return MODE_OK; + } + + DRM_DEBUG_DRIVER("BAD!\n"); + return MODE_BAD; +} + +static enum drm_mode_status dsi_encoder_mode_valid(struct drm_encoder *encoder, + const struct drm_display_mode *mode) + +{ + const struct drm_crtc_helper_funcs *crtc_funcs = NULL; + struct drm_crtc *crtc = NULL; + struct drm_display_mode adj_mode; + enum drm_mode_status ret; + + /* + * The crtc might adjust the mode, so go through the + * possible crtcs (technically just one) and call + * mode_fixup to figure out the adjusted mode before we + * validate it. 
+ */ + drm_for_each_crtc(crtc, encoder->dev) { + /* + * reset adj_mode to the mode value each time, + * so we don't adjust the mode twice + */ + drm_mode_copy(&adj_mode, mode); + + crtc_funcs = crtc->helper_private; + if (crtc_funcs && crtc_funcs->mode_fixup) + if (!crtc_funcs->mode_fixup(crtc, mode, &adj_mode)) + return MODE_BAD; + + ret = dsi_encoder_phy_mode_valid(encoder, &adj_mode); + if (ret != MODE_OK) + return ret; + } + return MODE_OK; +} + static void dsi_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adj_mode) @@ -622,6 +688,7 @@ static int dsi_encoder_atomic_check(struct drm_encoder *encoder, static const struct drm_encoder_helper_funcs dw_encoder_helper_funcs = { .atomic_check = dsi_encoder_atomic_check, + .mode_valid = dsi_encoder_mode_valid, .mode_set = dsi_encoder_mode_set, .enable = dsi_encoder_enable, .disable = dsi_encoder_disable diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index 39f7d15673ed..9823477b1855 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -178,6 +178,19 @@ static void ade_init(struct ade_hw_ctx *ctx) FRM_END_START_MASK, REG_EFFECTIVE_IN_ADEEN_FRMEND); } +static bool ade_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct ade_crtc *acrtc = to_ade_crtc(crtc); + struct ade_hw_ctx *ctx = acrtc->ctx; + + adjusted_mode->clock = + clk_round_rate(ctx->ade_pix_clk, mode->clock * 1000) / 1000; + return true; +} + + static void ade_set_pix_clk(struct ade_hw_ctx *ctx, struct drm_display_mode *mode, struct drm_display_mode *adj_mode) @@ -555,6 +568,7 @@ static void ade_crtc_atomic_flush(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs ade_crtc_helper_funcs = { + .mode_fixup = ade_crtc_mode_fixup, .mode_set_nofb = ade_crtc_mode_set_nofb, .atomic_begin = ade_crtc_atomic_begin, .atomic_flush = ade_crtc_atomic_flush, diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 3d74f3a27c13..4c2016237d61 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -2129,9 +2129,7 @@ await_fence_array(struct i915_execbuffer *eb, if (!(flags & I915_EXEC_FENCE_WAIT)) continue; - rcu_read_lock(); - fence = dma_fence_get_rcu_safe(&syncobj->fence); - rcu_read_unlock(); + fence = drm_syncobj_fence_get(syncobj); if (!fence) return -EINVAL; diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index 0e3828ed1e46..7791313405b5 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -486,8 +486,6 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) adreno_gpu = &a3xx_gpu->base; gpu = &adreno_gpu->base; - a3xx_gpu->pdev = pdev; - gpu->perfcntrs = perfcntrs; gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs); diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h index 85ff66cbddd6..ab60dc9e344e 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h @@ -28,7 +28,6 @@ struct a3xx_gpu { struct adreno_gpu base; - struct platform_device *pdev; /* if OCMEM is used for GMEM: */ uint32_t ocmem_base; diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index 19abf229b08d..58341ef6f15b 100644 --- 
a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -568,8 +568,6 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev) adreno_gpu = &a4xx_gpu->base; gpu = &adreno_gpu->base; - a4xx_gpu->pdev = pdev; - gpu->perfcntrs = NULL; gpu->num_perfcntrs = 0; diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h index 01247204ac92..f757184328a3 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h @@ -23,7 +23,6 @@ struct a4xx_gpu { struct adreno_gpu base; - struct platform_device *pdev; /* if OCMEM is used for GMEM: */ uint32_t ocmem_base; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index f9eae03aa1dc..17c59d839e6f 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -284,28 +284,14 @@ static int a5xx_me_init(struct msm_gpu *gpu) static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu, const struct firmware *fw, u64 *iova) { - struct drm_device *drm = gpu->dev; struct drm_gem_object *bo; void *ptr; - bo = msm_gem_new_locked(drm, fw->size - 4, MSM_BO_UNCACHED); - if (IS_ERR(bo)) - return bo; + ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4, + MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova); - ptr = msm_gem_get_vaddr(bo); - if (!ptr) { - drm_gem_object_unreference(bo); - return ERR_PTR(-ENOMEM); - } - - if (iova) { - int ret = msm_gem_get_iova(bo, gpu->aspace, iova); - - if (ret) { - drm_gem_object_unreference(bo); - return ERR_PTR(ret); - } - } + if (IS_ERR(ptr)) + return ERR_CAST(ptr); memcpy(ptr, &fw->data[4], fw->size - 4); @@ -372,8 +358,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu) { static bool loaded; struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); - struct platform_device *pdev = a5xx_gpu->pdev; + struct platform_device *pdev = gpu->pdev; int ret; /* @@ -410,6 +395,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu) A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \ A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \ A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \ + A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \ A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \ A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \ A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP) @@ -812,6 +798,27 @@ static void a5xx_gpmu_err_irq(struct msm_gpu *gpu) dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n"); } +static void a5xx_fault_detect_irq(struct msm_gpu *gpu) +{ + struct drm_device *dev = gpu->dev; + struct msm_drm_private *priv = dev->dev_private; + + dev_err(dev->dev, "gpu fault fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n", + gpu->funcs->last_fence(gpu), + gpu_read(gpu, REG_A5XX_RBBM_STATUS), + gpu_read(gpu, REG_A5XX_CP_RB_RPTR), + gpu_read(gpu, REG_A5XX_CP_RB_WPTR), + gpu_read64(gpu, REG_A5XX_CP_IB1_BASE, REG_A5XX_CP_IB1_BASE_HI), + gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ), + gpu_read64(gpu, REG_A5XX_CP_IB2_BASE, REG_A5XX_CP_IB2_BASE_HI), + gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ)); + + /* Turn off the hangcheck timer to keep it from bothering us */ + del_timer(&gpu->hangcheck_timer); + + queue_work(priv->wq, &gpu->recover_work); +} + #define RBBM_ERROR_MASK \ (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \ A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \ @@ -838,6 +845,9 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu) if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR) a5xx_cp_err_irq(gpu); + if (status & 
A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT) + a5xx_fault_detect_irq(gpu); + if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS) a5xx_uche_err_irq(gpu); @@ -1015,7 +1025,6 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) adreno_gpu = &a5xx_gpu->base; gpu = &adreno_gpu->base; - a5xx_gpu->pdev = pdev; adreno_gpu->registers = a5xx_registers; adreno_gpu->reg_offsets = a5xx_register_offsets; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h index 1137092241d5..e94451685bf8 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h @@ -23,7 +23,6 @@ struct a5xx_gpu { struct adreno_gpu base; - struct platform_device *pdev; struct drm_gem_object *pm4_bo; uint64_t pm4_iova; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c index 87af6eea0483..04aab1dcae2b 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_power.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c @@ -294,16 +294,10 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu) */ bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2; - a5xx_gpu->gpmu_bo = msm_gem_new_locked(drm, bosize, MSM_BO_UNCACHED); - if (IS_ERR(a5xx_gpu->gpmu_bo)) - goto err; - - if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace, - &a5xx_gpu->gpmu_iova)) - goto err; - - ptr = msm_gem_get_vaddr(a5xx_gpu->gpmu_bo); - if (!ptr) + ptr = msm_gem_kernel_new_locked(drm, bosize, + MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, + &a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova); + if (IS_ERR(ptr)) goto err; while (cmds_size > 0) { diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 7414c6bbd582..c8b4ac254bb5 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -337,11 +337,6 @@ void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords) DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name); } -static const char *iommu_ports[] = { - "gfx3d_user", "gfx3d_priv", - "gfx3d1_user", "gfx3d1_priv", -}; - int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs) { @@ -373,15 +368,15 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, adreno_gpu_config.ringsz = RB_SIZE; + pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); + ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base, adreno_gpu->info->name, &adreno_gpu_config); if (ret) return ret; - pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD); - pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_enable(&pdev->dev); - ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev); if (ret) { dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n", @@ -396,37 +391,17 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, return ret; } - if (gpu->aspace && gpu->aspace->mmu) { - struct msm_mmu *mmu = gpu->aspace->mmu; - ret = mmu->funcs->attach(mmu, iommu_ports, - ARRAY_SIZE(iommu_ports)); - if (ret) - return ret; - } + adreno_gpu->memptrs = msm_gem_kernel_new(drm, + sizeof(*adreno_gpu->memptrs), MSM_BO_UNCACHED, gpu->aspace, + &adreno_gpu->memptrs_bo, &adreno_gpu->memptrs_iova); - adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs), - MSM_BO_UNCACHED); - if (IS_ERR(adreno_gpu->memptrs_bo)) { - ret = 
PTR_ERR(adreno_gpu->memptrs_bo); - adreno_gpu->memptrs_bo = NULL; - dev_err(drm->dev, "could not allocate memptrs: %d\n", ret); - return ret; - } - - adreno_gpu->memptrs = msm_gem_get_vaddr(adreno_gpu->memptrs_bo); if (IS_ERR(adreno_gpu->memptrs)) { - dev_err(drm->dev, "could not vmap memptrs\n"); - return -ENOMEM; - } - - ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace, - &adreno_gpu->memptrs_iova); - if (ret) { - dev_err(drm->dev, "could not map memptrs: %d\n", ret); - return ret; + ret = PTR_ERR(adreno_gpu->memptrs); + adreno_gpu->memptrs = NULL; + dev_err(drm->dev, "could not allocate memptrs: %d\n", ret); } - return 0; + return ret; } void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu) @@ -446,10 +421,4 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu) release_firmware(adreno_gpu->pfp); msm_gpu_cleanup(gpu); - - if (gpu->aspace) { - gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, - iommu_ports, ARRAY_SIZE(iommu_ports)); - msm_gem_address_space_put(gpu->aspace); - } } diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index 311c1c1e7d6c..98742d7af6dc 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -161,12 +161,17 @@ static const struct of_device_id dt_match[] = { {} }; +static const struct dev_pm_ops dsi_pm_ops = { + SET_RUNTIME_PM_OPS(msm_dsi_runtime_suspend, msm_dsi_runtime_resume, NULL) +}; + static struct platform_driver dsi_driver = { .probe = dsi_dev_probe, .remove = dsi_dev_remove, .driver = { .name = "msm_dsi", .of_match_table = dt_match, + .pm = &dsi_pm_ops, }, }; diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index 9e6017387efb..2302046197a8 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -179,6 +179,8 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host); int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, struct drm_device *dev); int msm_dsi_host_init(struct msm_dsi *msm_dsi); +int msm_dsi_runtime_suspend(struct device *dev); +int msm_dsi_runtime_resume(struct device *dev); /* dsi phy */ struct msm_dsi_phy; diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index c7b612c3d771..dbb31a014419 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -135,7 +135,6 @@ struct msm_dsi_host { struct completion video_comp; struct mutex dev_mutex; struct mutex cmd_mutex; - struct mutex clk_mutex; spinlock_t intr_lock; /* Protect interrupt ctrl register */ u32 err_work_state; @@ -221,6 +220,8 @@ static const struct msm_dsi_cfg_handler *dsi_get_config( goto put_gdsc; } + pm_runtime_get_sync(dev); + ret = regulator_enable(gdsc_reg); if (ret) { pr_err("%s: unable to enable gdsc\n", __func__); @@ -247,6 +248,7 @@ disable_clks: clk_disable_unprepare(ahb_clk); disable_gdsc: regulator_disable(gdsc_reg); + pm_runtime_put_autosuspend(dev); put_clk: clk_put(ahb_clk); put_gdsc: @@ -455,6 +457,34 @@ static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host) clk_disable_unprepare(msm_host->bus_clks[i]); } +int msm_dsi_runtime_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct msm_dsi *msm_dsi = platform_get_drvdata(pdev); + struct mipi_dsi_host *host = msm_dsi->host; + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + + if (!msm_host->cfg_hnd) + return 0; + + dsi_bus_clk_disable(msm_host); + + return 0; +} + +int msm_dsi_runtime_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct 
msm_dsi *msm_dsi = platform_get_drvdata(pdev); + struct mipi_dsi_host *host = msm_dsi->host; + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + + if (!msm_host->cfg_hnd) + return 0; + + return dsi_bus_clk_enable(msm_host); +} + static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host) { int ret; @@ -596,35 +626,6 @@ static void dsi_link_clk_disable(struct msm_dsi_host *msm_host) } } -static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable) -{ - int ret = 0; - - mutex_lock(&msm_host->clk_mutex); - if (enable) { - ret = dsi_bus_clk_enable(msm_host); - if (ret) { - pr_err("%s: Can not enable bus clk, %d\n", - __func__, ret); - goto unlock_ret; - } - ret = dsi_link_clk_enable(msm_host); - if (ret) { - pr_err("%s: Can not enable link clk, %d\n", - __func__, ret); - dsi_bus_clk_disable(msm_host); - goto unlock_ret; - } - } else { - dsi_link_clk_disable(msm_host); - dsi_bus_clk_disable(msm_host); - } - -unlock_ret: - mutex_unlock(&msm_host->clk_mutex); - return ret; -} - static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host) { struct drm_display_mode *mode = msm_host->mode; @@ -1699,6 +1700,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) } msm_host->pdev = pdev; + msm_dsi->host = &msm_host->base; ret = dsi_host_parse_dt(msm_host); if (ret) { @@ -1713,6 +1715,8 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) goto fail; } + pm_runtime_enable(&pdev->dev); + msm_host->cfg_hnd = dsi_get_config(msm_host); if (!msm_host->cfg_hnd) { ret = -EINVAL; @@ -1753,7 +1757,6 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) init_completion(&msm_host->video_comp); mutex_init(&msm_host->dev_mutex); mutex_init(&msm_host->cmd_mutex); - mutex_init(&msm_host->clk_mutex); spin_lock_init(&msm_host->intr_lock); /* setup workqueue */ @@ -1761,7 +1764,6 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) INIT_WORK(&msm_host->err_work, dsi_err_worker); INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker); - msm_dsi->host = &msm_host->base; msm_dsi->id = msm_host->id; DBG("Dsi Host %d initialized", msm_host->id); @@ -1783,9 +1785,10 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host) msm_host->workqueue = NULL; } - mutex_destroy(&msm_host->clk_mutex); mutex_destroy(&msm_host->cmd_mutex); mutex_destroy(&msm_host->dev_mutex); + + pm_runtime_disable(&msm_host->pdev->dev); } int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, @@ -1881,7 +1884,8 @@ int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host, * mdss interrupt is generated in mdp core clock domain * mdp clock needs to be enabled to receive dsi interrupt */ - dsi_clk_ctrl(msm_host, 1); + pm_runtime_get_sync(&msm_host->pdev->dev); + dsi_link_clk_enable(msm_host); /* TODO: vote for bus bandwidth */ @@ -1911,7 +1915,8 @@ /* TODO: unvote for bus bandwidth */ - dsi_clk_ctrl(msm_host, 0); + dsi_link_clk_disable(msm_host); + pm_runtime_put_autosuspend(&msm_host->pdev->dev); } int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host, @@ -2160,8 +2165,11 @@ int msm_dsi_host_enable(struct mipi_dsi_host *host) * and only turned on before MDP START. * This part of the code should be enabled once the mdp driver supports it. 
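The dsi_host conversion above follows the same runtime-PM shape used throughout the rest of this pull (hdmi, mdp5, dsi phy): the runtime_suspend/resume callbacks own the always-needed bus clocks, and every path that touches registers brackets the access with a usage-count get/put. A condensed editor sketch of the pattern, with a made-up my_hw device and MY_REG offset (not code from any of these drivers):

    #include <linux/io.h>
    #include <linux/pm_runtime.h>

    struct my_hw {
        struct device *dev;
        void __iomem *mmio;
    };

    #define MY_REG 0x0  /* placeholder register offset */

    static void my_hw_poke(struct my_hw *hw)
    {
        /* powers the device up; runtime_resume enables the clocks */
        pm_runtime_get_sync(hw->dev);

        writel(0x1, hw->mmio + MY_REG);  /* safe: clocks are running */

        /* drop our reference; with autosuspend the clocks stay on for
         * a grace period in case another access follows shortly */
        pm_runtime_put_autosuspend(hw->dev);
    }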
*/ - /* if (msm_panel->mode == MSM_DSI_CMD_MODE) - dsi_clk_ctrl(msm_host, 0); */ + /* if (msm_panel->mode == MSM_DSI_CMD_MODE) { + * dsi_link_clk_disable(msm_host); + * pm_runtime_put_autosuspend(&msm_host->pdev->dev); + * } + */ return 0; } @@ -2217,9 +2225,11 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host, goto unlock_ret; } - ret = dsi_clk_ctrl(msm_host, 1); + pm_runtime_get_sync(&msm_host->pdev->dev); + ret = dsi_link_clk_enable(msm_host); if (ret) { - pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret); + pr_err("%s: failed to enable link clocks. ret=%d\n", + __func__, ret); goto fail_disable_reg; } @@ -2243,7 +2253,8 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host, return 0; fail_disable_clk: - dsi_clk_ctrl(msm_host, 0); + dsi_link_clk_disable(msm_host); + pm_runtime_put_autosuspend(&msm_host->pdev->dev); fail_disable_reg: dsi_host_regulator_disable(msm_host); unlock_ret: @@ -2268,7 +2279,8 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host) pinctrl_pm_select_sleep_state(&msm_host->pdev->dev); - dsi_clk_ctrl(msm_host, 0); + dsi_link_clk_disable(msm_host); + pm_runtime_put_autosuspend(&msm_host->pdev->dev); dsi_host_regulator_disable(msm_host); diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index 0c2eb9c9a1fc..7c9bf91bc22b 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -373,7 +373,7 @@ static int dsi_phy_enable_resource(struct msm_dsi_phy *phy) static void dsi_phy_disable_resource(struct msm_dsi_phy *phy) { clk_disable_unprepare(phy->ahb_clk); - pm_runtime_put_sync(&phy->pdev->dev); + pm_runtime_put_autosuspend(&phy->pdev->dev); } static const struct of_device_id dsi_phy_dt_match[] = { diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index a968cad509c2..17e069a133a4 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -239,6 +239,8 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev) hdmi->pwr_clks[i] = clk; } + pm_runtime_enable(&pdev->dev); + hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0); hdmi->i2c = msm_hdmi_i2c_init(hdmi); diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c index 13ac822dee5d..7e357077ed26 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c @@ -35,6 +35,8 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge) const struct hdmi_platform_config *config = hdmi->config; int i, ret; + pm_runtime_get_sync(&hdmi->pdev->dev); + for (i = 0; i < config->pwr_reg_cnt; i++) { ret = regulator_enable(hdmi->pwr_regs[i]); if (ret) { @@ -84,6 +86,8 @@ static void power_off(struct drm_bridge *bridge) config->pwr_reg_names[i], ret); } } + + pm_runtime_put_autosuspend(&hdmi->pdev->dev); } #define AVI_IFRAME_LINE_NUMBER 1 diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index 71536d9c7fe8..c0848dfedd50 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c @@ -137,6 +137,36 @@ err: return ret; } +static void enable_hpd_clocks(struct hdmi *hdmi, bool enable) +{ + const struct hdmi_platform_config *config = hdmi->config; + struct device *dev = &hdmi->pdev->dev; + int i, ret; + + if (enable) { + for (i = 0; i < config->hpd_clk_cnt; i++) { + if (config->hpd_freq && config->hpd_freq[i]) { + ret = clk_set_rate(hdmi->hpd_clks[i], + config->hpd_freq[i]); + if (ret) + dev_warn(dev, + "failed to set 
clk %s (%d)\n", + config->hpd_clk_names[i], ret); + } + + ret = clk_prepare_enable(hdmi->hpd_clks[i]); + if (ret) { + dev_err(dev, + "failed to enable hpd clk: %s (%d)\n", + config->hpd_clk_names[i], ret); + } + } + } else { + for (i = config->hpd_clk_cnt - 1; i >= 0; i--) + clk_disable_unprepare(hdmi->hpd_clks[i]); + } +} + static int hpd_enable(struct hdmi_connector *hdmi_connector) { struct hdmi *hdmi = hdmi_connector->hdmi; @@ -167,22 +197,8 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector) goto fail; } - for (i = 0; i < config->hpd_clk_cnt; i++) { - if (config->hpd_freq && config->hpd_freq[i]) { - ret = clk_set_rate(hdmi->hpd_clks[i], - config->hpd_freq[i]); - if (ret) - dev_warn(dev, "failed to set clk %s (%d)\n", - config->hpd_clk_names[i], ret); - } - - ret = clk_prepare_enable(hdmi->hpd_clks[i]); - if (ret) { - dev_err(dev, "failed to enable hpd clk: %s (%d)\n", - config->hpd_clk_names[i], ret); - goto fail; - } - } + pm_runtime_get_sync(dev); + enable_hpd_clocks(hdmi, true); msm_hdmi_set_mode(hdmi, false); msm_hdmi_phy_reset(hdmi); @@ -225,8 +241,8 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector) msm_hdmi_set_mode(hdmi, false); - for (i = 0; i < config->hpd_clk_cnt; i++) - clk_disable_unprepare(hdmi->hpd_clks[i]); + enable_hpd_clocks(hdmi, false); + pm_runtime_put_autosuspend(dev); ret = gpio_config(hdmi, false); if (ret) @@ -285,7 +301,16 @@ void msm_hdmi_connector_irq(struct drm_connector *connector) static enum drm_connector_status detect_reg(struct hdmi *hdmi) { - uint32_t hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS); + uint32_t hpd_int_status; + + pm_runtime_get_sync(&hdmi->pdev->dev); + enable_hpd_clocks(hdmi, true); + + hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS); + + enable_hpd_clocks(hdmi, false); + pm_runtime_put_autosuspend(&hdmi->pdev->dev); + return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ? connector_status_connected : connector_status_disconnected; } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c index aa7402e03f67..60790df91bfa 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c @@ -192,6 +192,7 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, { struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); struct mdp5_kms *mdp5_kms; + struct device *dev; int intf_num; u32 data = 0; @@ -214,14 +215,16 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, /* Smart Panel, Sync mode */ data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL; + dev = &mdp5_kms->pdev->dev; + /* Make sure clocks are on when connectors call this function. 
*/ - mdp5_enable(mdp5_kms); + pm_runtime_get_sync(dev); mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data); mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, MDP5_SPLIT_DPL_LOWER_SMART_PANEL); mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1); - mdp5_disable(mdp5_kms); + pm_runtime_put_autosuspend(dev); return 0; } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 3a81e26629c7..6fcb58ab718c 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -415,6 +415,7 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc, struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); struct mdp5_kms *mdp5_kms = get_kms(crtc); + struct device *dev = &mdp5_kms->pdev->dev; DBG("%s", crtc->name); @@ -425,7 +426,7 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc, mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done); mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err); - mdp5_disable(mdp5_kms); + pm_runtime_put_autosuspend(dev); mdp5_crtc->enabled = false; } @@ -436,13 +437,17 @@ static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc, struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); struct mdp5_kms *mdp5_kms = get_kms(crtc); + struct device *dev = &mdp5_kms->pdev->dev; DBG("%s", crtc->name); if (WARN_ON(mdp5_crtc->enabled)) return; - mdp5_enable(mdp5_kms); + pm_runtime_get_sync(dev); + + mdp5_crtc_mode_set_nofb(crtc); + mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err); if (mdp5_cstate->cmd_mode) @@ -533,7 +538,7 @@ static bool is_fullscreen(struct drm_crtc_state *cstate, ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay); } -enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc, +static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc, struct drm_crtc_state *new_crtc_state, struct drm_plane_state *bpstate) { @@ -727,6 +732,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; struct drm_device *dev = crtc->dev; struct mdp5_kms *mdp5_kms = get_kms(crtc); + struct platform_device *pdev = mdp5_kms->pdev; struct msm_kms *kms = &mdp5_kms->base.base; struct drm_gem_object *cursor_bo, *old_bo = NULL; uint32_t blendcfg, stride; @@ -755,7 +761,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, if (!handle) { DBG("Cursor off"); cursor_enable = false; - mdp5_enable(mdp5_kms); + pm_runtime_get_sync(&pdev->dev); goto set_cursor; } @@ -770,6 +776,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, lm = mdp5_cstate->pipeline.mixer->lm; stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0); + pm_runtime_get_sync(&pdev->dev); + spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); old_bo = mdp5_crtc->cursor.scanout_bo; @@ -779,8 +787,6 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, get_roi(crtc, &roi_w, &roi_h); - mdp5_enable(mdp5_kms); - mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888)); @@ -798,6 +804,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); + pm_runtime_put_autosuspend(&pdev->dev); + set_cursor: ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable); if (ret) { @@ -809,7 +817,7 @@ set_cursor: crtc_flush(crtc, flush_mask); end: - 
mdp5_disable(mdp5_kms); + pm_runtime_put_autosuspend(&pdev->dev); if (old_bo) { drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo); /* enable vblank to complete cursor work: */ @@ -842,7 +850,7 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) get_roi(crtc, &roi_w, &roi_h); - mdp5_enable(mdp5_kms); + pm_runtime_get_sync(&mdp5_kms->pdev->dev); spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), @@ -855,7 +863,7 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) crtc_flush(crtc, flush_mask); - mdp5_disable(mdp5_kms); + pm_runtime_put_autosuspend(&mdp5_kms->pdev->dev); return 0; } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c index 70bef51245af..5b851380d3f2 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c @@ -297,6 +297,10 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_interface *intf = mdp5_encoder->intf; + /* this isn't right I think */ + struct drm_crtc_state *cstate = encoder->crtc->state; + + mdp5_encoder_mode_set(encoder, &cstate->mode, &cstate->adjusted_mode); if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) mdp5_cmd_encoder_enable(encoder); @@ -320,7 +324,6 @@ static int mdp5_encoder_atomic_check(struct drm_encoder *encoder, } static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { - .mode_set = mdp5_encoder_mode_set, .disable = mdp5_encoder_disable, .enable = mdp5_encoder_enable, .atomic_check = mdp5_encoder_atomic_check, @@ -350,6 +353,7 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder, struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder); struct mdp5_kms *mdp5_kms; + struct device *dev; int intf_num; u32 data = 0; @@ -369,8 +373,10 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder, else return -EINVAL; + dev = &mdp5_kms->pdev->dev; /* Make sure clocks are on when connectors call this function. 
*/ - mdp5_enable(mdp5_kms); + pm_runtime_get_sync(dev); + /* Dumb Panel, Sync mode */ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0); mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data); @@ -378,7 +384,7 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder, mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true); - mdp5_disable(mdp5_kms); + pm_runtime_put_autosuspend(dev); return 0; } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c index 3ce8b9dec9c1..bb5deb00c899 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c @@ -49,16 +49,19 @@ static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) void mdp5_irq_preinstall(struct msm_kms *kms) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - mdp5_enable(mdp5_kms); + struct device *dev = &mdp5_kms->pdev->dev; + + pm_runtime_get_sync(dev); mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff); mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); - mdp5_disable(mdp5_kms); + pm_runtime_put_autosuspend(dev); } int mdp5_irq_postinstall(struct msm_kms *kms) { struct mdp_kms *mdp_kms = to_mdp_kms(kms); struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms); + struct device *dev = &mdp5_kms->pdev->dev; struct mdp_irq *error_handler = &mdp5_kms->error_handler; error_handler->irq = mdp5_irq_error_handler; @@ -67,9 +70,9 @@ int mdp5_irq_postinstall(struct msm_kms *kms) MDP5_IRQ_INTF2_UNDER_RUN | MDP5_IRQ_INTF3_UNDER_RUN; - mdp5_enable(mdp5_kms); + pm_runtime_get_sync(dev); mdp_irq_register(mdp_kms, error_handler); - mdp5_disable(mdp5_kms); + pm_runtime_put_autosuspend(dev); return 0; } @@ -77,9 +80,11 @@ int mdp5_irq_postinstall(struct msm_kms *kms) void mdp5_irq_uninstall(struct msm_kms *kms) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - mdp5_enable(mdp5_kms); + struct device *dev = &mdp5_kms->pdev->dev; + + pm_runtime_get_sync(dev); mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); - mdp5_disable(mdp5_kms); + pm_runtime_put_autosuspend(dev); } irqreturn_t mdp5_irq(struct msm_kms *kms) @@ -109,11 +114,12 @@ irqreturn_t mdp5_irq(struct msm_kms *kms) int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + struct device *dev = &mdp5_kms->pdev->dev; - mdp5_enable(mdp5_kms); + pm_runtime_get_sync(dev); mdp_update_vblank_mask(to_mdp_kms(kms), mdp5_crtc_vblank(crtc), true); - mdp5_disable(mdp5_kms); + pm_runtime_put_autosuspend(dev); return 0; } @@ -121,9 +127,10 @@ int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + struct device *dev = &mdp5_kms->pdev->dev; - mdp5_enable(mdp5_kms); + pm_runtime_get_sync(dev); mdp_update_vblank_mask(to_mdp_kms(kms), mdp5_crtc_vblank(crtc), false); - mdp5_disable(mdp5_kms); + pm_runtime_put_autosuspend(dev); } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 1c603aef3c59..f7c0698fec40 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -30,11 +30,10 @@ static const char *iommu_ports[] = { static int mdp5_hw_init(struct msm_kms *kms) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - struct platform_device *pdev = mdp5_kms->pdev; + struct device *dev = &mdp5_kms->pdev->dev; unsigned long flags; - pm_runtime_get_sync(&pdev->dev); - 
mdp5_enable(mdp5_kms); + pm_runtime_get_sync(dev); /* Magic unknown register writes: * @@ -66,8 +65,7 @@ static int mdp5_hw_init(struct msm_kms *kms) mdp5_ctlm_hw_reset(mdp5_kms->ctlm); - mdp5_disable(mdp5_kms); - pm_runtime_put_sync(&pdev->dev); + pm_runtime_put_sync(dev); return 0; } @@ -111,8 +109,9 @@ static void mdp5_swap_state(struct msm_kms *kms, struct drm_atomic_state *state) static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + struct device *dev = &mdp5_kms->pdev->dev; - mdp5_enable(mdp5_kms); + pm_runtime_get_sync(dev); if (mdp5_kms->smp) mdp5_smp_prepare_commit(mdp5_kms->smp, &mdp5_kms->state->smp); @@ -121,11 +120,12 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + struct device *dev = &mdp5_kms->pdev->dev; if (mdp5_kms->smp) mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp); - mdp5_disable(mdp5_kms); + pm_runtime_put_autosuspend(dev); } static void mdp5_wait_for_crtc_commit_done(struct msm_kms *kms, @@ -249,6 +249,9 @@ int mdp5_disable(struct mdp5_kms *mdp5_kms) { DBG(""); + mdp5_kms->enable_count--; + WARN_ON(mdp5_kms->enable_count < 0); + clk_disable_unprepare(mdp5_kms->ahb_clk); clk_disable_unprepare(mdp5_kms->axi_clk); clk_disable_unprepare(mdp5_kms->core_clk); @@ -262,6 +265,8 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms) { DBG(""); + mdp5_kms->enable_count++; + clk_prepare_enable(mdp5_kms->ahb_clk); clk_prepare_enable(mdp5_kms->axi_clk); clk_prepare_enable(mdp5_kms->core_clk); @@ -486,11 +491,12 @@ fail: static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms, u32 *major, u32 *minor) { + struct device *dev = &mdp5_kms->pdev->dev; u32 version; - mdp5_enable(mdp5_kms); + pm_runtime_get_sync(dev); version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION); - mdp5_disable(mdp5_kms); + pm_runtime_put_autosuspend(dev); *major = FIELD(version, MDP5_HW_VERSION_MAJOR); *minor = FIELD(version, MDP5_HW_VERSION_MINOR); @@ -643,7 +649,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) * have left things on, in which case we'll start getting faults if * we don't disable): */ - mdp5_enable(mdp5_kms); + pm_runtime_get_sync(&pdev->dev); for (i = 0; i < MDP5_INTF_NUM_MAX; i++) { if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) || !config->hw->intf.base[i]) @@ -652,7 +658,6 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3); } - mdp5_disable(mdp5_kms); mdelay(16); if (config->platform.iommu) { @@ -678,6 +683,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) aspace = NULL;; } + pm_runtime_put_autosuspend(&pdev->dev); + ret = modeset_init(mdp5_kms); if (ret) { dev_err(&pdev->dev, "modeset_init failed: %d\n", ret); @@ -1005,6 +1012,30 @@ static int mdp5_dev_remove(struct platform_device *pdev) return 0; } +static __maybe_unused int mdp5_runtime_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev); + + DBG(""); + + return mdp5_disable(mdp5_kms); +} + +static __maybe_unused int mdp5_runtime_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev); + + DBG(""); + + return mdp5_enable(mdp5_kms); +} + +static const struct dev_pm_ops mdp5_pm_ops 
= { + SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL) +}; + static const struct of_device_id mdp5_dt_match[] = { { .compatible = "qcom,mdp5", }, /* to support downstream DT files */ @@ -1019,6 +1050,7 @@ static struct platform_driver mdp5_driver = { .driver = { .name = "msm_mdp", .of_match_table = mdp5_dt_match, + .pm = &mdp5_pm_ops, }, }; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index 17caa0e8c8ae..9b3fe01089d1 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -76,6 +76,8 @@ struct mdp5_kms { bool rpm_enabled; struct mdp_irq error_handler; + + int enable_count; }; #define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base) @@ -167,11 +169,13 @@ struct mdp5_encoder { static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data) { + WARN_ON(mdp5_kms->enable_count <= 0); msm_writel(data, mdp5_kms->mmio + reg); } static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg) { + WARN_ON(mdp5_kms->enable_count <= 0); return msm_readl(mdp5_kms->mmio + reg); } @@ -255,9 +259,6 @@ static inline uint32_t lm2ppdone(struct mdp5_hw_mixer *mixer) return MDP5_IRQ_PING_PONG_0_DONE << mixer->pp; } -int mdp5_disable(struct mdp5_kms *mdp5_kms); -int mdp5_enable(struct mdp5_kms *mdp5_kms); - void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, uint32_t old_irqmask); void mdp5_irq_preinstall(struct msm_kms *kms); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c index 9c34d7824988..f2a0db7a8a03 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c @@ -31,6 +31,10 @@ struct msm_mdss { struct regulator *vdd; + struct clk *ahb_clk; + struct clk *axi_clk; + struct clk *vsync_clk; + struct { volatile unsigned long enabled_mask; struct irq_domain *domain; @@ -140,6 +144,51 @@ static int mdss_irq_domain_init(struct msm_mdss *mdss) return 0; } +int msm_mdss_enable(struct msm_mdss *mdss) +{ + DBG(""); + + clk_prepare_enable(mdss->ahb_clk); + if (mdss->axi_clk) + clk_prepare_enable(mdss->axi_clk); + if (mdss->vsync_clk) + clk_prepare_enable(mdss->vsync_clk); + + return 0; +} + +int msm_mdss_disable(struct msm_mdss *mdss) +{ + DBG(""); + + if (mdss->vsync_clk) + clk_disable_unprepare(mdss->vsync_clk); + if (mdss->axi_clk) + clk_disable_unprepare(mdss->axi_clk); + clk_disable_unprepare(mdss->ahb_clk); + + return 0; +} + +static int msm_mdss_get_clocks(struct msm_mdss *mdss) +{ + struct platform_device *pdev = to_platform_device(mdss->dev->dev); + + mdss->ahb_clk = msm_clk_get(pdev, "iface"); + if (IS_ERR(mdss->ahb_clk)) + mdss->ahb_clk = NULL; + + mdss->axi_clk = msm_clk_get(pdev, "bus"); + if (IS_ERR(mdss->axi_clk)) + mdss->axi_clk = NULL; + + mdss->vsync_clk = msm_clk_get(pdev, "vsync"); + if (IS_ERR(mdss->vsync_clk)) + mdss->vsync_clk = NULL; + + return 0; +} + void msm_mdss_destroy(struct drm_device *dev) { struct msm_drm_private *priv = dev->dev_private; @@ -153,8 +202,6 @@ void msm_mdss_destroy(struct drm_device *dev) regulator_disable(mdss->vdd); - pm_runtime_put_sync(dev->dev); - pm_runtime_disable(dev->dev); } @@ -190,6 +237,12 @@ int msm_mdss_init(struct drm_device *dev) goto fail; } + ret = msm_mdss_get_clocks(mdss); + if (ret) { + dev_err(dev->dev, "failed to get clocks: %d\n", ret); + goto fail; + } + /* Regulator to enable GDSCs in downstream kernels */ mdss->vdd = devm_regulator_get(dev->dev, "vdd"); if (IS_ERR(mdss->vdd)) { @@ -221,12 +274,6 @@ int 
msm_mdss_init(struct drm_device *dev) pm_runtime_enable(dev->dev); - /* - * TODO: This is needed as the MDSS GDSC is only tied to MDSS's power - * domain. Remove this once runtime PM is adapted for all the devices. - */ - pm_runtime_get_sync(dev->dev); - return 0; fail_irq: regulator_disable(mdss->vdd); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c index 58f712d37e7f..ae4983d9d0a5 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c @@ -28,6 +28,13 @@ struct mdp5_smp { int blk_cnt; int blk_size; + + /* register cache */ + u32 alloc_w[22]; + u32 alloc_r[22]; + u32 pipe_reqprio_fifo_wm0[SSPP_MAX]; + u32 pipe_reqprio_fifo_wm1[SSPP_MAX]; + u32 pipe_reqprio_fifo_wm2[SSPP_MAX]; }; static inline @@ -98,16 +105,15 @@ static int smp_request_block(struct mdp5_smp *smp, static void set_fifo_thresholds(struct mdp5_smp *smp, enum mdp5_pipe pipe, int nblks) { - struct mdp5_kms *mdp5_kms = get_kms(smp); u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE); u32 val; /* 1/4 of SMP pool that is being fetched */ val = (nblks * smp_entries_per_blk) / 4; - mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3); + smp->pipe_reqprio_fifo_wm0[pipe] = val * 1; + smp->pipe_reqprio_fifo_wm1[pipe] = val * 2; + smp->pipe_reqprio_fifo_wm2[pipe] = val * 3; } /* @@ -222,7 +228,6 @@ void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state, static unsigned update_smp_state(struct mdp5_smp *smp, u32 cid, mdp5_smp_state_t *assigned) { - struct mdp5_kms *mdp5_kms = get_kms(smp); int cnt = smp->blk_cnt; unsigned nblks = 0; u32 blk, val; @@ -231,7 +236,7 @@ static unsigned update_smp_state(struct mdp5_smp *smp, int idx = blk / 3; int fld = blk % 3; - val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx)); + val = smp->alloc_w[idx]; switch (fld) { case 0: @@ -248,8 +253,8 @@ static unsigned update_smp_state(struct mdp5_smp *smp, break; } - mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val); - mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val); + smp->alloc_w[idx] = val; + smp->alloc_r[idx] = val; nblks++; } @@ -257,6 +262,39 @@ static unsigned update_smp_state(struct mdp5_smp *smp, return nblks; } +static void write_smp_alloc_regs(struct mdp5_smp *smp) +{ + struct mdp5_kms *mdp5_kms = get_kms(smp); + int i, num_regs; + + num_regs = smp->blk_cnt / 3 + 1; + + for (i = 0; i < num_regs; i++) { + mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(i), + smp->alloc_w[i]); + mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(i), + smp->alloc_r[i]); + } +} + +static void write_smp_fifo_regs(struct mdp5_smp *smp) +{ + struct mdp5_kms *mdp5_kms = get_kms(smp); + int i; + + for (i = 0; i < mdp5_kms->num_hwpipes; i++) { + struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i]; + enum mdp5_pipe pipe = hwpipe->pipe; + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), + smp->pipe_reqprio_fifo_wm0[pipe]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), + smp->pipe_reqprio_fifo_wm1[pipe]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), + smp->pipe_reqprio_fifo_wm2[pipe]); + } +} + void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state) { enum mdp5_pipe pipe; @@ -277,6 +315,9 @@ void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state) set_fifo_thresholds(smp, pipe, nblks); } + 
write_smp_alloc_regs(smp); + write_smp_fifo_regs(smp); + state->assigned = 0; } @@ -289,6 +330,8 @@ void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state set_fifo_thresholds(smp, pipe, 0); } + write_smp_fifo_regs(smp); + state->released = 0; } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index b0129e7b29e3..606df7bea97b 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -73,6 +73,10 @@ bool dumpstate = false; MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors"); module_param(dumpstate, bool, 0600); +static bool modeset = true; +MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)"); +module_param(modeset, bool, 0600); + /* * Util/helpers: */ @@ -878,8 +882,37 @@ static int msm_pm_resume(struct device *dev) } #endif +#ifdef CONFIG_PM +static int msm_runtime_suspend(struct device *dev) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct msm_drm_private *priv = ddev->dev_private; + + DBG(""); + + if (priv->mdss) + return msm_mdss_disable(priv->mdss); + + return 0; +} + +static int msm_runtime_resume(struct device *dev) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct msm_drm_private *priv = ddev->dev_private; + + DBG(""); + + if (priv->mdss) + return msm_mdss_enable(priv->mdss); + + return 0; +} +#endif + static const struct dev_pm_ops msm_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume) + SET_RUNTIME_PM_OPS(msm_runtime_suspend, msm_runtime_resume, NULL) }; /* @@ -1103,6 +1136,9 @@ static struct platform_driver msm_platform_driver = { static int __init msm_drm_register(void) { + if (!modeset) + return -EINVAL; + DBG("init"); msm_mdp_register(); msm_dsi_register(); diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index fc8d24f7c084..5e8109c07560 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -55,8 +55,6 @@ struct msm_fence_cb; struct msm_gem_address_space; struct msm_gem_vma; -#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */ - struct msm_file_private { /* currently we don't do anything useful with this.. 
but when * per-context address spaces are supported we'd keep track of @@ -237,6 +235,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags); struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev, uint32_t size, uint32_t flags); +void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, + uint32_t flags, struct msm_gem_address_space *aspace, + struct drm_gem_object **bo, uint64_t *iova); +void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size, + uint32_t flags, struct msm_gem_address_space *aspace, + struct drm_gem_object **bo, uint64_t *iova); struct drm_gem_object *msm_gem_import(struct drm_device *dev, struct dma_buf *dmabuf, struct sg_table *sgt); @@ -248,10 +252,10 @@ uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, struct msm_gem_address_space *aspace, int plane); struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane); const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb); -struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, - const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev, struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd); +struct drm_framebuffer * msm_alloc_stolen_fb(struct drm_device *dev, + int w, int h, int p, uint32_t format); struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev); void msm_fbdev_free(struct drm_device *dev); diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index 6ecb7b170316..fc175e724ad6 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -20,6 +20,7 @@ #include "msm_drv.h" #include "msm_kms.h" +#include "msm_gem.h" struct msm_framebuffer { struct drm_framebuffer base; @@ -28,6 +29,8 @@ struct msm_framebuffer { }; #define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base) +static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, + const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); static int msm_framebuffer_create_handle(struct drm_framebuffer *fb, struct drm_file *file_priv, @@ -161,7 +164,7 @@ out_unref: return ERR_PTR(ret); } -struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, +static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos) { struct msm_drm_private *priv = dev->dev_private; @@ -237,3 +240,43 @@ fail: return ERR_PTR(ret); } + +struct drm_framebuffer * +msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format) +{ + struct drm_mode_fb_cmd2 mode_cmd = { + .pixel_format = format, + .width = w, + .height = h, + .pitches = { p }, + }; + struct drm_gem_object *bo; + struct drm_framebuffer *fb; + int size; + + /* allocate backing bo */ + size = mode_cmd.pitches[0] * mode_cmd.height; + DBG("allocating %d bytes for fb %d", size, dev->primary->index); + bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC | MSM_BO_STOLEN); + if (IS_ERR(bo)) { + dev_warn(dev->dev, "could not allocate stolen bo\n"); + /* try regular bo: */ + bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC); + } + if (IS_ERR(bo)) { + dev_err(dev->dev, "failed to allocate buffer object\n"); + return ERR_CAST(bo); + } + + fb = msm_framebuffer_init(dev, &mode_cmd, &bo); + if (IS_ERR(fb)) { + dev_err(dev->dev, "failed to allocate fb\n"); + /* note: if fb creation failed, we can't rely on fb destroy 
+ * to unref the bo: + */ + drm_gem_object_unreference_unlocked(bo); + return ERR_CAST(fb); + } + + return fb; +} diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index 9c00fedfc741..c178563fcd4d 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -19,7 +19,6 @@ #include <drm/drm_fb_helper.h> #include "msm_drv.h" -#include "msm_gem.h" #include "msm_kms.h" extern int msm_gem_mmap_obj(struct drm_gem_object *obj, @@ -35,7 +34,6 @@ static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma); struct msm_fbdev { struct drm_fb_helper base; struct drm_framebuffer *fb; - struct drm_gem_object *bo; }; static struct fb_ops msm_fb_ops = { @@ -57,16 +55,16 @@ static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma) { struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par; struct msm_fbdev *fbdev = to_msm_fbdev(helper); - struct drm_gem_object *drm_obj = fbdev->bo; + struct drm_gem_object *bo = msm_framebuffer_bo(fbdev->fb, 0); int ret = 0; - ret = drm_gem_mmap_obj(drm_obj, drm_obj->size, vma); + ret = drm_gem_mmap_obj(bo, bo->size, vma); if (ret) { pr_err("%s:drm_gem_mmap_obj fail\n", __func__); return ret; } - return msm_gem_mmap_obj(drm_obj, vma); + return msm_gem_mmap_obj(bo, vma); } static int msm_fbdev_create(struct drm_fb_helper *helper, @@ -76,47 +74,30 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, struct drm_device *dev = helper->dev; struct msm_drm_private *priv = dev->dev_private; struct drm_framebuffer *fb = NULL; + struct drm_gem_object *bo; struct fb_info *fbi = NULL; - struct drm_mode_fb_cmd2 mode_cmd = {0}; uint64_t paddr; - int ret, size; + uint32_t format; + int ret, pitch; + + format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width, sizes->surface_height, sizes->surface_bpp, sizes->fb_width, sizes->fb_height); - mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, - sizes->surface_depth); - - mode_cmd.width = sizes->surface_width; - mode_cmd.height = sizes->surface_height; - - mode_cmd.pitches[0] = align_pitch( - mode_cmd.width, sizes->surface_bpp); + pitch = align_pitch(sizes->surface_width, sizes->surface_bpp); + fb = msm_alloc_stolen_fb(dev, sizes->surface_width, + sizes->surface_height, pitch, format); - /* allocate backing bo */ - size = mode_cmd.pitches[0] * mode_cmd.height; - DBG("allocating %d bytes for fb %d", size, dev->primary->index); - fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | - MSM_BO_WC | MSM_BO_STOLEN); - if (IS_ERR(fbdev->bo)) { - ret = PTR_ERR(fbdev->bo); - fbdev->bo = NULL; - dev_err(dev->dev, "failed to allocate buffer object: %d\n", ret); - goto fail; - } - - fb = msm_framebuffer_init(dev, &mode_cmd, &fbdev->bo); if (IS_ERR(fb)) { dev_err(dev->dev, "failed to allocate fb\n"); - /* note: if fb creation failed, we can't rely on fb destroy - * to unref the bo: - */ - drm_gem_object_unreference_unlocked(fbdev->bo); ret = PTR_ERR(fb); goto fail; } + bo = msm_framebuffer_bo(fb, 0); + mutex_lock(&dev->struct_mutex); /* @@ -124,7 +105,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, * in panic (ie. 
lock-safe, etc) we could avoid pinning the * buffer now: */ - ret = msm_gem_get_iova(fbdev->bo, priv->kms->aspace, &paddr); + ret = msm_gem_get_iova(bo, priv->kms->aspace, &paddr); if (ret) { dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret); goto fail_unlock; @@ -152,14 +133,14 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, dev->mode_config.fb_base = paddr; - fbi->screen_base = msm_gem_get_vaddr(fbdev->bo); + fbi->screen_base = msm_gem_get_vaddr(bo); if (IS_ERR(fbi->screen_base)) { ret = PTR_ERR(fbi->screen_base); goto fail_unlock; } - fbi->screen_size = fbdev->bo->size; + fbi->screen_size = bo->size; fbi->fix.smem_start = paddr; - fbi->fix.smem_len = fbdev->bo->size; + fbi->fix.smem_len = bo->size; DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres); DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height); @@ -241,7 +222,9 @@ void msm_fbdev_free(struct drm_device *dev) /* this will free the backing object */ if (fbdev->fb) { - msm_gem_put_vaddr(fbdev->bo); + struct drm_gem_object *bo = + msm_framebuffer_bo(fbdev->fb, 0); + msm_gem_put_vaddr(bo); drm_framebuffer_remove(fbdev->fb); } diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index a0c60e738db8..f15821a0d900 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -1024,3 +1024,49 @@ fail: drm_gem_object_unreference_unlocked(obj); return ERR_PTR(ret); } + +static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size, + uint32_t flags, struct msm_gem_address_space *aspace, + struct drm_gem_object **bo, uint64_t *iova, bool locked) +{ + void *vaddr; + struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked); + int ret; + + if (IS_ERR(obj)) + return ERR_CAST(obj); + + if (iova) { + ret = msm_gem_get_iova(obj, aspace, iova); + if (ret) { + drm_gem_object_unreference(obj); + return ERR_PTR(ret); + } + } + + /* msm_gem_get_vaddr() returns an ERR_PTR on failure, so check with + * IS_ERR() rather than for NULL, and propagate the error after + * undoing the iova pin and the object reference: + */ + vaddr = msm_gem_get_vaddr(obj); + if (IS_ERR(vaddr)) { + msm_gem_put_iova(obj, aspace); + drm_gem_object_unreference(obj); + return ERR_CAST(vaddr); + } + + if (bo) + *bo = obj; + + return vaddr; +} + +void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, + uint32_t flags, struct msm_gem_address_space *aspace, + struct drm_gem_object **bo, uint64_t *iova) +{ + return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false); +} + +void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size, + uint32_t flags, struct msm_gem_address_space *aspace, + struct drm_gem_object **bo, uint64_t *iova) +{ + return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true); +} diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 9f3dbc236ab3..ffbff27600e0 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -562,11 +562,49 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu) return 0; } +static struct msm_gem_address_space * +msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev, + uint64_t va_start, uint64_t va_end) +{ + struct iommu_domain *iommu; + struct msm_gem_address_space *aspace; + int ret; + + /* + * Setup IOMMU.. eventually we will (I think) do this once per context + * and have separate page tables per context.
For now, to keep things + * simple and to get something working, just use a single address space: + */ + iommu = iommu_domain_alloc(&platform_bus_type); + if (!iommu) + return NULL; + + iommu->geometry.aperture_start = va_start; + iommu->geometry.aperture_end = va_end; + + dev_info(gpu->dev->dev, "%s: using IOMMU\n", gpu->name); + + aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu"); + if (IS_ERR(aspace)) { + dev_err(gpu->dev->dev, "failed to init iommu: %ld\n", + PTR_ERR(aspace)); + iommu_domain_free(iommu); + return ERR_CAST(aspace); + } + + ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0); + if (ret) { + msm_gem_address_space_put(aspace); + return ERR_PTR(ret); + } + + return aspace; +} + int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, const char *name, struct msm_gpu_config *config) { - struct iommu_domain *iommu; int ret; if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs))) @@ -636,28 +674,19 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, if (IS_ERR(gpu->gpu_cx)) gpu->gpu_cx = NULL; - /* Setup IOMMU.. eventually we will (I think) do this once per context - * and have separate page tables per context. For now, to keep things - * simple and to get something working, just use a single address space: - */ - iommu = iommu_domain_alloc(&platform_bus_type); - if (iommu) { - iommu->geometry.aperture_start = config->va_start; - iommu->geometry.aperture_end = config->va_end; - - dev_info(drm->dev, "%s: using IOMMU\n", name); - gpu->aspace = msm_gem_address_space_create(&pdev->dev, - iommu, "gpu"); - if (IS_ERR(gpu->aspace)) { - ret = PTR_ERR(gpu->aspace); - dev_err(drm->dev, "failed to init iommu: %d\n", ret); - gpu->aspace = NULL; - iommu_domain_free(iommu); - goto fail; - } + gpu->pdev = pdev; + platform_set_drvdata(pdev, gpu); + + bs_init(gpu); - } else { + gpu->aspace = msm_gpu_create_address_space(gpu, pdev, + config->va_start, config->va_end); + + if (gpu->aspace == NULL) dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name); + else if (IS_ERR(gpu->aspace)) { + ret = PTR_ERR(gpu->aspace); + goto fail; } /* Create ringbuffer: */ @@ -669,14 +698,10 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, goto fail; } - gpu->pdev = pdev; - platform_set_drvdata(pdev, gpu); - - bs_init(gpu); - return 0; fail: + platform_set_drvdata(pdev, NULL); return ret; } @@ -693,7 +718,9 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) msm_gem_put_iova(gpu->rb->bo, gpu->aspace); msm_ringbuffer_destroy(gpu->rb); } - - if (gpu->fctx) - msm_fence_context_free(gpu->fctx); + if (gpu->aspace) { + gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, + NULL, 0); + msm_gem_address_space_put(gpu->aspace); + } } diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index a8f2ba5e5f07..17d5824417ad 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -99,5 +99,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev); struct msm_kms *mdp5_kms_init(struct drm_device *dev); int msm_mdss_init(struct drm_device *dev); void msm_mdss_destroy(struct drm_device *dev); +int msm_mdss_enable(struct msm_mdss *mdss); +int msm_mdss_disable(struct msm_mdss *mdss); #endif /* __MSM_KMS_H__ */ diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 791bca3c6a9c..bf065a540130 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -33,16 +33,14 @@ 
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) } ring->gpu = gpu; - ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC); - if (IS_ERR(ring->bo)) { - ret = PTR_ERR(ring->bo); - ring->bo = NULL; - goto fail; - } - ring->start = msm_gem_get_vaddr(ring->bo); + /* Pass NULL for the iova pointer - we will map it later */ + ring->start = msm_gem_kernel_new(gpu->dev, size, MSM_BO_WC, + gpu->aspace, &ring->bo, NULL); + if (IS_ERR(ring->start)) { ret = PTR_ERR(ring->start); + ring->start = 0; goto fail; } ring->end = ring->start + (size / 4); diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h index 085486024089..ed465572491e 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.h +++ b/drivers/gpu/drm/omapdrm/dss/dss.h @@ -185,6 +185,9 @@ struct dss_pll_hw { bool has_freqsel; bool has_selfreqdco; bool has_refsel; + + /* DRA7 errata i886: use high N & M to avoid jitter */ + bool errata_i886; }; struct dss_pll { diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c index 5e221302768b..9d9d9d42009b 100644 --- a/drivers/gpu/drm/omapdrm/dss/pll.c +++ b/drivers/gpu/drm/omapdrm/dss/pll.c @@ -215,8 +215,8 @@ bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin, dss_pll_calc_func func, void *data) { const struct dss_pll_hw *hw = pll->hw; - int n, n_min, n_max; - int m, m_min, m_max; + int n, n_start, n_stop, n_inc; + int m, m_start, m_stop, m_inc; unsigned long fint, clkdco; unsigned long pll_hw_max; unsigned long fint_hw_min, fint_hw_max; @@ -226,22 +226,33 @@ bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin, fint_hw_min = hw->fint_min; fint_hw_max = hw->fint_max; - n_min = max(DIV_ROUND_UP(clkin, fint_hw_max), 1ul); - n_max = min((unsigned)(clkin / fint_hw_min), hw->n_max); + n_start = max(DIV_ROUND_UP(clkin, fint_hw_max), 1ul); + n_stop = min((unsigned)(clkin / fint_hw_min), hw->n_max); + n_inc = 1; + + if (hw->errata_i886) { + swap(n_start, n_stop); + n_inc = -1; + } pll_max = pll_max ? 
pll_max : ULONG_MAX; - /* Try to find high N & M to avoid jitter (DRA7 errata i886) */ - for (n = n_max; n >= n_min; --n) { + for (n = n_start; n != n_stop; n += n_inc) { fint = clkin / n; - m_min = max(DIV_ROUND_UP(DIV_ROUND_UP(pll_min, fint), 2), + m_start = max(DIV_ROUND_UP(DIV_ROUND_UP(pll_min, fint), 2), 1ul); - m_max = min3((unsigned)(pll_max / fint / 2), + m_stop = min3((unsigned)(pll_max / fint / 2), (unsigned)(pll_hw_max / fint / 2), hw->m_max); + m_inc = 1; + + if (hw->errata_i886) { + swap(m_start, m_stop); + m_inc = -1; + } - for (m = m_max; m >= m_min; --m) { + for (m = m_start; m != m_stop; m += m_inc) { clkdco = 2 * m * fint; if (func(n, m, fint, clkdco, data)) diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index 5bd7788357b2..d58da6f32693 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -263,6 +263,12 @@ static const struct venc_config venc_config_pal_bdghi = { .fid_ext_start_y__fid_ext_offset_y = 0x01380005, }; +enum venc_videomode { + VENC_MODE_UNKNOWN, + VENC_MODE_PAL, + VENC_MODE_NTSC, +}; + static const struct videomode omap_dss_pal_vm = { .hactive = 720, .vactive = 574, @@ -297,6 +303,24 @@ static const struct videomode omap_dss_ntsc_vm = { DISPLAY_FLAGS_SYNC_NEGEDGE, }; +static enum venc_videomode venc_get_videomode(const struct videomode *vm) +{ + if (!(vm->flags & DISPLAY_FLAGS_INTERLACED)) + return VENC_MODE_UNKNOWN; + + if (vm->pixelclock == omap_dss_pal_vm.pixelclock && + vm->hactive == omap_dss_pal_vm.hactive && + vm->vactive == omap_dss_pal_vm.vactive) + return VENC_MODE_PAL; + + if (vm->pixelclock == omap_dss_ntsc_vm.pixelclock && + vm->hactive == omap_dss_ntsc_vm.hactive && + vm->vactive == omap_dss_ntsc_vm.vactive) + return VENC_MODE_NTSC; + + return VENC_MODE_UNKNOWN; +} + static struct { struct platform_device *pdev; void __iomem *base; @@ -423,14 +447,14 @@ static void venc_runtime_put(void) static const struct venc_config *venc_timings_to_config(struct videomode *vm) { - if (memcmp(&omap_dss_pal_vm, vm, sizeof(*vm)) == 0) + switch (venc_get_videomode(vm)) { + default: + WARN_ON_ONCE(1); + case VENC_MODE_PAL: return &venc_config_pal_trm; - - if (memcmp(&omap_dss_ntsc_vm, vm, sizeof(*vm)) == 0) + case VENC_MODE_NTSC: return &venc_config_ntsc_trm; - - BUG(); - return NULL; + } } static int venc_power_on(struct omap_dss_device *dssdev) @@ -541,15 +565,28 @@ static void venc_display_disable(struct omap_dss_device *dssdev) static void venc_set_timings(struct omap_dss_device *dssdev, struct videomode *vm) { + struct videomode actual_vm; + DSSDBG("venc_set_timings\n"); mutex_lock(&venc.venc_lock); + switch (venc_get_videomode(vm)) { + default: + WARN_ON_ONCE(1); + case VENC_MODE_PAL: + actual_vm = omap_dss_pal_vm; + break; + case VENC_MODE_NTSC: + actual_vm = omap_dss_ntsc_vm; + break; + } + /* Reset WSS data when the TV standard changes. 
*/ - if (memcmp(&venc.vm, vm, sizeof(*vm))) + if (memcmp(&venc.vm, &actual_vm, sizeof(actual_vm))) venc.wss_data = 0; - venc.vm = *vm; + venc.vm = actual_vm; dispc_set_tv_pclk(13500000); @@ -561,13 +598,13 @@ static int venc_check_timings(struct omap_dss_device *dssdev, { DSSDBG("venc_check_timings\n"); - if (memcmp(&omap_dss_pal_vm, vm, sizeof(*vm)) == 0) + switch (venc_get_videomode(vm)) { + case VENC_MODE_PAL: + case VENC_MODE_NTSC: return 0; - - if (memcmp(&omap_dss_ntsc_vm, vm, sizeof(*vm)) == 0) - return 0; - - return -EINVAL; + default: + return -EINVAL; + } } static void venc_get_timings(struct omap_dss_device *dssdev, diff --git a/drivers/gpu/drm/omapdrm/dss/video-pll.c b/drivers/gpu/drm/omapdrm/dss/video-pll.c index f7ea02a88b1a..38a239cc5e04 100644 --- a/drivers/gpu/drm/omapdrm/dss/video-pll.c +++ b/drivers/gpu/drm/omapdrm/dss/video-pll.c @@ -130,6 +130,8 @@ static const struct dss_pll_hw dss_dra7_video_pll_hw = { .mX_lsb[3] = 5, .has_refsel = true, + + .errata_i886 = true, }; struct dss_pll *dss_video_pll_init(struct platform_device *pdev, int id, diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 9b3c36b48356..cdf5b0601eba 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -84,23 +84,36 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state) /* Apply the atomic update. */ drm_atomic_helper_commit_modeset_disables(dev, old_state); - /* With the current dss dispc implementation we have to enable - * the new modeset before we can commit planes. The dispc ovl - * configuration relies on the video mode configuration been - * written into the HW when the ovl configuration is - * calculated. - * - * This approach is not ideal because after a mode change the - * plane update is executed only after the first vblank - * interrupt. The dispc implementation should be fixed so that - * it is able use uncommitted drm state information. - */ - drm_atomic_helper_commit_modeset_enables(dev, old_state); - omap_atomic_wait_for_completion(dev, old_state); - - drm_atomic_helper_commit_planes(dev, old_state, 0); - - drm_atomic_helper_commit_hw_done(old_state); + if (priv->omaprev != 0x3430) { + /* With the current dss dispc implementation we have to enable + * the new modeset before we can commit planes. The dispc ovl + * configuration relies on the video mode configuration being + * written into the HW when the ovl configuration is + * calculated. + * + * This approach is not ideal because after a mode change the + * plane update is executed only after the first vblank + * interrupt. The dispc implementation should be fixed so that + * it is able to use uncommitted drm state information. + */ + drm_atomic_helper_commit_modeset_enables(dev, old_state); + omap_atomic_wait_for_completion(dev, old_state); + + drm_atomic_helper_commit_planes(dev, old_state, 0); + + drm_atomic_helper_commit_hw_done(old_state); + } else { + /* + * OMAP3 DSS seems to have issues with the work-around above, + * resulting in endless sync lost errors if a crtc is enabled + * without a plane. For now, skip the work-around for OMAP3.
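+ * In this fallback path the atomic helpers therefore run in their
+ * default order: planes are committed before the modeset enables, so
+ * a crtc is never enabled ahead of its plane setup.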
+ */ + drm_atomic_helper_commit_planes(dev, old_state, 0); + + drm_atomic_helper_commit_modeset_enables(dev, old_state); + + drm_atomic_helper_commit_hw_done(old_state); + } /* * Wait for completion of the page flips to ensure that old buffers diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index cf480218daa5..ec5943627aa5 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c @@ -312,7 +312,7 @@ static int sun4i_backend_of_get_id(struct device_node *node) struct device_node *remote; u32 reg; - remote = of_parse_phandle(ep, "remote-endpoint", 0); + remote = of_graph_get_remote_endpoint(ep); if (!remote) continue; diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h index 89976da542b1..c00fee539822 100644 --- a/include/drm/drm_syncobj.h +++ b/include/drm/drm_syncobj.h @@ -28,6 +28,8 @@ #include "linux/dma-fence.h" +struct drm_syncobj_cb; + /** * struct drm_syncobj - sync object. * @@ -43,15 +45,47 @@ struct drm_syncobj { /** * @fence: * NULL or a pointer to the fence bound to this object. + * + * This field should not be used directly. Use drm_syncobj_fence_get + * and drm_syncobj_replace_fence instead. */ struct dma_fence *fence; /** + * @cb_list: + * List of callbacks to call when the fence gets replaced + */ + struct list_head cb_list; + /** + * @lock: + * locks cb_list and write-locks fence. + */ + spinlock_t lock; + /** * @file: * a file backing for this syncobj. */ struct file *file; }; +typedef void (*drm_syncobj_func_t)(struct drm_syncobj *syncobj, + struct drm_syncobj_cb *cb); + +/** + * struct drm_syncobj_cb - callback for drm_syncobj_add_callback + * @node: used by drm_syncobj_add_callback to append this struct to + * syncobj::cb_list + * @func: drm_syncobj_func_t to call + * + * This struct will be initialized by drm_syncobj_add_callback; additional + * data can be passed along by embedding drm_syncobj_cb in another struct. + * The callback will get called the next time drm_syncobj_replace_fence is + * called.
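+ *
+ * For example, a wait implementation might embed it as follows (an
+ * illustrative sketch; the names are not part of this API):
+ *
+ *	struct wait_entry {
+ *		struct drm_syncobj_cb base;
+ *		struct task_struct *task;
+ *	};
+ *
+ * and recover the containing struct inside @func with
+ * container_of(cb, struct wait_entry, base).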
+ */ +struct drm_syncobj_cb { + struct list_head node; + drm_syncobj_func_t func; +}; + void drm_syncobj_free(struct kref *kref); /** @@ -77,13 +111,30 @@ drm_syncobj_put(struct drm_syncobj *obj) kref_put(&obj->refcount, drm_syncobj_free); } +static inline struct dma_fence * +drm_syncobj_fence_get(struct drm_syncobj *syncobj) +{ + struct dma_fence *fence; + + rcu_read_lock(); + fence = dma_fence_get_rcu_safe(&syncobj->fence); + rcu_read_unlock(); + + return fence; +} + struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private, u32 handle); +void drm_syncobj_add_callback(struct drm_syncobj *syncobj, + struct drm_syncobj_cb *cb, + drm_syncobj_func_t func); +void drm_syncobj_remove_callback(struct drm_syncobj *syncobj, + struct drm_syncobj_cb *cb); void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, struct dma_fence *fence); -int drm_syncobj_fence_get(struct drm_file *file_private, - u32 handle, - struct dma_fence **fence); +int drm_syncobj_find_fence(struct drm_file *file_private, + u32 handle, + struct dma_fence **fence); void drm_syncobj_free(struct kref *kref); #endif diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 101593ab10ac..97677cd6964d 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -700,6 +700,7 @@ struct drm_prime_handle { struct drm_syncobj_create { __u32 handle; +#define DRM_SYNCOBJ_CREATE_SIGNALED (1 << 0) __u32 flags; }; @@ -718,6 +719,24 @@ struct drm_syncobj_handle { __u32 pad; }; +#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0) +#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1) +struct drm_syncobj_wait { + __u64 handles; + /* absolute timeout */ + __s64 timeout_nsec; + __u32 count_handles; + __u32 flags; + __u32 first_signaled; /* only valid when not waiting all */ + __u32 pad; +}; + +struct drm_syncobj_array { + __u64 handles; + __u32 count_handles; + __u32 pad; +}; + #if defined(__cplusplus) } #endif @@ -840,6 +859,9 @@ extern "C" { #define DRM_IOCTL_SYNCOBJ_DESTROY DRM_IOWR(0xC0, struct drm_syncobj_destroy) #define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD DRM_IOWR(0xC1, struct drm_syncobj_handle) #define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE DRM_IOWR(0xC2, struct drm_syncobj_handle) +#define DRM_IOCTL_SYNCOBJ_WAIT DRM_IOWR(0xC3, struct drm_syncobj_wait) +#define DRM_IOCTL_SYNCOBJ_RESET DRM_IOWR(0xC4, struct drm_syncobj_array) +#define DRM_IOCTL_SYNCOBJ_SIGNAL DRM_IOWR(0xC5, struct drm_syncobj_array) /** * Device specific ioctls should only be in their respective headers diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index a2bb7161f020..54fc38c3c3f1 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -715,24 +715,24 @@ struct drm_mode_atomic { struct drm_format_modifier_blob { #define FORMAT_BLOB_CURRENT 1 /* Version of this blob format */ - u32 version; + __u32 version; /* Flags */ - u32 flags; + __u32 flags; /* Number of fourcc formats supported */ - u32 count_formats; + __u32 count_formats; /* Where in this blob the formats exist (in bytes) */ - u32 formats_offset; + __u32 formats_offset; /* Number of drm_format_modifiers */ - u32 count_modifiers; + __u32 count_modifiers; /* Where in this blob the modifiers exist (in bytes) */ - u32 modifiers_offset; + __u32 modifiers_offset; - /* u32 formats[] */ + /* __u32 formats[] */ /* struct drm_format_modifier modifiers[] */ }; |
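For reference, the new syncobj ioctls could be exercised from userspace along these lines. This is an illustrative sketch rather than part of the series: it assumes a render node at /dev/dri/renderD128, a kernel carrying these patches, and that the updated uapi header is reachable as <drm/drm.h>. DRM_IOCTL_SYNCOBJ_CREATE is the pre-existing create ioctl from the same header.

/*
 * Create a syncobj in the signaled state, wait on it with the new
 * DRM_IOCTL_SYNCOBJ_WAIT ioctl, then reset it via DRM_IOCTL_SYNCOBJ_RESET.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

int main(void)
{
	struct drm_syncobj_create create;
	struct drm_syncobj_wait wait;
	struct drm_syncobj_array array;
	struct timespec now;
	int fd;

	fd = open("/dev/dri/renderD128", O_RDWR);
	if (fd < 0)
		return 1;

	/* Created signaled, so the wait below completes immediately. */
	memset(&create, 0, sizeof(create));
	create.flags = DRM_SYNCOBJ_CREATE_SIGNALED;
	if (ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &create))
		return 1;

	/* timeout_nsec is an absolute CLOCK_MONOTONIC deadline: now + 100ms. */
	clock_gettime(CLOCK_MONOTONIC, &now);
	memset(&wait, 0, sizeof(wait));
	wait.handles = (uintptr_t)&create.handle; /* u64 pointer to a u32 array */
	wait.count_handles = 1;
	wait.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;
	wait.timeout_nsec = (int64_t)now.tv_sec * 1000000000LL +
			    now.tv_nsec + 100 * 1000 * 1000;
	if (ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait))
		perror("DRM_IOCTL_SYNCOBJ_WAIT");

	/* Drop the fence again; a plain wait on the handle now fails with
	 * -EINVAL unless DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is set. */
	memset(&array, 0, sizeof(array));
	array.handles = (uintptr_t)&create.handle;
	array.count_handles = 1;
	if (ioctl(fd, DRM_IOCTL_SYNCOBJ_RESET, &array))
		perror("DRM_IOCTL_SYNCOBJ_RESET");

	close(fd);
	return 0;
}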