Diffstat (limited to 'drivers/gpu/drm/drm_syncobj.c')
-rw-r--r--	drivers/gpu/drm/drm_syncobj.c	450
1 file changed, 85 insertions, 365 deletions
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index da8175d9c6ff..e19525af0cce 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -56,47 +56,16 @@
 #include "drm_internal.h"
 #include <drm/drm_syncobj.h>
 
-/* merge normal syncobj to timeline syncobj, the point interval is 1 */
-#define DRM_SYNCOBJ_BINARY_POINT 1
-
-struct drm_syncobj_stub_fence {
-	struct dma_fence base;
-	spinlock_t lock;
-};
-
-static const char *drm_syncobj_stub_fence_get_name(struct dma_fence *fence)
-{
-	return "syncobjstub";
-}
-
-static const struct dma_fence_ops drm_syncobj_stub_fence_ops = {
-	.get_driver_name = drm_syncobj_stub_fence_get_name,
-	.get_timeline_name = drm_syncobj_stub_fence_get_name,
-};
-
-struct drm_syncobj_signal_pt {
-	struct dma_fence_array *fence_array;
-	u64 value;
-	struct list_head list;
+struct syncobj_wait_entry {
+	struct list_head node;
+	struct task_struct *task;
+	struct dma_fence *fence;
+	struct dma_fence_cb fence_cb;
 };
 
-static DEFINE_SPINLOCK(signaled_fence_lock);
-static struct dma_fence signaled_fence;
-
-static struct dma_fence *drm_syncobj_get_stub_fence(void)
-{
-	spin_lock(&signaled_fence_lock);
-	if (!signaled_fence.ops) {
-		dma_fence_init(&signaled_fence,
-			       &drm_syncobj_stub_fence_ops,
-			       &signaled_fence_lock,
-			       0, 0);
-		dma_fence_signal_locked(&signaled_fence);
-	}
-	spin_unlock(&signaled_fence_lock);
+static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
+				      struct syncobj_wait_entry *wait);
 
-	return dma_fence_get(&signaled_fence);
-}
 /**
  * drm_syncobj_find - lookup and reference a sync object.
  * @file_private: drm file private pointer
@@ -123,302 +92,84 @@ struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
 }
 EXPORT_SYMBOL(drm_syncobj_find);
 
-static struct dma_fence *
-drm_syncobj_find_signal_pt_for_point(struct drm_syncobj *syncobj,
-				     uint64_t point)
+static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
+				       struct syncobj_wait_entry *wait)
 {
-	struct drm_syncobj_signal_pt *signal_pt;
-
-	if ((syncobj->type == DRM_SYNCOBJ_TYPE_TIMELINE) &&
-	    (point <= syncobj->timeline))
-		return drm_syncobj_get_stub_fence();
-
-	list_for_each_entry(signal_pt, &syncobj->signal_pt_list, list) {
-		if (point > signal_pt->value)
-			continue;
-		if ((syncobj->type == DRM_SYNCOBJ_TYPE_BINARY) &&
-		    (point != signal_pt->value))
-			continue;
-		return dma_fence_get(&signal_pt->fence_array->base);
-	}
-	return NULL;
-}
-
-static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj,
-					    struct drm_syncobj_cb *cb,
-					    drm_syncobj_func_t func)
-{
-	cb->func = func;
-	list_add_tail(&cb->node, &syncobj->cb_list);
-}
-
-static void drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
-						   struct dma_fence **fence,
-						   struct drm_syncobj_cb *cb,
-						   drm_syncobj_func_t func)
-{
-	u64 pt_value = 0;
-
-	WARN_ON(*fence);
-
-	if (syncobj->type == DRM_SYNCOBJ_TYPE_BINARY) {
-		/*BINARY syncobj always wait on last pt */
-		pt_value = syncobj->signal_point;
-
-		if (pt_value == 0)
-			pt_value += DRM_SYNCOBJ_BINARY_POINT;
-	}
-
-	mutex_lock(&syncobj->cb_mutex);
-	spin_lock(&syncobj->pt_lock);
-	*fence = drm_syncobj_find_signal_pt_for_point(syncobj, pt_value);
-	spin_unlock(&syncobj->pt_lock);
-	if (!*fence)
-		drm_syncobj_add_callback_locked(syncobj, cb, func);
-	mutex_unlock(&syncobj->cb_mutex);
-}
-
-static void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
-					struct drm_syncobj_cb *cb)
-{
-	mutex_lock(&syncobj->cb_mutex);
-	list_del_init(&cb->node);
-	mutex_unlock(&syncobj->cb_mutex);
-}
+	if (wait->fence)
+		return;
 
-static void drm_syncobj_init(struct drm_syncobj *syncobj)
-{
-	spin_lock(&syncobj->pt_lock);
-	syncobj->timeline_context = dma_fence_context_alloc(1);
-	syncobj->timeline = 0;
-	syncobj->signal_point = 0;
-	init_waitqueue_head(&syncobj->wq);
-
-	INIT_LIST_HEAD(&syncobj->signal_pt_list);
-	spin_unlock(&syncobj->pt_lock);
-}
-
-static void drm_syncobj_fini(struct drm_syncobj *syncobj)
-{
-	struct drm_syncobj_signal_pt *signal_pt = NULL, *tmp;
-
-	spin_lock(&syncobj->pt_lock);
-	list_for_each_entry_safe(signal_pt, tmp,
-				 &syncobj->signal_pt_list, list) {
-		list_del(&signal_pt->list);
-		dma_fence_put(&signal_pt->fence_array->base);
-		kfree(signal_pt);
-	}
-	spin_unlock(&syncobj->pt_lock);
+	spin_lock(&syncobj->lock);
+	/* We've already tried once to get a fence and failed.  Now that we
+	 * have the lock, try one more time just to be sure we don't add a
+	 * callback when a fence has already been set.
+	 */
+	if (syncobj->fence)
+		wait->fence = dma_fence_get(
+			rcu_dereference_protected(syncobj->fence, 1));
+	else
+		list_add_tail(&wait->node, &syncobj->cb_list);
+	spin_unlock(&syncobj->lock);
 }
 
-static int drm_syncobj_create_signal_pt(struct drm_syncobj *syncobj,
-					struct dma_fence *fence,
-					u64 point)
+static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
+				    struct syncobj_wait_entry *wait)
 {
-	struct drm_syncobj_signal_pt *signal_pt =
-		kzalloc(sizeof(struct drm_syncobj_signal_pt), GFP_KERNEL);
-	struct drm_syncobj_signal_pt *tail_pt;
-	struct dma_fence **fences;
-	int num_fences = 0;
-	int ret = 0, i;
-
-	if (!signal_pt)
-		return -ENOMEM;
-	if (!fence)
-		goto out;
-
-	fences = kmalloc_array(sizeof(void *), 2, GFP_KERNEL);
-	if (!fences) {
-		ret = -ENOMEM;
-		goto out;
-	}
-	fences[num_fences++] = dma_fence_get(fence);
-	/* timeline syncobj must take this dependency */
-	if (syncobj->type == DRM_SYNCOBJ_TYPE_TIMELINE) {
-		spin_lock(&syncobj->pt_lock);
-		if (!list_empty(&syncobj->signal_pt_list)) {
-			tail_pt = list_last_entry(&syncobj->signal_pt_list,
-						  struct drm_syncobj_signal_pt, list);
-			fences[num_fences++] =
-				dma_fence_get(&tail_pt->fence_array->base);
-		}
-		spin_unlock(&syncobj->pt_lock);
-	}
-	signal_pt->fence_array = dma_fence_array_create(num_fences, fences,
-							syncobj->timeline_context,
-							point, false);
-	if (!signal_pt->fence_array) {
-		ret = -ENOMEM;
-		goto fail;
-	}
+	if (!wait->node.next)
+		return;
 
-	spin_lock(&syncobj->pt_lock);
-	if (syncobj->signal_point >= point) {
-		DRM_WARN("A later signal is ready!");
-		spin_unlock(&syncobj->pt_lock);
-		goto exist;
-	}
-	signal_pt->value = point;
-	list_add_tail(&signal_pt->list, &syncobj->signal_pt_list);
-	syncobj->signal_point = point;
-	spin_unlock(&syncobj->pt_lock);
-	wake_up_all(&syncobj->wq);
-
-	return 0;
-exist:
-	dma_fence_put(&signal_pt->fence_array->base);
-fail:
-	for (i = 0; i < num_fences; i++)
-		dma_fence_put(fences[i]);
-	kfree(fences);
-out:
-	kfree(signal_pt);
-	return ret;
+	spin_lock(&syncobj->lock);
+	list_del_init(&wait->node);
+	spin_unlock(&syncobj->lock);
 }
 
-static void drm_syncobj_garbage_collection(struct drm_syncobj *syncobj)
-{
-	struct drm_syncobj_signal_pt *signal_pt, *tmp, *tail_pt;
-
-	spin_lock(&syncobj->pt_lock);
-	tail_pt = list_last_entry(&syncobj->signal_pt_list,
-				  struct drm_syncobj_signal_pt,
-				  list);
-	list_for_each_entry_safe(signal_pt, tmp,
-				 &syncobj->signal_pt_list, list) {
-		if (syncobj->type == DRM_SYNCOBJ_TYPE_BINARY &&
-		    signal_pt == tail_pt)
-			continue;
-		if (dma_fence_is_signaled(&signal_pt->fence_array->base)) {
-			syncobj->timeline = signal_pt->value;
-			list_del(&signal_pt->list);
-			dma_fence_put(&signal_pt->fence_array->base);
-			kfree(signal_pt);
-		} else {
-			/*signal_pt is in order in list, from small to big, so
-			 * the later must not be signal either */
-			break;
-		}
-	}
-
-	spin_unlock(&syncobj->pt_lock);
-}
 /**
  * drm_syncobj_replace_fence - replace fence in a sync object.
  * @syncobj: Sync object to replace fence in
- * @point: timeline point
  * @fence: fence to install in sync file.
  *
- * This replaces the fence on a sync object, or a timeline point fence.
+ * This replaces the fence on a sync object.
  */
 void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
-			       u64 point,
 			       struct dma_fence *fence)
 {
-	u64 pt_value = point;
-
-	drm_syncobj_garbage_collection(syncobj);
-	if (syncobj->type == DRM_SYNCOBJ_TYPE_BINARY) {
-		if (!fence) {
-			drm_syncobj_fini(syncobj);
-			drm_syncobj_init(syncobj);
-			return;
-		}
-		pt_value = syncobj->signal_point +
-			DRM_SYNCOBJ_BINARY_POINT;
-	}
-	drm_syncobj_create_signal_pt(syncobj, fence, pt_value);
-	if (fence) {
-		struct drm_syncobj_cb *cur, *tmp;
-		LIST_HEAD(cb_list);
+	struct dma_fence *old_fence;
+	struct syncobj_wait_entry *cur, *tmp;
+
+	if (fence)
+		dma_fence_get(fence);
+
+	spin_lock(&syncobj->lock);
+
+	old_fence = rcu_dereference_protected(syncobj->fence,
+					      lockdep_is_held(&syncobj->lock));
+	rcu_assign_pointer(syncobj->fence, fence);
 
-		mutex_lock(&syncobj->cb_mutex);
+	if (fence != old_fence) {
 		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
 			list_del_init(&cur->node);
-			cur->func(syncobj, cur);
+			syncobj_wait_syncobj_func(syncobj, cur);
 		}
-		mutex_unlock(&syncobj->cb_mutex);
 	}
-}
-EXPORT_SYMBOL(drm_syncobj_replace_fence);
-
-static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
-{
-	struct drm_syncobj_stub_fence *fence;
-
-	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
-	if (fence == NULL)
-		return -ENOMEM;
-
-	spin_lock_init(&fence->lock);
-	dma_fence_init(&fence->base, &drm_syncobj_stub_fence_ops,
-		       &fence->lock, 0, 0);
-	dma_fence_signal(&fence->base);
+	spin_unlock(&syncobj->lock);
 
-	drm_syncobj_replace_fence(syncobj, 0, &fence->base);
-
-	dma_fence_put(&fence->base);
-
-	return 0;
-}
-
-static int
-drm_syncobj_point_get(struct drm_syncobj *syncobj, u64 point, u64 flags,
-		      struct dma_fence **fence)
-{
-	int ret = 0;
-
-	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
-		ret = wait_event_interruptible(syncobj->wq,
-					       point <= syncobj->signal_point);
-		if (ret < 0)
-			return ret;
-	}
-	spin_lock(&syncobj->pt_lock);
-	*fence = drm_syncobj_find_signal_pt_for_point(syncobj, point);
-	if (!*fence)
-		ret = -EINVAL;
-	spin_unlock(&syncobj->pt_lock);
-	return ret;
+	dma_fence_put(old_fence);
 }
+EXPORT_SYMBOL(drm_syncobj_replace_fence);
 
 /**
- * drm_syncobj_search_fence - lookup and reference the fence in a sync object or
- * in a timeline point
- * @syncobj: sync object pointer
- * @point: timeline point
- * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
- * @fence: out parameter for the fence
- *
- * if flags is DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT, the function will block
- * here until specific timeline points is reached.
- * if not, you need a submit thread and block in userspace until all future
- * timeline points have materialized, only then you can submit to the kernel,
- * otherwise, function will fail to return fence.
+ * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
+ * @syncobj: sync object to assign the fence on
  *
- * Returns 0 on success or a negative error value on failure. On success @fence
- * contains a reference to the fence, which must be released by calling
- * dma_fence_put().
+ * Assign a already signaled stub fence to the sync object.
  */
-int drm_syncobj_search_fence(struct drm_syncobj *syncobj, u64 point,
-			     u64 flags, struct dma_fence **fence)
+static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
 {
-	u64 pt_value = point;
-
-	if (!syncobj)
-		return -ENOENT;
+	struct dma_fence *fence = dma_fence_get_stub();
 
-	drm_syncobj_garbage_collection(syncobj);
-	if (syncobj->type == DRM_SYNCOBJ_TYPE_BINARY) {
-		/*BINARY syncobj always wait on last pt */
-		pt_value = syncobj->signal_point;
-
-		if (pt_value == 0)
-			pt_value += DRM_SYNCOBJ_BINARY_POINT;
-	}
-	return drm_syncobj_point_get(syncobj, pt_value, flags, fence);
+	drm_syncobj_replace_fence(syncobj, fence);
+	dma_fence_put(fence);
 }
-EXPORT_SYMBOL(drm_syncobj_search_fence);
 
 /**
  * drm_syncobj_find_fence - lookup and reference the fence in a sync object
@@ -429,7 +180,7 @@ EXPORT_SYMBOL(drm_syncobj_search_fence);
  * @fence: out parameter for the fence
  *
  * This is just a convenience function that combines drm_syncobj_find() and
- * drm_syncobj_lookup_fence().
+ * drm_syncobj_fence_get().
  *
  * Returns 0 on success or a negative error value on failure. On success @fence
  * contains a reference to the fence, which must be released by calling
@@ -440,11 +191,16 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
 			   struct dma_fence **fence)
 {
 	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
-	int ret;
+	int ret = 0;
 
-	ret = drm_syncobj_search_fence(syncobj, point, flags, fence);
-	if (syncobj)
-		drm_syncobj_put(syncobj);
+	if (!syncobj)
+		return -ENOENT;
+
+	*fence = drm_syncobj_fence_get(syncobj);
+	if (!*fence) {
+		ret = -EINVAL;
+	}
+	drm_syncobj_put(syncobj);
 	return ret;
 }
 EXPORT_SYMBOL(drm_syncobj_find_fence);
@@ -460,7 +216,7 @@ void drm_syncobj_free(struct kref *kref)
 	struct drm_syncobj *syncobj = container_of(kref,
 						   struct drm_syncobj,
 						   refcount);
-	drm_syncobj_fini(syncobj);
+	drm_syncobj_replace_fence(syncobj, NULL);
 	kfree(syncobj);
 }
 EXPORT_SYMBOL(drm_syncobj_free);
@@ -480,7 +236,6 @@ EXPORT_SYMBOL(drm_syncobj_free);
 int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
 		       struct dma_fence *fence)
 {
-	int ret;
 	struct drm_syncobj *syncobj;
 
 	syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
@@ -489,24 +244,13 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
 
 	kref_init(&syncobj->refcount);
 	INIT_LIST_HEAD(&syncobj->cb_list);
-	spin_lock_init(&syncobj->pt_lock);
-	mutex_init(&syncobj->cb_mutex);
-	if (flags & DRM_SYNCOBJ_CREATE_TYPE_TIMELINE)
-		syncobj->type = DRM_SYNCOBJ_TYPE_TIMELINE;
-	else
-		syncobj->type = DRM_SYNCOBJ_TYPE_BINARY;
-	drm_syncobj_init(syncobj);
-
-	if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
-		ret = drm_syncobj_assign_null_handle(syncobj);
-		if (ret < 0) {
-			drm_syncobj_put(syncobj);
-			return ret;
-		}
-	}
+	spin_lock_init(&syncobj->lock);
+
+	if (flags & DRM_SYNCOBJ_CREATE_SIGNALED)
+		drm_syncobj_assign_null_handle(syncobj);
 
 	if (fence)
-		drm_syncobj_replace_fence(syncobj, 0, fence);
+		drm_syncobj_replace_fence(syncobj, fence);
 
 	*out_syncobj = syncobj;
 	return 0;
@@ -691,7 +435,7 @@ static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
 		return -ENOENT;
 	}
 
-	drm_syncobj_replace_fence(syncobj, 0, fence);
+	drm_syncobj_replace_fence(syncobj, fence);
 	dma_fence_put(fence);
 	drm_syncobj_put(syncobj);
 	return 0;
@@ -778,8 +522,7 @@ drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
 		return -EOPNOTSUPP;
 
 	/* no valid flags yet */
-	if (args->flags & ~(DRM_SYNCOBJ_CREATE_SIGNALED |
-			    DRM_SYNCOBJ_CREATE_TYPE_TIMELINE))
+	if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
 		return -EINVAL;
 
 	return drm_syncobj_create_as_handle(file_private,
@@ -850,13 +593,6 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
 					&args->handle);
 }
 
-struct syncobj_wait_entry {
-	struct task_struct *task;
-	struct dma_fence *fence;
-	struct dma_fence_cb fence_cb;
-	struct drm_syncobj_cb syncobj_cb;
-};
-
 static void syncobj_wait_fence_func(struct dma_fence *fence,
 				    struct dma_fence_cb *cb)
 {
@@ -867,13 +603,11 @@ static void syncobj_wait_fence_func(struct dma_fence *fence,
 }
 
 static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
-				      struct drm_syncobj_cb *cb)
+				      struct syncobj_wait_entry *wait)
 {
-	struct syncobj_wait_entry *wait =
-		container_of(cb, struct syncobj_wait_entry, syncobj_cb);
-
-	drm_syncobj_search_fence(syncobj, 0, 0, &wait->fence);
-
+	/* This happens inside the syncobj lock */
+	wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
+				    lockdep_is_held(&syncobj->lock)));
 	wake_up_process(wait->task);
 }
 
@@ -899,8 +633,7 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
 	signaled_count = 0;
 	for (i = 0; i < count; ++i) {
 		entries[i].task = current;
-		drm_syncobj_search_fence(syncobjs[i], 0, 0,
-					 &entries[i].fence);
+		entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
 		if (!entries[i].fence) {
 			if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
 				continue;
@@ -930,15 +663,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
 	 */
 
 	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
-		for (i = 0; i < count; ++i) {
-			if (entries[i].fence)
-				continue;
-
-			drm_syncobj_fence_get_or_add_callback(syncobjs[i],
-							      &entries[i].fence,
-							      &entries[i].syncobj_cb,
-							      syncobj_wait_syncobj_func);
-		}
+		for (i = 0; i < count; ++i)
+			drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
 	}
 
 	do {
@@ -987,9 +713,7 @@ done_waiting:
 
 cleanup_entries:
 	for (i = 0; i < count; ++i) {
-		if (entries[i].syncobj_cb.func)
-			drm_syncobj_remove_callback(syncobjs[i],
-						    &entries[i].syncobj_cb);
+		drm_syncobj_remove_wait(syncobjs[i], &entries[i]);
 		if (entries[i].fence_cb.func)
 			dma_fence_remove_callback(entries[i].fence,
 						  &entries[i].fence_cb);
@@ -1165,13 +889,12 @@ drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
 	if (ret < 0)
 		return ret;
 
-	for (i = 0; i < args->count_handles; i++) {
-		drm_syncobj_fini(syncobjs[i]);
-		drm_syncobj_init(syncobjs[i]);
-	}
+	for (i = 0; i < args->count_handles; i++)
+		drm_syncobj_replace_fence(syncobjs[i], NULL);
+
 	drm_syncobj_array_free(syncobjs, args->count_handles);
 
-	return ret;
+	return 0;
 }
 
 int
@@ -1199,11 +922,8 @@ drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
 	if (ret < 0)
 		return ret;
 
-	for (i = 0; i < args->count_handles; i++) {
-		ret = drm_syncobj_assign_null_handle(syncobjs[i]);
-		if (ret < 0)
-			break;
-	}
+	for (i = 0; i < args->count_handles; i++)
+		drm_syncobj_assign_null_handle(syncobjs[i]);
 
 	drm_syncobj_array_free(syncobjs, args->count_handles);
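
For readers skimming the change, a minimal usage sketch of the simplified interface follows. It is illustrative only and not part of the patch: the helper name example_signal_and_reset() is invented, while drm_syncobj_replace_fence(), drm_syncobj_fence_get(), drm_syncobj_assign_null_handle() and dma_fence_put() are the calls that appear in the diff above (dma_fence_wait() is the generic dma-fence wait helper). With this change a sync object is simply an RCU-protected pointer to one dma_fence guarded by syncobj->lock, so signalling, waiting and resetting all reduce to replacing or reading that single pointer.

#include <linux/dma-fence.h>
#include <drm/drm_syncobj.h>

/* Hypothetical driver-side helper, for illustration only. */
static void example_signal_and_reset(struct drm_syncobj *syncobj,
				     struct dma_fence *job_fence)
{
	struct dma_fence *fence;

	/* Install the job's fence; the syncobj takes its own reference,
	 * mirroring drm_syncobj_import_sync_file_fence() above. */
	drm_syncobj_replace_fence(syncobj, job_fence);

	/* A waiter grabs whatever fence is currently installed. */
	fence = drm_syncobj_fence_get(syncobj);
	if (fence) {
		/* Error handling elided for brevity. */
		dma_fence_wait(fence, true);
		dma_fence_put(fence);
	}

	/* A NULL fence resets the object, exactly as drm_syncobj_reset_ioctl()
	 * and drm_syncobj_free() now do. */
	drm_syncobj_replace_fence(syncobj, NULL);
}

The same pattern explains drm_syncobj_signal_ioctl() in the diff: signalling is nothing more than installing the already-signaled stub fence via drm_syncobj_assign_null_handle(), which is why the ioctl loop no longer needs to handle an error return.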