author     Maarten Lankhorst <[email protected]>   2022-01-28 09:57:39 +0100
committer  Maarten Lankhorst <[email protected]>   2022-01-28 12:17:51 +0100
commit     a594525c82e0b8d677a7e5fd13c7c115d41e9722
tree       c4d527b9e66c276c5409e50676a25ac507660f01
parent     7a05c5a0c883ed6353f82699dff8d789dae6b673
drm/i915: Allow dead vm to unbind vma's without lock.
i915_gem_vm_close may take the lock, and we currently have no better way
of handling this. At least for now, allow a path in which holding vm->mutex
is sufficient. This is the case because the object destroy path will
forcefully take vm->mutex now.
Signed-off-by: Maarten Lankhorst <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
Reviewed-by: Thomas Hellstrom <[email protected]>
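For illustration only, not part of the patch: a minimal sketch of the dead-vm path the commit message describes, assuming the vm has already been closed so vm->open has dropped to zero. The wrapper name example_unbind_dead_vm_vma() is hypothetical; the calls it makes (mutex_lock() on vm->mutex, __i915_vma_unbind(), GEM_BUG_ON(), atomic_read() of vm->open) are the ones used in the diff below, and the fragment assumes the usual i915_vma.c includes.

/*
 * Hypothetical sketch: unbinding a vma that belongs to a dead vm.
 * With vm->open == 0, the new assert_vma_held_evict() no longer
 * demands the object lock, so holding vm->mutex alone is enough.
 */
static void example_unbind_dead_vm_vma(struct i915_vma *vma)
{
        /* Dead vm: no new opens, and object destroy also takes vm->mutex. */
        GEM_BUG_ON(atomic_read(&vma->vm->open));

        mutex_lock(&vma->vm->mutex);
        WARN_ON(__i915_vma_unbind(vma));
        mutex_unlock(&vma->vm->mutex);
}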
-rw-r--r--   drivers/gpu/drm/i915/i915_vma.c | 15
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 2a14a4e8b0bc..22cdc55c4863 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -39,6 +39,17 @@
 #include "i915_vma.h"
 #include "i915_vma_resource.h"
 
+static inline void assert_vma_held_evict(const struct i915_vma *vma)
+{
+        /*
+         * We may be forced to unbind when the vm is dead, to clean it up.
+         * This is the only exception to the requirement of the object lock
+         * being held.
+         */
+        if (atomic_read(&vma->vm->open))
+                assert_object_held_shared(vma->obj);
+}
+
 static struct kmem_cache *slab_vmas;
 
 static struct i915_vma *i915_vma_alloc(void)
@@ -1721,7 +1732,7 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
         struct dma_fence *unbind_fence;
 
         GEM_BUG_ON(i915_vma_is_pinned(vma));
-        assert_object_held_shared(vma->obj);
+        assert_vma_held_evict(vma);
 
         if (i915_vma_is_map_and_fenceable(vma)) {
                 /* Force a pagefault for domain tracking on next user access */
@@ -1788,7 +1799,7 @@ int __i915_vma_unbind(struct i915_vma *vma)
         int ret;
 
         lockdep_assert_held(&vma->vm->mutex);
-        assert_object_held_shared(vma->obj);
+        assert_vma_held_evict(vma);
 
         if (!drm_mm_node_allocated(&vma->node))
                 return 0;
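Again for illustration only: the ordinary path for a live vm, where both locks are taken, so assert_vma_held_evict() behaves exactly like the old assert_object_held_shared() check. The wrapper name example_unbind_live_vm_vma() is made up; i915_gem_object_lock()/i915_gem_object_unlock() and __i915_vma_unbind() are assumed here to have their usual i915 signatures.

/*
 * Hypothetical sketch: live-vm unbind. The object lock is taken first,
 * then vm->mutex, matching what assert_vma_held_evict() still asserts
 * whenever vm->open is non-zero.
 */
static int example_unbind_live_vm_vma(struct i915_vma *vma)
{
        int err;

        err = i915_gem_object_lock(vma->obj, NULL);
        if (err)
                return err;

        mutex_lock(&vma->vm->mutex);
        err = __i915_vma_unbind(vma);
        mutex_unlock(&vma->vm->mutex);

        i915_gem_object_unlock(vma->obj);
        return err;
}

The design choice is to keep the stricter object-lock assertion for every live vm and to carve out only the teardown case, relying on the object destroy path now serializing on vm->mutex as well.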