drm/i915/gem: Defer obj->base.resv fini until RCU callback

Since reservation_object_fini() does an immediate free, rather than
kfree_rcu as normal, we have to delay the release until after the RCU
grace period has elapsed (i.e. from the rcu cleanup callback) so that we
can rely on RCU-protected access to the fences while the object is a
zombie.
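
In outline, the resulting free path looks like the sketch below; this is a
condensed, illustrative version of the code in the first hunk (function names
shortened, comments added here):

        static void free_object_rcu(struct rcu_head *head)
        {
                struct drm_i915_gem_object *obj =
                        container_of(head, typeof(*obj), rcu);

                /*
                 * reservation_object_fini() frees the fence state
                 * immediately (no kfree_rcu), so it must not run until
                 * the grace period has elapsed and no rcu_read_lock()
                 * reader can still see the object.
                 */
                reservation_object_fini(&obj->base._resv);
                i915_gem_object_free(obj);
        }

        static void free_object(struct drm_i915_gem_object *obj)
        {
                /* put the pages, drop the mmap offset, etc. ... */

                /* But keep the pointer alive for RCU-protected lookups */
                call_rcu(&obj->rcu, free_object_rcu);
        }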

i915_gem_busy_ioctl relies on having an RCU barrier to protect the
reservation in order to avoid having to take a reference and strong
memory barriers.
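
For reference, that lookup pattern is roughly the following (a simplified
sketch, not the exact ioctl code; busy_check() stands in for the real fence
inspection):

        rcu_read_lock();
        obj = i915_gem_object_lookup_rcu(file, handle); /* no reference taken */
        if (obj) {
                /*
                 * obj may already be queued for freeing, but with the
                 * fini deferred to the RCU callback its reservation
                 * object stays valid until rcu_read_unlock().
                 */
                busy = busy_check(obj->base.resv);
        }
        rcu_read_unlock();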

v2: Order is important; only release after putting the pages!
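
That is, in __i915_gem_free_objects() the ops->release() hook now runs only
after the backing store is gone (the hazard note below is an inference from
this patch, not part of the original message):

        atomic_set(&obj->mm.pages_pin_count, 0);
        __i915_gem_object_put_pages(obj, I915_MM_NORMAL); /* may still touch obj->base.filp */

        if (obj->ops->release)
                obj->ops->release(obj); /* e.g. shmem_release() fput()s the filp */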

Fixes: c03467ba40 ("drm/i915/gem: Free pages before rcu-freeing the object")
Testcase: igt/gem_busy/close-race
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190703180601.10950-1-chris@chris-wilson.co.uk
commit 0c159ffef6
parent 21de5a9e34
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   2019-07-03 19:06:01 +01:00

 4 files changed, 12 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -152,6 +152,7 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
                container_of(head, typeof(*obj), rcu);
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
 
+       reservation_object_fini(&obj->base._resv);
        i915_gem_object_free(obj);
 
        GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
@@ -187,9 +188,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
                GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
                GEM_BUG_ON(!list_empty(&obj->lut_list));
 
-               if (obj->ops->release)
-                       obj->ops->release(obj);
-
                atomic_set(&obj->mm.pages_pin_count, 0);
                __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
                GEM_BUG_ON(i915_gem_object_has_pages(obj));
@@ -198,7 +196,10 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
                if (obj->base.import_attach)
                        drm_prime_gem_destroy(&obj->base, NULL);
 
-               drm_gem_object_release(&obj->base);
+               drm_gem_free_mmap_offset(&obj->base);
+
+               if (obj->ops->release)
+                       obj->ops->release(obj);
 
                /* But keep the pointer alive for RCU-protected lookups */
                call_rcu(&obj->rcu, __i915_gem_free_object_rcu);

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -133,16 +133,9 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
        drm_pci_free(obj->base.dev, obj->phys_handle);
 }
 
-static void
-i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
-{
-       i915_gem_object_unpin_pages(obj);
-}
-
 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
        .get_pages = i915_gem_object_get_pages_phys,
        .put_pages = i915_gem_object_put_pages_phys,
-       .release = i915_gem_object_release_phys,
 };
 
 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -414,6 +414,11 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
        return 0;
 }
 
+static void shmem_release(struct drm_i915_gem_object *obj)
+{
+       fput(obj->base.filp);
+}
+
 const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
        .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
                 I915_GEM_OBJECT_IS_SHRINKABLE,
@@ -424,6 +429,8 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
        .writeback = shmem_writeback,
 
        .pwrite = shmem_pwrite,
+
+       .release = shmem_release,
 };
 
 static int create_shmem(struct drm_i915_private *i915,

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -529,8 +529,6 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
 
        GEM_BUG_ON(!stolen);
 
-       __i915_gem_object_unpin_pages(obj);
-
        i915_gem_stolen_remove_node(dev_priv, stolen);
        kfree(stolen);
 }