author:    Alex Goins <[email protected]>        2015-11-25 18:43:38 -0800
committer: Maarten Lankhorst <[email protected]>  2015-12-03 16:10:11 +0100
commit:    fd8e058a27f319d4560529d54bcbf8aec2d72017
tree:      1f7a76f36dfd82f853d7b01bbe8a9f27a5a709d4
parent:    6d65ba943a2d1e4292a07ca7ddb6c5138b9efa5d
i915: wait for fence in mmio_flip_work_func
If a buffer is backed by dmabuf, wait on its reservation object's exclusive
fence before flipping.
v2: First commit
v3: Remove object_name_lock acquire
v4: Move wait ahead of mark_page_flip_active
Use crtc->primary->fb to get GEM object instead of pending_flip_obj
Make use_mmio_flip() return true when an exclusive fence is attached
Wait only on exclusive fences, interruptible with no timeout
v5: Move wait from do_mmio_flip to mmio_flip_work_func
Style tweaks to more closely match rest of file
v6: Change back to uninterruptible wait to match __i915_wait_request due to
inability to properly handle interrupted wait.
Warn on error code from waiting.
v7: No change
v8: Test for !reservation_object_test_signaled_rcu(test_all=FALSE) instead of
obj->base.dma_buf->resv->fence_excl
Link: https://patchwork.kernel.org/patch/7704181/
Signed-off-by: Alex Goins <[email protected]>
Signed-off-by: Maarten Lankhorst <[email protected]>
 drivers/gpu/drm/i915/intel_display.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 602e2be1c3d8..0ad20eccbf62 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -44,6 +44,8 @@
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_rect.h>
 #include <linux/dma_remapping.h>
+#include <linux/reservation.h>
+#include <linux/dma-buf.h>
 
 /* Primary plane formats for gen <= 3 */
 static const uint32_t i8xx_primary_formats[] = {
@@ -11204,6 +11206,10 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
 		return true;
 	else if (i915.enable_execlists)
 		return true;
+	else if (obj->base.dma_buf &&
+		 !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
+						       false))
+		return true;
 	else
 		return ring != i915_gem_request_get_ring(obj->last_write_req);
 }
@@ -11318,6 +11324,9 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
 {
 	struct intel_mmio_flip *mmio_flip =
 		container_of(work, struct intel_mmio_flip, work);
+	struct intel_framebuffer *intel_fb =
+		to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
 
 	if (mmio_flip->req) {
 		WARN_ON(__i915_wait_request(mmio_flip->req,
@@ -11327,6 +11336,12 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
 		i915_gem_request_unreference__unlocked(mmio_flip->req);
 	}
 
+	/* For framebuffer backed by dmabuf, wait for fence */
+	if (obj->base.dma_buf)
+		WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
+							    false, false,
+							    MAX_SCHEDULE_TIMEOUT) < 0);
+
 	intel_do_mmio_flip(mmio_flip);
 	kfree(mmio_flip);
 }
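
For context, the wait added above follows the usual reservation-object pattern: first test whether the exclusive fence has already signaled, and only block (uninterruptibly, per v6) if it has not. The sketch below is not part of the patch; the helper name is hypothetical and it assumes the <linux/reservation.h> API of this era (reservation_object_test_signaled_rcu / reservation_object_wait_timeout_rcu).

#include <linux/dma-buf.h>
#include <linux/reservation.h>
#include <linux/sched.h>	/* MAX_SCHEDULE_TIMEOUT */

/*
 * Hypothetical helper, not part of the patch: wait for the exclusive fence
 * on a dma-buf's reservation object, mirroring intel_mmio_flip_work_func().
 * wait_all=false limits the wait to the exclusive fence, intr=false makes
 * the wait uninterruptible, and MAX_SCHEDULE_TIMEOUT means no timeout.
 */
static int example_wait_dmabuf_excl_fence(struct dma_buf *dma_buf)
{
	long ret;

	/* Fast path: nothing to do if the exclusive fence already signaled. */
	if (reservation_object_test_signaled_rcu(dma_buf->resv, false))
		return 0;

	ret = reservation_object_wait_timeout_rcu(dma_buf->resv,
						  false, false,
						  MAX_SCHEDULE_TIMEOUT);
	return ret < 0 ? ret : 0;
}

The patch itself open-codes the same idea in two places: the test in use_mmio_flip() routes dmabuf-backed framebuffers with an unsignaled exclusive fence to the MMIO flip path, and the wait in intel_mmio_flip_work_func() blocks there until the external producer's fence signals before the flip is armed.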