From 82ad6443a55ea274ab2f0e24ada71f0529f3238b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 5 Jun 2018 16:37:58 +0100 Subject: drm/i915/gtt: Rename i915_hw_ppgtt base member In the near future, I want to subclass gen6_hw_ppgtt as it contains a few specialised members and I wish to add more. To avoid the ugliness of using ppgtt->base.base, rename the i915_hw_ppgtt base member (i915_address_space) as vm, which is our common shorthand for an i915_address_space local. Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Cc: Mika Kuoppala Cc: Matthew Auld Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180605153758.18422-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/selftests/i915_request.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/i915/selftests/i915_request.c') diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 94bc2e1898a4..a3a89aadeccb 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -430,7 +430,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915) if (err) goto err; - vma = i915_vma_instance(obj, &i915->ggtt.base, NULL); + vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err; @@ -555,7 +555,8 @@ out_unlock: static struct i915_vma *recursive_batch(struct drm_i915_private *i915) { struct i915_gem_context *ctx = i915->kernel_context; - struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base; + struct i915_address_space *vm = + ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm; struct drm_i915_gem_object *obj; const int gen = INTEL_GEN(i915); struct i915_vma *vma; -- cgit From 697b9a8714cb4631fd0526b3c78955d5422c24ba Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 12 Jun 2018 11:51:35 +0100 Subject: drm/i915: Make closing request flush mandatory For symmetry, simplicity and ensuring the request is always truly idle upon its completion, always emit the closing flush prior to emitting the request breadcrumb. Previously, we would only emit the flush if we had started a user batch, but this just leaves all the other paths open to speculation (do they affect the GPU caches or not?) With mm switching, a key requirement is that the GPU is flushed and invalidated before hand, so for absolute safety, we want that closing flush be mandatory. 
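As a rough before/after sketch (illustrative only, not the patch text itself; the real functions do considerably more around the flush, and submit_sketch() is a made-up caller), the change collapses the two entry points into one and makes the closing flush unconditional:

	/* Before: the caller decided whether the closing flush was emitted. */
	void __i915_request_add(struct i915_request *rq, bool flush_caches);

	/*
	 * After: every request ends with a flush before its breadcrumb, so
	 * the flush_caches parameter (and the i915_request_add() wrapper
	 * macro) goes away.
	 */
	void i915_request_add(struct i915_request *rq);

	static void submit_sketch(struct i915_request *rq)
	{
		/* old: __i915_request_add(rq, true);   on success paths */
		/* old: __i915_request_add(rq, false);  on error paths   */
		i915_request_add(rq);	/* new: one call, flush always emitted */
	}

The real call sites affected are the ones listed in the diffstat below.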
Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: https://patchwork.freedesktop.org/patch/msgid/20180612105135.4459-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.c | 4 ++-- drivers/gpu/drm/i915/i915_gem_context.c | 9 +-------- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 4 ++-- drivers/gpu/drm/i915/i915_request.c | 18 ++---------------- drivers/gpu/drm/i915/i915_request.h | 4 +--- drivers/gpu/drm/i915/selftests/huge_pages.c | 2 +- drivers/gpu/drm/i915/selftests/i915_gem_coherency.c | 4 ++-- drivers/gpu/drm/i915/selftests/i915_gem_context.c | 4 ++-- drivers/gpu/drm/i915/selftests/i915_request.c | 2 +- drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 16 ++++++++-------- drivers/gpu/drm/i915/selftests/intel_lrc.c | 2 +- drivers/gpu/drm/i915/selftests/intel_workarounds.c | 2 +- 12 files changed, 24 insertions(+), 47 deletions(-) (limited to 'drivers/gpu/drm/i915/selftests/i915_request.c') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 93efd92362db..8dd4d35655af 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3213,7 +3213,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv, rq = i915_request_alloc(engine, dev_priv->kernel_context); if (!IS_ERR(rq)) - __i915_request_add(rq, false); + i915_request_add(rq); } } @@ -5332,7 +5332,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915) if (engine->init_context) err = engine->init_context(rq); - __i915_request_add(rq, true); + i915_request_add(rq); if (err) goto err_active; } diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index b2c7ac1b074d..ef6ea4bcd773 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -700,14 +700,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915) i915_timeline_sync_set(rq->timeline, &prev->fence); } - /* - * Force a flush after the switch to ensure that all rendering - * and operations prior to switching to the kernel context hits - * memory. This should be guaranteed by the previous request, - * but an extra layer of paranoia before we declare the system - * idle (on suspend etc) is advisable! - */ - __i915_request_add(rq, true); + i915_request_add(rq); } return 0; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 2d2eb3075960..60dc2a865f5f 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -921,7 +921,7 @@ static void reloc_gpu_flush(struct reloc_cache *cache) i915_gem_object_unpin_map(cache->rq->batch->obj); i915_gem_chipset_flush(cache->rq->i915); - __i915_request_add(cache->rq, true); + i915_request_add(cache->rq); cache->rq = NULL; } @@ -2438,7 +2438,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, trace_i915_request_queue(eb.request, eb.batch_flags); err = eb_submit(&eb); err_request: - __i915_request_add(eb.request, err == 0); + i915_request_add(eb.request); add_to_client(eb.request, file); if (fences) diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 9092f5464c24..e1dbb544046f 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1018,14 +1018,13 @@ i915_request_await_object(struct i915_request *to, * request is not being tracked for completion but the work itself is * going to happen on the hardware. This would be a Bad Thing(tm). 
*/ -void __i915_request_add(struct i915_request *request, bool flush_caches) +void i915_request_add(struct i915_request *request) { struct intel_engine_cs *engine = request->engine; struct i915_timeline *timeline = request->timeline; struct intel_ring *ring = request->ring; struct i915_request *prev; u32 *cs; - int err; GEM_TRACE("%s fence %llx:%d\n", engine->name, request->fence.context, request->fence.seqno); @@ -1046,20 +1045,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches) * know that it is time to use that space up. */ request->reserved_space = 0; - - /* - * Emit any outstanding flushes - execbuf can fail to emit the flush - * after having emitted the batchbuffer command. Hence we need to fix - * things up similar to emitting the lazy request. The difference here - * is that the flush _must_ happen before the next request, no matter - * what. - */ - if (flush_caches) { - err = engine->emit_flush(request, EMIT_FLUSH); - - /* Not allowed to fail! */ - WARN(err, "engine->emit_flush() failed: %d!\n", err); - } + engine->emit_flush(request, EMIT_FLUSH); /* * Record the position of the start of the breadcrumb so that diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 0e9aba53d0e4..7ee220ded9c9 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -253,9 +253,7 @@ int i915_request_await_object(struct i915_request *to, int i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence); -void __i915_request_add(struct i915_request *rq, bool flush_caches); -#define i915_request_add(rq) \ - __i915_request_add(rq, false) +void i915_request_add(struct i915_request *rq); void __i915_request_submit(struct i915_request *request); void i915_request_submit(struct i915_request *request); diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c index 7846ea4a99bc..fbe4324116d7 100644 --- a/drivers/gpu/drm/i915/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c @@ -1003,7 +1003,7 @@ static int gpu_write(struct i915_vma *vma, reservation_object_unlock(vma->resv); err_request: - __i915_request_add(rq, err == 0); + i915_request_add(rq); return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c index 340a98c0c804..a4900091ae3d 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c @@ -199,7 +199,7 @@ static int gpu_set(struct drm_i915_gem_object *obj, cs = intel_ring_begin(rq, 4); if (IS_ERR(cs)) { - __i915_request_add(rq, false); + i915_request_add(rq); i915_vma_unpin(vma); return PTR_ERR(cs); } @@ -229,7 +229,7 @@ static int gpu_set(struct drm_i915_gem_object *obj, reservation_object_add_excl_fence(obj->resv, &rq->fence); reservation_object_unlock(obj->resv); - __i915_request_add(rq, true); + i915_request_add(rq); return 0; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c index 708e8d721448..836f1af8b833 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c @@ -182,12 +182,12 @@ static int gpu_fill(struct drm_i915_gem_object *obj, reservation_object_add_excl_fence(obj->resv, &rq->fence); reservation_object_unlock(obj->resv); - __i915_request_add(rq, true); + i915_request_add(rq); return 0; err_request: - __i915_request_add(rq, false); + 
i915_request_add(rq); err_batch: i915_vma_unpin(batch); err_vma: diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index a3a89aadeccb..f5d00332bb31 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -466,7 +466,7 @@ empty_request(struct intel_engine_cs *engine, goto out_request; out_request: - __i915_request_add(request, err == 0); + i915_request_add(request); return err ? ERR_PTR(err) : request; } diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index 390a157b37c3..fe7d3190ebfe 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c @@ -245,7 +245,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) err = emit_recurse_batch(h, rq); if (err) { - __i915_request_add(rq, false); + i915_request_add(rq); return ERR_PTR(err); } @@ -318,7 +318,7 @@ static int igt_hang_sanitycheck(void *arg) *h.batch = MI_BATCH_BUFFER_END; i915_gem_chipset_flush(i915); - __i915_request_add(rq, true); + i915_request_add(rq); timeout = i915_request_wait(rq, I915_WAIT_LOCKED, @@ -464,7 +464,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active) } i915_request_get(rq); - __i915_request_add(rq, true); + i915_request_add(rq); mutex_unlock(&i915->drm.struct_mutex); if (!wait_until_running(&h, rq)) { @@ -742,7 +742,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915, } i915_request_get(rq); - __i915_request_add(rq, true); + i915_request_add(rq); mutex_unlock(&i915->drm.struct_mutex); if (!wait_until_running(&h, rq)) { @@ -942,7 +942,7 @@ static int igt_wait_reset(void *arg) } i915_request_get(rq); - __i915_request_add(rq, true); + i915_request_add(rq); if (!wait_until_running(&h, rq)) { struct drm_printer p = drm_info_printer(i915->drm.dev); @@ -1037,7 +1037,7 @@ static int igt_reset_queue(void *arg) } i915_request_get(prev); - __i915_request_add(prev, true); + i915_request_add(prev); count = 0; do { @@ -1051,7 +1051,7 @@ static int igt_reset_queue(void *arg) } i915_request_get(rq); - __i915_request_add(rq, true); + i915_request_add(rq); /* * XXX We don't handle resetting the kernel context @@ -1184,7 +1184,7 @@ static int igt_handle_error(void *arg) } i915_request_get(rq); - __i915_request_add(rq, true); + i915_request_add(rq); if (!wait_until_running(&h, rq)) { struct drm_printer p = drm_info_printer(i915->drm.dev); diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c index 0b6da08c8cae..ea27c7cfbf96 100644 --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c @@ -155,7 +155,7 @@ spinner_create_request(struct spinner *spin, err = emit_recurse_batch(spin, rq, arbitration_command); if (err) { - __i915_request_add(rq, false); + i915_request_add(rq); return ERR_PTR(err); } diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c index f1cfb0fb6bea..e1ea2d2bedd2 100644 --- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c +++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c @@ -75,7 +75,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) i915_gem_object_get(result); i915_gem_object_set_active_reference(result); - __i915_request_add(rq, true); + i915_request_add(rq); i915_vma_unpin(vma); return result; -- cgit From 
920d3fb1a00c3d7bb24c9b396c8b2950192c5ad8 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 14 Jun 2018 13:49:23 +0100 Subject: drm/i915/selftests: Initialise request to silence a compiler MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With an old (4.7.3 on 32bit) gcc, it emits a warning for In file included from drivers/gpu/drm/i915/i915_request.c:1425:0: drivers/gpu/drm/i915/selftests/i915_request.c: In function ‘live_nop_request’: drivers/gpu/drm/i915/selftests/i915_request.c:380:21: error: ‘request’ may be used uninitialized in this function [-Werror=maybe-uninitialized] Silence it by just setting it to NULL on initialisation. Signed-off-by: Chris Wilson Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180614124923.18071-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/selftests/i915_request.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/i915/selftests/i915_request.c') diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index f5d00332bb31..63cd9486cc13 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -342,9 +342,9 @@ static int live_nop_request(void *arg) mutex_lock(&i915->drm.struct_mutex); for_each_engine(engine, i915, id) { - IGT_TIMEOUT(end_time); - struct i915_request *request; + struct i915_request *request = NULL; unsigned long n, prime; + IGT_TIMEOUT(end_time); ktime_t times[2] = {}; err = begin_live_test(&t, i915, __func__, engine->name); -- cgit From a24362ead99ab5d39b594a7b4ff48abc9addc059 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 18 Jun 2018 13:01:54 +0200 Subject: drm/i915: Replace drm_dev_unref with drm_dev_put This patch unifies the naming of DRM functions for reference counting of struct drm_device. The resulting code is more aligned with the rest of the Linux kernel interfaces. 
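The conversion is mechanical; as a minimal sketch (teardown_sketch() is a hypothetical helper used only to illustrate the rename, the actual call sites are the mock-selftest teardown paths in the diff below):

	static int teardown_sketch(struct drm_i915_private *i915, int err)
	{
		/* old, non-kref naming: drm_dev_unref(&i915->drm); */
		drm_dev_put(&i915->drm);	/* same reference drop, kref-style name */

		return err;
	}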
Signed-off-by: Thomas Zimmermann Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20180618110154.30462-6-tdz@users.sourceforge.net --- drivers/gpu/drm/i915/selftests/huge_pages.c | 2 +- drivers/gpu/drm/i915/selftests/i915_gem_context.c | 2 +- drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c | 2 +- drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 2 +- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 2 +- drivers/gpu/drm/i915/selftests/i915_gem_object.c | 2 +- drivers/gpu/drm/i915/selftests/i915_request.c | 2 +- drivers/gpu/drm/i915/selftests/i915_vma.c | 2 +- drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/i915/selftests/i915_request.c') diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c index fbe4324116d7..b5e87fcdcdae 100644 --- a/drivers/gpu/drm/i915/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c @@ -1724,7 +1724,7 @@ out_unlock: i915_modparams.enable_ppgtt = saved_ppgtt; - drm_dev_unref(&dev_priv->drm); + drm_dev_put(&dev_priv->drm); return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c index 90c3c36173ba..c0c26fae45c7 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c @@ -586,7 +586,7 @@ int i915_gem_context_mock_selftests(void) err = i915_subtests(tests, i915); - drm_dev_unref(&i915->drm); + drm_dev_put(&i915->drm); return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c index 89dc25a5a53b..a7055b12e53c 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c @@ -389,7 +389,7 @@ int i915_gem_dmabuf_mock_selftests(void) err = i915_subtests(tests, i915); - drm_dev_unref(&i915->drm); + drm_dev_put(&i915->drm); return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 2dc72a984d45..8059268800fa 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -490,7 +490,7 @@ int i915_gem_evict_mock_selftests(void) err = i915_subtests(tests, i915); mutex_unlock(&i915->drm.struct_mutex); - drm_dev_unref(&i915->drm); + drm_dev_put(&i915->drm); return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index a4060238bef0..a28ee0cc6a63 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -1644,7 +1644,7 @@ int i915_gem_gtt_mock_selftests(void) err = i915_subtests(tests, i915); mutex_unlock(&i915->drm.struct_mutex); - drm_dev_unref(&i915->drm); + drm_dev_put(&i915->drm); return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c index 2b2dde94526f..549707b9d738 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c @@ -586,7 +586,7 @@ int i915_gem_object_mock_selftests(void) err = i915_subtests(tests, i915); - drm_dev_unref(&i915->drm); + drm_dev_put(&i915->drm); return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 63cd9486cc13..521ae4a90ddf 100644 --- 
a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -262,7 +262,7 @@ int i915_request_mock_selftests(void) return -ENOMEM; err = i915_subtests(tests, i915); - drm_dev_unref(&i915->drm); + drm_dev_put(&i915->drm); return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index 8400a8cc5cf2..ffa74290e054 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -733,7 +733,7 @@ int i915_vma_mock_selftests(void) err = i915_subtests(tests, i915); mutex_unlock(&i915->drm.struct_mutex); - drm_dev_unref(&i915->drm); + drm_dev_put(&i915->drm); return err; } diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c index d6926e7820e5..f03b407fdbe2 100644 --- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c @@ -464,7 +464,7 @@ int intel_breadcrumbs_mock_selftests(void) return -ENOMEM; err = i915_subtests(tests, i915->engine[RCS]); - drm_dev_unref(&i915->drm); + drm_dev_put(&i915->drm); return err; } -- cgit From bb9e8755a4259a306cd1691b4e17c4c485d81c9e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 5 Jul 2018 16:47:56 +0100 Subject: drm/i915/selftests: Fixup recursive MI_BB_START for gen3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There's no magic bit0 in MI_BB_START for gen3, it's the same dword length parameter as elsewhere and needs to be zero. v2: Same bug in both live_requests and live_hanghcheck. References: https://bugs.freedesktop.org/show_bug.cgi?id=107132 Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180705154756.5533-1-chris@chris-wilson.co.uk Reviewed-by: Ville Syrjälä --- drivers/gpu/drm/i915/selftests/i915_request.c | 5 +---- drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 2 +- 2 files changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/i915/selftests/i915_request.c') diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 521ae4a90ddf..e44aa3335d9e 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -594,11 +594,8 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915) } else if (gen >= 6) { *cmd++ = MI_BATCH_BUFFER_START | 1 << 8; *cmd++ = lower_32_bits(vma->node.start); - } else if (gen >= 4) { - *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT; - *cmd++ = lower_32_bits(vma->node.start); } else { - *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | 1; + *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT; *cmd++ = lower_32_bits(vma->node.start); } *cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */ diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index fe7d3190ebfe..72547aa24538 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c @@ -193,7 +193,7 @@ static int emit_recurse_batch(struct hang *h, batch += 1024 / sizeof(*batch); *batch++ = MI_ARB_CHECK; - *batch++ = MI_BATCH_BUFFER_START | 2 << 6 | 1; + *batch++ = MI_BATCH_BUFFER_START | 2 << 6; *batch++ = lower_32_bits(vma->node.start); } *batch++ = MI_BATCH_BUFFER_END; /* not reached */ -- cgit From a9450e15ad9e921fd2d76b038a8b252b1afc7bb6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: 
Fri, 6 Jul 2018 07:53:10 +0100 Subject: drm/i915/selftests: Skip all request selftests when wedged If the GPU is irrecoverably wedge, we cannot submit any request and so all of the request selftests will expectedly fail. Skip over them. Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20180706065332.15214-5-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/selftests/i915_request.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm/i915/selftests/i915_request.c') diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index e44aa3335d9e..cc27edc40356 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -859,5 +859,9 @@ int i915_request_live_selftests(struct drm_i915_private *i915) SUBTEST(live_sequential_engines), SUBTEST(live_empty_request), }; + + if (i915_terminally_wedged(&i915->gpu_error)) + return 0; + return i915_subtests(tests, i915); } -- cgit From a523697857cdfb6a548f6caf38f42f4fe0f7d757 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 6 Jul 2018 11:39:44 +0100 Subject: drm/i915: Start returning an error from i915_vma_move_to_active() Handling such a late error in request construction is tricky, but to accommodate future patches which may allocate here, we potentially could err. To handle the error after already adjusting global state to track the new request, we must finish and submit the request. But we don't want to use the request as not everything is being tracked by it, so we opt to cancel the commands inside the request. Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20180706103947.15919-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gvt/scheduler.c | 6 +++++- drivers/gpu/drm/i915/i915_drv.h | 6 +++--- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 25 ++++++++++++++++------ drivers/gpu/drm/i915/i915_gem_render_state.c | 2 +- drivers/gpu/drm/i915/selftests/huge_pages.c | 9 ++++++-- .../gpu/drm/i915/selftests/i915_gem_coherency.c | 4 ++-- drivers/gpu/drm/i915/selftests/i915_gem_context.c | 12 +++++++++-- drivers/gpu/drm/i915/selftests/i915_gem_object.c | 7 +++--- drivers/gpu/drm/i915/selftests/i915_request.c | 8 +++++-- drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 11 ++++++++-- drivers/gpu/drm/i915/selftests/intel_lrc.c | 11 ++++++++-- drivers/gpu/drm/i915/selftests/intel_workarounds.c | 6 ++++-- 12 files changed, 78 insertions(+), 29 deletions(-) (limited to 'drivers/gpu/drm/i915/selftests/i915_request.c') diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 928818f218f7..b0e566956b8d 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -476,7 +476,11 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) i915_gem_obj_finish_shmem_access(bb->obj); bb->accessing = false; - i915_vma_move_to_active(bb->vma, workload->req, 0); + ret = i915_vma_move_to_active(bb->vma, + workload->req, + 0); + if (ret) + goto err; } } return 0; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c1c637e47bf5..07846e63671d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3090,9 +3090,9 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj) } int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); -void i915_vma_move_to_active(struct 
i915_vma *vma, - struct i915_request *rq, - unsigned int flags); +int __must_check i915_vma_move_to_active(struct i915_vma *vma, + struct i915_request *rq, + unsigned int flags); int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 91f20445147f..97136e4ce91d 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1165,12 +1165,16 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, goto err_request; GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true)); - i915_vma_move_to_active(batch, rq, 0); - i915_vma_unpin(batch); + err = i915_vma_move_to_active(batch, rq, 0); + if (err) + goto skip_request; - i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + if (err) + goto skip_request; rq->batch = batch; + i915_vma_unpin(batch); cache->rq = rq; cache->rq_cmd = cmd; @@ -1179,6 +1183,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, /* Return with batch mapping (cmd) still pinned */ return 0; +skip_request: + i915_request_skip(rq, err); err_request: i915_request_add(rq); err_unpin: @@ -1818,7 +1824,11 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) unsigned int flags = eb->flags[i]; struct i915_vma *vma = eb->vma[i]; - i915_vma_move_to_active(vma, eb->request, flags); + err = i915_vma_move_to_active(vma, eb->request, flags); + if (unlikely(err)) { + i915_request_skip(eb->request, err); + return err; + } __eb_unreserve_vma(vma, flags); vma->exec_flags = NULL; @@ -1877,9 +1887,9 @@ static void export_fence(struct i915_vma *vma, reservation_object_unlock(resv); } -void i915_vma_move_to_active(struct i915_vma *vma, - struct i915_request *rq, - unsigned int flags) +int i915_vma_move_to_active(struct i915_vma *vma, + struct i915_request *rq, + unsigned int flags) { struct drm_i915_gem_object *obj = vma->obj; const unsigned int idx = rq->engine->id; @@ -1916,6 +1926,7 @@ void i915_vma_move_to_active(struct i915_vma *vma, i915_gem_active_set(&vma->last_fence, rq); export_fence(vma, rq, flags); + return 0; } static int i915_reset_gen7_sol_offsets(struct i915_request *rq) diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 3210cedfa46c..90baf9086d0a 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -222,7 +222,7 @@ int i915_gem_render_state_emit(struct i915_request *rq) goto err_unpin; } - i915_vma_move_to_active(so.vma, rq, 0); + err = i915_vma_move_to_active(so.vma, rq, 0); err_unpin: i915_vma_unpin(so.vma); err_vma: diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c index 84bed69f30cc..d9f439f6219f 100644 --- a/drivers/gpu/drm/i915/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c @@ -985,7 +985,10 @@ static int gpu_write(struct i915_vma *vma, goto err_request; } - i915_vma_move_to_active(batch, rq, 0); + err = i915_vma_move_to_active(batch, rq, 0); + if (err) + goto err_request; + i915_gem_object_set_active_reference(batch->obj); i915_vma_unpin(batch); i915_vma_close(batch); @@ -996,7 +999,9 @@ static int gpu_write(struct i915_vma *vma, if (err) goto err_request; - i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + if (err) + 
i915_request_skip(rq, err); err_request: i915_request_add(rq); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c index 97a16311f083..1de7c1402fd5 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c @@ -222,12 +222,12 @@ static int gpu_set(struct drm_i915_gem_object *obj, } intel_ring_advance(rq, cs); - i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); i915_vma_unpin(vma); i915_request_add(rq); - return 0; + return err; } static bool always_valid(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c index c642ab97698e..5fbe15f4effd 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c @@ -170,18 +170,26 @@ static int gpu_fill(struct drm_i915_gem_object *obj, if (err) goto err_request; - i915_vma_move_to_active(batch, rq, 0); + err = i915_vma_move_to_active(batch, rq, 0); + if (err) + goto skip_request; + + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + if (err) + goto skip_request; + i915_gem_object_set_active_reference(batch->obj); i915_vma_unpin(batch); i915_vma_close(batch); - i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); i915_vma_unpin(vma); i915_request_add(rq); return 0; +skip_request: + i915_request_skip(rq, err); err_request: i915_request_add(rq); err_batch: diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c index b2ccbc5e2bbe..25c2b2d433bd 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c @@ -464,13 +464,14 @@ static int make_obj_busy(struct drm_i915_gem_object *obj) return PTR_ERR(rq); } - i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); i915_request_add(rq); - i915_gem_object_set_active_reference(obj); + __i915_gem_object_release_unless_active(obj); i915_vma_unpin(vma); - return 0; + + return err; } static bool assert_mmap_offset(struct drm_i915_private *i915, diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index cc27edc40356..43995fc3534d 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -675,7 +675,9 @@ static int live_all_engines(void *arg) i915_gem_object_set_active_reference(batch->obj); } - i915_vma_move_to_active(batch, request[id], 0); + err = i915_vma_move_to_active(batch, request[id], 0); + GEM_BUG_ON(err); + i915_request_get(request[id]); i915_request_add(request[id]); } @@ -785,7 +787,9 @@ static int live_sequential_engines(void *arg) GEM_BUG_ON(err); request[id]->batch = batch; - i915_vma_move_to_active(batch, request[id], 0); + err = i915_vma_move_to_active(batch, request[id], 0); + GEM_BUG_ON(err); + i915_gem_object_set_active_reference(batch->obj); i915_vma_get(batch); diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index c838f7d08cb9..73462a65a330 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c @@ -130,13 +130,19 @@ static int emit_recurse_batch(struct hang *h, if (err) goto unpin_vma; - i915_vma_move_to_active(vma, rq, 0); + err = 
i915_vma_move_to_active(vma, rq, 0); + if (err) + goto unpin_hws; + if (!i915_gem_object_has_active_reference(vma->obj)) { i915_gem_object_get(vma->obj); i915_gem_object_set_active_reference(vma->obj); } - i915_vma_move_to_active(hws, rq, 0); + err = i915_vma_move_to_active(hws, rq, 0); + if (err) + goto unpin_hws; + if (!i915_gem_object_has_active_reference(hws->obj)) { i915_gem_object_get(hws->obj); i915_gem_object_set_active_reference(hws->obj); @@ -205,6 +211,7 @@ static int emit_recurse_batch(struct hang *h, err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags); +unpin_hws: i915_vma_unpin(hws); unpin_vma: i915_vma_unpin(vma); diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c index 730a02d39058..636cb68191e3 100644 --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c @@ -104,13 +104,19 @@ static int emit_recurse_batch(struct spinner *spin, if (err) goto unpin_vma; - i915_vma_move_to_active(vma, rq, 0); + err = i915_vma_move_to_active(vma, rq, 0); + if (err) + goto unpin_hws; + if (!i915_gem_object_has_active_reference(vma->obj)) { i915_gem_object_get(vma->obj); i915_gem_object_set_active_reference(vma->obj); } - i915_vma_move_to_active(hws, rq, 0); + err = i915_vma_move_to_active(hws, rq, 0); + if (err) + goto unpin_hws; + if (!i915_gem_object_has_active_reference(hws->obj)) { i915_gem_object_get(hws->obj); i915_gem_object_set_active_reference(hws->obj); @@ -134,6 +140,7 @@ static int emit_recurse_batch(struct spinner *spin, err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0); +unpin_hws: i915_vma_unpin(hws); unpin_vma: i915_vma_unpin(vma); diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c index 4a9dc01a364a..fafdec3fe83e 100644 --- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c +++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c @@ -49,6 +49,10 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) goto err_pin; } + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + if (err) + goto err_req; + srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; if (INTEL_GEN(ctx->i915) >= 8) srm++; @@ -67,8 +71,6 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) } intel_ring_advance(rq, cs); - i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - i915_gem_object_get(result); i915_gem_object_set_active_reference(result); -- cgit From ec625fb932bb057e2d3c2ed28eee56a827385ab8 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 9 Jul 2018 13:20:42 +0100 Subject: drm/i915: Provide a timeout to i915_gem_wait_for_idle() Usually we have no idea about the upper bound we need to wait to catch up with userspace when idling the device, but in a few situations we know the system was idle beforehand and can provide a short timeout in order to very quickly catch a failure, long before hangcheck kicks in. In the following patches, we will use the timeout to curtain two overly long waits, where we know we can expect the GPU to complete within a reasonable time or declare it broken. In particular, with a broken GPU we expect it to fail during the initial GPU setup where do a couple of context switches to record the defaults. This is a task that takes a few milliseconds even on the slowest of devices, but we may have to wait 60s for hangcheck to give in and declare the machine inoperable. 
In this a case where any gpu hang is unacceptable, both from a timeliness and practical standpoint. The other improvement is that in selftests, we do not need to arm an independent timer to inject a wedge, as we can just limit the timeout on the wait directly. v2: Include the timeout parameter in the trace. Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Cc: Mika Kuoppala Cc: Tvrtko Ursulin Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180709122044.7028-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_debugfs.c | 6 ++- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_gem.c | 48 ++++++++++++++--------- drivers/gpu/drm/i915/i915_gem_evict.c | 3 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 2 +- drivers/gpu/drm/i915/i915_gem_shrinker.c | 11 ++++-- drivers/gpu/drm/i915/i915_perf.c | 4 +- drivers/gpu/drm/i915/i915_request.c | 6 ++- drivers/gpu/drm/i915/selftests/i915_gem_context.c | 4 +- drivers/gpu/drm/i915/selftests/i915_request.c | 4 +- drivers/gpu/drm/i915/selftests/igt_flush_test.c | 2 +- 11 files changed, 59 insertions(+), 33 deletions(-) (limited to 'drivers/gpu/drm/i915/selftests/i915_request.c') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 544e5e7f011f..099f97ef2303 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -4105,7 +4105,8 @@ fault_irq_set(struct drm_i915_private *i915, err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED | - I915_WAIT_INTERRUPTIBLE); + I915_WAIT_INTERRUPTIBLE, + MAX_SCHEDULE_TIMEOUT); if (err) goto err_unlock; @@ -4210,7 +4211,8 @@ i915_drop_caches_set(void *data, u64 val) if (val & DROP_ACTIVE) ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED); + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); if (val & DROP_RETIRE) i915_retire_requests(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c79008177708..fcb8f49a9b8a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3157,7 +3157,7 @@ void i915_gem_init_swizzling(struct drm_i915_private *dev_priv); void i915_gem_fini(struct drm_i915_private *dev_priv); void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv); int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, - unsigned int flags); + unsigned int flags, long timeout); int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv); void i915_gem_suspend_late(struct drm_i915_private *dev_priv); void i915_gem_resume(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1a9dab302433..91d705a67d38 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2267,7 +2267,9 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) /* Attempt to reap some mmap space from dead objects */ do { - err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE); + err = i915_gem_wait_for_idle(dev_priv, + I915_WAIT_INTERRUPTIBLE, + MAX_SCHEDULE_TIMEOUT); if (err) break; @@ -3742,14 +3744,14 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) return ret; } -static int wait_for_timeline(struct i915_timeline *tl, unsigned int flags) +static long wait_for_timeline(struct i915_timeline *tl, + unsigned int flags, long timeout) { struct i915_request *rq; - long ret; rq = i915_gem_active_get_unlocked(&tl->last_request); if (!rq) - return 0; + return timeout; /* * 
"Race-to-idle". @@ -3763,10 +3765,10 @@ static int wait_for_timeline(struct i915_timeline *tl, unsigned int flags) if (flags & I915_WAIT_FOR_IDLE_BOOST) gen6_rps_boost(rq, NULL); - ret = i915_request_wait(rq, flags, MAX_SCHEDULE_TIMEOUT); + timeout = i915_request_wait(rq, flags, timeout); i915_request_put(rq); - return ret < 0 ? ret : 0; + return timeout; } static int wait_for_engines(struct drm_i915_private *i915) @@ -3782,10 +3784,12 @@ static int wait_for_engines(struct drm_i915_private *i915) return 0; } -int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags) +int i915_gem_wait_for_idle(struct drm_i915_private *i915, + unsigned int flags, long timeout) { - GEM_TRACE("flags=%x (%s)\n", - flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked"); + GEM_TRACE("flags=%x (%s), timeout=%ld%s\n", + flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked", + timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : ""); /* If the device is asleep, we have no requests outstanding */ if (!READ_ONCE(i915->gt.awake)) @@ -3798,9 +3802,9 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags) lockdep_assert_held(&i915->drm.struct_mutex); list_for_each_entry(tl, &i915->gt.timelines, link) { - err = wait_for_timeline(tl, flags); - if (err) - return err; + timeout = wait_for_timeline(tl, flags, timeout); + if (timeout < 0) + return timeout; } err = wait_for_engines(i915); @@ -3812,12 +3816,13 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags) } else { struct intel_engine_cs *engine; enum intel_engine_id id; - int err; for_each_engine(engine, i915, id) { - err = wait_for_timeline(&engine->timeline, flags); - if (err) - return err; + struct i915_timeline *tl = &engine->timeline; + + timeout = wait_for_timeline(tl, flags, timeout); + if (timeout < 0) + return timeout; } } @@ -5052,7 +5057,8 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv) ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED | - I915_WAIT_FOR_IDLE_BOOST); + I915_WAIT_FOR_IDLE_BOOST, + MAX_SCHEDULE_TIMEOUT); if (ret && ret != -EIO) goto err_unlock; @@ -5356,7 +5362,9 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915) if (err) goto err_active; - err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED); + err = i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); if (err) goto err_active; @@ -5421,7 +5429,9 @@ err_active: if (WARN_ON(i915_gem_switch_to_kernel_context(i915))) goto out_ctx; - if (WARN_ON(i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED))) + if (WARN_ON(i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT))) goto out_ctx; i915_gem_contexts_lost(i915); diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 54814a196ee4..02b83a5ed96c 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -69,7 +69,8 @@ static int ggtt_flush(struct drm_i915_private *i915) err = i915_gem_wait_for_idle(i915, I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED); + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); if (err) return err; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 4db31aaaa9d3..210baf3c8d11 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2793,7 +2793,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, struct i915_ggtt *ggtt = &dev_priv->ggtt; if (unlikely(ggtt->do_idle_maps)) { - if 
(i915_gem_wait_for_idle(dev_priv, 0)) { + if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) { DRM_ERROR("Failed to wait for idle; VT'd may hang.\n"); /* Wait a bit, in hopes it avoids the hang */ udelay(10); diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 55e84e71f526..c61f5b80fee3 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -172,7 +172,9 @@ i915_gem_shrink(struct drm_i915_private *i915, * we will free as much as we can and hope to get a second chance. */ if (flags & I915_SHRINK_ACTIVE) - i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED); + i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); trace_i915_gem_shrink(i915, target, flags); i915_retire_requests(i915); @@ -392,7 +394,8 @@ shrinker_lock_uninterruptible(struct drm_i915_private *i915, bool *unlock, unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms); do { - if (i915_gem_wait_for_idle(i915, 0) == 0 && + if (i915_gem_wait_for_idle(i915, + 0, MAX_SCHEDULE_TIMEOUT) == 0 && shrinker_lock(i915, unlock)) break; @@ -466,7 +469,9 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr return NOTIFY_DONE; /* Force everything onto the inactive lists */ - ret = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED); + ret = i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); if (ret) goto out; diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 447407fee3b8..6bf10952c724 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1836,7 +1836,9 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, * So far the best way to work around this issue seems to be draining * the GPU from any submitted work. 
*/ - ret = i915_gem_wait_for_idle(dev_priv, wait_flags); + ret = i915_gem_wait_for_idle(dev_priv, + wait_flags, + MAX_SCHEDULE_TIMEOUT); if (ret) goto out; diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 3248369dbcfb..5c2c93cbab12 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -206,7 +206,8 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno) /* Carefully retire all requests without writing to the rings */ ret = i915_gem_wait_for_idle(i915, I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED); + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); if (ret) return ret; @@ -735,7 +736,8 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) /* Ratelimit ourselves to prevent oom from malicious clients */ ret = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED | - I915_WAIT_INTERRUPTIBLE); + I915_WAIT_INTERRUPTIBLE, + MAX_SCHEDULE_TIMEOUT); if (ret) goto err_unreserve; diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c index 5fbe15f4effd..ab2590242033 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c @@ -478,7 +478,9 @@ static int __igt_switch_to_kernel_context(struct drm_i915_private *i915, } } - err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED); + err = i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); if (err) return err; diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 43995fc3534d..c4aac6141e04 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -286,7 +286,9 @@ static int begin_live_test(struct live_test *t, t->func = func; t->name = name; - err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED); + err = i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); if (err) { pr_err("%s(%s): failed to idle before, with err=%d!", func, name, err); diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c index 0d06f559243f..09ab037ce803 100644 --- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c +++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c @@ -64,7 +64,7 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags) } wedge_on_timeout(&w, i915, HZ) - i915_gem_wait_for_idle(i915, flags); + i915_gem_wait_for_idle(i915, flags, MAX_SCHEDULE_TIMEOUT); return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0; } -- cgit
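As a rough illustration of the i915_gem_wait_for_idle() signature change in the last patch above (a sketch only: wait_sketch() is a hypothetical caller, and the one-second bound is an arbitrary example rather than a value the patch mandates):

	static int wait_sketch(struct drm_i915_private *i915)
	{
		int err;

		/*
		 * Unbounded wait, matching the old behaviour: most existing
		 * callers simply pass MAX_SCHEDULE_TIMEOUT.
		 */
		err = i915_gem_wait_for_idle(i915,
					     I915_WAIT_LOCKED,
					     MAX_SCHEDULE_TIMEOUT);
		if (err)
			return err;

		/*
		 * Bounded wait: a selftest that expects the GPU to already be
		 * idle can give up after ~1s instead of arming a separate
		 * wedge timer and waiting for hangcheck to fire.
		 */
		return i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, HZ);
	}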