From 2386b492ded48bb4731ab4321715986bb29a0454 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 19 Mar 2020 09:19:38 +0000 Subject: drm/i915: Prefer '%ps' for printing function symbol names %pS includes the offset, which is useful for return addresses but noise when we are pretty printing a known (and expected) function entry point. Signed-off-by: Chris Wilson Reviewed-by: Tvrtko ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20200319091943.7815-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/selftests/i915_request.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/i915/selftests/i915_request.c') diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index f89d9c42f1fa..7ac9616de9d8 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -1233,7 +1233,7 @@ static int live_parallel_engines(void *arg) struct igt_live_test t; unsigned int idx; - snprintf(name, sizeof(name), "%pS", fn); + snprintf(name, sizeof(name), "%ps", fn); err = igt_live_test_begin(&t, i915, __func__, name); if (err) break; -- cgit From 73c8bfb7feed2831ab685faafa0a77d90ca6db07 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 25 Mar 2020 23:48:03 +0000 Subject: drm/i915: Drop final few uses of drm_i915_private.engine We've migrated all the heavy users over to the intel_gt, and can finally drop the last few users and with that the mirror in dev_priv->engine[]. 
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Andi Shyti Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20200325234803.6175-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/display/intel_overlay.c | 2 +- drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 2 +- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 4 ---- drivers/gpu/drm/i915/i915_drv.h | 1 - drivers/gpu/drm/i915/i915_irq.c | 8 ++++---- drivers/gpu/drm/i915/selftests/i915_request.c | 14 ++++++++++---- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 6 +++--- 7 files changed, 19 insertions(+), 18 deletions(-) (limited to 'drivers/gpu/drm/i915/selftests/i915_request.c') diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 481187223101..6e1d66323223 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -1342,7 +1342,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv) if (!HAS_OVERLAY(dev_priv)) return; - engine = dev_priv->engine[RCS0]; + engine = dev_priv->gt.engine[RCS0]; if (!engine || !engine->kernel_context) return; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 54b86cf7f5d2..f4f933240b39 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -1925,7 +1925,7 @@ static int mock_context_barrier(void *arg) goto out; } - rq = igt_request_alloc(ctx, i915->engine[RCS0]); + rq = igt_request_alloc(ctx, i915->gt.engine[RCS0]); if (IS_ERR(rq)) { pr_err("Request allocation failed!\n"); goto out; diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 3aa8a652c16d..dff0bbe9e1a6 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -347,8 +347,6 @@ static int intel_engine_setup(struct intel_gt 
*gt, enum intel_engine_id id) gt->engine_class[info->class][info->instance] = engine; gt->engine[id] = engine; - i915->engine[id] = engine; - return 0; } @@ -425,8 +423,6 @@ void intel_engines_release(struct intel_gt *gt) engine->release = NULL; memset(&engine->reset, 0, sizeof(engine->reset)); - - gt->i915->engine[id] = NULL; } } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 9bcea97c39de..b09a1c929c94 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -884,7 +884,6 @@ struct drm_i915_private { struct pci_dev *bridge_dev; - struct intel_engine_cs *engine[I915_NUM_ENGINES]; struct rb_root uabi_engines; struct resource mch_res; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index ecf07b0faad2..adb21112df78 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3648,7 +3648,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir); if (iir & I915_USER_INTERRUPT) - intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]); + intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]); if (iir & I915_MASTER_ERROR_INTERRUPT) i8xx_error_irq_handler(dev_priv, eir, eir_stuck); @@ -3753,7 +3753,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) I915_WRITE(GEN2_IIR, iir); if (iir & I915_USER_INTERRUPT) - intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]); + intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]); if (iir & I915_MASTER_ERROR_INTERRUPT) i9xx_error_irq_handler(dev_priv, eir, eir_stuck); @@ -3895,10 +3895,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) I915_WRITE(GEN2_IIR, iir); if (iir & I915_USER_INTERRUPT) - intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]); + intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]); if (iir & I915_BSD_USER_INTERRUPT) - intel_engine_signal_breadcrumbs(dev_priv->engine[VCS0]); + 
intel_engine_signal_breadcrumbs(dev_priv->gt.engine[VCS0]); if (iir & I915_MASTER_ERROR_INTERRUPT) i9xx_error_irq_handler(dev_priv, eir, eir_stuck); diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 7ac9616de9d8..1dab0360f76a 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -28,6 +28,7 @@ #include "gem/selftests/mock_context.h" #include "gt/intel_engine_pm.h" +#include "gt/intel_engine_user.h" #include "gt/intel_gt.h" #include "i915_random.h" @@ -51,6 +52,11 @@ static unsigned int num_uabi_engines(struct drm_i915_private *i915) return count; } +static struct intel_engine_cs *rcs0(struct drm_i915_private *i915) +{ + return intel_engine_lookup_user(i915, I915_ENGINE_CLASS_RENDER, 0); +} + static int igt_add_request(void *arg) { struct drm_i915_private *i915 = arg; @@ -58,7 +64,7 @@ static int igt_add_request(void *arg) /* Basic preliminary test to create a request and let it loose! 
*/ - request = mock_request(i915->engine[RCS0]->kernel_context, HZ / 10); + request = mock_request(rcs0(i915)->kernel_context, HZ / 10); if (!request) return -ENOMEM; @@ -76,7 +82,7 @@ static int igt_wait_request(void *arg) /* Submit a request, then wait upon it */ - request = mock_request(i915->engine[RCS0]->kernel_context, T); + request = mock_request(rcs0(i915)->kernel_context, T); if (!request) return -ENOMEM; @@ -145,7 +151,7 @@ static int igt_fence_wait(void *arg) /* Submit a request, treat it as a fence and wait upon it */ - request = mock_request(i915->engine[RCS0]->kernel_context, T); + request = mock_request(rcs0(i915)->kernel_context, T); if (!request) return -ENOMEM; @@ -420,7 +426,7 @@ static int mock_breadcrumbs_smoketest(void *arg) { struct drm_i915_private *i915 = arg; struct smoketest t = { - .engine = i915->engine[RCS0], + .engine = rcs0(i915), .ncontexts = 1024, .max_batch = 1024, .request_alloc = __mock_request_alloc diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 754d0eb6beaa..47fde54150f4 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -178,11 +178,11 @@ struct drm_i915_private *mock_gem_device(void) mkwrite_device_info(i915)->engine_mask = BIT(0); - i915->engine[RCS0] = mock_engine(i915, "mock", RCS0); - if (!i915->engine[RCS0]) + i915->gt.engine[RCS0] = mock_engine(i915, "mock", RCS0); + if (!i915->gt.engine[RCS0]) goto err_unlock; - if (mock_engine_init(i915->engine[RCS0])) + if (mock_engine_init(i915->gt.engine[RCS0])) goto err_context; __clear_bit(I915_WEDGED, &i915->gt.reset.flags); -- cgit From cbfd3a0c5a558bff441d39535ed5cafc862ce38a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 22 Apr 2020 08:42:03 +0100 Subject: drm/i915/selftests: Add request throughput measurement to perf Under ideal circumstances, the driver should be able to keep the GPU fully saturated with work. 
Measure how close to ideal we get under the harshest of conditions with no user payload. v2: Also measure throughput using only one thread. Signed-off-by: Chris Wilson Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20200422074203.9799-1-chris@chris-wilson.co.uk --- .../gpu/drm/i915/selftests/i915_perf_selftests.h | 1 + drivers/gpu/drm/i915/selftests/i915_request.c | 580 ++++++++++++++++++++- 2 files changed, 580 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/i915/selftests/i915_request.c') diff --git a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h index 3bf7f53e9924..d8da142985eb 100644 --- a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h @@ -16,5 +16,6 @@ * Tests are executed in order by igt/i915_selftest */ selftest(engine_cs, intel_engine_cs_perf_selftests) +selftest(request, i915_request_perf_selftests) selftest(blt, i915_gem_object_blt_perf_selftests) selftest(region, intel_memory_region_perf_selftests) diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 1dab0360f76a..3b319c0953cb 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -23,6 +23,7 @@ */ #include +#include #include "gem/i915_gem_pm.h" #include "gem/selftests/mock_context.h" @@ -1239,7 +1240,7 @@ static int live_parallel_engines(void *arg) struct igt_live_test t; unsigned int idx; - snprintf(name, sizeof(name), "%ps", fn); + snprintf(name, sizeof(name), "%ps", *fn); err = igt_live_test_begin(&t, i915, __func__, name); if (err) break; @@ -1476,3 +1477,580 @@ int i915_request_live_selftests(struct drm_i915_private *i915) return i915_subtests(tests, i915); } + +static int switch_to_kernel_sync(struct intel_context *ce, int err) +{ + struct i915_request *rq; + struct dma_fence *fence; + + rq = 
intel_engine_create_kernel_request(ce->engine); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + fence = i915_active_fence_get(&ce->timeline->last_request); + if (fence) { + i915_request_await_dma_fence(rq, fence); + dma_fence_put(fence); + } + + rq = i915_request_get(rq); + i915_request_add(rq); + if (i915_request_wait(rq, 0, HZ / 2) < 0 && !err) + err = -ETIME; + i915_request_put(rq); + + while (!err && !intel_engine_is_idle(ce->engine)) + intel_engine_flush_submission(ce->engine); + + return err; +} + +struct perf_stats { + struct intel_engine_cs *engine; + unsigned long count; + ktime_t time; + ktime_t busy; + u64 runtime; +}; + +struct perf_series { + struct drm_i915_private *i915; + unsigned int nengines; + struct intel_context *ce[]; +}; + +static int s_sync0(void *arg) +{ + struct perf_series *ps = arg; + IGT_TIMEOUT(end_time); + unsigned int idx = 0; + int err = 0; + + GEM_BUG_ON(!ps->nengines); + do { + struct i915_request *rq; + + rq = i915_request_create(ps->ce[idx]); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + i915_request_get(rq); + i915_request_add(rq); + + if (i915_request_wait(rq, 0, HZ / 5) < 0) + err = -ETIME; + i915_request_put(rq); + if (err) + break; + + if (++idx == ps->nengines) + idx = 0; + } while (!__igt_timeout(end_time, NULL)); + + return err; +} + +static int s_sync1(void *arg) +{ + struct perf_series *ps = arg; + struct i915_request *prev = NULL; + IGT_TIMEOUT(end_time); + unsigned int idx = 0; + int err = 0; + + GEM_BUG_ON(!ps->nengines); + do { + struct i915_request *rq; + + rq = i915_request_create(ps->ce[idx]); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + i915_request_get(rq); + i915_request_add(rq); + + if (prev && i915_request_wait(prev, 0, HZ / 5) < 0) + err = -ETIME; + i915_request_put(prev); + prev = rq; + if (err) + break; + + if (++idx == ps->nengines) + idx = 0; + } while (!__igt_timeout(end_time, NULL)); + i915_request_put(prev); + + return err; +} + +static int s_many(void *arg) +{ + struct 
perf_series *ps = arg; + IGT_TIMEOUT(end_time); + unsigned int idx = 0; + + GEM_BUG_ON(!ps->nengines); + do { + struct i915_request *rq; + + rq = i915_request_create(ps->ce[idx]); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + i915_request_add(rq); + + if (++idx == ps->nengines) + idx = 0; + } while (!__igt_timeout(end_time, NULL)); + + return 0; +} + +static int perf_series_engines(void *arg) +{ + struct drm_i915_private *i915 = arg; + static int (* const func[])(void *arg) = { + s_sync0, + s_sync1, + s_many, + NULL, + }; + const unsigned int nengines = num_uabi_engines(i915); + struct intel_engine_cs *engine; + int (* const *fn)(void *arg); + struct pm_qos_request qos; + struct perf_stats *stats; + struct perf_series *ps; + unsigned int idx; + int err = 0; + + stats = kcalloc(nengines, sizeof(*stats), GFP_KERNEL); + if (!stats) + return -ENOMEM; + + ps = kzalloc(struct_size(ps, ce, nengines), GFP_KERNEL); + if (!ps) { + kfree(stats); + return -ENOMEM; + } + + cpu_latency_qos_add_request(&qos, 0); /* disable cstates */ + + ps->i915 = i915; + ps->nengines = nengines; + + idx = 0; + for_each_uabi_engine(engine, i915) { + struct intel_context *ce; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) + goto out; + + err = intel_context_pin(ce); + if (err) { + intel_context_put(ce); + goto out; + } + + ps->ce[idx++] = ce; + } + GEM_BUG_ON(idx != ps->nengines); + + for (fn = func; *fn && !err; fn++) { + char name[KSYM_NAME_LEN]; + struct igt_live_test t; + + snprintf(name, sizeof(name), "%ps", *fn); + err = igt_live_test_begin(&t, i915, __func__, name); + if (err) + break; + + for (idx = 0; idx < nengines; idx++) { + struct perf_stats *p = + memset(&stats[idx], 0, sizeof(stats[idx])); + struct intel_context *ce = ps->ce[idx]; + + p->engine = ps->ce[idx]->engine; + intel_engine_pm_get(p->engine); + + if (intel_engine_supports_stats(p->engine) && + !intel_enable_engine_stats(p->engine)) + p->busy = intel_engine_get_busy_time(p->engine) + 1; + p->runtime = 
-intel_context_get_total_runtime_ns(ce); + p->time = ktime_get(); + } + + err = (*fn)(ps); + if (igt_live_test_end(&t)) + err = -EIO; + + for (idx = 0; idx < nengines; idx++) { + struct perf_stats *p = &stats[idx]; + struct intel_context *ce = ps->ce[idx]; + int integer, decimal; + u64 busy, dt; + + p->time = ktime_sub(ktime_get(), p->time); + if (p->busy) { + p->busy = ktime_sub(intel_engine_get_busy_time(p->engine), + p->busy - 1); + intel_disable_engine_stats(p->engine); + } + + err = switch_to_kernel_sync(ce, err); + p->runtime += intel_context_get_total_runtime_ns(ce); + intel_engine_pm_put(p->engine); + + busy = 100 * ktime_to_ns(p->busy); + dt = ktime_to_ns(p->time); + if (dt) { + integer = div64_u64(busy, dt); + busy -= integer * dt; + decimal = div64_u64(100 * busy, dt); + } else { + integer = 0; + decimal = 0; + } + + pr_info("%s %5s: { seqno:%d, busy:%d.%02d%%, runtime:%lldms, walltime:%lldms }\n", + name, p->engine->name, ce->timeline->seqno, + integer, decimal, + div_u64(p->runtime, 1000 * 1000), + div_u64(ktime_to_ns(p->time), 1000 * 1000)); + } + } + +out: + for (idx = 0; idx < nengines; idx++) { + if (IS_ERR_OR_NULL(ps->ce[idx])) + break; + + intel_context_unpin(ps->ce[idx]); + intel_context_put(ps->ce[idx]); + } + kfree(ps); + + cpu_latency_qos_remove_request(&qos); + kfree(stats); + return err; +} + +static int p_sync0(void *arg) +{ + struct perf_stats *p = arg; + struct intel_engine_cs *engine = p->engine; + struct intel_context *ce; + IGT_TIMEOUT(end_time); + unsigned long count; + bool busy; + int err = 0; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + err = intel_context_pin(ce); + if (err) { + intel_context_put(ce); + return err; + } + + busy = false; + if (intel_engine_supports_stats(engine) && + !intel_enable_engine_stats(engine)) { + p->busy = intel_engine_get_busy_time(engine); + busy = true; + } + + p->time = ktime_get(); + count = 0; + do { + struct i915_request *rq; + + rq = i915_request_create(ce); 
+ if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + i915_request_get(rq); + i915_request_add(rq); + + err = 0; + if (i915_request_wait(rq, 0, HZ / 5) < 0) + err = -ETIME; + i915_request_put(rq); + if (err) + break; + + count++; + } while (!__igt_timeout(end_time, NULL)); + p->time = ktime_sub(ktime_get(), p->time); + + if (busy) { + p->busy = ktime_sub(intel_engine_get_busy_time(engine), + p->busy); + intel_disable_engine_stats(engine); + } + + err = switch_to_kernel_sync(ce, err); + p->runtime = intel_context_get_total_runtime_ns(ce); + p->count = count; + + intel_context_unpin(ce); + intel_context_put(ce); + return err; +} + +static int p_sync1(void *arg) +{ + struct perf_stats *p = arg; + struct intel_engine_cs *engine = p->engine; + struct i915_request *prev = NULL; + struct intel_context *ce; + IGT_TIMEOUT(end_time); + unsigned long count; + bool busy; + int err = 0; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + err = intel_context_pin(ce); + if (err) { + intel_context_put(ce); + return err; + } + + busy = false; + if (intel_engine_supports_stats(engine) && + !intel_enable_engine_stats(engine)) { + p->busy = intel_engine_get_busy_time(engine); + busy = true; + } + + p->time = ktime_get(); + count = 0; + do { + struct i915_request *rq; + + rq = i915_request_create(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + i915_request_get(rq); + i915_request_add(rq); + + err = 0; + if (prev && i915_request_wait(prev, 0, HZ / 5) < 0) + err = -ETIME; + i915_request_put(prev); + prev = rq; + if (err) + break; + + count++; + } while (!__igt_timeout(end_time, NULL)); + i915_request_put(prev); + p->time = ktime_sub(ktime_get(), p->time); + + if (busy) { + p->busy = ktime_sub(intel_engine_get_busy_time(engine), + p->busy); + intel_disable_engine_stats(engine); + } + + err = switch_to_kernel_sync(ce, err); + p->runtime = intel_context_get_total_runtime_ns(ce); + p->count = count; + + intel_context_unpin(ce); + 
intel_context_put(ce); + return err; +} + +static int p_many(void *arg) +{ + struct perf_stats *p = arg; + struct intel_engine_cs *engine = p->engine; + struct intel_context *ce; + IGT_TIMEOUT(end_time); + unsigned long count; + int err = 0; + bool busy; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + err = intel_context_pin(ce); + if (err) { + intel_context_put(ce); + return err; + } + + busy = false; + if (intel_engine_supports_stats(engine) && + !intel_enable_engine_stats(engine)) { + p->busy = intel_engine_get_busy_time(engine); + busy = true; + } + + count = 0; + p->time = ktime_get(); + do { + struct i915_request *rq; + + rq = i915_request_create(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + i915_request_add(rq); + count++; + } while (!__igt_timeout(end_time, NULL)); + p->time = ktime_sub(ktime_get(), p->time); + + if (busy) { + p->busy = ktime_sub(intel_engine_get_busy_time(engine), + p->busy); + intel_disable_engine_stats(engine); + } + + err = switch_to_kernel_sync(ce, err); + p->runtime = intel_context_get_total_runtime_ns(ce); + p->count = count; + + intel_context_unpin(ce); + intel_context_put(ce); + return err; +} + +static int perf_parallel_engines(void *arg) +{ + struct drm_i915_private *i915 = arg; + static int (* const func[])(void *arg) = { + p_sync0, + p_sync1, + p_many, + NULL, + }; + const unsigned int nengines = num_uabi_engines(i915); + struct intel_engine_cs *engine; + int (* const *fn)(void *arg); + struct pm_qos_request qos; + struct { + struct perf_stats p; + struct task_struct *tsk; + } *engines; + int err = 0; + + engines = kcalloc(nengines, sizeof(*engines), GFP_KERNEL); + if (!engines) + return -ENOMEM; + + cpu_latency_qos_add_request(&qos, 0); + + for (fn = func; *fn; fn++) { + char name[KSYM_NAME_LEN]; + struct igt_live_test t; + unsigned int idx; + + snprintf(name, sizeof(name), "%ps", *fn); + err = igt_live_test_begin(&t, i915, __func__, name); + if (err) + break; + + 
atomic_set(&i915->selftest.counter, nengines); + + idx = 0; + for_each_uabi_engine(engine, i915) { + intel_engine_pm_get(engine); + + memset(&engines[idx].p, 0, sizeof(engines[idx].p)); + engines[idx].p.engine = engine; + + engines[idx].tsk = kthread_run(*fn, &engines[idx].p, + "igt:%s", engine->name); + if (IS_ERR(engines[idx].tsk)) { + err = PTR_ERR(engines[idx].tsk); + intel_engine_pm_put(engine); + break; + } + get_task_struct(engines[idx++].tsk); + } + + yield(); /* start all threads before we kthread_stop() */ + + idx = 0; + for_each_uabi_engine(engine, i915) { + int status; + + if (IS_ERR(engines[idx].tsk)) + break; + + status = kthread_stop(engines[idx].tsk); + if (status && !err) + err = status; + + intel_engine_pm_put(engine); + put_task_struct(engines[idx++].tsk); + } + + if (igt_live_test_end(&t)) + err = -EIO; + if (err) + break; + + idx = 0; + for_each_uabi_engine(engine, i915) { + struct perf_stats *p = &engines[idx].p; + u64 busy = 100 * ktime_to_ns(p->busy); + u64 dt = ktime_to_ns(p->time); + int integer, decimal; + + if (dt) { + integer = div64_u64(busy, dt); + busy -= integer * dt; + decimal = div64_u64(100 * busy, dt); + } else { + integer = 0; + decimal = 0; + } + + GEM_BUG_ON(engine != p->engine); + pr_info("%s %5s: { count:%lu, busy:%d.%02d%%, runtime:%lldms, walltime:%lldms }\n", + name, engine->name, p->count, integer, decimal, + div_u64(p->runtime, 1000 * 1000), + div_u64(ktime_to_ns(p->time), 1000 * 1000)); + idx++; + } + } + + cpu_latency_qos_remove_request(&qos); + kfree(engines); + return err; +} + +int i915_request_perf_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(perf_series_engines), + SUBTEST(perf_parallel_engines), + }; + + if (intel_gt_is_wedged(&i915->gt)) + return 0; + + return i915_subtests(tests, i915); +} -- cgit From 426d0073fb6d1a9513978cea4c9e8396f3721fba Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 29 Apr 2020 21:54:41 +0100 Subject: drm/i915/gt: Always 
enable busy-stats for execlists In the near future, we will utilize the busy-stats on each engine to approximate the C0 cycles of each, and use that as an input to a manual RPS mechanism. That entails having busy-stats always enabled and so we can remove the enable/disable routines and simplify the pmu setup. As a consequence of always having the stats enabled, we can also show the current active time via sysfs/engine/xcs/active_time_ns. Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20200429205446.3259-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_engine.h | 3 -- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 76 +-------------------------- drivers/gpu/drm/i915/gt/intel_engine_types.h | 29 +++++----- drivers/gpu/drm/i915/gt/intel_lrc.c | 44 ++++------------ drivers/gpu/drm/i915/i915_pmu.c | 32 +---------- drivers/gpu/drm/i915/selftests/i915_request.c | 16 ++---- 6 files changed, 29 insertions(+), 171 deletions(-) (limited to 'drivers/gpu/drm/i915/selftests/i915_request.c') diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index d9ee64e2ef79..d10e52ff059f 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -310,9 +310,6 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m, const char *header, ...); -int intel_enable_engine_stats(struct intel_engine_cs *engine); -void intel_disable_engine_stats(struct intel_engine_cs *engine); - ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine); struct i915_request * diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 80bf71636c0f..c9e46c5ced43 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -1589,58 +1589,6 @@ void intel_engine_dump(struct intel_engine_cs *engine, 
intel_engine_print_breadcrumbs(engine, m); } -/** - * intel_enable_engine_stats() - Enable engine busy tracking on engine - * @engine: engine to enable stats collection - * - * Start collecting the engine busyness data for @engine. - * - * Returns 0 on success or a negative error code. - */ -int intel_enable_engine_stats(struct intel_engine_cs *engine) -{ - struct intel_engine_execlists *execlists = &engine->execlists; - unsigned long flags; - int err = 0; - - if (!intel_engine_supports_stats(engine)) - return -ENODEV; - - execlists_active_lock_bh(execlists); - write_seqlock_irqsave(&engine->stats.lock, flags); - - if (unlikely(engine->stats.enabled == ~0)) { - err = -EBUSY; - goto unlock; - } - - if (engine->stats.enabled++ == 0) { - struct i915_request * const *port; - struct i915_request *rq; - - engine->stats.enabled_at = ktime_get(); - - /* XXX submission method oblivious? */ - for (port = execlists->active; (rq = *port); port++) - engine->stats.active++; - - for (port = execlists->pending; (rq = *port); port++) { - /* Exclude any contexts already counted in active */ - if (!intel_context_inflight_count(rq->context)) - engine->stats.active++; - } - - if (engine->stats.active) - engine->stats.start = engine->stats.enabled_at; - } - -unlock: - write_sequnlock_irqrestore(&engine->stats.lock, flags); - execlists_active_unlock_bh(execlists); - - return err; -} - static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine) { ktime_t total = engine->stats.total; @@ -1649,7 +1597,7 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine) * If the engine is executing something at the moment * add it to the total. 
*/ - if (engine->stats.active) + if (atomic_read(&engine->stats.active)) total = ktime_add(total, ktime_sub(ktime_get(), engine->stats.start)); @@ -1675,28 +1623,6 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine) return total; } -/** - * intel_disable_engine_stats() - Disable engine busy tracking on engine - * @engine: engine to disable stats collection - * - * Stops collecting the engine busyness data for @engine. - */ -void intel_disable_engine_stats(struct intel_engine_cs *engine) -{ - unsigned long flags; - - if (!intel_engine_supports_stats(engine)) - return; - - write_seqlock_irqsave(&engine->stats.lock, flags); - WARN_ON_ONCE(engine->stats.enabled == 0); - if (--engine->stats.enabled == 0) { - engine->stats.total = __intel_engine_get_busy_time(engine); - engine->stats.active = 0; - } - write_sequnlock_irqrestore(&engine->stats.lock, flags); -} - static bool match_ring(struct i915_request *rq) { u32 ring = ENGINE_READ(rq->engine, RING_START); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 483d8ff39a0d..83b1f95ebf12 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -531,28 +531,16 @@ struct intel_engine_cs { u32 (*get_cmd_length_mask)(u32 cmd_header); struct { - /** - * @lock: Lock protecting the below fields. - */ - seqlock_t lock; - /** - * @enabled: Reference count indicating number of listeners. - */ - unsigned int enabled; /** * @active: Number of contexts currently scheduled in. */ - unsigned int active; - /** - * @enabled_at: Timestamp when busy stats were enabled. - */ - ktime_t enabled_at; + atomic_t active; + /** - * @start: Timestamp of the last idle to active transition. - * - * Idle is defined as active == 0, active is active > 0. + * @lock: Lock protecting the below fields. */ - ktime_t start; + seqlock_t lock; + /** * @total: Total time this engine was busy. 
* @@ -560,6 +548,13 @@ struct intel_engine_cs { * where engine is currently busy (active > 0). */ ktime_t total; + + /** + * @start: Timestamp of the last idle to active transition. + * + * Idle is defined as active == 0, active is active > 0. + */ + ktime_t start; } stats; struct { diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 7fc4081c34fe..4311b12542fb 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1199,17 +1199,14 @@ static void intel_engine_context_in(struct intel_engine_cs *engine) { unsigned long flags; - if (READ_ONCE(engine->stats.enabled) == 0) + if (atomic_add_unless(&engine->stats.active, 1, 0)) return; write_seqlock_irqsave(&engine->stats.lock, flags); - - if (engine->stats.enabled > 0) { - if (engine->stats.active++ == 0) - engine->stats.start = ktime_get(); - GEM_BUG_ON(engine->stats.active == 0); + if (!atomic_add_unless(&engine->stats.active, 1, 0)) { + engine->stats.start = ktime_get(); + atomic_inc(&engine->stats.active); } - write_sequnlock_irqrestore(&engine->stats.lock, flags); } @@ -1217,36 +1214,17 @@ static void intel_engine_context_out(struct intel_engine_cs *engine) { unsigned long flags; - if (READ_ONCE(engine->stats.enabled) == 0) + GEM_BUG_ON(!atomic_read(&engine->stats.active)); + + if (atomic_add_unless(&engine->stats.active, -1, 1)) return; write_seqlock_irqsave(&engine->stats.lock, flags); - - if (engine->stats.enabled > 0) { - ktime_t last; - - if (engine->stats.active && --engine->stats.active == 0) { - /* - * Decrement the active context count and in case GPU - * is now idle add up to the running total. - */ - last = ktime_sub(ktime_get(), engine->stats.start); - - engine->stats.total = ktime_add(engine->stats.total, - last); - } else if (engine->stats.active == 0) { - /* - * After turning on engine stats, context out might be - * the first event in which case we account from the - * time stats gathering was turned on. 
- */ - last = ktime_sub(ktime_get(), engine->stats.enabled_at); - - engine->stats.total = ktime_add(engine->stats.total, - last); - } + if (atomic_dec_and_test(&engine->stats.active)) { + engine->stats.total = + ktime_add(engine->stats.total, + ktime_sub(ktime_get(), engine->stats.start)); } - write_sequnlock_irqrestore(&engine->stats.lock, flags); } diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 230e9256ab30..83c6a8ccd2cb 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -439,29 +439,9 @@ static u64 count_interrupts(struct drm_i915_private *i915) return sum; } -static void engine_event_destroy(struct perf_event *event) -{ - struct drm_i915_private *i915 = - container_of(event->pmu, typeof(*i915), pmu.base); - struct intel_engine_cs *engine; - - engine = intel_engine_lookup_user(i915, - engine_event_class(event), - engine_event_instance(event)); - if (drm_WARN_ON_ONCE(&i915->drm, !engine)) - return; - - if (engine_event_sample(event) == I915_SAMPLE_BUSY && - intel_engine_supports_stats(engine)) - intel_disable_engine_stats(engine); -} - static void i915_pmu_event_destroy(struct perf_event *event) { WARN_ON(event->parent); - - if (is_engine_event(event)) - engine_event_destroy(event); } static int @@ -514,23 +494,13 @@ static int engine_event_init(struct perf_event *event) struct drm_i915_private *i915 = container_of(event->pmu, typeof(*i915), pmu.base); struct intel_engine_cs *engine; - u8 sample; - int ret; engine = intel_engine_lookup_user(i915, engine_event_class(event), engine_event_instance(event)); if (!engine) return -ENODEV; - sample = engine_event_sample(event); - ret = engine_event_status(engine, sample); - if (ret) - return ret; - - if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine)) - ret = intel_enable_engine_stats(engine); - - return ret; + return engine_event_status(engine, engine_event_sample(event)); } static int i915_pmu_event_init(struct perf_event 
*event) diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 3b319c0953cb..15b1ca9f7a01 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -1679,8 +1679,7 @@ static int perf_series_engines(void *arg) p->engine = ps->ce[idx]->engine; intel_engine_pm_get(p->engine); - if (intel_engine_supports_stats(p->engine) && - !intel_enable_engine_stats(p->engine)) + if (intel_engine_supports_stats(p->engine)) p->busy = intel_engine_get_busy_time(p->engine) + 1; p->runtime = -intel_context_get_total_runtime_ns(ce); p->time = ktime_get(); @@ -1700,7 +1699,6 @@ static int perf_series_engines(void *arg) if (p->busy) { p->busy = ktime_sub(intel_engine_get_busy_time(p->engine), p->busy - 1); - intel_disable_engine_stats(p->engine); } err = switch_to_kernel_sync(ce, err); @@ -1762,8 +1760,7 @@ static int p_sync0(void *arg) } busy = false; - if (intel_engine_supports_stats(engine) && - !intel_enable_engine_stats(engine)) { + if (intel_engine_supports_stats(engine)) { p->busy = intel_engine_get_busy_time(engine); busy = true; } @@ -1796,7 +1793,6 @@ static int p_sync0(void *arg) if (busy) { p->busy = ktime_sub(intel_engine_get_busy_time(engine), p->busy); - intel_disable_engine_stats(engine); } err = switch_to_kernel_sync(ce, err); @@ -1830,8 +1826,7 @@ static int p_sync1(void *arg) } busy = false; - if (intel_engine_supports_stats(engine) && - !intel_enable_engine_stats(engine)) { + if (intel_engine_supports_stats(engine)) { p->busy = intel_engine_get_busy_time(engine); busy = true; } @@ -1866,7 +1861,6 @@ static int p_sync1(void *arg) if (busy) { p->busy = ktime_sub(intel_engine_get_busy_time(engine), p->busy); - intel_disable_engine_stats(engine); } err = switch_to_kernel_sync(ce, err); @@ -1899,8 +1893,7 @@ static int p_many(void *arg) } busy = false; - if (intel_engine_supports_stats(engine) && - !intel_enable_engine_stats(engine)) { + if 
(intel_engine_supports_stats(engine)) { p->busy = intel_engine_get_busy_time(engine); busy = true; } @@ -1924,7 +1917,6 @@ static int p_many(void *arg) if (busy) { p->busy = ktime_sub(intel_engine_get_busy_time(engine), p->busy); - intel_disable_engine_stats(engine); } err = switch_to_kernel_sync(ce, err); -- cgit From b0a997ae5248b293b6f6d1996ea49c57f7b94227 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 10 May 2020 11:24:29 +0100 Subject: drm/i915: Emit await(batch) before MI_BB_START Be consistent and ensure that we always emit the asynchronous waits prior to issuing instructions that use the address. This ensures that if we do emit GPU commands to do the await, they are before our use! Reviewed-by: Mika Kuoppala Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200510102431.21959-1-chris@chris-wilson.co.uk --- .../gpu/drm/i915/gem/selftests/i915_gem_context.c | 49 ++++++++++++++-------- drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c | 25 ++++------- drivers/gpu/drm/i915/gt/intel_renderstate.c | 16 +++---- drivers/gpu/drm/i915/selftests/i915_request.c | 28 ++++++------- 4 files changed, 64 insertions(+), 54 deletions(-) (limited to 'drivers/gpu/drm/i915/selftests/i915_request.c') diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 87d264fe54b2..b81978890641 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -972,12 +972,6 @@ emit_rpcs_query(struct drm_i915_gem_object *obj, goto err_batch; } - err = rq->engine->emit_bb_start(rq, - batch->node.start, batch->node.size, - 0); - if (err) - goto err_request; - i915_vma_lock(batch); err = i915_request_await_object(rq, batch->obj, false); if (err == 0) @@ -994,6 +988,18 @@ emit_rpcs_query(struct drm_i915_gem_object *obj, if (err) goto skip_request; + if (rq->engine->emit_init_breadcrumb) { + err = 
rq->engine->emit_init_breadcrumb(rq); + if (err) + goto skip_request; + } + + err = rq->engine->emit_bb_start(rq, + batch->node.start, batch->node.size, + 0); + if (err) + goto skip_request; + i915_vma_unpin_and_release(&batch, 0); i915_vma_unpin(vma); @@ -1005,7 +1011,6 @@ emit_rpcs_query(struct drm_i915_gem_object *obj, skip_request: i915_request_set_error_once(rq, err); -err_request: i915_request_add(rq); err_batch: i915_vma_unpin_and_release(&batch, 0); @@ -1541,10 +1546,6 @@ static int write_to_scratch(struct i915_gem_context *ctx, goto err_unpin; } - err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0); - if (err) - goto err_request; - i915_vma_lock(vma); err = i915_request_await_object(rq, vma->obj, false); if (err == 0) @@ -1553,6 +1554,16 @@ static int write_to_scratch(struct i915_gem_context *ctx, if (err) goto skip_request; + if (rq->engine->emit_init_breadcrumb) { + err = rq->engine->emit_init_breadcrumb(rq); + if (err) + goto skip_request; + } + + err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0); + if (err) + goto skip_request; + i915_vma_unpin(vma); i915_request_add(rq); @@ -1560,7 +1571,6 @@ static int write_to_scratch(struct i915_gem_context *ctx, goto out_vm; skip_request: i915_request_set_error_once(rq, err); -err_request: i915_request_add(rq); err_unpin: i915_vma_unpin(vma); @@ -1674,10 +1684,6 @@ static int read_from_scratch(struct i915_gem_context *ctx, goto err_unpin; } - err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags); - if (err) - goto err_request; - i915_vma_lock(vma); err = i915_request_await_object(rq, vma->obj, true); if (err == 0) @@ -1686,6 +1692,16 @@ static int read_from_scratch(struct i915_gem_context *ctx, if (err) goto skip_request; + if (rq->engine->emit_init_breadcrumb) { + err = rq->engine->emit_init_breadcrumb(rq); + if (err) + goto skip_request; + } + + err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags); + if (err) + goto skip_request; + 
i915_vma_unpin(vma); i915_request_add(rq); @@ -1708,7 +1724,6 @@ static int read_from_scratch(struct i915_gem_context *ctx, goto out_vm; skip_request: i915_request_set_error_once(rq, err); -err_request: i915_request_add(rq); err_unpin: i915_vma_unpin(vma); diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c index 772d8cba7da9..8b3925b4036f 100644 --- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c +++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c @@ -83,6 +83,7 @@ igt_emit_store_dw(struct i915_vma *vma, offset += PAGE_SIZE; } *cmd = MI_BATCH_BUFFER_END; + i915_gem_object_unpin_map(obj); intel_gt_chipset_flush(vma->vm->gt); @@ -126,16 +127,6 @@ int igt_gpu_fill_dw(struct intel_context *ce, goto err_batch; } - flags = 0; - if (INTEL_GEN(ce->vm->i915) <= 5) - flags |= I915_DISPATCH_SECURE; - - err = rq->engine->emit_bb_start(rq, - batch->node.start, batch->node.size, - flags); - if (err) - goto err_request; - i915_vma_lock(batch); err = i915_request_await_object(rq, batch->obj, false); if (err == 0) @@ -152,15 +143,17 @@ int igt_gpu_fill_dw(struct intel_context *ce, if (err) goto skip_request; - i915_request_add(rq); - - i915_vma_unpin_and_release(&batch, 0); + flags = 0; + if (INTEL_GEN(ce->vm->i915) <= 5) + flags |= I915_DISPATCH_SECURE; - return 0; + err = rq->engine->emit_bb_start(rq, + batch->node.start, batch->node.size, + flags); skip_request: - i915_request_set_error_once(rq, err); -err_request: + if (err) + i915_request_set_error_once(rq, err); i915_request_add(rq); err_batch: i915_vma_unpin_and_release(&batch, 0); diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c index 708cb7808865..f59e7875cc5e 100644 --- a/drivers/gpu/drm/i915/gt/intel_renderstate.c +++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c @@ -219,6 +219,14 @@ int intel_renderstate_emit(struct intel_renderstate *so, if (!so->vma) return 0; + i915_vma_lock(so->vma); + 
err = i915_request_await_object(rq, so->vma->obj, false); + if (err == 0) + err = i915_vma_move_to_active(so->vma, rq, 0); + i915_vma_unlock(so->vma); + if (err) + return err; + err = engine->emit_bb_start(rq, so->batch_offset, so->batch_size, I915_DISPATCH_SECURE); @@ -233,13 +241,7 @@ int intel_renderstate_emit(struct intel_renderstate *so, return err; } - i915_vma_lock(so->vma); - err = i915_request_await_object(rq, so->vma->obj, false); - if (err == 0) - err = i915_vma_move_to_active(so->vma, rq, 0); - i915_vma_unlock(so->vma); - - return err; + return 0; } void intel_renderstate_fini(struct intel_renderstate *so) diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 15b1ca9f7a01..ffdfcb3805b5 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -865,13 +865,6 @@ static int live_all_engines(void *arg) goto out_request; } - err = engine->emit_bb_start(request[idx], - batch->node.start, - batch->node.size, - 0); - GEM_BUG_ON(err); - request[idx]->batch = batch; - i915_vma_lock(batch); err = i915_request_await_object(request[idx], batch->obj, 0); if (err == 0) @@ -879,6 +872,13 @@ static int live_all_engines(void *arg) i915_vma_unlock(batch); GEM_BUG_ON(err); + err = engine->emit_bb_start(request[idx], + batch->node.start, + batch->node.size, + 0); + GEM_BUG_ON(err); + request[idx]->batch = batch; + i915_request_get(request[idx]); i915_request_add(request[idx]); idx++; @@ -993,13 +993,6 @@ static int live_sequential_engines(void *arg) } } - err = engine->emit_bb_start(request[idx], - batch->node.start, - batch->node.size, - 0); - GEM_BUG_ON(err); - request[idx]->batch = batch; - i915_vma_lock(batch); err = i915_request_await_object(request[idx], batch->obj, false); @@ -1008,6 +1001,13 @@ static int live_sequential_engines(void *arg) i915_vma_unlock(batch); GEM_BUG_ON(err); + err = engine->emit_bb_start(request[idx], + batch->node.start, + 
batch->node.size, + 0); + GEM_BUG_ON(err); + request[idx]->batch = batch; + i915_request_get(request[idx]); i915_request_add(request[idx]); -- cgit From 9bad40a27dac1f88012a1e2db0bfc5ae58fa0370 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 11 May 2020 15:13:03 +0100 Subject: drm/i915/selftests: Always flush before unpinning after writing Be consistent, and even when we know we had used a WC, flush the mapped object after writing into it. The flush understands the mapping type and will only clflush if !I915_MAP_WC, but will always insert a wmb [sfence] so that we can be sure that all writes are visible. v2: Add the unconditional wmb so we know that we always flush the writes to memory/HW at that point. Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20200511141304.599-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_object_blt.c | 8 ++++++-- drivers/gpu/drm/i915/gem/i915_gem_pages.c | 1 + drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c | 2 ++ drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c | 1 + drivers/gpu/drm/i915/gt/selftest_ring_submission.c | 2 ++ drivers/gpu/drm/i915/gt/selftest_rps.c | 2 ++ drivers/gpu/drm/i915/selftests/i915_request.c | 9 +++++++-- 7 files changed, 21 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/i915/selftests/i915_request.c') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c index 2fc7737ef5f4..f457d7130491 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c @@ -78,10 +78,12 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce, } while (rem); *cmd = MI_BATCH_BUFFER_END; - intel_gt_chipset_flush(ce->vm->gt); + i915_gem_object_flush_map(pool->obj); i915_gem_object_unpin_map(pool->obj); + intel_gt_chipset_flush(ce->vm->gt); + batch = i915_vma_instance(pool->obj, ce->vm, NULL); if
(IS_ERR(batch)) { err = PTR_ERR(batch); @@ -289,10 +291,12 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce, } while (rem); *cmd = MI_BATCH_BUFFER_END; - intel_gt_chipset_flush(ce->vm->gt); + i915_gem_object_flush_map(pool->obj); i915_gem_object_unpin_map(pool->obj); + intel_gt_chipset_flush(ce->vm->gt); + batch = i915_vma_instance(pool->obj, ce->vm, NULL); if (IS_ERR(batch)) { err = PTR_ERR(batch); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 5d855fcd5c0f..af9e48ee4a33 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -391,6 +391,7 @@ void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj, GEM_BUG_ON(range_overflows_t(typeof(obj->base.size), offset, size, obj->base.size)); + wmb(); /* let all previous writes be visible to coherent partners */ obj->mm.dirty = true; if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c index 3f6079e1dfb6..87d7d8aa080f 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c @@ -158,6 +158,8 @@ static int wc_set(struct context *ctx, unsigned long offset, u32 v) return PTR_ERR(map); map[offset / sizeof(*map)] = v; + + __i915_gem_object_flush_map(ctx->obj, offset, sizeof(*map)); i915_gem_object_unpin_map(ctx->obj); return 0; diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c index 8b3925b4036f..e21b5023ca7d 100644 --- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c +++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c @@ -84,6 +84,7 @@ igt_emit_store_dw(struct i915_vma *vma, } *cmd = MI_BATCH_BUFFER_END; + i915_gem_object_flush_map(obj); i915_gem_object_unpin_map(obj); intel_gt_chipset_flush(vma->vm->gt); diff --git 
a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c index 9995faadd7e8..3350e7c995bc 100644 --- a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c @@ -54,6 +54,8 @@ static struct i915_vma *create_wally(struct intel_engine_cs *engine) *cs++ = STACK_MAGIC; *cs++ = MI_BATCH_BUFFER_END; + + i915_gem_object_flush_map(obj); i915_gem_object_unpin_map(obj); vma->private = intel_context_create(engine); /* dummy residuals */ diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index bfa1a15564f7..6275d69aa9cc 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -727,6 +727,7 @@ int live_rps_frequency_cs(void *arg) err_vma: *cancel = MI_BATCH_BUFFER_END; + i915_gem_object_flush_map(vma->obj); i915_gem_object_unpin_map(vma->obj); i915_vma_unpin(vma); i915_vma_put(vma); @@ -868,6 +869,7 @@ int live_rps_frequency_srm(void *arg) err_vma: *cancel = MI_BATCH_BUFFER_END; + i915_gem_object_flush_map(vma->obj); i915_gem_object_unpin_map(vma->obj); i915_vma_unpin(vma); i915_vma_put(vma); diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index ffdfcb3805b5..6014e8dfcbb1 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -816,10 +816,12 @@ static int recursive_batch_resolve(struct i915_vma *batch) return PTR_ERR(cmd); *cmd = MI_BATCH_BUFFER_END; - intel_gt_chipset_flush(batch->vm->gt); + __i915_gem_object_flush_map(batch->obj, 0, sizeof(*cmd)); i915_gem_object_unpin_map(batch->obj); + intel_gt_chipset_flush(batch->vm->gt); + return 0; } @@ -1060,9 +1062,12 @@ out_request: I915_MAP_WC); if (!IS_ERR(cmd)) { *cmd = MI_BATCH_BUFFER_END; - intel_gt_chipset_flush(engine->gt); + __i915_gem_object_flush_map(request[idx]->batch->obj, + 0, sizeof(*cmd)); 
i915_gem_object_unpin_map(request[idx]->batch->obj); + + intel_gt_chipset_flush(engine->gt); } i915_vma_put(request[idx]->batch); -- cgit