Diffstat (limited to 'drivers/gpu/drm/i915/selftests/i915_request.c')
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c | 236
1 file changed, 219 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 64bbb8288249..db367a6721c5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -33,6 +33,7 @@
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_requests.h"
#include "gt/selftest_engine_heartbeat.h"
@@ -608,6 +609,206 @@ static int live_nop_request(void *arg)
return err;
}
+static int __cancel_inactive(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ int err = 0;
+
+ if (igt_spinner_init(&spin, engine->gt))
+ return -ENOMEM;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out_spin;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ce;
+ }
+
+ pr_debug("%s: Cancelling inactive request\n", engine->name);
+ i915_request_cancel(rq, -EINTR);
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("%s: Failed to cancel inactive request\n", engine->name);
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ err = -ETIME;
+ goto out_rq;
+ }
+
+ if (rq->fence.error != -EINTR) {
+ pr_err("%s: fence not cancelled (%u)\n",
+ engine->name, rq->fence.error);
+ err = -EINVAL;
+ }
+
+out_rq:
+ i915_request_put(rq);
+out_ce:
+ intel_context_put(ce);
+out_spin:
+ igt_spinner_fini(&spin);
+ if (err)
+ pr_err("%s: %s error %d\n", __func__, engine->name, err);
+ return err;
+}
+
+static int __cancel_active(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ int err = 0;
+
+ if (igt_spinner_init(&spin, engine->gt))
+ return -ENOMEM;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out_spin;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ce;
+ }
+
+ pr_debug("%s: Cancelling active request\n", engine->name);
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("Failed to start spinner on %s\n", engine->name);
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ err = -ETIME;
+ goto out_rq;
+ }
+ i915_request_cancel(rq, -EINTR);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("%s: Failed to cancel active request\n", engine->name);
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ err = -ETIME;
+ goto out_rq;
+ }
+
+ if (rq->fence.error != -EINTR) {
+ pr_err("%s: fence not cancelled (%u)\n",
+ engine->name, rq->fence.error);
+ err = -EINVAL;
+ }
+
+out_rq:
+ i915_request_put(rq);
+out_ce:
+ intel_context_put(ce);
+out_spin:
+ igt_spinner_fini(&spin);
+ if (err)
+ pr_err("%s: %s error %d\n", __func__, engine->name, err);
+ return err;
+}
+
+static int __cancel_completed(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ int err = 0;
+
+ if (igt_spinner_init(&spin, engine->gt))
+ return -ENOMEM;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out_spin;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ce;
+ }
+ igt_spinner_end(&spin);
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = -ETIME;
+ goto out_rq;
+ }
+
+ pr_debug("%s: Cancelling completed request\n", engine->name);
+ i915_request_cancel(rq, -EINTR);
+ if (rq->fence.error) {
+ pr_err("%s: fence not cancelled (%u)\n",
+ engine->name, rq->fence.error);
+ err = -EINVAL;
+ }
+
+out_rq:
+ i915_request_put(rq);
+out_ce:
+ intel_context_put(ce);
+out_spin:
+ igt_spinner_fini(&spin);
+ if (err)
+ pr_err("%s: %s error %d\n", __func__, engine->name, err);
+ return err;
+}
+
+static int live_cancel_request(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+
+ /*
+ * Check cancellation of requests. We expect to be able to immediately
+ * cancel active requests, even if they are currently on the GPU.
+ */
+
+ for_each_uabi_engine(engine, i915) {
+ struct igt_live_test t;
+ int err, err2;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ err = igt_live_test_begin(&t, i915, __func__, engine->name);
+ if (err)
+ return err;
+
+ err = __cancel_inactive(engine);
+ if (err == 0)
+ err = __cancel_active(engine);
+ if (err == 0)
+ err = __cancel_completed(engine);
+
+ err2 = igt_live_test_end(&t);
+ if (err)
+ return err;
+ if (err2)
+ return err2;
+ }
+
+ return 0;
+}
+
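For readers unfamiliar with the interface exercised above, the sketch below is illustrative only and not part of the patch: it shows how a caller holding a request reference might cancel it and confirm that the error propagated to the fence, mirroring the flow of the helpers added here. The function name cancel_and_check() is hypothetical; i915_request_cancel(), i915_request_wait() and rq->fence.error are the calls and fields used by the selftests above.

/*
 * Illustrative sketch only, not part of the patch: cancel a request the
 * caller still holds a reference to, then verify the propagated error.
 */
static int cancel_and_check(struct i915_request *rq)
{
	/* Mark the request for early completion with the given error. */
	i915_request_cancel(rq, -EINTR);

	/* The fence still signals; it just carries the cancellation error. */
	if (i915_request_wait(rq, 0, HZ) < 0)
		return -ETIME;

	/* A request that had already completed keeps fence.error == 0. */
	return rq->fence.error == -EINTR ? 0 : -EINVAL;
}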
static struct i915_vma *empty_batch(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
@@ -619,7 +820,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
if (IS_ERR(obj))
return ERR_CAST(obj);
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto err;
@@ -762,7 +963,7 @@ out_batch:
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
- const int gen = INTEL_GEN(i915);
+ const int ver = GRAPHICS_VER(i915);
struct i915_vma *vma;
u32 *cmd;
int err;
@@ -781,17 +982,17 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
if (err)
goto err;
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto err;
}
- if (gen >= 8) {
+ if (ver >= 8) {
*cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
*cmd++ = lower_32_bits(vma->node.start);
*cmd++ = upper_32_bits(vma->node.start);
- } else if (gen >= 6) {
+ } else if (ver >= 6) {
*cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
*cmd++ = lower_32_bits(vma->node.start);
} else {
@@ -816,7 +1017,7 @@ static int recursive_batch_resolve(struct i915_vma *batch)
{
u32 *cmd;
- cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ cmd = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
if (IS_ERR(cmd))
return PTR_ERR(cmd);
@@ -1069,8 +1270,8 @@ out_request:
if (!request[idx])
break;
- cmd = i915_gem_object_pin_map(request[idx]->batch->obj,
- I915_MAP_WC);
+ cmd = i915_gem_object_pin_map_unlocked(request[idx]->batch->obj,
+ I915_MAP_WC);
if (!IS_ERR(cmd)) {
*cmd = MI_BATCH_BUFFER_END;
@@ -1485,6 +1686,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_sequential_engines),
SUBTEST(live_parallel_engines),
SUBTEST(live_empty_request),
+ SUBTEST(live_cancel_request),
SUBTEST(live_breadcrumbs_smoketest),
};
@@ -1560,7 +1762,7 @@ static u32 trifilter(u32 *a)
static u64 cycles_to_ns(struct intel_engine_cs *engine, u32 cycles)
{
- u64 ns = i915_cs_timestamp_ticks_to_ns(engine->i915, cycles);
+ u64 ns = intel_gt_clock_interval_to_ns(engine->gt, cycles);
return DIV_ROUND_CLOSEST(ns, 1 << TF_BIAS);
}
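As a rough illustration of the conversion above (not taken from the patch; the 12.5 MHz command-streamer timestamp clock, i.e. 80 ns per tick, is a hypothetical example value):

/*
 * Illustrative arithmetic only, assuming an 80 ns CS timestamp tick:
 *
 *	cycles = 1000;
 *	ns     = intel_gt_clock_interval_to_ns(engine->gt, cycles);
 *			// 1000 ticks * 80 ns = 80000 ns
 *	result = DIV_ROUND_CLOSEST(ns, 1 << TF_BIAS);
 *			// removes the fixed-point bias left by trifilter()
 */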
@@ -1932,9 +2134,7 @@ static int measure_inter_request(struct intel_context *ce)
intel_ring_advance(rq, cs);
i915_request_add(rq);
}
- local_bh_disable();
i915_sw_fence_commit(submit);
- local_bh_enable();
intel_engine_flush_submission(ce->engine);
heap_fence_put(submit);
@@ -2220,11 +2420,9 @@ static int measure_completion(struct intel_context *ce)
intel_ring_advance(rq, cs);
dma_fence_add_callback(&rq->fence, &cb.base, signal_cb);
-
- local_bh_disable();
i915_request_add(rq);
- local_bh_enable();
+ intel_engine_flush_submission(ce->engine);
if (wait_for(READ_ONCE(sema[i]) == -1, 50)) {
err = -EIO;
goto err;
@@ -2284,7 +2482,7 @@ static int perf_request_latency(void *arg)
struct pm_qos_request qos;
int err = 0;
- if (INTEL_GEN(i915) < 8) /* per-engine CS timestamp, semaphores */
+ if (GRAPHICS_VER(i915) < 8) /* per-engine CS timestamp, semaphores */
return 0;
cpu_latency_qos_add_request(&qos, 0); /* disable cstates */
@@ -2293,8 +2491,10 @@ static int perf_request_latency(void *arg)
struct intel_context *ce;
ce = intel_context_create(engine);
- if (IS_ERR(ce))
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
goto out;
+ }
err = intel_context_pin(ce);
if (err) {
@@ -2467,8 +2667,10 @@ static int perf_series_engines(void *arg)
struct intel_context *ce;
ce = intel_context_create(engine);
- if (IS_ERR(ce))
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
goto out;
+ }
err = intel_context_pin(ce);
if (err) {