From 4e0d64dba816adf18c17488d38ede67a3d0e9b40 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 17 May 2018 22:26:30 +0100 Subject: drm/i915: Move request->ctx aside In the next patch, we want to store the intel_context pointer inside i915_request, as it is frequently accessed via a convoluted dance when submitting the request to hw. Having two context pointers inside i915_request leads to confusion, so first rename the existing i915_gem_context pointer to i915_request.gem_context. Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Joonas Lahtinen Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20180517212633.24934-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_request.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/i915/i915_request.h') diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index eddbd4245cb3..dddecd9ffd0c 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -93,7 +93,7 @@ struct i915_request { * i915_request_free() will then decrement the refcount on the * context. */ - struct i915_gem_context *ctx; + struct i915_gem_context *gem_context; struct intel_engine_cs *engine; struct intel_ring *ring; struct i915_timeline *timeline; -- cgit From 1fc44d9b1afb0afe46acd99bdfdf793805a850e1 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 17 May 2018 22:26:32 +0100 Subject: drm/i915: Store a pointer to intel_context in i915_request To ease the frequent and ugly pointer dance of &request->gem_context->engine[request->engine->id] during request submission, store that pointer as request->hw_context. One major advantage that we will exploit later is that this decouples the logical context state from the engine itself. v2: Set mock_context->ops so we don't crash and burn in selftests. Cleanups from Tvrtko.
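As a rough illustration (a sketch for this log, not code taken from the patch), submission paths go from re-deriving the per-engine state on every use to reading back the pointer cached at request construction:

	struct intel_context *ce;

	/* before: recompute the per-engine state on every submission */
	ce = to_intel_context(rq->gem_context, rq->engine);

	/* after: the request already carries the pinned context */
	ce = rq->hw_context;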
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Acked-by: Zhenyu Wang Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20180517212633.24934-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gvt/mmio_context.c | 6 +- drivers/gpu/drm/i915/gvt/mmio_context.h | 2 +- drivers/gpu/drm/i915/gvt/scheduler.c | 141 ++++++++++---------------- drivers/gpu/drm/i915/gvt/scheduler.h | 1 - drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_gem.c | 12 +-- drivers/gpu/drm/i915/i915_gem_context.c | 17 ++-- drivers/gpu/drm/i915/i915_gem_context.h | 21 ++-- drivers/gpu/drm/i915/i915_gpu_error.c | 3 +- drivers/gpu/drm/i915/i915_perf.c | 25 ++--- drivers/gpu/drm/i915/i915_request.c | 34 +++---- drivers/gpu/drm/i915/i915_request.h | 1 + drivers/gpu/drm/i915/intel_engine_cs.c | 54 ++++++---- drivers/gpu/drm/i915/intel_guc_submission.c | 10 +- drivers/gpu/drm/i915/intel_lrc.c | 125 +++++++++++++---------- drivers/gpu/drm/i915/intel_lrc.h | 7 -- drivers/gpu/drm/i915/intel_ringbuffer.c | 100 +++++++++++------- drivers/gpu/drm/i915/intel_ringbuffer.h | 9 +- drivers/gpu/drm/i915/selftests/mock_context.c | 7 ++ drivers/gpu/drm/i915/selftests/mock_engine.c | 41 +++++--- 20 files changed, 321 insertions(+), 296 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_request.h') diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 0f949554d118..708170e61625 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -446,9 +446,9 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, #define CTX_CONTEXT_CONTROL_VAL 0x03 -bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id) +bool is_inhibit_context(struct intel_context *ce) { - u32 *reg_state = ctx->__engine[ring_id].lrc_reg_state; + const u32 *reg_state = ce->lrc_reg_state; u32 inhibit_mask = _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); @@ -501,7 +501,7 @@ static void switch_mmio(struct intel_vgpu *pre, * itself. 
*/ if (mmio->in_context && - !is_inhibit_context(s->shadow_ctx, ring_id)) + !is_inhibit_context(&s->shadow_ctx->__engine[ring_id])) continue; if (mmio->mask) diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h index 0439eb8057a8..5c3b9ff9f96a 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.h +++ b/drivers/gpu/drm/i915/gvt/mmio_context.h @@ -49,7 +49,7 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre, void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt); -bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id); +bool is_inhibit_context(struct intel_context *ce); int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu, struct i915_request *req); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 17f9f8d7e148..e1760030dda1 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -54,11 +54,8 @@ static void set_context_pdp_root_pointer( static void update_shadow_pdps(struct intel_vgpu_workload *workload) { - struct intel_vgpu *vgpu = workload->vgpu; - int ring_id = workload->ring_id; - struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx; struct drm_i915_gem_object *ctx_obj = - shadow_ctx->__engine[ring_id].state->obj; + workload->req->hw_context->state->obj; struct execlist_ring_context *shadow_ring_context; struct page *page; @@ -128,9 +125,8 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) struct intel_vgpu *vgpu = workload->vgpu; struct intel_gvt *gvt = vgpu->gvt; int ring_id = workload->ring_id; - struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx; struct drm_i915_gem_object *ctx_obj = - shadow_ctx->__engine[ring_id].state->obj; + workload->req->hw_context->state->obj; struct execlist_ring_context *shadow_ring_context; struct page *page; void *dst; @@ -280,10 +276,8 @@ static int shadow_context_status_change(struct notifier_block *nb, return NOTIFY_OK; } -static void shadow_context_descriptor_update(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) +static void shadow_context_descriptor_update(struct intel_context *ce) { - struct intel_context *ce = to_intel_context(ctx, engine); u64 desc = 0; desc = ce->lrc_desc; @@ -292,7 +286,7 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx, * like GEN8_CTX_* cached in desc_template */ desc &= U64_MAX << 12; - desc |= ctx->desc_template & ((1ULL << 12) - 1); + desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1); ce->lrc_desc = desc; } @@ -300,12 +294,11 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx, static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; + struct i915_request *req = workload->req; void *shadow_ring_buffer_va; u32 *cs; - struct i915_request *req = workload->req; - if (IS_KABYLAKE(req->i915) && - is_inhibit_context(req->gem_context, req->engine->id)) + if (IS_KABYLAKE(req->i915) && is_inhibit_context(req->hw_context)) intel_vgpu_restore_inhibit_context(vgpu, req); /* allocate shadow ring buffer */ @@ -353,60 +346,56 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) struct intel_vgpu_submission *s = &vgpu->submission; struct i915_gem_context *shadow_ctx = s->shadow_ctx; struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - int ring_id = workload->ring_id; - struct intel_engine_cs *engine = dev_priv->engine[ring_id]; - struct intel_ring *ring; + 
struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id]; + struct intel_context *ce; int ret; lockdep_assert_held(&dev_priv->drm.struct_mutex); - if (workload->shadowed) + if (workload->req) return 0; + /* pin shadow context by gvt even the shadow context will be pinned + * when i915 alloc request. That is because gvt will update the guest + * context from shadow context when workload is completed, and at that + * moment, i915 may already unpined the shadow context to make the + * shadow_ctx pages invalid. So gvt need to pin itself. After update + * the guest context, gvt can unpin the shadow_ctx safely. + */ + ce = intel_context_pin(shadow_ctx, engine); + if (IS_ERR(ce)) { + gvt_vgpu_err("fail to pin shadow context\n"); + return PTR_ERR(ce); + } + shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT); shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT; - if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated)) - shadow_context_descriptor_update(shadow_ctx, - dev_priv->engine[ring_id]); + if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated)) + shadow_context_descriptor_update(ce); ret = intel_gvt_scan_and_shadow_ringbuffer(workload); if (ret) - goto err_scan; + goto err_unpin; if ((workload->ring_id == RCS) && (workload->wa_ctx.indirect_ctx.size != 0)) { ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); if (ret) - goto err_scan; - } - - /* pin shadow context by gvt even the shadow context will be pinned - * when i915 alloc request. That is because gvt will update the guest - * context from shadow context when workload is completed, and at that - * moment, i915 may already unpined the shadow context to make the - * shadow_ctx pages invalid. So gvt need to pin itself. After update - * the guest context, gvt can unpin the shadow_ctx safely. 
- */ - ring = intel_context_pin(shadow_ctx, engine); - if (IS_ERR(ring)) { - ret = PTR_ERR(ring); - gvt_vgpu_err("fail to pin shadow context\n"); - goto err_shadow; + goto err_shadow; } ret = populate_shadow_context(workload); if (ret) - goto err_unpin; - workload->shadowed = true; + goto err_shadow; + return 0; -err_unpin: - intel_context_unpin(shadow_ctx, engine); err_shadow: release_shadow_wa_ctx(&workload->wa_ctx); -err_scan: +err_unpin: + intel_context_unpin(ce); return ret; } @@ -414,7 +403,6 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload) { int ring_id = workload->ring_id; struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; - struct intel_engine_cs *engine = dev_priv->engine[ring_id]; struct i915_request *rq; struct intel_vgpu *vgpu = workload->vgpu; struct intel_vgpu_submission *s = &vgpu->submission; @@ -437,7 +425,6 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload) return 0; err_unpin: - intel_context_unpin(shadow_ctx, engine); release_shadow_wa_ctx(&workload->wa_ctx); return ret; } @@ -517,21 +504,13 @@ err: return ret; } -static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx) +static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx) { - struct intel_vgpu_workload *workload = container_of(wa_ctx, - struct intel_vgpu_workload, - wa_ctx); - int ring_id = workload->ring_id; - struct intel_vgpu_submission *s = &workload->vgpu->submission; - struct i915_gem_context *shadow_ctx = s->shadow_ctx; - struct drm_i915_gem_object *ctx_obj = - shadow_ctx->__engine[ring_id].state->obj; - struct execlist_ring_context *shadow_ring_context; - struct page *page; - - page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); - shadow_ring_context = kmap_atomic(page); + struct intel_vgpu_workload *workload = + container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx); + struct i915_request *rq = workload->req; + struct execlist_ring_context *shadow_ring_context = + (struct execlist_ring_context *)rq->hw_context->lrc_reg_state; shadow_ring_context->bb_per_ctx_ptr.val = (shadow_ring_context->bb_per_ctx_ptr.val & @@ -539,9 +518,6 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx) shadow_ring_context->rcs_indirect_ctx.val = (shadow_ring_context->rcs_indirect_ctx.val & (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma; - - kunmap_atomic(shadow_ring_context); - return 0; } static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) @@ -670,12 +646,9 @@ err_unpin_mm: static int dispatch_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; - struct intel_vgpu_submission *s = &vgpu->submission; - struct i915_gem_context *shadow_ctx = s->shadow_ctx; struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; int ring_id = workload->ring_id; - struct intel_engine_cs *engine = dev_priv->engine[ring_id]; - int ret = 0; + int ret; gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n", ring_id, workload); @@ -687,10 +660,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) goto out; ret = prepare_workload(workload); - if (ret) { - intel_context_unpin(shadow_ctx, engine); - goto out; - } out: if (ret) @@ -765,27 +734,23 @@ out: static void update_guest_context(struct intel_vgpu_workload *workload) { + struct i915_request *rq = workload->req; struct intel_vgpu *vgpu = workload->vgpu; struct intel_gvt *gvt = vgpu->gvt; - struct intel_vgpu_submission *s = &vgpu->submission; - struct i915_gem_context 
*shadow_ctx = s->shadow_ctx; - int ring_id = workload->ring_id; - struct drm_i915_gem_object *ctx_obj = - shadow_ctx->__engine[ring_id].state->obj; + struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj; struct execlist_ring_context *shadow_ring_context; struct page *page; void *src; unsigned long context_gpa, context_page_num; int i; - gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id, - workload->ctx_desc.lrca); - - context_page_num = gvt->dev_priv->engine[ring_id]->context_size; + gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id, + workload->ctx_desc.lrca); + context_page_num = rq->engine->context_size; context_page_num = context_page_num >> PAGE_SHIFT; - if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS) + if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS) context_page_num = 19; i = 2; @@ -858,6 +823,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) scheduler->current_workload[ring_id]; struct intel_vgpu *vgpu = workload->vgpu; struct intel_vgpu_submission *s = &vgpu->submission; + struct i915_request *rq; int event; mutex_lock(&gvt->lock); @@ -866,11 +832,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) * switch to make sure request is completed. * For the workload w/o request, directly complete the workload. */ - if (workload->req) { - struct drm_i915_private *dev_priv = - workload->vgpu->gvt->dev_priv; - struct intel_engine_cs *engine = - dev_priv->engine[workload->ring_id]; + rq = fetch_and_zero(&workload->req); + if (rq) { wait_event(workload->shadow_ctx_status_wq, !atomic_read(&workload->shadow_ctx_active)); @@ -886,8 +849,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) workload->status = 0; } - i915_request_put(fetch_and_zero(&workload->req)); - if (!workload->status && !(vgpu->resetting_eng & ENGINE_MASK(ring_id))) { update_guest_context(workload); @@ -896,10 +857,13 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) INTEL_GVT_EVENT_MAX) intel_vgpu_trigger_virtual_event(vgpu, event); } - mutex_lock(&dev_priv->drm.struct_mutex); + /* unpin shadow ctx as the shadow_ctx update is done */ - intel_context_unpin(s->shadow_ctx, engine); - mutex_unlock(&dev_priv->drm.struct_mutex); + mutex_lock(&rq->i915->drm.struct_mutex); + intel_context_unpin(rq->hw_context); + mutex_unlock(&rq->i915->drm.struct_mutex); + + i915_request_put(rq); } gvt_dbg_sched("ring id %d complete workload %p status %d\n", @@ -1270,7 +1234,6 @@ alloc_workload(struct intel_vgpu *vgpu) atomic_set(&workload->shadow_ctx_active, 0); workload->status = -EINPROGRESS; - workload->shadowed = false; workload->vgpu = vgpu; return workload; diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index 6c644782193e..21eddab4a9cd 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -83,7 +83,6 @@ struct intel_vgpu_workload { struct i915_request *req; /* if this workload has been dispatched to i915? 
*/ bool dispatched; - bool shadowed; int status; struct intel_vgpu_mm *shadow_mm; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 34c125e2d90c..e33c380b43e3 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1950,6 +1950,7 @@ struct drm_i915_private { */ struct i915_perf_stream *exclusive_stream; + struct intel_context *pinned_ctx; u32 specific_ctx_id; struct hrtimer poll_check_timer; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index a20f8db5729d..03874b50ada9 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3181,14 +3181,14 @@ void i915_gem_reset(struct drm_i915_private *dev_priv, i915_retire_requests(dev_priv); for_each_engine(engine, dev_priv, id) { - struct i915_gem_context *ctx; + struct intel_context *ce; i915_gem_reset_engine(engine, engine->hangcheck.active_request, stalled_mask & ENGINE_MASK(id)); - ctx = fetch_and_zero(&engine->last_retired_context); - if (ctx) - intel_context_unpin(ctx, engine); + ce = fetch_and_zero(&engine->last_retired_context); + if (ce) + intel_context_unpin(ce); /* * Ostensibily, we always want a context loaded for powersaving, @@ -4897,13 +4897,13 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj) static void assert_kernel_context_is_current(struct drm_i915_private *i915) { - struct i915_gem_context *kernel_context = i915->kernel_context; + struct i915_gem_context *kctx = i915->kernel_context; struct intel_engine_cs *engine; enum intel_engine_id id; for_each_engine(engine, i915, id) { GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request)); - GEM_BUG_ON(engine->last_retired_context != kernel_context); + GEM_BUG_ON(engine->last_retired_context->gem_context != kctx); } } diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 9e70f4dfa703..b69b18ef8120 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -127,14 +127,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx) for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) { struct intel_context *ce = &ctx->__engine[n]; - if (!ce->state) - continue; - - WARN_ON(ce->pin_count); - if (ce->ring) - intel_ring_free(ce->ring); - - __i915_gem_object_release_unless_active(ce->state->obj); + if (ce->ops) + ce->ops->destroy(ce); } kfree(ctx->name); @@ -266,6 +260,7 @@ __create_hw_context(struct drm_i915_private *dev_priv, struct drm_i915_file_private *file_priv) { struct i915_gem_context *ctx; + unsigned int n; int ret; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); @@ -283,6 +278,12 @@ __create_hw_context(struct drm_i915_private *dev_priv, ctx->i915 = dev_priv; ctx->sched.priority = I915_PRIORITY_NORMAL; + for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) { + struct intel_context *ce = &ctx->__engine[n]; + + ce->gem_context = ctx; + } + INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); INIT_LIST_HEAD(&ctx->handles_list); diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h index ace3b129c189..749a4ff566f5 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.h +++ b/drivers/gpu/drm/i915/i915_gem_context.h @@ -45,6 +45,11 @@ struct intel_ring; #define DEFAULT_CONTEXT_HANDLE 0 +struct intel_context_ops { + void (*unpin)(struct intel_context *ce); + void (*destroy)(struct intel_context *ce); +}; + /** * struct i915_gem_context - client state * @@ -144,11 +149,14 @@ struct i915_gem_context { /** engine: 
per-engine logical HW state */ struct intel_context { + struct i915_gem_context *gem_context; struct i915_vma *state; struct intel_ring *ring; u32 *lrc_reg_state; u64 lrc_desc; int pin_count; + + const struct intel_context_ops *ops; } __engine[I915_NUM_ENGINES]; /** ring_size: size for allocating the per-engine ring buffer */ @@ -263,25 +271,22 @@ to_intel_context(struct i915_gem_context *ctx, return &ctx->__engine[engine->id]; } -static inline struct intel_ring * +static inline struct intel_context * intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { return engine->context_pin(engine, ctx); } -static inline void __intel_context_pin(struct i915_gem_context *ctx, - const struct intel_engine_cs *engine) +static inline void __intel_context_pin(struct intel_context *ce) { - struct intel_context *ce = to_intel_context(ctx, engine); - GEM_BUG_ON(!ce->pin_count); ce->pin_count++; } -static inline void intel_context_unpin(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) +static inline void intel_context_unpin(struct intel_context *ce) { - engine->context_unpin(engine, ctx); + GEM_BUG_ON(!ce->ops); + ce->ops->unpin(ce); } /* i915_gem_context.c */ diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 37c9a42654ba..47721437a4c5 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1485,8 +1485,7 @@ static void gem_record_rings(struct i915_gpu_state *error) ee->ctx = i915_error_object_create(i915, - to_intel_context(ctx, - engine)->state); + request->hw_context->state); error->simulated |= i915_gem_context_no_error_capture(ctx); diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 019bd2d073ad..4f0eb84b3c00 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1221,7 +1221,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream) dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id; } else { struct intel_engine_cs *engine = dev_priv->engine[RCS]; - struct intel_ring *ring; + struct intel_context *ce; int ret; ret = i915_mutex_lock_interruptible(&dev_priv->drm); @@ -1234,19 +1234,19 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream) * * NB: implied RCS engine... 
*/ - ring = intel_context_pin(stream->ctx, engine); + ce = intel_context_pin(stream->ctx, engine); mutex_unlock(&dev_priv->drm.struct_mutex); - if (IS_ERR(ring)) - return PTR_ERR(ring); + if (IS_ERR(ce)) + return PTR_ERR(ce); + dev_priv->perf.oa.pinned_ctx = ce; /* * Explicitly track the ID (instead of calling * i915_ggtt_offset() on the fly) considering the difference * with gen8+ and execlists */ - dev_priv->perf.oa.specific_ctx_id = - i915_ggtt_offset(to_intel_context(stream->ctx, engine)->state); + dev_priv->perf.oa.specific_ctx_id = i915_ggtt_offset(ce->state); } return 0; @@ -1262,17 +1262,14 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream) static void oa_put_render_ctx_id(struct i915_perf_stream *stream) { struct drm_i915_private *dev_priv = stream->dev_priv; + struct intel_context *ce; - if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) { - dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID; - } else { - struct intel_engine_cs *engine = dev_priv->engine[RCS]; + dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID; + ce = fetch_and_zero(&dev_priv->perf.oa.pinned_ctx); + if (ce) { mutex_lock(&dev_priv->drm.struct_mutex); - - dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID; - intel_context_unpin(stream->ctx, engine); - + intel_context_unpin(ce); mutex_unlock(&dev_priv->drm.struct_mutex); } } diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index fe8810a6a339..fc499bcbd105 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -383,8 +383,8 @@ static void __retire_engine_request(struct intel_engine_cs *engine, * the subsequent request. */ if (engine->last_retired_context) - intel_context_unpin(engine->last_retired_context, engine); - engine->last_retired_context = rq->gem_context; + intel_context_unpin(engine->last_retired_context); + engine->last_retired_context = rq->hw_context; } static void __retire_engine_upto(struct intel_engine_cs *engine, @@ -456,7 +456,7 @@ static void i915_request_retire(struct i915_request *request) /* Retirement decays the ban score as it is a sign of ctx progress */ atomic_dec_if_positive(&request->gem_context->ban_score); - intel_context_unpin(request->gem_context, request->engine); + intel_context_unpin(request->hw_context); __retire_engine_upto(request->engine, request); @@ -657,7 +657,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) { struct drm_i915_private *i915 = engine->i915; struct i915_request *rq; - struct intel_ring *ring; + struct intel_context *ce; int ret; lockdep_assert_held(&i915->drm.struct_mutex); @@ -681,22 +681,21 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) * GGTT space, so do this first before we reserve a seqno for * ourselves. */ - ring = intel_context_pin(ctx, engine); - if (IS_ERR(ring)) - return ERR_CAST(ring); - GEM_BUG_ON(!ring); + ce = intel_context_pin(ctx, engine); + if (IS_ERR(ce)) + return ERR_CAST(ce); ret = reserve_gt(i915); if (ret) goto err_unpin; - ret = intel_ring_wait_for_space(ring, MIN_SPACE_FOR_ADD_REQUEST); + ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST); if (ret) goto err_unreserve; /* Move our oldest request to the slab-cache (if not in use!) 
*/ - rq = list_first_entry(&ring->request_list, typeof(*rq), ring_link); - if (!list_is_last(&rq->ring_link, &ring->request_list) && + rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link); + if (!list_is_last(&rq->ring_link, &ce->ring->request_list) && i915_request_completed(rq)) i915_request_retire(rq); @@ -761,8 +760,9 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) rq->i915 = i915; rq->engine = engine; rq->gem_context = ctx; - rq->ring = ring; - rq->timeline = ring->timeline; + rq->hw_context = ce; + rq->ring = ce->ring; + rq->timeline = ce->ring->timeline; GEM_BUG_ON(rq->timeline == &engine->timeline); spin_lock_init(&rq->lock); @@ -814,14 +814,14 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) goto err_unwind; /* Keep a second pin for the dual retirement along engine and ring */ - __intel_context_pin(rq->gem_context, engine); + __intel_context_pin(ce); /* Check that we didn't interrupt ourselves with a new request */ GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno); return rq; err_unwind: - rq->ring->emit = rq->head; + ce->ring->emit = rq->head; /* Make sure we didn't add ourselves to external state before freeing */ GEM_BUG_ON(!list_empty(&rq->active_list)); @@ -832,7 +832,7 @@ err_unwind: err_unreserve: unreserve_gt(i915); err_unpin: - intel_context_unpin(ctx, engine); + intel_context_unpin(ce); return ERR_PTR(ret); } @@ -1018,8 +1018,8 @@ i915_request_await_object(struct i915_request *to, void __i915_request_add(struct i915_request *request, bool flush_caches) { struct intel_engine_cs *engine = request->engine; - struct intel_ring *ring = request->ring; struct i915_timeline *timeline = request->timeline; + struct intel_ring *ring = request->ring; struct i915_request *prev; u32 *cs; int err; diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index dddecd9ffd0c..1bbbb7a9fa03 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -95,6 +95,7 @@ struct i915_request { */ struct i915_gem_context *gem_context; struct intel_engine_cs *engine; + struct intel_context *hw_context; struct intel_ring *ring; struct i915_timeline *timeline; struct intel_signal_node signaling; diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 9e618aab6568..26f9f8aab949 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -645,6 +645,12 @@ static int init_phys_status_page(struct intel_engine_cs *engine) return 0; } +static void __intel_context_unpin(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + intel_context_unpin(to_intel_context(ctx, engine)); +} + /** * intel_engines_init_common - initialize cengine state which might require hw access * @engine: Engine to initialize. @@ -658,7 +664,8 @@ static int init_phys_status_page(struct intel_engine_cs *engine) */ int intel_engine_init_common(struct intel_engine_cs *engine) { - struct intel_ring *ring; + struct drm_i915_private *i915 = engine->i915; + struct intel_context *ce; int ret; engine->set_default_submission(engine); @@ -670,18 +677,18 @@ int intel_engine_init_common(struct intel_engine_cs *engine) * be available. To avoid this we always pin the default * context. 
*/ - ring = intel_context_pin(engine->i915->kernel_context, engine); - if (IS_ERR(ring)) - return PTR_ERR(ring); + ce = intel_context_pin(i915->kernel_context, engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); /* * Similarly the preempt context must always be available so that * we can interrupt the engine at any time. */ - if (engine->i915->preempt_context) { - ring = intel_context_pin(engine->i915->preempt_context, engine); - if (IS_ERR(ring)) { - ret = PTR_ERR(ring); + if (i915->preempt_context) { + ce = intel_context_pin(i915->preempt_context, engine); + if (IS_ERR(ce)) { + ret = PTR_ERR(ce); goto err_unpin_kernel; } } @@ -690,7 +697,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine) if (ret) goto err_unpin_preempt; - if (HWS_NEEDS_PHYSICAL(engine->i915)) + if (HWS_NEEDS_PHYSICAL(i915)) ret = init_phys_status_page(engine); else ret = init_status_page(engine); @@ -702,10 +709,11 @@ int intel_engine_init_common(struct intel_engine_cs *engine) err_breadcrumbs: intel_engine_fini_breadcrumbs(engine); err_unpin_preempt: - if (engine->i915->preempt_context) - intel_context_unpin(engine->i915->preempt_context, engine); + if (i915->preempt_context) + __intel_context_unpin(i915->preempt_context, engine); + err_unpin_kernel: - intel_context_unpin(engine->i915->kernel_context, engine); + __intel_context_unpin(i915->kernel_context, engine); return ret; } @@ -718,6 +726,8 @@ err_unpin_kernel: */ void intel_engine_cleanup_common(struct intel_engine_cs *engine) { + struct drm_i915_private *i915 = engine->i915; + intel_engine_cleanup_scratch(engine); if (HWS_NEEDS_PHYSICAL(engine->i915)) @@ -732,9 +742,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) if (engine->default_state) i915_gem_object_put(engine->default_state); - if (engine->i915->preempt_context) - intel_context_unpin(engine->i915->preempt_context, engine); - intel_context_unpin(engine->i915->kernel_context, engine); + if (i915->preempt_context) + __intel_context_unpin(i915->preempt_context, engine); + __intel_context_unpin(i915->kernel_context, engine); i915_timeline_fini(&engine->timeline); } @@ -1007,8 +1017,8 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv) */ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine) { - const struct i915_gem_context * const kernel_context = - engine->i915->kernel_context; + const struct intel_context *kernel_context = + to_intel_context(engine->i915->kernel_context, engine); struct i915_request *rq; lockdep_assert_held(&engine->i915->drm.struct_mutex); @@ -1020,7 +1030,7 @@ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine) */ rq = __i915_gem_active_peek(&engine->timeline.last_request); if (rq) - return rq->gem_context == kernel_context; + return rq->hw_context == kernel_context; else return engine->last_retired_context == kernel_context; } @@ -1107,16 +1117,16 @@ void intel_engines_unpark(struct drm_i915_private *i915) */ void intel_engine_lost_context(struct intel_engine_cs *engine) { - struct i915_gem_context *ctx; + struct intel_context *ce; lockdep_assert_held(&engine->i915->drm.struct_mutex); engine->legacy_active_context = NULL; engine->legacy_active_ppgtt = NULL; - ctx = fetch_and_zero(&engine->last_retired_context); - if (ctx) - intel_context_unpin(ctx, engine); + ce = fetch_and_zero(&engine->last_retired_context); + if (ce) + intel_context_unpin(ce); } bool intel_engine_can_store_dword(struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c 
b/drivers/gpu/drm/i915/intel_guc_submission.c index a432a193f3c4..133367a17863 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -513,9 +513,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq) { struct intel_guc_client *client = guc->execbuf_client; struct intel_engine_cs *engine = rq->engine; - u32 ctx_desc = - lower_32_bits(intel_lr_context_descriptor(rq->gem_context, - engine)); + u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc); u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64); spin_lock(&client->wq_lock); @@ -553,8 +551,8 @@ static void inject_preempt_context(struct work_struct *work) preempt_work[engine->id]); struct intel_guc_client *client = guc->preempt_client; struct guc_stage_desc *stage_desc = __get_stage_desc(client); - u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(client->owner, - engine)); + u32 ctx_desc = lower_32_bits(to_intel_context(client->owner, + engine)->lrc_desc); u32 data[7]; /* @@ -726,7 +724,7 @@ static bool __guc_dequeue(struct intel_engine_cs *engine) struct i915_request *rq, *rn; list_for_each_entry_safe(rq, rn, &p->requests, sched.link) { - if (last && rq->gem_context != last->gem_context) { + if (last && rq->hw_context != last->hw_context) { if (port == last_port) { __list_del_many(&p->requests, &rq->sched.link); diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 1e9cc55d785c..b97c5d4c7877 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -164,7 +164,8 @@ #define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS) static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, - struct intel_engine_cs *engine); + struct intel_engine_cs *engine, + struct intel_context *ce); static void execlists_init_reg_state(u32 *reg_state, struct i915_gem_context *ctx, struct intel_engine_cs *engine, @@ -189,12 +190,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine, !i915_request_completed(last)); } -/** - * intel_lr_context_descriptor_update() - calculate & cache the descriptor - * descriptor for a pinned context - * @ctx: Context to work on - * @engine: Engine the descriptor will be used with - * +/* * The context descriptor encodes various attributes of a context, * including its GTT address and some flags. 
Because it's fairly * expensive to calculate, we'll just do it once and cache the result, @@ -222,9 +218,9 @@ static inline bool need_preempt(const struct intel_engine_cs *engine, */ static void intel_lr_context_descriptor_update(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) + struct intel_engine_cs *engine, + struct intel_context *ce) { - struct intel_context *ce = to_intel_context(ctx, engine); u64 desc; BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH))); @@ -418,8 +414,7 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state) static u64 execlists_update_context(struct i915_request *rq) { - struct intel_context *ce = - to_intel_context(rq->gem_context, rq->engine); + struct intel_context *ce = rq->hw_context; struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt ?: rq->i915->mm.aliasing_ppgtt; u32 *reg_state = ce->lrc_reg_state; @@ -496,14 +491,14 @@ static void execlists_submit_ports(struct intel_engine_cs *engine) execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK); } -static bool ctx_single_port_submission(const struct i915_gem_context *ctx) +static bool ctx_single_port_submission(const struct intel_context *ce) { return (IS_ENABLED(CONFIG_DRM_I915_GVT) && - i915_gem_context_force_single_submission(ctx)); + i915_gem_context_force_single_submission(ce->gem_context)); } -static bool can_merge_ctx(const struct i915_gem_context *prev, - const struct i915_gem_context *next) +static bool can_merge_ctx(const struct intel_context *prev, + const struct intel_context *next) { if (prev != next) return false; @@ -680,8 +675,8 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine) * second request, and so we never need to tell the * hardware about the first. */ - if (last && !can_merge_ctx(rq->gem_context, - last->gem_context)) { + if (last && + !can_merge_ctx(rq->hw_context, last->hw_context)) { /* * If we are on the second port and cannot * combine this request with the last, then we @@ -700,14 +695,14 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine) * the same context (even though a different * request) to the second port. 
*/ - if (ctx_single_port_submission(last->gem_context) || - ctx_single_port_submission(rq->gem_context)) { + if (ctx_single_port_submission(last->hw_context) || + ctx_single_port_submission(rq->hw_context)) { __list_del_many(&p->requests, &rq->sched.link); goto done; } - GEM_BUG_ON(last->gem_context == rq->gem_context); + GEM_BUG_ON(last->hw_context == rq->hw_context); if (submit) port_assign(port, last); @@ -1339,6 +1334,37 @@ static void execlists_schedule(struct i915_request *request, spin_unlock_irq(&engine->timeline.lock); } +static void execlists_context_destroy(struct intel_context *ce) +{ + GEM_BUG_ON(!ce->state); + GEM_BUG_ON(ce->pin_count); + + intel_ring_free(ce->ring); + __i915_gem_object_release_unless_active(ce->state->obj); +} + +static void __execlists_context_unpin(struct intel_context *ce) +{ + intel_ring_unpin(ce->ring); + + ce->state->obj->pin_global--; + i915_gem_object_unpin_map(ce->state->obj); + i915_vma_unpin(ce->state); + + i915_gem_context_put(ce->gem_context); +} + +static void execlists_context_unpin(struct intel_context *ce) +{ + lockdep_assert_held(&ce->gem_context->i915->drm.struct_mutex); + GEM_BUG_ON(ce->pin_count == 0); + + if (--ce->pin_count) + return; + + __execlists_context_unpin(ce); +} + static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma) { unsigned int flags; @@ -1362,21 +1388,15 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma) return i915_vma_pin(vma, 0, GEN8_LR_CONTEXT_ALIGN, flags); } -static struct intel_ring * -execlists_context_pin(struct intel_engine_cs *engine, - struct i915_gem_context *ctx) +static struct intel_context * +__execlists_context_pin(struct intel_engine_cs *engine, + struct i915_gem_context *ctx, + struct intel_context *ce) { - struct intel_context *ce = to_intel_context(ctx, engine); void *vaddr; int ret; - lockdep_assert_held(&ctx->i915->drm.struct_mutex); - - if (likely(ce->pin_count++)) - goto out; - GEM_BUG_ON(!ce->pin_count); /* no overflow please! */ - - ret = execlists_context_deferred_alloc(ctx, engine); + ret = execlists_context_deferred_alloc(ctx, engine, ce); if (ret) goto err; GEM_BUG_ON(!ce->state); @@ -1395,7 +1415,7 @@ execlists_context_pin(struct intel_engine_cs *engine, if (ret) goto unpin_map; - intel_lr_context_descriptor_update(ctx, engine); + intel_lr_context_descriptor_update(ctx, engine, ce); ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; ce->lrc_reg_state[CTX_RING_BUFFER_START+1] = @@ -1404,8 +1424,7 @@ execlists_context_pin(struct intel_engine_cs *engine, ce->state->obj->pin_global++; i915_gem_context_get(ctx); -out: - return ce->ring; + return ce; unpin_map: i915_gem_object_unpin_map(ce->state->obj); @@ -1416,33 +1435,33 @@ err: return ERR_PTR(ret); } -static void execlists_context_unpin(struct intel_engine_cs *engine, - struct i915_gem_context *ctx) +static const struct intel_context_ops execlists_context_ops = { + .unpin = execlists_context_unpin, + .destroy = execlists_context_destroy, +}; + +static struct intel_context * +execlists_context_pin(struct intel_engine_cs *engine, + struct i915_gem_context *ctx) { struct intel_context *ce = to_intel_context(ctx, engine); lockdep_assert_held(&ctx->i915->drm.struct_mutex); - GEM_BUG_ON(ce->pin_count == 0); - if (--ce->pin_count) - return; - - intel_ring_unpin(ce->ring); + if (likely(ce->pin_count++)) + return ce; + GEM_BUG_ON(!ce->pin_count); /* no overflow please! 
*/ - ce->state->obj->pin_global--; - i915_gem_object_unpin_map(ce->state->obj); - i915_vma_unpin(ce->state); + ce->ops = &execlists_context_ops; - i915_gem_context_put(ctx); + return __execlists_context_pin(engine, ctx, ce); } static int execlists_request_alloc(struct i915_request *request) { - struct intel_context *ce = - to_intel_context(request->gem_context, request->engine); int ret; - GEM_BUG_ON(!ce->pin_count); + GEM_BUG_ON(!request->hw_context->pin_count); /* Flush enough space to reduce the likelihood of waiting after * we start building the request - in which case we will just @@ -1956,7 +1975,7 @@ static void execlists_reset(struct intel_engine_cs *engine, * future request will be after userspace has had the opportunity * to recreate its own state. */ - regs = to_intel_context(request->gem_context, engine)->lrc_reg_state; + regs = request->hw_context->lrc_reg_state; if (engine->default_state) { void *defaults; @@ -2327,8 +2346,6 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) engine->reset.finish = execlists_reset_finish; engine->context_pin = execlists_context_pin; - engine->context_unpin = execlists_context_unpin; - engine->request_alloc = execlists_request_alloc; engine->emit_flush = gen8_emit_flush; @@ -2563,7 +2580,7 @@ static void execlists_init_reg_state(u32 *regs, struct drm_i915_private *dev_priv = engine->i915; struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt; u32 base = engine->mmio_base; - bool rcs = engine->id == RCS; + bool rcs = engine->class == RENDER_CLASS; /* A context is actually a big batch buffer with several * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The @@ -2710,10 +2727,10 @@ err_unpin_ctx: } static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) + struct intel_engine_cs *engine, + struct intel_context *ce) { struct drm_i915_gem_object *ctx_obj; - struct intel_context *ce = to_intel_context(ctx, engine); struct i915_vma *vma; uint32_t context_size; struct intel_ring *ring; diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 4ec7d8dd13c8..1593194e930c 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -104,11 +104,4 @@ struct i915_gem_context; void intel_lr_context_resume(struct drm_i915_private *dev_priv); -static inline uint64_t -intel_lr_context_descriptor(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) -{ - return to_intel_context(ctx, engine)->lrc_desc; -} - #endif /* _INTEL_LRC_H_ */ diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 53703012ec75..0c0c9f531e4e 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -571,8 +571,7 @@ static void reset_ring(struct intel_engine_cs *engine, */ if (request) { struct drm_i915_private *dev_priv = request->i915; - struct intel_context *ce = - to_intel_context(request->gem_context, engine); + struct intel_context *ce = request->hw_context; struct i915_hw_ppgtt *ppgtt; if (ce->state) { @@ -1186,7 +1185,31 @@ intel_ring_free(struct intel_ring *ring) kfree(ring); } -static int context_pin(struct intel_context *ce) +static void intel_ring_context_destroy(struct intel_context *ce) +{ + GEM_BUG_ON(ce->pin_count); + + if (ce->state) + __i915_gem_object_release_unless_active(ce->state->obj); +} + +static void intel_ring_context_unpin(struct intel_context *ce) +{ + lockdep_assert_held(&ce->gem_context->i915->drm.struct_mutex); + 
GEM_BUG_ON(ce->pin_count == 0); + + if (--ce->pin_count) + return; + + if (ce->state) { + ce->state->obj->pin_global--; + i915_vma_unpin(ce->state); + } + + i915_gem_context_put(ce->gem_context); +} + +static int __context_pin(struct intel_context *ce) { struct i915_vma *vma = ce->state; int ret; @@ -1275,25 +1298,19 @@ err_obj: return ERR_PTR(err); } -static struct intel_ring * -intel_ring_context_pin(struct intel_engine_cs *engine, - struct i915_gem_context *ctx) +static struct intel_context * +__ring_context_pin(struct intel_engine_cs *engine, + struct i915_gem_context *ctx, + struct intel_context *ce) { - struct intel_context *ce = to_intel_context(ctx, engine); - int ret; - - lockdep_assert_held(&ctx->i915->drm.struct_mutex); - - if (likely(ce->pin_count++)) - goto out; - GEM_BUG_ON(!ce->pin_count); /* no overflow please! */ + int err; if (!ce->state && engine->context_size) { struct i915_vma *vma; vma = alloc_context_vma(engine); if (IS_ERR(vma)) { - ret = PTR_ERR(vma); + err = PTR_ERR(vma); goto err; } @@ -1301,8 +1318,8 @@ intel_ring_context_pin(struct intel_engine_cs *engine, } if (ce->state) { - ret = context_pin(ce); - if (ret) + err = __context_pin(ce); + if (err) goto err; ce->state->obj->pin_global++; @@ -1310,32 +1327,37 @@ intel_ring_context_pin(struct intel_engine_cs *engine, i915_gem_context_get(ctx); -out: /* One ringbuffer to rule them all */ - return engine->buffer; + GEM_BUG_ON(!engine->buffer); + ce->ring = engine->buffer; + + return ce; err: ce->pin_count = 0; - return ERR_PTR(ret); + return ERR_PTR(err); } -static void intel_ring_context_unpin(struct intel_engine_cs *engine, - struct i915_gem_context *ctx) +static const struct intel_context_ops ring_context_ops = { + .unpin = intel_ring_context_unpin, + .destroy = intel_ring_context_destroy, +}; + +static struct intel_context * +intel_ring_context_pin(struct intel_engine_cs *engine, + struct i915_gem_context *ctx) { struct intel_context *ce = to_intel_context(ctx, engine); lockdep_assert_held(&ctx->i915->drm.struct_mutex); - GEM_BUG_ON(ce->pin_count == 0); - if (--ce->pin_count) - return; + if (likely(ce->pin_count++)) + return ce; + GEM_BUG_ON(!ce->pin_count); /* no overflow please! 
*/ - if (ce->state) { - ce->state->obj->pin_global--; - i915_vma_unpin(ce->state); - } + ce->ops = &ring_context_ops; - i915_gem_context_put(ctx); + return __ring_context_pin(engine, ctx, ce); } static int intel_init_ring_buffer(struct intel_engine_cs *engine) @@ -1346,10 +1368,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) intel_engine_setup_common(engine); - err = intel_engine_init_common(engine); - if (err) - goto err; - timeline = i915_timeline_create(engine->i915, engine->name); if (IS_ERR(timeline)) { err = PTR_ERR(timeline); @@ -1371,8 +1389,14 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) GEM_BUG_ON(engine->buffer); engine->buffer = ring; + err = intel_engine_init_common(engine); + if (err) + goto err_unpin; + return 0; +err_unpin: + intel_ring_unpin(ring); err_ring: intel_ring_free(ring); err: @@ -1458,7 +1482,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) *cs++ = MI_NOOP; *cs++ = MI_SET_CONTEXT; - *cs++ = i915_ggtt_offset(to_intel_context(rq->gem_context, engine)->state) | flags; + *cs++ = i915_ggtt_offset(rq->hw_context->state) | flags; /* * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP * WaMiSetContext_Hang:snb,ivb,vlv @@ -1549,7 +1573,7 @@ static int switch_context(struct i915_request *rq) hw_flags = MI_FORCE_RESTORE; } - if (to_intel_context(to_ctx, engine)->state && + if (rq->hw_context->state && (to_ctx != from_ctx || hw_flags & MI_FORCE_RESTORE)) { GEM_BUG_ON(engine->id != RCS); @@ -1597,7 +1621,7 @@ static int ring_request_alloc(struct i915_request *request) { int ret; - GEM_BUG_ON(!to_intel_context(request->gem_context, request->engine)->pin_count); + GEM_BUG_ON(!request->hw_context->pin_count); /* Flush enough space to reduce the likelihood of waiting after * we start building the request - in which case we will just @@ -2028,8 +2052,6 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, engine->reset.finish = reset_finish; engine->context_pin = intel_ring_context_pin; - engine->context_unpin = intel_ring_context_unpin; - engine->request_alloc = ring_request_alloc; engine->emit_breadcrumb = i9xx_emit_breadcrumb; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 2b16185e36c4..20c4e13efc0d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -436,10 +436,9 @@ struct intel_engine_cs { void (*set_default_submission)(struct intel_engine_cs *engine); - struct intel_ring *(*context_pin)(struct intel_engine_cs *engine, - struct i915_gem_context *ctx); - void (*context_unpin)(struct intel_engine_cs *engine, - struct i915_gem_context *ctx); + struct intel_context *(*context_pin)(struct intel_engine_cs *engine, + struct i915_gem_context *ctx); + int (*request_alloc)(struct i915_request *rq); int (*init_context)(struct i915_request *rq); @@ -555,7 +554,7 @@ struct intel_engine_cs { * to the kernel context and trash it as the save may not happen * before the hardware is powered down. */ - struct i915_gem_context *last_retired_context; + struct intel_context *last_retired_context; /* We track the current MI_SET_CONTEXT in order to eliminate * redudant context switches. 
This presumes that requests are not diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c index 501becc47c0c..8904f1ce64e3 100644 --- a/drivers/gpu/drm/i915/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/selftests/mock_context.c @@ -30,6 +30,7 @@ mock_context(struct drm_i915_private *i915, const char *name) { struct i915_gem_context *ctx; + unsigned int n; int ret; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); @@ -43,6 +44,12 @@ mock_context(struct drm_i915_private *i915, INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); INIT_LIST_HEAD(&ctx->handles_list); + for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) { + struct intel_context *ce = &ctx->__engine[n]; + + ce->gem_context = ctx; + } + ret = ida_simple_get(&i915->contexts.hw_ida, 0, MAX_CONTEXT_HW_ID, GFP_KERNEL); if (ret < 0) diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c index 26bf29d97007..33eddfc1f8ce 100644 --- a/drivers/gpu/drm/i915/selftests/mock_engine.c +++ b/drivers/gpu/drm/i915/selftests/mock_engine.c @@ -72,25 +72,37 @@ static void hw_delay_complete(struct timer_list *t) spin_unlock(&engine->hw_lock); } -static struct intel_ring * -mock_context_pin(struct intel_engine_cs *engine, - struct i915_gem_context *ctx) +static void mock_context_unpin(struct intel_context *ce) { - struct intel_context *ce = to_intel_context(ctx, engine); + if (--ce->pin_count) + return; - if (!ce->pin_count++) - i915_gem_context_get(ctx); + i915_gem_context_put(ce->gem_context); +} - return engine->buffer; +static void mock_context_destroy(struct intel_context *ce) +{ + GEM_BUG_ON(ce->pin_count); } -static void mock_context_unpin(struct intel_engine_cs *engine, - struct i915_gem_context *ctx) +static const struct intel_context_ops mock_context_ops = { + .unpin = mock_context_unpin, + .destroy = mock_context_destroy, +}; + +static struct intel_context * +mock_context_pin(struct intel_engine_cs *engine, + struct i915_gem_context *ctx) { struct intel_context *ce = to_intel_context(ctx, engine); - if (!--ce->pin_count) - i915_gem_context_put(ctx); + if (!ce->pin_count++) { + i915_gem_context_get(ctx); + ce->ring = engine->buffer; + ce->ops = &mock_context_ops; + } + + return ce; } static int mock_request_alloc(struct i915_request *request) @@ -185,7 +197,6 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, engine->base.status_page.page_addr = (void *)(engine + 1); engine->base.context_pin = mock_context_pin; - engine->base.context_unpin = mock_context_unpin; engine->base.request_alloc = mock_request_alloc; engine->base.emit_flush = mock_emit_flush; engine->base.emit_breadcrumb = mock_emit_breadcrumb; @@ -238,11 +249,13 @@ void mock_engine_free(struct intel_engine_cs *engine) { struct mock_engine *mock = container_of(engine, typeof(*mock), base); + struct intel_context *ce; GEM_BUG_ON(timer_pending(&mock->hw_delay)); - if (engine->last_retired_context) - intel_context_unpin(engine->last_retired_context, engine); + ce = fetch_and_zero(&engine->last_retired_context); + if (ce) + intel_context_unpin(ce); mock_ring_free(engine->buffer); -- cgit From 0606035fcab6b2630b0d95a5b31e235ec9ad5642 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 31 May 2018 09:22:44 +0100 Subject: drm/i915: "Race-to-idle" after switching to the kernel context During suspend we want to flush out all active contexts and their rendering. 
To do so we queue a request from the kernel's context; once we know that request is done, we know the GPU is completely idle. To speed up that switch, bump the GPU clocks. Switching to the kernel context prior to idling is also used to enforce a barrier before changing OA properties, and when evicting active rendering from the global GTT. All are cases where we do want to race-to-idle. v2: Limit the boosting to only the switch before suspend. v3: Limit it to the wait-for-idle on suspend. Signed-off-by: Chris Wilson Cc: David Weinehall Cc: Mika Kuoppala Tested-by: David Weinehall #v1 Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180531082246.9763-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.c | 27 +++++++++++++++++++++++++-- drivers/gpu/drm/i915/i915_request.h | 1 + 2 files changed, 26 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_request.h') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 173d1e4ad963..b312ac006d24 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3708,7 +3708,29 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) static int wait_for_timeline(struct i915_timeline *tl, unsigned int flags) { - return i915_gem_active_wait(&tl->last_request, flags); + struct i915_request *rq; + long ret; + + rq = i915_gem_active_get_unlocked(&tl->last_request); + if (!rq) + return 0; + + /* + * "Race-to-idle". + * + * Switching to the kernel context is often used a synchronous + * step prior to idling, e.g. in suspend for flushing all + * current operations to memory before sleeping. These we + * want to complete as quickly as possible to avoid prolonged + * stalls, so allow the gpu to boost to maximum clocks. + */ + if (flags & I915_WAIT_FOR_IDLE_BOOST) + gen6_rps_boost(rq, NULL); + + ret = i915_request_wait(rq, flags, MAX_SCHEDULE_TIMEOUT); + i915_request_put(rq); + + return ret < 0 ? ret : 0; } static int wait_for_engines(struct drm_i915_private *i915) @@ -4983,7 +5005,8 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv) ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED); + I915_WAIT_LOCKED | + I915_WAIT_FOR_IDLE_BOOST); if (ret && ret != -EIO) goto err_unlock; diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 1bbbb7a9fa03..491ff81d0fea 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -267,6 +267,7 @@ long i915_request_wait(struct i915_request *rq, #define I915_WAIT_INTERRUPTIBLE BIT(0) #define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */ #define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */ +#define I915_WAIT_FOR_IDLE_BOOST BIT(3) static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine); -- cgit From b3ee09a4de33259a89d30aca6b2ebb0bc26640af Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 11 Jun 2018 12:08:44 +0100 Subject: drm/i915/ringbuffer: Fix context restore upon reset The discovery when trying to enable full-ppgtt was that we were completely failing to load both the mm and the context following the reset. Although we were performing mmio to set the PP_DIR (per-process GTT) and CCID (context), these were having no effect (the assumption was that this would trigger reload of the context and restore the page tables). It was not until we performed the LRI + MI_SET_CONTEXT in a following context switch that anything would occur.
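The in-ring replacement for those mmio writes is a pair of register loads emitted through the command streamer. A minimal sketch of such a load, modelled on the removed gen7_mm_switch() shown below (the helper name and exact dword layout here are illustrative assumptions, not taken from the diff):

	static int load_pd_dir(struct i915_request *rq,
			       const struct i915_hw_ppgtt *ppgtt)
	{
		const struct intel_engine_cs *engine = rq->engine;
		u32 *cs;

		/* Two register writes, three dwords each */
		cs = intel_ring_begin(rq, 6);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		/* Load PP_DIR from the CS so the write lands even after a reset */
		*cs++ = MI_LOAD_REGISTER_IMM(1);
		*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
		*cs++ = PP_DIR_DCLV_2G;

		*cs++ = MI_LOAD_REGISTER_IMM(1);
		*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
		*cs++ = ppgtt->pd.base.ggtt_offset << 10;

		intel_ring_advance(rq, cs);
		return 0;
	}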
Since we are then required to reset the context image and PP_DIR using CS commands, we place those commands into every batch. The hardware should recognise the no-ops and eliminate the expensive context loads, but we still have to pay the cost of using cross-powerwell register writes. In practice, this has no effect on actual context switch times, and only adds a few hundred nanoseconds to no-op switches. We can improve the latter by eliminating the w/a around known no-op switches, but there is an ulterior motive to keeping them. Always emitting the context switch at the beginning of the request (and relying on HW to skip unneeded switches) does have one key advantage. Should we implement request reordering on Haswell, we will not know in advance what the previously executing context was on the GPU and so we would not be able to elide the MI_SET_CONTEXT commands ourselves and always have to emit them. Having our hand forced now actually prepares us for later. Now since that context and mm follow the request, we no longer (and not for a long time since requests took over!) require a trace point to tell when we write the switch into the ring, since it is always emitted. (This is even more important when you remember that simply writing into the ring bears no relation to the current mm.) v2: Sandybridge has to agree to use LRI as well. Testcase: igt/drv_selftests/live_hangcheck Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Cc: Mika Kuoppala Cc: Matthew Auld Cc: Tvrtko Ursulin Reviewed-by: Joonas Lahtinen Link: https://patchwork.freedesktop.org/patch/msgid/20180611110845.31890-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 45 ------------ drivers/gpu/drm/i915/i915_gem_gtt.h | 2 - drivers/gpu/drm/i915/i915_request.c | 2 + drivers/gpu/drm/i915/i915_request.h | 3 + drivers/gpu/drm/i915/i915_trace.h | 33 --------- drivers/gpu/drm/i915/intel_engine_cs.c | 3 - drivers/gpu/drm/i915/intel_ringbuffer.c | 125 ++++++++++++++++---------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 9 --- 8 files changed, 66 insertions(+), 156 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_request.h') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 7ccfdbc8f9b4..ac75e0c5735c 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1712,45 +1712,6 @@ static void gen6_write_page_range(struct i915_hw_ppgtt *ppgtt, wmb(); } -static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt) -{ - GEM_BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f); - return ppgtt->pd.base.ggtt_offset << 10; -} - -static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, - struct i915_request *rq) -{ - struct intel_engine_cs *engine = rq->engine; - u32 *cs; - - /* NB: TLBs must be flushed and invalidated before a switch */ - cs = intel_ring_begin(rq, 6); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - *cs++ = MI_LOAD_REGISTER_IMM(2); - *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine)); - *cs++ = PP_DIR_DCLV_2G; - *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine)); - *cs++ = get_pd_offset(ppgtt); - *cs++ = MI_NOOP; - intel_ring_advance(rq, cs); - - return 0; -} - -static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt, - struct i915_request *rq) -{ - struct intel_engine_cs *engine = rq->engine; - struct drm_i915_private *dev_priv = rq->i915; - - I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G); - I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt)); - return 0; -} - static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv) { struct
intel_engine_cs *engine; @@ -2024,12 +1985,6 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) ppgtt->vm.dma = &i915->drm.pdev->dev; ppgtt->vm.pte_encode = ggtt->vm.pte_encode; - if (IS_GEN6(i915)) - ppgtt->switch_mm = gen6_mm_switch; - else if (IS_GEN7(i915)) - ppgtt->switch_mm = gen7_mm_switch; - else - BUG(); err = gen6_ppgtt_alloc(ppgtt); if (err) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 16307ba7e303..e70f6abcd0f2 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -406,8 +406,6 @@ struct i915_hw_ppgtt { gen6_pte_t __iomem *pd_addr; - int (*switch_mm)(struct i915_hw_ppgtt *ppgtt, - struct i915_request *rq); void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m); }; diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index f187250e60c6..9092f5464c24 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -817,6 +817,8 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) /* Keep a second pin for the dual retirement along engine and ring */ __intel_context_pin(ce); + rq->infix = rq->ring->emit; /* end of header; start of user payload */ + /* Check that we didn't interrupt ourselves with a new request */ GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno); return rq; diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 491ff81d0fea..0e9aba53d0e4 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -134,6 +134,9 @@ struct i915_request { /** Position in the ring of the start of the request */ u32 head; + /** Position in the ring of the start of the user packets */ + u32 infix; + /** * Position in the ring of the start of the postfix. * This is required to calculate the maximum available ring space diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 1472f48ab2e8..b50c6b829715 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -973,39 +973,6 @@ DEFINE_EVENT(i915_context, i915_context_free, TP_ARGS(ctx) ); -/** - * DOC: switch_mm tracepoint - * - * This tracepoint allows tracking of the mm switch, which is an important point - * in the lifetime of the vm in the legacy submission path. This tracepoint is - * called only if full ppgtt is enabled. - */ -TRACE_EVENT(switch_mm, - TP_PROTO(struct intel_engine_cs *engine, struct i915_gem_context *to), - - TP_ARGS(engine, to), - - TP_STRUCT__entry( - __field(u16, class) - __field(u16, instance) - __field(struct i915_gem_context *, to) - __field(struct i915_address_space *, vm) - __field(u32, dev) - ), - - TP_fast_assign( - __entry->class = engine->uabi_class; - __entry->instance = engine->instance; - __entry->to = to; - __entry->vm = to->ppgtt ? 
&to->ppgtt->vm : NULL; - __entry->dev = engine->i915->drm.primary->index; - ), - - TP_printk("dev=%u, engine=%u:%u, ctx=%p, ctx_vm=%p", - __entry->dev, __entry->class, __entry->instance, __entry->to, - __entry->vm) -); - #endif /* _I915_TRACE_H_ */ /* This part must be outside protection */ diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 2ec2e60dc670..d1cf8b4926ab 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -1168,9 +1168,6 @@ void intel_engine_lost_context(struct intel_engine_cs *engine) lockdep_assert_held(&engine->i915->drm.struct_mutex); - engine->legacy_active_context = NULL; - engine->legacy_active_ppgtt = NULL; - ce = fetch_and_zero(&engine->last_retired_context); if (ce) intel_context_unpin(ce); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 5bc53a5f4504..d72a6a5ff3ac 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -541,11 +541,23 @@ static struct i915_request *reset_prepare(struct intel_engine_cs *engine) return i915_gem_find_active_request(engine); } -static void reset_ring(struct intel_engine_cs *engine, - struct i915_request *request) +static void skip_request(struct i915_request *rq) { - GEM_TRACE("%s seqno=%x\n", - engine->name, request ? request->global_seqno : 0); + void *vaddr = rq->ring->vaddr; + u32 head; + + head = rq->infix; + if (rq->postfix < head) { + memset32(vaddr + head, MI_NOOP, + (rq->ring->size - head) / sizeof(u32)); + head = 0; + } + memset32(vaddr + head, MI_NOOP, (rq->postfix - head) / sizeof(u32)); +} + +static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq) +{ + GEM_TRACE("%s seqno=%x\n", engine->name, rq ? rq->global_seqno : 0); /* * RC6 must be prevented until the reset is complete and the engine @@ -569,43 +581,11 @@ static void reset_ring(struct intel_engine_cs *engine, * If the request was innocent, we try to replay the request with * the restored context. */ - if (request) { - struct drm_i915_private *dev_priv = request->i915; - struct intel_context *ce = request->hw_context; - struct i915_hw_ppgtt *ppgtt; - - if (ce->state) { - I915_WRITE(CCID, - i915_ggtt_offset(ce->state) | - BIT(8) /* must be set! 
*/ | - CCID_EXTENDED_STATE_SAVE | - CCID_EXTENDED_STATE_RESTORE | - CCID_EN); - } - - ppgtt = request->gem_context->ppgtt ?: engine->i915->mm.aliasing_ppgtt; - if (ppgtt) { - u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10; - - I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G); - I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset); - - /* Wait for the PD reload to complete */ - if (intel_wait_for_register(dev_priv, - RING_PP_DIR_BASE(engine), - BIT(0), 0, - 10)) - DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n"); - - ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); - } - + if (rq) { /* If the rq hung, jump to its breadcrumb and skip the batch */ - if (request->fence.error == -EIO) - request->ring->head = request->postfix; - } else { - engine->legacy_active_context = NULL; - engine->legacy_active_ppgtt = NULL; + rq->ring->head = intel_ring_wrap(rq->ring, rq->head); + if (rq->fence.error == -EIO) + skip_request(rq); } } @@ -1446,6 +1426,29 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv) intel_ring_reset(engine->buffer, 0); } +static int load_pd_dir(struct i915_request *rq, + const struct i915_hw_ppgtt *ppgtt) +{ + const struct intel_engine_cs * const engine = rq->engine; + u32 *cs; + + cs = intel_ring_begin(rq, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_LOAD_REGISTER_IMM(1); + *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine)); + *cs++ = PP_DIR_DCLV_2G; + + *cs++ = MI_LOAD_REGISTER_IMM(1); + *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine)); + *cs++ = ppgtt->pd.base.ggtt_offset << 10; + + intel_ring_advance(rq, cs); + + return 0; +} + static inline int mi_set_context(struct i915_request *rq, u32 flags) { struct drm_i915_private *i915 = rq->i915; @@ -1590,31 +1593,28 @@ static int remap_l3(struct i915_request *rq, int slice) static int switch_context(struct i915_request *rq) { struct intel_engine_cs *engine = rq->engine; - struct i915_gem_context *to_ctx = rq->gem_context; - struct i915_hw_ppgtt *to_mm = - to_ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; - struct i915_gem_context *from_ctx = engine->legacy_active_context; - struct i915_hw_ppgtt *from_mm = engine->legacy_active_ppgtt; + struct i915_gem_context *ctx = rq->gem_context; + struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; + unsigned int unwind_mm = 0; u32 hw_flags = 0; int ret, i; lockdep_assert_held(&rq->i915->drm.struct_mutex); GEM_BUG_ON(HAS_EXECLISTS(rq->i915)); - if (to_mm != from_mm || - (to_mm && intel_engine_flag(engine) & to_mm->pd_dirty_rings)) { - trace_switch_mm(engine, to_ctx); - ret = to_mm->switch_mm(to_mm, rq); + if (ppgtt) { + ret = load_pd_dir(rq, ppgtt); if (ret) goto err; - to_mm->pd_dirty_rings &= ~intel_engine_flag(engine); - engine->legacy_active_ppgtt = to_mm; - hw_flags = MI_FORCE_RESTORE; + if (intel_engine_flag(engine) & ppgtt->pd_dirty_rings) { + unwind_mm = intel_engine_flag(engine); + ppgtt->pd_dirty_rings &= ~unwind_mm; + hw_flags = MI_FORCE_RESTORE; + } } - if (rq->hw_context->state && - (to_ctx != from_ctx || hw_flags & MI_FORCE_RESTORE)) { + if (rq->hw_context->state) { GEM_BUG_ON(engine->id != RCS); /* @@ -1624,35 +1624,32 @@ static int switch_context(struct i915_request *rq) * as nothing actually executes using the kernel context; it * is purely used for flushing user contexts. 
*/ - if (i915_gem_context_is_kernel(to_ctx)) + if (i915_gem_context_is_kernel(ctx)) hw_flags = MI_RESTORE_INHIBIT; ret = mi_set_context(rq, hw_flags); if (ret) goto err_mm; - - engine->legacy_active_context = to_ctx; } - if (to_ctx->remap_slice) { + if (ctx->remap_slice) { for (i = 0; i < MAX_L3_SLICES; i++) { - if (!(to_ctx->remap_slice & BIT(i))) + if (!(ctx->remap_slice & BIT(i))) continue; ret = remap_l3(rq, i); if (ret) - goto err_ctx; + goto err_mm; } - to_ctx->remap_slice = 0; + ctx->remap_slice = 0; } return 0; -err_ctx: - engine->legacy_active_context = from_ctx; err_mm: - engine->legacy_active_ppgtt = from_mm; + if (unwind_mm) + ppgtt->pd_dirty_rings |= unwind_mm; err: return ret; } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index acef385c4c80..b44c67849749 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -557,15 +557,6 @@ struct intel_engine_cs { */ struct intel_context *last_retired_context; - /* We track the current MI_SET_CONTEXT in order to eliminate - * redudant context switches. This presumes that requests are not - * reordered! Or when they are the tracking is updated along with - * the emission of individual requests into the legacy command - * stream (ring). - */ - struct i915_gem_context *legacy_active_context; - struct i915_hw_ppgtt *legacy_active_ppgtt; - /* status_notifier: list of callbacks for context-switch changes */ struct atomic_notifier_head context_status_notifier; -- cgit From 697b9a8714cb4631fd0526b3c78955d5422c24ba Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 12 Jun 2018 11:51:35 +0100 Subject: drm/i915: Make closing request flush mandatory For symmetry, simplicity and to ensure the request is always truly idle upon its completion, always emit the closing flush prior to emitting the request breadcrumb. Previously, we would only emit the flush if we had started a user batch, but this just leaves all the other paths open to speculation (do they affect the GPU caches or not?). With mm switching, a key requirement is that the GPU is flushed and invalidated beforehand, so for absolute safety, we want that closing flush to be mandatory.
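In code terms the end state is small; a condensed sketch of the new i915_request_add() tail (mirroring the diff below; breadcrumb and scheduling bookkeeping elided):

/*
 * Tail of i915_request_add() after this change: the closing flush
 * is emitted unconditionally, so every completed request is known
 * to have flushed its writes to memory. Condensed from the diff.
 */
void i915_request_add(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;

	/* Release the space reserved for the breadcrumb. */
	request->reserved_space = 0;

	/* No flush_caches parameter any more: always flush. */
	engine->emit_flush(request, EMIT_FLUSH);

	/* ... record the breadcrumb position and submit, as before ... */
}

Callers that previously chose between __i915_request_add(rq, true) and __i915_request_add(rq, false) now all call plain i915_request_add(rq), as the hunks below show.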
Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: https://patchwork.freedesktop.org/patch/msgid/20180612105135.4459-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.c | 4 ++-- drivers/gpu/drm/i915/i915_gem_context.c | 9 +-------- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 4 ++-- drivers/gpu/drm/i915/i915_request.c | 18 ++---------------- drivers/gpu/drm/i915/i915_request.h | 4 +--- drivers/gpu/drm/i915/selftests/huge_pages.c | 2 +- drivers/gpu/drm/i915/selftests/i915_gem_coherency.c | 4 ++-- drivers/gpu/drm/i915/selftests/i915_gem_context.c | 4 ++-- drivers/gpu/drm/i915/selftests/i915_request.c | 2 +- drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 16 ++++++++-------- drivers/gpu/drm/i915/selftests/intel_lrc.c | 2 +- drivers/gpu/drm/i915/selftests/intel_workarounds.c | 2 +- 12 files changed, 24 insertions(+), 47 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_request.h') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 93efd92362db..8dd4d35655af 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3213,7 +3213,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv, rq = i915_request_alloc(engine, dev_priv->kernel_context); if (!IS_ERR(rq)) - __i915_request_add(rq, false); + i915_request_add(rq); } } @@ -5332,7 +5332,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915) if (engine->init_context) err = engine->init_context(rq); - __i915_request_add(rq, true); + i915_request_add(rq); if (err) goto err_active; } diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index b2c7ac1b074d..ef6ea4bcd773 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -700,14 +700,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915) i915_timeline_sync_set(rq->timeline, &prev->fence); } - /* - * Force a flush after the switch to ensure that all rendering - * and operations prior to switching to the kernel context hits - * memory. This should be guaranteed by the previous request, - * but an extra layer of paranoia before we declare the system - * idle (on suspend etc) is advisable! - */ - __i915_request_add(rq, true); + i915_request_add(rq); } return 0; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 2d2eb3075960..60dc2a865f5f 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -921,7 +921,7 @@ static void reloc_gpu_flush(struct reloc_cache *cache) i915_gem_object_unpin_map(cache->rq->batch->obj); i915_gem_chipset_flush(cache->rq->i915); - __i915_request_add(cache->rq, true); + i915_request_add(cache->rq); cache->rq = NULL; } @@ -2438,7 +2438,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, trace_i915_request_queue(eb.request, eb.batch_flags); err = eb_submit(&eb); err_request: - __i915_request_add(eb.request, err == 0); + i915_request_add(eb.request); add_to_client(eb.request, file); if (fences) diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 9092f5464c24..e1dbb544046f 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1018,14 +1018,13 @@ i915_request_await_object(struct i915_request *to, * request is not being tracked for completion but the work itself is * going to happen on the hardware. This would be a Bad Thing(tm). 
*/ -void __i915_request_add(struct i915_request *request, bool flush_caches) +void i915_request_add(struct i915_request *request) { struct intel_engine_cs *engine = request->engine; struct i915_timeline *timeline = request->timeline; struct intel_ring *ring = request->ring; struct i915_request *prev; u32 *cs; - int err; GEM_TRACE("%s fence %llx:%d\n", engine->name, request->fence.context, request->fence.seqno); @@ -1046,20 +1045,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches) * know that it is time to use that space up. */ request->reserved_space = 0; - - /* - * Emit any outstanding flushes - execbuf can fail to emit the flush - * after having emitted the batchbuffer command. Hence we need to fix - * things up similar to emitting the lazy request. The difference here - * is that the flush _must_ happen before the next request, no matter - * what. - */ - if (flush_caches) { - err = engine->emit_flush(request, EMIT_FLUSH); - - /* Not allowed to fail! */ - WARN(err, "engine->emit_flush() failed: %d!\n", err); - } + engine->emit_flush(request, EMIT_FLUSH); /* * Record the position of the start of the breadcrumb so that diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 0e9aba53d0e4..7ee220ded9c9 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -253,9 +253,7 @@ int i915_request_await_object(struct i915_request *to, int i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence); -void __i915_request_add(struct i915_request *rq, bool flush_caches); -#define i915_request_add(rq) \ - __i915_request_add(rq, false) +void i915_request_add(struct i915_request *rq); void __i915_request_submit(struct i915_request *request); void i915_request_submit(struct i915_request *request); diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c index 7846ea4a99bc..fbe4324116d7 100644 --- a/drivers/gpu/drm/i915/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c @@ -1003,7 +1003,7 @@ static int gpu_write(struct i915_vma *vma, reservation_object_unlock(vma->resv); err_request: - __i915_request_add(rq, err == 0); + i915_request_add(rq); return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c index 340a98c0c804..a4900091ae3d 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c @@ -199,7 +199,7 @@ static int gpu_set(struct drm_i915_gem_object *obj, cs = intel_ring_begin(rq, 4); if (IS_ERR(cs)) { - __i915_request_add(rq, false); + i915_request_add(rq); i915_vma_unpin(vma); return PTR_ERR(cs); } @@ -229,7 +229,7 @@ static int gpu_set(struct drm_i915_gem_object *obj, reservation_object_add_excl_fence(obj->resv, &rq->fence); reservation_object_unlock(obj->resv); - __i915_request_add(rq, true); + i915_request_add(rq); return 0; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c index 708e8d721448..836f1af8b833 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c @@ -182,12 +182,12 @@ static int gpu_fill(struct drm_i915_gem_object *obj, reservation_object_add_excl_fence(obj->resv, &rq->fence); reservation_object_unlock(obj->resv); - __i915_request_add(rq, true); + i915_request_add(rq); return 0; err_request: - __i915_request_add(rq, false); + 
i915_request_add(rq); err_batch: i915_vma_unpin(batch); err_vma: diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index a3a89aadeccb..f5d00332bb31 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -466,7 +466,7 @@ empty_request(struct intel_engine_cs *engine, goto out_request; out_request: - __i915_request_add(request, err == 0); + i915_request_add(request); return err ? ERR_PTR(err) : request; } diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index 390a157b37c3..fe7d3190ebfe 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c @@ -245,7 +245,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) err = emit_recurse_batch(h, rq); if (err) { - __i915_request_add(rq, false); + i915_request_add(rq); return ERR_PTR(err); } @@ -318,7 +318,7 @@ static int igt_hang_sanitycheck(void *arg) *h.batch = MI_BATCH_BUFFER_END; i915_gem_chipset_flush(i915); - __i915_request_add(rq, true); + i915_request_add(rq); timeout = i915_request_wait(rq, I915_WAIT_LOCKED, @@ -464,7 +464,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active) } i915_request_get(rq); - __i915_request_add(rq, true); + i915_request_add(rq); mutex_unlock(&i915->drm.struct_mutex); if (!wait_until_running(&h, rq)) { @@ -742,7 +742,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915, } i915_request_get(rq); - __i915_request_add(rq, true); + i915_request_add(rq); mutex_unlock(&i915->drm.struct_mutex); if (!wait_until_running(&h, rq)) { @@ -942,7 +942,7 @@ static int igt_wait_reset(void *arg) } i915_request_get(rq); - __i915_request_add(rq, true); + i915_request_add(rq); if (!wait_until_running(&h, rq)) { struct drm_printer p = drm_info_printer(i915->drm.dev); @@ -1037,7 +1037,7 @@ static int igt_reset_queue(void *arg) } i915_request_get(prev); - __i915_request_add(prev, true); + i915_request_add(prev); count = 0; do { @@ -1051,7 +1051,7 @@ static int igt_reset_queue(void *arg) } i915_request_get(rq); - __i915_request_add(rq, true); + i915_request_add(rq); /* * XXX We don't handle resetting the kernel context @@ -1184,7 +1184,7 @@ static int igt_handle_error(void *arg) } i915_request_get(rq); - __i915_request_add(rq, true); + i915_request_add(rq); if (!wait_until_running(&h, rq)) { struct drm_printer p = drm_info_printer(i915->drm.dev); diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c index 0b6da08c8cae..ea27c7cfbf96 100644 --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c @@ -155,7 +155,7 @@ spinner_create_request(struct spinner *spin, err = emit_recurse_batch(spin, rq, arbitration_command); if (err) { - __i915_request_add(rq, false); + i915_request_add(rq); return ERR_PTR(err); } diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c index f1cfb0fb6bea..e1ea2d2bedd2 100644 --- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c +++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c @@ -75,7 +75,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) i915_gem_object_get(result); i915_gem_object_set_active_reference(result); - __i915_request_add(rq, true); + i915_request_add(rq); i915_vma_unpin(vma); return result; -- cgit From 
6dd7526f6f6c73961eecec8a4b9b717d414010f8 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 6 Jul 2018 11:39:43 +0100 Subject: drm/i915: Export i915_request_skip() In the next patch, we will want to start skipping requests on failing to complete their payloads. So export the utility function currently used to make requests inoperable following a failed gpu reset. Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20180706103947.15919-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.c | 25 +++----------------------- drivers/gpu/drm/i915/i915_request.c | 21 +++++++++++++++++++++ drivers/gpu/drm/i915/i915_request.h | 2 ++ 3 files changed, 26 insertions(+), 22 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_request.h') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index be63e8bbb6d2..2e05cf114083 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3085,25 +3085,6 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv) return err; } -static void skip_request(struct i915_request *request) -{ - void *vaddr = request->ring->vaddr; - u32 head; - - /* As this request likely depends on state from the lost - * context, clear out all the user operations leaving the - * breadcrumb at the end (so we get the fence notifications). - */ - head = request->head; - if (request->postfix < head) { - memset(vaddr + head, 0, request->ring->size - head); - head = 0; - } - memset(vaddr + head, 0, request->postfix - head); - - dma_fence_set_error(&request->fence, -EIO); -} - static void engine_skip_context(struct i915_request *request) { struct intel_engine_cs *engine = request->engine; @@ -3118,10 +3099,10 @@ static void engine_skip_context(struct i915_request *request) list_for_each_entry_continue(request, &engine->timeline.requests, link) if (request->gem_context == hung_ctx) - skip_request(request); + i915_request_skip(request, -EIO); list_for_each_entry(request, &timeline->requests, link) - skip_request(request); + i915_request_skip(request, -EIO); spin_unlock(&timeline->lock); spin_unlock_irqrestore(&engine->timeline.lock, flags); @@ -3164,7 +3145,7 @@ i915_gem_reset_request(struct intel_engine_cs *engine, if (stalled) { i915_gem_context_mark_guilty(request->gem_context); - skip_request(request); + i915_request_skip(request, -EIO); /* If this context is now banned, skip all pending requests. */ if (i915_gem_context_is_banned(request->gem_context)) diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index a2f7e9358450..7ae08b68121e 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1013,6 +1013,27 @@ i915_request_await_object(struct i915_request *to, return ret; } +void i915_request_skip(struct i915_request *rq, int error) +{ + void *vaddr = rq->ring->vaddr; + u32 head; + + GEM_BUG_ON(!IS_ERR_VALUE((long)error)); + dma_fence_set_error(&rq->fence, error); + + /* + * As this request likely depends on state from the lost + * context, clear out all the user operations leaving the + * breadcrumb at the end (so we get the fence notifications). + */ + head = rq->infix; + if (rq->postfix < head) { + memset(vaddr + head, 0, rq->ring->size - head); + head = 0; + } + memset(vaddr + head, 0, rq->postfix - head); +} + /* * NB: This function is not allowed to fail.
Doing so would mean that the * request is not being tracked for completion but the work itself is diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 7ee220ded9c9..a355a081485f 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -258,6 +258,8 @@ void i915_request_add(struct i915_request *rq); void __i915_request_submit(struct i915_request *request); void i915_request_submit(struct i915_request *request); +void i915_request_skip(struct i915_request *request, int error); + void __i915_request_unsubmit(struct i915_request *request); void i915_request_unsubmit(struct i915_request *request); -- cgit From 5c3f8c221c77ccdce3c2a8b96d196e5f4e2dac0c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 6 Jul 2018 11:39:46 +0100 Subject: drm/i915: Track vma activity per fence.context, not per engine In the next patch, we will want to be able to use more flexible request timelines that can hop between engines. From the vma pov, we can then no longer rely on the binding of this request to an engine and so cannot ensure that different requests are ordered through a per-engine timeline, and so we must track activity of all timelines. (We track activity on the vma itself to prevent unbinding from HW before the HW has finished accessing it.) v2: Switch to an rbtree for 32b safety (since using u64 as a radixtree index is fraught with aliasing of unsigned longs). v3: s/lookup_active/active_instance/ because we can never agree on names Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20180706103947.15919-5-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 5 +- drivers/gpu/drm/i915/i915_gpu_error.c | 14 +---- drivers/gpu/drm/i915/i915_gpu_error.h | 2 +- drivers/gpu/drm/i915/i915_request.h | 1 + drivers/gpu/drm/i915/i915_vma.c | 112 ++++++++++++++++++++++++---------- drivers/gpu/drm/i915/i915_vma.h | 46 ++++---------- 6 files changed, 100 insertions(+), 80 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_request.h') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index e2793231ef49..4db31aaaa9d3 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2074,7 +2074,6 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size) struct drm_i915_private *i915 = ppgtt->base.vm.i915; struct i915_ggtt *ggtt = &i915->ggtt; struct i915_vma *vma; - int i; GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); GEM_BUG_ON(size > ggtt->vm.total); @@ -2083,14 +2082,14 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size) if (!vma) return ERR_PTR(-ENOMEM); - for (i = 0; i < ARRAY_SIZE(vma->last_read); i++) - init_request_active(&vma->last_read[i], NULL); init_request_active(&vma->last_fence, NULL); vma->vm = &ggtt->vm; vma->ops = &pd_vma_ops; vma->private = ppgtt; + vma->active = RB_ROOT; + vma->size = size; vma->fence_size = size; vma->flags = I915_VMA_GGTT; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index df524c9cad40..8c81cf3aa182 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -335,21 +335,16 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m, struct drm_i915_error_buffer *err, int count) { - int i; - err_printf(m, "%s [%d]:\n", name, count); while (count--) { - err_printf(m, " %08x_%08x %8u %02x %02x [ ", +
err_printf(m, " %08x_%08x %8u %02x %02x %02x", upper_32_bits(err->gtt_offset), lower_32_bits(err->gtt_offset), err->size, err->read_domains, - err->write_domain); - for (i = 0; i < I915_NUM_ENGINES; i++) - err_printf(m, "%02x ", err->rseqno[i]); - - err_printf(m, "] %02x", err->wseqno); + err->write_domain, + err->wseqno); err_puts(m, tiling_flag(err->tiling)); err_puts(m, dirty_flag(err->dirty)); err_puts(m, purgeable_flag(err->purgeable)); @@ -1021,13 +1016,10 @@ static void capture_bo(struct drm_i915_error_buffer *err, struct i915_vma *vma) { struct drm_i915_gem_object *obj = vma->obj; - int i; err->size = obj->base.size; err->name = obj->base.name; - for (i = 0; i < I915_NUM_ENGINES; i++) - err->rseqno[i] = __active_get_seqno(&vma->last_read[i]); err->wseqno = __active_get_seqno(&obj->frontbuffer_write); err->engine = __active_get_engine_id(&obj->frontbuffer_write); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 58910f1dc67c..f893a4e8b783 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -177,7 +177,7 @@ struct i915_gpu_state { struct drm_i915_error_buffer { u32 size; u32 name; - u32 rseqno[I915_NUM_ENGINES], wseqno; + u32 wseqno; u64 gtt_offset; u32 read_domains; u32 write_domain; diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index a355a081485f..e1c9365dfefb 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -380,6 +380,7 @@ static inline void init_request_active(struct i915_gem_active *active, i915_gem_retire_fn retire) { + RCU_INIT_POINTER(active->request, NULL); INIT_LIST_HEAD(&active->link); active->retire = retire ?: i915_gem_retire_noop; } diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 6f3a0f2296c2..b4cc98330225 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -63,18 +63,20 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason) #endif +struct i915_vma_active { + struct i915_gem_active base; + struct i915_vma *vma; + struct rb_node node; + u64 timeline; +}; + static void -i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq) +__i915_vma_retire(struct i915_vma *vma, struct i915_request *rq) { - const unsigned int idx = rq->engine->id; - struct i915_vma *vma = - container_of(active, struct i915_vma, last_read[idx]); struct drm_i915_gem_object *obj = vma->obj; - GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx)); - - i915_vma_clear_active(vma, idx); - if (i915_vma_is_active(vma)) + GEM_BUG_ON(!i915_vma_is_active(vma)); + if (--vma->active_count) return; GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); @@ -108,6 +110,15 @@ i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq) } } +static void +i915_vma_retire(struct i915_gem_active *base, struct i915_request *rq) +{ + struct i915_vma_active *active = + container_of(base, typeof(*active), base); + + __i915_vma_retire(active->vma, rq); +} + static struct i915_vma * vma_create(struct drm_i915_gem_object *obj, struct i915_address_space *vm, @@ -115,7 +126,6 @@ vma_create(struct drm_i915_gem_object *obj, { struct i915_vma *vma; struct rb_node *rb, **p; - int i; /* The aliasing_ppgtt should never be used directly! 
*/ GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm); @@ -124,8 +134,8 @@ vma_create(struct drm_i915_gem_object *obj, if (vma == NULL) return ERR_PTR(-ENOMEM); - for (i = 0; i < ARRAY_SIZE(vma->last_read); i++) - init_request_active(&vma->last_read[i], i915_vma_retire); + vma->active = RB_ROOT; + init_request_active(&vma->last_fence, NULL); vma->vm = vm; vma->ops = &vm->vma_ops; @@ -778,13 +788,11 @@ void i915_vma_reopen(struct i915_vma *vma) static void __i915_vma_destroy(struct i915_vma *vma) { struct drm_i915_private *i915 = vma->vm->i915; - int i; + struct i915_vma_active *iter, *n; GEM_BUG_ON(vma->node.allocated); GEM_BUG_ON(vma->fence); - for (i = 0; i < ARRAY_SIZE(vma->last_read); i++) - GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i])); GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence)); list_del(&vma->obj_link); @@ -795,6 +803,11 @@ static void __i915_vma_destroy(struct i915_vma *vma) if (!i915_vma_is_ggtt(vma)) i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); + rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) { + GEM_BUG_ON(i915_gem_active_isset(&iter->base)); + kfree(iter); + } + kmem_cache_free(i915->vmas, vma); } @@ -878,16 +891,54 @@ static void export_fence(struct i915_vma *vma, reservation_object_unlock(resv); } +static struct i915_gem_active *active_instance(struct i915_vma *vma, u64 idx) +{ + struct i915_vma_active *active; + struct rb_node **p, *parent; + + parent = NULL; + p = &vma->active.rb_node; + while (*p) { + parent = *p; + + active = rb_entry(parent, struct i915_vma_active, node); + if (active->timeline == idx) + return &active->base; + + if (active->timeline < idx) + p = &parent->rb_right; + else + p = &parent->rb_left; + } + + active = kmalloc(sizeof(*active), GFP_KERNEL); + if (unlikely(!active)) + return ERR_PTR(-ENOMEM); + + init_request_active(&active->base, i915_vma_retire); + active->vma = vma; + active->timeline = idx; + + rb_link_node(&active->node, parent, p); + rb_insert_color(&active->node, &vma->active); + + return &active->base; +} + int i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq, unsigned int flags) { struct drm_i915_gem_object *obj = vma->obj; - const unsigned int idx = rq->engine->id; + struct i915_gem_active *active; lockdep_assert_held(&rq->i915->drm.struct_mutex); GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); + active = active_instance(vma, rq->fence.context); + if (IS_ERR(active)) + return PTR_ERR(active); + /* * Add a reference if we're newly entering the active list. * The order in which we add operations to the retirement queue is @@ -896,11 +947,13 @@ int i915_vma_move_to_active(struct i915_vma *vma, * add the active reference first and queue for it to be dropped * *last*. */ - if (!i915_vma_is_active(vma)) + if (!i915_gem_active_isset(active) && !vma->active_count++) { + list_move_tail(&vma->vm_link, &vma->vm->active_list); obj->active_count++; - i915_vma_set_active(vma, idx); - i915_gem_active_set(&vma->last_read[idx], rq); - list_move_tail(&vma->vm_link, &vma->vm->active_list); + } + i915_gem_active_set(active, rq); + GEM_BUG_ON(!i915_vma_is_active(vma)); + GEM_BUG_ON(!obj->active_count); obj->write_domain = 0; if (flags & EXEC_OBJECT_WRITE) { @@ -922,7 +975,6 @@ int i915_vma_move_to_active(struct i915_vma *vma, int i915_vma_unbind(struct i915_vma *vma) { - unsigned long active; int ret; lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); @@ -932,9 +984,8 @@ int i915_vma_unbind(struct i915_vma *vma) * have side-effects such as unpinning or even unbinding this vma. 
*/ might_sleep(); - active = i915_vma_get_active(vma); - if (active) { - int idx; + if (i915_vma_is_active(vma)) { + struct i915_vma_active *active, *n; /* * When a closed VMA is retired, it is unbound - eek. @@ -951,18 +1002,17 @@ int i915_vma_unbind(struct i915_vma *vma) */ __i915_vma_pin(vma); - for_each_active(active, idx) { - ret = i915_gem_active_retire(&vma->last_read[idx], + rbtree_postorder_for_each_entry_safe(active, n, + &vma->active, node) { + ret = i915_gem_active_retire(&active->base, &vma->vm->i915->drm.struct_mutex); if (ret) - break; - } - - if (!ret) { - ret = i915_gem_active_retire(&vma->last_fence, - &vma->vm->i915->drm.struct_mutex); + goto unpin; } + ret = i915_gem_active_retire(&vma->last_fence, + &vma->vm->i915->drm.struct_mutex); +unpin: __i915_vma_unpin(vma); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index a218b689e418..c297b0a0dc47 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -26,6 +26,7 @@ #define __I915_VMA_H__ #include +#include #include @@ -94,8 +95,8 @@ struct i915_vma { #define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT) #define I915_VMA_GGTT_WRITE BIT(12) - unsigned int active; - struct i915_gem_active last_read[I915_NUM_ENGINES]; + unsigned int active_count; + struct rb_root active; struct i915_gem_active last_fence; /** @@ -138,6 +139,15 @@ i915_vma_instance(struct drm_i915_gem_object *obj, void i915_vma_unpin_and_release(struct i915_vma **p_vma); +static inline bool i915_vma_is_active(struct i915_vma *vma) +{ + return vma->active_count; +} + +int __must_check i915_vma_move_to_active(struct i915_vma *vma, + struct i915_request *rq, + unsigned int flags); + static inline bool i915_vma_is_ggtt(const struct i915_vma *vma) { return vma->flags & I915_VMA_GGTT; @@ -187,38 +197,6 @@ static inline bool i915_vma_has_userfault(const struct i915_vma *vma) return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags); } -static inline unsigned int i915_vma_get_active(const struct i915_vma *vma) -{ - return vma->active; -} - -static inline bool i915_vma_is_active(const struct i915_vma *vma) -{ - return i915_vma_get_active(vma); -} - -static inline void i915_vma_set_active(struct i915_vma *vma, - unsigned int engine) -{ - vma->active |= BIT(engine); -} - -static inline void i915_vma_clear_active(struct i915_vma *vma, - unsigned int engine) -{ - vma->active &= ~BIT(engine); -} - -static inline bool i915_vma_has_active_engine(const struct i915_vma *vma, - unsigned int engine) -{ - return vma->active & BIT(engine); -} - -int __must_check i915_vma_move_to_active(struct i915_vma *vma, - struct i915_request *rq, - unsigned int flags); - static inline u32 i915_ggtt_offset(const struct i915_vma *vma) { GEM_BUG_ON(!i915_vma_is_ggtt(vma)); -- cgit
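As a closing illustration of the new tracking scheme: every i915_vma_move_to_active() now looks up, or lazily creates, an activity tracker keyed by the request's timeline (fence context) id. A condensed sketch, mirroring active_instance() from the diff above (the sketch_ prefix is ours):

/*
 * Find the i915_gem_active tracker for the given timeline id,
 * creating and inserting a new rbtree node on first use. Keying
 * by fence context rather than engine id is what lets timelines
 * hop between engines.
 */
static struct i915_gem_active *sketch_active_instance(struct i915_vma *vma,
						      u64 idx)
{
	struct i915_vma_active *active;
	struct rb_node **p = &vma->active.rb_node, *parent = NULL;

	while (*p) {
		parent = *p;
		active = rb_entry(parent, struct i915_vma_active, node);
		if (active->timeline == idx)
			return &active->base;	/* reuse the tracker */

		p = active->timeline < idx ? &parent->rb_right
					   : &parent->rb_left;
	}

	active = kmalloc(sizeof(*active), GFP_KERNEL);
	if (unlikely(!active))
		return ERR_PTR(-ENOMEM);

	init_request_active(&active->base, i915_vma_retire);
	active->vma = vma;
	active->timeline = idx;

	rb_link_node(&active->node, parent, p);
	rb_insert_color(&active->node, &vma->active);

	return &active->base;
}

Unbind then simply walks the rbtree and retires each tracker in turn, as i915_vma_unbind() does in the diff above.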