Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_request.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_request.c | 104
1 file changed, 57 insertions(+), 47 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 8c59c79cbd8b..813a3b546d6e 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -213,6 +213,10 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
 		cond_resched();
 	}
 
+	/* Check we are idle before we fiddle with hw state! */
+	GEM_BUG_ON(!intel_engine_is_idle(engine));
+	GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
+
 	/* Finally reset hw state */
 	intel_engine_init_global_seqno(engine, seqno);
 	tl->seqno = seqno;
@@ -240,27 +244,60 @@ int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
 	return reset_all_global_seqno(dev_priv, seqno - 1);
 }
 
-static int reserve_seqno(struct intel_engine_cs *engine)
+static void mark_busy(struct drm_i915_private *i915)
 {
+	if (i915->gt.awake)
+		return;
+
+	GEM_BUG_ON(!i915->gt.active_requests);
+
+	intel_runtime_pm_get_noresume(i915);
+	i915->gt.awake = true;
+
+	intel_enable_gt_powersave(i915);
+	i915_update_gfx_val(i915);
+	if (INTEL_GEN(i915) >= 6)
+		gen6_rps_busy(i915);
+
+	queue_delayed_work(i915->wq,
+			   &i915->gt.retire_work,
+			   round_jiffies_up_relative(HZ));
+}
+
+static int reserve_engine(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *i915 = engine->i915;
 	u32 active = ++engine->timeline->inflight_seqnos;
 	u32 seqno = engine->timeline->seqno;
 	int ret;
 
 	/* Reservation is fine until we need to wrap around */
-	if (likely(!add_overflows(seqno, active)))
-		return 0;
-
-	ret = reset_all_global_seqno(engine->i915, 0);
-	if (ret) {
-		engine->timeline->inflight_seqnos--;
-		return ret;
+	if (unlikely(add_overflows(seqno, active))) {
+		ret = reset_all_global_seqno(i915, 0);
+		if (ret) {
+			engine->timeline->inflight_seqnos--;
+			return ret;
+		}
 	}
 
+	if (!i915->gt.active_requests++)
+		mark_busy(i915);
+
 	return 0;
 }
 
-static void unreserve_seqno(struct intel_engine_cs *engine)
+static void unreserve_engine(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *i915 = engine->i915;
+
+	if (!--i915->gt.active_requests) {
+		/* Cancel the mark_busy() from our reserve_engine() */
+		GEM_BUG_ON(!i915->gt.awake);
+		mod_delayed_work(i915->wq,
+				 &i915->gt.idle_work,
+				 msecs_to_jiffies(100));
+	}
+
 	GEM_BUG_ON(!engine->timeline->inflight_seqnos);
 	engine->timeline->inflight_seqnos--;
 }
@@ -329,13 +366,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 	list_del_init(&request->link);
 	spin_unlock_irq(&engine->timeline->lock);
 
-	if (!--request->i915->gt.active_requests) {
-		GEM_BUG_ON(!request->i915->gt.awake);
-		mod_delayed_work(request->i915->wq,
-				 &request->i915->gt.idle_work,
-				 msecs_to_jiffies(100));
-	}
-	unreserve_seqno(request->engine);
+	unreserve_engine(request->engine);
 
 	advance_ring(request);
 	free_capture_list(request);
@@ -370,8 +401,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 	i915_gem_request_remove_from_client(request);
 
 	/* Retirement decays the ban score as it is a sign of ctx progress */
-	if (request->ctx->ban_score > 0)
-		request->ctx->ban_score--;
+	atomic_dec_if_positive(&request->ctx->ban_score);
 
 	/* The backing object for the context is done after switching to the
 	 * *next* context. Therefore we cannot retire the previous context until
@@ -384,7 +414,11 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 		engine->context_unpin(engine, engine->last_retired_context);
 	engine->last_retired_context = request->ctx;
 
-	dma_fence_signal(&request->fence);
+	spin_lock_irq(&request->lock);
+	if (request->waitboost)
+		atomic_dec(&request->i915->rps.num_waiters);
+	dma_fence_signal_locked(&request->fence);
+	spin_unlock_irq(&request->lock);
 
 	i915_priotree_fini(request->i915, &request->priotree);
 	i915_gem_request_put(request);
@@ -568,7 +602,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 		return ERR_CAST(ring);
 	GEM_BUG_ON(!ring);
 
-	ret = reserve_seqno(engine);
+	ret = reserve_engine(engine);
 	if (ret)
 		goto err_unpin;
 
@@ -639,6 +673,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	req->file_priv = NULL;
 	req->batch = NULL;
 	req->capture_list = NULL;
+	req->waitboost = false;
 
 	/*
 	 * Reserve space in the ring buffer for all the commands required to
@@ -673,7 +708,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 err_ctx:
 	kmem_cache_free(dev_priv->requests, req);
 err_unreserve:
-	unreserve_seqno(engine);
+	unreserve_engine(engine);
 err_unpin:
 	engine->context_unpin(engine, ctx);
 	return ERR_PTR(ret);
@@ -855,28 +890,6 @@ i915_gem_request_await_object(struct drm_i915_gem_request *to,
 	return ret;
 }
 
-static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-
-	if (dev_priv->gt.awake)
-		return;
-
-	GEM_BUG_ON(!dev_priv->gt.active_requests);
-
-	intel_runtime_pm_get_noresume(dev_priv);
-	dev_priv->gt.awake = true;
-
-	intel_enable_gt_powersave(dev_priv);
-	i915_update_gfx_val(dev_priv);
-	if (INTEL_GEN(dev_priv) >= 6)
-		gen6_rps_busy(dev_priv);
-
-	queue_delayed_work(dev_priv->wq,
-			   &dev_priv->gt.retire_work,
-			   round_jiffies_up_relative(HZ));
-}
-
 /*
  * NB: This function is not allowed to fail. Doing so would mean the the
  * request is not being tracked for completion but the work itself is
@@ -958,9 +971,6 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 	list_add_tail(&request->ring_link, &ring->request_list);
 	request->emitted_jiffies = jiffies;
 
-	if (!request->i915->gt.active_requests++)
-		i915_gem_mark_busy(engine);
-
 	/* Let the backend know a new request has arrived that may need
 	 * to adjust the existing execution schedule due to a high priority
 	 * request - i.e. we may want to preempt the current request in order
@@ -1063,7 +1073,7 @@ static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *req
 		return false;
 
 	__set_current_state(TASK_RUNNING);
-	i915_reset(request->i915);
+	i915_reset(request->i915, 0);
 
 	return true;
 }
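The thrust of the change above is that GT power-management bookkeeping moves out of __i915_add_request() and retirement into the renamed reserve_engine()/unreserve_engine() pair: the first request in flight wakes the device (mark_busy()), and the last request to retire schedules the deferred idle worker. Below is a minimal standalone C sketch of that reference-counted gating pattern, using toy types and printf() stand-ins for the power-management calls; none of these names beyond the two helpers are the driver's API.

/*
 * Toy userspace illustration (not kernel code) of the busy/idle
 * gating consolidated by this patch: wake on the 0 -> 1 transition
 * of the in-flight request count, idle on the 1 -> 0 transition.
 */
#include <stdbool.h>
#include <stdio.h>

struct gt_state {
	unsigned int active_requests; /* requests in flight */
	bool awake;                   /* device power state */
};

/* Stand-in for mark_busy(): wake the device exactly once. */
static void mark_busy(struct gt_state *gt)
{
	if (gt->awake)
		return;

	gt->awake = true;
	printf("wake: enable powersave, kick retire worker\n");
}

/* Stand-in for reserve_engine(): first request wakes the device. */
static void reserve_engine(struct gt_state *gt)
{
	if (!gt->active_requests++)
		mark_busy(gt);
}

/* Stand-in for unreserve_engine(): last retirement defers idling. */
static void unreserve_engine(struct gt_state *gt)
{
	if (!--gt->active_requests)
		printf("idle: schedule idle worker (~100ms)\n");
}

int main(void)
{
	struct gt_state gt = { 0, false };

	reserve_engine(&gt);   /* first request: wakes the device */
	reserve_engine(&gt);   /* second request: already awake, no-op */
	unreserve_engine(&gt); /* one still in flight: stay busy */
	unreserve_engine(&gt); /* last retires: idle worker queued */
	return 0;
}

The same gating explains the GEM_BUG_ON(!i915->gt.active_requests) inside the relocated mark_busy(): reserve_engine() increments the count before calling it, so a wake with zero requests in flight would indicate a bookkeeping bug.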