Diffstat (limited to 'drivers/gpu/drm/i915/i915_scheduler.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.c  20
1 file changed, 11 insertions, 9 deletions
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index e19a37a83397..68b06a7ba667 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -209,6 +209,8 @@ static void kick_submission(struct intel_engine_cs *engine,
 	if (!inflight)
 		goto unlock;
 
+	engine->execlists.queue_priority_hint = prio;
+
 	/*
 	 * If we are already the currently executing context, don't
 	 * bother evaluating if we should preempt ourselves.
@@ -216,7 +218,6 @@ static void kick_submission(struct intel_engine_cs *engine,
 	if (inflight->context == rq->context)
 		goto unlock;
 
-	engine->execlists.queue_priority_hint = prio;
 	if (need_preempt(prio, rq_prio(inflight)))
 		tasklet_hi_schedule(&engine->execlists.tasklet);
 
@@ -227,10 +228,10 @@ unlock:
 static void __i915_schedule(struct i915_sched_node *node,
 			    const struct i915_sched_attr *attr)
 {
+	const int prio = max(attr->priority, node->attr.priority);
 	struct intel_engine_cs *engine;
 	struct i915_dependency *dep, *p;
 	struct i915_dependency stack;
-	const int prio = attr->priority;
 	struct sched_cache cache;
 	LIST_HEAD(dfs);
 
@@ -238,9 +239,6 @@ static void __i915_schedule(struct i915_sched_node *node,
 	lockdep_assert_held(&schedule_lock);
 	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
 
-	if (prio <= READ_ONCE(node->attr.priority))
-		return;
-
 	if (node_signaled(node))
 		return;
 
@@ -324,7 +322,7 @@ static void __i915_schedule(struct i915_sched_node *node,
 
 		GEM_BUG_ON(node_to_request(node)->engine != engine);
 
-		node->attr.priority = prio;
+		WRITE_ONCE(node->attr.priority, prio);
 
 		/*
 		 * Once the request is ready, it will be placed into the
@@ -363,6 +361,9 @@ static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
 {
 	struct i915_sched_attr attr = node->attr;
 
+	if (attr.priority & bump)
+		return;
+
 	attr.priority |= bump;
 	__i915_schedule(node, &attr);
 }
@@ -486,7 +487,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
 	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
 		GEM_BUG_ON(!list_empty(&dep->dfs_link));
 
-		list_del(&dep->wait_link);
+		list_del_rcu(&dep->wait_link);
 		if (dep->flags & I915_DEPENDENCY_ALLOC)
 			i915_dependency_free(dep);
 	}
@@ -497,7 +498,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
 		GEM_BUG_ON(dep->signaler != node);
 		GEM_BUG_ON(!list_empty(&dep->dfs_link));
 
-		list_del(&dep->signal_link);
+		list_del_rcu(&dep->signal_link);
 		if (dep->flags & I915_DEPENDENCY_ALLOC)
 			i915_dependency_free(dep);
 	}
@@ -526,7 +527,8 @@ static struct i915_global_scheduler global = { {
 int __init i915_global_scheduler_init(void)
 {
 	global.slab_dependencies = KMEM_CACHE(i915_dependency,
-					      SLAB_HWCACHE_ALIGN);
+					      SLAB_HWCACHE_ALIGN |
+					      SLAB_TYPESAFE_BY_RCU);
 	if (!global.slab_dependencies)
 		return -ENOMEM;
 
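
Note (not part of the patch): the hunks above move the dependency lists to RCU-safe teardown, pairing list_del_rcu() with a SLAB_TYPESAFE_BY_RCU slab so that a lockless reader racing with i915_sched_node_fini() never dereferences memory that has been returned to the page allocator. A minimal sketch of that general kernel idiom follows; the names (my_dep, my_cache, my_waiters, my_remove) are hypothetical and only illustrate the pattern, not the i915 code itself.

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>

struct my_dep {
	struct list_head wait_link;
};

static struct kmem_cache *my_cache;
static LIST_HEAD(my_waiters);
static DEFINE_SPINLOCK(my_lock);

static int __init my_cache_init(void)
{
	/*
	 * SLAB_TYPESAFE_BY_RCU: freed objects may be reused for new
	 * allocations immediately, but the backing pages are not handed
	 * back to the system until an RCU grace period has elapsed, so a
	 * reader holding rcu_read_lock() never faults on a stale pointer.
	 */
	my_cache = KMEM_CACHE(my_dep, SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU);
	return my_cache ? 0 : -ENOMEM;
}

static void my_remove(struct my_dep *dep)
{
	/* Writers still serialise list mutation with their own lock. */
	spin_lock(&my_lock);
	list_del_rcu(&dep->wait_link);
	spin_unlock(&my_lock);

	/*
	 * Readers walking my_waiters under rcu_read_lock() may still see
	 * this object briefly after the free; they must revalidate what
	 * they find, which is the cost of SLAB_TYPESAFE_BY_RCU compared
	 * to waiting for a grace period with kfree_rcu().
	 */
	kmem_cache_free(my_cache, dep);
}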