diff options
Diffstat (limited to 'drivers/gpu/drm/scheduler/sched_entity.c')
| -rw-r--r-- | drivers/gpu/drm/scheduler/sched_entity.c | 140 | 
1 files changed, 69 insertions, 71 deletions
| diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 79554aa4dbb1..27e1573af96e 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -45,8 +45,14 @@   * @guilty: atomic_t set to 1 when a job on this queue   *          is found to be guilty causing a timeout   * - * Note: the sched_list should have at least one element to schedule - *       the entity + * Note that the &sched_list must have at least one element to schedule the entity. + * + * For changing @priority later on at runtime see + * drm_sched_entity_set_priority(). For changing the set of schedulers + * @sched_list at runtime see drm_sched_entity_modify_sched(). + * + * An entity is cleaned up by calling drm_sched_entity_fini(). See also + * drm_sched_entity_destroy().   *   * Returns 0 on success or a negative error code on failure.   */ @@ -92,6 +98,11 @@ EXPORT_SYMBOL(drm_sched_entity_init);   * @sched_list: the list of new drm scheds which will replace   *		 existing entity->sched_list   * @num_sched_list: number of drm sched in sched_list + * + * Note that this must be called under the same common lock for @entity as + * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to + * guarantee through some other means that this is never called while new jobs + * can be pushed to @entity.   */  void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,  				    struct drm_gpu_scheduler **sched_list,  				    unsigned int num_sched_list) @@ -104,13 +115,6 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,  }  EXPORT_SYMBOL(drm_sched_entity_modify_sched); -/** - * drm_sched_entity_is_idle - Check if entity is idle - * - * @entity: scheduler entity - * - * Returns true if the entity does not have any unscheduled jobs. 
- */  static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)  {  	rmb(); /* for list_empty to work without lock */ @@ -123,13 +127,7 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)  	return false;  } -/** - * drm_sched_entity_is_ready - Check if entity is ready - * - * @entity: scheduler entity - * - * Return true if entity could provide a job. - */ +/* Return true if entity could provide a job. */  bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)  {  	if (spsc_queue_peek(&entity->job_queue) == NULL) @@ -192,14 +190,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)  }  EXPORT_SYMBOL(drm_sched_entity_flush); -/** - * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs - * - * @f: signaled fence - * @cb: our callback structure - * - * Signal the scheduler finished fence when the entity in question is killed. - */ +/* Signal the scheduler finished fence when the entity in question is killed. */  static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,  					  struct dma_fence_cb *cb)  { @@ -211,14 +202,19 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,  	job->sched->ops->free_job(job);  } -/** - * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed - * - * @entity: entity which is cleaned up - * - * Makes sure that all remaining jobs in an entity are killed before it is - * destroyed. 
- */ +static struct dma_fence * +drm_sched_job_dependency(struct drm_sched_job *job, +			 struct drm_sched_entity *entity) +{ +	if (!xa_empty(&job->dependencies)) +		return xa_erase(&job->dependencies, job->last_dependency++); + +	if (job->sched->ops->dependency) +		return job->sched->ops->dependency(job, entity); + +	return NULL; +} +  static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)  {  	struct drm_sched_job *job; @@ -229,7 +225,7 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)  		struct drm_sched_fence *s_fence = job->s_fence;  		/* Wait for all dependencies to avoid data corruptions */ -		while ((f = job->sched->ops->dependency(job, entity))) +		while ((f = drm_sched_job_dependency(job, entity)))  			dma_fence_wait(f, false);  		drm_sched_fence_scheduled(s_fence); @@ -260,9 +256,11 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)   *   * @entity: scheduler entity   * - * This should be called after @drm_sched_entity_do_release. It goes over the - * entity and signals all jobs with an error code if the process was killed. + * Cleans up @entity which has been initialized by drm_sched_entity_init().   * + * If there are potentially jobs still in flight or getting newly queued + * drm_sched_entity_flush() must be called first. This function then goes over + * the entity and signals all jobs with an error code if the process was killed.   */  void drm_sched_entity_fini(struct drm_sched_entity *entity)  { @@ -302,10 +300,10 @@ EXPORT_SYMBOL(drm_sched_entity_fini);  /**   * drm_sched_entity_destroy - Destroy a context entity - *   * @entity: scheduler entity   * - * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup() + * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a + * convenience wrapper.   
*/  void drm_sched_entity_destroy(struct drm_sched_entity *entity)  { @@ -314,9 +312,7 @@ void drm_sched_entity_destroy(struct drm_sched_entity *entity)  }  EXPORT_SYMBOL(drm_sched_entity_destroy); -/* - * drm_sched_entity_clear_dep - callback to clear the entities dependency - */ +/* drm_sched_entity_clear_dep - callback to clear the entities dependency */  static void drm_sched_entity_clear_dep(struct dma_fence *f,  				       struct dma_fence_cb *cb)  { @@ -358,11 +354,7 @@ void drm_sched_entity_set_priority(struct drm_sched_entity *entity,  }  EXPORT_SYMBOL(drm_sched_entity_set_priority); -/** - * drm_sched_entity_add_dependency_cb - add callback for the entities dependency - * - * @entity: entity with dependency - * +/*   * Add a callback to the current dependency of the entity to wake up the   * scheduler when the entity becomes available.   */ @@ -410,16 +402,8 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)  	return false;  } -/** - * drm_sched_entity_pop_job - get a ready to be scheduled job from the entity - * - * @entity: entity to get the job from - * - * Process all dependencies and try to get one job from the entities queue. 
- */  struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)  { -	struct drm_gpu_scheduler *sched = entity->rq->sched;  	struct drm_sched_job *sched_job;  	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue)); @@ -427,7 +411,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)  		return NULL;  	while ((entity->dependency = -			sched->ops->dependency(sched_job, entity))) { +			drm_sched_job_dependency(sched_job, entity))) {  		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);  		if (drm_sched_entity_add_dependency_cb(entity)) @@ -439,30 +423,45 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)  		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);  	dma_fence_put(entity->last_scheduled); +  	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished); +	/* +	 * If the queue is empty we allow drm_sched_entity_select_rq() to +	 * locklessly access ->last_scheduled. This only works if we set the +	 * pointer before we dequeue and if we add a write barrier here. +	 */ +	smp_wmb(); +  	spsc_queue_pop(&entity->job_queue);  	return sched_job;  } -/** - * drm_sched_entity_select_rq - select a new rq for the entity - * - * @entity: scheduler entity - * - * Check all prerequisites and select a new rq for the entity for load - * balancing. - */  void drm_sched_entity_select_rq(struct drm_sched_entity *entity)  {  	struct dma_fence *fence;  	struct drm_gpu_scheduler *sched;  	struct drm_sched_rq *rq; -	if (spsc_queue_count(&entity->job_queue) || !entity->sched_list) +	/* single possible engine and already selected */ +	if (!entity->sched_list)  		return; -	fence = READ_ONCE(entity->last_scheduled); +	/* queue non-empty, stay on the same engine */ +	if (spsc_queue_count(&entity->job_queue)) +		return; + +	/* +	 * Only when the queue is empty are we guaranteed that the scheduler +	 * thread cannot change ->last_scheduled. 
To enforce ordering we need +	 * a read barrier here. See drm_sched_entity_pop_job() for the other +	 * side. +	 */ +	smp_rmb(); + +	fence = entity->last_scheduled; + +	/* stay on the same engine if the previous job hasn't finished */  	if (fence && !dma_fence_is_signaled(fence))  		return; @@ -481,19 +480,18 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)  /**   * drm_sched_entity_push_job - Submit a job to the entity's job queue - *   * @sched_job: job to submit - * @entity: scheduler entity   * - * Note: To guarantee that the order of insertion to queue matches - * the job's fence sequence number this function should be - * called with drm_sched_job_init under common lock. + * Note: To guarantee that the order of insertion to queue matches the job's + * fence sequence number this function should be called with drm_sched_job_arm() + * under common lock for the struct drm_sched_entity that was set up for + * @sched_job in drm_sched_job_init().   *   * Returns 0 for success, negative error code otherwise.   */ -void drm_sched_entity_push_job(struct drm_sched_job *sched_job, -			       struct drm_sched_entity *entity) +void drm_sched_entity_push_job(struct drm_sched_job *sched_job)  { +	struct drm_sched_entity *entity = sched_job->entity;  	bool first;  	trace_drm_sched_job(sched_job, entity); |