Diffstat (limited to 'drivers/gpu/drm/amd/scheduler')
 -rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h |  4
 -rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c   | 67
 -rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h   | 26
 -rw-r--r--  drivers/gpu/drm/amd/scheduler/sched_fence.c     | 48
4 files changed, 75 insertions, 70 deletions
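The patch below is a mechanical rename from the old struct fence interfaces to the struct dma_fence interfaces: <linux/fence.h> becomes <linux/dma-fence.h>, every fence_*() helper gains a dma_ prefix (fence_get/fence_put, fence_init, fence_signal, fence_add_callback/fence_remove_callback, fence_context_alloc, fence_default_wait), and FENCE_TRACE becomes DMA_FENCE_TRACE; continuation lines are only re-indented for the longer names, so no behavioral change is intended. As a before/after sketch (assembled from the hunks below for illustration, not itself a hunk), a typical call site changes like this:

    /* before: <linux/fence.h> API */
    struct fence *fence = sched->ops->run_job(sched_job);
    s_fence->parent = fence_get(fence);
    r = fence_add_callback(fence, &s_fence->cb, amd_sched_process_job);
    fence_put(fence);

    /* after: <linux/dma-fence.h> API, same semantics */
    struct dma_fence *fence = sched->ops->run_job(sched_job);
    s_fence->parent = dma_fence_get(fence);
    r = dma_fence_add_callback(fence, &s_fence->cb, amd_sched_process_job);
    dma_fence_put(fence);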
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index b961a1c6caf3..dbd4fd3a810b 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -17,7 +17,7 @@ TRACE_EVENT(amd_sched_job,
 	    TP_STRUCT__entry(
 			     __field(struct amd_sched_entity *, entity)
 			     __field(struct amd_sched_job *, sched_job)
-			     __field(struct fence *, fence)
+			     __field(struct dma_fence *, fence)
 			     __field(const char *, name)
 			     __field(u32, job_count)
 			     __field(int, hw_job_count)
@@ -42,7 +42,7 @@ TRACE_EVENT(amd_sched_process_job,
 	    TP_PROTO(struct amd_sched_fence *fence),
 	    TP_ARGS(fence),
 	    TP_STRUCT__entry(
-		    __field(struct fence *, fence)
+		    __field(struct dma_fence *, fence)
 		    ),
 	    TP_fast_assign(
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index ffe1f85ce300..1bf83ed113b3 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -32,7 +32,7 @@
 
 static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
-static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
+static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
 
 /* Initialize a given run queue struct */
 static void amd_sched_rq_init(struct amd_sched_rq *rq)
@@ -138,7 +138,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 		return r;
 
 	atomic_set(&entity->fence_seq, 0);
-	entity->fence_context = fence_context_alloc(2);
+	entity->fence_context = dma_fence_context_alloc(2);
 
 	return 0;
 }
@@ -218,32 +218,32 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 	kfifo_free(&entity->job_queue);
 }
 
-static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
+static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
 {
 	struct amd_sched_entity *entity =
 		container_of(cb, struct amd_sched_entity, cb);
 	entity->dependency = NULL;
-	fence_put(f);
+	dma_fence_put(f);
 	amd_sched_wakeup(entity->sched);
 }
 
-static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb)
+static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
 {
 	struct amd_sched_entity *entity =
 		container_of(cb, struct amd_sched_entity, cb);
 	entity->dependency = NULL;
-	fence_put(f);
+	dma_fence_put(f);
 }
 
 static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
 {
 	struct amd_gpu_scheduler *sched = entity->sched;
-	struct fence * fence = entity->dependency;
+	struct dma_fence * fence = entity->dependency;
 	struct amd_sched_fence *s_fence;
 
 	if (fence->context == entity->fence_context) {
 		/* We can ignore fences from ourself */
-		fence_put(entity->dependency);
+		dma_fence_put(entity->dependency);
 		return false;
 	}
 
@@ -254,23 +254,23 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
 		 * Fence is from the same scheduler, only need to wait for
 		 * it to be scheduled
 		 */
-		fence = fence_get(&s_fence->scheduled);
-		fence_put(entity->dependency);
+		fence = dma_fence_get(&s_fence->scheduled);
+		dma_fence_put(entity->dependency);
 		entity->dependency = fence;
-		if (!fence_add_callback(fence, &entity->cb,
-					amd_sched_entity_clear_dep))
+		if (!dma_fence_add_callback(fence, &entity->cb,
+					    amd_sched_entity_clear_dep))
 			return true;
 
 		/* Ignore it when it is already scheduled */
-		fence_put(fence);
+		dma_fence_put(fence);
 		return false;
 	}
 
-	if (!fence_add_callback(entity->dependency, &entity->cb,
-				amd_sched_entity_wakeup))
+	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
+				    amd_sched_entity_wakeup))
 		return true;
 
-	fence_put(entity->dependency);
+	dma_fence_put(entity->dependency);
 	return false;
 }
@@ -351,7 +351,8 @@ static void amd_sched_job_finish(struct work_struct *work)
 	sched->ops->free_job(s_job);
 }
 
-static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
+static void amd_sched_job_finish_cb(struct dma_fence *f,
+				    struct dma_fence_cb *cb)
 {
 	struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
 						 finish_cb);
@@ -385,8 +386,8 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
 
 	spin_lock(&sched->job_list_lock);
 	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
-		if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
-			fence_put(s_job->s_fence->parent);
+		if (dma_fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
+			dma_fence_put(s_job->s_fence->parent);
 			s_job->s_fence->parent = NULL;
 		}
 	}
@@ -407,21 +408,21 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
 		struct amd_sched_fence *s_fence = s_job->s_fence;
-		struct fence *fence;
+		struct dma_fence *fence;
 
 		spin_unlock(&sched->job_list_lock);
 		fence = sched->ops->run_job(s_job);
 		atomic_inc(&sched->hw_rq_count);
 		if (fence) {
-			s_fence->parent = fence_get(fence);
-			r = fence_add_callback(fence, &s_fence->cb,
-					       amd_sched_process_job);
+			s_fence->parent = dma_fence_get(fence);
+			r = dma_fence_add_callback(fence, &s_fence->cb,
+						   amd_sched_process_job);
 			if (r == -ENOENT)
 				amd_sched_process_job(fence, &s_fence->cb);
 			else if (r)
 				DRM_ERROR("fence add callback failed (%d)\n",
 					  r);
-			fence_put(fence);
+			dma_fence_put(fence);
 		} else {
 			DRM_ERROR("Failed to run job!\n");
 			amd_sched_process_job(NULL, &s_fence->cb);
@@ -443,8 +444,8 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 	struct amd_sched_entity *entity = sched_job->s_entity;
 
 	trace_amd_sched_job(sched_job);
-	fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
-			   amd_sched_job_finish_cb);
+	dma_fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
+			       amd_sched_job_finish_cb);
 	wait_event(entity->sched->job_scheduled,
 		   amd_sched_entity_in(sched_job));
 }
@@ -508,7 +509,7 @@ amd_sched_select_entity(struct amd_gpu_scheduler *sched)
 	return entity;
 }
 
-static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
+static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 {
 	struct amd_sched_fence *s_fence =
 		container_of(cb, struct amd_sched_fence, cb);
@@ -518,7 +519,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
 	amd_sched_fence_finished(s_fence);
 	trace_amd_sched_process_job(s_fence);
-	fence_put(&s_fence->finished);
+	dma_fence_put(&s_fence->finished);
 	wake_up_interruptible(&sched->wake_up_worker);
 }
 
@@ -544,7 +545,7 @@ static int amd_sched_main(void *param)
 		struct amd_sched_entity *entity = NULL;
 		struct amd_sched_fence *s_fence;
 		struct amd_sched_job *sched_job;
-		struct fence *fence;
+		struct dma_fence *fence;
 
 		wait_event_interruptible(sched->wake_up_worker,
 					 (!amd_sched_blocked(sched) &&
@@ -566,15 +567,15 @@ static int amd_sched_main(void *param)
 		fence = sched->ops->run_job(sched_job);
 		amd_sched_fence_scheduled(s_fence);
 		if (fence) {
-			s_fence->parent = fence_get(fence);
-			r = fence_add_callback(fence, &s_fence->cb,
-					       amd_sched_process_job);
+			s_fence->parent = dma_fence_get(fence);
+			r = dma_fence_add_callback(fence, &s_fence->cb,
+						   amd_sched_process_job);
 			if (r == -ENOENT)
 				amd_sched_process_job(fence, &s_fence->cb);
 			else if (r)
 				DRM_ERROR("fence add callback failed (%d)\n",
 					  r);
-			fence_put(fence);
+			dma_fence_put(fence);
 		} else {
 			DRM_ERROR("Failed to run job!\n");
 			amd_sched_process_job(NULL, &s_fence->cb);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 51068e6c3d9a..d8dc681bcda6 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -25,7 +25,7 @@
 #define _GPU_SCHEDULER_H_
 
 #include <linux/kfifo.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
 
 struct amd_gpu_scheduler;
 struct amd_sched_rq;
@@ -47,8 +47,8 @@ struct amd_sched_entity {
 	atomic_t			fence_seq;
 	uint64_t                        fence_context;
 
-	struct fence			*dependency;
-	struct fence_cb			cb;
+	struct dma_fence		*dependency;
+	struct dma_fence_cb		cb;
 };
 
 /**
@@ -63,10 +63,10 @@ struct amd_sched_rq {
 };
 
 struct amd_sched_fence {
-	struct fence                    scheduled;
-	struct fence                    finished;
-	struct fence_cb                 cb;
-	struct fence                    *parent;
+	struct dma_fence                scheduled;
+	struct dma_fence                finished;
+	struct dma_fence_cb             cb;
+	struct dma_fence                *parent;
 	struct amd_gpu_scheduler	*sched;
 	spinlock_t			lock;
 	void                            *owner;
@@ -76,15 +76,15 @@ struct amd_sched_job {
 	struct amd_gpu_scheduler        *sched;
 	struct amd_sched_entity         *s_entity;
 	struct amd_sched_fence          *s_fence;
-	struct fence_cb			finish_cb;
+	struct dma_fence_cb		finish_cb;
 	struct work_struct		finish_work;
 	struct list_head		node;
 	struct delayed_work		work_tdr;
 };
 
-extern const struct fence_ops amd_sched_fence_ops_scheduled;
-extern const struct fence_ops amd_sched_fence_ops_finished;
-static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
+extern const struct dma_fence_ops amd_sched_fence_ops_scheduled;
+extern const struct dma_fence_ops amd_sched_fence_ops_finished;
+static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f)
 {
 	if (f->ops == &amd_sched_fence_ops_scheduled)
 		return container_of(f, struct amd_sched_fence, scheduled);
@@ -100,8 +100,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
  * these functions should be implemented in driver side
 */
 struct amd_sched_backend_ops {
-	struct fence *(*dependency)(struct amd_sched_job *sched_job);
-	struct fence *(*run_job)(struct amd_sched_job *sched_job);
+	struct dma_fence *(*dependency)(struct amd_sched_job *sched_job);
+	struct dma_fence *(*run_job)(struct amd_sched_job *sched_job);
 	void (*timedout_job)(struct amd_sched_job *sched_job);
 	void (*free_job)(struct amd_sched_job *sched_job);
 };
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index 88fc2d662579..33f54d0a5c4f 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -61,46 +61,50 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
 	spin_lock_init(&fence->lock);
 
 	seq = atomic_inc_return(&entity->fence_seq);
-	fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
-		   &fence->lock, entity->fence_context, seq);
-	fence_init(&fence->finished, &amd_sched_fence_ops_finished,
-		   &fence->lock, entity->fence_context + 1, seq);
+	dma_fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
+		       &fence->lock, entity->fence_context, seq);
+	dma_fence_init(&fence->finished, &amd_sched_fence_ops_finished,
+		       &fence->lock, entity->fence_context + 1, seq);
 
 	return fence;
 }
 
 void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
 {
-	int ret = fence_signal(&fence->scheduled);
+	int ret = dma_fence_signal(&fence->scheduled);
 
 	if (!ret)
-		FENCE_TRACE(&fence->scheduled, "signaled from irq context\n");
+		DMA_FENCE_TRACE(&fence->scheduled,
+				"signaled from irq context\n");
 	else
-		FENCE_TRACE(&fence->scheduled, "was already signaled\n");
+		DMA_FENCE_TRACE(&fence->scheduled,
+				"was already signaled\n");
 }
 
 void amd_sched_fence_finished(struct amd_sched_fence *fence)
 {
-	int ret = fence_signal(&fence->finished);
+	int ret = dma_fence_signal(&fence->finished);
 
 	if (!ret)
-		FENCE_TRACE(&fence->finished, "signaled from irq context\n");
+		DMA_FENCE_TRACE(&fence->finished,
+				"signaled from irq context\n");
 	else
-		FENCE_TRACE(&fence->finished, "was already signaled\n");
+		DMA_FENCE_TRACE(&fence->finished,
+				"was already signaled\n");
 }
 
-static const char *amd_sched_fence_get_driver_name(struct fence *fence)
+static const char *amd_sched_fence_get_driver_name(struct dma_fence *fence)
 {
 	return "amd_sched";
 }
 
-static const char *amd_sched_fence_get_timeline_name(struct fence *f)
+static const char *amd_sched_fence_get_timeline_name(struct dma_fence *f)
 {
 	struct amd_sched_fence *fence = to_amd_sched_fence(f);
 	return (const char *)fence->sched->name;
 }
 
-static bool amd_sched_fence_enable_signaling(struct fence *f)
+static bool amd_sched_fence_enable_signaling(struct dma_fence *f)
 {
 	return true;
 }
@@ -114,10 +118,10 @@ static bool amd_sched_fence_enable_signaling(struct fence *f)
  */
 static void amd_sched_fence_free(struct rcu_head *rcu)
 {
-	struct fence *f = container_of(rcu, struct fence, rcu);
+	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
 	struct amd_sched_fence *fence = to_amd_sched_fence(f);
 
-	fence_put(fence->parent);
+	dma_fence_put(fence->parent);
 	kmem_cache_free(sched_fence_slab, fence);
 }
 
@@ -129,7 +133,7 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
-static void amd_sched_fence_release_scheduled(struct fence *f)
+static void amd_sched_fence_release_scheduled(struct dma_fence *f)
 {
 	struct amd_sched_fence *fence = to_amd_sched_fence(f);
 
@@ -143,27 +147,27 @@ static void amd_sched_fence_release_scheduled(struct fence *f)
 *
 * Drop the extra reference from the scheduled fence to the base fence.
 */
-static void amd_sched_fence_release_finished(struct fence *f)
+static void amd_sched_fence_release_finished(struct dma_fence *f)
 {
 	struct amd_sched_fence *fence = to_amd_sched_fence(f);
 
-	fence_put(&fence->scheduled);
+	dma_fence_put(&fence->scheduled);
 }
 
-const struct fence_ops amd_sched_fence_ops_scheduled = {
+const struct dma_fence_ops amd_sched_fence_ops_scheduled = {
 	.get_driver_name = amd_sched_fence_get_driver_name,
 	.get_timeline_name = amd_sched_fence_get_timeline_name,
 	.enable_signaling = amd_sched_fence_enable_signaling,
 	.signaled = NULL,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = amd_sched_fence_release_scheduled,
 };
 
-const struct fence_ops amd_sched_fence_ops_finished = {
+const struct dma_fence_ops amd_sched_fence_ops_finished = {
 	.get_driver_name = amd_sched_fence_get_driver_name,
 	.get_timeline_name = amd_sched_fence_get_timeline_name,
 	.enable_signaling = amd_sched_fence_enable_signaling,
 	.signaled = NULL,
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.release = amd_sched_fence_release_finished,
 };
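For readers unfamiliar with the renamed API, here is a minimal self-contained sketch of the dma_fence pattern the scheduler relies on: initialize a fence on a private context, attach a callback, then signal it. This is an illustration under stated assumptions, not code from the patch: it assumes a kernel providing <linux/dma-fence.h> (v4.10 or later), and every my_* name is hypothetical. The -ENOENT handling mirrors what amd_sched_main() does for amd_sched_process_job().

    #include <linux/dma-fence.h>
    #include <linux/printk.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_fence_lock);

    static const char *my_get_driver_name(struct dma_fence *f)
    {
            return "my_driver";
    }

    static const char *my_get_timeline_name(struct dma_fence *f)
    {
            return "my_timeline";
    }

    /* Like the scheduler's hook: nothing to arm, signaling always works. */
    static bool my_enable_signaling(struct dma_fence *f)
    {
            return true;
    }

    static const struct dma_fence_ops my_fence_ops = {
            .get_driver_name = my_get_driver_name,
            .get_timeline_name = my_get_timeline_name,
            .enable_signaling = my_enable_signaling,
            .wait = dma_fence_default_wait,
            /* no .release: the default frees the fence via kfree_rcu() */
    };

    static void my_cb(struct dma_fence *f, struct dma_fence_cb *cb)
    {
            pr_info("fence on context %llu signaled\n",
                    (unsigned long long)f->context);
    }

    static int my_fence_demo(void)
    {
            struct dma_fence *fence;
            struct dma_fence_cb cb;	/* stack is fine: we signal below */
            int r;

            fence = kzalloc(sizeof(*fence), GFP_KERNEL);
            if (!fence)
                    return -ENOMEM;

            /* Private timeline, first seqno; same shape as the patch's
             * dma_fence_init() calls in amd_sched_fence_create(). */
            dma_fence_init(fence, &my_fence_ops, &my_fence_lock,
                           dma_fence_context_alloc(1), 1);

            /* -ENOENT means "already signaled": run the callback by hand. */
            r = dma_fence_add_callback(fence, &cb, my_cb);
            if (r == -ENOENT)
                    my_cb(fence, &cb);

            dma_fence_signal(fence);	/* invokes my_cb */
            dma_fence_put(fence);		/* drop the initial reference */
            return 0;
    }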