From f5617f9dde5ae2466560f7cb008c741e2b88adab Mon Sep 17 00:00:00 2001
From: Chunming Zhou
Date: Thu, 5 Nov 2015 11:41:50 +0800
Subject: drm/amd: add kmem cache for sched fence
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Change-Id: I45bb8ff10ef05dc3b15e31a77fbcf31117705f11
Signed-off-by: Chunming Zhou
Reviewed-by: Christian König
---
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)
(limited to 'drivers/gpu/drm/amd/scheduler/gpu_scheduler.c')

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 89619a5a4289..fe5b3c415f18 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -34,6 +34,9 @@ static struct amd_sched_job *
 amd_sched_entity_pop_job(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
 
+struct kmem_cache *sched_fence_slab;
+atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
+
 /* Initialize a given run queue struct */
 static void amd_sched_rq_init(struct amd_sched_rq *rq)
 {
@@ -450,6 +453,13 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
         init_waitqueue_head(&sched->wake_up_worker);
         init_waitqueue_head(&sched->job_scheduled);
         atomic_set(&sched->hw_rq_count, 0);
+        if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
+                sched_fence_slab = kmem_cache_create(
+                        "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
+                        SLAB_HWCACHE_ALIGN, NULL);
+                if (!sched_fence_slab)
+                        return -ENOMEM;
+        }
 
         /* Each scheduler will run on a seperate kernel thread */
         sched->thread = kthread_run(amd_sched_main, sched, sched->name);
@@ -470,4 +480,6 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
 {
         if (sched->thread)
                 kthread_stop(sched->thread);
+        if (atomic_dec_and_test(&sched_fence_slab_ref))
+                kmem_cache_destroy(sched_fence_slab);
 }
-- cgit
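Since this log is limited to gpu_scheduler.c, the companion hunk that actually uses the new slab (the fence allocation and release in sched_fence.c) is not shown above. A minimal sketch of what that side presumably looks like, based on the exported sched_fence_slab above; the exact function bodies are an assumption, not part of the quoted patch:

        struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity,
                                                       void *owner)
        {
                struct amd_sched_fence *fence;

                /* Allocate from the shared slab instead of a plain kzalloc(). */
                fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
                if (fence == NULL)
                        return NULL;
                /* ... fence_init() plus entity/owner setup as before ... */
                return fence;
        }

        static void amd_sched_fence_release(struct fence *f)
        {
                struct amd_sched_fence *fence = to_amd_sched_fence(f);

                /* Return the object to the slab rather than kfree()ing it. */
                kmem_cache_free(sched_fence_slab, fence);
        }

The sched_fence_slab_ref counter is needed because amd_sched_init()/amd_sched_fini() run once per ring, while the slab must be created exactly once and destroyed only after the last scheduler is torn down.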
From 7034decf6a5b1ff778d83ff9d7ce1f0b404804e4 Mon Sep 17 00:00:00 2001
From: Chunming Zhou
Date: Wed, 11 Nov 2015 14:56:00 +0800
Subject: drm/amdgpu: add command submission workflow tracepoint

OGL needs these tracepoints to investigate performance issues.

Change-Id: I5e58187d061253f7d665dfce8e4e163ba91d3e2b
Signed-off-by: Chunming Zhou
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c          |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c       |  2 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h       | 51 +++++++++++++++++++++++++
 drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | 24 ++++++++++--
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c   |  1 +
 5 files changed, 76 insertions(+), 4 deletions(-)
(limited to 'drivers/gpu/drm/amd/scheduler/gpu_scheduler.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index bf32096b8eb7..2ae73d5232dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -888,7 +888,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                 ttm_eu_fence_buffer_objects(&parser.ticket,
                                 &parser.validated,
                                 &job->base.s_fence->base);
-
+                trace_amdgpu_cs_ioctl(job);
                 mutex_unlock(&job->job_lock);
                 amdgpu_cs_parser_fini_late(&parser);
                 mutex_unlock(&vm->mutex);

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index dcf4a8aca680..67f778f6eedb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -26,6 +26,7 @@
 #include <linux/sched.h>
 #include <drm/drmP.h>
 #include "amdgpu.h"
+#include "amdgpu_trace.h"
 
 static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
 {
@@ -45,6 +46,7 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
         }
         job = to_amdgpu_job(sched_job);
         mutex_lock(&job->job_lock);
+        trace_amdgpu_sched_run_job(job);
         r = amdgpu_ib_schedule(job->adev,
                                job->num_ibs,
                                job->ibs,

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 26e2d50a6f64..8f9834ab1bd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -48,6 +48,57 @@ TRACE_EVENT(amdgpu_cs,
                       __entry->fences)
 );
 
+TRACE_EVENT(amdgpu_cs_ioctl,
+            TP_PROTO(struct amdgpu_job *job),
+            TP_ARGS(job),
+            TP_STRUCT__entry(
+                             __field(struct amdgpu_device *, adev)
+                             __field(struct amd_sched_job *, sched_job)
+                             __field(struct amdgpu_ib *, ib)
+                             __field(struct fence *, fence)
+                             __field(char *, ring_name)
+                             __field(u32, num_ibs)
+                             ),
+
+            TP_fast_assign(
+                           __entry->adev = job->adev;
+                           __entry->sched_job = &job->base;
+                           __entry->ib = job->ibs;
+                           __entry->fence = &job->base.s_fence->base;
+                           __entry->ring_name = job->ibs[0].ring->name;
+                           __entry->num_ibs = job->num_ibs;
+                           ),
+            TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
+                      __entry->adev, __entry->sched_job, __entry->ib,
+                      __entry->fence, __entry->ring_name, __entry->num_ibs)
+);
+
+TRACE_EVENT(amdgpu_sched_run_job,
+            TP_PROTO(struct amdgpu_job *job),
+            TP_ARGS(job),
+            TP_STRUCT__entry(
+                             __field(struct amdgpu_device *, adev)
+                             __field(struct amd_sched_job *, sched_job)
+                             __field(struct amdgpu_ib *, ib)
+                             __field(struct fence *, fence)
+                             __field(char *, ring_name)
+                             __field(u32, num_ibs)
+                             ),
+
+            TP_fast_assign(
+                           __entry->adev = job->adev;
+                           __entry->sched_job = &job->base;
+                           __entry->ib = job->ibs;
+                           __entry->fence = &job->base.s_fence->base;
+                           __entry->ring_name = job->ibs[0].ring->name;
+                           __entry->num_ibs = job->num_ibs;
+                           ),
+            TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
+                      __entry->adev, __entry->sched_job, __entry->ib,
+                      __entry->fence, __entry->ring_name, __entry->num_ibs)
+);
+
+
 TRACE_EVENT(amdgpu_vm_grab_id,
             TP_PROTO(unsigned vmid, int ring),
             TP_ARGS(vmid, ring),
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index 144f50acc971..c89dc777768f 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -16,6 +16,8 @@ TRACE_EVENT(amd_sched_job,
             TP_ARGS(sched_job),
             TP_STRUCT__entry(
                              __field(struct amd_sched_entity *, entity)
+                             __field(struct amd_sched_job *, sched_job)
+                             __field(struct fence *, fence)
                              __field(const char *, name)
                              __field(u32, job_count)
                              __field(int, hw_job_count)
@@ -23,16 +25,32 @@ TRACE_EVENT(amd_sched_job,
 
             TP_fast_assign(
                            __entry->entity = sched_job->s_entity;
+                           __entry->sched_job = sched_job;
+                           __entry->fence = &sched_job->s_fence->base;
                            __entry->name = sched_job->sched->name;
                            __entry->job_count = kfifo_len(
                                    &sched_job->s_entity->job_queue) / sizeof(sched_job);
                            __entry->hw_job_count = atomic_read(
                                    &sched_job->sched->hw_rq_count);
                            ),
-            TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d",
-                      __entry->entity, __entry->name, __entry->job_count,
-                      __entry->hw_job_count)
+            TP_printk("entity=%p, sched job=%p, fence=%p, ring=%s, job count:%u, hw job count:%d",
+                      __entry->entity, __entry->sched_job, __entry->fence, __entry->name,
+                      __entry->job_count, __entry->hw_job_count)
 );
+
+TRACE_EVENT(amd_sched_process_job,
+            TP_PROTO(struct amd_sched_fence *fence),
+            TP_ARGS(fence),
+            TP_STRUCT__entry(
+                    __field(struct fence *, fence)
+                    ),
+
+            TP_fast_assign(
+                    __entry->fence = &fence->base;
+                    ),
+            TP_printk("fence=%p signaled", __entry->fence)
+);
+
 #endif
 
 /* This part must be outside protection */

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index fe5b3c415f18..b8925fea577c 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -346,6 +346,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
                 list_del_init(&s_fence->list);
                 spin_unlock_irqrestore(&sched->fence_list_lock, flags);
         }
+        trace_amd_sched_process_job(s_fence);
         fence_put(&s_fence->base);
         wake_up_interruptible(&sched->wake_up_worker);
 }
-- cgit
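The two new events above, amdgpu_cs_ioctl and amdgpu_sched_run_job, carry identical field lists, assignments, and format strings. The standard tracepoint idiom for such duplication is a shared event class; a sketch of how they could be folded together (the class name amdgpu_job_template is hypothetical, this is not part of the patch):

        DECLARE_EVENT_CLASS(amdgpu_job_template,
                    TP_PROTO(struct amdgpu_job *job),
                    TP_ARGS(job),
                    TP_STRUCT__entry(
                                     __field(struct amdgpu_device *, adev)
                                     __field(struct amd_sched_job *, sched_job)
                                     __field(struct amdgpu_ib *, ib)
                                     __field(struct fence *, fence)
                                     __field(char *, ring_name)
                                     __field(u32, num_ibs)
                                     ),
                    TP_fast_assign(
                                   __entry->adev = job->adev;
                                   __entry->sched_job = &job->base;
                                   __entry->ib = job->ibs;
                                   __entry->fence = &job->base.s_fence->base;
                                   __entry->ring_name = job->ibs[0].ring->name;
                                   __entry->num_ibs = job->num_ibs;
                                   ),
                    TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
                              __entry->adev, __entry->sched_job, __entry->ib,
                              __entry->fence, __entry->ring_name, __entry->num_ibs)
        );

        DEFINE_EVENT(amdgpu_job_template, amdgpu_cs_ioctl,
                    TP_PROTO(struct amdgpu_job *job),
                    TP_ARGS(job)
        );

        DEFINE_EVENT(amdgpu_job_template, amdgpu_sched_run_job,
                    TP_PROTO(struct amdgpu_job *job),
                    TP_ARGS(job)
        );

Each event stays individually switchable at runtime, e.g. through /sys/kernel/debug/tracing/events/amdgpu/amdgpu_cs_ioctl/enable, and the pair brackets a submission nicely: amdgpu_cs_ioctl fires when userspace hands the job in, amdgpu_sched_run_job when the scheduler actually sends it to the hardware.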
From 4a562283376197722b295d27633134401bbc80f5 Mon Sep 17 00:00:00 2001
From: Christian König
Date: Fri, 6 Nov 2015 14:09:21 +0100
Subject: drm/amdgpu: cleanup scheduler fence get/put dance
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The code was correct, but taking two references when ownership is handed
over linearly is awkward and just adds overhead.

Signed-off-by: Christian König
Reviewed-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c     | 1 -
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 1 -
 2 files changed, 2 deletions(-)
(limited to 'drivers/gpu/drm/amd/scheduler/gpu_scheduler.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 67f778f6eedb..8ef9e4415fcc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -64,7 +64,6 @@ err:
                 job->free_job(job);
 
         mutex_unlock(&job->job_lock);
-        fence_put(&job->base.s_fence->base);
         kfree(job);
         return fence ? &fence->base : NULL;
 }

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index b8925fea577c..ccb7c1554f5e 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -285,7 +285,6 @@ int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
         if (!fence)
                 return -ENOMEM;
 
-        fence_get(&fence->base);
         sched_job->s_fence = fence;
 
         wait_event(entity->sched->job_scheduled,
-- cgit
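The removed get/put pair illustrates a general rule: a reference-count round trip around a hand-off is only needed when the sender keeps using the object afterwards. When ownership moves on linearly, the single reference simply travels with the object. A sketch of the two conventions (consume_fence() is a hypothetical stand-in for the scheduler taking over the fence, not a real function):

        /* Refcount dance (removed): sender and receiver each hold a reference. */
        fence_get(&fence->base);        /* extra reference for the receiver */
        consume_fence(fence);           /* receiver does fence_put() when done */
        /* ... the sender may still use fence here ... */
        fence_put(&fence->base);        /* drop the sender's reference */

        /* Linear ownership (now): the one reference travels with the object. */
        consume_fence(fence);
        /* the sender must not touch fence past this point */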
From e284022163716ecf11c37fd1057c35d689ef2c11 Mon Sep 17 00:00:00 2001
From: Christian König
Date: Thu, 5 Nov 2015 19:49:48 +0100
Subject: drm/amdgpu: fix incorrect mutex usage v3
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Before this patch the scheduler fence was created when we pushed the job
into the queue, so we could only get the fence after pushing it.

Until now the mutex was necessary to prevent the thread pushing the jobs
to the hardware from running faster than the thread pushing the jobs into
the queue. Otherwise the thread pushing jobs into the queue could have
accessed possibly freed memory when it tried to get a reference to the
fence.

So what you get in the end is thread A:

mutex_lock(&job->lock);
...
Kick off thread B.
...
mutex_unlock(&job->lock);

And thread B:

mutex_lock(&job->lock);
...
mutex_unlock(&job->lock);
kfree(job);

I'm actually not sure if I'm still up to date on this, but this usage
pattern used to be disallowed with mutexes. See here as well:
https://lwn.net/Articles/575460/

v2: remove unrelated changes, fix missing owner
v3: rebased, add more commit message

Signed-off-by: Christian König
Reviewed-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h           |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c        | 43 +++++++++++++++------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c     | 27 +++++++----------
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 10 +------
 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |  3 +-
 5 files changed, 37 insertions(+), 48 deletions(-)
(limited to 'drivers/gpu/drm/amd/scheduler/gpu_scheduler.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 7b02e3455172..0f187027c753 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1225,7 +1225,7 @@ struct amdgpu_job {
         struct amdgpu_device    *adev;
         struct amdgpu_ib        *ibs;
         uint32_t                num_ibs;
-        struct mutex            job_lock;
+        void                    *owner;
         struct amdgpu_user_fence uf;
         int (*free_job)(struct amdgpu_job *job);
 };

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 2ae73d5232dd..44cf977ae4f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -845,8 +845,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                 goto out;
 
         if (amdgpu_enable_scheduler && parser.num_ibs) {
-                struct amdgpu_job *job;
                 struct amdgpu_ring * ring = parser.ibs->ring;
+                struct amd_sched_fence *fence;
+                struct amdgpu_job *job;
 
                 job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
                 if (!job) {
@@ -859,37 +860,41 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                 job->adev = parser.adev;
                 job->ibs = parser.ibs;
                 job->num_ibs = parser.num_ibs;
-                job->base.owner = parser.filp;
-                mutex_init(&job->job_lock);
+                job->owner = parser.filp;
+                job->free_job = amdgpu_cs_free_job;
+
                 if (job->ibs[job->num_ibs - 1].user) {
                         job->uf = parser.uf;
                         job->ibs[job->num_ibs - 1].user = &job->uf;
                         parser.uf.bo = NULL;
                 }
 
-                parser.ibs = NULL;
-                parser.num_ibs = 0;
-
-                job->free_job = amdgpu_cs_free_job;
-                mutex_lock(&job->job_lock);
-                r = amd_sched_entity_push_job(&job->base);
-                if (r) {
-                        mutex_unlock(&job->job_lock);
+                fence = amd_sched_fence_create(job->base.s_entity,
+                                               parser.filp);
+                if (!fence) {
+                        r = -ENOMEM;
                         amdgpu_cs_free_job(job);
                         kfree(job);
                         goto out;
                 }
-                cs->out.handle =
-                        amdgpu_ctx_add_fence(parser.ctx, ring,
-                                             &job->base.s_fence->base);
+                job->base.s_fence = fence;
+                fence_get(&fence->base);
+
+                cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring,
+                                                      &fence->base);
                 job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
 
-                list_sort(NULL, &parser.validated, cmp_size_smaller_first);
-                ttm_eu_fence_buffer_objects(&parser.ticket,
-                                            &parser.validated,
-                                            &job->base.s_fence->base);
+                parser.ibs = NULL;
+                parser.num_ibs = 0;
+
                 trace_amdgpu_cs_ioctl(job);
-                mutex_unlock(&job->job_lock);
+                amd_sched_entity_push_job(&job->base);
+
+                list_sort(NULL, &parser.validated, cmp_size_smaller_first);
+                ttm_eu_fence_buffer_objects(&parser.ticket, &parser.validated,
+                                            &fence->base);
+                fence_put(&fence->base);
+
                 amdgpu_cs_parser_fini_late(&parser);
                 mutex_unlock(&vm->mutex);
                 return 0;

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 8ef9e4415fcc..438c05254695 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -45,12 +45,8 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
                 return NULL;
         }
         job = to_amdgpu_job(sched_job);
-        mutex_lock(&job->job_lock);
         trace_amdgpu_sched_run_job(job);
-        r = amdgpu_ib_schedule(job->adev,
-                               job->num_ibs,
-                               job->ibs,
-                               job->base.owner);
+        r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, job->owner);
         if (r) {
                 DRM_ERROR("Error scheduling IBs (%d)\n", r);
                 goto err;
@@ -63,7 +59,6 @@ err:
         if (job->free_job)
                 job->free_job(job);
 
-        mutex_unlock(&job->job_lock);
         kfree(job);
         return fence ? &fence->base : NULL;
 }
@@ -89,21 +84,19 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
                         return -ENOMEM;
                 job->base.sched = &ring->sched;
                 job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
+                job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
+                if (!job->base.s_fence) {
+                        kfree(job);
+                        return -ENOMEM;
+                }
+                *f = fence_get(&job->base.s_fence->base);
+
                 job->adev = adev;
                 job->ibs = ibs;
                 job->num_ibs = num_ibs;
-                job->base.owner = owner;
-                mutex_init(&job->job_lock);
+                job->owner = owner;
                 job->free_job = free_job;
-                mutex_lock(&job->job_lock);
-                r = amd_sched_entity_push_job(&job->base);
-                if (r) {
-                        mutex_unlock(&job->job_lock);
-                        kfree(job);
-                        return r;
-                }
-                *f = fence_get(&job->base.s_fence->base);
-                mutex_unlock(&job->job_lock);
+                amd_sched_entity_push_job(&job->base);
         } else {
                 r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
                 if (r)

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index ccb7c1554f5e..ea30d6ad4c13 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -276,21 +276,13 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
  *
  * Returns 0 for success, negative error code otherwise.
  */
-int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
+void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 {
         struct amd_sched_entity *entity = sched_job->s_entity;
-        struct amd_sched_fence *fence = amd_sched_fence_create(
-                entity, sched_job->owner);
-
-        if (!fence)
-                return -ENOMEM;
-
-        sched_job->s_fence = fence;
 
         wait_event(entity->sched->job_scheduled,
                    amd_sched_entity_in(sched_job));
         trace_amd_sched_job(sched_job);
-        return 0;
 }
 
 /**

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 4d05ca6fb057..939692b14f4b 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -79,7 +79,6 @@ struct amd_sched_job {
         struct amd_gpu_scheduler        *sched;
         struct amd_sched_entity         *s_entity;
         struct amd_sched_fence          *s_fence;
-        void                            *owner;
 };
 
 extern const struct fence_ops amd_sched_fence_ops;
@@ -131,7 +130,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                           uint32_t jobs);
 void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                            struct amd_sched_entity *entity);
-int amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
 struct amd_sched_fence *amd_sched_fence_create(
         struct amd_sched_entity *s_entity, void *owner);
-- cgit
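Taken together, the series leaves the submission path with a simple contract: create the scheduler fence while you still own the job, take your own reference before pushing, and treat the job as gone the moment it is pushed. A condensed sketch of the resulting flow, based on the amdgpu_sched_ib_submit_kernel_helper() hunk above (error handling trimmed, surrounding setup elided):

        /* 1. Create the fence before the job becomes visible to the scheduler. */
        job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
        if (!job->base.s_fence) {
                kfree(job);
                return -ENOMEM;
        }

        /* 2. Take the caller's own reference while the job is still ours. */
        *f = fence_get(&job->base.s_fence->base);

        /* 3. Hand the job over; it may run and be freed before this returns. */
        amd_sched_entity_push_job(&job->base);

        /* 4. From here on, only the fence reference in *f may be used. */

No job_lock is needed any more because nothing is read from the job after the push, and amd_sched_entity_push_job() can no longer fail, since its only failure case (the fence allocation) has moved to the caller.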