author     Dave Airlie <airlied@redhat.com>   2016-07-08 13:42:41 +1000
committer  Dave Airlie <airlied@redhat.com>   2016-07-08 13:42:41 +1000
commit     6f6e68b383314ab10189f983fead55437c149f32
tree       a9254f00bb2053aa567981c8979c9713d648346c
parent     b33e07731c13f2a9ec5c345b8542cae5adf74235
parent     b1814a1def0564a2a1d3be7fa5bf7243ff899a28
Merge branch 'drm-next-4.8' of git://people.freedesktop.org/~agd5f/linux into drm-next
This is the main 4.8 pull for radeon and amdgpu. Sorry for the delay,
I meant to send this out last week, but I was moving house. Lots of
changes here:
- ATPX improvements for better dGPU power control on PX systems
- New power features for CZ/BR/ST
- Pipelined BO moves and evictions in TTM
- GPU scheduler improvements
- GPU reset improvements
- Overclocking on dGPUs with amdgpu
- Lots of code cleanup
- Bug fixes
* 'drm-next-4.8' of git://people.freedesktop.org/~agd5f/linux: (191 commits)
drm/amd/powerplay: don't add invalid voltage.
drm/amdgpu: add read/write function for GC CAC programming
drm/amd/powerplay: add definitions related to di/dt feature for fiji and polaris.
drm/amd/powerplay: add shared definitions for di/dt feature.
drm/amdgpu: remove gfx8 registers that vary between asics
drm/amd/powerplay: add mvdd dpm support.
drm/amdgpu: get number of shade engine by cgs interface.
drm/amdgpu: remove more of the ring backup code
drm/amd/powerplay: Unify family defines
drm/amdgpu: clean up ring_backup code, no need more
drm/amdgpu: ib test first after gpu reset
drm/amdgpu: recovery hw jobs when gpu reset V3
drm/amdgpu: abstract amdgpu_vm_is_gpu_reset
drm/amdgpu: add a bool to specify if needing vm flush V2
drm/amdgpu: add amd_sched_job_recovery
drm/amdgpu: force completion for gpu reset
drm/amdgpu: block ttm first before parking scheduler
drm/amd: add amd_sched_hw_job_reset
drm/amd: add parent for sched fence
drm/amdgpu: remove evict vram
...
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c')
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_job.c   72
1 file changed, 33 insertions(+), 39 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index f0dafa514fe4..aaee0c8f6731 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -28,21 +28,15 @@
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
-static void amdgpu_job_free_handler(struct work_struct *ws)
+static void amdgpu_job_timedout(struct amd_sched_job *s_job)
 {
-	struct amdgpu_job *job = container_of(ws, struct amdgpu_job, base.work_free_job);
-	amd_sched_job_put(&job->base);
-}
+	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
 
-void amdgpu_job_timeout_func(struct work_struct *work)
-{
-	struct amdgpu_job *job = container_of(work, struct amdgpu_job, base.work_tdr.work);
 	DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
-		  job->base.sched->name,
-		  (uint32_t)atomic_read(&job->ring->fence_drv.last_seq),
-		  job->ring->fence_drv.sync_seq);
-
-	amd_sched_job_put(&job->base);
+		  job->base.sched->name,
+		  atomic_read(&job->ring->fence_drv.last_seq),
+		  job->ring->fence_drv.sync_seq);
+	amdgpu_gpu_reset(job->adev);
 }
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -63,7 +57,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
 	(*job)->vm = vm;
 	(*job)->ibs = (void *)&(*job)[1];
 	(*job)->num_ibs = num_ibs;
-	INIT_WORK(&(*job)->base.work_free_job, amdgpu_job_free_handler);
 
 	amdgpu_sync_create(&(*job)->sync);
 
@@ -86,27 +79,33 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 	return r;
 }
 
-void amdgpu_job_free(struct amdgpu_job *job)
+void amdgpu_job_free_resources(struct amdgpu_job *job)
 {
-	unsigned i;
 	struct fence *f;
+	unsigned i;
+
 	/* use sched fence if available */
-	f = (job->base.s_fence)? &job->base.s_fence->base : job->fence;
+	f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
 
 	for (i = 0; i < job->num_ibs; ++i)
-		amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f);
-	fence_put(job->fence);
+		amdgpu_ib_free(job->adev, &job->ibs[i], f);
+}
 
-	amdgpu_bo_unref(&job->uf_bo);
-	amdgpu_sync_free(&job->sync);
+void amdgpu_job_free_cb(struct amd_sched_job *s_job)
+{
+	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
 
-	if (!job->base.use_sched)
-		kfree(job);
+	fence_put(job->fence);
+	amdgpu_sync_free(&job->sync);
+	kfree(job);
 }
 
-void amdgpu_job_free_func(struct kref *refcount)
+void amdgpu_job_free(struct amdgpu_job *job)
 {
-	struct amdgpu_job *job = container_of(refcount, struct amdgpu_job, base.refcount);
+	amdgpu_job_free_resources(job);
+
+	fence_put(job->fence);
+	amdgpu_sync_free(&job->sync);
 	kfree(job);
 }
 
@@ -114,22 +113,20 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 		      struct amd_sched_entity *entity, void *owner,
 		      struct fence **f)
 {
-	struct fence *fence;
 	int r;
 	job->ring = ring;
 
 	if (!f)
 		return -EINVAL;
 
-	r = amd_sched_job_init(&job->base, &ring->sched,
-			       entity, amdgpu_job_timeout_func,
-			       amdgpu_job_free_func, owner, &fence);
+	r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
 	if (r)
 		return r;
 
 	job->owner = owner;
 	job->ctx = entity->fence_context;
-	*f = fence_get(fence);
+	*f = fence_get(&job->base.s_fence->finished);
+	amdgpu_job_free_resources(job);
 	amd_sched_entity_push_job(&job->base);
 
 	return 0;
@@ -147,8 +144,8 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 		int r;
 
 		r = amdgpu_vm_grab_id(vm, ring, &job->sync,
-				      &job->base.s_fence->base,
-				      &job->vm_id, &job->vm_pd_addr);
+				      &job->base.s_fence->finished,
+				      job);
 		if (r)
 			DRM_ERROR("Error getting VM ID (%d)\n", r);
 
@@ -170,11 +167,7 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 	}
 	job = to_amdgpu_job(sched_job);
 
-	r = amdgpu_sync_wait(&job->sync);
-	if (r) {
-		DRM_ERROR("failed to sync wait (%d)\n", r);
-		return NULL;
-	}
+	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
 
 	trace_amdgpu_sched_run_job(job);
 	r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
@@ -185,14 +178,15 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 	}
 
 err:
+	/* if gpu reset, hw fence will be replaced here */
+	fence_put(job->fence);
 	job->fence = fence;
-	amdgpu_job_free(job);
 	return fence;
 }
 
 const struct amd_sched_backend_ops amdgpu_sched_ops = {
 	.dependency = amdgpu_job_dependency,
 	.run_job = amdgpu_job_run,
-	.begin_job = amd_sched_job_begin,
-	.finish_job = amd_sched_job_finish,
+	.timedout_job = amdgpu_job_timedout,
+	.free_job = amdgpu_job_free_cb
 };
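Note on the scheduler rework visible in this diff: the per-job work items (work_free_job, work_tdr) and the begin_job/finish_job hooks are replaced by two driver callbacks on amd_sched_backend_ops, .timedout_job and .free_job, alongside the existing .dependency and .run_job. The sketch below is not part of the patch; it is a minimal illustration, based only on the callback signatures and the ops table shown above, of how a scheduler client wires these up. All "my_*" identifiers are hypothetical, and the header path is assumed from the in-tree scheduler layout of that era.

/* Illustrative sketch only -- modeled on amdgpu_sched_ops above.
 * Assumes the amd scheduler header (gpu_scheduler.h) provides
 * struct amd_sched_job and struct amd_sched_backend_ops as used in this diff.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fence.h>
#include "gpu_scheduler.h"

struct my_job {
	struct amd_sched_job base;	/* scheduler job, embedded first */
	struct fence *hw_fence;		/* fence signaled by the hardware */
};

/* Return a fence this job still has to wait on, or NULL once it can run. */
static struct fence *my_job_dependency(struct amd_sched_job *s_job)
{
	return NULL;
}

/* Push the job to the ring and hand the hardware fence back to the scheduler. */
static struct fence *my_job_run(struct amd_sched_job *s_job)
{
	struct my_job *job = container_of(s_job, struct my_job, base);

	return job->hw_fence;
}

/* Scheduler-detected timeout; amdgpu_job_timedout() triggers a GPU reset here. */
static void my_job_timedout(struct amd_sched_job *s_job)
{
}

/* Final cleanup; amdgpu_job_free_cb() drops its fences and kfree()s the job. */
static void my_job_free(struct amd_sched_job *s_job)
{
	kfree(container_of(s_job, struct my_job, base));
}

static const struct amd_sched_backend_ops my_sched_ops = {
	.dependency   = my_job_dependency,
	.run_job      = my_job_run,
	.timedout_job = my_job_timedout,
	.free_job     = my_job_free,
};

In the patched file the real implementations of these four callbacks are amdgpu_job_dependency, amdgpu_job_run, amdgpu_job_timedout and amdgpu_job_free_cb, registered in amdgpu_sched_ops at the bottom of the diff.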