author | Dave Airlie <airlied@redhat.com> | 2015-08-20 09:40:49 +1000
---|---|---
committer | Dave Airlie <airlied@redhat.com> | 2015-08-20 09:40:49 +1000
commit | e2a8986f3e287dc036ce1b9452d7b9e2d8839f2b |
tree | 3459b59ade856c84a50539a894edc237bf7636da /drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c |
parent | 294947a5c7f6d228b70fcc51a89527e74a38a2c5 |
parent | 05906dec7d7daf197b9b773295c95ad6b9af2a5a |
Merge branch 'drm-next-4.3' of git://people.freedesktop.org/~agd5f/linux into drm-next
amdgpu and radeon changes for 4.3. Highlights:
- Fiji support for amdgpu.
- CGS support for amdgpu. This is a new driver
internal cross-component API.
- Initial GPU scheduler for amdgpu. Still disabled
  by default (see the note after this list).
- Lots of bug fixes and optimizations
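The scheduler is gated by the `amdgpu_enable_scheduler` flag that also appears in the `amdgpu_sync_rings()` hunk of the diff below. As a rough sketch of how an off-by-default toggle like this is typically wired up as a module parameter (the real declaration lives in amdgpu_drv.c and may differ in wording; treat this as an assumption, not the exact upstream code):

```c
#include <linux/module.h>

/* Sketch only: expose the scheduler toggle as a read-only module
 * parameter (perm 0444), defaulting to off as the log above says. */
int amdgpu_enable_scheduler = 0;
module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444);
MODULE_PARM_DESC(enable_scheduler,
                 "enable SW GPU scheduler (1 = enable, 0 = disable (default))");
```

Wired up this way, booting with `amdgpu.enable_scheduler=1` would turn the feature on; in `amdgpu_sync_rings()` below, a non-zero flag bypasses the semaphore path entirely and waits on fences manually.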
* 'drm-next-4.3' of git://people.freedesktop.org/~agd5f/linux: (130 commits)
drm/amdgpu: wait on page directory changes. v2
drm/amdgpu: Select BACKLIGHT_LCD_SUPPORT
drm/radeon: Select BACKLIGHT_LCD_SUPPORT
drm/amdgpu: cleanup scheduler rq handling v2
drm/amdgpu: move prepare work out of scheduler to cs_ioctl
drm/amdgpu: fix unnecessary wake up
drm/amdgpu: fix duplicated mapping invoke bug
drm/amdgpu: drop bo_list_clone when no scheduler
drm/amdgpu: disable GPU reset by default
drm/amdgpu: fix type mismatch error
drm/amdgpu: add reference for **fence
drm/amdgpu: fix waiting for all fences before flipping
drm/amdgpu: fix UVD return code checking
drm/amdgpu: remove scheduler fence list v2
drm/amdgpu: remove amd_sched_wait_emit v2
drm/amdgpu: remove unnecessary scheduler fence callbacks
drm/amdgpu: fix scheduler fence implementation
drm/amdgpu: don't grab dev->struct_mutex in pm functions
drm/amdgpu: Don't take dev->struct_mutex in bo_force_delete
drm/radeon: Don't take dev->struct_mutex in pm functions
...
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 58
1 file changed, 36 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 21accbdd0a1a..7cb711fc1ee2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -53,20 +53,24 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
 }
 
 /**
- * amdgpu_sync_fence - use the semaphore to sync to a fence
+ * amdgpu_sync_fence - remember to sync to this fence
  *
  * @sync: sync object to add fence to
  * @fence: fence to sync to
  *
- * Sync to the fence using the semaphore objects
  */
-void amdgpu_sync_fence(struct amdgpu_sync *sync,
-                       struct amdgpu_fence *fence)
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+                      struct fence *f)
 {
+        struct amdgpu_fence *fence;
         struct amdgpu_fence *other;
 
-        if (!fence)
-                return;
+        if (!f)
+                return 0;
+
+        fence = to_amdgpu_fence(f);
+        if (!fence || fence->ring->adev != adev)
+                return fence_wait(f, true);
 
         other = sync->sync_to[fence->ring->idx];
         sync->sync_to[fence->ring->idx] = amdgpu_fence_ref(
@@ -79,6 +83,8 @@ void amdgpu_sync_fence(struct amdgpu_sync *sync,
                         amdgpu_fence_later(fence, other));
                 amdgpu_fence_unref(&other);
         }
+
+        return 0;
 }
 
 /**
@@ -106,11 +112,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 
         /* always sync to the exclusive fence */
         f = reservation_object_get_excl(resv);
-        fence = f ? to_amdgpu_fence(f) : NULL;
-        if (fence && fence->ring->adev == adev)
-                amdgpu_sync_fence(sync, fence);
-        else if (f)
-                r = fence_wait(f, true);
+        r = amdgpu_sync_fence(adev, sync, f);
 
         flist = reservation_object_get_list(resv);
         if (!flist || r)
@@ -121,14 +123,26 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
                                               reservation_object_held(resv));
                 fence = f ? to_amdgpu_fence(f) : NULL;
                 if (fence && fence->ring->adev == adev) {
-                        if (fence->owner != owner ||
-                            fence->owner == AMDGPU_FENCE_OWNER_UNDEFINED)
-                                amdgpu_sync_fence(sync, fence);
-                } else if (f) {
-                        r = fence_wait(f, true);
-                        if (r)
-                                break;
+                        /* VM updates are only interesting
+                         * for other VM updates and moves.
+                         */
+                        if ((owner != AMDGPU_FENCE_OWNER_MOVE) &&
+                            (fence->owner != AMDGPU_FENCE_OWNER_MOVE) &&
+                            ((owner == AMDGPU_FENCE_OWNER_VM) !=
+                             (fence->owner == AMDGPU_FENCE_OWNER_VM)))
+                                continue;
+
+                        /* Ignore fence from the same owner as
+                         * long as it isn't undefined.
+                         */
+                        if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
+                            fence->owner == owner)
+                                continue;
                 }
+
+                r = amdgpu_sync_fence(adev, sync, f);
+                if (r)
+                        break;
         }
         return r;
 }
@@ -164,9 +178,9 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
                         return -EINVAL;
                 }
 
-                if (count >= AMDGPU_NUM_SYNCS) {
+                if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) {
                         /* not enough room, wait manually */
-                        r = amdgpu_fence_wait(fence, false);
+                        r = fence_wait(&fence->base, false);
                         if (r)
                                 return r;
                         continue;
@@ -186,7 +200,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
                 if (!amdgpu_semaphore_emit_signal(other, semaphore)) {
                         /* signaling wasn't successful wait manually */
                         amdgpu_ring_undo(other);
-                        r = amdgpu_fence_wait(fence, false);
+                        r = fence_wait(&fence->base, false);
                         if (r)
                                 return r;
                         continue;
@@ -196,7 +210,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
                 if (!amdgpu_semaphore_emit_wait(ring, semaphore)) {
                         /* waiting wasn't successful wait manually */
                         amdgpu_ring_undo(other);
-                        r = amdgpu_fence_wait(fence, false);
+                        r = fence_wait(&fence->base, false);
                         if (r)
                                 return r;
                         continue;
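Two behavioral changes stand out in this file. First, `amdgpu_sync_fence()` now accepts a generic `struct fence` and falls back to an inline `fence_wait()` for fences that do not belong to this device, which is how foreign fences get handled. Second, the shared-fence loop in `amdgpu_sync_resv()` skips fences based on their owner, but only for amdgpu fences from the same device. The standalone C program below models just that skip decision; the enum names are hypothetical stand-ins for the kernel's `AMDGPU_FENCE_OWNER_*` values, so read it as an illustration rather than kernel code:

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's AMDGPU_FENCE_OWNER_* values. */
enum fence_owner {
        OWNER_UNDEFINED,  /* AMDGPU_FENCE_OWNER_UNDEFINED */
        OWNER_VM,         /* AMDGPU_FENCE_OWNER_VM: page-table update */
        OWNER_MOVE,       /* AMDGPU_FENCE_OWNER_MOVE: buffer move */
        OWNER_CLIENT,     /* an ordinary command submission */
};

/* Mirror of the two "continue" conditions in the amdgpu_sync_resv hunk:
 * return true when the shared fence can be ignored by this caller. */
static bool can_skip_fence(enum fence_owner caller, enum fence_owner fence)
{
        /* VM updates are only interesting for other VM updates and moves:
         * skip when exactly one side is a VM update and neither side is
         * a move. */
        if (caller != OWNER_MOVE && fence != OWNER_MOVE &&
            (caller == OWNER_VM) != (fence == OWNER_VM))
                return true;

        /* Ignore fences from the same owner, unless it is undefined. */
        if (caller != OWNER_UNDEFINED && fence == caller)
                return true;

        return false;
}

int main(void)
{
        /* A VM update need not wait for a client's rendering... */
        printf("VM vs CLIENT: skip=%d\n",
               can_skip_fence(OWNER_VM, OWNER_CLIENT));
        /* ...but it must still wait for a buffer move. */
        printf("VM vs MOVE:   skip=%d\n",
               can_skip_fence(OWNER_VM, OWNER_MOVE));
        return 0;
}
```

Every fence that survives this filter, and every fence from a foreign device, ends up in `amdgpu_sync_fence()`, which either records it per ring or waits on it immediately.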