Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 128
1 file changed, 50 insertions(+), 78 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index b2eae86bf906..f3c459b7c0bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -299,12 +299,15 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 
         cancel_delayed_work_sync(&adev->uvd.idle_work);
 
-        for (i = 0; i < adev->uvd.max_handles; ++i)
-                if (atomic_read(&adev->uvd.handles[i]))
-                        break;
+        /* only valid for physical mode */
+        if (adev->asic_type < CHIP_POLARIS10) {
+                for (i = 0; i < adev->uvd.max_handles; ++i)
+                        if (atomic_read(&adev->uvd.handles[i]))
+                                break;
 
-        if (i == AMDGPU_MAX_UVD_HANDLES)
-                return 0;
+                if (i == adev->uvd.max_handles)
+                        return 0;
+        }
 
         size = amdgpu_bo_size(adev->uvd.vcpu_bo);
         ptr = adev->uvd.cpu_addr;
@@ -952,37 +955,28 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
                                bool direct, struct dma_fence **fence)
 {
-        struct ttm_operation_ctx ctx = { true, false };
-        struct ttm_validate_buffer tv;
-        struct ww_acquire_ctx ticket;
-        struct list_head head;
+        struct amdgpu_device *adev = ring->adev;
+        struct dma_fence *f = NULL;
         struct amdgpu_job *job;
         struct amdgpu_ib *ib;
-        struct dma_fence *f = NULL;
-        struct amdgpu_device *adev = ring->adev;
-        uint64_t addr;
         uint32_t data[4];
-        int i, r;
-
-        memset(&tv, 0, sizeof(tv));
-        tv.bo = &bo->tbo;
-
-        INIT_LIST_HEAD(&head);
-        list_add(&tv.head, &head);
+        uint64_t addr;
+        long r;
+        int i;
 
-        r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
-        if (r)
-                return r;
+        amdgpu_bo_kunmap(bo);
+        amdgpu_bo_unpin(bo);
 
         if (!ring->adev->uvd.address_64_bit) {
+                struct ttm_operation_ctx ctx = { true, false };
+
                 amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
                 amdgpu_uvd_force_into_uvd_segment(bo);
+                r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+                if (r)
+                        goto err;
         }
 
-        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-        if (r)
-                goto err;
-
         r = amdgpu_job_alloc_with_ib(adev, 64, &job);
         if (r)
                 goto err;
@@ -1014,6 +1008,14 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
         ib->length_dw = 16;
 
         if (direct) {
+                r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+                                                        true, false,
+                                                        msecs_to_jiffies(10));
+                if (r == 0)
+                        r = -ETIMEDOUT;
+                if (r < 0)
+                        goto err_free;
+
                 r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
                 job->fence = dma_fence_get(f);
                 if (r)
@@ -1021,17 +1023,23 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
                 amdgpu_job_free(job);
         } else {
+                r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
+                                     AMDGPU_FENCE_OWNER_UNDEFINED, false);
+                if (r)
+                        goto err_free;
+
                 r = amdgpu_job_submit(job, ring, &adev->uvd.entity,
                                       AMDGPU_FENCE_OWNER_UNDEFINED, &f);
                 if (r)
                         goto err_free;
         }
 
-        ttm_eu_fence_buffer_objects(&ticket, &head, f);
+        amdgpu_bo_fence(bo, f, false);
+        amdgpu_bo_unreserve(bo);
+        amdgpu_bo_unref(&bo);
 
         if (fence)
                 *fence = dma_fence_get(f);
-        amdgpu_bo_unref(&bo);
         dma_fence_put(f);
 
         return 0;
 
@@ -1040,7 +1048,8 @@ err_free:
         amdgpu_job_free(job);
 
 err:
-        ttm_eu_backoff_reservation(&ticket, &head);
+        amdgpu_bo_unreserve(bo);
+        amdgpu_bo_unref(&bo);
         return r;
 }
 
@@ -1051,31 +1060,16 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                               struct dma_fence **fence)
 {
         struct amdgpu_device *adev = ring->adev;
-        struct amdgpu_bo *bo;
+        struct amdgpu_bo *bo = NULL;
         uint32_t *msg;
         int r, i;
 
-        r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
-                             AMDGPU_GEM_DOMAIN_VRAM,
-                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-                             AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-                             NULL, NULL, 0, &bo);
+        r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
+                                      AMDGPU_GEM_DOMAIN_VRAM,
+                                      &bo, NULL, (void **)&msg);
         if (r)
                 return r;
 
-        r = amdgpu_bo_reserve(bo, false);
-        if (r) {
-                amdgpu_bo_unref(&bo);
-                return r;
-        }
-
-        r = amdgpu_bo_kmap(bo, (void **)&msg);
-        if (r) {
-                amdgpu_bo_unreserve(bo);
-                amdgpu_bo_unref(&bo);
-                return r;
-        }
-
         /* stitch together an UVD create msg */
         msg[0] = cpu_to_le32(0x00000de4);
         msg[1] = cpu_to_le32(0x00000000);
@@ -1091,9 +1085,6 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
         for (i = 11; i < 1024; ++i)
                 msg[i] = cpu_to_le32(0x0);
 
-        amdgpu_bo_kunmap(bo);
-        amdgpu_bo_unreserve(bo);
-
         return amdgpu_uvd_send_msg(ring, bo, true, fence);
 }
 
@@ -1101,31 +1092,16 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                                bool direct, struct dma_fence **fence)
 {
         struct amdgpu_device *adev = ring->adev;
-        struct amdgpu_bo *bo;
+        struct amdgpu_bo *bo = NULL;
         uint32_t *msg;
         int r, i;
 
-        r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
-                             AMDGPU_GEM_DOMAIN_VRAM,
-                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-                             AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-                             NULL, NULL, 0, &bo);
+        r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
+                                      AMDGPU_GEM_DOMAIN_VRAM,
+                                      &bo, NULL, (void **)&msg);
         if (r)
                 return r;
 
-        r = amdgpu_bo_reserve(bo, false);
-        if (r) {
-                amdgpu_bo_unref(&bo);
-                return r;
-        }
-
-        r = amdgpu_bo_kmap(bo, (void **)&msg);
-        if (r) {
-                amdgpu_bo_unreserve(bo);
-                amdgpu_bo_unref(&bo);
-                return r;
-        }
-
         /* stitch together an UVD destroy msg */
         msg[0] = cpu_to_le32(0x00000de4);
         msg[1] = cpu_to_le32(0x00000002);
@@ -1134,9 +1110,6 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
         for (i = 4; i < 1024; ++i)
                 msg[i] = cpu_to_le32(0x0);
 
-        amdgpu_bo_kunmap(bo);
-        amdgpu_bo_unreserve(bo);
-
         return amdgpu_uvd_send_msg(ring, bo, direct, fence);
 }
 
@@ -1146,9 +1119,6 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
                 container_of(work, struct amdgpu_device, uvd.idle_work.work);
         unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
 
-        if (amdgpu_sriov_vf(adev))
-                return;
-
         if (fences == 0) {
                 if (adev->pm.dpm_enabled) {
                         amdgpu_dpm_enable_uvd(adev, false);
@@ -1168,11 +1138,12 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 {
         struct amdgpu_device *adev = ring->adev;
-        bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
+        bool set_clocks;
 
         if (amdgpu_sriov_vf(adev))
                 return;
 
+        set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
         if (set_clocks) {
                 if (adev->pm.dpm_enabled) {
                         amdgpu_dpm_enable_uvd(adev, true);
                 } else {
@@ -1188,7 +1159,8 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 
 void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
 {
-        schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
+        if (!amdgpu_sriov_vf(ring->adev))
+                schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
 }
 
 /**
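
Note on the helper the diff switches to: amdgpu_bo_create_reserved() collapses the old amdgpu_bo_create() + amdgpu_bo_reserve() + amdgpu_bo_kmap() sequence into one call with a single unwind path, so amdgpu_uvd_get_create_msg()/amdgpu_uvd_get_destroy_msg() receive the BO already reserved and mapped, and amdgpu_uvd_send_msg() now owns the teardown (kunmap, unpin, fence, unreserve, unref). The sketch below is not amdgpu code; it is a minimal userspace model of that consolidation pattern, with hypothetical buf_* names and a pthread mutex standing in for the TTM reservation lock.

#include <pthread.h>
#include <stdlib.h>

/* Toy buffer object: the mutex models the TTM reservation and
 * cpu_addr models the kernel mapping returned by "kmap". */
struct buf {
        pthread_mutex_t lock;
        void *cpu_addr;
        size_t size;
};

static int buf_create(size_t size, struct buf **out)
{
        struct buf *b = calloc(1, sizeof(*b));

        if (!b)
                return -1;
        pthread_mutex_init(&b->lock, NULL);
        b->size = size;
        *out = b;
        return 0;
}

static void buf_unref(struct buf **b)
{
        pthread_mutex_destroy(&(*b)->lock);
        free((*b)->cpu_addr);
        free(*b);
        *b = NULL;
}

/* The consolidated helper: create + reserve + map in one call.  On
 * success the buffer comes back locked and mapped (the contract
 * amdgpu_bo_create_reserved() gives its callers); on failure every
 * step is rolled back and the caller checks a single return value. */
static int buf_create_reserved(size_t size, struct buf **out, void **map)
{
        struct buf *b;

        if (buf_create(size, &b))
                return -1;
        pthread_mutex_lock(&b->lock);           /* "reserve" */
        b->cpu_addr = malloc(size);             /* "kmap" */
        if (!b->cpu_addr) {
                pthread_mutex_unlock(&b->lock);
                buf_unref(&b);
                return -1;
        }
        *map = b->cpu_addr;
        *out = b;
        return 0;
}

int main(void)
{
        struct buf *bo = NULL;
        unsigned int *msg;

        /* Caller side, mirroring the reworked get_create_msg():
         * one call, one error check, then write the message. */
        if (buf_create_reserved(1024, &bo, (void **)&msg))
                return 1;
        msg[0] = 0xde4;

        /* Consumer side, mirroring the reworked send_msg(): the
         * unlock models amdgpu_bo_unreserve() before the final unref. */
        pthread_mutex_unlock(&bo->lock);
        buf_unref(&bo);
        return 0;
}

The payoff is visible in the diffstat above: each message helper sheds two open-coded error-handling blocks and its kunmap/unreserve tail, which is where most of the 78 deleted lines come from.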