author | Dave Airlie <airlied@redhat.com> | 2018-01-09 10:09:13 +1000
committer | Dave Airlie <airlied@redhat.com> | 2018-01-09 10:09:13 +1000
commit | bd3c0094a143300b74f3cc8c9cf2b21ed686047f (patch)
tree | cb97fbd1f0a7eed1c3bb2d3e8b9358ee90bf0780 /drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
parent | b0caa1333b6d2d928a00304e9fb6674526c37b79 (diff)
parent | 104bd2ca1124dfd9aa904d5f5a96253ef2b580f6 (diff)
Merge branch 'drm-next-4.16' of git://people.freedesktop.org/~agd5f/linux into drm-next
Last few updates for 4.16:
- Misc fixes for amdgpu
- Enable swapout for reserved BOs during allocation for ttm
- Misc cleanups for ttm (a short operation-context sketch follows the commit list below)
* 'drm-next-4.16' of git://people.freedesktop.org/~agd5f/linux: (24 commits)
drm/amdgpu: Correct the IB size of bo update mapping.
drm/ttm: enable swapout for reserved BOs during allocation
drm/ttm: add new function to check if bo is allowable to evict or swapout
drm/ttm: use an operation ctx for ttm_tt_bind
drm/ttm: use an operation ctx for ttm_tt_populate in ttm_bo_driver (v2)
drm/ttm: use an operation ctx for ttm_mem_global_alloc_page
drm/ttm: use an operation ctx for ttm_mem_global_alloc
drm/ttm: call ttm_bo_swapout directly when ttm shrink
drm/vmwgfx: remove the default io_mem_pfn set
drm/virtio: remove the default io_mem_pfn set
drm/radeon: remove the default io_mem_pfn set
drm/qxl: remove the default io_mem_pfn set
drm/nouveau: remove the default io_mem_pfn set
drm/mgag200: remove the default io_mem_pfn set
drm/cirrus: remove the default io_mem_pfn set
drm/bochs: remove the default io_mem_pfn set
drm/ast: remove the default io_mem_pfn set
drm/ttm: add ttm_bo_io_mem_pfn to check io_mem_pfn
drm/amdgpu: fix VM faults with per VM BOs
drm/ttm: drop the spin in delayed delete if the trylock doesn't work
...
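Several of the ttm commits listed above thread a struct ttm_operation_ctx through paths that previously took separate interruptible/no_wait_gpu flags (ttm_tt_bind, ttm_tt_populate, ttm_mem_global_alloc). The sketch below is illustrative only, assuming the 4.16-era ttm_operation_ctx fields and ttm_bo_validate() signature; example_pin_bo and its placement argument are hypothetical and not part of this series.

```c
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

/*
 * Hypothetical caller showing the ttm_operation_ctx pattern: the caller
 * states its wait/interrupt policy once, and the same context is handed
 * down the stack (the series above extends this to ttm_tt_bind(),
 * ttm_tt_populate() and the global memory accounting).
 */
static int example_pin_bo(struct ttm_buffer_object *bo,
			  struct ttm_placement *placement)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = true,	/* waits may be interrupted by signals */
		.no_wait_gpu = false,	/* waiting for the GPU is allowed */
	};

	/* Validation, eviction and swapout decisions all see the same ctx. */
	return ttm_bo_validate(bo, placement, &ctx);
}
```

The point of the conversion is that new policy (such as allowing swapout of reserved BOs during allocation) can be added as a context field instead of growing every function signature again.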
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 26
1 file changed, 13 insertions, 13 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 86123448a8ff..59271055a30e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -1028,10 +1028,10 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
  */
 static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
 				  struct amdgpu_ib *ib,
-				  unsigned vm_id, bool ctx_switch)
+				  unsigned vmid, bool ctx_switch)
 {
 	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
-	amdgpu_ring_write(ring, vm_id);
+	amdgpu_ring_write(ring, vmid);
 
 	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -1050,24 +1050,24 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
  * Write enc ring commands to execute the indirect buffer
  */
 static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
-		struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
 {
 	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
-	amdgpu_ring_write(ring, vm_id);
+	amdgpu_ring_write(ring, vmid);
 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
 	amdgpu_ring_write(ring, ib->length_dw);
 }
 
 static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
-					unsigned vm_id, uint64_t pd_addr)
+					unsigned vmid, uint64_t pd_addr)
 {
 	uint32_t reg;
 
-	if (vm_id < 8)
-		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
+	if (vmid < 8)
+		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
 	else
-		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;
+		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
 
 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
 	amdgpu_ring_write(ring, reg << 2);
@@ -1079,7 +1079,7 @@ static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
 	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
-	amdgpu_ring_write(ring, 1 << vm_id);
+	amdgpu_ring_write(ring, 1 << vmid);
 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
 	amdgpu_ring_write(ring, 0x8);
 
@@ -1088,7 +1088,7 @@ static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
 	amdgpu_ring_write(ring, 0);
 	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
-	amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+	amdgpu_ring_write(ring, 1 << vmid); /* mask */
 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
 	amdgpu_ring_write(ring, 0xC);
 }
@@ -1127,14 +1127,14 @@ static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
 }
 
 static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
-					    unsigned int vm_id, uint64_t pd_addr)
+					    unsigned int vmid, uint64_t pd_addr)
 {
 	amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
-	amdgpu_ring_write(ring, vm_id);
+	amdgpu_ring_write(ring, vmid);
 	amdgpu_ring_write(ring, pd_addr >> 12);
 
 	amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
-	amdgpu_ring_write(ring, vm_id);
+	amdgpu_ring_write(ring, vmid);
 }
 
 static bool uvd_v6_0_is_idle(void *handle)
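The uvd_v6_0.c hunks above are a mechanical rename of the vm_id parameter to vmid. The only logic worth restating is how the VM flush picks the page-table base register from the VMID: contexts 0-7 index off mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, contexts 8-15 off mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR. The helper below is hypothetical (not in the patch) and assumes the same register headers uvd_v6_0.c already pulls in; it simply restates the arithmetic from the hunk.

```c
/*
 * Hypothetical helper restating the register selection done inline in
 * uvd_v6_0_ring_emit_vm_flush(): the 16 VM contexts are split across two
 * register banks, so VMIDs 8-15 index from a second base register.
 */
static uint32_t example_vm_pt_base_reg(unsigned int vmid)
{
	if (vmid < 8)
		return mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;

	return mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8);
}
```

The flush then writes the selected register through the ring using the PACKET0(reg, 0) / value pairs shown in the hunk, followed by a write to mmVM_INVALIDATE_REQUEST with the per-VMID bit (1 << vmid).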