From ba8f7ad0e5b25851299cd45a63b57d843e50b577 Mon Sep 17 00:00:00 2001
From: Leo Liu
Date: Fri, 10 Nov 2017 12:27:40 -0500
Subject: drm/amdgpu: add VEGAM UVD firmware support

Signed-off-by: Leo Liu
Reviewed-by: Alex Deucher
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 627542b22ae4..d8dd4028c2bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -66,6 +66,7 @@
 #define FIRMWARE_POLARIS10	"amdgpu/polaris10_uvd.bin"
 #define FIRMWARE_POLARIS11	"amdgpu/polaris11_uvd.bin"
 #define FIRMWARE_POLARIS12	"amdgpu/polaris12_uvd.bin"
+#define FIRMWARE_VEGAM		"amdgpu/vegam_uvd.bin"

 #define FIRMWARE_VEGA10		"amdgpu/vega10_uvd.bin"
 #define FIRMWARE_VEGA12		"amdgpu/vega12_uvd.bin"
@@ -109,6 +110,7 @@ MODULE_FIRMWARE(FIRMWARE_STONEY);
 MODULE_FIRMWARE(FIRMWARE_POLARIS10);
 MODULE_FIRMWARE(FIRMWARE_POLARIS11);
 MODULE_FIRMWARE(FIRMWARE_POLARIS12);
+MODULE_FIRMWARE(FIRMWARE_VEGAM);

 MODULE_FIRMWARE(FIRMWARE_VEGA10);
 MODULE_FIRMWARE(FIRMWARE_VEGA12);
@@ -172,6 +174,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	case CHIP_VEGA12:
 		fw_name = FIRMWARE_VEGA12;
 		break;
+	case CHIP_VEGAM:
+		fw_name = FIRMWARE_VEGAM;
+		break;
 	default:
 		return -EINVAL;
 	}
--
cgit

From 8344c53f57057b42a5da87e9557c40fcda18fb7a Mon Sep 17 00:00:00 2001
From: Nayan Deshmukh
Date: Thu, 29 Mar 2018 22:36:32 +0530
Subject: drm/scheduler: remove unused parameter
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This patch also affects the amdgpu and etnaviv drivers, which use the
function drm_sched_entity_init().

Signed-off-by: Nayan Deshmukh
Suggested-by: Christian König
Acked-by: Lucas Stach
Reviewed-by: Christian König
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c   | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c   | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c   | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c   | 4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c    | 2 +-
 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c     | 2 +-
 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c     | 2 +-
 drivers/gpu/drm/etnaviv/etnaviv_drv.c     | 2 +-
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 3 +--
 include/drm/gpu_scheduler.h               | 2 +-
 11 files changed, 12 insertions(+), 13 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 6741a62a7d15..a8e531d604fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -91,7 +91,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 			continue;

 		r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
-					  rq, amdgpu_sched_jobs, &ctx->guilty);
+					  rq, &ctx->guilty);
 		if (r)
 			goto failed;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index cc3b067e1ec6..5e9fd256faad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -111,7 +111,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
 	ring = adev->mman.buffer_funcs_ring;
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
 	r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
-				  rq, amdgpu_sched_jobs, NULL);
+				  rq, NULL);
 	if (r) {
 		DRM_ERROR("Failed setting up TTM BO 
move run queue.\n"); goto error_entity; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index d8dd4028c2bb..de4d77af02ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -242,7 +242,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) ring = &adev->uvd.ring; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity, - rq, amdgpu_sched_jobs, NULL); + rq, NULL); if (r != 0) { DRM_ERROR("Failed setting up UVD run queue.\n"); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index e2186eda3271..a86322f5164f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -186,7 +186,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) ring = &adev->vce.ring[0]; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; r = drm_sched_entity_init(&ring->sched, &adev->vce.entity, - rq, amdgpu_sched_jobs, NULL); + rq, NULL); if (r != 0) { DRM_ERROR("Failed setting up VCE run queue.\n"); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 58e495330b38..e5d234cf804f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -105,7 +105,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) ring = &adev->vcn.ring_dec; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec, - rq, amdgpu_sched_jobs, NULL); + rq, NULL); if (r != 0) { DRM_ERROR("Failed setting up VCN dec run queue.\n"); return r; @@ -114,7 +114,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) ring = &adev->vcn.ring_enc[0]; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc, - rq, amdgpu_sched_jobs, NULL); + rq, NULL); if (r != 0) { DRM_ERROR("Failed setting up VCN enc run queue.\n"); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 8e71d3984016..1a8f4e0dd023 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2404,7 +2404,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, ring = adev->vm_manager.vm_pte_rings[ring_instance]; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL]; r = drm_sched_entity_init(&ring->sched, &vm->entity, - rq, amdgpu_sched_jobs, NULL); + rq, NULL); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 8041b26a7a21..ca6ab56357b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -429,7 +429,7 @@ static int uvd_v6_0_sw_init(void *handle) ring = &adev->uvd.ring_enc[0]; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc, - rq, amdgpu_sched_jobs, NULL); + rq, NULL); if (r) { DRM_ERROR("Failed setting up UVD ENC run queue.\n"); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index b0de1e04093b..0ca63d588670 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -418,7 +418,7 @@ static int uvd_v7_0_sw_init(void *handle) ring = &adev->uvd.ring_enc[0]; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; r = 
drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc, - rq, amdgpu_sched_jobs, NULL); + rq, NULL); if (r) { DRM_ERROR("Failed setting up UVD ENC run queue.\n"); return r; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index ab50090d066c..23e73c2a19f4 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -116,7 +116,7 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file) drm_sched_entity_init(&gpu->sched, &ctx->sched_entity[i], &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL], - 32, NULL); + NULL); } } diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index 1f1dd70125a7..a364fc0b38c3 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c @@ -117,7 +117,6 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq) * @sched The pointer to the scheduler * @entity The pointer to a valid drm_sched_entity * @rq The run queue this entity belongs - * @jobs The max number of jobs in the job queue * @guilty atomic_t set to 1 when a job on this queue * is found to be guilty causing a timeout * @@ -126,7 +125,7 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq) int drm_sched_entity_init(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity, struct drm_sched_rq *rq, - uint32_t jobs, atomic_t *guilty) + atomic_t *guilty) { if (!(sched && entity && rq)) return -EINVAL; diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 350a62c26b29..52380067a43f 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -188,7 +188,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched); int drm_sched_entity_init(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity, struct drm_sched_rq *rq, - uint32_t jobs, atomic_t *guilty); + atomic_t *guilty); void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity); void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched, -- cgit From cac18c82e0c5b39b69648942576dbd1d6f9d056e Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 11 May 2018 13:44:09 -0500 Subject: drm/amdgpu: Specify vega20 uvd firmware MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index de4d77af02ae..fd1e9cd65066 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -70,6 +70,7 @@ #define FIRMWARE_VEGA10 "amdgpu/vega10_uvd.bin" #define FIRMWARE_VEGA12 "amdgpu/vega12_uvd.bin" +#define FIRMWARE_VEGA20 "amdgpu/vega20_uvd.bin" #define mmUVD_GPCOM_VCPU_DATA0_VEGA10 (0x03c4 + 0x7e00) #define mmUVD_GPCOM_VCPU_DATA1_VEGA10 (0x03c5 + 0x7e00) @@ -114,6 +115,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGAM); MODULE_FIRMWARE(FIRMWARE_VEGA10); MODULE_FIRMWARE(FIRMWARE_VEGA12); +MODULE_FIRMWARE(FIRMWARE_VEGA20); static void amdgpu_uvd_idle_work_handler(struct work_struct *work); @@ -177,6 +179,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) case CHIP_VEGAM: fw_name = FIRMWARE_VEGAM; break; + case CHIP_VEGA20: + fw_name = FIRMWARE_VEGA20; + break; default: return 
-EINVAL;
 	}
--
cgit

From 2bb795f5ba9cd676536858a978b9df06f473af88 Mon Sep 17 00:00:00 2001
From: James Zhu
Date: Tue, 15 May 2018 14:25:46 -0500
Subject: drm/amdgpu/vg20:Restruct uvd to support multiple uvds

Vega20 has dual UVD, so amdgpu_device::uvd needs to be restructured to
support multiple UVD instances. There are no logical changes here.

Signed-off-by: James Zhu
Reviewed-by: Alex Deucher
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c     |   6 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c       |   4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c |   4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c       | 102 +++++++++----------
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h       |  19 ++--
 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c         |  27 +++---
 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c         |  25 ++---
 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c         |  77 +++++++--------
 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c         | 135 +++++++++++++-------------
 9 files changed, 205 insertions(+), 194 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index d09fcab2398f..1070f4042cbb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -376,14 +376,14 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 	struct amdgpu_device *adev = ring->adev;
 	uint64_t index;

-	if (ring != &adev->uvd.ring) {
+	if (ring != &adev->uvd.inst->ring) {
 		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
 		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
 	} else {
 		/* put fence directly behind firmware */
 		index = ALIGN(adev->uvd.fw->size, 8);
-		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
-		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
+		ring->fence_drv.cpu_addr = adev->uvd.inst->cpu_addr + index;
+		ring->fence_drv.gpu_addr = adev->uvd.inst->gpu_addr + index;
 	}
 	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
 	amdgpu_irq_get(adev, irq_src, irq_type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index eb4785e51573..5620ed291107 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -348,7 +348,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		break;
 	case AMDGPU_HW_IP_UVD:
 		type = AMD_IP_BLOCK_TYPE_UVD;
-		ring_mask = adev->uvd.ring.ready ? 1 : 0;
+		ring_mask = adev->uvd.inst->ring.ready ? 1 : 0;
 		ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
 		ib_size_alignment = 16;
 		break;
@@ -362,7 +362,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 	case AMDGPU_HW_IP_UVD_ENC:
 		type = AMD_IP_BLOCK_TYPE_UVD;
 		for (i = 0; i < adev->uvd.num_enc_rings; i++)
-			ring_mask |= ((adev->uvd.ring_enc[i].ready ? 1 : 0) << i);
+			ring_mask |= ((adev->uvd.inst->ring_enc[i].ready ? 
1 : 0) << i); ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; ib_size_alignment = 1; break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c index 262c1267249e..2458d385e55a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c @@ -77,13 +77,13 @@ static int amdgpu_identity_map(struct amdgpu_device *adev, *out_ring = &adev->sdma.instance[ring].ring; break; case AMDGPU_HW_IP_UVD: - *out_ring = &adev->uvd.ring; + *out_ring = &adev->uvd.inst->ring; break; case AMDGPU_HW_IP_VCE: *out_ring = &adev->vce.ring[ring]; break; case AMDGPU_HW_IP_UVD_ENC: - *out_ring = &adev->uvd.ring_enc[ring]; + *out_ring = &adev->uvd.inst->ring_enc[ring]; break; case AMDGPU_HW_IP_VCN_DEC: *out_ring = &adev->vcn.ring_dec; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index fd1e9cd65066..02683a039a98 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -129,7 +129,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) unsigned version_major, version_minor, family_id; int i, r; - INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler); + INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler); switch (adev->asic_type) { #ifdef CONFIG_DRM_AMDGPU_CIK @@ -237,16 +237,16 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo, - &adev->uvd.gpu_addr, &adev->uvd.cpu_addr); + AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst->vcpu_bo, + &adev->uvd.inst->gpu_addr, &adev->uvd.inst->cpu_addr); if (r) { dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); return r; } - ring = &adev->uvd.ring; + ring = &adev->uvd.inst->ring; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity, + r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity, rq, NULL); if (r != 0) { DRM_ERROR("Failed setting up UVD run queue.\n"); @@ -254,8 +254,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) } for (i = 0; i < adev->uvd.max_handles; ++i) { - atomic_set(&adev->uvd.handles[i], 0); - adev->uvd.filp[i] = NULL; + atomic_set(&adev->uvd.inst->handles[i], 0); + adev->uvd.inst->filp[i] = NULL; } /* from uvd v5.0 HW addressing capacity increased to 64 bits */ @@ -285,18 +285,18 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) { int i; - kfree(adev->uvd.saved_bo); + kfree(adev->uvd.inst->saved_bo); - drm_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); + drm_sched_entity_fini(&adev->uvd.inst->ring.sched, &adev->uvd.inst->entity); - amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo, - &adev->uvd.gpu_addr, - (void **)&adev->uvd.cpu_addr); + amdgpu_bo_free_kernel(&adev->uvd.inst->vcpu_bo, + &adev->uvd.inst->gpu_addr, + (void **)&adev->uvd.inst->cpu_addr); - amdgpu_ring_fini(&adev->uvd.ring); + amdgpu_ring_fini(&adev->uvd.inst->ring); for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i) - amdgpu_ring_fini(&adev->uvd.ring_enc[i]); + amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]); release_firmware(adev->uvd.fw); @@ -309,29 +309,29 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) void *ptr; int i; - if (adev->uvd.vcpu_bo == NULL) + if (adev->uvd.inst->vcpu_bo == NULL) return 0; - cancel_delayed_work_sync(&adev->uvd.idle_work); + 
cancel_delayed_work_sync(&adev->uvd.inst->idle_work); /* only valid for physical mode */ if (adev->asic_type < CHIP_POLARIS10) { for (i = 0; i < adev->uvd.max_handles; ++i) - if (atomic_read(&adev->uvd.handles[i])) + if (atomic_read(&adev->uvd.inst->handles[i])) break; if (i == adev->uvd.max_handles) return 0; } - size = amdgpu_bo_size(adev->uvd.vcpu_bo); - ptr = adev->uvd.cpu_addr; + size = amdgpu_bo_size(adev->uvd.inst->vcpu_bo); + ptr = adev->uvd.inst->cpu_addr; - adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); - if (!adev->uvd.saved_bo) + adev->uvd.inst->saved_bo = kmalloc(size, GFP_KERNEL); + if (!adev->uvd.inst->saved_bo) return -ENOMEM; - memcpy_fromio(adev->uvd.saved_bo, ptr, size); + memcpy_fromio(adev->uvd.inst->saved_bo, ptr, size); return 0; } @@ -341,16 +341,16 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) unsigned size; void *ptr; - if (adev->uvd.vcpu_bo == NULL) + if (adev->uvd.inst->vcpu_bo == NULL) return -EINVAL; - size = amdgpu_bo_size(adev->uvd.vcpu_bo); - ptr = adev->uvd.cpu_addr; + size = amdgpu_bo_size(adev->uvd.inst->vcpu_bo); + ptr = adev->uvd.inst->cpu_addr; - if (adev->uvd.saved_bo != NULL) { - memcpy_toio(ptr, adev->uvd.saved_bo, size); - kfree(adev->uvd.saved_bo); - adev->uvd.saved_bo = NULL; + if (adev->uvd.inst->saved_bo != NULL) { + memcpy_toio(ptr, adev->uvd.inst->saved_bo, size); + kfree(adev->uvd.inst->saved_bo); + adev->uvd.inst->saved_bo = NULL; } else { const struct common_firmware_header *hdr; unsigned offset; @@ -358,14 +358,14 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) hdr = (const struct common_firmware_header *)adev->uvd.fw->data; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset, + memcpy_toio(adev->uvd.inst->cpu_addr, adev->uvd.fw->data + offset, le32_to_cpu(hdr->ucode_size_bytes)); size -= le32_to_cpu(hdr->ucode_size_bytes); ptr += le32_to_cpu(hdr->ucode_size_bytes); } memset_io(ptr, 0, size); /* to restore uvd fence seq */ - amdgpu_fence_driver_force_completion(&adev->uvd.ring); + amdgpu_fence_driver_force_completion(&adev->uvd.inst->ring); } return 0; @@ -373,12 +373,12 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) { - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; int i, r; for (i = 0; i < adev->uvd.max_handles; ++i) { - uint32_t handle = atomic_read(&adev->uvd.handles[i]); - if (handle != 0 && adev->uvd.filp[i] == filp) { + uint32_t handle = atomic_read(&adev->uvd.inst->handles[i]); + if (handle != 0 && adev->uvd.inst->filp[i] == filp) { struct dma_fence *fence; r = amdgpu_uvd_get_destroy_msg(ring, handle, @@ -391,8 +391,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) dma_fence_wait(fence, false); dma_fence_put(fence); - adev->uvd.filp[i] = NULL; - atomic_set(&adev->uvd.handles[i], 0); + adev->uvd.inst->filp[i] = NULL; + atomic_set(&adev->uvd.inst->handles[i], 0); } } } @@ -696,13 +696,13 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, /* try to alloc a new handle */ for (i = 0; i < adev->uvd.max_handles; ++i) { - if (atomic_read(&adev->uvd.handles[i]) == handle) { + if (atomic_read(&adev->uvd.inst->handles[i]) == handle) { DRM_ERROR("Handle 0x%x already in use!\n", handle); return -EINVAL; } - if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) { - adev->uvd.filp[i] = ctx->parser->filp; + if 
(!atomic_cmpxchg(&adev->uvd.inst->handles[i], 0, handle)) { + adev->uvd.inst->filp[i] = ctx->parser->filp; return 0; } } @@ -719,8 +719,8 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, /* validate the handle */ for (i = 0; i < adev->uvd.max_handles; ++i) { - if (atomic_read(&adev->uvd.handles[i]) == handle) { - if (adev->uvd.filp[i] != ctx->parser->filp) { + if (atomic_read(&adev->uvd.inst->handles[i]) == handle) { + if (adev->uvd.inst->filp[i] != ctx->parser->filp) { DRM_ERROR("UVD handle collision detected!\n"); return -EINVAL; } @@ -734,7 +734,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, case 2: /* it's a destroy msg, free the handle */ for (i = 0; i < adev->uvd.max_handles; ++i) - atomic_cmpxchg(&adev->uvd.handles[i], handle, 0); + atomic_cmpxchg(&adev->uvd.inst->handles[i], handle, 0); amdgpu_bo_kunmap(bo); return 0; @@ -810,7 +810,7 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx) } if ((cmd == 0 || cmd == 0x3) && - (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) { + (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) { DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", start, end); return -EINVAL; @@ -1043,7 +1043,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, if (r) goto err_free; - r = amdgpu_job_submit(job, ring, &adev->uvd.entity, + r = amdgpu_job_submit(job, ring, &adev->uvd.inst->entity, AMDGPU_FENCE_OWNER_UNDEFINED, &f); if (r) goto err_free; @@ -1131,8 +1131,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, static void amdgpu_uvd_idle_work_handler(struct work_struct *work) { struct amdgpu_device *adev = - container_of(work, struct amdgpu_device, uvd.idle_work.work); - unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring); + container_of(work, struct amdgpu_device, uvd.inst->idle_work.work); + unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.inst->ring); if (fences == 0) { if (adev->pm.dpm_enabled) { @@ -1146,7 +1146,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) AMD_CG_STATE_GATE); } } else { - schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT); + schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT); } } @@ -1158,7 +1158,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring) if (amdgpu_sriov_vf(adev)) return; - set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work); + set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work); if (set_clocks) { if (adev->pm.dpm_enabled) { amdgpu_dpm_enable_uvd(adev, true); @@ -1175,7 +1175,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring) void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring) { if (!amdgpu_sriov_vf(ring->adev)) - schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT); + schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT); } /** @@ -1209,7 +1209,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) } else if (r < 0) { DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); } else { - DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); + DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); r = 0; } @@ -1237,7 +1237,7 @@ uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev) * necessarily linear. So we need to count * all non-zero handles. 
*/ - if (atomic_read(&adev->uvd.handles[i])) + if (atomic_read(&adev->uvd.inst->handles[i])) used_handles++; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index 32ea20b99e53..b1579fba134c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h @@ -31,30 +31,37 @@ #define AMDGPU_UVD_SESSION_SIZE (50*1024) #define AMDGPU_UVD_FIRMWARE_OFFSET 256 +#define AMDGPU_MAX_UVD_INSTANCES 2 + #define AMDGPU_UVD_FIRMWARE_SIZE(adev) \ (AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(((const struct common_firmware_header *)(adev)->uvd.fw->data)->ucode_size_bytes) + \ 8) - AMDGPU_UVD_FIRMWARE_OFFSET) -struct amdgpu_uvd { +struct amdgpu_uvd_inst { struct amdgpu_bo *vcpu_bo; void *cpu_addr; uint64_t gpu_addr; - unsigned fw_version; void *saved_bo; - unsigned max_handles; atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; struct delayed_work idle_work; - const struct firmware *fw; /* UVD firmware */ struct amdgpu_ring ring; struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS]; struct amdgpu_irq_src irq; - bool address_64_bit; - bool use_ctx_buf; struct drm_sched_entity entity; struct drm_sched_entity entity_enc; uint32_t srbm_soft_reset; +}; + +struct amdgpu_uvd { + const struct firmware *fw; /* UVD firmware */ + unsigned fw_version; + unsigned max_handles; unsigned num_enc_rings; + uint8_t num_uvd_inst; + bool address_64_bit; + bool use_ctx_buf; + struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES]; }; int amdgpu_uvd_sw_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 87cbb142dd0b..5f22135de77f 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -93,6 +93,7 @@ static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring) static int uvd_v4_2_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->uvd.num_uvd_inst = 1; uvd_v4_2_set_ring_funcs(adev); uvd_v4_2_set_irq_funcs(adev); @@ -107,7 +108,7 @@ static int uvd_v4_2_sw_init(void *handle) int r; /* UVD TRAP */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq); if (r) return r; @@ -119,9 +120,9 @@ static int uvd_v4_2_sw_init(void *handle) if (r) return r; - ring = &adev->uvd.ring; + ring = &adev->uvd.inst->ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); return r; } @@ -150,7 +151,7 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev, static int uvd_v4_2_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t tmp; int r; @@ -208,7 +209,7 @@ done: static int uvd_v4_2_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; if (RREG32(mmUVD_STATUS) != 0) uvd_v4_2_stop(adev); @@ -251,7 +252,7 @@ static int uvd_v4_2_resume(void *handle) */ static int uvd_v4_2_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t rb_bufsz; int i, j, r; u32 tmp; @@ -536,7 +537,7 @@ static void uvd_v4_2_mc_resume(struct 
amdgpu_device *adev) uint32_t size; /* programm the VCPU memory controller bits 0-27 */ - addr = (adev->uvd.gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3; + addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3; size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3; WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr); WREG32(mmUVD_VCPU_CACHE_SIZE0, size); @@ -553,11 +554,11 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev) WREG32(mmUVD_VCPU_CACHE_SIZE2, size); /* bits 28-31 */ - addr = (adev->uvd.gpu_addr >> 28) & 0xF; + addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF; WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0)); /* bits 32-39 */ - addr = (adev->uvd.gpu_addr >> 32) & 0xFF; + addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF; WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31)); WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config); @@ -664,7 +665,7 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { DRM_DEBUG("IH: UVD TRAP\n"); - amdgpu_fence_process(&adev->uvd.ring); + amdgpu_fence_process(&adev->uvd.inst->ring); return 0; } @@ -753,7 +754,7 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev) { - adev->uvd.ring.funcs = &uvd_v4_2_ring_funcs; + adev->uvd.inst->ring.funcs = &uvd_v4_2_ring_funcs; } static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = { @@ -763,8 +764,8 @@ static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = { static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev) { - adev->uvd.irq.num_types = 1; - adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs; + adev->uvd.inst->irq.num_types = 1; + adev->uvd.inst->irq.funcs = &uvd_v4_2_irq_funcs; } const struct amdgpu_ip_block_version uvd_v4_2_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 6445d55e7d5a..f5d074a887fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -89,6 +89,7 @@ static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring) static int uvd_v5_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->uvd.num_uvd_inst = 1; uvd_v5_0_set_ring_funcs(adev); uvd_v5_0_set_irq_funcs(adev); @@ -103,7 +104,7 @@ static int uvd_v5_0_sw_init(void *handle) int r; /* UVD TRAP */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq); if (r) return r; @@ -115,9 +116,9 @@ static int uvd_v5_0_sw_init(void *handle) if (r) return r; - ring = &adev->uvd.ring; + ring = &adev->uvd.inst->ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); return r; } @@ -144,7 +145,7 @@ static int uvd_v5_0_sw_fini(void *handle) static int uvd_v5_0_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t tmp; int r; @@ -204,7 +205,7 @@ done: static int uvd_v5_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; if (RREG32(mmUVD_STATUS) != 0) uvd_v5_0_stop(adev); @@ -253,9 +254,9 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev) /* programm memory controller bits 0-27 
*/ WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - lower_32_bits(adev->uvd.gpu_addr)); + lower_32_bits(adev->uvd.inst->gpu_addr)); WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - upper_32_bits(adev->uvd.gpu_addr)); + upper_32_bits(adev->uvd.inst->gpu_addr)); offset = AMDGPU_UVD_FIRMWARE_OFFSET; size = AMDGPU_UVD_FIRMWARE_SIZE(adev); @@ -287,7 +288,7 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev) */ static int uvd_v5_0_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t rb_bufsz, tmp; uint32_t lmi_swap_cntl; uint32_t mp_swap_cntl; @@ -586,7 +587,7 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { DRM_DEBUG("IH: UVD TRAP\n"); - amdgpu_fence_process(&adev->uvd.ring); + amdgpu_fence_process(&adev->uvd.inst->ring); return 0; } @@ -861,7 +862,7 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev) { - adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs; + adev->uvd.inst->ring.funcs = &uvd_v5_0_ring_funcs; } static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = { @@ -871,8 +872,8 @@ static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = { static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev) { - adev->uvd.irq.num_types = 1; - adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs; + adev->uvd.inst->irq.num_types = 1; + adev->uvd.inst->irq.funcs = &uvd_v5_0_irq_funcs; } const struct amdgpu_ip_block_version uvd_v5_0_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index ca6ab56357b5..dc391693d7ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -91,7 +91,7 @@ static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->uvd.ring_enc[0]) + if (ring == &adev->uvd.inst->ring_enc[0]) return RREG32(mmUVD_RB_RPTR); else return RREG32(mmUVD_RB_RPTR2); @@ -121,7 +121,7 @@ static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->uvd.ring_enc[0]) + if (ring == &adev->uvd.inst->ring_enc[0]) return RREG32(mmUVD_RB_WPTR); else return RREG32(mmUVD_RB_WPTR2); @@ -152,7 +152,7 @@ static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->uvd.ring_enc[0]) + if (ring == &adev->uvd.inst->ring_enc[0]) WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); else @@ -375,6 +375,7 @@ error: static int uvd_v6_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->uvd.num_uvd_inst = 1; if (!(adev->flags & AMD_IS_APU) && (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK)) @@ -399,14 +400,14 @@ static int uvd_v6_0_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* UVD TRAP */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq); if (r) return r; /* UVD ENC TRAP */ if (uvd_v6_0_enc_support(adev)) { for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.inst->irq); if (r) return r; } @@ -418,17 +419,17 @@ static int uvd_v6_0_sw_init(void 
*handle) if (!uvd_v6_0_enc_support(adev)) { for (i = 0; i < adev->uvd.num_enc_rings; ++i) - adev->uvd.ring_enc[i].funcs = NULL; + adev->uvd.inst->ring_enc[i].funcs = NULL; - adev->uvd.irq.num_types = 1; + adev->uvd.inst->irq.num_types = 1; adev->uvd.num_enc_rings = 0; DRM_INFO("UVD ENC is disabled\n"); } else { struct drm_sched_rq *rq; - ring = &adev->uvd.ring_enc[0]; + ring = &adev->uvd.inst->ring_enc[0]; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc, + r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc, rq, NULL); if (r) { DRM_ERROR("Failed setting up UVD ENC run queue.\n"); @@ -440,17 +441,17 @@ static int uvd_v6_0_sw_init(void *handle) if (r) return r; - ring = &adev->uvd.ring; + ring = &adev->uvd.inst->ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); if (r) return r; if (uvd_v6_0_enc_support(adev)) { for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - ring = &adev->uvd.ring_enc[i]; + ring = &adev->uvd.inst->ring_enc[i]; sprintf(ring->name, "uvd_enc%d", i); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); if (r) return r; } @@ -469,10 +470,10 @@ static int uvd_v6_0_sw_fini(void *handle) return r; if (uvd_v6_0_enc_support(adev)) { - drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc); + drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc); for (i = 0; i < adev->uvd.num_enc_rings; ++i) - amdgpu_ring_fini(&adev->uvd.ring_enc[i]); + amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]); } return amdgpu_uvd_sw_fini(adev); @@ -488,7 +489,7 @@ static int uvd_v6_0_sw_fini(void *handle) static int uvd_v6_0_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t tmp; int i, r; @@ -532,7 +533,7 @@ static int uvd_v6_0_hw_init(void *handle) if (uvd_v6_0_enc_support(adev)) { for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - ring = &adev->uvd.ring_enc[i]; + ring = &adev->uvd.inst->ring_enc[i]; ring->ready = true; r = amdgpu_ring_test_ring(ring); if (r) { @@ -563,7 +564,7 @@ done: static int uvd_v6_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; if (RREG32(mmUVD_STATUS) != 0) uvd_v6_0_stop(adev); @@ -611,9 +612,9 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev) /* programm memory controller bits 0-27 */ WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - lower_32_bits(adev->uvd.gpu_addr)); + lower_32_bits(adev->uvd.inst->gpu_addr)); WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - upper_32_bits(adev->uvd.gpu_addr)); + upper_32_bits(adev->uvd.inst->gpu_addr)); offset = AMDGPU_UVD_FIRMWARE_OFFSET; size = AMDGPU_UVD_FIRMWARE_SIZE(adev); @@ -726,7 +727,7 @@ static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev, */ static int uvd_v6_0_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t rb_bufsz, tmp; uint32_t lmi_swap_cntl; uint32_t mp_swap_cntl; @@ -866,14 +867,14 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0); if (uvd_v6_0_enc_support(adev)) { - ring = 
&adev->uvd.ring_enc[0]; + ring = &adev->uvd.inst->ring_enc[0]; WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr); WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); WREG32(mmUVD_RB_SIZE, ring->ring_size / 4); - ring = &adev->uvd.ring_enc[1]; + ring = &adev->uvd.inst->ring_enc[1]; WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr); @@ -1158,10 +1159,10 @@ static bool uvd_v6_0_check_soft_reset(void *handle) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); if (srbm_soft_reset) { - adev->uvd.srbm_soft_reset = srbm_soft_reset; + adev->uvd.inst->srbm_soft_reset = srbm_soft_reset; return true; } else { - adev->uvd.srbm_soft_reset = 0; + adev->uvd.inst->srbm_soft_reset = 0; return false; } } @@ -1170,7 +1171,7 @@ static int uvd_v6_0_pre_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->uvd.srbm_soft_reset) + if (!adev->uvd.inst->srbm_soft_reset) return 0; uvd_v6_0_stop(adev); @@ -1182,9 +1183,9 @@ static int uvd_v6_0_soft_reset(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset; - if (!adev->uvd.srbm_soft_reset) + if (!adev->uvd.inst->srbm_soft_reset) return 0; - srbm_soft_reset = adev->uvd.srbm_soft_reset; + srbm_soft_reset = adev->uvd.inst->srbm_soft_reset; if (srbm_soft_reset) { u32 tmp; @@ -1212,7 +1213,7 @@ static int uvd_v6_0_post_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->uvd.srbm_soft_reset) + if (!adev->uvd.inst->srbm_soft_reset) return 0; mdelay(5); @@ -1238,17 +1239,17 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev, switch (entry->src_id) { case 124: - amdgpu_fence_process(&adev->uvd.ring); + amdgpu_fence_process(&adev->uvd.inst->ring); break; case 119: if (likely(uvd_v6_0_enc_support(adev))) - amdgpu_fence_process(&adev->uvd.ring_enc[0]); + amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]); else int_handled = false; break; case 120: if (likely(uvd_v6_0_enc_support(adev))) - amdgpu_fence_process(&adev->uvd.ring_enc[1]); + amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]); else int_handled = false; break; @@ -1612,10 +1613,10 @@ static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = { static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev) { if (adev->asic_type >= CHIP_POLARIS10) { - adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs; + adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs; DRM_INFO("UVD is enabled in VM mode\n"); } else { - adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs; + adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs; DRM_INFO("UVD is enabled in physical mode\n"); } } @@ -1625,7 +1626,7 @@ static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev) int i; for (i = 0; i < adev->uvd.num_enc_rings; ++i) - adev->uvd.ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs; + adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs; DRM_INFO("UVD ENC is enabled in VM mode\n"); } @@ -1638,11 +1639,11 @@ static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = { static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev) { if (uvd_v6_0_enc_support(adev)) - adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1; + adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1; else - adev->uvd.irq.num_types = 1; + adev->uvd.inst->irq.num_types = 
1; - adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs; + adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs; } const struct amdgpu_ip_block_version uvd_v6_0_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 0ca63d588670..66d4bea5fb2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -72,7 +72,7 @@ static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->uvd.ring_enc[0]) + if (ring == &adev->uvd.inst->ring_enc[0]) return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR); else return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2); @@ -106,7 +106,7 @@ static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring) if (ring->use_doorbell) return adev->wb.wb[ring->wptr_offs]; - if (ring == &adev->uvd.ring_enc[0]) + if (ring == &adev->uvd.inst->ring_enc[0]) return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR); else return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2); @@ -144,7 +144,7 @@ static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring) return; } - if (ring == &adev->uvd.ring_enc[0]) + if (ring == &adev->uvd.inst->ring_enc[0]) WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); else @@ -170,8 +170,8 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring) r = amdgpu_ring_alloc(ring, 16); if (r) { - DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n", - ring->idx, r); + DRM_ERROR("amdgpu: uvd enc failed to lock (%d)ring %d (%d).\n", + ring->me, ring->idx, r); return r; } amdgpu_ring_write(ring, HEVC_ENC_CMD_END); @@ -184,11 +184,11 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { - DRM_DEBUG("ring test on %d succeeded in %d usecs\n", - ring->idx, i); + DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n", + ring->me, ring->idx, i); } else { - DRM_ERROR("amdgpu: ring %d test failed\n", - ring->idx); + DRM_ERROR("amdgpu: (%d)ring %d test failed\n", + ring->me, ring->idx); r = -ETIMEDOUT; } @@ -342,24 +342,24 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL); if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r); + DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ring->me, r); goto error; } r = uvd_v7_0_enc_get_destroy_msg(ring, 1, true, &fence); if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); + DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ring->me, r); goto error; } r = dma_fence_wait_timeout(fence, false, timeout); if (r == 0) { - DRM_ERROR("amdgpu: IB test timed out.\n"); + DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ring->me); r = -ETIMEDOUT; } else if (r < 0) { - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); + DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ring->me, r); } else { - DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); + DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ring->me, ring->idx); r = 0; } error: @@ -370,6 +370,7 @@ error: static int uvd_v7_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->uvd.num_uvd_inst = 1; if (amdgpu_sriov_vf(adev)) adev->uvd.num_enc_rings = 1; @@ -390,13 +391,13 @@ static int uvd_v7_0_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* UVD TRAP */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.irq); + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, 
&adev->uvd.inst->irq); if (r) return r; /* UVD ENC TRAP */ for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.irq); + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.inst->irq); if (r) return r; } @@ -415,9 +416,9 @@ static int uvd_v7_0_sw_init(void *handle) DRM_INFO("PSP loading UVD firmware\n"); } - ring = &adev->uvd.ring_enc[0]; + ring = &adev->uvd.inst->ring_enc[0]; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc, + r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc, rq, NULL); if (r) { DRM_ERROR("Failed setting up UVD ENC run queue.\n"); @@ -428,15 +429,15 @@ static int uvd_v7_0_sw_init(void *handle) if (r) return r; if (!amdgpu_sriov_vf(adev)) { - ring = &adev->uvd.ring; + ring = &adev->uvd.inst->ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); if (r) return r; } for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - ring = &adev->uvd.ring_enc[i]; + ring = &adev->uvd.inst->ring_enc[i]; sprintf(ring->name, "uvd_enc%d", i); if (amdgpu_sriov_vf(adev)) { ring->use_doorbell = true; @@ -449,7 +450,7 @@ static int uvd_v7_0_sw_init(void *handle) else ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1; } - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); if (r) return r; } @@ -472,10 +473,10 @@ static int uvd_v7_0_sw_fini(void *handle) if (r) return r; - drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc); + drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc); for (i = 0; i < adev->uvd.num_enc_rings; ++i) - amdgpu_ring_fini(&adev->uvd.ring_enc[i]); + amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]); return amdgpu_uvd_sw_fini(adev); } @@ -490,7 +491,7 @@ static int uvd_v7_0_sw_fini(void *handle) static int uvd_v7_0_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t tmp; int i, r; @@ -543,7 +544,7 @@ static int uvd_v7_0_hw_init(void *handle) } for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - ring = &adev->uvd.ring_enc[i]; + ring = &adev->uvd.inst->ring_enc[i]; ring->ready = true; r = amdgpu_ring_test_ring(ring); if (r) { @@ -569,7 +570,7 @@ done: static int uvd_v7_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; if (!amdgpu_sriov_vf(adev)) uvd_v7_0_stop(adev); @@ -627,9 +628,9 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev) offset = 0; } else { WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - lower_32_bits(adev->uvd.gpu_addr)); + lower_32_bits(adev->uvd.inst->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - upper_32_bits(adev->uvd.gpu_addr)); + upper_32_bits(adev->uvd.inst->gpu_addr)); offset = size; } @@ -638,16 +639,16 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev) WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size); WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, - lower_32_bits(adev->uvd.gpu_addr + offset)); + lower_32_bits(adev->uvd.inst->gpu_addr + offset)); WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, - 
upper_32_bits(adev->uvd.gpu_addr + offset)); + upper_32_bits(adev->uvd.inst->gpu_addr + offset)); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21)); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE); WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, - lower_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); + lower_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, - upper_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); + upper_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21)); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40)); @@ -688,10 +689,10 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev, /* 4, set resp to zero */ WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0); - WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0); - adev->wb.wb[adev->uvd.ring_enc[0].wptr_offs] = 0; - adev->uvd.ring_enc[0].wptr = 0; - adev->uvd.ring_enc[0].wptr_old = 0; + WDOORBELL32(adev->uvd.inst->ring_enc[0].doorbell_index, 0); + adev->wb.wb[adev->uvd.inst->ring_enc[0].wptr_offs] = 0; + adev->uvd.inst->ring_enc[0].wptr = 0; + adev->uvd.inst->ring_enc[0].wptr_old = 0; /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */ WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001); @@ -742,7 +743,7 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) init_table += header->uvd_table_offset; - ring = &adev->uvd.ring; + ring = &adev->uvd.inst->ring; ring->wptr = 0; size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4); @@ -757,9 +758,9 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) offset = 0; } else { MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), - lower_32_bits(adev->uvd.gpu_addr)); + lower_32_bits(adev->uvd.inst->gpu_addr)); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), - upper_32_bits(adev->uvd.gpu_addr)); + upper_32_bits(adev->uvd.inst->gpu_addr)); offset = size; } @@ -768,16 +769,16 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), - lower_32_bits(adev->uvd.gpu_addr + offset)); + lower_32_bits(adev->uvd.inst->gpu_addr + offset)); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), - upper_32_bits(adev->uvd.gpu_addr + offset)); + upper_32_bits(adev->uvd.inst->gpu_addr + offset)); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21)); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), - lower_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); + lower_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), - upper_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); + upper_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21)); 
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40)); @@ -841,7 +842,7 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp); - ring = &adev->uvd.ring_enc[0]; + ring = &adev->uvd.inst->ring_enc[0]; ring->wptr = 0; MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr)); @@ -874,7 +875,7 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) */ static int uvd_v7_0_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t rb_bufsz, tmp; uint32_t lmi_swap_cntl; uint32_t mp_swap_cntl; @@ -1027,14 +1028,14 @@ static int uvd_v7_0_start(struct amdgpu_device *adev) WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); - ring = &adev->uvd.ring_enc[0]; + ring = &adev->uvd.inst->ring_enc[0]; WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4); - ring = &adev->uvd.ring_enc[1]; + ring = &adev->uvd.inst->ring_enc[1]; WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); @@ -1162,8 +1163,8 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring) WREG32_SOC15(UVD, 0, mmUVD_CONTEXT_ID, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); if (r) { - DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", - ring->idx, r); + DRM_ERROR("amdgpu: (%d)cp failed to lock ring %d (%d).\n", + ring->me, ring->idx, r); return r; } amdgpu_ring_write(ring, @@ -1178,11 +1179,11 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { - DRM_DEBUG("ring test on %d succeeded in %d usecs\n", - ring->idx, i); + DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n", + ring->me, ring->idx, i); } else { - DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", - ring->idx, tmp); + DRM_ERROR("(%d)amdgpu: ring %d test failed (0x%08X)\n", + ring->me, ring->idx, tmp); r = -EINVAL; } return r; @@ -1365,10 +1366,10 @@ static bool uvd_v7_0_check_soft_reset(void *handle) SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); if (srbm_soft_reset) { - adev->uvd.srbm_soft_reset = srbm_soft_reset; + adev->uvd.inst->srbm_soft_reset = srbm_soft_reset; return true; } else { - adev->uvd.srbm_soft_reset = 0; + adev->uvd.inst->srbm_soft_reset = 0; return false; } } @@ -1377,7 +1378,7 @@ static int uvd_v7_0_pre_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->uvd.srbm_soft_reset) + if (!adev->uvd.inst->srbm_soft_reset) return 0; uvd_v7_0_stop(adev); @@ -1389,9 +1390,9 @@ static int uvd_v7_0_soft_reset(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset; - if (!adev->uvd.srbm_soft_reset) + if (!adev->uvd.inst->srbm_soft_reset) return 0; - srbm_soft_reset = adev->uvd.srbm_soft_reset; + srbm_soft_reset = adev->uvd.inst->srbm_soft_reset; if (srbm_soft_reset) { u32 tmp; 
@@ -1419,7 +1420,7 @@ static int uvd_v7_0_post_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

-	if (!adev->uvd.srbm_soft_reset)
+	if (!adev->uvd.inst->srbm_soft_reset)
 		return 0;

 	mdelay(5);
@@ -1444,14 +1445,14 @@ static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
 	DRM_DEBUG("IH: UVD TRAP\n");
 	switch (entry->src_id) {
 	case 124:
-		amdgpu_fence_process(&adev->uvd.ring);
+		amdgpu_fence_process(&adev->uvd.inst->ring);
 		break;
 	case 119:
-		amdgpu_fence_process(&adev->uvd.ring_enc[0]);
+		amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
 		break;
 	case 120:
 		if (!amdgpu_sriov_vf(adev))
-			amdgpu_fence_process(&adev->uvd.ring_enc[1]);
+			amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
 		break;
 	default:
 		DRM_ERROR("Unhandled interrupt: %d %d\n",
@@ -1719,7 +1720,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {

 static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
 {
-	adev->uvd.ring.funcs = &uvd_v7_0_ring_vm_funcs;
+	adev->uvd.inst->ring.funcs = &uvd_v7_0_ring_vm_funcs;
 	DRM_INFO("UVD is enabled in VM mode\n");
 }

@@ -1728,7 +1729,7 @@ static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
 	int i;

 	for (i = 0; i < adev->uvd.num_enc_rings; ++i)
-		adev->uvd.ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
+		adev->uvd.inst->ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;

 	DRM_INFO("UVD ENC is enabled in VM mode\n");
 }
@@ -1740,8 +1741,8 @@ static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {

 static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1;
-	adev->uvd.irq.funcs = &uvd_v7_0_irq_funcs;
+	adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
+	adev->uvd.inst->irq.funcs = &uvd_v7_0_irq_funcs;
 }

 const struct amdgpu_ip_block_version uvd_v7_0_ip_block =
--
cgit

From 10dd74eac4dba963bfa97f5092040aa75ff742d6 Mon Sep 17 00:00:00 2001
From: James Zhu
Date: Tue, 15 May 2018 14:31:24 -0500
Subject: drm/amdgpu/vg20:Restruct uvd.inst to support multiple instances

Vega20 has dual UVD, so multiple-instance support needs to be added for
uvd. Restructure uvd.inst, using uvd.inst[0] to replace uvd.inst->.
Repurpose amdgpu_ring::me as the instance index, initialized to 0.
There are no logical changes here.
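
For illustration only (a minimal sketch, not part of the diff below; the
function name uvd_example_set_instance_indices is hypothetical and the
amdgpu headers are assumed), per-instance state now sits behind an index
while shared state such as the firmware stays in struct amdgpu_uvd:

	/* Sketch: walk every UVD instance the way the converted code does
	 * with adev->uvd.inst[j], replacing the old single-instance
	 * adev->uvd.inst-> accesses. ring->me records which instance a
	 * ring belongs to and defaults to 0.
	 */
	static void uvd_example_set_instance_indices(struct amdgpu_device *adev)
	{
		int j;

		for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
			struct amdgpu_uvd_inst *inst = &adev->uvd.inst[j];

			/* instance index; stays 0 on a single-UVD chip */
			inst->ring.me = j;
		}
	}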
Signed-off-by: James Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 6 +- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 12 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 229 +++---- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 1002 +++++++++++++++-------------- 5 files changed, 660 insertions(+), 590 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 1070f4042cbb..39ec6b8890a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -376,14 +376,14 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, struct amdgpu_device *adev = ring->adev; uint64_t index; - if (ring != &adev->uvd.inst->ring) { + if (ring != &adev->uvd.inst[ring->me].ring) { ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs]; ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4); } else { /* put fence directly behind firmware */ index = ALIGN(adev->uvd.fw->size, 8); - ring->fence_drv.cpu_addr = adev->uvd.inst->cpu_addr + index; - ring->fence_drv.gpu_addr = adev->uvd.inst->gpu_addr + index; + ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index; + ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index; } amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq)); amdgpu_irq_get(adev, irq_src, irq_type); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 5620ed291107..91517b166a3b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -286,7 +286,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file struct drm_crtc *crtc; uint32_t ui32 = 0; uint64_t ui64 = 0; - int i, found; + int i, j, found; int ui32_size = sizeof(ui32); if (!info->return_size || !info->return_pointer) @@ -348,7 +348,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file break; case AMDGPU_HW_IP_UVD: type = AMD_IP_BLOCK_TYPE_UVD; - ring_mask = adev->uvd.inst->ring.ready ? 1 : 0; + for (i = 0; i < adev->uvd.num_uvd_inst; i++) + ring_mask |= ((adev->uvd.inst[i].ring.ready ? 1 : 0) << i); ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; ib_size_alignment = 16; break; @@ -361,8 +362,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file break; case AMDGPU_HW_IP_UVD_ENC: type = AMD_IP_BLOCK_TYPE_UVD; - for (i = 0; i < adev->uvd.num_enc_rings; i++) - ring_mask |= ((adev->uvd.inst->ring_enc[i].ready ? 1 : 0) << i); + for (i = 0; i < adev->uvd.num_uvd_inst; i++) + for (j = 0; j < adev->uvd.num_enc_rings; j++) + ring_mask |= + ((adev->uvd.inst[i].ring_enc[j].ready ? 
1 : 0) << + (j + i * adev->uvd.num_enc_rings)); ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; ib_size_alignment = 1; break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 49cad08b5c16..c6850b629d0e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -362,6 +362,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) dma_fence_put(ring->vmid_wait); ring->vmid_wait = NULL; + ring->me = 0; ring->adev->rings[ring->idx] = NULL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 02683a039a98..e961492d357a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -127,7 +127,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) const char *fw_name; const struct common_firmware_header *hdr; unsigned version_major, version_minor, family_id; - int i, r; + int i, j, r; INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler); @@ -236,28 +236,30 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); - r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst->vcpu_bo, - &adev->uvd.inst->gpu_addr, &adev->uvd.inst->cpu_addr); - if (r) { - dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); - return r; - } + for (j = 0; j < adev->uvd.num_uvd_inst; j++) { - ring = &adev->uvd.inst->ring; - rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity, - rq, NULL); - if (r != 0) { - DRM_ERROR("Failed setting up UVD run queue.\n"); - return r; - } + r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo, + &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr); + if (r) { + dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); + return r; + } - for (i = 0; i < adev->uvd.max_handles; ++i) { - atomic_set(&adev->uvd.inst->handles[i], 0); - adev->uvd.inst->filp[i] = NULL; - } + ring = &adev->uvd.inst[j].ring; + rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; + r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity, + rq, NULL); + if (r != 0) { + DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j); + return r; + } + for (i = 0; i < adev->uvd.max_handles; ++i) { + atomic_set(&adev->uvd.inst[j].handles[i], 0); + adev->uvd.inst[j].filp[i] = NULL; + } + } /* from uvd v5.0 HW addressing capacity increased to 64 bits */ if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0)) adev->uvd.address_64_bit = true; @@ -284,20 +286,22 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) { - int i; - kfree(adev->uvd.inst->saved_bo); + int i, j; - drm_sched_entity_fini(&adev->uvd.inst->ring.sched, &adev->uvd.inst->entity); + for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { + kfree(adev->uvd.inst[j].saved_bo); - amdgpu_bo_free_kernel(&adev->uvd.inst->vcpu_bo, - &adev->uvd.inst->gpu_addr, - (void **)&adev->uvd.inst->cpu_addr); + drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity); - amdgpu_ring_fini(&adev->uvd.inst->ring); + amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo, + &adev->uvd.inst[j].gpu_addr, + (void **)&adev->uvd.inst[j].cpu_addr); - for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i) - 
amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]); + amdgpu_ring_fini(&adev->uvd.inst[j].ring); + for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i) + amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]); + } release_firmware(adev->uvd.fw); return 0; @@ -307,32 +311,33 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) { unsigned size; void *ptr; - int i; + int i, j; - if (adev->uvd.inst->vcpu_bo == NULL) - return 0; + for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { + if (adev->uvd.inst[j].vcpu_bo == NULL) + continue; - cancel_delayed_work_sync(&adev->uvd.inst->idle_work); + cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work); - /* only valid for physical mode */ - if (adev->asic_type < CHIP_POLARIS10) { - for (i = 0; i < adev->uvd.max_handles; ++i) - if (atomic_read(&adev->uvd.inst->handles[i])) - break; + /* only valid for physical mode */ + if (adev->asic_type < CHIP_POLARIS10) { + for (i = 0; i < adev->uvd.max_handles; ++i) + if (atomic_read(&adev->uvd.inst[j].handles[i])) + break; - if (i == adev->uvd.max_handles) - return 0; - } - - size = amdgpu_bo_size(adev->uvd.inst->vcpu_bo); - ptr = adev->uvd.inst->cpu_addr; + if (i == adev->uvd.max_handles) + continue; + } - adev->uvd.inst->saved_bo = kmalloc(size, GFP_KERNEL); - if (!adev->uvd.inst->saved_bo) - return -ENOMEM; + size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo); + ptr = adev->uvd.inst[j].cpu_addr; - memcpy_fromio(adev->uvd.inst->saved_bo, ptr, size); + adev->uvd.inst[j].saved_bo = kmalloc(size, GFP_KERNEL); + if (!adev->uvd.inst[j].saved_bo) + return -ENOMEM; + memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size); + } return 0; } @@ -340,59 +345,65 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) { unsigned size; void *ptr; + int i; - if (adev->uvd.inst->vcpu_bo == NULL) - return -EINVAL; + for (i = 0; i < adev->uvd.num_uvd_inst; i++) { + if (adev->uvd.inst[i].vcpu_bo == NULL) + return -EINVAL; - size = amdgpu_bo_size(adev->uvd.inst->vcpu_bo); - ptr = adev->uvd.inst->cpu_addr; + size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo); + ptr = adev->uvd.inst[i].cpu_addr; - if (adev->uvd.inst->saved_bo != NULL) { - memcpy_toio(ptr, adev->uvd.inst->saved_bo, size); - kfree(adev->uvd.inst->saved_bo); - adev->uvd.inst->saved_bo = NULL; - } else { - const struct common_firmware_header *hdr; - unsigned offset; - - hdr = (const struct common_firmware_header *)adev->uvd.fw->data; - if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { - offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - memcpy_toio(adev->uvd.inst->cpu_addr, adev->uvd.fw->data + offset, - le32_to_cpu(hdr->ucode_size_bytes)); - size -= le32_to_cpu(hdr->ucode_size_bytes); - ptr += le32_to_cpu(hdr->ucode_size_bytes); + if (adev->uvd.inst[i].saved_bo != NULL) { + memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size); + kfree(adev->uvd.inst[i].saved_bo); + adev->uvd.inst[i].saved_bo = NULL; + } else { + const struct common_firmware_header *hdr; + unsigned offset; + + hdr = (const struct common_firmware_header *)adev->uvd.fw->data; + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { + offset = le32_to_cpu(hdr->ucode_array_offset_bytes); + memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset, + le32_to_cpu(hdr->ucode_size_bytes)); + size -= le32_to_cpu(hdr->ucode_size_bytes); + ptr += le32_to_cpu(hdr->ucode_size_bytes); + } + memset_io(ptr, 0, size); + /* to restore uvd fence seq */ + amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring); } - memset_io(ptr, 0, size); - /* to restore uvd fence seq */ - 
amdgpu_fence_driver_force_completion(&adev->uvd.inst->ring); } - return 0; } void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) { - struct amdgpu_ring *ring = &adev->uvd.inst->ring; - int i, r; + struct amdgpu_ring *ring; + int i, j, r; - for (i = 0; i < adev->uvd.max_handles; ++i) { - uint32_t handle = atomic_read(&adev->uvd.inst->handles[i]); - if (handle != 0 && adev->uvd.inst->filp[i] == filp) { - struct dma_fence *fence; - - r = amdgpu_uvd_get_destroy_msg(ring, handle, - false, &fence); - if (r) { - DRM_ERROR("Error destroying UVD (%d)!\n", r); - continue; - } + for (j = 0; j < adev->uvd.num_uvd_inst; j++) { + ring = &adev->uvd.inst[j].ring; - dma_fence_wait(fence, false); - dma_fence_put(fence); + for (i = 0; i < adev->uvd.max_handles; ++i) { + uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]); + if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) { + struct dma_fence *fence; + + r = amdgpu_uvd_get_destroy_msg(ring, handle, + false, &fence); + if (r) { + DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r); + continue; + } - adev->uvd.inst->filp[i] = NULL; - atomic_set(&adev->uvd.inst->handles[i], 0); + dma_fence_wait(fence, false); + dma_fence_put(fence); + + adev->uvd.inst[j].filp[i] = NULL; + atomic_set(&adev->uvd.inst[j].handles[i], 0); + } } } } @@ -667,15 +678,16 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, void *ptr; long r; int i; + uint32_t ip_instance = ctx->parser->job->ring->me; if (offset & 0x3F) { - DRM_ERROR("UVD messages must be 64 byte aligned!\n"); + DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance); return -EINVAL; } r = amdgpu_bo_kmap(bo, &ptr); if (r) { - DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r); + DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r); return r; } @@ -685,7 +697,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, handle = msg[2]; if (handle == 0) { - DRM_ERROR("Invalid UVD handle!\n"); + DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance); return -EINVAL; } @@ -696,18 +708,18 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, /* try to alloc a new handle */ for (i = 0; i < adev->uvd.max_handles; ++i) { - if (atomic_read(&adev->uvd.inst->handles[i]) == handle) { - DRM_ERROR("Handle 0x%x already in use!\n", handle); + if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) { + DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle); return -EINVAL; } - if (!atomic_cmpxchg(&adev->uvd.inst->handles[i], 0, handle)) { - adev->uvd.inst->filp[i] = ctx->parser->filp; + if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) { + adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp; return 0; } } - DRM_ERROR("No more free UVD handles!\n"); + DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance); return -ENOSPC; case 1: @@ -719,27 +731,27 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, /* validate the handle */ for (i = 0; i < adev->uvd.max_handles; ++i) { - if (atomic_read(&adev->uvd.inst->handles[i]) == handle) { - if (adev->uvd.inst->filp[i] != ctx->parser->filp) { - DRM_ERROR("UVD handle collision detected!\n"); + if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) { + if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) { + DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance); return -EINVAL; } return 0; } } - DRM_ERROR("Invalid UVD handle 0x%x!\n", handle); + DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, 
handle); return -ENOENT; case 2: /* it's a destroy msg, free the handle */ for (i = 0; i < adev->uvd.max_handles; ++i) - atomic_cmpxchg(&adev->uvd.inst->handles[i], handle, 0); + atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0); amdgpu_bo_kunmap(bo); return 0; default: - DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); + DRM_ERROR("Illegal UVD(%d) message type (%d)!\n", ip_instance, msg_type); return -EINVAL; } BUG(); @@ -1043,7 +1055,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, if (r) goto err_free; - r = amdgpu_job_submit(job, ring, &adev->uvd.inst->entity, + r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity, AMDGPU_FENCE_OWNER_UNDEFINED, &f); if (r) goto err_free; @@ -1189,27 +1201,28 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence; long r; + uint32_t ip_instance = ring->me; r = amdgpu_uvd_get_create_msg(ring, 1, NULL); if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r); + DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ip_instance, r); goto error; } r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); + DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ip_instance, r); goto error; } r = dma_fence_wait_timeout(fence, false, timeout); if (r == 0) { - DRM_ERROR("amdgpu: IB test timed out.\n"); + DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ip_instance); r = -ETIMEDOUT; } else if (r < 0) { - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); + DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ip_instance, r); } else { - DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); + DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ip_instance, ring->idx); r = 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 66d4bea5fb2c..08f3b6c84bea 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -58,7 +58,7 @@ static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR); + return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR); } /** @@ -72,10 +72,10 @@ static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->uvd.inst->ring_enc[0]) - return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR); + if (ring == &adev->uvd.inst[ring->me].ring_enc[0]) + return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR); else - return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2); + return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2); } /** @@ -89,7 +89,7 @@ static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR); + return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR); } /** @@ -106,10 +106,10 @@ static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring) if (ring->use_doorbell) return adev->wb.wb[ring->wptr_offs]; - if (ring == &adev->uvd.inst->ring_enc[0]) - return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR); + if (ring == &adev->uvd.inst[ring->me].ring_enc[0]) + return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR); else - return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2); + return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2); } /** @@ -123,7 +123,7 @@ static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - 
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); } /** @@ -144,11 +144,11 @@ static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring) return; } - if (ring == &adev->uvd.inst->ring_enc[0]) - WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, + if (ring == &adev->uvd.inst[ring->me].ring_enc[0]) + WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); else - WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, + WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); } @@ -387,19 +387,21 @@ static int uvd_v7_0_sw_init(void *handle) { struct amdgpu_ring *ring; struct drm_sched_rq *rq; - int i, r; + int i, j, r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - /* UVD TRAP */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.inst->irq); - if (r) - return r; - - /* UVD ENC TRAP */ - for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.inst->irq); + for (j = 0; j < adev->uvd.num_uvd_inst; j++) { + /* UVD TRAP */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.inst[j].irq); if (r) return r; + + /* UVD ENC TRAP */ + for (i = 0; i < adev->uvd.num_enc_rings; ++i) { + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.inst[j].irq); + if (r) + return r; + } } r = amdgpu_uvd_sw_init(adev); @@ -416,43 +418,48 @@ static int uvd_v7_0_sw_init(void *handle) DRM_INFO("PSP loading UVD firmware\n"); } - ring = &adev->uvd.inst->ring_enc[0]; - rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc, - rq, NULL); - if (r) { - DRM_ERROR("Failed setting up UVD ENC run queue.\n"); - return r; + for (j = 0; j < adev->uvd.num_uvd_inst; j++) { + ring = &adev->uvd.inst[j].ring_enc[0]; + rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; + r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity_enc, + rq, NULL); + if (r) { + DRM_ERROR("(%d)Failed setting up UVD ENC run queue.\n", j); + return r; + } } r = amdgpu_uvd_resume(adev); if (r) return r; - if (!amdgpu_sriov_vf(adev)) { - ring = &adev->uvd.inst->ring; - sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); - if (r) - return r; - } - for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - ring = &adev->uvd.inst->ring_enc[i]; - sprintf(ring->name, "uvd_enc%d", i); - if (amdgpu_sriov_vf(adev)) { - ring->use_doorbell = true; - - /* currently only use the first enconding ring for - * sriov, so set unused location for other unused rings. - */ - if (i == 0) - ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2; - else - ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1; + for (j = 0; j < adev->uvd.num_uvd_inst; j++) { + if (!amdgpu_sriov_vf(adev)) { + ring = &adev->uvd.inst[j].ring; + sprintf(ring->name, "uvd<%d>", j); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0); + if (r) + return r; + } + + for (i = 0; i < adev->uvd.num_enc_rings; ++i) { + ring = &adev->uvd.inst[j].ring_enc[i]; + sprintf(ring->name, "uvd_enc%d<%d>", i, j); + if (amdgpu_sriov_vf(adev)) { + ring->use_doorbell = true; + + /* currently only use the first enconding ring for + * sriov, so set unused location for other unused rings. 
+ */ + if (i == 0) + ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2; + else + ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1; + } + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0); + if (r) + return r; } - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); - if (r) - return r; } r = amdgpu_virt_alloc_mm_table(adev); @@ -464,7 +471,7 @@ static int uvd_v7_0_sw_init(void *handle) static int uvd_v7_0_sw_fini(void *handle) { - int i, r; + int i, j, r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_virt_free_mm_table(adev); @@ -473,11 +480,12 @@ static int uvd_v7_0_sw_fini(void *handle) if (r) return r; - drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc); - - for (i = 0; i < adev->uvd.num_enc_rings; ++i) - amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]); + for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { + drm_sched_entity_fini(&adev->uvd.inst[j].ring_enc[0].sched, &adev->uvd.inst[j].entity_enc); + for (i = 0; i < adev->uvd.num_enc_rings; ++i) + amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]); + } return amdgpu_uvd_sw_fini(adev); } @@ -491,9 +499,9 @@ static int uvd_v7_0_sw_fini(void *handle) static int uvd_v7_0_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.inst->ring; + struct amdgpu_ring *ring; uint32_t tmp; - int i, r; + int i, j, r; if (amdgpu_sriov_vf(adev)) r = uvd_v7_0_sriov_start(adev); @@ -502,57 +510,60 @@ static int uvd_v7_0_hw_init(void *handle) if (r) goto done; - if (!amdgpu_sriov_vf(adev)) { - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; - goto done; + for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { + ring = &adev->uvd.inst[j].ring; + + if (!amdgpu_sriov_vf(adev)) { + ring->ready = true; + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->ready = false; + goto done; + } + + r = amdgpu_ring_alloc(ring, 10); + if (r) { + DRM_ERROR("amdgpu: (%d)ring failed to lock UVD ring (%d).\n", j, r); + goto done; + } + + tmp = PACKET0(SOC15_REG_OFFSET(UVD, j, + mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0); + amdgpu_ring_write(ring, tmp); + amdgpu_ring_write(ring, 0xFFFFF); + + tmp = PACKET0(SOC15_REG_OFFSET(UVD, j, + mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0); + amdgpu_ring_write(ring, tmp); + amdgpu_ring_write(ring, 0xFFFFF); + + tmp = PACKET0(SOC15_REG_OFFSET(UVD, j, + mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0); + amdgpu_ring_write(ring, tmp); + amdgpu_ring_write(ring, 0xFFFFF); + + /* Clear timeout status bits */ + amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j, + mmUVD_SEMA_TIMEOUT_STATUS), 0)); + amdgpu_ring_write(ring, 0x8); + + amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j, + mmUVD_SEMA_CNTL), 0)); + amdgpu_ring_write(ring, 3); + + amdgpu_ring_commit(ring); } - r = amdgpu_ring_alloc(ring, 10); - if (r) { - DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); - goto done; + for (i = 0; i < adev->uvd.num_enc_rings; ++i) { + ring = &adev->uvd.inst[j].ring_enc[i]; + ring->ready = true; + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->ready = false; + goto done; + } } - - tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0, - mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0); - amdgpu_ring_write(ring, tmp); - amdgpu_ring_write(ring, 0xFFFFF); - - tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0, - mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0); - amdgpu_ring_write(ring, tmp); - amdgpu_ring_write(ring, 0xFFFFF); - - tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0, - 
mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0); - amdgpu_ring_write(ring, tmp); - amdgpu_ring_write(ring, 0xFFFFF); - - /* Clear timeout status bits */ - amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, - mmUVD_SEMA_TIMEOUT_STATUS), 0)); - amdgpu_ring_write(ring, 0x8); - - amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, - mmUVD_SEMA_CNTL), 0)); - amdgpu_ring_write(ring, 3); - - amdgpu_ring_commit(ring); } - - for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - ring = &adev->uvd.inst->ring_enc[i]; - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; - goto done; - } - } - done: if (!r) DRM_INFO("UVD and UVD ENC initialized successfully.\n"); @@ -570,7 +581,7 @@ done: static int uvd_v7_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.inst->ring; + int i; if (!amdgpu_sriov_vf(adev)) uvd_v7_0_stop(adev); @@ -579,7 +590,8 @@ static int uvd_v7_0_hw_fini(void *handle) DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); } - ring->ready = false; + for (i = 0; i < adev->uvd.num_uvd_inst; ++i) + adev->uvd.inst[i].ring.ready = false; return 0; } @@ -619,48 +631,51 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev) { uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev); uint32_t offset; + int i; - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); - offset = 0; - } else { - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - lower_32_bits(adev->uvd.inst->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - upper_32_bits(adev->uvd.inst->gpu_addr)); - offset = size; - } + for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); + offset = 0; + } else { + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + lower_32_bits(adev->uvd.inst[i].gpu_addr)); + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + upper_32_bits(adev->uvd.inst[i].gpu_addr)); + offset = size; + } - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, - AMDGPU_UVD_FIRMWARE_OFFSET >> 3); - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size); - - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, - lower_32_bits(adev->uvd.inst->gpu_addr + offset)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, - upper_32_bits(adev->uvd.inst->gpu_addr + offset)); - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21)); - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE); - - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, - lower_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, - upper_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21)); - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, - AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40)); - - WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); 
- WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); - WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); - - WREG32_SOC15(UVD, 0, mmUVD_GP_SCRATCH4, adev->uvd.max_handles); + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size); + + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, + lower_32_bits(adev->uvd.inst[i].gpu_addr + offset)); + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, + upper_32_bits(adev->uvd.inst[i].gpu_addr + offset)); + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21)); + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE); + + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, + lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, + upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21)); + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, + AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40)); + + WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + + WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles); + } } static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev, @@ -670,6 +685,7 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev, uint64_t addr = table->gpu_addr; struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr; uint32_t size; + int i; size = header->header_size + header->vce_table_size + header->uvd_table_size; @@ -689,11 +705,12 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev, /* 4, set resp to zero */ WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0); - WDOORBELL32(adev->uvd.inst->ring_enc[0].doorbell_index, 0); - adev->wb.wb[adev->uvd.inst->ring_enc[0].wptr_offs] = 0; - adev->uvd.inst->ring_enc[0].wptr = 0; - adev->uvd.inst->ring_enc[0].wptr_old = 0; - + for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { + WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0); + adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0; + adev->uvd.inst[i].ring_enc[0].wptr = 0; + adev->uvd.inst[i].ring_enc[0].wptr_old = 0; + } /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */ WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001); @@ -726,6 +743,7 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) struct mmsch_v1_0_cmd_end end = { {0} }; uint32_t *init_table = adev->virt.mm_table.cpu_addr; struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table; + uint8_t i = 0; direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE; direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE; @@ -743,120 +761,121 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) init_table += header->uvd_table_offset; - ring = &adev->uvd.inst->ring; - ring->wptr = 0; - size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4); - - MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), - 0xFFFFFFFF, 0x00000004); - /* mc resume*/ - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - 
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), - lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), - upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); - offset = 0; - } else { - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), - lower_32_bits(adev->uvd.inst->gpu_addr)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), - upper_32_bits(adev->uvd.inst->gpu_addr)); - offset = size; + for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { + ring = &adev->uvd.inst[i].ring; + ring->wptr = 0; + size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4); + + MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), + 0xFFFFFFFF, 0x00000004); + /* mc resume*/ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); + offset = 0; + } else { + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->uvd.inst[i].gpu_addr)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->uvd.inst[i].gpu_addr)); + offset = size; + } + + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size); + + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), + lower_32_bits(adev->uvd.inst[i].gpu_addr + offset)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), + upper_32_bits(adev->uvd.inst[i].gpu_addr + offset)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE); + + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), + lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), + upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2), + AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40)); + + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles); + /* mc resume end*/ + + /* disable clock gating */ + MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL), + ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0); + + /* disable interupt */ + MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), + ~UVD_MASTINT_EN__VCPU_EN_MASK, 0); + + /* stall UMC and register bus before resetting VCPU */ + MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, + UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + + /* put LMI, VCPU, 
RBC etc... into reset */ + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), + (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | + UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | + UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | + UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | + UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | + UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | + UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK)); + + /* initialize UVD memory controller */ + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL), + (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | + UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__REQ_MODE_MASK | + 0x00100000L)); + + /* take all subblocks out of reset, except VCPU */ + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + + /* enable VCPU clock */ + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), + UVD_VCPU_CNTL__CLK_EN_MASK); + + /* enable master interrupt */ + MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), + ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), + (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK)); + + /* clear the bit 4 of UVD_STATUS */ + MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), + ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0); + + /* force RBC into idle state */ + size = order_base_2(ring->ring_size); + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp); + + ring = &adev->uvd.inst[i].ring_enc[0]; + ring->wptr = 0; + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4); + + /* boot up the VCPU */ + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0); + + /* enable UMC */ + MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0); + + MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02); } - - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), - AMDGPU_UVD_FIRMWARE_OFFSET >> 3); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size); - - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), - lower_32_bits(adev->uvd.inst->gpu_addr + offset)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), - upper_32_bits(adev->uvd.inst->gpu_addr + offset)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE); - - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), - lower_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), - upper_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); - 
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2), - AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40)); - - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH4), adev->uvd.max_handles); - /* mc resume end*/ - - /* disable clock gating */ - MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL), - ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0); - - /* disable interupt */ - MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), - ~UVD_MASTINT_EN__VCPU_EN_MASK, 0); - - /* stall UMC and register bus before resetting VCPU */ - MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, - UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); - - /* put LMI, VCPU, RBC etc... into reset */ - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), - (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | - UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | - UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | - UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | - UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | - UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | - UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK)); - - /* initialize UVD memory controller */ - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL), - (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | - UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | - UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | - UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | - UVD_LMI_CTRL__REQ_MODE_MASK | - 0x00100000L)); - - /* take all subblocks out of reset, except VCPU */ - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); - - /* enable VCPU clock */ - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), - UVD_VCPU_CNTL__CLK_EN_MASK); - - /* enable master interrupt */ - MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), - ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), - (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK)); - - /* clear the bit 4 of UVD_STATUS */ - MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), - ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0); - - /* force RBC into idle state */ - size = order_base_2(ring->ring_size); - tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp); - - ring = &adev->uvd.inst->ring_enc[0]; - ring->wptr = 0; - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE), ring->ring_size / 4); - - /* boot up the VCPU */ - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0); - - /* enable UMC */ - MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0); - - MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02); - /* add end packet */ memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end)); table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4; @@ -875,15 +894,17 @@ static int 
uvd_v7_0_sriov_start(struct amdgpu_device *adev) */ static int uvd_v7_0_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->uvd.inst->ring; + struct amdgpu_ring *ring; uint32_t rb_bufsz, tmp; uint32_t lmi_swap_cntl; uint32_t mp_swap_cntl; - int i, j, r; + int i, j, k, r; - /* disable DPG */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0, - ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); + for (k = 0; k < adev->uvd.num_uvd_inst; ++k) { + /* disable DPG */ + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0, + ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); + } /* disable byte swapping */ lmi_swap_cntl = 0; @@ -891,157 +912,159 @@ static int uvd_v7_0_start(struct amdgpu_device *adev) uvd_v7_0_mc_resume(adev); - /* disable clock gating */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL), 0, - ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK); - - /* disable interupt */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0, - ~UVD_MASTINT_EN__VCPU_EN_MASK); - - /* stall UMC and register bus before resetting VCPU */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), - UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); - mdelay(1); - - /* put LMI, VCPU, RBC etc... into reset */ - WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, - UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | - UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | - UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | - UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | - UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | - UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | - UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); - mdelay(5); + for (k = 0; k < adev->uvd.num_uvd_inst; ++k) { + ring = &adev->uvd.inst[k].ring; + /* disable clock gating */ + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0, + ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK); - /* initialize UVD memory controller */ - WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, - (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | - UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | - UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | - UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | - UVD_LMI_CTRL__REQ_MODE_MASK | - 0x00100000L); + /* disable interupt */ + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0, + ~UVD_MASTINT_EN__VCPU_EN_MASK); + + /* stall UMC and register bus before resetting VCPU */ + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), + UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + mdelay(1); + + /* put LMI, VCPU, RBC etc... 
into reset */ + WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, + UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | + UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | + UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | + UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | + UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | + UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | + UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); + mdelay(5); + + /* initialize UVD memory controller */ + WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL, + (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | + UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__REQ_MODE_MASK | + 0x00100000L); #ifdef __BIG_ENDIAN - /* swap (8 in 32) RB and IB */ - lmi_swap_cntl = 0xa; - mp_swap_cntl = 0; + /* swap (8 in 32) RB and IB */ + lmi_swap_cntl = 0xa; + mp_swap_cntl = 0; #endif - WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl); - WREG32_SOC15(UVD, 0, mmUVD_MP_SWAP_CNTL, mp_swap_cntl); - - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0, 0x40c2040); - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA1, 0x0); - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0, 0x40c2040); - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB1, 0x0); - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_ALU, 0); - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX, 0x88); - - /* take all subblocks out of reset, except VCPU */ - WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); - mdelay(5); + WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl); + WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl); - /* enable VCPU clock */ - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, - UVD_VCPU_CNTL__CLK_EN_MASK); + WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040); + WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0); + WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040); + WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0); + WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0); + WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88); - /* enable UMC */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0, - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + /* take all subblocks out of reset, except VCPU */ + WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + mdelay(5); - /* boot up the VCPU */ - WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, 0); - mdelay(10); + /* enable VCPU clock */ + WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL, + UVD_VCPU_CNTL__CLK_EN_MASK); - for (i = 0; i < 10; ++i) { - uint32_t status; + /* enable UMC */ + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0, + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); - for (j = 0; j < 100; ++j) { - status = RREG32_SOC15(UVD, 0, mmUVD_STATUS); + /* boot up the VCPU */ + WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0); + mdelay(10); + + for (i = 0; i < 10; ++i) { + uint32_t status; + + for (j = 0; j < 100; ++j) { + status = RREG32_SOC15(UVD, k, mmUVD_STATUS); + if (status & 2) + break; + mdelay(10); + } + r = 0; if (status & 2) break; + + DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k); + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, + ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + mdelay(10); + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0, + ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); mdelay(10); + r = -1; } - r = 0; - if (status & 2) - break; - DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n"); - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, - ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); - 
mdelay(10); - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0, - ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); - mdelay(10); - r = -1; - } - - if (r) { - DRM_ERROR("UVD not responding, giving up!!!\n"); - return r; - } - /* enable master interrupt */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), - (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), - ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK)); - - /* clear the bit 4 of UVD_STATUS */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0, - ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); - - /* force RBC into idle state */ - rb_bufsz = order_base_2(ring->ring_size); - tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); - WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp); - - /* set the write pointer delay */ - WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0); - - /* set the wb address */ - WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR, - (upper_32_bits(ring->gpu_addr) >> 2)); - - /* programm the RB_BASE for ring buffer */ - WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, - lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, - upper_32_bits(ring->gpu_addr)); - - /* Initialize the ring buffer's read and write pointers */ - WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0); - - ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR); - WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, - lower_32_bits(ring->wptr)); - - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0, - ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); - - ring = &adev->uvd.inst->ring_enc[0]; - WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); - WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4); + if (r) { + DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k); + return r; + } + /* enable master interrupt */ + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), + (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), + ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK)); - ring = &adev->uvd.inst->ring_enc[1]; - WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); - WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4); + /* clear the bit 4 of UVD_STATUS */ + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0, + ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); + /* force RBC into idle state */ + rb_bufsz = order_base_2(ring->ring_size); + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); + WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp); + + /* set the write pointer delay */ + 
WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0); + + /* set the wb address */ + WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR, + (upper_32_bits(ring->gpu_addr) >> 2)); + + /* programm the RB_BASE for ring buffer */ + WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, + upper_32_bits(ring->gpu_addr)); + + /* Initialize the ring buffer's read and write pointers */ + WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0); + + ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR); + WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR, + lower_32_bits(ring->wptr)); + + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0, + ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); + + ring = &adev->uvd.inst[k].ring_enc[0]; + WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr); + WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4); + + ring = &adev->uvd.inst[k].ring_enc[1]; + WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr); + WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4); + } return 0; } @@ -1054,26 +1077,30 @@ static int uvd_v7_0_start(struct amdgpu_device *adev) */ static void uvd_v7_0_stop(struct amdgpu_device *adev) { - /* force RBC into idle state */ - WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, 0x11010101); - - /* Stall UMC and register bus before resetting VCPU */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), - UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); - mdelay(1); - - /* put VCPU into reset */ - WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); - mdelay(5); + uint8_t i = 0; + + for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { + /* force RBC into idle state */ + WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101); - /* disable VCPU clock */ - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, 0x0); + /* Stall UMC and register bus before resetting VCPU */ + WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), + UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + mdelay(1); - /* Unstall UMC and register bus */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0, - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + /* put VCPU into reset */ + WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET, + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + mdelay(5); + + /* disable VCPU clock */ + WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0); + + /* Unstall UMC and register bus */ + WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0, + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + } } /** @@ -1092,26 +1119,26 @@ static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0)); amdgpu_ring_write(ring, seq); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0)); amdgpu_ring_write(ring, addr & 0xffffffff); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, 
mmUVD_GPCOM_VCPU_DATA1), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0)); amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0)); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0)); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0)); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0)); amdgpu_ring_write(ring, 2); } @@ -1160,7 +1187,7 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring) unsigned i; int r; - WREG32_SOC15(UVD, 0, mmUVD_CONTEXT_ID, 0xCAFEDEAD); + WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); if (r) { DRM_ERROR("amdgpu: (%d)cp failed to lock ring %d (%d).\n", @@ -1168,11 +1195,11 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring) return r; } amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0)); amdgpu_ring_write(ring, 0xDEADBEEF); amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32_SOC15(UVD, 0, mmUVD_CONTEXT_ID); + tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID); if (tmp == 0xDEADBEEF) break; DRM_UDELAY(1); @@ -1204,17 +1231,17 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_device *adev = ring->adev; amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0)); amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0)); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0)); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0)); amdgpu_ring_write(ring, ib->length_dw); } @@ -1242,13 +1269,13 @@ static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring, struct amdgpu_device *adev = ring->adev; amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0)); amdgpu_ring_write(ring, reg << 2); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0)); amdgpu_ring_write(ring, val); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0)); amdgpu_ring_write(ring, 8); } @@ -1258,16 +1285,16 @@ static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, struct amdgpu_device *adev = ring->adev; 
amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0)); amdgpu_ring_write(ring, reg << 2); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0)); amdgpu_ring_write(ring, val); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0)); amdgpu_ring_write(ring, mask); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0)); amdgpu_ring_write(ring, 12); } @@ -1292,7 +1319,7 @@ static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) struct amdgpu_device *adev = ring->adev; for (i = 0; i < count; i++) - amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0)); + amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0)); } @@ -1360,16 +1387,16 @@ static bool uvd_v7_0_check_soft_reset(void *handle) if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) || REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) || - (RREG32_SOC15(UVD, 0, mmUVD_STATUS) & + (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK)) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); if (srbm_soft_reset) { - adev->uvd.inst->srbm_soft_reset = srbm_soft_reset; + adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset; return true; } else { - adev->uvd.inst->srbm_soft_reset = 0; + adev->uvd.inst[ring->me].srbm_soft_reset = 0; return false; } } @@ -1378,7 +1405,7 @@ static int uvd_v7_0_pre_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->uvd.inst->srbm_soft_reset) + if (!adev->uvd.inst[ring->me].srbm_soft_reset) return 0; uvd_v7_0_stop(adev); @@ -1390,9 +1417,9 @@ static int uvd_v7_0_soft_reset(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset; - if (!adev->uvd.inst->srbm_soft_reset) + if (!adev->uvd.inst[ring->me].srbm_soft_reset) return 0; - srbm_soft_reset = adev->uvd.inst->srbm_soft_reset; + srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset; if (srbm_soft_reset) { u32 tmp; @@ -1420,7 +1447,7 @@ static int uvd_v7_0_post_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->uvd.inst->srbm_soft_reset) + if (!adev->uvd.inst[ring->me].srbm_soft_reset) return 0; mdelay(5); @@ -1442,17 +1469,29 @@ static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { + uint32_t ip_instance; + + switch (entry->client_id) { + case SOC15_IH_CLIENTID_UVD: + ip_instance = 0; + break; + default: + DRM_ERROR("Unhandled client id: %d\n", entry->client_id); + return 0; + } + DRM_DEBUG("IH: UVD TRAP\n"); + switch (entry->src_id) { case 124: - amdgpu_fence_process(&adev->uvd.inst->ring); + amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring); break; case 119: - amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]); + amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]); break; case 120: if (!amdgpu_sriov_vf(adev)) - amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]); + amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]); break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", @@ -1468,9 +1507,9 @@ static void 
uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev) { uint32_t data, data1, data2, suvd_flags; - data = RREG32_SOC15(UVD, 0, mmUVD_CGC_CTRL); - data1 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE); - data2 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_CTRL); + data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL); + data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE); + data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL); data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK); @@ -1514,18 +1553,18 @@ static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev) UVD_SUVD_CGC_CTRL__SDB_MODE_MASK); data1 |= suvd_flags; - WREG32_SOC15(UVD, 0, mmUVD_CGC_CTRL, data); - WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, 0); - WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1); - WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_CTRL, data2); + WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data); + WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0); + WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1); + WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2); } static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev) { uint32_t data, data1, cgc_flags, suvd_flags; - data = RREG32_SOC15(UVD, 0, mmUVD_CGC_GATE); - data1 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE); + data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE); + data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE); cgc_flags = UVD_CGC_GATE__SYS_MASK | UVD_CGC_GATE__UDEC_MASK | @@ -1557,8 +1596,8 @@ static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev) data |= cgc_flags; data1 |= suvd_flags; - WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, data); - WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1); + WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data); + WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1); } static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable) @@ -1617,7 +1656,7 @@ static int uvd_v7_0_set_powergating_state(void *handle, if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) return 0; - WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK); + WREG32_SOC15(UVD, ring->me, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK); if (state == AMD_PG_STATE_GATE) { uvd_v7_0_stop(adev); @@ -1720,18 +1759,27 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = { static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev) { - adev->uvd.inst->ring.funcs = &uvd_v7_0_ring_vm_funcs; - DRM_INFO("UVD is enabled in VM mode\n"); + int i; + + for (i = 0; i < adev->uvd.num_uvd_inst; i++) { + adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs; + adev->uvd.inst[i].ring.me = i; + DRM_INFO("UVD(%d) is enabled in VM mode\n", i); + } } static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev) { - int i; + int i, j; - for (i = 0; i < adev->uvd.num_enc_rings; ++i) - adev->uvd.inst->ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs; + for (j = 0; j < adev->uvd.num_uvd_inst; j++) { + for (i = 0; i < adev->uvd.num_enc_rings; ++i) { + adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs; + adev->uvd.inst[j].ring_enc[i].me = j; + } - DRM_INFO("UVD ENC is enabled in VM mode\n"); + DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j); + } } static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = { @@ -1741,8 +1789,12 @@ static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = { static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev) { - adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1; - adev->uvd.inst->irq.funcs = 
&uvd_v7_0_irq_funcs; + int i; + + for (i = 0; i < adev->uvd.num_uvd_inst; i++) { + adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1; + adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs; + } } const struct amdgpu_ip_block_version uvd_v7_0_ip_block = -- cgit From 9181dba670cf0a0e8e3bda9fa66fecfe7c28b535 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Fri, 11 May 2018 13:56:44 -0500 Subject: drm/amdgpu/vg20: Enable the 2nd instance for uvd For Vega20, set the number of UVD instances to 2 to enable the 2nd instance. The IB test built-in registers need updating for the Vega20 2nd instance. Signed-off-by: James Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 30 ++++++++++++++++-------------- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 7 ++++++- 2 files changed, 22 insertions(+), 15 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index e961492d357a..0772680371a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -72,11 +72,12 @@ #define FIRMWARE_VEGA12 "amdgpu/vega12_uvd.bin" #define FIRMWARE_VEGA20 "amdgpu/vega20_uvd.bin" -#define mmUVD_GPCOM_VCPU_DATA0_VEGA10 (0x03c4 + 0x7e00) -#define mmUVD_GPCOM_VCPU_DATA1_VEGA10 (0x03c5 + 0x7e00) -#define mmUVD_GPCOM_VCPU_CMD_VEGA10 (0x03c3 + 0x7e00) -#define mmUVD_NO_OP_VEGA10 (0x03ff + 0x7e00) -#define mmUVD_ENGINE_CNTL_VEGA10 (0x03c6 + 0x7e00) +/* These are common relative offsets for all asics, from uvd_7_0_offset.h. */ +#define UVD_GPCOM_VCPU_CMD 0x03c3 +#define UVD_GPCOM_VCPU_DATA0 0x03c4 +#define UVD_GPCOM_VCPU_DATA1 0x03c5 +#define UVD_NO_OP 0x03ff +#define UVD_BASE_SI 0x3800 /** * amdgpu_uvd_cs_ctx - Command submission parser context @@ -990,6 +991,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, uint64_t addr; long r; int i; + unsigned offset_idx = 0; + unsigned offset[3] = { UVD_BASE_SI, 0, 0 }; amdgpu_bo_kunmap(bo); amdgpu_bo_unpin(bo); @@ -1009,17 +1012,16 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, goto err; if (adev->asic_type >= CHIP_VEGA10) { - data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0_VEGA10, 0); - data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1_VEGA10, 0); - data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD_VEGA10, 0); - data[3] = PACKET0(mmUVD_NO_OP_VEGA10, 0); - } else { - data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0); - data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0); - data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0); - data[3] = PACKET0(mmUVD_NO_OP, 0); + offset_idx = 1 + ring->me; + offset[1] = adev->reg_offset[UVD_HWIP][0][1]; + offset[2] = adev->reg_offset[UVD_HWIP][1][1]; } + data[0] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA0, 0); + data[1] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA1, 0); + data[2] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_CMD, 0); + data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0); + ib = &job->ibs[0]; addr = amdgpu_bo_gpu_offset(bo); ib->ptr[0] = data[0]; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 08f3b6c84bea..6b719e11b2cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -40,6 +40,8 @@ #include "mmhub/mmhub_1_0_offset.h" #include "mmhub/mmhub_1_0_sh_mask.h" +#define UVD7_MAX_HW_INSTANCES_VEGA20 2 + static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev); static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev); 
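/* A minimal illustrative sketch, not a hunk of this patch: with
 * num_uvd_inst == 2 on Vega20, per-instance UVD registers are reached by
 * indexing the instance rather than hard-coding 0, e.g.
 *
 *	for (i = 0; i < adev->uvd.num_uvd_inst; ++i)
 *		WREG32_SOC15(UVD, i, mmUVD_STATUS, 0);
 */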
static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev); @@ -370,7 +372,10 @@ error: static int uvd_v7_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->uvd.num_uvd_inst = 1; + if (adev->asic_type == CHIP_VEGA20) + adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20; + else + adev->uvd.num_uvd_inst = 1; if (amdgpu_sriov_vf(adev)) adev->uvd.num_enc_rings = 1; -- cgit From 6f0fd919471cf2477e86e2be9b53ecae37b0e815 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 17 May 2018 12:33:34 -0500 Subject: drm/amdgpu: count fences from all uvd instances in idle handler MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Current multi-UVD hardware uses a single clock and power source so handle all instances in the idle handler. Reviewed-by: James Zhu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 0772680371a1..be2917c6698e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1146,7 +1146,11 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) { struct amdgpu_device *adev = container_of(work, struct amdgpu_device, uvd.inst->idle_work.work); - unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.inst->ring); + unsigned fences = 0, i; + + for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { + fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring); + } if (fences == 0) { if (adev->pm.dpm_enabled) { -- cgit From 4bd2c5dd763866b827dd7e95b9ea71c47fa06126 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 17 May 2018 12:45:52 -0500 Subject: drm/amdgpu: Take uvd encode rings into account in idle work (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Take the encode rings into account in the idle work handler. 
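As a minimal sketch (the helper name below is illustrative, not taken from this patch), the resulting count folds each instance's decode ring and all of its encode rings into a single total; the block is only clock- and power-gated once that total reaches zero:

static unsigned amdgpu_uvd_count_all_fences(struct amdgpu_device *adev)
{
	unsigned fences = 0, i, j;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		/* one decode ring per instance */
		fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
		/* plus every encode ring of that instance */
		for (j = 0; j < adev->uvd.num_enc_rings; ++j)
			fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
	}
	return fences;
}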
v2: fix typo: s/num_uvd_inst/num_enc_rings/ Reviewed-by: James Zhu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index be2917c6698e..bcf68f80bbf0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1146,10 +1146,13 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) { struct amdgpu_device *adev = container_of(work, struct amdgpu_device, uvd.inst->idle_work.work); - unsigned fences = 0, i; + unsigned fences = 0, i, j; for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring); + for (j = 0; j < adev->uvd.num_enc_rings; ++j) { + fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]); + } } if (fences == 0) { -- cgit From dd06eecb73d2e5f5c4039bcd9d83d4f7e0156d6f Mon Sep 17 00:00:00 2001 From: James Zhu Date: Wed, 6 Jun 2018 14:38:14 -0400 Subject: drm/amdgpu/vg20:support new UVD FW version naming convention Vega20 UVD Firmware has a new version naming convention: [31, 30] for encode interface major [29, 24] for encode interface minor [15, 8] for decode interface minor [7, 0] for hardware family id Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index bcf68f80bbf0..630e273c16ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -208,10 +208,21 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) hdr = (const struct common_firmware_header *)adev->uvd.fw->data; family_id = le32_to_cpu(hdr->ucode_version) & 0xff; - version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff; - version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; - DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n", - version_major, version_minor, family_id); + + if (adev->asic_type < CHIP_VEGA20) { + version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff; + version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; + DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n", + version_major, version_minor, family_id); + } else { + unsigned int enc_major, enc_minor, dec_minor; + + dec_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; + enc_minor = (le32_to_cpu(hdr->ucode_version) >> 24) & 0x3f; + enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3; + DRM_INFO("Found UVD firmware ENC: %hu.%hu DEC: .%hu Family ID: %hu\n", + enc_major, enc_minor, dec_minor, family_id); + } /* * Limit the number of UVD handles depending on microcode major @@ -219,7 +230,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) * instances support is 1.80. So all subsequent versions should * also have the same support. 
*/ - if ((version_major > 0x01) || + if (adev->asic_type >= CHIP_VEGA20 || (version_major > 0x01) || ((version_major == 0x01) && (version_minor >= 0x50))) adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES; -- cgit From 5c2199270275581a06571960e7d6abac008ff060 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 14 Jun 2018 10:24:06 -0500 Subject: drm/amdgpu: Fix uvd firmware version information for vega20 (v2) The uvd version information was not set correctly for vega20. Rearrange the logic to set it correctly and fix the warnings as a result. v2: fix version formatting for userspace based on feedback from Leo Fixes: 96ca7f298f (drm/amdgpu/vg20:support new UVD FW version naming convention) Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 42 +++++++++++++++++++-------------- 1 file changed, 24 insertions(+), 18 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 630e273c16ce..04d77f19acc8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -127,7 +127,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) unsigned long bo_size; const char *fw_name; const struct common_firmware_header *hdr; - unsigned version_major, version_minor, family_id; + unsigned family_id; int i, j, r; INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler); @@ -210,10 +210,31 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) family_id = le32_to_cpu(hdr->ucode_version) & 0xff; if (adev->asic_type < CHIP_VEGA20) { + unsigned version_major, version_minor; + version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff; version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n", version_major, version_minor, family_id); + + /* + * Limit the number of UVD handles depending on microcode major + * and minor versions. The firmware version which has 40 UVD + * instances support is 1.80. So all subsequent versions should + * also have the same support. + */ + if ((version_major > 0x01) || + ((version_major == 0x01) && (version_minor >= 0x50))) + adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES; + + adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) | + (family_id << 8)); + + if ((adev->asic_type == CHIP_POLARIS10 || + adev->asic_type == CHIP_POLARIS11) && + (adev->uvd.fw_version < FW_1_66_16)) + DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n", + version_major, version_minor); } else { unsigned int enc_major, enc_minor, dec_minor; @@ -222,26 +243,11 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3; DRM_INFO("Found UVD firmware ENC: %hu.%hu DEC: .%hu Family ID: %hu\n", enc_major, enc_minor, dec_minor, family_id); - } - /* - * Limit the number of UVD handles depending on microcode major - * and minor versions. The firmware version which has 40 UVD - * instances support is 1.80. So all subsequent versions should - * also have the same support. 
- */ - if (adev->asic_type >= CHIP_VEGA20 || (version_major > 0x01) || ((version_major == 0x01) && (version_minor >= 0x50))) adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES; - adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) | (family_id << 8)); - - if ((adev->asic_type == CHIP_POLARIS10 || - adev->asic_type == CHIP_POLARIS11) && - (adev->uvd.fw_version < FW_1_66_16)) - DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n", - version_major, version_minor); + adev->uvd.fw_version = le32_to_cpu(hdr->ucode_version); + } bo_size = AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles; -- cgit From 5c53d19b76dccbaf340b11acb837d40c0789049d Mon Sep 17 00:00:00 2001 From: James Zhu Date: Mon, 18 Jun 2018 13:46:16 -0400 Subject: drm/amdgpu: All UVD instances share one idle_work handle MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All UVD instances have only one DPM control, so it is better to share one idle_work handle. Signed-off-by: James Zhu Reviewed-by: Alex Deucher Reviewed-by: Christian König Tested-by: Stefan Agner Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 14 +++++++------- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index bcf68f80bbf0..3ff08e326838 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -130,7 +130,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) unsigned version_major, version_minor, family_id; int i, j, r; - INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler); + INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler); switch (adev->asic_type) { #ifdef CONFIG_DRM_AMDGPU_CIK @@ -314,12 +314,12 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) void *ptr; int i, j; + cancel_delayed_work_sync(&adev->uvd.idle_work); + for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { if (adev->uvd.inst[j].vcpu_bo == NULL) continue; - cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work); - /* only valid for physical mode */ if (adev->asic_type < CHIP_POLARIS10) { for (i = 0; i < adev->uvd.max_handles; ++i) @@ -1145,7 +1145,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, static void amdgpu_uvd_idle_work_handler(struct work_struct *work) { struct amdgpu_device *adev = - container_of(work, struct amdgpu_device, uvd.inst->idle_work.work); + container_of(work, struct amdgpu_device, uvd.idle_work.work); unsigned fences = 0, i, j; for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { @@ -1167,7 +1167,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) AMD_CG_STATE_GATE); } } else { - schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT); + schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT); } } @@ -1179,7 +1179,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring) if (amdgpu_sriov_vf(adev)) return; - set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work); + set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work); if (set_clocks) { if (adev->pm.dpm_enabled) { amdgpu_dpm_enable_uvd(adev, true); @@ -1196,7 +1196,7 @@ void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring) { if (!amdgpu_sriov_vf(ring->adev)) - 
schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT); + schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index b1579fba134c..8b23a1b00c76 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h @@ -44,7 +44,6 @@ struct amdgpu_uvd_inst { void *saved_bo; atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; - struct delayed_work idle_work; struct amdgpu_ring ring; struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS]; struct amdgpu_irq_src irq; @@ -62,6 +61,7 @@ struct amdgpu_uvd { bool address_64_bit; bool use_ctx_buf; struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES]; + struct delayed_work idle_work; }; int amdgpu_uvd_sw_init(struct amdgpu_device *adev); -- cgit From 180fc134d712a93a2bbc3d11ed657b5208e6f90f Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Tue, 5 Jun 2018 12:43:23 -0400 Subject: drm/scheduler: Rename cleanup functions v2. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Everything in the flush code path (i.e. waiting for the SW queue to become empty) is named *_flush(), and everything in the release code path is named *_fini(). This patch also affects the amdgpu and etnaviv drivers, which use those functions. v2: Also apply the change to v3d. Signed-off-by: Andrey Grodzovsky Suggested-by: Christian König Acked-by: Lucas Stach Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 8 ++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 2 +- drivers/gpu/drm/etnaviv/etnaviv_drv.c | 4 ++-- drivers/gpu/drm/scheduler/gpu_scheduler.c | 18 +++++++++--------- drivers/gpu/drm/v3d/v3d_drv.c | 2 +- include/drm/gpu_scheduler.h | 6 +++--- 11 files changed, 26 insertions(+), 26 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 64b3a1ed04dc..c0f06c02f2de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -104,7 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, failed: for (j = 0; j < i; j++) - drm_sched_entity_fini(&adev->rings[j]->sched, + drm_sched_entity_destroy(&adev->rings[j]->sched, &ctx->rings[j].entity); kfree(ctx->fences); ctx->fences = NULL; @@ -178,7 +178,7 @@ static void amdgpu_ctx_do_release(struct kref *ref) if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring) continue; - drm_sched_entity_fini(&ctx->adev->rings[i]->sched, + drm_sched_entity_destroy(&ctx->adev->rings[i]->sched, &ctx->rings[i].entity); } @@ -466,7 +466,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr) if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring) continue; - max_wait = drm_sched_entity_do_release(&ctx->adev->rings[i]->sched, + max_wait = drm_sched_entity_flush(&ctx->adev->rings[i]->sched, &ctx->rings[i].entity, max_wait); } } @@ -492,7 +492,7 @@ void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr) continue; if (kref_read(&ctx->refcount) == 1) - drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched, + drm_sched_entity_fini(&ctx->adev->rings[i]->sched, 
&ctx->rings[i].entity); else DRM_ERROR("ctx %p is still alive\n", ctx); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 0c084d3d0865..0246cb87d9e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -162,7 +162,7 @@ error_mem: static void amdgpu_ttm_global_fini(struct amdgpu_device *adev) { if (adev->mman.mem_global_referenced) { - drm_sched_entity_fini(adev->mman.entity.sched, + drm_sched_entity_destroy(adev->mman.entity.sched, &adev->mman.entity); mutex_destroy(&adev->mman.gtt_window_lock); drm_global_item_unref(&adev->mman.bo_global_ref.ref); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index cc15d3230402..0b46ea1c6290 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -309,7 +309,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { kfree(adev->uvd.inst[j].saved_bo); - drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity); + drm_sched_entity_destroy(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity); amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo, &adev->uvd.inst[j].gpu_addr, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 23d960ec1cf2..b0dcdfd85f5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -222,7 +222,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev) if (adev->vce.vcpu_bo == NULL) return 0; - drm_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity); + drm_sched_entity_destroy(&adev->vce.ring[0].sched, &adev->vce.entity); amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr, (void **)&adev->vce.cpu_addr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 590db78b8c72..837066076ccf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2643,7 +2643,7 @@ error_free_root: vm->root.base.bo = NULL; error_free_sched_entity: - drm_sched_entity_fini(&ring->sched, &vm->entity); + drm_sched_entity_destroy(&ring->sched, &vm->entity); return r; } @@ -2780,7 +2780,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); } - drm_sched_entity_fini(vm->entity.sched, &vm->entity); + drm_sched_entity_destroy(vm->entity.sched, &vm->entity); if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { dev_err(adev->dev, "still active bo inside vm\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index bfddf97dd13e..1df1c6115341 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -470,7 +470,7 @@ static int uvd_v6_0_sw_fini(void *handle) return r; if (uvd_v6_0_enc_support(adev)) { - drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc); + drm_sched_entity_destroy(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc); for (i = 0; i < adev->uvd.num_enc_rings; ++i) amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 57d32f21b3a6..ba244d3b74db 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -491,7 +491,7 @@ static int uvd_v7_0_sw_fini(void *handle) return r; for (j = 0; 
j < adev->uvd.num_uvd_inst; ++j) { - drm_sched_entity_fini(&adev->uvd.inst[j].ring_enc[0].sched, &adev->uvd.inst[j].entity_enc); + drm_sched_entity_destroy(&adev->uvd.inst[j].ring_enc[0].sched, &adev->uvd.inst[j].entity_enc); for (i = 0; i < adev->uvd.num_enc_rings; ++i) amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index e5013a999147..45bfdf4cc107 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -78,8 +78,8 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file) gpu->lastctx = NULL; mutex_unlock(&gpu->lock); - drm_sched_entity_fini(&gpu->sched, - &ctx->sched_entity[i]); + drm_sched_entity_destroy(&gpu->sched, + &ctx->sched_entity[i]); } } diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index 6a316701da73..7d2560699b84 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c @@ -256,7 +256,7 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, /** - * drm_sched_entity_do_release - Destroy a context entity + * drm_sched_entity_flush - Flush a context entity * * @sched: scheduler instance * @entity: scheduler entity @@ -267,7 +267,7 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, * * Returns the remaining time in jiffies left from the input timeout */ -long drm_sched_entity_do_release(struct drm_gpu_scheduler *sched, +long drm_sched_entity_flush(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity, long timeout) { long ret = timeout; @@ -294,7 +294,7 @@ long drm_sched_entity_do_release(struct drm_gpu_scheduler *sched, return ret; } -EXPORT_SYMBOL(drm_sched_entity_do_release); +EXPORT_SYMBOL(drm_sched_entity_flush); /** * drm_sched_entity_cleanup - Destroy a context entity @@ -306,7 +306,7 @@ EXPORT_SYMBOL(drm_sched_entity_do_release); * entity and signals all jobs with an error code if the process was killed. 
* */ -void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched, +void drm_sched_entity_fini(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity) { @@ -357,7 +357,7 @@ void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched, dma_fence_put(entity->last_scheduled); entity->last_scheduled = NULL; } -EXPORT_SYMBOL(drm_sched_entity_cleanup); +EXPORT_SYMBOL(drm_sched_entity_fini); /** * drm_sched_entity_fini - Destroy a context entity @@ -367,13 +367,13 @@ EXPORT_SYMBOL(drm_sched_entity_cleanup); * * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup() */ -void drm_sched_entity_fini(struct drm_gpu_scheduler *sched, +void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity) { - drm_sched_entity_do_release(sched, entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY); - drm_sched_entity_cleanup(sched, entity); + drm_sched_entity_flush(sched, entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY); + drm_sched_entity_fini(sched, entity); } -EXPORT_SYMBOL(drm_sched_entity_fini); +EXPORT_SYMBOL(drm_sched_entity_destroy); static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb) { diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index cdb582043b4f..567f7d46d912 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -151,7 +151,7 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file) enum v3d_queue q; for (q = 0; q < V3D_MAX_QUEUES; q++) { - drm_sched_entity_fini(&v3d->queue[q].sched, + drm_sched_entity_destroy(&v3d->queue[q].sched, &v3d_priv->sched_entity[q]); } diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 7c2dfd6cc1af..4214ceb71c05 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -284,12 +284,12 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity, struct drm_sched_rq *rq, atomic_t *guilty); -long drm_sched_entity_do_release(struct drm_gpu_scheduler *sched, +long drm_sched_entity_flush(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity, long timeout); -void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched, - struct drm_sched_entity *entity); void drm_sched_entity_fini(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity); +void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched, + struct drm_sched_entity *entity); void drm_sched_entity_push_job(struct drm_sched_job *sched_job, struct drm_sched_entity *entity); void drm_sched_entity_set_rq(struct drm_sched_entity *entity, -- cgit From ce206464e3c7a0936468f0190599d2e6070850f4 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 2 Jul 2018 14:32:28 -0500 Subject: drm/amdgpu: switch firmware path for CIK parts (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use separate firmware path for amdgpu to avoid conflicts with radeon on CIK parts. 
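A minimal sketch of the resulting lookup, reusing the chip_name/fw_name locals from the init_microcode functions changed below (shown for the gmc_v7_0 case):

char fw_name[30];

/* every CIK blob now loads from the amdgpu/ firmware directory, e.g.
 * "amdgpu/bonaire_mc.bin" instead of "radeon/bonaire_mc.bin" */
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);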
v2: squash in logic simplification (Alex) Reviewed-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 8 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 10 ++--- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 10 ++--- drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 10 ++--- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 24 +++++------ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 72 ++++++++++++++++----------------- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 9 ++--- 7 files changed, 70 insertions(+), 73 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index e950730f1933..693ec5ea4950 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -314,17 +314,17 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, (adev->pdev->revision == 0x81) || (adev->pdev->device == 0x665f)) { info->is_kicker = true; - strcpy(fw_name, "radeon/bonaire_k_smc.bin"); + strcpy(fw_name, "amdgpu/bonaire_k_smc.bin"); } else { - strcpy(fw_name, "radeon/bonaire_smc.bin"); + strcpy(fw_name, "amdgpu/bonaire_smc.bin"); } break; case CHIP_HAWAII: if (adev->pdev->revision == 0x80) { info->is_kicker = true; - strcpy(fw_name, "radeon/hawaii_k_smc.bin"); + strcpy(fw_name, "amdgpu/hawaii_k_smc.bin"); } else { - strcpy(fw_name, "radeon/hawaii_smc.bin"); + strcpy(fw_name, "amdgpu/hawaii_smc.bin"); } break; case CHIP_TOPAZ: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 0b46ea1c6290..3e70eb61a960 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -53,11 +53,11 @@ /* Firmware Names */ #ifdef CONFIG_DRM_AMDGPU_CIK -#define FIRMWARE_BONAIRE "radeon/bonaire_uvd.bin" -#define FIRMWARE_KABINI "radeon/kabini_uvd.bin" -#define FIRMWARE_KAVERI "radeon/kaveri_uvd.bin" -#define FIRMWARE_HAWAII "radeon/hawaii_uvd.bin" -#define FIRMWARE_MULLINS "radeon/mullins_uvd.bin" +#define FIRMWARE_BONAIRE "amdgpu/bonaire_uvd.bin" +#define FIRMWARE_KABINI "amdgpu/kabini_uvd.bin" +#define FIRMWARE_KAVERI "amdgpu/kaveri_uvd.bin" +#define FIRMWARE_HAWAII "amdgpu/hawaii_uvd.bin" +#define FIRMWARE_MULLINS "amdgpu/mullins_uvd.bin" #endif #define FIRMWARE_TONGA "amdgpu/tonga_uvd.bin" #define FIRMWARE_CARRIZO "amdgpu/carrizo_uvd.bin" diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index b0dcdfd85f5b..6ae1ad7e83b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -40,11 +40,11 @@ /* Firmware Names */ #ifdef CONFIG_DRM_AMDGPU_CIK -#define FIRMWARE_BONAIRE "radeon/bonaire_vce.bin" -#define FIRMWARE_KABINI "radeon/kabini_vce.bin" -#define FIRMWARE_KAVERI "radeon/kaveri_vce.bin" -#define FIRMWARE_HAWAII "radeon/hawaii_vce.bin" -#define FIRMWARE_MULLINS "radeon/mullins_vce.bin" +#define FIRMWARE_BONAIRE "amdgpu/bonaire_vce.bin" +#define FIRMWARE_KABINI "amdgpu/kabini_vce.bin" +#define FIRMWARE_KAVERI "amdgpu/kaveri_vce.bin" +#define FIRMWARE_HAWAII "amdgpu/hawaii_vce.bin" +#define FIRMWARE_MULLINS "amdgpu/mullins_vce.bin" #endif #define FIRMWARE_TONGA "amdgpu/tonga_vce.bin" #define FIRMWARE_CARRIZO "amdgpu/carrizo_vce.bin" diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 7c4ff71e7476..d79ad93b01c3 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -49,10 +49,10 @@ 
#include "gmc/gmc_7_1_d.h" #include "gmc/gmc_7_1_sh_mask.h" -MODULE_FIRMWARE("radeon/bonaire_smc.bin"); -MODULE_FIRMWARE("radeon/bonaire_k_smc.bin"); -MODULE_FIRMWARE("radeon/hawaii_smc.bin"); -MODULE_FIRMWARE("radeon/hawaii_k_smc.bin"); +MODULE_FIRMWARE("amdgpu/bonaire_smc.bin"); +MODULE_FIRMWARE("amdgpu/bonaire_k_smc.bin"); +MODULE_FIRMWARE("amdgpu/hawaii_smc.bin"); +MODULE_FIRMWARE("amdgpu/hawaii_k_smc.bin"); #define MC_CG_ARB_FREQ_F0 0x0a #define MC_CG_ARB_FREQ_F1 0x0b @@ -5815,7 +5815,7 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev) default: BUG(); } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name); err = request_firmware(&adev->pm.fw, fw_name, adev->dev); if (err) goto out; diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index dbd553a8d584..d0fa2aac2388 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -54,16 +54,16 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev); static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev); static int cik_sdma_soft_reset(void *handle); -MODULE_FIRMWARE("radeon/bonaire_sdma.bin"); -MODULE_FIRMWARE("radeon/bonaire_sdma1.bin"); -MODULE_FIRMWARE("radeon/hawaii_sdma.bin"); -MODULE_FIRMWARE("radeon/hawaii_sdma1.bin"); -MODULE_FIRMWARE("radeon/kaveri_sdma.bin"); -MODULE_FIRMWARE("radeon/kaveri_sdma1.bin"); -MODULE_FIRMWARE("radeon/kabini_sdma.bin"); -MODULE_FIRMWARE("radeon/kabini_sdma1.bin"); -MODULE_FIRMWARE("radeon/mullins_sdma.bin"); -MODULE_FIRMWARE("radeon/mullins_sdma1.bin"); +MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin"); +MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin"); +MODULE_FIRMWARE("amdgpu/hawaii_sdma.bin"); +MODULE_FIRMWARE("amdgpu/hawaii_sdma1.bin"); +MODULE_FIRMWARE("amdgpu/kaveri_sdma.bin"); +MODULE_FIRMWARE("amdgpu/kaveri_sdma1.bin"); +MODULE_FIRMWARE("amdgpu/kabini_sdma.bin"); +MODULE_FIRMWARE("amdgpu/kabini_sdma1.bin"); +MODULE_FIRMWARE("amdgpu/mullins_sdma.bin"); +MODULE_FIRMWARE("amdgpu/mullins_sdma1.bin"); u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev); @@ -132,9 +132,9 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev) for (i = 0; i < adev->sdma.num_instances; i++) { if (i == 0) - snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name); else - snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name); err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev); if (err) goto out; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 42b6144c1fd5..95452c5a9df6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -57,36 +57,36 @@ static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev); static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev); static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev); -MODULE_FIRMWARE("radeon/bonaire_pfp.bin"); -MODULE_FIRMWARE("radeon/bonaire_me.bin"); -MODULE_FIRMWARE("radeon/bonaire_ce.bin"); -MODULE_FIRMWARE("radeon/bonaire_rlc.bin"); -MODULE_FIRMWARE("radeon/bonaire_mec.bin"); - -MODULE_FIRMWARE("radeon/hawaii_pfp.bin"); -MODULE_FIRMWARE("radeon/hawaii_me.bin"); -MODULE_FIRMWARE("radeon/hawaii_ce.bin"); -MODULE_FIRMWARE("radeon/hawaii_rlc.bin"); 
-MODULE_FIRMWARE("radeon/hawaii_mec.bin"); - -MODULE_FIRMWARE("radeon/kaveri_pfp.bin"); -MODULE_FIRMWARE("radeon/kaveri_me.bin"); -MODULE_FIRMWARE("radeon/kaveri_ce.bin"); -MODULE_FIRMWARE("radeon/kaveri_rlc.bin"); -MODULE_FIRMWARE("radeon/kaveri_mec.bin"); -MODULE_FIRMWARE("radeon/kaveri_mec2.bin"); - -MODULE_FIRMWARE("radeon/kabini_pfp.bin"); -MODULE_FIRMWARE("radeon/kabini_me.bin"); -MODULE_FIRMWARE("radeon/kabini_ce.bin"); -MODULE_FIRMWARE("radeon/kabini_rlc.bin"); -MODULE_FIRMWARE("radeon/kabini_mec.bin"); - -MODULE_FIRMWARE("radeon/mullins_pfp.bin"); -MODULE_FIRMWARE("radeon/mullins_me.bin"); -MODULE_FIRMWARE("radeon/mullins_ce.bin"); -MODULE_FIRMWARE("radeon/mullins_rlc.bin"); -MODULE_FIRMWARE("radeon/mullins_mec.bin"); +MODULE_FIRMWARE("amdgpu/bonaire_pfp.bin"); +MODULE_FIRMWARE("amdgpu/bonaire_me.bin"); +MODULE_FIRMWARE("amdgpu/bonaire_ce.bin"); +MODULE_FIRMWARE("amdgpu/bonaire_rlc.bin"); +MODULE_FIRMWARE("amdgpu/bonaire_mec.bin"); + +MODULE_FIRMWARE("amdgpu/hawaii_pfp.bin"); +MODULE_FIRMWARE("amdgpu/hawaii_me.bin"); +MODULE_FIRMWARE("amdgpu/hawaii_ce.bin"); +MODULE_FIRMWARE("amdgpu/hawaii_rlc.bin"); +MODULE_FIRMWARE("amdgpu/hawaii_mec.bin"); + +MODULE_FIRMWARE("amdgpu/kaveri_pfp.bin"); +MODULE_FIRMWARE("amdgpu/kaveri_me.bin"); +MODULE_FIRMWARE("amdgpu/kaveri_ce.bin"); +MODULE_FIRMWARE("amdgpu/kaveri_rlc.bin"); +MODULE_FIRMWARE("amdgpu/kaveri_mec.bin"); +MODULE_FIRMWARE("amdgpu/kaveri_mec2.bin"); + +MODULE_FIRMWARE("amdgpu/kabini_pfp.bin"); +MODULE_FIRMWARE("amdgpu/kabini_me.bin"); +MODULE_FIRMWARE("amdgpu/kabini_ce.bin"); +MODULE_FIRMWARE("amdgpu/kabini_rlc.bin"); +MODULE_FIRMWARE("amdgpu/kabini_mec.bin"); + +MODULE_FIRMWARE("amdgpu/mullins_pfp.bin"); +MODULE_FIRMWARE("amdgpu/mullins_me.bin"); +MODULE_FIRMWARE("amdgpu/mullins_ce.bin"); +MODULE_FIRMWARE("amdgpu/mullins_rlc.bin"); +MODULE_FIRMWARE("amdgpu/mullins_mec.bin"); static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] = { @@ -925,7 +925,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev) default: BUG(); } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name); err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev); if (err) goto out; @@ -933,7 +933,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev) if (err) goto out; - snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name); err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); if (err) goto out; @@ -941,7 +941,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev) if (err) goto out; - snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name); err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); if (err) goto out; @@ -949,7 +949,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev) if (err) goto out; - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); if (err) goto out; @@ -958,7 +958,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev) goto out; if (adev->asic_type == CHIP_KAVERI) { - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); err = request_firmware(&adev->gfx.mec2_fw, fw_name, 
adev->dev); if (err) goto out; @@ -967,7 +967,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev) goto out; } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); if (err) goto out; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 7147bfe25a23..78339309a00c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -47,8 +47,8 @@ static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev); static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); static int gmc_v7_0_wait_for_idle(void *handle); -MODULE_FIRMWARE("radeon/bonaire_mc.bin"); -MODULE_FIRMWARE("radeon/hawaii_mc.bin"); +MODULE_FIRMWARE("amdgpu/bonaire_mc.bin"); +MODULE_FIRMWARE("amdgpu/hawaii_mc.bin"); MODULE_FIRMWARE("amdgpu/topaz_mc.bin"); static const u32 golden_settings_iceland_a11[] = @@ -147,10 +147,7 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev) default: BUG(); } - if (adev->asic_type == CHIP_TOPAZ) - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name); - else - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name); err = request_firmware(&adev->gmc.fw, fw_name, adev->dev); if (err) -- cgit From aa16b6c6b4d979234f830a48add47d02c12bb569 Mon Sep 17 00:00:00 2001 From: Nayan Deshmukh Date: Fri, 13 Jul 2018 15:21:14 +0530 Subject: drm/scheduler: modify args of drm_sched_entity_init MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit replace run queue by a list of run queues and remove the sched arg as that is part of run queue itself Signed-off-by: Nayan Deshmukh Reviewed-by: Christian König Acked-by: Eric Anholt Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 3 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 3 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 3 +-- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 4 ++-- drivers/gpu/drm/etnaviv/etnaviv_drv.c | 8 ++++---- drivers/gpu/drm/scheduler/gpu_scheduler.c | 20 ++++++++++++-------- drivers/gpu/drm/v3d/v3d_drv.c | 7 +++---- include/drm/gpu_scheduler.h | 6 +++--- 11 files changed, 33 insertions(+), 33 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 0120b24fae1b..83e3b320a793 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -90,8 +90,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, if (ring == &adev->gfx.kiq.ring) continue; - r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity, - rq, &ctx->guilty); + r = drm_sched_entity_init(&ctx->rings[i].entity, + &rq, 1, &ctx->guilty); if (r) goto failed; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 6a3fead5c1f0..11a12483c995 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1918,8 +1918,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) ring = adev->mman.buffer_funcs_ring; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL]; - r = 
drm_sched_entity_init(&ring->sched, &adev->mman.entity, - rq, NULL); + r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL); if (r) { DRM_ERROR("Failed setting up TTM BO move entity (%d)\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 3e70eb61a960..a6c2cace4b9d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -266,8 +266,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) ring = &adev->uvd.inst[j].ring; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity, - rq, NULL); + r = drm_sched_entity_init(&adev->uvd.inst[j].entity, &rq, + 1, NULL); if (r != 0) { DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 6ae1ad7e83b3..ffb0fcc9707e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -190,8 +190,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) ring = &adev->vce.ring[0]; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&ring->sched, &adev->vce.entity, - rq, NULL); + r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL); if (r != 0) { DRM_ERROR("Failed setting up VCE run queue.\n"); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 0fd0a718763b..484e2c19c027 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2564,8 +2564,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, ring_instance %= adev->vm_manager.vm_pte_num_rings; ring = adev->vm_manager.vm_pte_rings[ring_instance]; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL]; - r = drm_sched_entity_init(&ring->sched, &vm->entity, - rq, NULL); + r = drm_sched_entity_init(&vm->entity, &rq, 1, NULL); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 2623f249cb7a..1c118c02e8cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -430,8 +430,8 @@ static int uvd_v6_0_sw_init(void *handle) struct drm_sched_rq *rq; ring = &adev->uvd.inst->ring_enc[0]; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc, - rq, NULL); + r = drm_sched_entity_init(&adev->uvd.inst->entity_enc, + &rq, 1, NULL); if (r) { DRM_ERROR("Failed setting up UVD ENC run queue.\n"); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index ce360ad16856..d48bc3393545 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -432,8 +432,8 @@ static int uvd_v7_0_sw_init(void *handle) for (j = 0; j < adev->uvd.num_uvd_inst; j++) { ring = &adev->uvd.inst[j].ring_enc[0]; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity_enc, - rq, NULL); + r = drm_sched_entity_init(&adev->uvd.inst[j].entity_enc, + &rq, 1, NULL); if (r) { DRM_ERROR("(%d)Failed setting up UVD ENC run queue.\n", j); return r; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 45bfdf4cc107..36414ba56b22 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c 
@@ -49,12 +49,12 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file) for (i = 0; i < ETNA_MAX_PIPES; i++) { struct etnaviv_gpu *gpu = priv->gpu[i]; + struct drm_sched_rq *rq; if (gpu) { - drm_sched_entity_init(&gpu->sched, - &ctx->sched_entity[i], - &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL], - NULL); + rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; + drm_sched_entity_init(&ctx->sched_entity[i], + &rq, 1, NULL); } } diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index 429b1328653a..16bf446aa6b3 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c @@ -162,26 +162,30 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq) * drm_sched_entity_init - Init a context entity used by scheduler when * submit to HW ring. * - * @sched: scheduler instance * @entity: scheduler entity to init - * @rq: the run queue this entity belongs + * @rq_list: the list of run queue on which jobs from this + * entity can be submitted + * @num_rq_list: number of run queue in rq_list * @guilty: atomic_t set to 1 when a job on this queue * is found to be guilty causing a timeout * + * Note: the rq_list should have atleast one element to schedule + * the entity + * * Returns 0 on success or a negative error code on failure. */ -int drm_sched_entity_init(struct drm_gpu_scheduler *sched, - struct drm_sched_entity *entity, - struct drm_sched_rq *rq, +int drm_sched_entity_init(struct drm_sched_entity *entity, + struct drm_sched_rq **rq_list, + unsigned int num_rq_list, atomic_t *guilty) { - if (!(sched && entity && rq)) + if (!(entity && rq_list && num_rq_list > 0 && rq_list[0])) return -EINVAL; memset(entity, 0, sizeof(struct drm_sched_entity)); INIT_LIST_HEAD(&entity->list); - entity->rq = rq; - entity->sched = sched; + entity->rq = rq_list[0]; + entity->sched = rq_list[0]->sched; entity->guilty = guilty; entity->last_scheduled = NULL; diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index 567f7d46d912..1dceba2b42fd 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -123,6 +123,7 @@ v3d_open(struct drm_device *dev, struct drm_file *file) { struct v3d_dev *v3d = to_v3d_dev(dev); struct v3d_file_priv *v3d_priv; + struct drm_sched_rq *rq; int i; v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL); @@ -132,10 +133,8 @@ v3d_open(struct drm_device *dev, struct drm_file *file) v3d_priv->v3d = v3d; for (i = 0; i < V3D_MAX_QUEUES; i++) { - drm_sched_entity_init(&v3d->queue[i].sched, - &v3d_priv->sched_entity[i], - &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL], - NULL); + rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; + drm_sched_entity_init(&v3d_priv->sched_entity[i], &rq, 1, NULL); } file->driver_priv = v3d_priv; diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 43e93d6077cf..2205e89722f6 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -282,9 +282,9 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, const char *name); void drm_sched_fini(struct drm_gpu_scheduler *sched); -int drm_sched_entity_init(struct drm_gpu_scheduler *sched, - struct drm_sched_entity *entity, - struct drm_sched_rq *rq, +int drm_sched_entity_init(struct drm_sched_entity *entity, + struct drm_sched_rq **rq_list, + unsigned int num_rq_list, atomic_t *guilty); long drm_sched_entity_flush(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity, long timeout); -- cgit From 
0e28b10ff1b8e65788040b51c30c9cc984060dcd Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 13 Jul 2018 13:54:56 +0200 Subject: drm/amdgpu: remove ring parameter from amdgpu_job_submit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We know the ring through the entity anyway. Signed-off-by: Christian König Reviewed-by: Junwei Zhang Acked-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 9 ++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 5 ++--- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 11 +++++------ drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 2 +- 9 files changed, 20 insertions(+), 21 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 10e0a97c7c03..51ff751e093b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -117,21 +117,20 @@ void amdgpu_job_free(struct amdgpu_job *job) kfree(job); } -int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, - struct drm_sched_entity *entity, void *owner, - struct dma_fence **f) +int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, + void *owner, struct dma_fence **f) { int r; - job->ring = ring; if (!f) return -EINVAL; - r = drm_sched_job_init(&job->base, &ring->sched, entity, owner); + r = drm_sched_job_init(&job->base, entity->sched, entity, owner); if (r) return r; job->owner = owner; + job->ring = to_amdgpu_ring(entity->sched); *f = dma_fence_get(&job->base.s_fence->finished); amdgpu_job_free_resources(job); amdgpu_ring_priority_get(job->ring, job->base.s_priority); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h index 3151692312bd..39f4230e1d37 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h @@ -67,7 +67,6 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, void amdgpu_job_free_resources(struct amdgpu_job *job); void amdgpu_job_free(struct amdgpu_job *job); -int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, - struct drm_sched_entity *entity, void *owner, - struct dma_fence **f); +int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, + void *owner, struct dma_fence **f); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index a293f4e6760d..5018c0b6bf1a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -44,6 +44,8 @@ #define AMDGPU_FENCE_FLAG_INT (1 << 1) #define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2) +#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched) + enum amdgpu_ring_type { AMDGPU_RING_TYPE_GFX, AMDGPU_RING_TYPE_COMPUTE, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 11a12483c995..9958e76d1c78 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -2006,7 +2006,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo, if (r) goto error_free; - r = amdgpu_job_submit(job, ring, &adev->mman.entity, + r = amdgpu_job_submit(job, 
&adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence); if (r) goto error_free; @@ -2083,7 +2083,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, DRM_ERROR("Error scheduling IBs (%d)\n", r); amdgpu_job_free(job); } else { - r = amdgpu_job_submit(job, ring, &adev->mman.entity, + r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, fence); if (r) goto error_free; @@ -2175,7 +2175,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, amdgpu_ring_pad_ib(ring, &job->ibs[0]); WARN_ON(job->ibs[0].length_dw > num_dw); - r = amdgpu_job_submit(job, ring, &adev->mman.entity, + r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, fence); if (r) goto error_free; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index a6c2cace4b9d..848b2e898818 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1074,7 +1074,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, if (r) goto err_free; - r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity, + r = amdgpu_job_submit(job, &adev->uvd.inst[ring->me].entity, AMDGPU_FENCE_OWNER_UNDEFINED, &f); if (r) goto err_free; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index ffb0fcc9707e..7dfb4c4b19c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -539,7 +539,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, amdgpu_job_free(job); } else { - r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity, + r = amdgpu_job_submit(job, &ring->adev->vce.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &f); if (r) goto err; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 484e2c19c027..5d3d783f2d72 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -425,8 +425,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, if (r) goto error_free; - r = amdgpu_job_submit(job, ring, &vm->entity, - AMDGPU_FENCE_OWNER_UNDEFINED, &fence); + r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED, + &fence); if (r) goto error_free; @@ -1120,8 +1120,8 @@ restart: amdgpu_sync_resv(adev, &job->sync, root->tbo.resv, AMDGPU_FENCE_OWNER_VM, false); WARN_ON(params.ib->length_dw > ndw); - r = amdgpu_job_submit(job, ring, &vm->entity, - AMDGPU_FENCE_OWNER_VM, &fence); + r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, + &fence); if (r) goto error; @@ -1485,8 +1485,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, amdgpu_ring_pad_ib(ring, params.ib); WARN_ON(params.ib->length_dw > ndw); - r = amdgpu_job_submit(job, ring, &vm->entity, - AMDGPU_FENCE_OWNER_VM, &f); + r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f); if (r) goto error_free; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 1c118c02e8cb..591d1f211823 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -320,7 +320,7 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring, amdgpu_job_free(job); } else { - r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity, + r = amdgpu_job_submit(job, &ring->adev->vce.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &f); if (r) goto err; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 
b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index d48bc3393545..ceb0a7037897 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -321,7 +321,7 @@ int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, amdgpu_job_free(job); } else { - r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity, + r = amdgpu_job_submit(job, &ring->adev->vce.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &f); if (r) goto err; -- cgit From 3320b8d2acd3d480d0dd4835d970067354eac915 Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 13 Jul 2018 15:08:44 +0200 Subject: drm/amdgpu: remove job->ring MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We can easily get that from the scheduler. Signed-off-by: Christian König Reviewed-by: Junwei Zhang Acked-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 18 +++++++++--------- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 23 ++++++++++++----------- drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 8 ++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 2 +- 7 files changed, 29 insertions(+), 28 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index e0cc9f878e80..90780b0f0ce5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1027,6 +1027,7 @@ struct amdgpu_cs_parser { /* scheduler job object */ struct amdgpu_job *job; + struct amdgpu_ring *ring; /* buffer objects */ struct ww_acquire_ctx ticket; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 6eb7ee859ffd..72dc9b36b937 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -912,11 +912,11 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, { struct amdgpu_fpriv *fpriv = p->filp->driver_priv; struct amdgpu_vm *vm = &fpriv->vm; - struct amdgpu_ring *ring = p->job->ring; + struct amdgpu_ring *ring = p->ring; int r; /* Only for UVD/VCE VM emulation */ - if (p->job->ring->funcs->parse_cs) { + if (p->ring->funcs->parse_cs) { unsigned i, j; for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) { @@ -1030,10 +1030,10 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, } } - if (parser->job->ring && parser->job->ring != ring) + if (parser->ring && parser->ring != ring) return -EINVAL; - parser->job->ring = ring; + parser->ring = ring; r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ? 
chunk_ib->ib_bytes : 0, @@ -1052,11 +1052,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, /* UVD & VCE fw doesn't support user fences */ if (parser->job->uf_addr && ( - parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD || - parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE)) + parser->ring->funcs->type == AMDGPU_RING_TYPE_UVD || + parser->ring->funcs->type == AMDGPU_RING_TYPE_VCE)) return -EINVAL; - return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx); + return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->ring->idx); } static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, @@ -1207,7 +1207,7 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p) static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs) { - struct amdgpu_ring *ring = p->job->ring; + struct amdgpu_ring *ring = p->ring; struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity; struct amdgpu_job *job; unsigned i; @@ -1256,7 +1256,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, job->uf_sequence = seq; amdgpu_job_free_resources(job); - amdgpu_ring_priority_get(job->ring, job->base.s_priority); + amdgpu_ring_priority_get(p->ring, job->base.s_priority); trace_amdgpu_cs_ioctl(job); drm_sched_entity_push_job(&job->base, entity); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 2720444ff23a..2b2de5f3e6e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3253,7 +3253,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, kthread_park(ring->sched.thread); - if (job && job->ring->idx != i) + if (job && job->base.sched != &ring->sched) continue; drm_sched_hw_job_reset(&ring->sched, &job->base); @@ -3277,7 +3277,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, * or all rings (in the case @job is NULL) * after above amdgpu_reset accomplished */ - if ((!job || job->ring->idx == i) && !r) + if ((!job || job->base.sched == &ring->sched) && !r) drm_sched_job_recovery(&ring->sched); kthread_unpark(ring->sched.thread); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 51ff751e093b..2496f2269bcb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -30,12 +30,12 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job) { - struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); + struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); + struct amdgpu_job *job = to_amdgpu_job(s_job); DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n", - job->base.sched->name, - atomic_read(&job->ring->fence_drv.last_seq), - job->ring->fence_drv.sync_seq); + job->base.sched->name, atomic_read(&ring->fence_drv.last_seq), + ring->fence_drv.sync_seq); amdgpu_device_gpu_recover(job->adev, job, false); } @@ -98,9 +98,10 @@ void amdgpu_job_free_resources(struct amdgpu_job *job) static void amdgpu_job_free_cb(struct drm_sched_job *s_job) { - struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); + struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); + struct amdgpu_job *job = to_amdgpu_job(s_job); - amdgpu_ring_priority_put(job->ring, s_job->s_priority); + amdgpu_ring_priority_put(ring, s_job->s_priority); dma_fence_put(job->fence); amdgpu_sync_free(&job->sync); amdgpu_sync_free(&job->sched_sync); @@ -120,6 +121,7 @@ void amdgpu_job_free(struct amdgpu_job 
*job) int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, void *owner, struct dma_fence **f) { + struct amdgpu_ring *ring = to_amdgpu_ring(entity->sched); int r; if (!f) @@ -130,10 +132,9 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, return r; job->owner = owner; - job->ring = to_amdgpu_ring(entity->sched); *f = dma_fence_get(&job->base.s_fence->finished); amdgpu_job_free_resources(job); - amdgpu_ring_priority_get(job->ring, job->base.s_priority); + amdgpu_ring_priority_get(ring, job->base.s_priority); drm_sched_entity_push_job(&job->base, entity); return 0; @@ -142,6 +143,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job, struct drm_sched_entity *s_entity) { + struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->sched); struct amdgpu_job *job = to_amdgpu_job(sched_job); struct amdgpu_vm *vm = job->vm; bool explicit = false; @@ -157,8 +159,6 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job, } while (fence == NULL && vm && !job->vmid) { - struct amdgpu_ring *ring = job->ring; - r = amdgpu_vmid_grab(vm, ring, &job->sync, &job->base.s_fence->finished, job); @@ -173,6 +173,7 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job, static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job) { + struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched); struct dma_fence *fence = NULL, *finished; struct amdgpu_device *adev; struct amdgpu_job *job; @@ -196,7 +197,7 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job) if (finished->error < 0) { DRM_INFO("Skip scheduling IBs!\n"); } else { - r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job, + r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, &fence); if (r) DRM_ERROR("Error scheduling IBs (%d)\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h index 39f4230e1d37..c663c1925f91 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h @@ -37,7 +37,6 @@ struct amdgpu_job { struct drm_sched_job base; struct amdgpu_device *adev; struct amdgpu_vm *vm; - struct amdgpu_ring *ring; struct amdgpu_sync sync; struct amdgpu_sync sched_sync; struct amdgpu_ib *ibs; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index e96e26d3f3b0..76920035eb22 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -150,10 +150,10 @@ TRACE_EVENT(amdgpu_cs, TP_fast_assign( __entry->bo_list = p->bo_list; - __entry->ring = p->job->ring->idx; + __entry->ring = p->ring->idx; __entry->dw = p->job->ibs[i].length_dw; __entry->fences = amdgpu_fence_count_emitted( - p->job->ring); + p->ring); ), TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u", __entry->bo_list, __entry->ring, __entry->dw, @@ -178,7 +178,7 @@ TRACE_EVENT(amdgpu_cs_ioctl, __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) __entry->context = job->base.s_fence->finished.context; __entry->seqno = job->base.s_fence->finished.seqno; - __entry->ring_name = job->ring->name; + __entry->ring_name = to_amdgpu_ring(job->base.sched)->name; __entry->num_ibs = job->num_ibs; ), TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", @@ -203,7 +203,7 @@ TRACE_EVENT(amdgpu_sched_run_job, __assign_str(timeline, 
AMDGPU_JOB_GET_TIMELINE_NAME(job)) __entry->context = job->base.s_fence->finished.context; __entry->seqno = job->base.s_fence->finished.seqno; - __entry->ring_name = job->ring->name; + __entry->ring_name = to_amdgpu_ring(job->base.sched)->name; __entry->num_ibs = job->num_ibs; ), TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 848b2e898818..7738d2b90dae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -692,11 +692,11 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, struct amdgpu_bo *bo, unsigned offset) { struct amdgpu_device *adev = ctx->parser->adev; + uint32_t ip_instance = ctx->parser->ring->me; int32_t *msg, msg_type, handle; void *ptr; long r; int i; - uint32_t ip_instance = ctx->parser->job->ring->me; if (offset & 0x3F) { DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance); -- cgit From ee913fd9e166384aacc0aa70ffd4e93ca41d54b0 Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 13 Jul 2018 16:29:10 +0200 Subject: drm/amdgpu: add amdgpu_job_submit_direct helper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make sure that we properly initialize at least the sched member. Signed-off-by: Christian König Reviewed-by: Junwei Zhang Acked-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 15 +++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 4 ++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 17 ++++++----------- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 5 +---- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 20 ++++++-------------- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 17 ++++------------- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 20 ++++++-------------- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 20 ++++++-------------- 8 files changed, 48 insertions(+), 70 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 2496f2269bcb..0e2b18ccdf2e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -140,6 +140,21 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, return 0; } +int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring, + struct dma_fence **fence) +{ + int r; + + job->base.sched = &ring->sched; + r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence); + job->fence = dma_fence_get(*fence); + if (r) + return r; + + amdgpu_job_free(job); + return 0; +} + static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job, struct drm_sched_entity *s_entity) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h index c663c1925f91..d77fd232f7ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h @@ -33,6 +33,8 @@ #define to_amdgpu_job(sched_job) \ container_of((sched_job), struct amdgpu_job, base) +struct amdgpu_fence; + struct amdgpu_job { struct drm_sched_job base; struct amdgpu_device *adev; @@ -68,4 +70,6 @@ void amdgpu_job_free_resources(struct amdgpu_job *job); void amdgpu_job_free(struct amdgpu_job *job); int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, void *owner, struct dma_fence **f); +int amdgpu_job_submit_direct(struct 
amdgpu_job *job, struct amdgpu_ring *ring, + struct dma_fence **fence); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 9958e76d1c78..13977ea6a097 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -2075,24 +2075,19 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, amdgpu_ring_pad_ib(ring, &job->ibs[0]); WARN_ON(job->ibs[0].length_dw > num_dw); - if (direct_submit) { - r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, - NULL, fence); - job->fence = dma_fence_get(*fence); - if (r) - DRM_ERROR("Error scheduling IBs (%d)\n", r); - amdgpu_job_free(job); - } else { + if (direct_submit) + r = amdgpu_job_submit_direct(job, ring, fence); + else r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, fence); - if (r) - goto error_free; - } + if (r) + goto error_free; return r; error_free: amdgpu_job_free(job); + DRM_ERROR("Error scheduling IBs (%d)\n", r); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 7738d2b90dae..d708970244eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1062,12 +1062,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, if (r < 0) goto err_free; - r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); - job->fence = dma_fence_get(f); + r = amdgpu_job_submit_direct(job, ring, &f); if (r) goto err_free; - - amdgpu_job_free(job); } else { r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv, AMDGPU_FENCE_OWNER_UNDEFINED, false); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 7dfb4c4b19c5..86182c966ed6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -469,12 +469,10 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, for (i = ib->length_dw; i < ib_size_dw; ++i) ib->ptr[i] = 0x0; - r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); - job->fence = dma_fence_get(f); + r = amdgpu_job_submit_direct(job, ring, &f); if (r) goto err; - amdgpu_job_free(job); if (fence) *fence = dma_fence_get(f); dma_fence_put(f); @@ -531,19 +529,13 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, for (i = ib->length_dw; i < ib_size_dw; ++i) ib->ptr[i] = 0x0; - if (direct) { - r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); - job->fence = dma_fence_get(f); - if (r) - goto err; - - amdgpu_job_free(job); - } else { + if (direct) + r = amdgpu_job_submit_direct(job, ring, &f); + else r = amdgpu_job_submit(job, &ring->adev->vce.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &f); - if (r) - goto err; - } + if (r) + goto err; if (fence) *fence = dma_fence_get(f); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 769c6723c9ca..798648a19710 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -308,13 +308,10 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, } ib->length_dw = 16; - r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); - job->fence = dma_fence_get(f); + r = amdgpu_job_submit_direct(job, ring, &f); if (r) goto err_free; - amdgpu_job_free(job); - amdgpu_bo_fence(bo, f, false); amdgpu_bo_unreserve(bo); amdgpu_bo_unref(&bo); @@ -499,12 +496,10 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand for (i = ib->length_dw; i < 
ib_size_dw; ++i) ib->ptr[i] = 0x0; - r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); - job->fence = dma_fence_get(f); + r = amdgpu_job_submit_direct(job, ring, &f); if (r) goto err; - amdgpu_job_free(job); if (fence) *fence = dma_fence_get(f); dma_fence_put(f); @@ -553,12 +548,10 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han for (i = ib->length_dw; i < ib_size_dw; ++i) ib->ptr[i] = 0x0; - r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); - job->fence = dma_fence_get(f); + r = amdgpu_job_submit_direct(job, ring, &f); if (r) goto err; - amdgpu_job_free(job); if (fence) *fence = dma_fence_get(f); dma_fence_put(f); @@ -666,12 +659,10 @@ static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle, } ib->length_dw = 16; - r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); - job->fence = dma_fence_get(f); + r = amdgpu_job_submit_direct(job, ring, &f); if (r) goto err; - amdgpu_job_free(job); if (fence) *fence = dma_fence_get(f); dma_fence_put(f); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 591d1f211823..b796dc8375cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -248,12 +248,10 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle for (i = ib->length_dw; i < ib_size_dw; ++i) ib->ptr[i] = 0x0; - r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); - job->fence = dma_fence_get(f); + r = amdgpu_job_submit_direct(job, ring, &f); if (r) goto err; - amdgpu_job_free(job); if (fence) *fence = dma_fence_get(f); dma_fence_put(f); @@ -312,19 +310,13 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring, for (i = ib->length_dw; i < ib_size_dw; ++i) ib->ptr[i] = 0x0; - if (direct) { - r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); - job->fence = dma_fence_get(f); - if (r) - goto err; - - amdgpu_job_free(job); - } else { + if (direct) + r = amdgpu_job_submit_direct(job, ring, &f); + else r = amdgpu_job_submit(job, &ring->adev->vce.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &f); - if (r) - goto err; - } + if (r) + goto err; if (fence) *fence = dma_fence_get(f); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index ceb0a7037897..831bb995b0ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -250,12 +250,10 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle for (i = ib->length_dw; i < ib_size_dw; ++i) ib->ptr[i] = 0x0; - r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); - job->fence = dma_fence_get(f); + r = amdgpu_job_submit_direct(job, ring, &f); if (r) goto err; - amdgpu_job_free(job); if (fence) *fence = dma_fence_get(f); dma_fence_put(f); @@ -313,19 +311,13 @@ int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, for (i = ib->length_dw; i < ib_size_dw; ++i) ib->ptr[i] = 0x0; - if (direct) { - r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); - job->fence = dma_fence_get(f); - if (r) - goto err; - - amdgpu_job_free(job); - } else { + if (direct) + r = amdgpu_job_submit_direct(job, ring, &f); + else r = amdgpu_job_submit(job, &ring->adev->vce.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &f); - if (r) - goto err; - } + if (r) + goto err; if (fence) *fence = dma_fence_get(f); -- cgit From 5c675bf2c67c4efb36a78bebf44dc435db2daf16 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 18 Jul 2018 20:30:51 +0200 Subject: drm/amdgpu: clean up UVD instance handling v2 MIME-Version: 1.0 Content-Type: 
text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The whole handle, filp and entity handling is superfluous here. We should have reviewed that more thoughtfully. It looks like somebody just made the code instance aware without knowing the background. v2: fix one more missed case in amdgpu_uvd_suspend Reviewed-by: Leo Liu Signed-off-by: Christian König Acked-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 121 ++++++++++++++++---------------- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 10 +-- 2 files changed, 64 insertions(+), 67 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index d708970244eb..80b5c453f8c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -263,21 +263,20 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); return r; } + } - ring = &adev->uvd.inst[j].ring; - rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&adev->uvd.inst[j].entity, &rq, - 1, NULL); - if (r != 0) { - DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j); - return r; - } - - for (i = 0; i < adev->uvd.max_handles; ++i) { - atomic_set(&adev->uvd.inst[j].handles[i], 0); - adev->uvd.inst[j].filp[i] = NULL; - } + ring = &adev->uvd.inst[0].ring; + rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; + r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL); + if (r) { + DRM_ERROR("Failed setting up UVD kernel entity.\n"); + return r; } + for (i = 0; i < adev->uvd.max_handles; ++i) { + atomic_set(&adev->uvd.handles[i], 0); + adev->uvd.filp[i] = NULL; + } + /* from uvd v5.0 HW addressing capacity increased to 64 bits */ if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0)) adev->uvd.address_64_bit = true; @@ -306,11 +305,12 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) { int i, j; + drm_sched_entity_destroy(&adev->uvd.inst->ring.sched, + &adev->uvd.entity); + for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { kfree(adev->uvd.inst[j].saved_bo); - drm_sched_entity_destroy(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity); - amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo, &adev->uvd.inst[j].gpu_addr, (void **)&adev->uvd.inst[j].cpu_addr); @@ -333,20 +333,20 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) cancel_delayed_work_sync(&adev->uvd.idle_work); + /* only valid for physical mode */ + if (adev->asic_type < CHIP_POLARIS10) { + for (i = 0; i < adev->uvd.max_handles; ++i) + if (atomic_read(&adev->uvd.handles[i])) + break; + + if (i == adev->uvd.max_handles) + return 0; + } + for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { if (adev->uvd.inst[j].vcpu_bo == NULL) continue; - /* only valid for physical mode */ - if (adev->asic_type < CHIP_POLARIS10) { - for (i = 0; i < adev->uvd.max_handles; ++i) - if (atomic_read(&adev->uvd.inst[j].handles[i])) - break; - - if (i == adev->uvd.max_handles) - continue; - } - size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo); ptr = adev->uvd.inst[j].cpu_addr; @@ -398,30 +398,27 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) { - struct amdgpu_ring *ring; - int i, j, r; - - for (j = 0; j < adev->uvd.num_uvd_inst; j++) { - ring = &adev->uvd.inst[j].ring; + struct amdgpu_ring *ring = &adev->uvd.inst[0].ring; + int i, r; - for (i = 0; i < 
adev->uvd.max_handles; ++i) { - uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]); - if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) { - struct dma_fence *fence; - - r = amdgpu_uvd_get_destroy_msg(ring, handle, - false, &fence); - if (r) { - DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r); - continue; - } + for (i = 0; i < adev->uvd.max_handles; ++i) { + uint32_t handle = atomic_read(&adev->uvd.handles[i]); - dma_fence_wait(fence, false); - dma_fence_put(fence); + if (handle != 0 && adev->uvd.filp[i] == filp) { + struct dma_fence *fence; - adev->uvd.inst[j].filp[i] = NULL; - atomic_set(&adev->uvd.inst[j].handles[i], 0); + r = amdgpu_uvd_get_destroy_msg(ring, handle, false, + &fence); + if (r) { + DRM_ERROR("Error destroying UVD %d!\n", r); + continue; } + + dma_fence_wait(fence, false); + dma_fence_put(fence); + + adev->uvd.filp[i] = NULL; + atomic_set(&adev->uvd.handles[i], 0); } } } @@ -692,20 +689,19 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, struct amdgpu_bo *bo, unsigned offset) { struct amdgpu_device *adev = ctx->parser->adev; - uint32_t ip_instance = ctx->parser->ring->me; int32_t *msg, msg_type, handle; void *ptr; long r; int i; if (offset & 0x3F) { - DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance); + DRM_ERROR("UVD messages must be 64 byte aligned!\n"); return -EINVAL; } r = amdgpu_bo_kmap(bo, &ptr); if (r) { - DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r); + DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r); return r; } @@ -715,7 +711,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, handle = msg[2]; if (handle == 0) { - DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance); + DRM_ERROR("Invalid UVD handle!\n"); return -EINVAL; } @@ -726,18 +722,19 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, /* try to alloc a new handle */ for (i = 0; i < adev->uvd.max_handles; ++i) { - if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) { - DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle); + if (atomic_read(&adev->uvd.handles[i]) == handle) { + DRM_ERROR("Handle 0x%x already in use!\n", + handle); return -EINVAL; } - if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) { - adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp; + if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) { + adev->uvd.filp[i] = ctx->parser->filp; return 0; } } - DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance); + DRM_ERROR("No more free UVD handles!\n"); return -ENOSPC; case 1: @@ -749,27 +746,27 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, /* validate the handle */ for (i = 0; i < adev->uvd.max_handles; ++i) { - if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) { - if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) { - DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance); + if (atomic_read(&adev->uvd.handles[i]) == handle) { + if (adev->uvd.filp[i] != ctx->parser->filp) { + DRM_ERROR("UVD handle collision detected!\n"); return -EINVAL; } return 0; } } - DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, handle); + DRM_ERROR("Invalid UVD handle 0x%x!\n", handle); return -ENOENT; case 2: /* it's a destroy msg, free the handle */ for (i = 0; i < adev->uvd.max_handles; ++i) - atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0); + atomic_cmpxchg(&adev->uvd.handles[i], handle, 0); amdgpu_bo_kunmap(bo); return 0; default: - DRM_ERROR("Illegal UVD(%d) 
message type (%d)!\n", ip_instance, msg_type); + DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); return -EINVAL; } BUG(); @@ -1071,7 +1068,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, if (r) goto err_free; - r = amdgpu_job_submit(job, &adev->uvd.inst[ring->me].entity, + r = amdgpu_job_submit(job, &adev->uvd.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &f); if (r) goto err_free; @@ -1273,7 +1270,7 @@ uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev) * necessarily linear. So we need to count * all non-zero handles. */ - if (atomic_read(&adev->uvd.inst->handles[i])) + if (atomic_read(&adev->uvd.handles[i])) used_handles++; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index cae3f526216b..66872286ab12 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h @@ -42,12 +42,9 @@ struct amdgpu_uvd_inst { void *cpu_addr; uint64_t gpu_addr; void *saved_bo; - atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; - struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; struct amdgpu_ring ring; struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS]; struct amdgpu_irq_src irq; - struct drm_sched_entity entity; uint32_t srbm_soft_reset; }; @@ -56,10 +53,13 @@ struct amdgpu_uvd { unsigned fw_version; unsigned max_handles; unsigned num_enc_rings; - uint8_t num_uvd_inst; + uint8_t num_uvd_inst; bool address_64_bit; bool use_ctx_buf; - struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES]; + struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES]; + struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; + atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; + struct drm_sched_entity entity; struct delayed_work idle_work; }; -- cgit
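Taken together, the series above moves amdgpu from storing a ring pointer in every job to deriving it on demand: the entity records which scheduler it runs on, the scheduler is embedded in the ring, and container_of() walks back from the embedded member to the containing ring, exactly what the new to_amdgpu_ring() macro does. The stand-alone sketch below illustrates that pattern; struct gpu_scheduler, struct ring, struct entity and to_ring() are simplified stand-ins for illustration, not the real amdgpu types, and the whole thing compiles as ordinary user-space C.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct gpu_scheduler {
	const char *name;		/* stand-in for struct drm_gpu_scheduler */
};

struct ring {
	int idx;
	struct gpu_scheduler sched;	/* embedded, as in struct amdgpu_ring */
};

/* mirrors to_amdgpu_ring(): recover the ring that embeds a scheduler */
#define to_ring(s) container_of((s), struct ring, sched)

struct entity {
	struct gpu_scheduler *sched;	/* the scheduler this entity runs on */
};

int main(void)
{
	struct ring uvd_ring = { .idx = 3, .sched = { .name = "uvd" } };
	struct entity e = { .sched = &uvd_ring.sched };

	/* a submit path needs only the entity; the ring falls out of it */
	struct ring *r = to_ring(e.sched);

	printf("entity runs on ring %d (%s)\n", r->idx, r->sched.name);
	return 0;
}

Because the back-pointer is recomputed from s_job->sched or entity->sched at each use, no stored job->ring field can go stale, and callers only name a ring explicitly in the direct-submit path, where amdgpu_job_submit_direct() sets job->base.sched itself. The same simplification is what makes a single device-wide adev->uvd.entity sufficient in the last patch.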