From c8e42d57859d5055bfe3313cfd5dc025097b753e Mon Sep 17 00:00:00 2001
From: xinhui pan
Date: Thu, 26 Mar 2020 08:38:29 +0800
Subject: drm/amdgpu: implement more ib pools (v2)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We have three ib pools: normal, VM and direct. Any job that schedules
IBs without depending on the gpu scheduler should use the DIRECT pool.
Any job that schedules direct VM update IBs should use the VM pool. All
other jobs use the NORMAL pool.

v2: squash in coding style fix

Signed-off-by: xinhui pan
Reviewed-by: Christian König
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index a41272fbcba2..f55e2410f948 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -390,7 +390,8 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
 	uint64_t addr;
 	int i, r;
 
-	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+	r = amdgpu_job_alloc_with_ib(adev, 64,
+					AMDGPU_IB_POOL_DIRECT, &job);
 	if (r)
 		goto err;
 
@@ -557,7 +558,8 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 	uint64_t addr;
 	int i, r;
 
-	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+					AMDGPU_IB_POOL_DIRECT, &job);
 	if (r)
 		return r;
 
@@ -610,7 +612,8 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 	uint64_t addr;
 	int i, r;
 
-	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+					AMDGPU_IB_POOL_DIRECT, &job);
 	if (r)
 		return r;
-- cgit
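Seen from the call sites, the whole change amounts to passing one extra argument. A
minimal sketch of that choice follows; it is illustrative only, the helper name, its
parameters and the enum type name are invented, and the NORMAL/VM identifiers are
assumed from the pool names in the commit message (only AMDGPU_IB_POOL_DIRECT appears
in the hunks themselves):

/*
 * Illustrative sketch, not part of the patch: pick an IB pool for a job
 * following the rule stated in the commit message above.
 */
static enum amdgpu_ib_pool_type pick_ib_pool(bool bypasses_gpu_scheduler,
                                             bool direct_vm_update)
{
        if (bypasses_gpu_scheduler)
                return AMDGPU_IB_POOL_DIRECT;   /* e.g. the VCN ring tests above */
        if (direct_vm_update)
                return AMDGPU_IB_POOL_VM;       /* direct VM update IBs */
        return AMDGPU_IB_POOL_NORMAL;           /* everything else */
}

/* Usage would then mirror the hunks above, for example:
 *
 *      r = amdgpu_job_alloc_with_ib(adev, 64, pick_ib_pool(true, false), &job);
 */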
From bd718638b8513e20a481a67f37a8acc1ebf2d957 Mon Sep 17 00:00:00 2001
From: James Zhu
Date: Mon, 10 Feb 2020 12:41:41 -0500
Subject: drm/amdgpu/vcn: fix race condition issue for vcn start
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fix race condition issue when multiple vcn starts are called.

v2: Removed checking the return value of cancel_delayed_work_sync()
    to prevent possible races here.
v3: Add total_submission_cnt to avoid gating power unexpectedly.
v4: Remove extra counter check, and reduce counter before idle work schedule.

Signed-off-by: James Zhu
Reviewed-by: Christian König
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 21 ++++++++++++++-------
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h |  2 ++
 2 files changed, 16 insertions(+), 7 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index f55e2410f948..3ca3668a5e43 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -63,6 +63,8 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 	int i, r;
 
 	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
+	mutex_init(&adev->vcn.vcn_pg_lock);
+	atomic_set(&adev->vcn.total_submission_cnt, 0);
 
 	switch (adev->asic_type) {
 	case CHIP_RAVEN:
@@ -210,6 +212,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 	}
 
 	release_firmware(adev->vcn.fw);
+	mutex_destroy(&adev->vcn.vcn_pg_lock);
 
 	return 0;
 }
@@ -307,7 +310,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 		fences += fence[j];
 	}
 
-	if (fences == 0) {
+	if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
 		amdgpu_gfx_off_ctrl(adev, true);
 		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
 		       AMD_PG_STATE_GATE);
@@ -319,13 +322,14 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 
 void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
 
-	if (set_clocks) {
-		amdgpu_gfx_off_ctrl(adev, false);
-		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
-		       AMD_PG_STATE_UNGATE);
-	}
+	atomic_inc(&adev->vcn.total_submission_cnt);
+	cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+	mutex_lock(&adev->vcn.vcn_pg_lock);
+	amdgpu_gfx_off_ctrl(adev, false);
+	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+	       AMD_PG_STATE_UNGATE);
 
 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
 		struct dpg_pause_state new_state;
@@ -345,10 +349,13 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 
 		adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
 	}
+	mutex_unlock(&adev->vcn.vcn_pg_lock);
 }
 
 void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
 {
+	atomic_dec(&ring->adev->vcn.total_submission_cnt);
+
 	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 6fe057329de2..111c4cc69998 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -200,6 +200,8 @@ struct amdgpu_vcn {
 	struct drm_gpu_scheduler *vcn_dec_sched[AMDGPU_MAX_VCN_INSTANCES];
 	uint32_t		 num_vcn_enc_sched;
 	uint32_t		 num_vcn_dec_sched;
+	struct mutex		 vcn_pg_lock;
+	atomic_t		 total_submission_cnt;
 
 	unsigned	harvest_config;
 	int (*pause_dpg_mode)(struct amdgpu_device *adev,
-- cgit
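The fix reduces to one invariant: power may only be gated when no submission is in
flight and no fences are outstanding, and power-up is serialized under vcn_pg_lock. The
same logic in condensed form, using only calls that appear in the hunks above (an
illustrative sketch, not the full driver code; the *_sketch function names are invented):

/*
 * Condensed sketch of the pattern introduced above: every submission bumps
 * total_submission_cnt before cancelling the idle work, and the idle handler
 * only gates power once both the emitted fence count and the counter are zero.
 */
static void vcn_begin_use_sketch(struct amdgpu_device *adev)
{
        atomic_inc(&adev->vcn.total_submission_cnt);    /* mark work in flight first */
        cancel_delayed_work_sync(&adev->vcn.idle_work);

        mutex_lock(&adev->vcn.vcn_pg_lock);             /* serialize concurrent power-up */
        amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
                                               AMD_PG_STATE_UNGATE);
        /* ... DPG pause-state handling elided ... */
        mutex_unlock(&adev->vcn.vcn_pg_lock);
}

static void vcn_end_use_sketch(struct amdgpu_device *adev)
{
        atomic_dec(&adev->vcn.total_submission_cnt);    /* drop before re-arming idle work */
        schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

static void vcn_idle_sketch(struct amdgpu_device *adev, unsigned int fences)
{
        if (!fences && !atomic_read(&adev->vcn.total_submission_cnt))
                amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
                                                       AMD_PG_STATE_GATE);
        else
                schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}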
From e3b41d82dabaf6f1da94df8339afeee66e9b1a7e Mon Sep 17 00:00:00 2001
From: James Zhu
Date: Mon, 10 Feb 2020 12:52:16 -0500
Subject: drm/amdgpu/vcn: fix race condition issue for dpg unpause mode switch

We can't rely only on the enc fence to decide when to switch to dpg
unpause mode, since an enc thread may not schedule a fence in time when
multiple threads are running.

v3: 1. Rename enc_submission_cnt to dpg_enc_submission_cnt
    2. Add dpg_enc_submission_cnt check in idle_work_handler
v4: Remove extra counter check, and reduce counter before idle work schedule.

Signed-off-by: James Zhu
Reviewed-by: Leo Liu
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 32 +++++++++++++++++++++-----------
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h |  1 +
 2 files changed, 22 insertions(+), 11 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 3ca3668a5e43..7a0b0743e6cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -65,6 +65,8 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
 	mutex_init(&adev->vcn.vcn_pg_lock);
 	atomic_set(&adev->vcn.total_submission_cnt, 0);
+	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+		atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
 
 	switch (adev->asic_type) {
 	case CHIP_RAVEN:
@@ -298,7 +300,8 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
 			struct dpg_pause_state new_state;
 
-			if (fence[j])
+			if (fence[j] ||
+				unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
 				new_state.fw_based = VCN_DPG_STATE__PAUSE;
 			else
 				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
@@ -333,19 +336,22 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 
 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
 		struct dpg_pause_state new_state;
-		unsigned int fences = 0;
-		unsigned int i;
 
-		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-			fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
-		}
-		if (fences)
+		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
+			atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
 			new_state.fw_based = VCN_DPG_STATE__PAUSE;
-		else
-			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+		} else {
+			unsigned int fences = 0;
+			unsigned int i;
 
-		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
-			new_state.fw_based = VCN_DPG_STATE__PAUSE;
+			for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+				fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
+
+			if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
+				new_state.fw_based = VCN_DPG_STATE__PAUSE;
+			else
+				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+		}
 
 		adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
 	}
@@ -354,6 +360,10 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 
 void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
 {
+	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+		ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
+
 	atomic_dec(&ring->adev->vcn.total_submission_cnt);
 
 	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 111c4cc69998..e913de8cda69 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -183,6 +183,7 @@ struct amdgpu_vcn_inst {
 	void		*dpg_sram_cpu_addr;
 	uint64_t	dpg_sram_gpu_addr;
 	uint32_t	*dpg_sram_curr_addr;
+	atomic_t	dpg_enc_submission_cnt;
 };
 
 struct amdgpu_vcn {
-- cgit
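With the per-instance counter in place, the pause decision becomes a pure function of
what is observable for that instance: any emitted enc fence, or any enc submission that
has entered begin_use but not yet emitted a fence, keeps the firmware in pause mode.
The same decision factored into a standalone helper (illustrative only; the function
name is invented and does not exist in the driver):

/*
 * Illustrative helper, not part of the patch: derive the DPG fw_based state
 * for one VCN instance from the emitted enc fences and the per-instance
 * encoder submission counter, as the begin_use hunk above does inline.
 */
static void vcn_dpg_fw_state_sketch(struct amdgpu_device *adev, unsigned int inst,
                                    struct dpg_pause_state *new_state)
{
        unsigned int fences = 0;
        unsigned int i;

        for (i = 0; i < adev->vcn.num_enc_rings; ++i)
                fences += amdgpu_fence_count_emitted(&adev->vcn.inst[inst].ring_enc[i]);

        if (fences || atomic_read(&adev->vcn.inst[inst].dpg_enc_submission_cnt))
                new_state->fw_based = VCN_DPG_STATE__PAUSE;     /* enc work pending or queued */
        else
                new_state->fw_based = VCN_DPG_STATE__UNPAUSE;   /* no encoder work outstanding */
}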
From 2c68f0e3771d2f414d4095ab96039c1d004eadb4 Mon Sep 17 00:00:00 2001
From: James Zhu
Date: Sun, 29 Mar 2020 20:00:22 -0400
Subject: drm/amdgpu/vcn: Add firmware share memory support

Added firmware shared memory support for VCN. Currently only the
multiple queue mode is enabled.

Signed-off-by: James Zhu
Reviewed-by: Leo Liu
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 13 +++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 24 ++++++++++++++++++++++++
 2 files changed, 37 insertions(+)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 7a0b0743e6cd..328b6ceb80de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -182,6 +182,14 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 				return r;
 			}
 		}
+
+		r = amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)),
+				PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].fw_shared_bo,
+				&adev->vcn.inst[i].fw_shared_gpu_addr, &adev->vcn.inst[i].fw_shared_cpu_addr);
+		if (r) {
+			dev_err(adev->dev, "VCN %d (%d) failed to allocate fimware shared bo\n", i, r);
+			return r;
+		}
 	}
 
 	return 0;
@@ -196,6 +204,11 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
 		if (adev->vcn.harvest_config & (1 << j))
 			continue;
+
+		amdgpu_bo_free_kernel(&adev->vcn.inst[j].fw_shared_bo,
+					  &adev->vcn.inst[j].fw_shared_gpu_addr,
+					  (void **)&adev->vcn.inst[j].fw_shared_cpu_addr);
+
 		if (adev->vcn.indirect_sram) {
 			amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
 					      &adev->vcn.inst[j].dpg_sram_gpu_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index e913de8cda69..f739e1ab4cfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -132,6 +132,13 @@
 	}							\
 } while (0)
 
+#define AMDGPU_VCN_MULTI_QUEUE_FLAG	(1 << 8)
+
+enum fw_queue_mode {
+	FW_QUEUE_RING_RESET = 1,
+	FW_QUEUE_DPG_HOLD_OFF = 2,
+};
+
 enum engine_status_constants {
 	UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON = 0x2AAAA0,
 	UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0 = 0xAAAA0,
@@ -179,11 +186,14 @@ struct amdgpu_vcn_inst {
 	struct amdgpu_irq_src	irq;
 	struct amdgpu_vcn_reg	external;
 	struct amdgpu_bo	*dpg_sram_bo;
+	struct amdgpu_bo	*fw_shared_bo;
 	struct dpg_pause_state	pause_state;
 	void			*dpg_sram_cpu_addr;
 	uint64_t		dpg_sram_gpu_addr;
 	uint32_t		*dpg_sram_curr_addr;
 	atomic_t		dpg_enc_submission_cnt;
+	void			*fw_shared_cpu_addr;
+	uint64_t		fw_shared_gpu_addr;
 };
 
 struct amdgpu_vcn {
@@ -209,6 +219,20 @@ struct amdgpu_vcn {
 			       int inst_idx, struct dpg_pause_state *new_state);
 };
 
+struct amdgpu_fw_shared_multi_queue {
+	uint8_t decode_queue_mode;
+	uint8_t encode_generalpurpose_queue_mode;
+	uint8_t encode_lowlatency_queue_mode;
+	uint8_t encode_realtime_queue_mode;
+	uint8_t padding[4];
+};
+
+struct amdgpu_fw_shared {
+	uint32_t present_flag_0;
+	uint8_t pad[53];
+	struct amdgpu_fw_shared_multi_queue multi_queue;
+} __attribute__((__packed__));
+
 int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
 int amdgpu_vcn_sw_fini(struct amdgpu_device *adev);
 int amdgpu_vcn_suspend(struct amdgpu_device *adev);
-- cgit
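struct amdgpu_fw_shared is the CPU-visible layout of the VRAM buffer allocated above:
the driver writes it through fw_shared_cpu_addr and the firmware reads it. A sketch of
how a per-version start routine might publish the multi-queue capability through it
(illustrative only: the flag programming belongs to the IP-specific code such as
vcn_v2_0.c rather than this patch, and the helper name is invented):

/*
 * Sketch of publishing the multi-queue capability through the shared buffer.
 * Not part of this patch; shown only to illustrate how the new structures
 * are meant to be consumed.
 */
static void vcn_fw_shared_init_sketch(struct amdgpu_device *adev, int inst)
{
        volatile struct amdgpu_fw_shared *fw_shared =
                adev->vcn.inst[inst].fw_shared_cpu_addr;

        /* Tell the firmware which optional structures are present. */
        fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);

        /* Example queue mode: request a ring reset on the decode queue at start. */
        fw_shared->multi_queue.decode_queue_mode = FW_QUEUE_RING_RESET;
}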
From a500194e73c87e112a6d3833b38eefb4057a2c9e Mon Sep 17 00:00:00 2001
From: Colin Ian King
Date: Wed, 1 Apr 2020 17:35:45 +0100
Subject: drm/amdgpu/vcn: fix spelling mistake "fimware" -> "firmware"

There is a spelling mistake in a dev_err error message. Fix it.

Signed-off-by: Colin Ian King
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 328b6ceb80de..d653a18dcbc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -187,7 +187,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 				PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].fw_shared_bo,
 				&adev->vcn.inst[i].fw_shared_gpu_addr, &adev->vcn.inst[i].fw_shared_cpu_addr);
 		if (r) {
-			dev_err(adev->dev, "VCN %d (%d) failed to allocate fimware shared bo\n", i, r);
+			dev_err(adev->dev, "VCN %d (%d) failed to allocate firmware shared bo\n", i, r);
 			return r;
 		}
-- cgit
From 21b704d78352c289d31697824ceea7ad0ff4ce59 Mon Sep 17 00:00:00 2001
From: James Zhu
Date: Thu, 2 Apr 2020 23:25:45 -0400
Subject: drm/amdgpu/vcn: add shared memory restore after wake up from sleep.

VCN shared memory needs to be restored after waking up from S3.

v2: Allocate shared memory saved_bo at sw_init and free it in sw_fini.

Signed-off-by: James Zhu
Reviewed-by: Feifei Xu
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 28 +++++++++++++++++++++++++++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h |  1 +
 2 files changed, 28 insertions(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index d653a18dcbc3..dab34f695121 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -56,7 +56,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
 
 int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 {
-	unsigned long bo_size;
+	unsigned long bo_size, fw_shared_bo_size;
 	const char *fw_name;
 	const struct common_firmware_header *hdr;
 	unsigned char fw_check;
@@ -190,6 +190,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 			dev_err(adev->dev, "VCN %d (%d) failed to allocate firmware shared bo\n", i, r);
 			return r;
 		}
+
+		fw_shared_bo_size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
+		adev->vcn.inst[i].saved_shm_bo = kvmalloc(fw_shared_bo_size, GFP_KERNEL);
 	}
 
 	return 0;
@@ -205,6 +208,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 		if (adev->vcn.harvest_config & (1 << j))
 			continue;
 
+		kvfree(adev->vcn.inst[j].saved_shm_bo);
 		amdgpu_bo_free_kernel(&adev->vcn.inst[j].fw_shared_bo,
 					  &adev->vcn.inst[j].fw_shared_gpu_addr,
 					  (void **)&adev->vcn.inst[j].fw_shared_cpu_addr);
@@ -254,6 +258,17 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
 			return -ENOMEM;
 
 		memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
+
+		if (adev->vcn.inst[i].fw_shared_bo == NULL)
+			return 0;
+
+		if (!adev->vcn.inst[i].saved_shm_bo)
+			return -ENOMEM;
+
+		size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
+		ptr = adev->vcn.inst[i].fw_shared_cpu_addr;
+
+		memcpy_fromio(adev->vcn.inst[i].saved_shm_bo, ptr, size);
 	}
 	return 0;
 }
@@ -291,6 +306,17 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
 			}
 			memset_io(ptr, 0, size);
 		}
+
+		if (adev->vcn.inst[i].fw_shared_bo == NULL)
+			return -EINVAL;
+
+		size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
+		ptr = adev->vcn.inst[i].fw_shared_cpu_addr;
+
+		if (adev->vcn.inst[i].saved_shm_bo != NULL)
+			memcpy_toio(ptr, adev->vcn.inst[i].saved_shm_bo, size);
+		else
+			memset_io(ptr, 0, size);
 	}
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 6da6e2e0fdd8..90aa12b22725 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -194,6 +194,7 @@ struct amdgpu_vcn_inst {
 	atomic_t	dpg_enc_submission_cnt;
 	void		*fw_shared_cpu_addr;
 	uint64_t	fw_shared_gpu_addr;
+	void		*saved_shm_bo;
 };
 
 struct amdgpu_vcn {
-- cgit
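The save/restore works because the shared buffer is VRAM with a persistent kernel CPU
mapping, so it has to be copied through the I/O-memory helpers rather than a plain
memcpy(), while saved_shm_bo is, despite its name, an ordinary kvmalloc() buffer in
system memory. The contract in isolation (a minimal sketch; the helper names are
invented and do not exist in the driver):

/*
 * Minimal sketch of the S3 save/restore contract used above.  "vram" stands
 * for the persistent CPU mapping of the VRAM buffer (fw_shared_cpu_addr) and
 * "saved" for the kvmalloc()ed system-memory copy (saved_shm_bo).
 */
static int vcn_shm_save_sketch(void *saved, const void __iomem *vram, size_t size)
{
        if (!saved)
                return -ENOMEM;                 /* allocation at sw_init failed */
        memcpy_fromio(saved, vram, size);       /* VRAM -> system memory */
        return 0;
}

static void vcn_shm_restore_sketch(void __iomem *vram, const void *saved, size_t size)
{
        if (saved)
                memcpy_toio(vram, saved, size); /* system memory -> VRAM */
        else
                memset_io(vram, 0, size);       /* nothing saved: start from a clean state */
}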
From 4f610503f0077c1d149b9f98c3fa966fb9551e85 Mon Sep 17 00:00:00 2001
From: James Zhu
Date: Sat, 11 Apr 2020 08:41:52 -0400
Subject: Revert "drm/amdgpu: Disable gfx off if VCN is busy"

This reverts commit 3fded222f4bf7f4c56ef4854872a39a4de08f7a8.

This was a workaround for vcn1 only; vcn1 now has its own separate
begin_use and idle work handler.

Signed-off-by: James Zhu
Tested-by: changzhu
Reviewed-by: Alex Deucher
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 2 --
 1 file changed, 2 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index dab34f695121..2de99b441601 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -353,7 +353,6 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 	}
 
 	if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
-		amdgpu_gfx_off_ctrl(adev, true);
 		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
 		       AMD_PG_STATE_GATE);
 	} else {
@@ -369,7 +368,6 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 	cancel_delayed_work_sync(&adev->vcn.idle_work);
 
 	mutex_lock(&adev->vcn.vcn_pg_lock);
-	amdgpu_gfx_off_ctrl(adev, false);
 	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
 		AMD_PG_STATE_UNGATE);
 
-- cgit