Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c')
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 522 |
1 file changed, 433 insertions, 89 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 201645963ba5..0b52af415b28 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -27,6 +27,8 @@ #include <linux/firmware.h> #include <linux/module.h> #include <linux/pci.h> +#include <linux/debugfs.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_pm.h" @@ -48,6 +50,12 @@ #define FIRMWARE_VANGOGH "amdgpu/vangogh_vcn.bin" #define FIRMWARE_DIMGREY_CAVEFISH "amdgpu/dimgrey_cavefish_vcn.bin" #define FIRMWARE_ALDEBARAN "amdgpu/aldebaran_vcn.bin" +#define FIRMWARE_BEIGE_GOBY "amdgpu/beige_goby_vcn.bin" +#define FIRMWARE_YELLOW_CARP "amdgpu/yellow_carp_vcn.bin" +#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2.bin" +#define FIRMWARE_VCN4_0_0 "amdgpu/vcn_4_0_0.bin" +#define FIRMWARE_VCN4_0_2 "amdgpu/vcn_4_0_2.bin" +#define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN); MODULE_FIRMWARE(FIRMWARE_PICASSO); @@ -63,6 +71,12 @@ MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID); MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER); MODULE_FIRMWARE(FIRMWARE_VANGOGH); MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH); +MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY); +MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP); +MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2); +MODULE_FIRMWARE(FIRMWARE_VCN4_0_0); +MODULE_FIRMWARE(FIRMWARE_VCN4_0_2); +MODULE_FIRMWARE(FIRMWARE_VCN4_0_4); static void amdgpu_vcn_idle_work_handler(struct work_struct *work); @@ -72,6 +86,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) const char *fw_name; const struct common_firmware_header *hdr; unsigned char fw_check; + unsigned int fw_shared_size, log_offset; int i, r; INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler); @@ -81,8 +96,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) for (i = 0; i < adev->vcn.num_vcn_inst; i++) atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0); - switch (adev->asic_type) { - case CHIP_RAVEN: + switch (adev->ip_versions[UVD_HWIP][0]) { + case IP_VERSION(1, 0, 0): + case IP_VERSION(1, 0, 1): if (adev->apu_flags & AMD_APU_IS_RAVEN2) fw_name = FIRMWARE_RAVEN2; else if (adev->apu_flags & AMD_APU_IS_PICASSO) @@ -90,13 +106,13 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) else fw_name = FIRMWARE_RAVEN; break; - case CHIP_ARCTURUS: + case IP_VERSION(2, 5, 0): fw_name = FIRMWARE_ARCTURUS; if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; break; - case CHIP_RENOIR: + case IP_VERSION(2, 2, 0): if (adev->apu_flags & AMD_APU_IS_RENOIR) fw_name = FIRMWARE_RENOIR; else @@ -106,51 +122,83 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; break; - case CHIP_ALDEBARAN: + case IP_VERSION(2, 6, 0): fw_name = FIRMWARE_ALDEBARAN; if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; break; - case CHIP_NAVI10: + case IP_VERSION(2, 0, 0): fw_name = FIRMWARE_NAVI10; if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; break; - case CHIP_NAVI14: - fw_name = FIRMWARE_NAVI14; + case IP_VERSION(2, 0, 2): + if (adev->asic_type == CHIP_NAVI12) + fw_name = FIRMWARE_NAVI12; + else + fw_name = FIRMWARE_NAVI14; if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; break; - case 
CHIP_NAVI12: - fw_name = FIRMWARE_NAVI12; + case IP_VERSION(3, 0, 0): + case IP_VERSION(3, 0, 64): + case IP_VERSION(3, 0, 192): + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) + fw_name = FIRMWARE_SIENNA_CICHLID; + else + fw_name = FIRMWARE_NAVY_FLOUNDER; if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; break; - case CHIP_SIENNA_CICHLID: - fw_name = FIRMWARE_SIENNA_CICHLID; + case IP_VERSION(3, 0, 2): + fw_name = FIRMWARE_VANGOGH; + break; + case IP_VERSION(3, 0, 16): + fw_name = FIRMWARE_DIMGREY_CAVEFISH; if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; break; - case CHIP_NAVY_FLOUNDER: - fw_name = FIRMWARE_NAVY_FLOUNDER; + case IP_VERSION(3, 0, 33): + fw_name = FIRMWARE_BEIGE_GOBY; if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; break; - case CHIP_VANGOGH: - fw_name = FIRMWARE_VANGOGH; + case IP_VERSION(3, 1, 1): + fw_name = FIRMWARE_YELLOW_CARP; + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && + (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) + adev->vcn.indirect_sram = true; break; - case CHIP_DIMGREY_CAVEFISH: - fw_name = FIRMWARE_DIMGREY_CAVEFISH; + case IP_VERSION(3, 1, 2): + fw_name = FIRMWARE_VCN_3_1_2; if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; break; + case IP_VERSION(4, 0, 0): + fw_name = FIRMWARE_VCN4_0_0; + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && + (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) + adev->vcn.indirect_sram = true; + break; + case IP_VERSION(4, 0, 2): + fw_name = FIRMWARE_VCN4_0_2; + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && + (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) + adev->vcn.indirect_sram = true; + break; + case IP_VERSION(4, 0, 4): + fw_name = FIRMWARE_VCN4_0_4; + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && + (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) + adev->vcn.indirect_sram = true; + break; default: return -EINVAL; } @@ -204,7 +252,19 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); - bo_size += AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)); + + if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0)){ + fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)); + log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log); + } else { + fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)); + log_offset = offsetof(struct amdgpu_fw_shared, fw_log); + } + + bo_size += fw_shared_size; + + if (amdgpu_vcnfw_log) + bo_size += AMDGPU_VCNFW_LOG_SIZE; for (i = 0; i < adev->vcn.num_vcn_inst; i++) { if (adev->vcn.harvest_config & (1 << i)) @@ -218,10 +278,18 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) return r; } - adev->vcn.inst[i].fw_shared_cpu_addr = adev->vcn.inst[i].cpu_addr + - bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)); - adev->vcn.inst[i].fw_shared_gpu_addr = adev->vcn.inst[i].gpu_addr + - bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)); + adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr + + bo_size - fw_shared_size; + adev->vcn.inst[i].fw_shared.gpu_addr = 
adev->vcn.inst[i].gpu_addr + + bo_size - fw_shared_size; + + adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size; + + if (amdgpu_vcnfw_log) { + adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE; + adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE; + adev->vcn.inst[i].fw_shared.log_offset = log_offset; + } if (adev->vcn.indirect_sram) { r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE, @@ -241,8 +309,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) { int i, j; - cancel_delayed_work_sync(&adev->vcn.idle_work); - for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { if (adev->vcn.harvest_config & (1 << j)) continue; @@ -271,11 +337,39 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) return 0; } +/* from vcn4 and above, only unified queue is used */ +static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + bool ret = false; + + if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0)) + ret = true; + + return ret; +} + +bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance) +{ + bool ret = false; + int vcn_config = adev->vcn.vcn_config[vcn_instance]; + + if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK)) { + ret = true; + } else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK)) { + ret = true; + } else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK)) { + ret = true; + } + + return ret; +} + int amdgpu_vcn_suspend(struct amdgpu_device *adev) { unsigned size; void *ptr; - int i; + int i, idx; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -292,7 +386,10 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev) if (!adev->vcn.inst[i].saved_bo) return -ENOMEM; - memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size); + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size); + drm_dev_exit(idx); + } } return 0; } @@ -301,7 +398,7 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev) { unsigned size; void *ptr; - int i; + int i, idx; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) @@ -313,7 +410,10 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev) ptr = adev->vcn.inst[i].cpu_addr; if (adev->vcn.inst[i].saved_bo != NULL) { - memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size); + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size); + drm_dev_exit(idx); + } kvfree(adev->vcn.inst[i].saved_bo); adev->vcn.inst[i].saved_bo = NULL; } else { @@ -323,8 +423,11 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev) hdr = (const struct common_firmware_header *)adev->vcn.fw->data; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset, - le32_to_cpu(hdr->ucode_size_bytes)); + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset, + le32_to_cpu(hdr->ucode_size_bytes)); + drm_dev_exit(idx); + } size -= le32_to_cpu(hdr->ucode_size_bytes); ptr += le32_to_cpu(hdr->ucode_size_bytes); } @@ -367,7 +470,6 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) } if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) { - amdgpu_gfx_off_ctrl(adev, true); amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, AMD_PG_STATE_GATE); r = 
amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO, @@ -387,7 +489,6 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring) atomic_inc(&adev->vcn.total_submission_cnt); if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) { - amdgpu_gfx_off_ctrl(adev, false); r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO, true); if (r) @@ -496,15 +597,14 @@ int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring) } static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, - struct amdgpu_bo *bo, + struct amdgpu_ib *ib_msg, struct dma_fence **fence) { struct amdgpu_device *adev = ring->adev; struct dma_fence *f = NULL; struct amdgpu_job *job; struct amdgpu_ib *ib; - uint64_t addr; - void *msg = NULL; + uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); int i, r; r = amdgpu_job_alloc_with_ib(adev, 64, @@ -513,8 +613,6 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, goto err; ib = &job->ibs[0]; - addr = amdgpu_bo_gpu_offset(bo); - msg = amdgpu_bo_kptr(bo); ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0); ib->ptr[1] = addr; ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0); @@ -531,9 +629,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, if (r) goto err_free; - amdgpu_bo_fence(bo, f, false); - amdgpu_bo_unreserve(bo); - amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg); + amdgpu_ib_free(adev, ib_msg, f); if (fence) *fence = dma_fence_get(f); @@ -543,27 +639,26 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, err_free: amdgpu_job_free(job); - err: - amdgpu_bo_unreserve(bo); - amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg); + amdgpu_ib_free(adev, ib_msg, f); return r; } static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_bo **bo) + struct amdgpu_ib *ib) { struct amdgpu_device *adev = ring->adev; uint32_t *msg; int r, i; - *bo = NULL; - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - bo, NULL, (void **)&msg); + memset(ib, 0, sizeof(*ib)); + r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2, + AMDGPU_IB_POOL_DIRECT, + ib); if (r) return r; + msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr); msg[0] = cpu_to_le32(0x00000028); msg[1] = cpu_to_le32(0x00000038); msg[2] = cpu_to_le32(0x00000001); @@ -585,19 +680,20 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand } static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_bo **bo) + struct amdgpu_ib *ib) { struct amdgpu_device *adev = ring->adev; uint32_t *msg; int r, i; - *bo = NULL; - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - bo, NULL, (void **)&msg); + memset(ib, 0, sizeof(*ib)); + r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2, + AMDGPU_IB_POOL_DIRECT, + ib); if (r) return r; + msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr); msg[0] = cpu_to_le32(0x00000028); msg[1] = cpu_to_le32(0x00000018); msg[2] = cpu_to_le32(0x00000000); @@ -613,21 +709,21 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence = NULL; - struct amdgpu_bo *bo; + struct amdgpu_ib ib; long r; - r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo); + r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib); if (r) goto error; - r = amdgpu_vcn_dec_send_msg(ring, bo, NULL); + r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL); if (r) goto error; - r = 
amdgpu_vcn_dec_get_destroy_msg(ring, 1, &bo); + r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib); if (r) goto error; - r = amdgpu_vcn_dec_send_msg(ring, bo, &fence); + r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence); if (r) goto error; @@ -642,28 +738,70 @@ error: return r; } +static uint32_t *amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib, + uint32_t ib_pack_in_dw, bool enc) +{ + uint32_t *ib_checksum; + + ib->ptr[ib->length_dw++] = 0x00000010; /* single queue checksum */ + ib->ptr[ib->length_dw++] = 0x30000002; + ib_checksum = &ib->ptr[ib->length_dw++]; + ib->ptr[ib->length_dw++] = ib_pack_in_dw; + + ib->ptr[ib->length_dw++] = 0x00000010; /* engine info */ + ib->ptr[ib->length_dw++] = 0x30000001; + ib->ptr[ib->length_dw++] = enc ? 0x2 : 0x3; + ib->ptr[ib->length_dw++] = ib_pack_in_dw * sizeof(uint32_t); + + return ib_checksum; +} + +static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum, + uint32_t ib_pack_in_dw) +{ + uint32_t i; + uint32_t checksum = 0; + + for (i = 0; i < ib_pack_in_dw; i++) + checksum += *(*ib_checksum + 2 + i); + + **ib_checksum = checksum; +} + static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring, - struct amdgpu_bo *bo, - struct dma_fence **fence) + struct amdgpu_ib *ib_msg, + struct dma_fence **fence) { struct amdgpu_vcn_decode_buffer *decode_buffer = NULL; - const unsigned int ib_size_dw = 64; + unsigned int ib_size_dw = 64; struct amdgpu_device *adev = ring->adev; struct dma_fence *f = NULL; struct amdgpu_job *job; struct amdgpu_ib *ib; - uint64_t addr; + uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); + bool sq = amdgpu_vcn_using_unified_queue(ring); + uint32_t *ib_checksum; + uint32_t ib_pack_in_dw; int i, r; + if (sq) + ib_size_dw += 8; + r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, &job); if (r) goto err; ib = &job->ibs[0]; - addr = amdgpu_bo_gpu_offset(bo); ib->length_dw = 0; + /* single queue headers */ + if (sq) { + ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t) + + 4 + 2; /* engine info + decoding ib in dw */ + ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false); + } + ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8; ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER); decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]); @@ -677,13 +815,14 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring, for (i = ib->length_dw; i < ib_size_dw; ++i) ib->ptr[i] = 0x0; + if (sq) + amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw); + r = amdgpu_job_submit_direct(job, ring, &f); if (r) goto err_free; - amdgpu_bo_fence(bo, f, false); - amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); + amdgpu_ib_free(adev, ib_msg, f); if (fence) *fence = dma_fence_get(f); @@ -693,31 +832,29 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring, err_free: amdgpu_job_free(job); - err: - amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); + amdgpu_ib_free(adev, ib_msg, f); return r; } int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence = NULL; - struct amdgpu_bo *bo; + struct amdgpu_ib ib; long r; - r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo); + r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib); if (r) goto error; - r = amdgpu_vcn_dec_sw_send_msg(ring, bo, NULL); + r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL); if (r) goto error; - r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &bo); + r = 
amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib); if (r) goto error; - r = amdgpu_vcn_dec_sw_send_msg(ring, bo, &fence); + r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence); if (r) goto error; @@ -764,25 +901,34 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring) } static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_bo *bo, + struct amdgpu_ib *ib_msg, struct dma_fence **fence) { - const unsigned ib_size_dw = 16; + unsigned int ib_size_dw = 16; struct amdgpu_job *job; struct amdgpu_ib *ib; struct dma_fence *f = NULL; + uint32_t *ib_checksum = NULL; uint64_t addr; + bool sq = amdgpu_vcn_using_unified_queue(ring); int i, r; + if (sq) + ib_size_dw += 8; + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, &job); if (r) return r; ib = &job->ibs[0]; - addr = amdgpu_bo_gpu_offset(bo); + addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); ib->length_dw = 0; + + if (sq) + ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true); + ib->ptr[ib->length_dw++] = 0x00000018; ib->ptr[ib->length_dw++] = 0x00000001; /* session info */ ib->ptr[ib->length_dw++] = handle; @@ -802,6 +948,9 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand for (i = ib->length_dw; i < ib_size_dw; ++i) ib->ptr[i] = 0x0; + if (sq) + amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11); + r = amdgpu_job_submit_direct(job, ring, &f); if (r) goto err; @@ -818,25 +967,34 @@ err: } static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_bo *bo, + struct amdgpu_ib *ib_msg, struct dma_fence **fence) { - const unsigned ib_size_dw = 16; + unsigned int ib_size_dw = 16; struct amdgpu_job *job; struct amdgpu_ib *ib; struct dma_fence *f = NULL; + uint32_t *ib_checksum = NULL; uint64_t addr; + bool sq = amdgpu_vcn_using_unified_queue(ring); int i, r; + if (sq) + ib_size_dw += 8; + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, &job); if (r) return r; ib = &job->ibs[0]; - addr = amdgpu_bo_gpu_offset(bo); + addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); ib->length_dw = 0; + + if (sq) + ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true); + ib->ptr[ib->length_dw++] = 0x00000018; ib->ptr[ib->length_dw++] = 0x00000001; ib->ptr[ib->length_dw++] = handle; @@ -856,6 +1014,9 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han for (i = ib->length_dw; i < ib_size_dw; ++i) ib->ptr[i] = 0x0; + if (sq) + amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11); + r = amdgpu_job_submit_direct(job, ring, &f); if (r) goto err; @@ -873,21 +1034,23 @@ err: int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) { + struct amdgpu_device *adev = ring->adev; struct dma_fence *fence = NULL; - struct amdgpu_bo *bo = NULL; + struct amdgpu_ib ib; long r; - r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &bo, NULL, NULL); + memset(&ib, 0, sizeof(ib)); + r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE, + AMDGPU_IB_POOL_DIRECT, + &ib); if (r) return r; - r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL); + r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL); if (r) goto error; - r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence); + r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence); if (r) goto error; @@ -898,9 +1061,190 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) r = 0; error: + amdgpu_ib_free(adev, &ib, 
fence);
 	dma_fence_put(fence);
-	amdgpu_bo_unreserve(bo);
-	amdgpu_bo_free_kernel(&bo, NULL, NULL);
 
 	return r;
 }
+
+int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+	long r;
+
+	r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
+	if (r)
+		goto error;
+
+	r = amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout);
+
+error:
+	return r;
+}
+
+enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
+{
+	switch(ring) {
+	case 0:
+		return AMDGPU_RING_PRIO_0;
+	case 1:
+		return AMDGPU_RING_PRIO_1;
+	case 2:
+		return AMDGPU_RING_PRIO_2;
+	default:
+		return AMDGPU_RING_PRIO_0;
+	}
+}
+
+void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
+{
+	int i;
+	unsigned int idx;
+
+	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+		const struct common_firmware_header *hdr;
+		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+
+		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+			if (adev->vcn.harvest_config & (1 << i))
+				continue;
+			/* currently only support 2 FW instances */
+			if (i >= 2) {
+				dev_info(adev->dev, "More then 2 VCN FW instances!\n");
+				break;
+			}
+			idx = AMDGPU_UCODE_ID_VCN + i;
+			adev->firmware.ucode[idx].ucode_id = idx;
+			adev->firmware.ucode[idx].fw = adev->vcn.fw;
+			adev->firmware.fw_size +=
+				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
+		}
+		dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
+	}
+}
+
+/*
+ * debugfs for mapping vcn firmware log buffer.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
+					     size_t size, loff_t *pos)
+{
+	struct amdgpu_vcn_inst *vcn;
+	void *log_buf;
+	volatile struct amdgpu_vcn_fwlog *plog;
+	unsigned int read_pos, write_pos, available, i, read_bytes = 0;
+	unsigned int read_num[2] = {0};
+
+	vcn = file_inode(f)->i_private;
+	if (!vcn)
+		return -ENODEV;
+
+	if (!vcn->fw_shared.cpu_addr || !amdgpu_vcnfw_log)
+		return -EFAULT;
+
+	log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
+
+	plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
+	read_pos = plog->rptr;
+	write_pos = plog->wptr;
+
+	if (read_pos > AMDGPU_VCNFW_LOG_SIZE || write_pos > AMDGPU_VCNFW_LOG_SIZE)
+		return -EFAULT;
+
+	if (!size || (read_pos == write_pos))
+		return 0;
+
+	if (write_pos > read_pos) {
+		available = write_pos - read_pos;
+		read_num[0] = min(size, (size_t)available);
+	} else {
+		read_num[0] = AMDGPU_VCNFW_LOG_SIZE - read_pos;
+		available = read_num[0] + write_pos - plog->header_size;
+		if (size > available)
+			read_num[1] = write_pos - plog->header_size;
+		else if (size > read_num[0])
+			read_num[1] = size - read_num[0];
+		else
+			read_num[0] = size;
+	}
+
+	for (i = 0; i < 2; i++) {
+		if (read_num[i]) {
+			if (read_pos == AMDGPU_VCNFW_LOG_SIZE)
+				read_pos = plog->header_size;
+			if (read_num[i] == copy_to_user((buf + read_bytes),
+							(log_buf + read_pos), read_num[i]))
+				return -EFAULT;
+
+			read_bytes += read_num[i];
+			read_pos += read_num[i];
+		}
+	}
+
+	plog->rptr = read_pos;
+	*pos += read_bytes;
+	return read_bytes;
+}
+
+static const struct file_operations amdgpu_debugfs_vcnfwlog_fops = {
+	.owner = THIS_MODULE,
+	.read = amdgpu_debugfs_vcn_fwlog_read,
+	.llseek = default_llseek
+};
+#endif
+
+void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
+				   struct amdgpu_vcn_inst *vcn)
+{
+#if defined(CONFIG_DEBUG_FS)
+	struct drm_minor *minor = adev_to_drm(adev)->primary;
+	struct dentry *root = minor->debugfs_root;
+	char name[32];
+
+	sprintf(name, "amdgpu_vcn_%d_fwlog", i);
+	debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, vcn,
+				 &amdgpu_debugfs_vcnfwlog_fops,
+				 AMDGPU_VCNFW_LOG_SIZE);
+#endif
+}
+
+void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
+{
+#if defined(CONFIG_DEBUG_FS)
+	volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
+	void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
+	uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
+	volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
+	volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
+						+ vcn->fw_shared.log_offset;
+	*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
+	fw_log->is_enabled = 1;
+	fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF);
+	fw_log->addr_hi = cpu_to_le32(fw_log_gpu_addr >> 32);
+	fw_log->size = cpu_to_le32(AMDGPU_VCNFW_LOG_SIZE);
+
+	log_buf->header_size = sizeof(struct amdgpu_vcn_fwlog);
+	log_buf->buffer_size = AMDGPU_VCNFW_LOG_SIZE;
+	log_buf->rptr = log_buf->header_size;
+	log_buf->wptr = log_buf->header_size;
+	log_buf->wrapped = 0;
+#endif
+}
+
+int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
+			struct amdgpu_irq_src *source,
+			struct amdgpu_iv_entry *entry)
+{
+	struct ras_common_if *ras_if = adev->vcn.ras_if;
+	struct ras_dispatch_if ih_data = {
+		.entry = entry,
+	};
+
+	if (!ras_if)
+		return 0;
+
+	ih_data.head = *ras_if;
+	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+
+	return 0;
+}