Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 157
1 file changed, 151 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index f99093f2ebc7..f36e4f08db6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -52,7 +52,10 @@
#define FIRMWARE_ALDEBARAN "amdgpu/aldebaran_vcn.bin"
#define FIRMWARE_BEIGE_GOBY "amdgpu/beige_goby_vcn.bin"
#define FIRMWARE_YELLOW_CARP "amdgpu/yellow_carp_vcn.bin"
-#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2_vcn.bin"
+#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2.bin"
+#define FIRMWARE_VCN4_0_0 "amdgpu/vcn_4_0_0.bin"
+#define FIRMWARE_VCN4_0_2 "amdgpu/vcn_4_0_2.bin"
+#define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
@@ -71,6 +74,9 @@ MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2);
+MODULE_FIRMWARE(FIRMWARE_VCN4_0_0);
+MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
+MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
@@ -175,6 +181,24 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
+ case IP_VERSION(4, 0, 0):
+ fw_name = FIRMWARE_VCN4_0_0;
+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+ adev->vcn.indirect_sram = true;
+ break;
+ case IP_VERSION(4, 0, 2):
+ fw_name = FIRMWARE_VCN4_0_2;
+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+ adev->vcn.indirect_sram = false;
+ break;
+ case IP_VERSION(4, 0, 4):
+ fw_name = FIRMWARE_VCN4_0_4;
+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+ adev->vcn.indirect_sram = true;
+ break;
default:
return -EINVAL;
}
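
The fw_name chosen in this switch is consumed further down in amdgpu_vcn_sw_init(); a minimal sketch of that consumer, following the firmware-loading flow already present in this file (not part of this patch):

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}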
@@ -228,8 +252,15 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
- fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
- log_offset = offsetof(struct amdgpu_fw_shared, fw_log);
+
+ if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0)) {
+ fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));
+ log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);
+ } else {
+ fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
+ log_offset = offsetof(struct amdgpu_fw_shared, fw_log);
+ }
+
bo_size += fw_shared_size;
if (amdgpu_vcnfw_log)
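
The ordered >= comparison on ip_versions works because IP_VERSION() packs the version into a single integer; a minimal sketch, assuming the macro matches the kernel's usual encoding:

	/* assumed encoding: major from bit 16 up, minor in bits 15..8,
	 * revision in bits 7..0, so IP_VERSION(4, 0, 0) orders numerically
	 * above every 3.x.y value.
	 */
	#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))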
@@ -306,6 +337,18 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
return 0;
}
+/* from vcn4 and above, only unified queue is used */
+static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ bool ret = false;
+
+ if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0))
+ ret = true;
+
+ return ret;
+}
+
bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
{
bool ret = false;
@@ -695,19 +738,55 @@ error:
return r;
}
+static uint32_t *amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
+ uint32_t ib_pack_in_dw, bool enc)
+{
+ uint32_t *ib_checksum;
+
+ ib->ptr[ib->length_dw++] = 0x00000010; /* single queue checksum */
+ ib->ptr[ib->length_dw++] = 0x30000002;
+ ib_checksum = &ib->ptr[ib->length_dw++];
+ ib->ptr[ib->length_dw++] = ib_pack_in_dw;
+
+ ib->ptr[ib->length_dw++] = 0x00000010; /* engine info */
+ ib->ptr[ib->length_dw++] = 0x30000001;
+ ib->ptr[ib->length_dw++] = enc ? 0x2 : 0x3;
+ ib->ptr[ib->length_dw++] = ib_pack_in_dw * sizeof(uint32_t);
+
+ return ib_checksum;
+}
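
For reference, the eight dwords this helper emits (which is why callers grow ib_size_dw by 8), read straight off the stores above:

	/* dw0: 0x00000010          checksum packet size (16 bytes)
	 * dw1: 0x30000002          checksum packet type
	 * dw2: <checksum>          patched later by amdgpu_vcn_unified_ring_ib_checksum()
	 * dw3: ib_pack_in_dw       dword count the checksum covers
	 * dw4: 0x00000010          engine-info packet size (16 bytes)
	 * dw5: 0x30000001          engine-info packet type
	 * dw6: 0x2 (enc) / 0x3 (dec)
	 * dw7: ib_pack_in_dw * 4   payload size in bytes
	 */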
+
+static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum,
+ uint32_t ib_pack_in_dw)
+{
+ uint32_t i;
+ uint32_t checksum = 0;
+
+ for (i = 0; i < ib_pack_in_dw; i++)
+ checksum += *(*ib_checksum + 2 + i);
+
+ **ib_checksum = checksum;
+}
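
The sum starts two dwords past the returned checksum slot, i.e. at the engine-info packet (dw4 in the layout above), and runs for ib_pack_in_dw dwords. A worked example with hypothetical values:

	/* hypothetical: ib_pack_in_dw = 2, dw4 = 0x00000010, dw5 = 0x30000001
	 * => **ib_checksum = 0x00000010 + 0x30000001 = 0x30000011
	 */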
+
static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
struct amdgpu_ib *ib_msg,
struct dma_fence **fence)
{
struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
- const unsigned int ib_size_dw = 64;
+ unsigned int ib_size_dw = 64;
struct amdgpu_device *adev = ring->adev;
struct dma_fence *f = NULL;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
+ bool sq = amdgpu_vcn_using_unified_queue(ring);
+ uint32_t *ib_checksum;
+ uint32_t ib_pack_in_dw;
int i, r;
+ if (sq)
+ ib_size_dw += 8;
+
r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4,
AMDGPU_IB_POOL_DIRECT, &job);
if (r)
@@ -716,6 +795,13 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
ib = &job->ibs[0];
ib->length_dw = 0;
+ /* single queue headers */
+ if (sq) {
+ ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
+ + 4 + 2; /* engine info + decoding ib in dw */
+ ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
+ }
+
ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
@@ -729,6 +815,9 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
+ if (sq)
+ amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
+
r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err_free;
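
The ib_pack_in_dw accounting in the hunk above, spelled out (my reading of the code, not text from the patch):

	/* ib_pack_in_dw =
	 *     4                                            engine-info packet
	 *   + 2                                            size + flag dwords
	 *   + sizeof(struct amdgpu_vcn_decode_buffer) / 4  decode-buffer payload
	 * i.e. every dword from the engine-info packet onward is checksummed.
	 */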
@@ -815,13 +904,18 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
struct amdgpu_ib *ib_msg,
struct dma_fence **fence)
{
- const unsigned ib_size_dw = 16;
+ unsigned int ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
+ uint32_t *ib_checksum = NULL;
uint64_t addr;
+ bool sq = amdgpu_vcn_using_unified_queue(ring);
int i, r;
+ if (sq)
+ ib_size_dw += 8;
+
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
AMDGPU_IB_POOL_DIRECT, &job);
if (r)
@@ -831,6 +925,10 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
ib->length_dw = 0;
+
+ if (sq)
+ ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
+
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
ib->ptr[ib->length_dw++] = handle;
@@ -850,6 +948,9 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
+ if (sq)
+ amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
+
r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err;
@@ -869,13 +970,18 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
struct amdgpu_ib *ib_msg,
struct dma_fence **fence)
{
- const unsigned ib_size_dw = 16;
+ unsigned int ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
+ uint32_t *ib_checksum = NULL;
uint64_t addr;
+ bool sq = amdgpu_vcn_using_unified_queue(ring);
int i, r;
+ if (sq)
+ ib_size_dw += 8;
+
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
AMDGPU_IB_POOL_DIRECT, &job);
if (r)
@@ -885,6 +991,10 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
ib->length_dw = 0;
+
+ if (sq)
+ ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
+
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001;
ib->ptr[ib->length_dw++] = handle;
@@ -904,6 +1014,9 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
+ if (sq)
+ amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
+
r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err;
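
The create/destroy helpers are exercised back to back by the encode IB test; a minimal sketch of that assumed caller flow (mirroring the existing amdgpu_vcn_enc_ring_test_ib()):

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib_msg, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib_msg, &fence);
	if (r)
		goto error;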
@@ -954,6 +1067,20 @@ error:
return r;
}
+int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ long r;
+
+ r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
+ if (r)
+ goto error;
+
+ r = amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout);
+
+error:
+ return r;
+}
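
A sketch of how an IP block could wire this up; the vcn_v4_0_* table name and the other members are assumed here, only .test_ib is grounded in this patch:

	static const struct amdgpu_ring_funcs vcn_v4_0_unified_ring_funcs = {
		.type = AMDGPU_RING_TYPE_VCN_ENC,
		.test_ib = amdgpu_vcn_unified_ring_test_ib,
		/* remaining callbacks elided */
	};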
+
enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
{
switch(ring) {
@@ -1103,3 +1230,21 @@ void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
log_buf->wrapped = 0;
#endif
}
+
+int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->vcn.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+
+ return 0;
+}
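
A minimal consumer sketch, assuming an IP-specific interrupt source forwards its poison event to this common helper (the vcn_v4_0_* name is hypothetical):

	static int vcn_v4_0_process_poison_irq(struct amdgpu_device *adev,
					       struct amdgpu_irq_src *source,
					       struct amdgpu_iv_entry *entry)
	{
		return amdgpu_vcn_process_poison_irq(adev, source, entry);
	}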