Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 79
1 file changed, 24 insertions, 55 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 65debb65a5df..42c1f050542f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -74,91 +74,60 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
 	if (amdgpu_sriov_vf(adev) || vmid == 0 || !amdgpu_mcbp)
 		return 0;
 
-	r = amdgpu_sdma_get_index_from_ring(ring, &index);
+	if (ring->is_mes_queue) {
+		uint32_t offset = 0;
 
-	if (r || index > 31)
-		csa_mc_addr = 0;
-	else
-		csa_mc_addr = amdgpu_csa_vaddr(adev) +
-			AMDGPU_CSA_SDMA_OFFSET +
-			index * AMDGPU_CSA_SDMA_SIZE;
+		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
+				  sdma[ring->idx].sdma_meta_data);
+		csa_mc_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
+	} else {
+		r = amdgpu_sdma_get_index_from_ring(ring, &index);
+
+		if (r || index > 31)
+			csa_mc_addr = 0;
+		else
+			csa_mc_addr = amdgpu_csa_vaddr(adev) +
+				AMDGPU_CSA_SDMA_OFFSET +
+				index * AMDGPU_CSA_SDMA_SIZE;
+	}
 
 	return csa_mc_addr;
 }
 
 int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
-			      void *ras_ih_info)
+			      struct ras_common_if *ras_block)
 {
 	int r, i;
-	struct ras_ih_if *ih_info = (struct ras_ih_if *)ras_ih_info;
-	struct ras_fs_if fs_info = {
-		.sysfs_name = "sdma_err_count",
-	};
-
-	if (!ih_info)
-		return -EINVAL;
 
-	if (!adev->sdma.ras_if) {
-		adev->sdma.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
-		if (!adev->sdma.ras_if)
-			return -ENOMEM;
-		adev->sdma.ras_if->block = AMDGPU_RAS_BLOCK__SDMA;
-		adev->sdma.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
-		adev->sdma.ras_if->sub_block_index = 0;
-	}
-	fs_info.head = ih_info->head = *adev->sdma.ras_if;
-
-	r = amdgpu_ras_late_init(adev, adev->sdma.ras_if,
-				 &fs_info, ih_info);
+	r = amdgpu_ras_block_late_init(adev, ras_block);
 	if (r)
-		goto free;
+		return r;
 
-	if (amdgpu_ras_is_supported(adev, adev->sdma.ras_if->block)) {
+	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
 		for (i = 0; i < adev->sdma.num_instances; i++) {
 			r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq,
 				AMDGPU_SDMA_IRQ_INSTANCE0 + i);
 			if (r)
 				goto late_fini;
 		}
-	} else {
-		r = 0;
-		goto free;
 	}
 
 	return 0;
 
 late_fini:
-	amdgpu_ras_late_fini(adev, adev->sdma.ras_if, ih_info);
-free:
-	kfree(adev->sdma.ras_if);
-	adev->sdma.ras_if = NULL;
+	amdgpu_ras_block_late_fini(adev, ras_block);
 	return r;
 }
 
-void amdgpu_sdma_ras_fini(struct amdgpu_device *adev)
-{
-	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) &&
-			adev->sdma.ras_if) {
-		struct ras_common_if *ras_if = adev->sdma.ras_if;
-		struct ras_ih_if ih_info = {
-			.head = *ras_if,
-			/* the cb member will not be used by
-			 * amdgpu_ras_interrupt_remove_handler, init it only
-			 * to cheat the check in ras_late_fini
-			 */
-			.cb = amdgpu_sdma_process_ras_data_cb,
-		};
-
-		amdgpu_ras_late_fini(adev, ras_if, &ih_info);
-		kfree(ras_if);
-	}
-}
-
 int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
 		void *err_data,
 		struct amdgpu_iv_entry *entry)
 {
 	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+
+	if (amdgpu_sriov_vf(adev))
+		return AMDGPU_RAS_SUCCESS;
+
 	amdgpu_ras_reset_gpu(adev);
 
 	return AMDGPU_RAS_SUCCESS;
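
For readers who want to see the arithmetic kept in the non-MES branch, here is a minimal standalone C sketch of the per-instance CSA address calculation. The base address and the two constants below are placeholders invented for illustration, not the real values behind amdgpu_csa_vaddr(), AMDGPU_CSA_SDMA_OFFSET and AMDGPU_CSA_SDMA_SIZE; only the shape of the computation (base + block offset + index * per-instance slot size, with invalid or out-of-range indices collapsing to 0) mirrors the else branch in the hunk above.

/* Illustrative userspace sketch, not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define CSA_VADDR_BASE     0x0000800000000000ULL /* placeholder base address   */
#define CSA_SDMA_OFFSET    0x1000                /* placeholder block offset   */
#define CSA_SDMA_SLOT_SIZE 0x400                 /* placeholder per-slot size  */

/* Mirrors the bounds check in the diff: a lookup error or an index
 * above 31 yields a zero (i.e. "no CSA") address. */
static uint64_t sdma_csa_mc_addr(int err, unsigned int index)
{
	if (err || index > 31)
		return 0;
	return CSA_VADDR_BASE + CSA_SDMA_OFFSET +
	       (uint64_t)index * CSA_SDMA_SLOT_SIZE;
}

int main(void)
{
	printf("instance 0:   0x%llx\n", (unsigned long long)sdma_csa_mc_addr(0, 0));
	printf("instance 3:   0x%llx\n", (unsigned long long)sdma_csa_mc_addr(0, 3));
	printf("out of range: 0x%llx\n", (unsigned long long)sdma_csa_mc_addr(0, 40));
	return 0;
}

The MES path added above sidesteps this fixed 32-slot indexing entirely: the per-ring SDMA metadata slot is located with offsetof() into struct amdgpu_mes_ctx_meta_data and translated to a GPU address via amdgpu_mes_ctx_get_offs_gpu_addr().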
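
The reworked amdgpu_sdma_ras_late_init() also illustrates the usual kernel unwind idiom: run the common late-init first, acquire one ECC IRQ reference per SDMA instance, and on any failure jump to a single cleanup label. The sketch below shows that control flow in isolation; every name in it is an illustrative stand-in, not a driver API.

/* Standalone sketch of the goto-based unwind pattern, with stubbed helpers. */
#include <stdio.h>

#define NUM_INSTANCES 2                          /* stand-in for adev->sdma.num_instances   */

static int  common_late_init(void) { return 0; } /* stands in for amdgpu_ras_block_late_init() */
static void common_late_fini(void) { }           /* stands in for amdgpu_ras_block_late_fini() */
static int  get_ecc_irq(int inst)  { return inst < NUM_INSTANCES ? 0 : -1; }

static int sdma_ras_late_init_sketch(void)
{
	int r, i;

	r = common_late_init();
	if (r)
		return r;                /* nothing acquired yet, just bail out */

	for (i = 0; i < NUM_INSTANCES; i++) {
		r = get_ecc_irq(i);
		if (r)
			goto late_fini;  /* one label undoes the common init */
	}

	return 0;

late_fini:
	common_late_fini();
	return r;
}

int main(void)
{
	printf("late_init: %d\n", sdma_ras_late_init_sketch());
	return 0;
}

Compared with the removed code, the per-IP boilerplate (allocating ras_if, filling ras_fs_if/ras_ih_if, and the separate amdgpu_sdma_ras_fini()) is gone; the caller now passes a struct ras_common_if and the generic amdgpu_ras_block_late_init()/amdgpu_ras_block_late_fini() pair owns that setup and teardown.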