Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
21 files changed, 231 insertions, 269 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 9e98f3866edc..03bbfaa51cbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -75,9 +75,6 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
 		return;
 
 	adev->kfd.dev = kgd2kfd_probe(adev, vf);
-
-	if (adev->kfd.dev)
-		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
 }
 
 /**
@@ -201,6 +198,8 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 		adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
 						adev_to_drm(adev), &gpu_resources);
 
+		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
+
 		INIT_WORK(&adev->kfd.reset_work, amdgpu_amdkfd_reset_work);
 	}
 }
@@ -210,6 +209,7 @@ void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev)
 	if (adev->kfd.dev) {
 		kgd2kfd_device_exit(adev->kfd.dev);
 		adev->kfd.dev = NULL;
+		amdgpu_amdkfd_total_mem_size -= adev->gmc.real_vram_size;
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 23998f727c7f..1a06b8d724f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -38,8 +38,6 @@
 #include <linux/pci.h>
 #include <linux/pm_runtime.h>
 #include <drm/drm_crtc_helper.h>
-#include <drm/drm_damage_helper.h>
-#include <drm/drm_drv.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_fb_helper.h>
@@ -500,12 +498,6 @@ static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
 	.create_handle = drm_gem_fb_create_handle,
 };
 
-static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
-	.destroy = drm_gem_fb_destroy,
-	.create_handle = drm_gem_fb_create_handle,
-	.dirty = drm_atomic_helper_dirtyfb,
-};
-
 uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
 					  uint64_t bo_flags)
 {
@@ -1108,10 +1100,8 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
 	if (ret)
 		goto err;
 
-	if (drm_drv_uses_atomic_modeset(dev))
-		ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs_atomic);
-	else
-		ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+
 	if (ret)
 		goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index e6a9b9fc9e0b..2e8f6cd7a729 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -688,13 +688,16 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
 	 * num of amdgpu_vm_pt entries.
 	 */
 	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
-	bp->destroy = &amdgpu_bo_vm_destroy;
 	r = amdgpu_bo_create(adev, bp, &bo_ptr);
 	if (r)
 		return r;
 
 	*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
 	INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list);
+	/* Set destroy callback to amdgpu_bo_vm_destroy after vmbo->shadow_list
+	 * is initialized.
+	 */
+	bo_ptr->tbo.destroy = &amdgpu_bo_vm_destroy;
 
 	return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index ccebd8e2a2d8..2dad7aa9a03b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -2877,9 +2877,9 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
 	err_data.err_addr =
 		kcalloc(adev->umc.max_ras_err_cnt_per_query,
 			sizeof(struct eeprom_table_record), GFP_KERNEL);
-	if(!err_data.err_addr) {
-		dev_warn(adev->dev, "Failed to alloc memory for "
-				"umc error address record in mca notifier!\n");
+	if (!err_data.err_addr) {
+		dev_warn(adev->dev,
+			"Failed to alloc memory for umc error record in mca notifier!\n");
 		return NOTIFY_DONE;
 	}
 
@@ -2889,7 +2889,7 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
 	if (adev->umc.ras &&
 	    adev->umc.ras->convert_ras_error_address)
 		adev->umc.ras->convert_ras_error_address(adev,
-			&err_data, 0, ch_inst, umc_inst, m->addr);
+			&err_data, m->addr, ch_inst, umc_inst);
 
 	if (amdgpu_bad_page_threshold != 0) {
 		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 3949b7e3907f..ea5278f094c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -222,8 +222,10 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
 		adev->sdma.instance[instance].fw->data;
 	version_major = le16_to_cpu(header->header_version_major);
 
-	if ((duplicate && instance) || (!duplicate && version_major > 1))
-		return -EINVAL;
+	if ((duplicate && instance) || (!duplicate && version_major > 1)) {
+		err = -EINVAL;
+		goto out;
+	}
 
 	err = amdgpu_sdma_init_inst_ctx(&adev->sdma.instance[instance]);
 	if (err)
@@ -272,7 +274,7 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
 				ALIGN(le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
 			break;
 		default:
-			return -EINVAL;
+			err = -EINVAL;
 		}
 	}
 
@@ -283,3 +285,24 @@ out:
 	}
 	return err;
 }
+
+void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *sdma;
+	int i;
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		if (adev->sdma.has_page_queue) {
+			sdma = &adev->sdma.instance[i].page;
+			if (adev->mman.buffer_funcs_ring == sdma) {
+				amdgpu_ttm_set_buffer_funcs_status(adev, false);
+				break;
+			}
+		}
+		sdma = &adev->sdma.instance[i].ring;
+		if (adev->mman.buffer_funcs_ring == sdma) {
+			amdgpu_ttm_set_buffer_funcs_status(adev, false);
+			break;
+		}
+	}
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index d2d88279fefb..7d99205c2e01 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -128,4 +128,6 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
 	char *fw_name, u32 instance, bool duplicate);
 void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
 	bool duplicate);
+void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev);
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index b1c455329023..dc262d2c2925 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -424,8 +424,9 @@ error:
 static bool amdgpu_mem_visible(struct amdgpu_device *adev,
 			       struct ttm_resource *mem)
 {
-	uint64_t mem_size = (u64)mem->num_pages << PAGE_SHIFT;
+	u64 mem_size = (u64)mem->num_pages << PAGE_SHIFT;
 	struct amdgpu_res_cursor cursor;
+	u64 end;
 
 	if (mem->mem_type == TTM_PL_SYSTEM ||
 	    mem->mem_type == TTM_PL_TT)
@@ -434,12 +435,18 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev,
 		return false;
 
 	amdgpu_res_first(mem, 0, mem_size, &cursor);
+	end = cursor.start + cursor.size;
+	while (cursor.remaining) {
+		amdgpu_res_next(&cursor, cursor.size);
 
-	/* ttm_resource_ioremap only supports contiguous memory */
-	if (cursor.size != mem_size)
-		return false;
+		/* ttm_resource_ioremap only supports contiguous memory */
+		if (end != cursor.start)
+			return false;
+
+		end = cursor.start + cursor.size;
+	}
 
-	return cursor.start + cursor.size <= adev->gmc.visible_vram_size;
+	return end <= adev->gmc.visible_vram_size;
 }
 
 /*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 2fb4951a6433..e46439274f3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -22,8 +22,6 @@
 #define __AMDGPU_UMC_H__
 #include "amdgpu_ras.h"
 
-#define UMC_INVALID_ADDR 0x1ULL
-
 /*
  * (addr / 256) * 4096, the higher 26 bits in ErrorAddr
  * is the index of 4KB block
@@ -54,9 +52,8 @@ struct amdgpu_umc_ras {
 	void (*err_cnt_init)(struct amdgpu_device *adev);
 	bool (*query_ras_poison_mode)(struct amdgpu_device *adev);
 	void (*convert_ras_error_address)(struct amdgpu_device *adev,
-				struct ras_err_data *err_data,
-				uint32_t umc_reg_offset, uint32_t ch_inst,
-				uint32_t umc_inst, uint64_t mca_addr);
+				struct ras_err_data *err_data, uint64_t err_addr,
+				uint32_t ch_inst, uint32_t umc_inst);
 	void (*ecc_info_query_ras_error_count)(struct amdgpu_device *adev,
 				void *ras_error_status);
 	void (*ecc_info_query_ras_error_address)(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 5647f13b98d4..cbca9866645c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -309,14 +309,10 @@ static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
  */
 static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
 	u32 rb_cntl;
 	int i;
 
-	if ((adev->mman.buffer_funcs_ring == sdma0) ||
-	    (adev->mman.buffer_funcs_ring == sdma1))
-		amdgpu_ttm_set_buffer_funcs_status(adev, false);
+	amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 6bdffdc1c0b9..c52d246a1d96 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -342,14 +342,10 @@ static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
 	u32 rb_cntl, ib_cntl;
 	int i;
 
-	if ((adev->mman.buffer_funcs_ring == sdma0) ||
-	    (adev->mman.buffer_funcs_ring == sdma1))
-		amdgpu_ttm_set_buffer_funcs_status(adev, false);
+	amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 2584fa3cb13e..486d9b5c1b9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -516,14 +516,10 @@ static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
 	u32 rb_cntl, ib_cntl;
 	int i;
 
-	if ((adev->mman.buffer_funcs_ring == sdma0) ||
-	    (adev->mman.buffer_funcs_ring == sdma1))
-		amdgpu_ttm_set_buffer_funcs_status(adev, false);
+	amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 7241a9fb0121..298fa11702e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -915,18 +915,12 @@ static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
 	u32 rb_cntl, ib_cntl;
-	int i, unset = 0;
-
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		sdma[i] = &adev->sdma.instance[i].ring;
+	int i;
 
-		if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
-			amdgpu_ttm_set_buffer_funcs_status(adev, false);
-			unset = 1;
-		}
+	amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
 		WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
@@ -957,20 +951,12 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
  */
 static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
 	u32 rb_cntl, ib_cntl;
 	int i;
-	bool unset = false;
-
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		sdma[i] = &adev->sdma.instance[i].page;
-
-		if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
-			(!unset)) {
-			amdgpu_ttm_set_buffer_funcs_status(adev, false);
-			unset = true;
-		}
+	amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
 					RB_ENABLE, 0);
@@ -1954,8 +1940,11 @@ static int sdma_v4_0_hw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int i;
 
-	if (amdgpu_sriov_vf(adev))
+	if (amdgpu_sriov_vf(adev)) {
+		/* disable the scheduler for SDMA */
+		amdgpu_sdma_unset_buffer_funcs_helper(adev);
 		return 0;
+	}
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index c05c3eebde4c..d4d9f196db83 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -584,14 +584,10 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
 	u32 rb_cntl, ib_cntl;
 	int i;
 
-	if ((adev->mman.buffer_funcs_ring == sdma0) ||
-	    (adev->mman.buffer_funcs_ring == sdma1))
-		amdgpu_ttm_set_buffer_funcs_status(adev, false);
+	amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
@@ -1460,8 +1456,11 @@ static int sdma_v5_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (amdgpu_sriov_vf(adev))
+	if (amdgpu_sriov_vf(adev)) {
+		/* disable the scheduler for SDMA */
+		amdgpu_sdma_unset_buffer_funcs_helper(adev);
 		return 0;
+	}
 
 	sdma_v5_0_ctx_switch_enable(adev, false);
 	sdma_v5_0_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index f136fec7b4f4..809eca54fc61 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -414,18 +414,10 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
-	struct amdgpu_ring *sdma2 = &adev->sdma.instance[2].ring;
-	struct amdgpu_ring *sdma3 = &adev->sdma.instance[3].ring;
 	u32 rb_cntl, ib_cntl;
 	int i;
 
-	if ((adev->mman.buffer_funcs_ring == sdma0) ||
-	    (adev->mman.buffer_funcs_ring == sdma1) ||
-	    (adev->mman.buffer_funcs_ring == sdma2) ||
-	    (adev->mman.buffer_funcs_ring == sdma3))
-		amdgpu_ttm_set_buffer_funcs_status(adev, false);
+	amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
@@ -1357,8 +1349,11 @@ static int sdma_v5_2_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (amdgpu_sriov_vf(adev))
+	if (amdgpu_sriov_vf(adev)) {
+		/* disable the scheduler for SDMA */
+		amdgpu_sdma_unset_buffer_funcs_helper(adev);
 		return 0;
+	}
 
 	sdma_v5_2_ctx_switch_enable(adev, false);
 	sdma_v5_2_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index db51230163c5..da3beb0bf2fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -398,14 +398,10 @@ static void sdma_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
 	u32 rb_cntl, ib_cntl;
 	int i;
 
-	if ((adev->mman.buffer_funcs_ring == sdma0) ||
-	    (adev->mman.buffer_funcs_ring == sdma1))
-		amdgpu_ttm_set_buffer_funcs_status(adev, false);
+	amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
@@ -415,9 +411,6 @@ static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 0);
 		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
 	}
-
-	sdma0->sched.ready = false;
-	sdma1->sched.ready = false;
 }
 
 /**
@@ -846,7 +839,8 @@ static int sdma_v6_0_mqd_init(struct amdgpu_device *adev, void *mqd,
 	m->sdmax_rlcx_rb_cntl =
 		order_base_2(prop->queue_size / 4) << SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
 		1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
-		4 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
+		4 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
+		1 << SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT;
 
 	m->sdmax_rlcx_rb_base = lower_32_bits(prop->hqd_base_gpu_addr >> 8);
 	m->sdmax_rlcx_rb_base_hi = upper_32_bits(prop->hqd_base_gpu_addr >> 8);
@@ -1317,8 +1311,11 @@ static int sdma_v6_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (amdgpu_sriov_vf(adev))
+	if (amdgpu_sriov_vf(adev)) {
+		/* disable the scheduler for SDMA */
+		amdgpu_sdma_unset_buffer_funcs_helper(adev);
 		return 0;
+	}
 
 	sdma_v6_0_ctx_switch_enable(adev, false);
 	sdma_v6_0_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index f675111ace20..4d5e718540aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -116,15 +116,14 @@ static void si_dma_stop(struct amdgpu_device *adev)
 	u32 rb_cntl;
 	unsigned i;
 
+	amdgpu_sdma_unset_buffer_funcs_helper(adev);
+
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		ring = &adev->sdma.instance[i].ring;
 		/* dma0 */
 		rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
 		rb_cntl &= ~DMA_RB_ENABLE;
 		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
-
-		if (adev->mman.buffer_funcs_ring == ring)
-			amdgpu_ttm_set_buffer_funcs_status(adev, false);
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 16b757664a35..795706b3b092 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -629,6 +629,7 @@ static int soc21_common_early_init(void *handle)
 			AMD_CG_SUPPORT_JPEG_MGCG;
 		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
+			AMD_PG_SUPPORT_VCN |
 			AMD_PG_SUPPORT_VCN_DPG |
 			AMD_PG_SUPPORT_JPEG;
 		adev->external_rev_id = adev->rev_id + 0x1;
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 939cb203f7ad..f17d297b594b 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -327,10 +327,9 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
 		return;
 	}
 
-	/* calculate error address if ue/ce error is detected */
+	/* calculate error address if ue error is detected */
 	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
-	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
-	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
 
 		err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
 
 		/* the lowest lsb bits should be ignored */
@@ -343,10 +342,7 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
 				ADDR_OF_256B_BLOCK(channel_index) |
 				OFFSET_IN_256B_BLOCK(err_addr);
 
-		/* we only save ue error information currently, ce is skipped */
-		if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
-				== 1)
-			amdgpu_umc_fill_error_record(err_data, err_addr,
+		amdgpu_umc_fill_error_record(err_data, err_addr,
 					retired_page, channel_index, umc_inst);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
index a0d19b768346..5d5d031c9e7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -187,20 +187,51 @@ static void umc_v6_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
 	}
 }
 
+static void umc_v6_7_convert_error_address(struct amdgpu_device *adev,
+					struct ras_err_data *err_data, uint64_t err_addr,
+					uint32_t ch_inst, uint32_t umc_inst)
+{
+	uint32_t channel_index;
+	uint64_t soc_pa, retired_page, column;
+
+	channel_index =
+		adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+	/* translate umc channel address to soc pa, 3 parts are included */
+	soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
+			ADDR_OF_256B_BLOCK(channel_index) |
+			OFFSET_IN_256B_BLOCK(err_addr);
+
+	/* The umc channel bits are not original values, they are hashed */
+	SET_CHANNEL_HASH(channel_index, soc_pa);
+
+	/* clear [C4 C3 C2] in soc physical address */
+	soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
+
+	/* loop for all possibilities of [C4 C3 C2] */
+	for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
+		retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
+		dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
+		amdgpu_umc_fill_error_record(err_data, err_addr,
+			retired_page, channel_index, umc_inst);
+
+		/* shift R14 bit */
+		retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
+		dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
+		amdgpu_umc_fill_error_record(err_data, err_addr,
+			retired_page, channel_index, umc_inst);
+	}
+}
+
 static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
 					struct ras_err_data *err_data,
 					uint32_t ch_inst,
 					uint32_t umc_inst)
 {
-	uint64_t mc_umc_status, err_addr, soc_pa, retired_page, column;
-	uint32_t channel_index;
+	uint64_t mc_umc_status, err_addr;
 	uint32_t eccinfo_table_idx;
 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
 	eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
-	channel_index =
-		adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
-
 	mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
 
 	if (mc_umc_status == 0)
@@ -209,42 +240,15 @@ static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
 	if (!err_data->err_addr)
 		return;
 
-	/* calculate error address if ue/ce error is detected */
+	/* calculate error address if ue error is detected */
 	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
-	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
-	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
 
 		err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
 		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
 
-		/* translate umc channel address to soc pa, 3 parts are included */
-		soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
-				ADDR_OF_256B_BLOCK(channel_index) |
-				OFFSET_IN_256B_BLOCK(err_addr);
-
-		/* The umc channel bits are not original values, they are hashed */
-		SET_CHANNEL_HASH(channel_index, soc_pa);
-
-		/* clear [C4 C3 C2] in soc physical address */
-		soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
-
-		/* we only save ue error information currently, ce is skipped */
-		if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
-				== 1) {
-			/* loop for all possibilities of [C4 C3 C2] */
-			for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
-				retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
-				dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
-				amdgpu_umc_fill_error_record(err_data, err_addr,
-					retired_page, channel_index, umc_inst);
-
-				/* shift R14 bit */
-				retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
-				dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
-				amdgpu_umc_fill_error_record(err_data, err_addr,
-					retired_page, channel_index, umc_inst);
-			}
-		}
+		umc_v6_7_convert_error_address(adev, err_data, err_addr,
+					ch_inst, umc_inst);
 	}
 }
 
@@ -453,81 +457,40 @@ static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
 					 struct ras_err_data *err_data,
 					 uint32_t umc_reg_offset, uint32_t ch_inst,
-					 uint32_t umc_inst, uint64_t mca_addr)
+					 uint32_t umc_inst)
 {
 	uint32_t mc_umc_status_addr;
-	uint32_t channel_index;
-	uint64_t mc_umc_status = 0, mc_umc_addrt0;
-	uint64_t err_addr, soc_pa, retired_page, column;
+	uint64_t mc_umc_status = 0, mc_umc_addrt0, err_addr;
 
-	if (mca_addr == UMC_INVALID_ADDR) {
-		mc_umc_status_addr =
-			SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
-		mc_umc_addrt0 =
-			SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
+	mc_umc_status_addr =
+		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
+	mc_umc_addrt0 =
+		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
 
-		mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
 
-		if (mc_umc_status == 0)
-			return;
+	if (mc_umc_status == 0)
+		return;
 
-		if (!err_data->err_addr) {
-			/* clear umc status */
-			WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
-			return;
-		}
+	if (!err_data->err_addr) {
+		/* clear umc status */
+		WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+		return;
 	}
 
-	channel_index =
-		adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
-
-	/* calculate error address if ue/ce error is detected */
-	if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
-	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
-	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) ||
-	    mca_addr != UMC_INVALID_ADDR) {
-		if (mca_addr == UMC_INVALID_ADDR) {
-			err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
-			err_addr =
-				REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
-		} else {
-			err_addr = mca_addr;
-		}
+	/* calculate error address if ue error is detected */
+	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
+		err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
+		err_addr =
+			REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
 
-		/* translate umc channel address to soc pa, 3 parts are included */
-		soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
-				ADDR_OF_256B_BLOCK(channel_index) |
-				OFFSET_IN_256B_BLOCK(err_addr);
-
-		/* The umc channel bits are not original values, they are hashed */
-		SET_CHANNEL_HASH(channel_index, soc_pa);
-
-		/* clear [C4 C3 C2] in soc physical address */
-		soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
-
-		/* we only save ue error information currently, ce is skipped */
-		if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
-				== 1 ||
-		    mca_addr != UMC_INVALID_ADDR) {
-			/* loop for all possibilities of [C4 C3 C2] */
-			for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
-				retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
-				dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
-				amdgpu_umc_fill_error_record(err_data, err_addr,
-					retired_page, channel_index, umc_inst);
-
-				/* shift R14 bit */
-				retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
-				dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
-				amdgpu_umc_fill_error_record(err_data, err_addr,
-					retired_page, channel_index, umc_inst);
-			}
-		}
+		umc_v6_7_convert_error_address(adev, err_data, err_addr,
+					ch_inst, umc_inst);
 	}
 
 	/* clear umc status */
-	if (mca_addr == UMC_INVALID_ADDR)
-		WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+	WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
 }
 
 static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
@@ -549,7 +512,7 @@ static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
 		umc_v6_7_query_error_address(adev,
 					err_data,
 					umc_reg_offset, ch_inst,
-					umc_inst, UMC_INVALID_ADDR);
+					umc_inst);
 	}
 }
 
@@ -590,5 +553,5 @@ struct amdgpu_umc_ras umc_v6_7_ras = {
 	.query_ras_poison_mode = umc_v6_7_query_ras_poison_mode,
 	.ecc_info_query_ras_error_count = umc_v6_7_ecc_info_query_ras_error_count,
 	.ecc_info_query_ras_error_address = umc_v6_7_ecc_info_query_ras_error_address,
-	.convert_ras_error_address = umc_v6_7_query_error_address,
+	.convert_ras_error_address = umc_v6_7_convert_error_address,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
index a8cbda81828d..91235df54e22 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
@@ -208,7 +208,10 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
 {
 	uint64_t mc_umc_status_addr;
 	uint64_t mc_umc_status, err_addr;
-	uint32_t channel_index;
+	uint64_t mc_umc_addrt0, na_err_addr_base;
+	uint64_t na_err_addr, retired_page_addr;
+	uint32_t channel_index, addr_lsb, col = 0;
+	int ret = 0;
 
 	mc_umc_status_addr =
 		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
@@ -229,13 +232,10 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
 			umc_inst * adev->umc.channel_inst_num +
 			ch_inst];
 
-	/* calculate error address if ue/ce error is detected */
+	/* calculate error address if ue error is detected */
 	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
 	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 &&
-	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
-	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
-		uint32_t addr_lsb;
-		uint64_t mc_umc_addrt0;
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
 
 		mc_umc_addrt0 = SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
 		err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
@@ -243,32 +243,24 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
 		/* the lowest lsb bits should be ignored */
 		addr_lsb = REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrLsb);
-
 		err_addr &= ~((0x1ULL << addr_lsb) - 1);
-
-		/* we only save ue error information currently, ce is skipped */
-		if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
-			uint64_t na_err_addr_base = err_addr & ~(0x3ULL << UMC_V8_10_NA_C5_BIT);
-			uint64_t na_err_addr, retired_page_addr;
-			uint32_t col = 0;
-			int ret = 0;
-
-			/* loop for all possibilities of [C6 C5] in normal address. */
-			for (col = 0; col < UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; col++) {
-				na_err_addr = na_err_addr_base | (col << UMC_V8_10_NA_C5_BIT);
-
-				/* Mapping normal error address to retired soc physical address. */
-				ret = umc_v8_10_swizzle_mode_na_to_pa(adev, channel_index,
-								na_err_addr, &retired_page_addr);
-				if (ret) {
-					dev_err(adev->dev, "Failed to map pa from umc na.\n");
-					break;
-				}
-				dev_info(adev->dev, "Error Address(PA): 0x%llx\n",
-					retired_page_addr);
-				amdgpu_umc_fill_error_record(err_data, na_err_addr,
-					retired_page_addr, channel_index, umc_inst);
+		na_err_addr_base = err_addr & ~(0x3ULL << UMC_V8_10_NA_C5_BIT);
+
+		/* loop for all possibilities of [C6 C5] in normal address. */
+		for (col = 0; col < UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; col++) {
+			na_err_addr = na_err_addr_base | (col << UMC_V8_10_NA_C5_BIT);
+
+			/* Mapping normal error address to retired soc physical address. */
+			ret = umc_v8_10_swizzle_mode_na_to_pa(adev, channel_index,
+							na_err_addr, &retired_page_addr);
+			if (ret) {
+				dev_err(adev->dev, "Failed to map pa from umc na.\n");
+				break;
 			}
+			dev_info(adev->dev, "Error Address(PA): 0x%llx\n",
+				retired_page_addr);
+			amdgpu_umc_fill_error_record(err_data, na_err_addr,
+				retired_page_addr, channel_index, umc_inst);
 		}
 	}
@@ -338,6 +330,31 @@ static void umc_v8_10_err_cnt_init(struct amdgpu_device *adev)
 	}
 }
 
+static uint32_t umc_v8_10_query_ras_poison_mode_per_channel(
+						struct amdgpu_device *adev,
+						uint32_t umc_reg_offset)
+{
+	uint32_t ecc_ctrl_addr, ecc_ctrl;
+
+	ecc_ctrl_addr =
+		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccCtrl);
+	ecc_ctrl = RREG32_PCIE((ecc_ctrl_addr +
+					umc_reg_offset) * 4);
+
+	return REG_GET_FIELD(ecc_ctrl, UMCCH0_0_GeccCtrl, UCFatalEn);
+}
+
+static bool umc_v8_10_query_ras_poison_mode(struct amdgpu_device *adev)
+{
+	uint32_t umc_reg_offset = 0;
+
+	/* Enabling fatal error in umc node0 instance0 channel0 will be
+	 * considered as fatal error mode
+	 */
+	umc_reg_offset = get_umc_v8_10_reg_offset(adev, 0, 0, 0);
+	return !umc_v8_10_query_ras_poison_mode_per_channel(adev, umc_reg_offset);
+}
+
 const struct amdgpu_ras_block_hw_ops umc_v8_10_ras_hw_ops = {
 	.query_ras_error_count = umc_v8_10_query_ras_error_count,
 	.query_ras_error_address = umc_v8_10_query_ras_error_address,
@@ -348,4 +365,5 @@ struct amdgpu_umc_ras umc_v8_10_ras = {
 		.hw_ops = &umc_v8_10_ras_hw_ops,
 	},
 	.err_cnt_init = umc_v8_10_err_cnt_init,
+	.query_ras_poison_mode = umc_v8_10_query_ras_poison_mode,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
index f35253e0eaa6..b717fdaa46e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
@@ -108,20 +108,35 @@ static void umc_v8_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
 	}
 }
 
+static void umc_v8_7_convert_error_address(struct amdgpu_device *adev,
+					struct ras_err_data *err_data, uint64_t err_addr,
+					uint32_t ch_inst, uint32_t umc_inst)
+{
+	uint64_t retired_page;
+	uint32_t channel_index;
+
+	channel_index =
+		adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+
+	/* translate umc channel address to soc pa, 3 parts are included */
+	retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
+			ADDR_OF_256B_BLOCK(channel_index) |
+			OFFSET_IN_256B_BLOCK(err_addr);
+
+	amdgpu_umc_fill_error_record(err_data, err_addr,
+				retired_page, channel_index, umc_inst);
+}
+
 static void umc_v8_7_ecc_info_query_error_address(struct amdgpu_device *adev,
 					struct ras_err_data *err_data,
 					uint32_t ch_inst,
 					uint32_t umc_inst)
 {
-	uint64_t mc_umc_status, err_addr, retired_page;
-	uint32_t channel_index;
+	uint64_t mc_umc_status, err_addr;
 	uint32_t eccinfo_table_idx;
 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
 	eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
-	channel_index =
-		adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
-
 	mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
 
 	if (mc_umc_status == 0)
@@ -130,24 +145,15 @@ static void umc_v8_7_ecc_info_query_error_address(struct amdgpu_device *adev,
 	if (!err_data->err_addr)
 		return;
 
-	/* calculate error address if ue/ce error is detected */
+	/* calculate error address if ue error is detected */
 	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
-	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
-	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
 
 		err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
 		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
 
-		/* translate umc channel address to soc pa, 3 parts are included */
-		retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
-				ADDR_OF_256B_BLOCK(channel_index) |
-				OFFSET_IN_256B_BLOCK(err_addr);
-
-		/* we only save ue error information currently, ce is skipped */
-		if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
-				== 1)
-			amdgpu_umc_fill_error_record(err_data, err_addr,
-					retired_page, channel_index, umc_inst);
+		umc_v8_7_convert_error_address(adev, err_data, err_addr,
+						ch_inst, umc_inst);
 	}
 }
 
@@ -324,14 +330,12 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
 					 uint32_t umc_inst)
 {
 	uint32_t lsb, mc_umc_status_addr;
-	uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
-	uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+	uint64_t mc_umc_status, err_addr, mc_umc_addrt0;
 
 	mc_umc_status_addr =
 		SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
 	mc_umc_addrt0 =
 		SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0);
-
 	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
 
 	if (mc_umc_status == 0)
@@ -343,10 +347,9 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
 		return;
 	}
 
-	/* calculate error address if ue/ce error is detected */
+	/* calculate error address if ue error is detected */
 	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
-	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
-	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
 		err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
 
 		/* the lowest lsb bits should be ignored */
@@ -354,16 +357,8 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
 		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
 		err_addr &= ~((0x1ULL << lsb) - 1);
 
-		/* translate umc channel address to soc pa, 3 parts are included */
-		retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
-				ADDR_OF_256B_BLOCK(channel_index) |
-				OFFSET_IN_256B_BLOCK(err_addr);
-
-		/* we only save ue error information currently, ce is skipped */
-		if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
-				== 1)
-			amdgpu_umc_fill_error_record(err_data, err_addr,
-					retired_page, channel_index, umc_inst);
+		umc_v8_7_convert_error_address(adev, err_data, err_addr,
+						ch_inst, umc_inst);
 	}
 
 	/* clear umc status */