From bc143d8b8387ff0a22e4ef8e2375e63aa24bc311 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 22 Nov 2021 10:57:20 +0800 Subject: drm/amd/pm: do not expose implementation details to other blocks out of power These implementation details (whether swsmu is supported, whether certain ppt_funcs are supported, access to internal statistics, ...) should be kept internal. It is not good practice, and is even error prone, to expose implementation details to other blocks. Signed-off-by: Evan Quan Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 7 ------- 1 file changed, 7 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h')
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index f851196c83a5..776c886fd94a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -47,12 +47,6 @@ enum amdgpu_gfx_pipe_priority { AMDGPU_GFX_PIPE_PRIO_HIGH = AMDGPU_RING_PRIO_2 }; -/* Argument for PPSMC_MSG_GpuChangeState */ -enum gfx_change_state { - sGpuChangeState_D0Entry = 1, - sGpuChangeState_D3Entry, -}; - #define AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM 0 #define AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM 15 @@ -410,5 +404,4 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev); -void amdgpu_gfx_state_change_set(struct amdgpu_device *adev, enum gfx_change_state state); #endif
-- cgit From 8b0fb0e967c1700bd729ae54b6f229501b8587ec Mon Sep 17 00:00:00 2001 From: yipechai Date: Tue, 4 Jan 2022 13:52:46 +0800 Subject: drm/amdgpu: Modify gfx block to fit for the unified ras block data and ops 1. Modify the gfx block to fit the unified ras block data and ops. 2. Change amdgpu_gfx_ras_funcs to amdgpu_gfx_ras, and drop the _funcs suffix from the corresponding variable name. 3. Remove the const qualifier from the gfx ras variable so that the gfx ras block can be inserted into the amdgpu device ras block linked list. 4. Invoke the amdgpu_ras_register_ras_block function to register the gfx ras block into the amdgpu device ras block linked list. 5. Remove the now-redundant gfx-specific code in amdgpu_ras.c after switching to the unified ras block. 6. Fill the unified ras block's .name, .block, .ras_late_init and .ras_fini for all gfx versions. If .ras_late_init and .ras_fini have been defined by the selected gfx version, those functions take effect; if not, they default to amdgpu_gfx_ras_late_init and amdgpu_gfx_ras_fini.
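For reference, the conversion pattern this series establishes can be condensed as follows. This is an illustrative C sketch distilled from the gfx_v9_0 hunks below, not literal patch content; "gfx_vN" is a placeholder for any gfx version, and the error handling of the real registration call is elided:

	static const struct amdgpu_ras_block_hw_ops gfx_vN_ras_ops = {
		.ras_error_inject	= &gfx_vN_ras_error_inject,
		.query_ras_error_count	= &gfx_vN_query_ras_error_count,
		.reset_ras_error_count	= &gfx_vN_reset_ras_error_count,
	};

	static struct amdgpu_gfx_ras gfx_vN_ras = {
		.ras_block = {
			.hw_ops = &gfx_vN_ras_ops,
		},
	};

	/* In the gfx early-init path: publish the block and hook it into
	 * the device-wide ras block list. */
	adev->gfx.ras = &gfx_vN_ras;
	amdgpu_ras_register_ras_block(adev, &adev->gfx.ras->ras_block);
	strcpy(adev->gfx.ras->ras_block.name, "gfx");
	adev->gfx.ras->ras_block.block = AMDGPU_RAS_BLOCK__GFX;

	/* Versions that define no .ras_late_init/.ras_fini of their own
	 * fall back to the common gfx implementations. */
	if (!adev->gfx.ras->ras_block.ras_late_init)
		adev->gfx.ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;
	if (!adev->gfx.ras->ras_block.ras_fini)
		adev->gfx.ras->ras_block.ras_fini = amdgpu_gfx_ras_fini;

Callers then dispatch through the unified object, e.g. adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data), instead of the per-IP adev->gfx.ras_funcs table that the diffs below remove.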
Signed-off-by: yipechai Reviewed-by: Hawking Zhang Reviewed-by: John Clements Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 8 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 17 +++------ drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 63 +++++++++++++++++++++----------- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 65 +++++++++++++++++++++------------ drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c | 24 +++++++----- drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 25 +++++++------ drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h | 2 +- 8 files changed, 123 insertions(+), 83 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 3d8f82dc8c97..43004822ec6f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -622,7 +622,7 @@ int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value) return r; } -int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev) +int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, void *ras_info) { int r; struct ras_fs_if fs_info = { @@ -695,9 +695,9 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, */ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) { kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); - if (adev->gfx.ras_funcs && - adev->gfx.ras_funcs->query_ras_error_count) - adev->gfx.ras_funcs->query_ras_error_count(adev, err_data); + if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops && + adev->gfx.ras->ras_block.hw_ops->query_ras_error_count) + adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data); amdgpu_ras_reset_gpu(adev); } return AMDGPU_RAS_SUCCESS; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 776c886fd94a..f99eac544f6d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -31,6 +31,7 @@ #include "amdgpu_ring.h" #include "amdgpu_rlc.h" #include "soc15.h" +#include "amdgpu_ras.h" /* GFX current status */ #define AMDGPU_GFX_NORMAL_MODE 0x00000000L @@ -198,16 +199,8 @@ struct amdgpu_cu_info { uint32_t bitmap[4][4]; }; -struct amdgpu_gfx_ras_funcs { - int (*ras_late_init)(struct amdgpu_device *adev); - void (*ras_fini)(struct amdgpu_device *adev); - int (*ras_error_inject)(struct amdgpu_device *adev, - void *inject_if); - int (*query_ras_error_count)(struct amdgpu_device *adev, - void *ras_error_status); - void (*reset_ras_error_count)(struct amdgpu_device *adev); - void (*query_ras_error_status)(struct amdgpu_device *adev); - void (*reset_ras_error_status)(struct amdgpu_device *adev); +struct amdgpu_gfx_ras { + struct amdgpu_ras_block_object ras_block; void (*enable_watchdog_timer)(struct amdgpu_device *adev); }; @@ -331,7 +324,7 @@ struct amdgpu_gfx { /*ras */ struct ras_common_if *ras_if; - const struct amdgpu_gfx_ras_funcs *ras_funcs; + struct amdgpu_gfx_ras *ras; }; #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) @@ -393,7 +386,7 @@ bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me, int pipe, int queue); void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable); int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value); -int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev); +int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, void *ras_info); void amdgpu_gfx_ras_fini(struct 
amdgpu_device *adev); int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, void *err_data, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 5b3f4beb2149..a5812c21177e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -89,6 +89,8 @@ const char *get_ras_block_str(struct ras_common_if *ras_block) return ras_block_string[ras_block->block]; } +#define ras_block_str(_BLOCK_) (((_BLOCK_) < (sizeof(*ras_block_string)/sizeof(const char*))) ? ras_block_string[_BLOCK_] : "Out Of Range") + #define ras_err_str(i) (ras_error_string[ffs(i)]) #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS) @@ -962,6 +964,7 @@ static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_d int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info) { + struct amdgpu_ras_block_object* block_obj = NULL; struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); struct ras_err_data err_data = {0, 0, 0, NULL}; int i; @@ -969,6 +972,8 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, if (!obj) return -EINVAL; + block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0); + switch (info->head.block) { case AMDGPU_RAS_BLOCK__UMC: amdgpu_ras_get_ecc_info(adev, &err_data); @@ -981,13 +986,16 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, } break; case AMDGPU_RAS_BLOCK__GFX: - if (adev->gfx.ras_funcs && - adev->gfx.ras_funcs->query_ras_error_count) - adev->gfx.ras_funcs->query_ras_error_count(adev, &err_data); + if (!block_obj || !block_obj->hw_ops) { + dev_info(adev->dev, "%s doesn't config ras function \n", + get_ras_block_str(&info->head)); + return -EINVAL; + } + if (block_obj->hw_ops->query_ras_error_count) + block_obj->hw_ops->query_ras_error_count(adev, &err_data); - if (adev->gfx.ras_funcs && - adev->gfx.ras_funcs->query_ras_error_status) - adev->gfx.ras_funcs->query_ras_error_status(adev); + if (block_obj->hw_ops->query_ras_error_status) + block_obj->hw_ops->query_ras_error_status(adev); break; case AMDGPU_RAS_BLOCK__MMHUB: if (adev->mmhub.ras_funcs && @@ -1074,18 +1082,23 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, enum amdgpu_ras_block block) { + struct amdgpu_ras_block_object* block_obj = amdgpu_ras_get_ras_block(adev, block, 0); + if (!amdgpu_ras_is_supported(adev, block)) return -EINVAL; switch (block) { case AMDGPU_RAS_BLOCK__GFX: - if (adev->gfx.ras_funcs && - adev->gfx.ras_funcs->reset_ras_error_count) - adev->gfx.ras_funcs->reset_ras_error_count(adev); + if (!block_obj || !block_obj->hw_ops) { + dev_info(adev->dev, "%s doesn't config ras function \n", ras_block_str(block)); + return -EINVAL; + } + + if (block_obj->hw_ops->reset_ras_error_count) + block_obj->hw_ops->reset_ras_error_count(adev); - if (adev->gfx.ras_funcs && - adev->gfx.ras_funcs->reset_ras_error_status) - adev->gfx.ras_funcs->reset_ras_error_status(adev); + if (block_obj->hw_ops->reset_ras_error_status) + block_obj->hw_ops->reset_ras_error_status(adev); break; case AMDGPU_RAS_BLOCK__MMHUB: if (adev->mmhub.ras_funcs && @@ -1150,7 +1163,8 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, .address = info->address, .value = info->value, }; - int ret = 0; + int ret = -EINVAL; + struct amdgpu_ras_block_object* block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, info->head.sub_block_index); if (!obj) return -EINVAL; @@ -1164,11 +1178,13 @@ int 
amdgpu_ras_error_inject(struct amdgpu_device *adev, switch (info->head.block) { case AMDGPU_RAS_BLOCK__GFX: - if (adev->gfx.ras_funcs && - adev->gfx.ras_funcs->ras_error_inject) - ret = adev->gfx.ras_funcs->ras_error_inject(adev, info); - else - ret = -EINVAL; + if (!block_obj || !block_obj->hw_ops) { + dev_info(adev->dev, "%s doesn't config ras function \n", get_ras_block_str(&info->head)); + return -EINVAL; + } + + if (block_obj->hw_ops->ras_error_inject) + ret = block_obj->hw_ops->ras_error_inject(adev, info); break; case AMDGPU_RAS_BLOCK__UMC: case AMDGPU_RAS_BLOCK__SDMA: @@ -1800,15 +1816,20 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev) static void amdgpu_ras_error_status_query(struct amdgpu_device *adev, struct ras_query_if *info) { + struct amdgpu_ras_block_object* block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, info->head.sub_block_index); /* * Only two block need to query read/write * RspStatus at current state */ switch (info->head.block) { case AMDGPU_RAS_BLOCK__GFX: - if (adev->gfx.ras_funcs && - adev->gfx.ras_funcs->query_ras_error_status) - adev->gfx.ras_funcs->query_ras_error_status(adev); + if (!block_obj || !block_obj->hw_ops) { + dev_info(adev->dev, "%s doesn't config ras function \n", get_ras_block_str(&info->head)); + return ; + } + + if (block_obj->hw_ops->query_ras_error_status) + block_obj->hw_ops->query_ras_error_status(adev); break; case AMDGPU_RAS_BLOCK__MMHUB: if (adev->mmhub.ras_funcs && diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 9189fb85a4dd..d36a6bc62560 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -882,7 +882,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev); static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring); static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring); -static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, +static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status); static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, void *inject_if); @@ -2197,12 +2197,16 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = { .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q, }; -static const struct amdgpu_gfx_ras_funcs gfx_v9_0_ras_funcs = { - .ras_late_init = amdgpu_gfx_ras_late_init, - .ras_fini = amdgpu_gfx_ras_fini, - .ras_error_inject = &gfx_v9_0_ras_error_inject, - .query_ras_error_count = &gfx_v9_0_query_ras_error_count, - .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count, +const struct amdgpu_ras_block_hw_ops gfx_v9_0_ras_ops = { + .ras_error_inject = &gfx_v9_0_ras_error_inject, + .query_ras_error_count = &gfx_v9_0_query_ras_error_count, + .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count, +}; + +static struct amdgpu_gfx_ras gfx_v9_0_ras = { + .ras_block = { + .hw_ops = &gfx_v9_0_ras_ops, + }, }; static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) @@ -2231,7 +2235,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) DRM_INFO("fix gfx.config for vega12\n"); break; case IP_VERSION(9, 4, 0): - adev->gfx.ras_funcs = &gfx_v9_0_ras_funcs; + adev->gfx.ras = &gfx_v9_0_ras; adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; @@ -2258,7 +2262,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) 
gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN; break; case IP_VERSION(9, 4, 1): - adev->gfx.ras_funcs = &gfx_v9_4_ras_funcs; + adev->gfx.ras = &gfx_v9_4_ras; adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; @@ -2279,7 +2283,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) gb_addr_config |= 0x22010042; break; case IP_VERSION(9, 4, 2): - adev->gfx.ras_funcs = &gfx_v9_4_2_ras_funcs; + adev->gfx.ras = &gfx_v9_4_2_ras; adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; @@ -2298,6 +2302,25 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) break; } + if (adev->gfx.ras) { + err = amdgpu_ras_register_ras_block(adev, &adev->gfx.ras->ras_block); + if (err) { + DRM_ERROR("Failed to register gfx ras block!\n"); + return err; + } + + strcpy(adev->gfx.ras->ras_block.name,"gfx"); + adev->gfx.ras->ras_block.block = AMDGPU_RAS_BLOCK__GFX; + + /* If not define special ras_late_init function, use gfx default ras_late_init */ + if (!adev->gfx.ras->ras_block.ras_late_init) + adev->gfx.ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init; + + /* If not define special ras_fini function, use gfx default ras_fini */ + if (!adev->gfx.ras->ras_block.ras_fini) + adev->gfx.ras->ras_block.ras_fini = amdgpu_gfx_ras_fini; + } + adev->gfx.config.gb_addr_config = gb_addr_config; adev->gfx.config.gb_addr_config_fields.num_pipes = 1 << @@ -2513,9 +2536,8 @@ static int gfx_v9_0_sw_fini(void *handle) int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->gfx.ras_funcs && - adev->gfx.ras_funcs->ras_fini) - adev->gfx.ras_funcs->ras_fini(adev); + if (adev->gfx.ras && adev->gfx.ras->ras_block.ras_fini) + adev->gfx.ras->ras_block.ras_fini(adev); for (i = 0; i < adev->gfx.num_gfx_rings; i++) amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); @@ -4870,16 +4892,15 @@ static int gfx_v9_0_ecc_late_init(void *handle) if (r) return r; - if (adev->gfx.ras_funcs && - adev->gfx.ras_funcs->ras_late_init) { - r = adev->gfx.ras_funcs->ras_late_init(adev); + if (adev->gfx.ras && adev->gfx.ras->ras_block.ras_late_init) { + r = adev->gfx.ras->ras_block.ras_late_init(adev, NULL); if (r) return r; } - if (adev->gfx.ras_funcs && - adev->gfx.ras_funcs->enable_watchdog_timer) - adev->gfx.ras_funcs->enable_watchdog_timer(adev); + if (adev->gfx.ras && + adev->gfx.ras->enable_watchdog_timer) + adev->gfx.ras->enable_watchdog_timer(adev); return 0; } @@ -6819,7 +6840,7 @@ static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255); } -static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, +static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; @@ -6828,7 +6849,7 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, uint32_t reg_value; if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) - return -EINVAL; + return; err_data->ue_count = 0; err_data->ce_count = 0; @@ -6857,8 +6878,6 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, mutex_unlock(&adev->grbm_idx_mutex); gfx_v9_0_query_utc_edc_status(adev, err_data); - - return 0; } static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c index 
b4789dfc2bb9..c67e387a97f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c @@ -863,7 +863,7 @@ static int gfx_v9_4_ras_error_count(struct amdgpu_device *adev, return 0; } -static int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, +static void gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; @@ -872,7 +872,7 @@ static int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, uint32_t reg_value; if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) - return -EINVAL; + return; err_data->ue_count = 0; err_data->ce_count = 0; @@ -903,7 +903,6 @@ static int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, gfx_v9_4_query_utc_edc_status(adev, err_data); - return 0; } static void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev) @@ -1029,11 +1028,16 @@ static void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev) mutex_unlock(&adev->grbm_idx_mutex); } -const struct amdgpu_gfx_ras_funcs gfx_v9_4_ras_funcs = { - .ras_late_init = amdgpu_gfx_ras_late_init, - .ras_fini = amdgpu_gfx_ras_fini, - .ras_error_inject = &gfx_v9_4_ras_error_inject, - .query_ras_error_count = &gfx_v9_4_query_ras_error_count, - .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count, - .query_ras_error_status = &gfx_v9_4_query_ras_error_status, + +const struct amdgpu_ras_block_hw_ops gfx_v9_4_ras_ops = { + .ras_error_inject = &gfx_v9_4_ras_error_inject, + .query_ras_error_count = &gfx_v9_4_query_ras_error_count, + .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count, + .query_ras_error_status = &gfx_v9_4_query_ras_error_status, +}; + +struct amdgpu_gfx_ras gfx_v9_4_ras = { + .ras_block = { + .hw_ops = &gfx_v9_4_ras_ops, + }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h index bdd16b568021..ca520a767267 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h @@ -24,6 +24,6 @@ #ifndef __GFX_V9_4_H__ #define __GFX_V9_4_H__ -extern const struct amdgpu_gfx_ras_funcs gfx_v9_4_ras_funcs; +extern struct amdgpu_gfx_ras gfx_v9_4_ras; #endif /* __GFX_V9_4_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c index c4f37a161875..7ec6243e015e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c @@ -1641,14 +1641,14 @@ static int gfx_v9_4_2_query_utc_edc_count(struct amdgpu_device *adev, return 0; } -static int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev, +static void gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; uint32_t sec_count = 0, ded_count = 0; if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) - return -EINVAL; + return; err_data->ue_count = 0; err_data->ce_count = 0; @@ -1661,7 +1661,6 @@ static int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev, err_data->ce_count += sec_count; err_data->ue_count += ded_count; - return 0; } static void gfx_v9_4_2_reset_utc_err_status(struct amdgpu_device *adev) @@ -1931,13 +1930,17 @@ static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev) mutex_unlock(&adev->grbm_idx_mutex); } -const struct amdgpu_gfx_ras_funcs gfx_v9_4_2_ras_funcs = { - .ras_late_init = amdgpu_gfx_ras_late_init, - .ras_fini = amdgpu_gfx_ras_fini, - .ras_error_inject = 
&gfx_v9_4_2_ras_error_inject, - .query_ras_error_count = &gfx_v9_4_2_query_ras_error_count, - .reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count, - .query_ras_error_status = &gfx_v9_4_2_query_ras_error_status, - .reset_ras_error_status = &gfx_v9_4_2_reset_ras_error_status, +struct amdgpu_ras_block_hw_ops gfx_v9_4_2_ras_ops ={ + .ras_error_inject = &gfx_v9_4_2_ras_error_inject, + .query_ras_error_count = &gfx_v9_4_2_query_ras_error_count, + .reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count, + .query_ras_error_status = &gfx_v9_4_2_query_ras_error_status, + .reset_ras_error_status = &gfx_v9_4_2_reset_ras_error_status, +}; + +struct amdgpu_gfx_ras gfx_v9_4_2_ras = { + .ras_block = { + .hw_ops = &gfx_v9_4_2_ras_ops, + }, .enable_watchdog_timer = &gfx_v9_4_2_enable_watchdog_timer, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h index 6db1f88509af..7584624b641c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h @@ -31,6 +31,6 @@ void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev, void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev); int gfx_v9_4_2_do_edc_gpr_workarounds(struct amdgpu_device *adev); -extern const struct amdgpu_gfx_ras_funcs gfx_v9_4_2_ras_funcs; +extern struct amdgpu_gfx_ras gfx_v9_4_2_ras; #endif /* __GFX_V9_4_2_H__ */ -- cgit From 4e9b1fa5a2757d11a5c40eed2b2b4837dcb2f12e Mon Sep 17 00:00:00 2001 From: yipechai Date: Mon, 14 Feb 2022 14:12:55 +0800 Subject: drm/amdgpu: Modify .ras_late_init function pointer parameter Modify .ras_late_init function pointer parameter so that it can remove redundant intermediate calls in some ras blocks. Signed-off-by: yipechai Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 2 +- drivers/gpu/drm/amd/amdgpu/mca_v3_0.c | 6 +++--- 15 files changed, 17 insertions(+), 17 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index fe392108b5c2..b7470ed7bc25 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -622,7 +622,7 @@ int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value) return r; } -int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, void *ras_info) +int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) { int r; r = amdgpu_ras_block_late_init(adev, adev->gfx.ras_if); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index f99eac544f6d..ccca0a85b982 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -386,7 +386,7 @@ bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me, int pipe, int queue); void amdgpu_gfx_off_ctrl(struct 
amdgpu_device *adev, bool enable); int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value); -int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, void *ras_info); +int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); void amdgpu_gfx_ras_fini(struct amdgpu_device *adev); int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, void *err_data, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c index 21a5f884dd2a..70a096160998 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c @@ -24,7 +24,7 @@ #include "amdgpu.h" #include "amdgpu_ras.h" -int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev, void *ras_info) +int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) { return amdgpu_ras_block_late_init(adev, adev->hdp.ras_if); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h index 4af2c2a322e7..aabd59aa5213 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h @@ -43,6 +43,6 @@ struct amdgpu_hdp { struct amdgpu_hdp_ras *ras; }; -int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev, void *ras_info); +int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); void amdgpu_hdp_ras_fini(struct amdgpu_device *adev); #endif /* __AMDGPU_HDP_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c index 2bdb4d8b7955..ede98db8c126 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c @@ -24,7 +24,7 @@ #include "amdgpu.h" #include "amdgpu_ras.h" -int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev, void *ras_info) +int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) { return amdgpu_ras_block_late_init(adev, adev->mmhub.ras_if); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h index 7deda9a3b81e..75815106f2d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h @@ -47,7 +47,7 @@ struct amdgpu_mmhub { struct amdgpu_mmhub_ras *ras; }; -int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev, void *ras_info); +int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c index 89e61fdd3580..92fd4ffa7779 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c @@ -22,7 +22,7 @@ #include "amdgpu.h" #include "amdgpu_ras.h" -int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, void *ras_info) +int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) { int r; r = amdgpu_ras_block_late_init(adev, adev->nbio.ras_if); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h index 4afb76d3cd97..f9546c7341b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -104,6 +104,6 @@ struct amdgpu_nbio { struct amdgpu_nbio_ras *ras; }; -int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, void *ras_info); +int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); void 
amdgpu_nbio_ras_fini(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 5de567c6a8f7..837d1b79a9cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -490,7 +490,7 @@ struct amdgpu_ras_block_object { int (*ras_block_match)(struct amdgpu_ras_block_object *block_obj, enum amdgpu_ras_block block, uint32_t sub_block_index); - int (*ras_late_init)(struct amdgpu_device *adev, void *ras_info); + int (*ras_late_init)(struct amdgpu_device *adev, struct ras_common_if *ras_block); void (*ras_fini)(struct amdgpu_device *adev); ras_ih_cb ras_cb; const struct amdgpu_ras_block_hw_ops *hw_ops; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index 242a7b4dcad9..594454dba4c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -87,7 +87,7 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, } int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev, - void *ras_ih_info) + struct ras_common_if *ras_block) { int r, i; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index eaee12ab6518..8b226ffee32c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -117,7 +117,7 @@ amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring); int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index); uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, unsigned vmid); int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev, - void *ras_ih_info); + struct ras_common_if *ras_block); void amdgpu_sdma_ras_fini(struct amdgpu_device *adev); int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev, void *err_data, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index 9f1406e1a48a..7abf9299e0d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -136,7 +136,7 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, true); } -int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, void *ras_info) +int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) { int r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index ec15b3640399..e4b3678a6685 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -72,7 +72,7 @@ struct amdgpu_umc { struct amdgpu_umc_ras *ras; }; -int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, void *ras_info); +int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); void amdgpu_umc_ras_fini(struct amdgpu_device *adev); int amdgpu_umc_poison_handler(struct amdgpu_device *adev, void *ras_error_status, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index a785b1e088cd..91f788f6f6b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -732,7 +732,7 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev) return psp_xgmi_terminate(&adev->psp); } -static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, void *ras_info) +static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct 
ras_common_if *ras_block) { if (!adev->gmc.xgmi.supported || adev->gmc.xgmi.num_physical_nodes == 0) diff --git a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c index c442b34b9472..72ce19acb8bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c @@ -37,7 +37,7 @@ static void mca_v3_0_mp0_query_ras_error_count(struct amdgpu_device *adev, ras_error_status); } -static int mca_v3_0_mp0_ras_late_init(struct amdgpu_device *adev, void *ras_info) +static int mca_v3_0_mp0_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) { return amdgpu_mca_ras_late_init(adev, &adev->mca.mp0); } @@ -89,7 +89,7 @@ static void mca_v3_0_mp1_query_ras_error_count(struct amdgpu_device *adev, ras_error_status); } -static int mca_v3_0_mp1_ras_late_init(struct amdgpu_device *adev, void *ras_info) +static int mca_v3_0_mp1_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) { return amdgpu_mca_ras_late_init(adev, &adev->mca.mp1); } @@ -127,7 +127,7 @@ static void mca_v3_0_mpio_query_ras_error_count(struct amdgpu_device *adev, ras_error_status); } -static int mca_v3_0_mpio_ras_late_init(struct amdgpu_device *adev, void *ras_info) +static int mca_v3_0_mpio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) { return amdgpu_mca_ras_late_init(adev, &adev->mca.mpio); } -- cgit From 01d468d9a420152e4a1270992e69a37ea0c98e04 Mon Sep 17 00:00:00 2001 From: yipechai Date: Thu, 17 Feb 2022 14:52:20 +0800 Subject: drm/amdgpu: Modify .ras_fini function pointer parameter Modify .ras_fini function pointer parameter so that we can remove redundant intermediate calls in some ras blocks. Signed-off-by: yipechai Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 8 ++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/mca_v3_0.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/soc15.c | 2 +- 19 files changed, 24 insertions(+), 24 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 52912b6bcb20..d020c4599433 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -644,7 +644,7 @@ late_fini: return r; } -void amdgpu_gfx_ras_fini(struct amdgpu_device *adev) +void amdgpu_gfx_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block) { if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) && adev->gfx.ras_if) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index ccca0a85b982..f7c50ab4589c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -387,7 +387,7 @@ bool 
amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me, void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable); int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value); int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); -void amdgpu_gfx_ras_fini(struct amdgpu_device *adev); +void amdgpu_gfx_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block); int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, void *err_data, struct amdgpu_iv_entry *entry); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 78498d1d1769..350fbfec1e03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -455,16 +455,16 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev) void amdgpu_gmc_ras_fini(struct amdgpu_device *adev) { if (adev->umc.ras && adev->umc.ras->ras_block.ras_fini) - adev->umc.ras->ras_block.ras_fini(adev); + adev->umc.ras->ras_block.ras_fini(adev, NULL); if (adev->mmhub.ras && adev->mmhub.ras->ras_block.ras_fini) - adev->mmhub.ras->ras_block.ras_fini(adev); + adev->mmhub.ras->ras_block.ras_fini(adev, NULL); if (adev->gmc.xgmi.ras && adev->gmc.xgmi.ras->ras_block.ras_fini) - adev->gmc.xgmi.ras->ras_block.ras_fini(adev); + adev->gmc.xgmi.ras->ras_block.ras_fini(adev, NULL); if (adev->hdp.ras && adev->hdp.ras->ras_block.ras_fini) - adev->hdp.ras->ras_block.ras_fini(adev); + adev->hdp.ras->ras_block.ras_fini(adev, NULL); } /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c index b7fbc114a175..0f224e21cd55 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c @@ -24,7 +24,7 @@ #include "amdgpu.h" #include "amdgpu_ras.h" -void amdgpu_hdp_ras_fini(struct amdgpu_device *adev) +void amdgpu_hdp_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block) { if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP) && adev->hdp.ras_if) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h index aabd59aa5213..c05cd992ef8a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h @@ -44,5 +44,5 @@ struct amdgpu_hdp { }; int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); -void amdgpu_hdp_ras_fini(struct amdgpu_device *adev); +void amdgpu_hdp_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block); #endif /* __AMDGPU_HDP_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c index 42413813765a..6dfcedcc37fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c @@ -24,7 +24,7 @@ #include "amdgpu.h" #include "amdgpu_ras.h" -void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev) +void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block) { if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) && adev->mmhub.ras_if) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h index 240b26d9a388..253f047379cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h @@ -47,6 +47,6 @@ struct amdgpu_mmhub { struct amdgpu_mmhub_ras *ras; }; -void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev); +void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev, struct ras_common_if 
*ras_block); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c index f09ad80f0772..0de2fdf31eed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c @@ -44,7 +44,7 @@ late_fini: return r; } -void amdgpu_nbio_ras_fini(struct amdgpu_device *adev) +void amdgpu_nbio_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block) { if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF) && adev->nbio.ras_if) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h index f9546c7341b8..3222e1cae134 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -105,5 +105,5 @@ struct amdgpu_nbio { }; int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); -void amdgpu_nbio_ras_fini(struct amdgpu_device *adev); +void amdgpu_nbio_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 143a83043d7c..7cddaad90d6d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -491,7 +491,7 @@ struct amdgpu_ras_block_object { int (*ras_block_match)(struct amdgpu_ras_block_object *block_obj, enum amdgpu_ras_block block, uint32_t sub_block_index); int (*ras_late_init)(struct amdgpu_device *adev, struct ras_common_if *ras_block); - void (*ras_fini)(struct amdgpu_device *adev); + void (*ras_fini)(struct amdgpu_device *adev, struct ras_common_if *ras_block); ras_ih_cb ras_cb; const struct amdgpu_ras_block_hw_ops *hw_ops; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index 3b5c43575aa3..863035a94bd8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -111,7 +111,7 @@ late_fini: return r; } -void amdgpu_sdma_ras_fini(struct amdgpu_device *adev) +void amdgpu_sdma_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block) { if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) && adev->sdma.ras_if) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index 8b226ffee32c..34ec60dfe5e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -118,7 +118,7 @@ int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index); uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, unsigned vmid); int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); -void amdgpu_sdma_ras_fini(struct amdgpu_device *adev); +void amdgpu_sdma_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block); int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev, void *err_data, struct amdgpu_iv_entry *entry); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index 9400260e3263..41e976733c57 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -162,7 +162,7 @@ late_fini: return r; } -void amdgpu_umc_ras_fini(struct amdgpu_device *adev) +void amdgpu_umc_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block) { if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) && adev->umc.ras_if) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index e4b3678a6685..c8deba8dacb5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -73,7 +73,7 @@ struct amdgpu_umc { }; int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); -void amdgpu_umc_ras_fini(struct amdgpu_device *adev); +void amdgpu_umc_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block); int amdgpu_umc_poison_handler(struct amdgpu_device *adev, void *ras_error_status, bool reset); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 91817a31f3e1..3e56adea84a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -768,7 +768,7 @@ static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_comm return amdgpu_ras_block_late_init(adev, ras_block); } -static void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev) +static void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block) { if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) && adev->gmc.xgmi.ras_if) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 1997f129db9c..3ecb238c6483 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2433,7 +2433,7 @@ static int gfx_v9_0_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->gfx.ras && adev->gfx.ras->ras_block.ras_fini) - adev->gfx.ras->ras_block.ras_fini(adev); + adev->gfx.ras->ras_block.ras_fini(adev, NULL); for (i = 0; i < adev->gfx.num_gfx_rings; i++) amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); diff --git a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c index b4b36899f5c6..02c50be19d3b 100644 --- a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c @@ -37,7 +37,7 @@ static void mca_v3_0_mp0_query_ras_error_count(struct amdgpu_device *adev, ras_error_status); } -static void mca_v3_0_mp0_ras_fini(struct amdgpu_device *adev) +static void mca_v3_0_mp0_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block) { amdgpu_mca_ras_fini(adev, &adev->mca.mp0); } @@ -83,7 +83,7 @@ static void mca_v3_0_mp1_query_ras_error_count(struct amdgpu_device *adev, ras_error_status); } -static void mca_v3_0_mp1_ras_fini(struct amdgpu_device *adev) +static void mca_v3_0_mp1_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block) { amdgpu_mca_ras_fini(adev, &adev->mca.mp1); } @@ -115,7 +115,7 @@ static void mca_v3_0_mpio_query_ras_error_count(struct amdgpu_device *adev, ras_error_status); } -static void mca_v3_0_mpio_ras_fini(struct amdgpu_device *adev) +static void mca_v3_0_mpio_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block) { amdgpu_mca_ras_fini(adev, &adev->mca.mpio); } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index e26c39fcd336..d0a9012e53d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1997,7 +1997,7 @@ static int sdma_v4_0_sw_fini(void *handle) if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops && adev->sdma.ras->ras_block.ras_fini) - adev->sdma.ras->ras_block.ras_fini(adev); + adev->sdma.ras->ras_block.ras_fini(adev, NULL); for (i = 0; i < adev->sdma.num_instances; i++) { amdgpu_ring_fini(&adev->sdma.instance[i].ring); diff --git 
a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index b7c24149a6bd..34cd5cad7da5 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1215,7 +1215,7 @@ static int soc15_common_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->nbio.ras && adev->nbio.ras->ras_block.ras_fini) - adev->nbio.ras->ras_block.ras_fini(adev); + adev->nbio.ras->ras_block.ras_fini(adev, NULL); if (adev->df.funcs && adev->df.funcs->sw_fini)
-- cgit From 35366481d0941e9b470ccf09d85407381b5d6135 Mon Sep 17 00:00:00 2001 From: yipechai Date: Mon, 14 Feb 2022 15:59:35 +0800 Subject: drm/amdgpu: Remove redundant calls of amdgpu_ras_block_late_fini in gfx ras block Remove redundant calls of amdgpu_ras_block_late_fini in the gfx ras block. Signed-off-by: yipechai Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 7 ------- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 1 - drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2 +- 3 files changed, 1 insertion(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h')
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 40f7e29aa9ca..8fe939976224 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -644,13 +644,6 @@ late_fini: return r; } -void amdgpu_gfx_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block) -{ - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) && - ras_block) - amdgpu_ras_block_late_fini(adev, ras_block); -} - int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, void *err_data, struct amdgpu_iv_entry *entry)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index f7c50ab4589c..dcb3c7871c73 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -387,7 +387,6 @@ bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me, void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable); int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value); int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); -void amdgpu_gfx_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block); int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, void *err_data, struct amdgpu_iv_entry *entry)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index dc6e6fe6c978..f0cc073e6bb0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2206,7 +2206,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) /* If not define special ras_fini function, use gfx default ras_fini */ if (!adev->gfx.ras->ras_block.ras_fini) - adev->gfx.ras->ras_block.ras_fini = amdgpu_gfx_ras_fini; + adev->gfx.ras->ras_block.ras_fini = amdgpu_ras_block_late_fini; /* If not defined special ras_cb function, use default ras_cb */ if (!adev->gfx.ras->ras_block.ras_cb)
-- cgit From 6475ae2b742876aa9b2a0aff7ba60f5c81917614 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Tue, 15 Mar 2022 17:48:18 +0800 Subject: drm/amdgpu: add UTCL2 RAS poison query for Aldebaran (v2) Add helper functions to query and reset the RAS UTCL2 poison status. v2: implement it on the amdgpu side; kfd only calls it.
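To make that division of labor concrete, here is a hypothetical sketch of the kfd side of the contract. The actual kfd call site is outside this section; the function name kfd_handle_utcl2_fault and the bool reset argument passed to the consumption handler are illustrative assumptions, not taken from the patch:

	/* On a UTCL2 fault interrupt, ask the gfx ras block whether the
	 * faulting access consumed poisoned data before deciding how to
	 * recover. (Hypothetical caller, not part of this patch.) */
	static void kfd_handle_utcl2_fault(struct amdgpu_device *adev)
	{
		/* The wrapper returns false when the gfx version provides
		 * no query_utcl2_poison_status callback. */
		if (amdgpu_amdkfd_ras_query_utcl2_poison_status(adev))
			/* Treat this as RAS poison consumption rather than
			 * an ordinary page fault; true requests a reset. */
			amdgpu_amdkfd_ras_poison_consumption_handler(adev, true);
	}

On the amdgpu side, as the gfx_v9_4_2.c hunk below shows, the query reads the FED (fatal error detected) field out of VM_L2_PROTECTION_FAULT_STATUS and then clears the fault status.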
Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 8 ++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 1 + drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 14 ++++++++++++++ 4 files changed, 24 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 6ca1db3c243f..c18c4be1e4ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -724,3 +724,11 @@ void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bo else if (reset) amdgpu_amdkfd_gpu_reset(adev); } + +bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev) +{ + if (adev->gfx.ras->query_utcl2_poison_status) + return adev->gfx.ras->query_utcl2_poison_status(adev); + else + return false; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 4cb14c2fe53f..0838926a8ef0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -301,6 +301,7 @@ void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem); void amdgpu_amdkfd_block_mmu_notifications(void *p); int amdgpu_amdkfd_criu_resume(void *p); +bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev); #if IS_ENABLED(CONFIG_HSA_AMD) void amdgpu_amdkfd_gpuvm_init_mem_limits(void); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index dcb3c7871c73..5ed9b8a4c571 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -202,6 +202,7 @@ struct amdgpu_cu_info { struct amdgpu_gfx_ras { struct amdgpu_ras_block_object ras_block; void (*enable_watchdog_timer)(struct amdgpu_device *adev); + bool (*query_utcl2_poison_status)(struct amdgpu_device *adev); }; struct amdgpu_gfx_funcs { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c index 7653ebd0e67b..3a797424579c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c @@ -1930,6 +1930,19 @@ static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev) mutex_unlock(&adev->grbm_idx_mutex); } +static bool gfx_v9_4_2_query_uctl2_poison_status(struct amdgpu_device *adev) +{ + u32 status = 0; + struct amdgpu_vmhub *hub; + + hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + status = RREG32(hub->vm_l2_pro_fault_status); + /* reset page fault status */ + WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); + + return REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED); +} + struct amdgpu_ras_block_hw_ops gfx_v9_4_2_ras_ops = { .ras_error_inject = &gfx_v9_4_2_ras_error_inject, .query_ras_error_count = &gfx_v9_4_2_query_ras_error_count, @@ -1943,4 +1956,5 @@ struct amdgpu_gfx_ras gfx_v9_4_2_ras = { .hw_ops = &gfx_v9_4_2_ras_ops, }, .enable_watchdog_timer = &gfx_v9_4_2_enable_watchdog_timer, + .query_utcl2_poison_status = gfx_v9_4_2_query_uctl2_poison_status, }; -- cgit From 5cb1cfd5f1863c667f43d735b804dee156e09476 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 29 Mar 2022 14:40:28 -0400 Subject: drm/amdgpu/discovery: populate additional GC info MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit From the GC info table to the gfx config structure in the driver. The driver will use this data to configure the card correctly. Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 17 +++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 11 +++++++++++ 2 files changed, 28 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index aaf2fc6b1a82..4871bd84438e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -1154,6 +1154,8 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) union gc_info { struct gc_info_v1_0 v1; + struct gc_info_v1_1 v1_1; + struct gc_info_v1_2 v1_2; struct gc_info_v2_0 v2; }; @@ -1190,6 +1192,21 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev) adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) / le32_to_cpu(gc_info->v1.gc_num_sa_per_se); adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc); + if (gc_info->v1.header.version_minor >= 1) { + adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa); + adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface); + adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps); + } + if (gc_info->v1.header.version_minor >= 2) { + adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg); + adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size); + adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp); + adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc); + adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc); + adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa); + adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance); + adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu); + } break; case 2: adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 5ed9b8a4c571..ad8e7d486a7d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -183,6 +183,17 @@ struct amdgpu_gfx_config { uint32_t num_packer_per_sc; uint32_t pa_sc_tile_steering_override; uint64_t tcc_disabled_mask; + uint32_t gc_num_tcp_per_sa; + uint32_t gc_num_sdp_interface; + uint32_t gc_num_tcps; + uint32_t gc_num_tcp_per_wpg; + uint32_t gc_tcp_l1_size; + uint32_t gc_num_sqc_per_wgp; + uint32_t gc_l1_instruction_cache_size_per_sqc; + uint32_t gc_l1_data_cache_size_per_sqc; + uint32_t gc_gl1c_per_sa; + uint32_t gc_gl1c_size_per_instance; + uint32_t gc_gl2c_per_gpu; }; struct amdgpu_cu_info { -- cgit From 1c2014da77858af2da85b2f2917dbb00858bd869 Mon Sep 17 00:00:00 2001 From: "Tianci.Yin" Date: Wed, 6 Apr 2022 14:11:38 -0400 Subject: drm/amdgpu: add gmc v11_0 ip block (v3) Add support for GPU memory controller v11. 
v1: Add support for gmc v11.0 Add gmc 11 block (Tianci) v2: drop unused amdgpu_bo_late_init (Hawking) v3: squash in various fix Signed-off-by: Tianci.Yin Signed-off-by: Likun Gao Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Signed-off-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 6 +- drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 965 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/gmc_v11_0.h | 30 + 4 files changed, 1000 insertions(+), 3 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c create mode 100644 drivers/gpu/drm/amd/amdgpu/gmc_v11_0.h (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 156de606f275..7a1b13fabebb 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -88,7 +88,7 @@ amdgpu-y += \ gmc_v8_0.o \ gfxhub_v1_0.o mmhub_v1_0.o gmc_v9_0.o gfxhub_v1_1.o mmhub_v9_4.o \ gfxhub_v2_0.o mmhub_v2_0.o gmc_v10_0.o gfxhub_v2_1.o mmhub_v2_3.o \ - mmhub_v1_7.o gfxhub_v3_0.o mmhub_v3_0.o + mmhub_v1_7.o gfxhub_v3_0.o mmhub_v3_0.o gmc_v11_0.o # add UMC block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index ad8e7d486a7d..8e18d3fc4fab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -335,8 +335,10 @@ struct amdgpu_gfx { DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); /*ras */ - struct ras_common_if *ras_if; - struct amdgpu_gfx_ras *ras; + struct ras_common_if *ras_if; + struct amdgpu_gfx_ras *ras; + + bool is_poweron; }; #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c new file mode 100644 index 000000000000..b80b5f70ecf1 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -0,0 +1,965 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ */
+#include <linux/firmware.h>
+#include <linux/pci.h>
+
+#include "amdgpu.h"
+#include "amdgpu_atomfirmware.h"
+#include "gmc_v11_0.h"
+#include "umc_v8_7.h"
+#include "athub/athub_3_0_0_sh_mask.h"
+#include "athub/athub_3_0_0_offset.h"
+#include "oss/osssys_6_0_0_offset.h"
+#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
+#include "navi10_enum.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "soc15_common.h"
+#include "nbio_v4_3.h"
+#include "gfxhub_v3_0.h"
+#include "mmhub_v3_0.h"
+#include "athub_v3_0.h"
+
+
+static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev,
+					 struct amdgpu_irq_src *src,
+					 unsigned type,
+					 enum amdgpu_interrupt_state state)
+{
+	return 0;
+}
+
+static int
+gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
+				   struct amdgpu_irq_src *src, unsigned type,
+				   enum amdgpu_interrupt_state state)
+{
+	switch (state) {
+	case AMDGPU_IRQ_STATE_DISABLE:
+		/* MM HUB */
+		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
+		/* GFX HUB */
+		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
+		break;
+	case AMDGPU_IRQ_STATE_ENABLE:
+		/* MM HUB */
+		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
+		/* GFX HUB */
+		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
+				       struct amdgpu_irq_src *source,
+				       struct amdgpu_iv_entry *entry)
+{
+	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
+	uint32_t status = 0;
+	u64 addr;
+
+	addr = (u64)entry->src_data[0] << 12;
+	addr |= ((u64)entry->src_data[1] & 0xf) << 44;
+
+	if (!amdgpu_sriov_vf(adev)) {
+		/*
+		 * Issue a dummy read to wait for the status register to
+		 * be updated to avoid reading an incorrect value due to
+		 * the new fast GRBM interface.
+		 */
+		if (entry->vmid_src == AMDGPU_GFXHUB_0)
+			RREG32(hub->vm_l2_pro_fault_status);
+
+		status = RREG32(hub->vm_l2_pro_fault_status);
+		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
+	}
+
+	if (printk_ratelimit()) {
+		struct amdgpu_task_info task_info;
+
+		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
+		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
+
+		dev_err(adev->dev,
+			"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
+			"for process %s pid %d thread %s pid %d)\n",
+			entry->vmid_src ? "mmhub" : "gfxhub",
+			entry->src_id, entry->ring_id, entry->vmid,
+			entry->pasid, task_info.process_name, task_info.tgid,
+			task_info.task_name, task_info.pid);
+		dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
+			addr, entry->client_id);
+		if (!amdgpu_sriov_vf(adev))
+			hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
+	}
+
+	return 0;
+}
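The address math in gmc_v11_0_process_interrupt() above rebuilds a 48-bit,
page-aligned fault address from two dwords of the IV ring entry: src_data[0]
carries the page frame (address bits 43:12) and the low nibble of src_data[1]
carries bits 47:44. A standalone sketch of the same math (illustration only,
not part of the patch):

#include <stdint.h>

static uint64_t gmc_fault_addr(uint32_t src0, uint32_t src1)
{
	uint64_t addr = (uint64_t)src0 << 12;	/* page frame -> byte address  */

	addr |= ((uint64_t)src1 & 0xf) << 44;	/* top nibble of the 48-bit VA */
	return addr;
}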
"mmhub" : "gfxhub", + entry->src_id, entry->ring_id, entry->vmid, + entry->pasid, task_info.process_name, task_info.tgid, + task_info.task_name, task_info.pid); + dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n", + addr, entry->client_id); + if (!amdgpu_sriov_vf(adev)) + hub->vmhub_funcs->print_l2_protection_fault_status(adev, status); + } + + return 0; +} + +static const struct amdgpu_irq_src_funcs gmc_v11_0_irq_funcs = { + .set = gmc_v11_0_vm_fault_interrupt_state, + .process = gmc_v11_0_process_interrupt, +}; + +static const struct amdgpu_irq_src_funcs gmc_v11_0_ecc_funcs = { + .set = gmc_v11_0_ecc_interrupt_state, + .process = amdgpu_umc_process_ecc_irq, +}; + +static void gmc_v11_0_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->gmc.vm_fault.num_types = 1; + adev->gmc.vm_fault.funcs = &gmc_v11_0_irq_funcs; + + if (!amdgpu_sriov_vf(adev)) { + adev->gmc.ecc_irq.num_types = 1; + adev->gmc.ecc_irq.funcs = &gmc_v11_0_ecc_funcs; + } +} + +/** + * gmc_v11_0_use_invalidate_semaphore - judge whether to use semaphore + * + * @adev: amdgpu_device pointer + * @vmhub: vmhub type + * + */ +static bool gmc_v11_0_use_invalidate_semaphore(struct amdgpu_device *adev, + uint32_t vmhub) +{ + return ((vmhub == AMDGPU_MMHUB_0) && + (!amdgpu_sriov_vf(adev))); +} + +static bool gmc_v11_0_get_atc_vmid_pasid_mapping_info( + struct amdgpu_device *adev, + uint8_t vmid, uint16_t *p_pasid) +{ +#if 0 // TODO: + uint32_t value; + + value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + + vmid); + *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; + + return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); +#else + return 0; +#endif +} + +/* + * GART + * VMID 0 is the physical GPU addresses as used by the kernel. + * VMIDs 1-15 are used for userspace clients and are handled + * by the amdgpu vm/hsa code. + */ + +static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, + unsigned int vmhub, uint32_t flush_type) +{ + bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(adev, vmhub); + struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; + u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type); + u32 tmp; + /* Use register 17 for GART */ + const unsigned eng = 17; + unsigned int i; + + spin_lock(&adev->gmc.invalidate_lock); + /* + * It may lose gpuvm invalidate acknowldege state across power-gating + * off cycle, add semaphore acquire before invalidation and semaphore + * release after invalidation to avoid entering power gated state + * to WA the Issue + */ + + /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ + if (use_semaphore) { + for (i = 0; i < adev->usec_timeout; i++) { + /* a read return value of 1 means semaphore acuqire */ + tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + + hub->eng_distance * eng); + if (tmp & 0x1) + break; + udelay(1); + } + + if (i >= adev->usec_timeout) + DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); + } + + WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req); + + /* Wait for ACK with a delay.*/ + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + + hub->eng_distance * eng); + tmp &= 1 << vmid; + if (tmp) + break; + + udelay(1); + } + + /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. 
+
+/**
+ * gmc_v11_0_flush_gpu_tlb - gart tlb flush callback
+ *
+ * @adev: amdgpu_device pointer
+ * @vmid: vm instance to flush
+ * @vmhub: which hub to flush
+ * @flush_type: the flush type
+ *
+ * Flush the TLB for the requested page table.
+ */
+static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
+					uint32_t vmhub, uint32_t flush_type)
+{
+	if ((vmhub == AMDGPU_GFXHUB_0) && !adev->gfx.is_poweron)
+		return;
+
+	/* flush hdp cache */
+	adev->hdp.funcs->flush_hdp(adev, NULL);
+
+	/* For SRIOV run time, the driver shouldn't access the register
+	 * through MMIO. Directly use kiq to do the vm invalidation instead.
+	 */
+	if (adev->gfx.kiq.ring.sched.ready &&
+	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
+		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
+		const unsigned eng = 17;
+		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
+		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
+		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
+
+		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
+				1 << vmid);
+		return;
+	}
+
+	mutex_lock(&adev->mman.gtt_window_lock);
+	gmc_v11_0_flush_vm_hub(adev, vmid, vmhub, 0);
+	mutex_unlock(&adev->mman.gtt_window_lock);
+	return;
+}
+
+/**
+ * gmc_v11_0_flush_gpu_tlb_pasid - tlb flush via pasid
+ *
+ * @adev: amdgpu_device pointer
+ * @pasid: pasid to be flushed
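+ * @flush_type: type of TLB flush
+ * @all_hub: flush every vmhub, not only the GFXHUB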
+ *
+ * Flush the TLB for the requested pasid.
+ */
+static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+					uint16_t pasid, uint32_t flush_type,
+					bool all_hub)
+{
+	int vmid, i;
+	signed long r;
+	uint32_t seq;
+	uint16_t queried_pasid;
+	bool ret;
+	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+
+	if (amdgpu_emu_mode == 0 && ring->sched.ready) {
+		spin_lock(&adev->gfx.kiq.ring_lock);
+		/* 2 dwords flush + 8 dwords fence */
+		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
+		kiq->pmf->kiq_invalidate_tlbs(ring,
+					pasid, flush_type, all_hub);
+		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+		if (r) {
+			amdgpu_ring_undo(ring);
+			spin_unlock(&adev->gfx.kiq.ring_lock);
+			return -ETIME;
+		}
+
+		amdgpu_ring_commit(ring);
+		spin_unlock(&adev->gfx.kiq.ring_lock);
+		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+		if (r < 1) {
+			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
+			return -ETIME;
+		}
+
+		return 0;
+	}
+
+	for (vmid = 1; vmid < 16; vmid++) {
+
+		ret = gmc_v11_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
+				&queried_pasid);
+		if (ret && queried_pasid == pasid) {
+			if (all_hub) {
+				for (i = 0; i < adev->num_vmhubs; i++)
+					gmc_v11_0_flush_gpu_tlb(adev, vmid,
+							i, flush_type);
+			} else {
+				gmc_v11_0_flush_gpu_tlb(adev, vmid,
+						AMDGPU_GFXHUB_0, flush_type);
+			}
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
+					     unsigned vmid, uint64_t pd_addr)
+{
+	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
+	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
+	unsigned eng = ring->vm_inv_eng;
+
+	/*
+	 * The gpuvm invalidate acknowledge state may be lost across a
+	 * power-gating off cycle. Acquire the semaphore before the
+	 * invalidation and release it afterwards, so the hub does not
+	 * enter a power-gated state, as a workaround for the issue.
+	 */
+
+	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
+	if (use_semaphore)
+		/* a read return value of 1 means semaphore acquire */
+		amdgpu_ring_emit_reg_wait(ring,
+					  hub->vm_inv_eng0_sem +
+					  hub->eng_distance * eng, 0x1, 0x1);
+
+	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
+			      (hub->ctx_addr_distance * vmid),
+			      lower_32_bits(pd_addr));
+
+	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
+			      (hub->ctx_addr_distance * vmid),
+			      upper_32_bits(pd_addr));
+
+	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
+					    hub->eng_distance * eng,
+					    hub->vm_inv_eng0_ack +
+					    hub->eng_distance * eng,
+					    req, 1 << vmid);
+
+	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well.
*/ + if (use_semaphore) + /* + * add semaphore release after invalidation, + * write with 0 means semaphore release + */ + amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + + hub->eng_distance * eng, 0); + + return pd_addr; +} + +static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, + unsigned pasid) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t reg; + + if (ring->funcs->vmhub == AMDGPU_GFXHUB_0) + reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid; + else + reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid; + + amdgpu_ring_emit_wreg(ring, reg, pasid); +} + +/* + * PTE format: + * 63:59 reserved + * 58:57 reserved + * 56 F + * 55 L + * 54 reserved + * 53:52 SW + * 51 T + * 50:48 mtype + * 47:12 4k physical page base address + * 11:7 fragment + * 6 write + * 5 read + * 4 exe + * 3 Z + * 2 snooped + * 1 system + * 0 valid + * + * PDE format: + * 63:59 block fragment size + * 58:55 reserved + * 54 P + * 53:48 reserved + * 47:6 physical base address of PD or PTE + * 5:3 reserved + * 2 C + * 1 system + * 0 valid + */ + +static uint64_t gmc_v11_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) +{ + switch (flags) { + case AMDGPU_VM_MTYPE_DEFAULT: + return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); + case AMDGPU_VM_MTYPE_NC: + return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); + case AMDGPU_VM_MTYPE_WC: + return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC); + case AMDGPU_VM_MTYPE_CC: + return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC); + case AMDGPU_VM_MTYPE_UC: + return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC); + default: + return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); + } +} + +static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level, + uint64_t *addr, uint64_t *flags) +{ + if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM)) + *addr = adev->vm_manager.vram_base_offset + *addr - + adev->gmc.vram_start; + BUG_ON(*addr & 0xFFFF00000000003FULL); + + if (!adev->gmc.translate_further) + return; + + if (level == AMDGPU_VM_PDB1) { + /* Set the block fragment size */ + if (!(*flags & AMDGPU_PDE_PTE)) + *flags |= AMDGPU_PDE_BFS(0x9); + + } else if (level == AMDGPU_VM_PDB0) { + if (*flags & AMDGPU_PDE_PTE) + *flags &= ~AMDGPU_PDE_PTE; + else + *flags |= AMDGPU_PTE_TF; + } +} + +static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev, + struct amdgpu_bo_va_mapping *mapping, + uint64_t *flags) +{ + *flags &= ~AMDGPU_PTE_EXECUTABLE; + *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; + + *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK; + *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK); + + if (mapping->flags & AMDGPU_PTE_PRT) { + *flags |= AMDGPU_PTE_PRT; + *flags |= AMDGPU_PTE_SNOOPED; + *flags |= AMDGPU_PTE_LOG; + *flags |= AMDGPU_PTE_SYSTEM; + *flags &= ~AMDGPU_PTE_VALID; + } +} + +static unsigned gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev) +{ + return 0; +} + +static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = { + .flush_gpu_tlb = gmc_v11_0_flush_gpu_tlb, + .flush_gpu_tlb_pasid = gmc_v11_0_flush_gpu_tlb_pasid, + .emit_flush_gpu_tlb = gmc_v11_0_emit_flush_gpu_tlb, + .emit_pasid_mapping = gmc_v11_0_emit_pasid_mapping, + .map_mtype = gmc_v11_0_map_mtype, + .get_vm_pde = gmc_v11_0_get_vm_pde, + .get_vm_pte = gmc_v11_0_get_vm_pte, + .get_vbios_fb_size = gmc_v11_0_get_vbios_fb_size, +}; + +static void gmc_v11_0_set_gmc_funcs(struct amdgpu_device *adev) +{ + adev->gmc.gmc_funcs = &gmc_v11_0_gmc_funcs; +} + +static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[UMC_HWIP][0]) { + case IP_VERSION(8, 10, 0): + break; + 
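	/* no UMC callbacks are wired up yet */
+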
default: + break; + } +} + + +static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev) +{ + adev->mmhub.funcs = &mmhub_v3_0_funcs; +} + +static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev) +{ + adev->gfxhub.funcs = &gfxhub_v3_0_funcs; +} + +static int gmc_v11_0_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + gmc_v11_0_set_gfxhub_funcs(adev); + gmc_v11_0_set_mmhub_funcs(adev); + gmc_v11_0_set_gmc_funcs(adev); + gmc_v11_0_set_irq_funcs(adev); + gmc_v11_0_set_umc_funcs(adev); + + adev->gmc.shared_aperture_start = 0x2000000000000000ULL; + adev->gmc.shared_aperture_end = + adev->gmc.shared_aperture_start + (4ULL << 30) - 1; + adev->gmc.private_aperture_start = 0x1000000000000000ULL; + adev->gmc.private_aperture_end = + adev->gmc.private_aperture_start + (4ULL << 30) - 1; + + return 0; +} + +static int gmc_v11_0_late_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_gmc_allocate_vm_inv_eng(adev); + if (r) + return r; + + r = amdgpu_gmc_ras_late_init(adev); + if (r) + return r; + + return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); +} + +static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev, + struct amdgpu_gmc *mc) +{ + u64 base = 0; + + base = adev->mmhub.funcs->get_fb_location(adev); + + amdgpu_gmc_vram_location(adev, &adev->gmc, base); + amdgpu_gmc_gart_location(adev, mc); + + /* base offset of vram pages */ + adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev); +} + +/** + * gmc_v11_0_mc_init - initialize the memory controller driver params + * + * @adev: amdgpu_device pointer + * + * Look up the amount of vram, vram width, and decide how to place + * vram and gart within the GPU's physical address space. + * Returns 0 for success. 
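+ * The VRAM size is read back from the memory controller through the
+ * NBIO get_memsize() callback.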
+ */
+static int gmc_v11_0_mc_init(struct amdgpu_device *adev)
+{
+	int r;
+
+	/* VRAM size in bytes; get_memsize() reports the size in MB */
+	adev->gmc.mc_vram_size =
+		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
+
+	if (!(adev->flags & AMD_IS_APU)) {
+		r = amdgpu_device_resize_fb_bar(adev);
+		if (r)
+			return r;
+	}
+	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
+	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
+
+	/* In case the PCI BAR is larger than the actual amount of vram */
+	adev->gmc.visible_vram_size = adev->gmc.aper_size;
+	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
+		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
+
+	/* set the gart size */
+	if (amdgpu_gart_size == -1) {
+		adev->gmc.gart_size = 512ULL << 20;
+	} else
+		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
+
+	gmc_v11_0_vram_gtt_location(adev, &adev->gmc);
+
+	return 0;
+}
+
+static int gmc_v11_0_gart_init(struct amdgpu_device *adev)
+{
+	int r;
+
+	if (adev->gart.bo) {
+		WARN(1, "PCIE GART already initialized\n");
+		return 0;
+	}
+
+	/* Initialize common gart structure */
+	r = amdgpu_gart_init(adev);
+	if (r)
+		return r;
+
+	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
+	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
+				 AMDGPU_PTE_EXECUTABLE;
+
+	return amdgpu_gart_table_vram_alloc(adev);
+}
+
+static int gmc_v11_0_sw_init(void *handle)
+{
+	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mmhub.funcs->init(adev);
+
+	spin_lock_init(&adev->gmc.invalidate_lock);
+
+	r = amdgpu_atomfirmware_get_vram_info(adev,
+					      &vram_width, &vram_type, &vram_vendor);
+	adev->gmc.vram_width = vram_width;
+
+	adev->gmc.vram_type = vram_type;
+	adev->gmc.vram_vendor = vram_vendor;
+
+	switch (adev->ip_versions[GC_HWIP][0]) {
+	case IP_VERSION(11, 0, 0):
+		adev->num_vmhubs = 2;
+		/*
+		 * To fulfill 4-level page support,
+		 * vm size is 256TB (48bit), maximum size,
+		 * block size 512 (9bit)
+		 */
+		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
+		break;
+	default:
+		break;
+	}
+
+	/* This interrupt is VMC page fault.*/
+	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC,
+			      VMC_1_0__SRCID__VM_FAULT,
+			      &adev->gmc.vm_fault);
+
+	if (r)
+		return r;
+
+	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
+			      UTCL2_1_0__SRCID__FAULT,
+			      &adev->gmc.vm_fault);
+	if (r)
+		return r;
+
+	if (!amdgpu_sriov_vf(adev)) {
+		/* interrupt sent to DF. */
+		r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_DF, 0,
+				      &adev->gmc.ecc_irq);
+		if (r)
+			return r;
+	}
+
+	/*
+	 * Set the internal MC address mask. This is the max address of the GPU's
+	 * internal address space.
+	 */
+	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
+
+	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
+	if (r) {
+		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
+		return r;
+	}
+
+	r = gmc_v11_0_mc_init(adev);
+	if (r)
+		return r;
+
+	amdgpu_gmc_get_vbios_allocations(adev);
+
+	/* Memory manager */
+	r = amdgpu_bo_init(adev);
+	if (r)
+		return r;
+
+	r = gmc_v11_0_gart_init(adev);
+	if (r)
+		return r;
+
+	/*
+	 * number of VMs
+	 * VMID 0 is reserved for System
+	 * amdgpu graphics/compute will use VMIDs 1-7
+	 * amdkfd will use VMIDs 8-15
+	 */
+	adev->vm_manager.first_kfd_vmid = 8;
+
+	amdgpu_vm_manager_init(adev);
+
+	return 0;
+}
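A note on the constants fed to amdgpu_vm_adjust_size() in sw_init() above:
per the comment at the call site, 256 * 1024 is the VM size in GB (256 TB,
a full 48-bit address space), 9 the block size in bits (512 entries), and
48 the maximum VA width. The first and last arguments agree, which a
two-line check confirms (illustration only, not part of the patch):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t vm_size_gb = 256 * 1024;	/* argument to amdgpu_vm_adjust_size() */
	uint64_t va_bytes = vm_size_gb << 30;	/* GB -> bytes */

	assert(va_bytes == 1ULL << 48);		/* matches the 48-bit max passed in */
	return 0;
}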
+
+/**
+ * gmc_v11_0_gart_fini - vm fini callback
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tears down the driver GART/VM setup.
+ */
+static void gmc_v11_0_gart_fini(struct amdgpu_device *adev)
+{
+	amdgpu_gart_table_vram_free(adev);
+}
+
+static int gmc_v11_0_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	amdgpu_vm_manager_fini(adev);
+	gmc_v11_0_gart_fini(adev);
+	amdgpu_gem_force_release(adev);
+	amdgpu_bo_fini(adev);
+
+	return 0;
+}
+
+static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev)
+{
+}
+
+/**
+ * gmc_v11_0_gart_enable - gart enable
+ *
+ * @adev: amdgpu_device pointer
+ */
+static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
+{
+	int r;
+	bool value;
+
+	if (adev->gart.bo == NULL) {
+		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+
+	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
+	r = adev->mmhub.funcs->gart_enable(adev);
+	if (r)
+		return r;
+
+	/* Flush HDP after it is initialized */
+	adev->hdp.funcs->flush_hdp(adev, NULL);
+
+	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
+		false : true;
+
+	adev->mmhub.funcs->set_fault_enable_default(adev, value);
+	gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
+
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(adev->gmc.gart_size >> 20),
+		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
+
+	return 0;
+}
+
+static int gmc_v11_0_hw_init(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	/* The sequence of these two function calls matters.*/
+	gmc_v11_0_init_golden_registers(adev);
+
+	r = gmc_v11_0_gart_enable(adev);
+	if (r)
+		return r;
+
+	if (adev->umc.funcs && adev->umc.funcs->init_registers)
+		adev->umc.funcs->init_registers(adev);
+
+	return 0;
+}
+
+/**
+ * gmc_v11_0_gart_disable - gart disable
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * This disables all VM page tables.
+ */ +static void gmc_v11_0_gart_disable(struct amdgpu_device *adev) +{ + adev->mmhub.funcs->gart_disable(adev); +} + +static int gmc_v11_0_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (amdgpu_sriov_vf(adev)) { + /* full access mode, so don't touch any GMC register */ + DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); + return 0; + } + + amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); + amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); + gmc_v11_0_gart_disable(adev); + + return 0; +} + +static int gmc_v11_0_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + gmc_v11_0_hw_fini(adev); + + return 0; +} + +static int gmc_v11_0_resume(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = gmc_v11_0_hw_init(adev); + if (r) + return r; + + amdgpu_vmid_reset_all(adev); + + return 0; +} + +static bool gmc_v11_0_is_idle(void *handle) +{ + /* MC is always ready in GMC v11.*/ + return true; +} + +static int gmc_v11_0_wait_for_idle(void *handle) +{ + /* There is no need to wait for MC idle in GMC v11.*/ + return 0; +} + +static int gmc_v11_0_soft_reset(void *handle) +{ + return 0; +} + +static int gmc_v11_0_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = adev->mmhub.funcs->set_clockgating(adev, state); + if (r) + return r; + + return athub_v3_0_set_clockgating(adev, state); +} + +static void gmc_v11_0_get_clockgating_state(void *handle, u64 *flags) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + adev->mmhub.funcs->get_clockgating(adev, flags); + + athub_v3_0_get_clockgating(adev, flags); +} + +static int gmc_v11_0_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +const struct amd_ip_funcs gmc_v11_0_ip_funcs = { + .name = "gmc_v11_0", + .early_init = gmc_v11_0_early_init, + .sw_init = gmc_v11_0_sw_init, + .hw_init = gmc_v11_0_hw_init, + .late_init = gmc_v11_0_late_init, + .sw_fini = gmc_v11_0_sw_fini, + .hw_fini = gmc_v11_0_hw_fini, + .suspend = gmc_v11_0_suspend, + .resume = gmc_v11_0_resume, + .is_idle = gmc_v11_0_is_idle, + .wait_for_idle = gmc_v11_0_wait_for_idle, + .soft_reset = gmc_v11_0_soft_reset, + .set_clockgating_state = gmc_v11_0_set_clockgating_state, + .set_powergating_state = gmc_v11_0_set_powergating_state, + .get_clockgating_state = gmc_v11_0_get_clockgating_state, +}; + +const struct amdgpu_ip_block_version gmc_v11_0_ip_block = { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 11, + .minor = 0, + .rev = 0, + .funcs = &gmc_v11_0_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.h new file mode 100644 index 000000000000..def4d5516f82 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.h @@ -0,0 +1,30 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __GMC_V11_0_H__ +#define __GMC_V11_0_H__ + +extern const struct amd_ip_funcs gmc_v11_0_ip_funcs; +extern const struct amdgpu_ip_block_version gmc_v11_0_ip_block; + +#endif -- cgit From 289bcffb9d76f575995113dd48c09de7f2deb1d1 Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Wed, 13 Apr 2022 14:28:02 -0400 Subject: drm/amdgpu: support imu for gfx11 Add support to initialize imu for gfx v11. IMU is a new power management block for gfx which manages gfx power. Signed-off-by: Likun Gao Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 4 + drivers/gpu/drm/amd/amdgpu/amdgpu_imu.h | 51 ++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 13 ++ drivers/gpu/drm/amd/amdgpu/imu_v11_0.c | 286 ++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/imu_v11_0.h | 30 ++++ 6 files changed, 386 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_imu.h create mode 100644 drivers/gpu/drm/amd/amdgpu/imu_v11_0.c create mode 100644 drivers/gpu/drm/amd/amdgpu/imu_v11_0.h (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 803e7f5dc458..e74bf1bde8b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -130,7 +130,8 @@ amdgpu-y += \ gfx_v9_0.o \ gfx_v9_4.o \ gfx_v9_4_2.o \ - gfx_v10_0.o + gfx_v10_0.o \ + imu_v11_0.o # add async DMA block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 8e18d3fc4fab..15749016d8cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -30,6 +30,7 @@ #include "clearstate_defs.h" #include "amdgpu_ring.h" #include "amdgpu_rlc.h" +#include "amdgpu_imu.h" #include "soc15.h" #include "amdgpu_ras.h" @@ -274,6 +275,7 @@ struct amdgpu_gfx { struct amdgpu_me me; struct amdgpu_mec mec; struct amdgpu_kiq kiq; + struct amdgpu_imu imu; struct amdgpu_scratch scratch; const struct firmware *me_fw; /* ME firmware */ uint32_t me_fw_version; @@ -287,6 +289,8 @@ struct amdgpu_gfx { uint32_t mec_fw_version; const struct firmware *mec2_fw; /* MEC2 firmware */ uint32_t mec2_fw_version; + const struct firmware *imu_fw; /* IMU firmware */ + uint32_t imu_fw_version; uint32_t me_feature_version; uint32_t ce_feature_version; uint32_t 
pfp_feature_version;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_imu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_imu.h
new file mode 100644
index 000000000000..56cf127cdf93
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_imu.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_IMU_H__
+#define __AMDGPU_IMU_H__
+
+struct amdgpu_imu_funcs {
+    int (*init_microcode)(struct amdgpu_device *adev);
+    int (*load_microcode)(struct amdgpu_device *adev);
+    void (*setup_imu)(struct amdgpu_device *adev);
+    int (*start_imu)(struct amdgpu_device *adev);
+    void (*program_rlc_ram)(struct amdgpu_device *adev);
+};
+
+struct imu_rlc_ram_golden {
+	u32	hwip;
+	u32	instance;
+	u32	segment;
+	u32	reg;
+	u32	data;
+	u32	addr_mask;
+};
+
+#define IMU_RLC_RAM_GOLDEN_VALUE(ip, inst, reg, data, addr_mask)	\
+	{ ip##_HWIP, inst, reg##_BASE_IDX, reg, data, addr_mask }
+
+struct amdgpu_imu {
+	const struct amdgpu_imu_funcs		*funcs;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 27e503982240..ffa4c0d207db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -126,6 +126,19 @@ void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr)
 	}
 }
 
+void amdgpu_ucode_print_imu_hdr(const struct common_firmware_header *hdr)
+{
+	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+	DRM_DEBUG("IMU\n");
+	amdgpu_ucode_print_common_hdr(hdr);
+
+	if (version_major != 1) {
+		DRM_ERROR("Unknown IMU ucode version: %u.%u\n", version_major, version_minor);
+	}
+}
+
 void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
 {
 	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
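amdgpu_ucode_print_imu_hdr() above leans on the convention that every amdgpu
ucode image starts with a common_firmware_header, whose major version gates
how the rest of the blob is parsed. In abridged form — only the version
fields are modeled and the little-endian conversion is elided (illustration
only, not part of the patch):

#include <stdint.h>
#include <stdbool.h>

/* Abridged stand-in for amdgpu's common_firmware_header. */
struct common_hdr {
	uint16_t header_version_major;
	uint16_t header_version_minor;
	/* ... sizes and offsets of the ucode payload follow ... */
};

static bool imu_hdr_supported(const void *fw_data)
{
	const struct common_hdr *hdr = fw_data;

	/* the printer above only understands major version 1 */
	return hdr->header_version_major == 1;
}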
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
new file mode 100644
index 000000000000..81952a6767d0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
@@ -0,0 +1,286 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_imu.h"
+
+#include "gc/gc_11_0_0_offset.h"
+#include "gc/gc_11_0_0_sh_mask.h"
+
+MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu.bin");
+
+static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
+{
+	char fw_name[40];
+	char ucode_prefix[30];
+	int err;
+	const struct imu_firmware_header_v1_0 *imu_hdr;
+	struct amdgpu_firmware_info *info = NULL;
+
+	DRM_DEBUG("\n");
+
+	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
+
+	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_imu.bin", ucode_prefix);
+	err = request_firmware(&adev->gfx.imu_fw, fw_name, adev->dev);
+	if (err)
+		goto out;
+	err = amdgpu_ucode_validate(adev->gfx.imu_fw);
+	if (err)
+		goto out;
+	imu_hdr = (const struct imu_firmware_header_v1_0 *)adev->gfx.imu_fw->data;
+	adev->gfx.imu_fw_version = le32_to_cpu(imu_hdr->header.ucode_version);
+	//adev->gfx.imu_feature_version = le32_to_cpu(imu_hdr->ucode_feature_version);
+
+	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_IMU_I];
+		info->ucode_id = AMDGPU_UCODE_ID_IMU_I;
+		info->fw = adev->gfx.imu_fw;
+		adev->firmware.fw_size +=
+			ALIGN(le32_to_cpu(imu_hdr->imu_iram_ucode_size_bytes), PAGE_SIZE);
+		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_IMU_D];
+		info->ucode_id = AMDGPU_UCODE_ID_IMU_D;
+		info->fw = adev->gfx.imu_fw;
+		adev->firmware.fw_size +=
+			ALIGN(le32_to_cpu(imu_hdr->imu_dram_ucode_size_bytes), PAGE_SIZE);
+	}
+
+out:
+	if (err) {
+		dev_err(adev->dev,
+			"gfx11: Failed to load firmware \"%s\"\n",
+			fw_name);
+		release_firmware(adev->gfx.imu_fw);
+	}
+
+	return err;
+}
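imu_v11_0_load_microcode() below pushes both images through an ADDR/DATA
register pair: the start address is written once, each subsequent DATA write
stores one dword (the hardware apparently advancing the address on every
write), and a final ADDR write records the ucode version. The skeleton of
that loop, with a hypothetical reg_write() standing in for WREG32_SOC15()
(illustration only, not part of the patch):

#include <stdint.h>
#include <stddef.h>

extern void reg_write(uint32_t reg, uint32_t val);

enum { RAM_ADDR = 0, RAM_DATA = 1 };

static void load_ram(const uint32_t *fw, size_t count, uint32_t fw_version)
{
	size_t i;

	reg_write(RAM_ADDR, 0);			/* start of the RAM */
	for (i = 0; i < count; i++)
		reg_write(RAM_DATA, fw[i]);	/* one dword per DATA write */
	reg_write(RAM_ADDR, fw_version);	/* tag the load, as below */
}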
+
+static int imu_v11_0_load_microcode(struct amdgpu_device *adev)
+{
+	const struct imu_firmware_header_v1_0 *hdr;
+	const __le32 *fw_data;
+	unsigned i, fw_size;
+
+	if (!adev->gfx.imu_fw)
+		return -EINVAL;
+
+	hdr = (const struct imu_firmware_header_v1_0 *)adev->gfx.imu_fw->data;
+	//amdgpu_ucode_print_rlc_hdr(&hdr->header);
+
+	fw_data = (const __le32 *)(adev->gfx.imu_fw->data +
+			le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+	fw_size = le32_to_cpu(hdr->imu_iram_ucode_size_bytes) / 4;
+
+	WREG32_SOC15(GC, 0, regGFX_IMU_I_RAM_ADDR, 0);
+
+	for (i = 0; i < fw_size; i++)
+		WREG32_SOC15(GC, 0, regGFX_IMU_I_RAM_DATA, le32_to_cpup(fw_data++));
+
+	WREG32_SOC15(GC, 0, regGFX_IMU_I_RAM_ADDR, adev->gfx.imu_fw_version);
+
+	fw_data = (const __le32 *)(adev->gfx.imu_fw->data +
+			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+			le32_to_cpu(hdr->imu_iram_ucode_size_bytes));
+	fw_size = le32_to_cpu(hdr->imu_dram_ucode_size_bytes) / 4;
+
+	WREG32_SOC15(GC, 0, regGFX_IMU_D_RAM_ADDR, 0);
+
+	for (i = 0; i < fw_size; i++)
+		WREG32_SOC15(GC, 0, regGFX_IMU_D_RAM_DATA, le32_to_cpup(fw_data++));
+
+	WREG32_SOC15(GC, 0, regGFX_IMU_D_RAM_ADDR, adev->gfx.imu_fw_version);
+
+	return 0;
+}
+
+static void imu_v11_0_setup(struct amdgpu_device *adev)
+{
+	int imu_reg_val;
+
+	//enable IMU debug mode
+	WREG32_SOC15(GC, 0, regGFX_IMU_C2PMSG_ACCESS_CTRL0, 0xffffff);
+	WREG32_SOC15(GC, 0, regGFX_IMU_C2PMSG_ACCESS_CTRL1, 0xffff);
+
+	imu_reg_val = RREG32_SOC15(GC, 0, regGFX_IMU_C2PMSG_16);
+	imu_reg_val |= 0x1;
+	WREG32_SOC15(GC, 0, regGFX_IMU_C2PMSG_16, imu_reg_val);
+
+	//disable imu Rtavfs, SmsRepair, DfllBTC, and ClkB
+	imu_reg_val = RREG32_SOC15(GC, 0, regGFX_IMU_SCRATCH_10);
+	imu_reg_val |= 0x10007;
+	WREG32_SOC15(GC, 0, regGFX_IMU_SCRATCH_10, imu_reg_val);
+}
+
+static int imu_v11_0_start(struct amdgpu_device *adev)
+{
+	int imu_reg_val, i;
+
+	//Start IMU by setting GFX_IMU_CORE_CTRL.CRESET = 0
+	imu_reg_val = RREG32_SOC15(GC, 0, regGFX_IMU_CORE_CTRL);
+	imu_reg_val &= 0xfffffffe;
+	WREG32_SOC15(GC, 0, regGFX_IMU_CORE_CTRL, imu_reg_val);
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		imu_reg_val = RREG32_SOC15(GC, 0, regGFX_IMU_GFX_RESET_CTRL);
+		if ((imu_reg_val & 0x1f) == 0x1f)
+			break;
+		udelay(1);
+	}
+
+	if (i >= adev->usec_timeout) {
+		dev_err(adev->dev, "init imu: IMU start timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static const struct imu_rlc_ram_golden imu_rlc_ram_golden_11[] =
+{
+	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_IO_RD_COMBINE_FLUSH, 0x00055555, 0xe0000000),
+	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_IO_WR_COMBINE_FLUSH, 0x00055555, 0xe0000000),
+	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_DRAM_COMBINE_FLUSH, 0x00555555, 0xe0000000),
+	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_MISC2, 0x00001ffe, 0xe0000000),
+	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_CREDITS, 0x003f3fff, 0xe0000000),
+	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_TAG_RESERVE1, 0x00000000, 0xe0000000),
+	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_VCC_RESERVE0, 0x00041000, 0xe0000000),
+	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_VCC_RESERVE1, 0x00000000, 0xe0000000),
+	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_VCD_RESERVE0, 0x00040000, 0xe0000000),
+	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_VCD_RESERVE1, 0x00000000, 0xe0000000),
+	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_MISC, 0x00000017, 0xe0000000),
+	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_ENABLE, 0x00000001, 0xe0000000),
+	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_CREDITS, 0x003f3fbf, 0xe0000000),
+	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_TAG_RESERVE0, 0x10201000, 0xe0000000),
+	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_TAG_RESERVE1, 0x00000080, 0xe0000000),
+	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_VCC_RESERVE0, 0x1d041040, 0xe0000000),
+	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_VCC_RESERVE1, 0x80000000, 0xe0000000),
+	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_IO_PRIORITY, 0x88888888, 0xe0000000),
+	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_MAM_CTRL, 0x0000d800, 0xe0000000),
+	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_ARB_FINAL, 0x000003f7, 0xe0000000),
+	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_ENABLE, 0x00000001, 0xe0000000),
+	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL2, 0x00020000, 0xe0000000),
+	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_APT_CNTL, 0x0000000c, 0xe0000000),
IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_CACHEABLE_DRAM_ADDRESS_END, 0x000fffff, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_MISC, 0x0c48bff0, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SA_UNIT_DISABLE, 0x00fffc01, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_PRIM_CONFIG, 0x000fffe1, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_RB_BACKEND_DISABLE, 0x0fffff01, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG, 0xfffe0001, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL, 0x00000500, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x00000001, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0x00000000, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_LOCAL_FB_ADDRESS_START, 0x00000000, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_LOCAL_FB_ADDRESS_END, 0x000fffff, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_CONTEXT0_CNTL, 0x00000000, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_CONTEXT1_CNTL, 0x00000000, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_NB_TOP_OF_DRAM_SLOT1, 0xff800000, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_NB_LOWER_TOP_OF_DRAM2, 0x00000001, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_NB_UPPER_TOP_OF_DRAM2, 0x00000fff, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL, 0x00001ffc, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL, 0x00000501, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CNTL, 0x00080603, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CNTL2, 0x00000003, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CNTL3, 0x00100003, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CNTL5, 0x00003fe0, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_CONTEXT0_CNTL, 0x00000001, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES, 0x00000c00, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_CONTEXT1_CNTL, 0x00000001, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES, 0x00000c00, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0x00000545, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGL2_PIPE_STEER_0, 0x13455431, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGL2_PIPE_STEER_1, 0x13455431, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGL2_PIPE_STEER_2, 0x76027602, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGL2_PIPE_STEER_3, 0x76207620, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0x00000345, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCUTCL2_HARVEST_BYPASS_GROUPS, 0x0000003e, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_FB_LOCATION_BASE, 0x00006000, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_FB_LOCATION_TOP, 0x000061ff, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_APT_CNTL, 0x0000000c, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_AGP_BASE, 0x00000000, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_AGP_BOT, 0x00000002, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_AGP_TOP, 0x00000000, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL2, 0x00020000, 0xe0000000), + IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regSDMA0_UCODE_SELFLOAD_CONTROL, 0x00000210, 0), + 
IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regSDMA1_UCODE_SELFLOAD_CONTROL, 0x00000210, 0),
+	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCPC_PSP_DEBUG, CPC_PSP_DEBUG__GPA_OVERRIDE_MASK, 0),
+	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCPG_PSP_DEBUG, CPG_PSP_DEBUG__GPA_OVERRIDE_MASK, 0)
+};
+
+void program_imu_rlc_ram(struct amdgpu_device *adev,
+				const struct imu_rlc_ram_golden *regs,
+				const u32 array_size)
+{
+	const struct imu_rlc_ram_golden *entry;
+	u32 reg, data;
+	int i;
+
+	for (i = 0; i < array_size; ++i) {
+		entry = &regs[i];
+		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
+		reg |= entry->addr_mask;
+
+		data = entry->data;
+		if (entry->reg == regGCMC_VM_AGP_BASE)
+			data = 0x00ffffff;
+		else if (entry->reg == regGCMC_VM_AGP_TOP)
+			data = 0x0;
+		else if (entry->reg == regGCMC_VM_FB_LOCATION_BASE)
+			data = adev->gmc.vram_start >> 24;
+		else if (entry->reg == regGCMC_VM_FB_LOCATION_TOP)
+			data = adev->gmc.vram_end >> 24;
+
+		WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_ADDR_HIGH, 0);
+		WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_ADDR_LOW, reg);
+		WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_DATA, data);
+	}
+	//Indicate the latest entry
+	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_ADDR_HIGH, 0);
+	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_ADDR_LOW, 0);
+	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_DATA, 0);
+}
+
+static void imu_v11_0_program_rlc_ram(struct amdgpu_device *adev)
+{
+	u32 reg_data;
+
+	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_INDEX, 0x2);
+
+	program_imu_rlc_ram(adev,
+			imu_rlc_ram_golden_11,
+			(const u32)ARRAY_SIZE(imu_rlc_ram_golden_11));
+
+	//Indicate the contents of the RAM are valid
+	reg_data = RREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_INDEX);
+	reg_data |= GFX_IMU_RLC_RAM_INDEX__RAM_VALID_MASK;
+	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_INDEX, reg_data);
+}
+
+const struct amdgpu_imu_funcs gfx_v11_0_imu_funcs = {
+	.init_microcode = imu_v11_0_init_microcode,
+	.load_microcode = imu_v11_0_load_microcode,
+	.setup_imu = imu_v11_0_setup,
+	.start_imu = imu_v11_0_start,
+	.program_rlc_ram = imu_v11_0_program_rlc_ram,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.h b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.h
new file mode 100644
index 000000000000..e71f96fc2f06
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ * + */ + +#ifndef __IMU_V11_0_H__ +#define __IMU_V11_0_H__ + +extern const struct amdgpu_imu_funcs gfx_v11_0_imu_funcs; + +#endif + -- cgit From 3d879e81f0f9ed5d33b5eda0fe5226c884bb8073 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Wed, 13 Apr 2022 14:27:32 -0400 Subject: drm/amdgpu: add init support for GFX11 (v2) Add initial support for GC version 11. GC is the graphics and compute block on the GPU. v1: add initial gfx11 support (Wenhui) v2: switch to new amdgpu_gfx_is_high_priority_compute_queue interface (Hawking) v3: fix num_mec (Alex) Signed-off-by: Wenhui Sheng Reviewed-by: Hawking Zhang Signed-off-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 13 + drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 6320 +++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/gfx_v11_0.h | 29 + 4 files changed, 6364 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c create mode 100644 drivers/gpu/drm/amd/amdgpu/gfx_v11_0.h (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 2c490a5c2a87..6caf2390a889 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -131,7 +131,8 @@ amdgpu-y += \ gfx_v9_4.o \ gfx_v9_4_2.o \ gfx_v10_0.o \ - imu_v11_0.o + imu_v11_0.o \ + gfx_v11_0.o # add async DMA block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 15749016d8cf..45522609d4b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -57,6 +57,9 @@ struct amdgpu_mec { u64 hpd_eop_gpu_addr; struct amdgpu_bo *mec_fw_obj; u64 mec_fw_gpu_addr; + struct amdgpu_bo *mec_fw_data_obj; + u64 mec_fw_data_gpu_addr; + u32 num_mec; u32 num_pipe_per_mec; u32 num_queue_per_pipe; @@ -245,6 +248,10 @@ struct amdgpu_pfp { struct amdgpu_bo *pfp_fw_obj; uint64_t pfp_fw_gpu_addr; uint32_t *pfp_fw_ptr; + + struct amdgpu_bo *pfp_fw_data_obj; + uint64_t pfp_fw_data_gpu_addr; + uint32_t *pfp_fw_data_ptr; }; struct amdgpu_ce { @@ -257,6 +264,11 @@ struct amdgpu_me { struct amdgpu_bo *me_fw_obj; uint64_t me_fw_gpu_addr; uint32_t *me_fw_ptr; + + struct amdgpu_bo *me_fw_data_obj; + uint64_t me_fw_data_gpu_addr; + uint32_t *me_fw_data_ptr; + uint32_t num_me; uint32_t num_pipe_per_me; uint32_t num_queue_per_pipe; @@ -277,6 +289,7 @@ struct amdgpu_gfx { struct amdgpu_kiq kiq; struct amdgpu_imu imu; struct amdgpu_scratch scratch; + bool rs64_enable; /* firmware format */ const struct firmware *me_fw; /* ME firmware */ uint32_t me_fw_version; const struct firmware *pfp_fw; /* PFP firmware */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c new file mode 100644 index 000000000000..7a34802544c0 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -0,0 +1,6320 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include +#include +#include +#include +#include +#include "amdgpu.h" +#include "amdgpu_gfx.h" +#include "amdgpu_psp.h" +#include "amdgpu_smu.h" +#include "amdgpu_atomfirmware.h" +#include "imu_v11_0.h" +#include "soc21.h" +#include "nvd.h" + +#include "gc/gc_11_0_0_offset.h" +#include "gc/gc_11_0_0_sh_mask.h" +#include "smuio/smuio_13_0_6_offset.h" +#include "smuio/smuio_13_0_6_sh_mask.h" +#include "navi10_enum.h" +#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h" + +#include "soc15.h" +#include "soc15d.h" +#include "clearstate_gfx11.h" +#include "v11_structs.h" +#include "gfx_v11_0.h" +#include "nbio_v4_3.h" +#include "mes_v11_0.h" + +#define GFX11_NUM_GFX_RINGS 1 +#define GFX11_MEC_HPD_SIZE 2048 + +#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L + +MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin"); + +static const struct soc15_reg_golden golden_settings_gc_11_0[] = +{ + /* Pending on emulation bring up */ +}; + +static const struct soc15_reg_golden golden_settings_gc_11_0_0[] = +{ + /* Pending on emulation bring up */ +}; + +static const struct soc15_reg_golden golden_settings_gc_rlc_spm_11_0[] = +{ + /* Pending on emulation bring up */ +}; + +#define DEFAULT_SH_MEM_CONFIG \ + ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \ + (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \ + (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT)) + +static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev); +static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev); +static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev); +static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev); +static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev); +static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev); +static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev); +static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev, + struct amdgpu_cu_info *cu_info); +static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev); +static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, + u32 sh_num, u32 instance); +static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev); + +static void 
gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume); +static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure); +static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val); +static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev); +static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring, + uint16_t pasid, uint32_t flush_type, + bool all_hub, uint8_t dst_sel); + +static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask) +{ + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); + amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) | + PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */ + amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */ + amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */ + amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */ + amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */ + amdgpu_ring_write(kiq_ring, 0); /* oac mask */ + amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */ +} + +static void gfx11_kiq_map_queues(struct amdgpu_ring *kiq_ring, + struct amdgpu_ring *ring) +{ + uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); + uint64_t wptr_addr = ring->wptr_gpu_addr; + uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0; + + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5)); + /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/ + amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ + PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */ + PACKET3_MAP_QUEUES_VMID(0) | /* VMID */ + PACKET3_MAP_QUEUES_QUEUE(ring->queue) | + PACKET3_MAP_QUEUES_PIPE(ring->pipe) | + PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) | + PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */ + PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */ + PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) | + PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */ + amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index)); + amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr)); + amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr)); + amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr)); + amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); +} + +static void gfx11_kiq_unmap_queues(struct amdgpu_ring *kiq_ring, + struct amdgpu_ring *ring, + enum amdgpu_unmap_queues_action action, + u64 gpu_addr, u64 seq) +{ + struct amdgpu_device *adev = kiq_ring->adev; + uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 
4 : 0; + + if (!adev->gfx.kiq.ring.sched.ready) { + amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq); + return; + } + + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4)); + amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ + PACKET3_UNMAP_QUEUES_ACTION(action) | + PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) | + PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) | + PACKET3_UNMAP_QUEUES_NUM_QUEUES(1)); + amdgpu_ring_write(kiq_ring, + PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index)); + + if (action == PREEMPT_QUEUES_NO_UNMAP) { + amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr)); + amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr)); + amdgpu_ring_write(kiq_ring, seq); + } else { + amdgpu_ring_write(kiq_ring, 0); + amdgpu_ring_write(kiq_ring, 0); + amdgpu_ring_write(kiq_ring, 0); + } +} + +static void gfx11_kiq_query_status(struct amdgpu_ring *kiq_ring, + struct amdgpu_ring *ring, + u64 addr, + u64 seq) +{ + uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0; + + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5)); + amdgpu_ring_write(kiq_ring, + PACKET3_QUERY_STATUS_CONTEXT_ID(0) | + PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) | + PACKET3_QUERY_STATUS_COMMAND(2)); + amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ + PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) | + PACKET3_QUERY_STATUS_ENG_SEL(eng_sel)); + amdgpu_ring_write(kiq_ring, lower_32_bits(addr)); + amdgpu_ring_write(kiq_ring, upper_32_bits(addr)); + amdgpu_ring_write(kiq_ring, lower_32_bits(seq)); + amdgpu_ring_write(kiq_ring, upper_32_bits(seq)); +} + +static void gfx11_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring, + uint16_t pasid, uint32_t flush_type, + bool all_hub) +{ + gfx_v11_0_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1); +} + +static const struct kiq_pm4_funcs gfx_v11_0_kiq_pm4_funcs = { + .kiq_set_resources = gfx11_kiq_set_resources, + .kiq_map_queues = gfx11_kiq_map_queues, + .kiq_unmap_queues = gfx11_kiq_unmap_queues, + .kiq_query_status = gfx11_kiq_query_status, + .kiq_invalidate_tlbs = gfx11_kiq_invalidate_tlbs, + .set_resources_size = 8, + .map_queues_size = 7, + .unmap_queues_size = 6, + .query_status_size = 7, + .invalidate_tlbs_size = 2, +}; + +static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev) +{ + adev->gfx.kiq.pmf = &gfx_v11_0_kiq_pm4_funcs; +} + +static void gfx_v11_0_init_spm_golden_registers(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(11, 0, 0): + soc15_program_register_sequence(adev, + golden_settings_gc_rlc_spm_11_0, + (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_11_0)); + break; + default: + break; + } +} + +static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(11, 0, 0): + soc15_program_register_sequence(adev, + golden_settings_gc_11_0, + (const u32)ARRAY_SIZE(golden_settings_gc_11_0)); + soc15_program_register_sequence(adev, + golden_settings_gc_11_0_0, + (const u32)ARRAY_SIZE(golden_settings_gc_11_0_0)); + break; + default: + break; + } + gfx_v11_0_init_spm_golden_registers(adev); +} + +static void gfx_v11_0_scratch_init(struct amdgpu_device *adev) +{ + adev->gfx.scratch.num_reg = 8; + adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0); + adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1; +} + +static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel, + bool wc, uint32_t 
reg, uint32_t val) +{ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) | + WRITE_DATA_DST_SEL(0) | (wc ? WR_CONFIRM : 0)); + amdgpu_ring_write(ring, reg); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, val); +} + +static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, + int mem_space, int opt, uint32_t addr0, + uint32_t addr1, uint32_t ref, uint32_t mask, + uint32_t inv) +{ + amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); + amdgpu_ring_write(ring, + /* memory (1) or register (0) */ + (WAIT_REG_MEM_MEM_SPACE(mem_space) | + WAIT_REG_MEM_OPERATION(opt) | /* wait */ + WAIT_REG_MEM_FUNCTION(3) | /* equal */ + WAIT_REG_MEM_ENGINE(eng_sel))); + + if (mem_space) + BUG_ON(addr0 & 0x3); /* Dword align */ + amdgpu_ring_write(ring, addr0); + amdgpu_ring_write(ring, addr1); + amdgpu_ring_write(ring, ref); + amdgpu_ring_write(ring, mask); + amdgpu_ring_write(ring, inv); /* poll interval */ +} + +static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t scratch; + uint32_t tmp = 0; + unsigned i; + int r; + + r = amdgpu_gfx_scratch_get(adev, &scratch); + if (r) { + DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r); + return r; + } + + WREG32(scratch, 0xCAFEDEAD); + + r = amdgpu_ring_alloc(ring, 5); + if (r) { + DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", + ring->idx, r); + amdgpu_gfx_scratch_free(adev, scratch); + return r; + } + + if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) { + gfx_v11_0_ring_emit_wreg(ring, scratch, 0xDEADBEEF); + } else { + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); + amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); + amdgpu_ring_write(ring, 0xDEADBEEF); + } + amdgpu_ring_commit(ring); + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(scratch); + if (tmp == 0xDEADBEEF) + break; + if (amdgpu_emu_mode == 1) + msleep(1); + else + udelay(1); + } + + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + + amdgpu_gfx_scratch_free(adev, scratch); + + return r; +} + +static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_ib ib; + struct dma_fence *f = NULL; + unsigned index; + uint64_t gpu_addr; + volatile uint32_t *cpu_ptr; + long r; + + /* MES KIQ fw hasn't indirect buffer support for now */ + if (adev->enable_mes_kiq && + ring->funcs->type == AMDGPU_RING_TYPE_KIQ) + return 0; + + memset(&ib, 0, sizeof(ib)); + + if (ring->is_mes_queue) { + uint32_t padding, offset; + + offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS); + padding = amdgpu_mes_ctx_get_offs(ring, + AMDGPU_MES_CTX_PADDING_OFFS); + + ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); + ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); + + gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding); + cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding); + *cpu_ptr = cpu_to_le32(0xCAFEDEAD); + } else { + r = amdgpu_device_wb_get(adev, &index); + if (r) + return r; + + gpu_addr = adev->wb.gpu_addr + (index * 4); + adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); + cpu_ptr = &adev->wb.wb[index]; + + r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib); + if (r) { + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); + goto err1; + } + } + + ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3); + ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM; + ib.ptr[2] = lower_32_bits(gpu_addr); + 
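/* remaining WRITE_DATA dwords: upper address bits, then the 0xDEADBEEF marker the wait below polls for */ +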
ib.ptr[3] = upper_32_bits(gpu_addr); + ib.ptr[4] = 0xDEADBEEF; + ib.length_dw = 5; + + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); + if (r) + goto err2; + + r = dma_fence_wait_timeout(f, false, timeout); + if (r == 0) { + r = -ETIMEDOUT; + goto err2; + } else if (r < 0) { + goto err2; + } + + if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF) + r = 0; + else + r = -EINVAL; +err2: + if (!ring->is_mes_queue) + amdgpu_ib_free(adev, &ib, NULL); + dma_fence_put(f); +err1: + /* the writeback slot was only allocated on the non-MES path; index is uninitialized otherwise */ + if (!ring->is_mes_queue) + amdgpu_device_wb_free(adev, index); + return r; +} + +static void gfx_v11_0_free_microcode(struct amdgpu_device *adev) +{ + release_firmware(adev->gfx.pfp_fw); + adev->gfx.pfp_fw = NULL; + release_firmware(adev->gfx.me_fw); + adev->gfx.me_fw = NULL; + release_firmware(adev->gfx.rlc_fw); + adev->gfx.rlc_fw = NULL; + release_firmware(adev->gfx.mec_fw); + adev->gfx.mec_fw = NULL; + + kfree(adev->gfx.rlc.register_list_format); +} + +static void gfx_v11_0_init_rlc_ext_microcode(struct amdgpu_device *adev) +{ + const struct rlc_firmware_header_v2_1 *rlc_hdr; + + rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data; + adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver); + adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver); + adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes); + adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes); + adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver); + adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver); + adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes); + adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes); + adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver); + adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver); + adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes); + adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes); + adev->gfx.rlc.reg_list_format_direct_reg_list_length = + le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length); +} + +static void gfx_v11_0_init_rlc_iram_dram_microcode(struct amdgpu_device *adev) +{ + const struct rlc_firmware_header_v2_2 *rlc_hdr; + + rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data; + adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes); + adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes); + adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes); + adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes); +} + +static void gfx_v11_0_init_rlcp_rlcv_microcode(struct amdgpu_device *adev) +{ + const struct rlc_firmware_header_v2_3 *rlc_hdr; + + rlc_hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data; + adev->gfx.rlc.rlcp_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcp_ucode_size_bytes); + adev->gfx.rlc.rlcp_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcp_ucode_offset_bytes); + adev->gfx.rlc.rlcv_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcv_ucode_size_bytes); +
adev->gfx.rlc.rlcv_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcv_ucode_offset_bytes); +} + +static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) +{ + char fw_name[40]; + char ucode_prefix[30]; + int err; + struct amdgpu_firmware_info *info = NULL; + const struct common_firmware_header *header = NULL; + const struct gfx_firmware_header_v1_0 *cp_hdr; + const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0; + const struct rlc_firmware_header_v2_0 *rlc_hdr; + unsigned int *tmp = NULL; + unsigned int i = 0; + uint16_t version_major; + uint16_t version_minor; + + DRM_DEBUG("\n"); + + amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", ucode_prefix); + err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->gfx.pfp_fw); + if (err) + goto out; + /* check pfp fw hdr version to decide if enable rs64 for gfx11.*/ + adev->gfx.rs64_enable = amdgpu_ucode_hdr_version( + (union amdgpu_firmware_header *) + adev->gfx.pfp_fw->data, 2, 0); + if (adev->gfx.rs64_enable) { + dev_info(adev->dev, "CP RS64 enable\n"); + cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.pfp_fw->data; + adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr_v2_0->header.ucode_version); + adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr_v2_0->ucode_feature_version); + + } else { + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; + adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); + } + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", ucode_prefix); + err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->gfx.me_fw); + if (err) + goto out; + if (adev->gfx.rs64_enable) { + cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.me_fw->data; + adev->gfx.me_fw_version = le32_to_cpu(cp_hdr_v2_0->header.ucode_version); + adev->gfx.me_feature_version = le32_to_cpu(cp_hdr_v2_0->ucode_feature_version); + + } else { + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; + adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); + } + + if (!amdgpu_sriov_vf(adev)) { + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix); + err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->gfx.rlc_fw); + rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; + version_major = le16_to_cpu(rlc_hdr->header.header_version_major); + version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); + + adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version); + adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version); + adev->gfx.rlc.save_and_restore_offset = + le32_to_cpu(rlc_hdr->save_and_restore_offset); + adev->gfx.rlc.clear_state_descriptor_offset = + le32_to_cpu(rlc_hdr->clear_state_descriptor_offset); + adev->gfx.rlc.avail_scratch_ram_locations = + le32_to_cpu(rlc_hdr->avail_scratch_ram_locations); + adev->gfx.rlc.reg_restore_list_size = + le32_to_cpu(rlc_hdr->reg_restore_list_size); + adev->gfx.rlc.reg_list_format_start = + le32_to_cpu(rlc_hdr->reg_list_format_start); + adev->gfx.rlc.reg_list_format_separate_start = + 
le32_to_cpu(rlc_hdr->reg_list_format_separate_start); + adev->gfx.rlc.starting_offsets_start = + le32_to_cpu(rlc_hdr->starting_offsets_start); + adev->gfx.rlc.reg_list_format_size_bytes = + le32_to_cpu(rlc_hdr->reg_list_format_size_bytes); + adev->gfx.rlc.reg_list_size_bytes = + le32_to_cpu(rlc_hdr->reg_list_size_bytes); + adev->gfx.rlc.register_list_format = + kmalloc(adev->gfx.rlc.reg_list_format_size_bytes + + adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL); + if (!adev->gfx.rlc.register_list_format) { + err = -ENOMEM; + goto out; + } + + tmp = (unsigned int *)((uintptr_t)rlc_hdr + + le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes)); + for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++) + adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]); + + adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i; + + tmp = (unsigned int *)((uintptr_t)rlc_hdr + + le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes)); + for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++) + adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]); + + if (version_major == 2) { + if (version_minor >= 1) + gfx_v11_0_init_rlc_ext_microcode(adev); + if (version_minor >= 2) + gfx_v11_0_init_rlc_iram_dram_microcode(adev); + if (version_minor == 3) + gfx_v11_0_init_rlcp_rlcv_microcode(adev); + } + } + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix); + err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->gfx.mec_fw); + if (err) + goto out; + if (adev->gfx.rs64_enable) { + cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data; + adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr_v2_0->header.ucode_version); + adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr_v2_0->ucode_feature_version); + + } else { + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; + adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); + } + + /* only one MEC for gfx 11.0.0. 
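Since no MEC2 firmware image is used, the second-MEC pointer is simply cleared.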
*/ + adev->gfx.mec2_fw = NULL; + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + if (adev->gfx.rs64_enable) { + cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.pfp_fw->data; + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_PFP]; + info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_PFP; + info->fw = adev->gfx.pfp_fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes), PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK]; + info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK; + info->fw = adev->gfx.pfp_fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK]; + info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK; + info->fw = adev->gfx.pfp_fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE); + + cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.me_fw->data; + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_ME]; + info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_ME; + info->fw = adev->gfx.me_fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes), PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK]; + info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK; + info->fw = adev->gfx.me_fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK]; + info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK; + info->fw = adev->gfx.me_fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE); + + cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data; + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC]; + info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC; + info->fw = adev->gfx.mec_fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes), PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK]; + info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK; + info->fw = adev->gfx.mec_fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK]; + info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK; + info->fw = adev->gfx.mec_fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK]; + info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK; + info->fw = adev->gfx.mec_fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK]; + 
info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK; + info->fw = adev->gfx.mec_fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE); + } else { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP]; + info->ucode_id = AMDGPU_UCODE_ID_CP_PFP; + info->fw = adev->gfx.pfp_fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME]; + info->ucode_id = AMDGPU_UCODE_ID_CP_ME; + info->fw = adev->gfx.me_fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1]; + info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1; + info->fw = adev->gfx.mec_fw; + header = (const struct common_firmware_header *)info->fw->data; + cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(header->ucode_size_bytes) - + le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT]; + info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT; + info->fw = adev->gfx.mec_fw; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE); + } + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G]; + info->ucode_id = AMDGPU_UCODE_ID_RLC_G; + info->fw = adev->gfx.rlc_fw; + if (info->fw) { + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); + } + if (adev->gfx.rlc.save_restore_list_gpm_size_bytes && + adev->gfx.rlc.save_restore_list_srm_size_bytes) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM]; + info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM; + info->fw = adev->gfx.rlc_fw; + adev->firmware.fw_size += + ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM]; + info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM; + info->fw = adev->gfx.rlc_fw; + adev->firmware.fw_size += + ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE); + } + + if (adev->gfx.rlc.rlc_iram_ucode_size_bytes && + adev->gfx.rlc.rlc_dram_ucode_size_bytes) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM]; + info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM; + info->fw = adev->gfx.rlc_fw; + adev->firmware.fw_size += + ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM]; + info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM; + info->fw = adev->gfx.rlc_fw; + adev->firmware.fw_size += + ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE); + } + + if (adev->gfx.rlc.rlcp_ucode_size_bytes) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_P]; + info->ucode_id = AMDGPU_UCODE_ID_RLC_P; + info->fw = adev->gfx.rlc_fw; + adev->firmware.fw_size += + ALIGN(adev->gfx.rlc.rlcp_ucode_size_bytes, PAGE_SIZE); + } + + if (adev->gfx.rlc.rlcv_ucode_size_bytes) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_V]; + info->ucode_id = AMDGPU_UCODE_ID_RLC_V; + info->fw = adev->gfx.rlc_fw; + adev->firmware.fw_size += + ALIGN(adev->gfx.rlc.rlcv_ucode_size_bytes, PAGE_SIZE); + } + } + +out: + if (err) { + dev_err(adev->dev, + "gfx11: Failed to load firmware \"%s\"\n", + 
fw_name); + release_firmware(adev->gfx.pfp_fw); + adev->gfx.pfp_fw = NULL; + release_firmware(adev->gfx.me_fw); + adev->gfx.me_fw = NULL; + release_firmware(adev->gfx.rlc_fw); + adev->gfx.rlc_fw = NULL; + release_firmware(adev->gfx.mec_fw); + adev->gfx.mec_fw = NULL; + } + + return err; +} + +static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev) +{ + const struct psp_firmware_header_v1_0 *toc_hdr; + int err = 0; + char fw_name[40]; + char ucode_prefix[30]; + + amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", ucode_prefix); + err = request_firmware(&adev->psp.toc_fw, fw_name, adev->dev); + if (err) + goto out; + + err = amdgpu_ucode_validate(adev->psp.toc_fw); + if (err) + goto out; + + toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; + adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version); + adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version); + adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes); + adev->psp.toc.start_addr = (uint8_t *)toc_hdr + + le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); + return 0; +out: + dev_err(adev->dev, "Failed to load TOC microcode\n"); + release_firmware(adev->psp.toc_fw); + adev->psp.toc_fw = NULL; + return err; +} + +static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev) +{ + u32 count = 0; + const struct cs_section_def *sect = NULL; + const struct cs_extent_def *ext = NULL; + + /* begin clear state */ + count += 2; + /* context control state */ + count += 3; + + for (sect = gfx11_cs_data; sect->section != NULL; ++sect) { + for (ext = sect->section; ext->extent != NULL; ++ext) { + if (sect->id == SECT_CONTEXT) + count += 2 + ext->reg_count; + else + return 0; + } + } + + /* set PA_SC_TILE_STEERING_OVERRIDE */ + count += 3; + /* end clear state */ + count += 2; + /* clear state */ + count += 2; + + return count; +} + +static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev, + volatile u32 *buffer) +{ + u32 count = 0, i; + const struct cs_section_def *sect = NULL; + const struct cs_extent_def *ext = NULL; + int ctx_reg_offset; + + if (adev->gfx.rlc.cs_data == NULL) + return; + if (buffer == NULL) + return; + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1)); + buffer[count++] = cpu_to_le32(0x80000000); + buffer[count++] = cpu_to_le32(0x80000000); + + for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { + for (ext = sect->section; ext->extent != NULL; ++ext) { + if (sect->id == SECT_CONTEXT) { + buffer[count++] = + cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); + buffer[count++] = cpu_to_le32(ext->reg_index - + PACKET3_SET_CONTEXT_REG_START); + for (i = 0; i < ext->reg_count; i++) + buffer[count++] = cpu_to_le32(ext->extent[i]); + } else { + return; + } + } + } + + ctx_reg_offset = + SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START; + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1)); + buffer[count++] = cpu_to_le32(ctx_reg_offset); + buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override); + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 
0)); + buffer[count++] = cpu_to_le32(0); +} + +static void gfx_v11_0_rlc_fini(struct amdgpu_device *adev) +{ + /* clear state block */ + amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, + &adev->gfx.rlc.clear_state_gpu_addr, + (void **)&adev->gfx.rlc.cs_ptr); + + /* jump table block */ + amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, + &adev->gfx.rlc.cp_table_gpu_addr, + (void **)&adev->gfx.rlc.cp_table_ptr); +} + +static void gfx_v11_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev) +{ + struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl; + + reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl; + reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0); + reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1); + reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2); + reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG3); + reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL); + reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX); + reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, regRLC_SPARE_INT_0); + adev->gfx.rlc.rlcg_reg_access_supported = true; +} + +static int gfx_v11_0_rlc_init(struct amdgpu_device *adev) +{ + const struct cs_section_def *cs_data; + int r; + + adev->gfx.rlc.cs_data = gfx11_cs_data; + + cs_data = adev->gfx.rlc.cs_data; + + if (cs_data) { + /* init clear state block */ + r = amdgpu_gfx_rlc_init_csb(adev); + if (r) + return r; + } + + /* init spm vmid with 0xf */ + if (adev->gfx.rlc.funcs->update_spm_vmid) + adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); + + return 0; +} + +static void gfx_v11_0_mec_fini(struct amdgpu_device *adev) +{ + amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); + amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL); + amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL); +} + +static int gfx_v11_0_me_init(struct amdgpu_device *adev) +{ + int r; + + bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES); + + amdgpu_gfx_graphics_queue_acquire(adev); + + r = gfx_v11_0_init_microcode(adev); + if (r) + DRM_ERROR("Failed to load gfx firmware!\n"); + + return r; +} + +static int gfx_v11_0_mec_init(struct amdgpu_device *adev) +{ + int r; + u32 *hpd; + size_t mec_hpd_size; + + bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); + + /* take ownership of the relevant compute queues */ + amdgpu_gfx_compute_queue_acquire(adev); + mec_hpd_size = adev->gfx.num_compute_rings * GFX11_MEC_HPD_SIZE; + + if (mec_hpd_size) { + r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, + &adev->gfx.mec.hpd_eop_obj, + &adev->gfx.mec.hpd_eop_gpu_addr, + (void **)&hpd); + if (r) { + dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); + gfx_v11_0_mec_fini(adev); + return r; + } + + memset(hpd, 0, mec_hpd_size); + + amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); + amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); + } + + return 0; +} + +static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address) +{ + WREG32_SOC15(GC, 0, regSQ_IND_INDEX, + (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | + (address << SQ_IND_INDEX__INDEX__SHIFT)); + return RREG32_SOC15(GC, 0, regSQ_IND_DATA); +} + +static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave, + uint32_t thread, uint32_t regno, + uint32_t num, uint32_t *out) +{ + WREG32_SOC15(GC, 0, regSQ_IND_INDEX, + (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | + (regno << 
SQ_IND_INDEX__INDEX__SHIFT) | + (thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) | + (SQ_IND_INDEX__AUTO_INCR_MASK)); + while (num--) + *(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA); +} + +static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) +{ + /* in gfx11 the SIMD_ID is specified as part of the INSTANCE + * field when performing a select_se_sh so it should be + * zero here */ + WARN_ON(simd != 0); + + /* type 2 wave data */ + dst[(*no_fields)++] = 2; + dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS); + dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO); + dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI); + dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO); + dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI); + dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1); + dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2); + dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC); + dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC); + dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS); + dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS); + dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2); + dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1); + dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0); +} + +static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, + uint32_t wave, uint32_t start, + uint32_t size, uint32_t *dst) +{ + WARN_ON(simd != 0); + + wave_read_regs( + adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size, + dst); +} + +static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd, + uint32_t wave, uint32_t thread, + uint32_t start, uint32_t size, + uint32_t *dst) +{ + wave_read_regs( + adev, wave, thread, + start + SQIND_WAVE_VGPRS_OFFSET, size, dst); +} + +static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev, + u32 me, u32 pipe, u32 q, u32 vm) +{ + soc21_grbm_select(adev, me, pipe, q, vm); +} + +static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = { + .get_gpu_clock_counter = &gfx_v11_0_get_gpu_clock_counter, + .select_se_sh = &gfx_v11_0_select_se_sh, + .read_wave_data = &gfx_v11_0_read_wave_data, + .read_wave_sgprs = &gfx_v11_0_read_wave_sgprs, + .read_wave_vgprs = &gfx_v11_0_read_wave_vgprs, + .select_me_pipe_q = &gfx_v11_0_select_me_pipe_q, + .init_spm_golden = &gfx_v11_0_init_spm_golden_registers, +}; + +static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev) +{ + adev->gfx.funcs = &gfx_v11_0_gfx_funcs; + + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(11, 0, 0): + adev->gfx.config.max_hw_contexts = 8; + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; + break; + default: + BUG(); + break; + } + + return 0; +} + +static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id, + int me, int pipe, int queue) +{ + int r; + struct amdgpu_ring *ring; + unsigned int irq_type; + + ring = &adev->gfx.gfx_ring[ring_id]; + + ring->me = me; + ring->pipe = pipe; + ring->queue = queue; + + ring->ring_obj = NULL; + ring->use_doorbell = true; + + if (!ring_id) + ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1; + else + ring->doorbell_index = 
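/* doorbell_index values are kept in qword units; the hw field takes dword offsets, hence the << 1 */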
adev->doorbell_index.gfx_ring1 << 1; + sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue); + + irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe; + r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type, + AMDGPU_RING_PRIO_DEFAULT, NULL); + if (r) + return r; + return 0; +} + +static int gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, + int mec, int pipe, int queue) +{ + int r; + unsigned irq_type; + struct amdgpu_ring *ring; + unsigned int hw_prio; + + ring = &adev->gfx.compute_ring[ring_id]; + + /* mec0 is me1 */ + ring->me = mec + 1; + ring->pipe = pipe; + ring->queue = queue; + + ring->ring_obj = NULL; + ring->use_doorbell = true; + ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1; + ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + + (ring_id * GFX11_MEC_HPD_SIZE); + sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); + + irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) + + ring->pipe; + hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ? + AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL; + /* type-2 packets are deprecated on MEC, use type-3 instead */ + r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type, + hw_prio, NULL); + if (r) + return r; + + return 0; +} + +static struct { + SOC21_FIRMWARE_ID id; + unsigned int offset; + unsigned int size; +} rlc_autoload_info[SOC21_FIRMWARE_ID_MAX]; + +static void gfx_v11_0_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc) +{ + RLC_TABLE_OF_CONTENT *ucode = rlc_toc; + + while (ucode && (ucode->id > SOC21_FIRMWARE_ID_INVALID) && + (ucode->id < SOC21_FIRMWARE_ID_MAX)) { + rlc_autoload_info[ucode->id].id = ucode->id; + rlc_autoload_info[ucode->id].offset = ucode->offset * 4; + rlc_autoload_info[ucode->id].size = ucode->size * 4; + + ucode++; + }; +} + +static uint32_t gfx_v11_0_calc_toc_total_size(struct amdgpu_device *adev) +{ + uint32_t total_size = 0; + SOC21_FIRMWARE_ID id; + + gfx_v11_0_parse_rlc_toc(adev, adev->psp.toc.start_addr); + + for (id = SOC21_FIRMWARE_ID_RLC_G_UCODE; id < SOC21_FIRMWARE_ID_MAX; id++) + total_size += rlc_autoload_info[id].size; + + /* In case the offset in rlc toc ucode is aligned */ + if (total_size < rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset) + total_size = rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset + + rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].size; + + return total_size; +} + +static int gfx_v11_0_rlc_autoload_buffer_init(struct amdgpu_device *adev) +{ + int r; + uint32_t total_size; + + total_size = gfx_v11_0_calc_toc_total_size(adev); + + r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.rlc_autoload_bo, + &adev->gfx.rlc.rlc_autoload_gpu_addr, + (void **)&adev->gfx.rlc.rlc_autoload_ptr); + + if (r) { + dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r); + return r; + } + + return 0; +} + +static void gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev, + SOC21_FIRMWARE_ID id, + const void *fw_data, + uint32_t fw_size, + uint32_t *fw_autoload_mask) +{ + uint32_t toc_offset; + uint32_t toc_fw_size; + char *ptr = adev->gfx.rlc.rlc_autoload_ptr; + + if (id <= SOC21_FIRMWARE_ID_INVALID || id >= SOC21_FIRMWARE_ID_MAX) + return; + + toc_offset = rlc_autoload_info[id].offset; + toc_fw_size = rlc_autoload_info[id].size; + + if (fw_size == 0) + fw_size = toc_fw_size; + + if (fw_size > toc_fw_size) + fw_size = toc_fw_size; 
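+ /* fw_size is now clamped to the TOC slot, so the memcpy below cannot overrun it */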
+ + memcpy(ptr + toc_offset, fw_data, fw_size); + + if (fw_size < toc_fw_size) + memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size); + + /* use a 64-bit shift; SOC21 firmware ids can exceed 31 */ + if ((id != SOC21_FIRMWARE_ID_RS64_PFP) && (id != SOC21_FIRMWARE_ID_RS64_ME)) + *(uint64_t *)fw_autoload_mask |= 1ULL << id; +} + +static void gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev, + uint32_t *fw_autoload_mask) +{ + void *data; + uint32_t size; + uint64_t *toc_ptr; + + *(uint64_t *)fw_autoload_mask |= 0x1; + + DRM_DEBUG("rlc autoload enabled fw: 0x%llx\n", *(uint64_t *)fw_autoload_mask); + + data = adev->psp.toc.start_addr; + size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_TOC].size; + + toc_ptr = (uint64_t *)data + size / 8 - 1; + *toc_ptr = *(uint64_t *)fw_autoload_mask; + + gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_TOC, + data, size, fw_autoload_mask); +} + +static void gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev, + uint32_t *fw_autoload_mask) +{ + const __le32 *fw_data; + uint32_t fw_size; + const struct gfx_firmware_header_v1_0 *cp_hdr; + const struct gfx_firmware_header_v2_0 *cpv2_hdr; + const struct rlc_firmware_header_v2_0 *rlc_hdr; + const struct rlc_firmware_header_v2_2 *rlcv22_hdr; + uint16_t version_major, version_minor; + + if (adev->gfx.rs64_enable) { + /* pfp ucode */ + cpv2_hdr = (const struct gfx_firmware_header_v2_0 *) + adev->gfx.pfp_fw->data; + /* instruction */ + fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + + le32_to_cpu(cpv2_hdr->ucode_offset_bytes)); + fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes); + gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP, + fw_data, fw_size, fw_autoload_mask); + /* data */ + fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + + le32_to_cpu(cpv2_hdr->data_offset_bytes)); + fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes); + gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK, + fw_data, fw_size, fw_autoload_mask); + gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P1_STACK, + fw_data, fw_size, fw_autoload_mask); + /* me ucode */ + cpv2_hdr = (const struct gfx_firmware_header_v2_0 *) + adev->gfx.me_fw->data; + /* instruction */ + fw_data = (const __le32 *)(adev->gfx.me_fw->data + + le32_to_cpu(cpv2_hdr->ucode_offset_bytes)); + fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes); + gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME, + fw_data, fw_size, fw_autoload_mask); + /* data */ + fw_data = (const __le32 *)(adev->gfx.me_fw->data + + le32_to_cpu(cpv2_hdr->data_offset_bytes)); + fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes); + gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P0_STACK, + fw_data, fw_size, fw_autoload_mask); + gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P1_STACK, + fw_data, fw_size, fw_autoload_mask); + /* mec ucode */ + cpv2_hdr = (const struct gfx_firmware_header_v2_0 *) + adev->gfx.mec_fw->data; + /* instruction */ + fw_data = (const __le32 *) (adev->gfx.mec_fw->data + + le32_to_cpu(cpv2_hdr->ucode_offset_bytes)); + fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes); + gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC, + fw_data, fw_size, fw_autoload_mask); + /* data */ + fw_data = (const __le32 *) (adev->gfx.mec_fw->data + + le32_to_cpu(cpv2_hdr->data_offset_bytes)); + fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes); + gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, 
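/* the single MEC stack image is replicated across all four RS64 pipes */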
SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK, + fw_data, fw_size, fw_autoload_mask); + gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P1_STACK, + fw_data, fw_size, fw_autoload_mask); + gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P2_STACK, + fw_data, fw_size, fw_autoload_mask); + gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P3_STACK, + fw_data, fw_size, fw_autoload_mask); + } else { + /* pfp ucode */ + cp_hdr = (const struct gfx_firmware_header_v1_0 *) + adev->gfx.pfp_fw->data; + fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + + le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes); + gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_PFP, + fw_data, fw_size, fw_autoload_mask); + + /* me ucode */ + cp_hdr = (const struct gfx_firmware_header_v1_0 *) + adev->gfx.me_fw->data; + fw_data = (const __le32 *)(adev->gfx.me_fw->data + + le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes); + gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_ME, + fw_data, fw_size, fw_autoload_mask); + + /* mec ucode */ + cp_hdr = (const struct gfx_firmware_header_v1_0 *) + adev->gfx.mec_fw->data; + fw_data = (const __le32 *) (adev->gfx.mec_fw->data + + le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) - + cp_hdr->jt_size * 4; + gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_MEC, + fw_data, fw_size, fw_autoload_mask); + } + + /* rlc ucode */ + rlc_hdr = (const struct rlc_firmware_header_v2_0 *) + adev->gfx.rlc_fw->data; + fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + + le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes); + gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_G_UCODE, + fw_data, fw_size, fw_autoload_mask); + + version_major = le16_to_cpu(rlc_hdr->header.header_version_major); + version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); + if (version_major == 2) { + if (version_minor >= 2) { + rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data; + + fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + + le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes)); + fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes); + gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_UCODE, + fw_data, fw_size, fw_autoload_mask); + + fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + + le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes)); + fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes); + gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_DRAM_BOOT, + fw_data, fw_size, fw_autoload_mask); + } + } +} + +static void gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev, + uint32_t *fw_autoload_mask) +{ + const __le32 *fw_data; + uint32_t fw_size; + const struct sdma_firmware_header_v2_0 *sdma_hdr; + + sdma_hdr = (const struct sdma_firmware_header_v2_0 *) + adev->sdma.instance[0].fw->data; + fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data + + le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes); + + gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, + SOC21_FIRMWARE_ID_SDMA_UCODE_TH0, fw_data, fw_size, fw_autoload_mask); + + fw_data = 
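/* the second (TH1) ucode blob sits at ctl_ucode_offset within the same SDMA image */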
(const __le32 *) (adev->sdma.instance[0].fw->data + + le32_to_cpu(sdma_hdr->ctl_ucode_offset)); + fw_size = le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes); + + gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, + SOC21_FIRMWARE_ID_SDMA_UCODE_TH1, fw_data, fw_size, fw_autoload_mask); +} + +static void gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev, + uint32_t *fw_autoload_mask) +{ + const __le32 *fw_data; + unsigned fw_size; + const struct mes_firmware_header_v1_0 *mes_hdr; + int pipe, ucode_id, data_id; + + for (pipe = 0; pipe < 2; pipe++) { + if (pipe==0) { + ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P0; + data_id = SOC21_FIRMWARE_ID_RS64_MES_P0_STACK; + } else { + ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P1; + data_id = SOC21_FIRMWARE_ID_RS64_MES_P1_STACK; + } + + mes_hdr = (const struct mes_firmware_header_v1_0 *) + adev->mes.fw[pipe]->data; + + fw_data = (const __le32 *)(adev->mes.fw[pipe]->data + + le32_to_cpu(mes_hdr->mes_ucode_offset_bytes)); + fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes); + + gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, + ucode_id, fw_data, fw_size, fw_autoload_mask); + + fw_data = (const __le32 *)(adev->mes.fw[pipe]->data + + le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes)); + fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes); + + gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, + data_id, fw_data, fw_size, fw_autoload_mask); + } +} + +static int gfx_v11_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev) +{ + uint32_t rlc_g_offset, rlc_g_size; + uint64_t gpu_addr; + uint32_t autoload_fw_id[2]; + + memset(autoload_fw_id, 0, sizeof(uint32_t) * 2); + + /* RLC autoload sequence 2: copy ucode */ + gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(adev, autoload_fw_id); + gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(adev, autoload_fw_id); + gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(adev, autoload_fw_id); + gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(adev, autoload_fw_id); + + rlc_g_offset = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].offset; + rlc_g_size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].size; + gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset; + + WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr)); + WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr)); + + WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size); + + /* RLC autoload sequence 3: load IMU fw */ + if (adev->gfx.imu.funcs->load_microcode) + adev->gfx.imu.funcs->load_microcode(adev); + /* RLC autoload sequence 4 init IMU fw */ + if (adev->gfx.imu.funcs->setup_imu) + adev->gfx.imu.funcs->setup_imu(adev); + if (adev->gfx.imu.funcs->start_imu) + adev->gfx.imu.funcs->start_imu(adev); + + /* RLC autoload sequence 5 disable gpa mode */ + gfx_v11_0_disable_gpa_mode(adev); + + return 0; +} + +static int gfx_v11_0_sw_init(void *handle) +{ + int i, j, k, r, ring_id = 0; + struct amdgpu_kiq *kiq; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + adev->gfxhub.funcs->init(adev); + + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(11, 0, 0): + adev->gfx.me.num_me = 1; + adev->gfx.me.num_pipe_per_me = 1; + adev->gfx.me.num_queue_per_pipe = 1; + adev->gfx.mec.num_mec = 2; + adev->gfx.mec.num_pipe_per_mec = 4; + adev->gfx.mec.num_queue_per_pipe = 4; + break; + default: + adev->gfx.me.num_me = 1; + adev->gfx.me.num_pipe_per_me = 1; + adev->gfx.me.num_queue_per_pipe = 1; + adev->gfx.mec.num_mec = 1; + adev->gfx.mec.num_pipe_per_mec = 4; + 
adev->gfx.mec.num_queue_per_pipe = 8; + break; + } + + /* EOP Event */ + r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, + GFX_11_0_0__SRCID__CP_EOP_INTERRUPT, + &adev->gfx.eop_irq); + if (r) + return r; + + /* Privileged reg */ + r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, + GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT, + &adev->gfx.priv_reg_irq); + if (r) + return r; + + /* Privileged inst */ + r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, + GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT, + &adev->gfx.priv_inst_irq); + if (r) + return r; + + adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; + + gfx_v11_0_scratch_init(adev); + + if (adev->gfx.imu.funcs) { + if (adev->gfx.imu.funcs->init_microcode) { + r = adev->gfx.imu.funcs->init_microcode(adev); + if (r) + DRM_ERROR("Failed to load imu firmware!\n"); + } + } + + r = gfx_v11_0_me_init(adev); + if (r) + return r; + + r = gfx_v11_0_rlc_init(adev); + if (r) { + DRM_ERROR("Failed to init rlc BOs!\n"); + return r; + } + + r = gfx_v11_0_mec_init(adev); + if (r) { + DRM_ERROR("Failed to init MEC BOs!\n"); + return r; + } + + /* set up the gfx ring */ + for (i = 0; i < adev->gfx.me.num_me; i++) { + for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) { + for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) { + if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j)) + continue; + + r = gfx_v11_0_gfx_ring_init(adev, ring_id, + i, k, j); + if (r) + return r; + ring_id++; + } + } + } + + ring_id = 0; + /* set up the compute queues - allocate horizontally across pipes */ + for (i = 0; i < adev->gfx.mec.num_mec; ++i) { + for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { + for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { + if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, + j)) + continue; + + r = gfx_v11_0_compute_ring_init(adev, ring_id, + i, k, j); + if (r) + return r; + + ring_id++; + } + } + } + + if (!adev->enable_mes_kiq) { + r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE); + if (r) { + DRM_ERROR("Failed to init KIQ BOs!\n"); + return r; + } + + kiq = &adev->gfx.kiq; + r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq); + if (r) + return r; + } + + r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd)); + if (r) + return r; + + /* allocate visible FB for rlc auto-loading fw */ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { + r = gfx_v11_0_init_toc_microcode(adev); + if (r) + dev_err(adev->dev, "Failed to load toc firmware!\n"); + r = gfx_v11_0_rlc_autoload_buffer_init(adev); + if (r) + return r; + } + + r = gfx_v11_0_gpu_early_init(adev); + if (r) + return r; + + return 0; +} + +static void gfx_v11_0_pfp_fini(struct amdgpu_device *adev) +{ + amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj, + &adev->gfx.pfp.pfp_fw_gpu_addr, + (void **)&adev->gfx.pfp.pfp_fw_ptr); + + amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_data_obj, + &adev->gfx.pfp.pfp_fw_data_gpu_addr, + (void **)&adev->gfx.pfp.pfp_fw_data_ptr); +} + +static void gfx_v11_0_me_fini(struct amdgpu_device *adev) +{ + amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj, + &adev->gfx.me.me_fw_gpu_addr, + (void **)&adev->gfx.me.me_fw_ptr); + + amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_data_obj, + &adev->gfx.me.me_fw_data_gpu_addr, + (void **)&adev->gfx.me.me_fw_data_ptr); +} + +static void gfx_v11_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev) +{ + amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo, + &adev->gfx.rlc.rlc_autoload_gpu_addr, + (void **)&adev->gfx.rlc.rlc_autoload_ptr); +} + +static int gfx_v11_0_sw_fini(void *handle) 
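/* teardown mirrors sw_init: rings and MQDs first, then KIQ, firmware and autoload BOs */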
+{ + int i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + for (i = 0; i < adev->gfx.num_gfx_rings; i++) + amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); + for (i = 0; i < adev->gfx.num_compute_rings; i++) + amdgpu_ring_fini(&adev->gfx.compute_ring[i]); + + amdgpu_gfx_mqd_sw_fini(adev); + + if (!adev->enable_mes_kiq) { + amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring); + amdgpu_gfx_kiq_fini(adev); + } + + gfx_v11_0_pfp_fini(adev); + gfx_v11_0_me_fini(adev); + gfx_v11_0_rlc_fini(adev); + gfx_v11_0_mec_fini(adev); + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) + gfx_v11_0_rlc_autoload_buffer_fini(adev); + + gfx_v11_0_free_microcode(adev); + + return 0; +} + +static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, + u32 sh_num, u32 instance) +{ + u32 data; + + if (instance == 0xffffffff) + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, + INSTANCE_BROADCAST_WRITES, 1); + else + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, + instance); + + if (se_num == 0xffffffff) + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, + 1); + else + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); + + if (sh_num == 0xffffffff) + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES, + 1); + else + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num); + + WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data); +} + +static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev) +{ + u32 data, mask; + + data = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE); + data |= RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE); + + data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; + data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT; + + mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se / + adev->gfx.config.max_sh_per_se); + + return (~data) & mask; +} + +static void gfx_v11_0_setup_rb(struct amdgpu_device *adev) +{ + int i, j; + u32 data; + u32 active_rbs = 0; + u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se / + adev->gfx.config.max_sh_per_se; + + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { + gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff); + data = gfx_v11_0_get_rb_active_bitmap(adev); + active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * + rb_bitmap_width_per_sh); + } + } + gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + + adev->gfx.config.backend_enable_mask = active_rbs; + adev->gfx.config.num_rbs = hweight32(active_rbs); +} + +#define DEFAULT_SH_MEM_BASES (0x6000) +#define LDS_APP_BASE 0x1 +#define SCRATCH_APP_BASE 0x2 + +static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev) +{ + int i; + uint32_t sh_mem_bases; + uint32_t data; + + /* + * Configure apertures: + * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB) + * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB) + * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB) + */ + sh_mem_bases = (LDS_APP_BASE << SH_MEM_BASES__SHARED_BASE__SHIFT) | + SCRATCH_APP_BASE; + + mutex_lock(&adev->srbm_mutex); + for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { + soc21_grbm_select(adev, 0, 0, 0, i); + /* CP and shaders */ + WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG); + WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases); + + /* Enable trap for each kfd vmid. 
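The read-modify-write below is latched for the VMID currently selected via soc21_grbm_select.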
*/ + data = RREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_PER_VMID_CNTL)); + data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); + /* write the updated value back; the read-modify above has no effect otherwise */ + WREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_PER_VMID_CNTL), data); + } + soc21_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + + /* Initialize all compute VMIDs to have no GDS, GWS, or OA + access. These should be enabled by FW for target VMIDs. */ + for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { + WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * i, 0); + WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * i, 0); + WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, i, 0); + WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, i, 0); + } +} + +static void gfx_v11_0_init_gds_vmid(struct amdgpu_device *adev) +{ + int vmid; + + /* + * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA + * access. Compute VMIDs should be enabled by FW for target VMIDs, + * the driver can enable them for graphics. VMID0 should maintain + * access so that HWS firmware can save/restore entries. + */ + for (vmid = 1; vmid < 16; vmid++) { + WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * vmid, 0); + WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * vmid, 0); + WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, vmid, 0); + WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, vmid, 0); + } +} + +static void gfx_v11_0_tcp_harvest(struct amdgpu_device *adev) +{ + /* TODO: harvest feature to be added later. */ +} + +static void gfx_v11_0_get_tcc_info(struct amdgpu_device *adev) +{ + /* TCCs are global (not instanced). */ + uint32_t tcc_disable = RREG32_SOC15(GC, 0, regCGTS_TCC_DISABLE) | + RREG32_SOC15(GC, 0, regCGTS_USER_TCC_DISABLE); + + adev->gfx.config.tcc_disabled_mask = + REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) | + (REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16); +} + +static void gfx_v11_0_constants_init(struct amdgpu_device *adev) +{ + u32 tmp; + int i; + + WREG32_FIELD15_PREREG(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff); + + gfx_v11_0_setup_rb(adev); + gfx_v11_0_get_cu_info(adev, &adev->gfx.cu_info); + gfx_v11_0_get_tcc_info(adev); + adev->gfx.config.pa_sc_tile_steering_override = 0; + + /* XXX SH_MEM regs */ + /* where to put LDS, scratch, GPUVM in FSA64 space */ + mutex_lock(&adev->srbm_mutex); + for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) { + soc21_grbm_select(adev, 0, 0, 0, i); + /* CP and shaders */ + WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG); + if (i != 0) { + tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE, + (adev->gmc.private_aperture_start >> 48)); + tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE, + (adev->gmc.shared_aperture_start >> 48)); + WREG32_SOC15(GC, 0, regSH_MEM_BASES, tmp); + } + } + soc21_grbm_select(adev, 0, 0, 0, 0); + + mutex_unlock(&adev->srbm_mutex); + + gfx_v11_0_init_compute_vmid(adev); + gfx_v11_0_init_gds_vmid(adev); +} + +static void gfx_v11_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, + bool enable) +{ + u32 tmp; + + if (amdgpu_sriov_vf(adev)) + return; + + tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0); + + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, + enable ? 1 : 0); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, + enable ? 1 : 0); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, + enable ? 1 : 0); + tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, + enable ? 
1 : 0); + + WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp); +} + +static int gfx_v11_0_init_csb(struct amdgpu_device *adev) +{ + adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); + + WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_HI, + adev->gfx.rlc.clear_state_gpu_addr >> 32); + WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_LO, + adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); + WREG32_SOC15(GC, 0, regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size); + + return 0; +} + +void gfx_v11_0_rlc_stop(struct amdgpu_device *adev) +{ + u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL); + + tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0); + WREG32_SOC15(GC, 0, regRLC_CNTL, tmp); +} + +static void gfx_v11_0_rlc_reset(struct amdgpu_device *adev) +{ + WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); + udelay(50); + WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); + udelay(50); +} + +static void gfx_v11_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev, + bool enable) +{ + uint32_t rlc_pg_cntl; + + rlc_pg_cntl = RREG32_SOC15(GC, 0, regRLC_PG_CNTL); + + if (!enable) { + /* RLC_PG_CNTL[23] = 0 (default) + * RLC will wait for handshake acks with SMU + * GFXOFF will be enabled + * RLC_PG_CNTL[23] = 1 + * RLC will not issue any message to SMU + * hence no handshake between SMU & RLC + * GFXOFF will be disabled + */ + rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK; + } else + rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK; + WREG32_SOC15(GC, 0, regRLC_PG_CNTL, rlc_pg_cntl); +} + +static void gfx_v11_0_rlc_start(struct amdgpu_device *adev) +{ + /* TODO: enable rlc & smu handshake until smu + * and gfxoff feature works as expected */ + if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK)) + gfx_v11_0_rlc_smu_handshake_cntl(adev, false); + + WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1); + udelay(50); +} + +static void gfx_v11_0_rlc_enable_srm(struct amdgpu_device *adev) +{ + uint32_t tmp; + + /* enable Save Restore Machine */ + tmp = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL)); + tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK; + tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK; + WREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL), tmp); +} + +static void gfx_v11_0_load_rlcg_microcode(struct amdgpu_device *adev) +{ + const struct rlc_firmware_header_v2_0 *hdr; + const __le32 *fw_data; + unsigned i, fw_size; + + hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; + fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; + + WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, + RLCG_UCODE_LOADING_START_ADDRESS); + + for (i = 0; i < fw_size; i++) + WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA, + le32_to_cpup(fw_data++)); + + WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version); +} + +static void gfx_v11_0_load_rlc_iram_dram_microcode(struct amdgpu_device *adev) +{ + const struct rlc_firmware_header_v2_2 *hdr; + const __le32 *fw_data; + unsigned i, fw_size; + u32 tmp; + + hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data; + + fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + + le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes)); + fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4; + + WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, 0); + + for (i = 0; i < fw_size; i++) { + if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) + msleep(1); + WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_DATA, + le32_to_cpup(fw_data++)); + } + + WREG32_SOC15(GC, 
0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version); + + fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + + le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes)); + fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4; + + WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_ADDR, 0); + for (i = 0; i < fw_size; i++) { + if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) + msleep(1); + WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_DATA, + le32_to_cpup(fw_data++)); + } + + WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version); + + tmp = RREG32_SOC15(GC, 0, regRLC_LX6_CNTL); + tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1); + tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0); + WREG32_SOC15(GC, 0, regRLC_LX6_CNTL, tmp); +} + +static void gfx_v11_0_load_rlcp_rlcv_microcode(struct amdgpu_device *adev) +{ + const struct rlc_firmware_header_v2_3 *hdr; + const __le32 *fw_data; + unsigned i, fw_size; + u32 tmp; + + hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data; + + fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + + le32_to_cpu(hdr->rlcp_ucode_offset_bytes)); + fw_size = le32_to_cpu(hdr->rlcp_ucode_size_bytes) / 4; + + WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, 0); + + for (i = 0; i < fw_size; i++) { + if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) + msleep(1); + WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_DATA, + le32_to_cpup(fw_data++)); + } + + WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, adev->gfx.rlc_fw_version); + + tmp = RREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE); + tmp = REG_SET_FIELD(tmp, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1); + WREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE, tmp); + + fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + + le32_to_cpu(hdr->rlcv_ucode_offset_bytes)); + fw_size = le32_to_cpu(hdr->rlcv_ucode_size_bytes) / 4; + + WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, 0); + + for (i = 0; i < fw_size; i++) { + if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) + msleep(1); + WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_DATA, + le32_to_cpup(fw_data++)); + } + + WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, adev->gfx.rlc_fw_version); + + tmp = RREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL); + tmp = REG_SET_FIELD(tmp, RLC_GPU_IOV_F32_CNTL, ENABLE, 1); + WREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL, tmp); +} + +static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev) +{ + const struct rlc_firmware_header_v2_0 *hdr; + uint16_t version_major; + uint16_t version_minor; + + if (!adev->gfx.rlc_fw) + return -EINVAL; + + hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; + amdgpu_ucode_print_rlc_hdr(&hdr->header); + + version_major = le16_to_cpu(hdr->header.header_version_major); + version_minor = le16_to_cpu(hdr->header.header_version_minor); + + if (version_major == 2) { + gfx_v11_0_load_rlcg_microcode(adev); + if (amdgpu_dpm == 1) { + if (version_minor >= 2) + gfx_v11_0_load_rlc_iram_dram_microcode(adev); + if (version_minor == 3) + gfx_v11_0_load_rlcp_rlcv_microcode(adev); + } + + return 0; + } + + return -EINVAL; +} + +static int gfx_v11_0_rlc_resume(struct amdgpu_device *adev) +{ + int r; + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + gfx_v11_0_init_csb(adev); + + if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */ + gfx_v11_0_rlc_enable_srm(adev); + } else { + if (amdgpu_sriov_vf(adev)) { + gfx_v11_0_init_csb(adev); + return 0; + } + + adev->gfx.rlc.funcs->stop(adev); + + /* disable CG */ + WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0); + + /* disable PG */ + WREG32_SOC15(GC, 0, regRLC_PG_CNTL, 0); + + if 
(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { + /* legacy rlc firmware loading */ + r = gfx_v11_0_rlc_load_microcode(adev); + if (r) + return r; + } + + gfx_v11_0_init_csb(adev); + + adev->gfx.rlc.funcs->start(adev); + } + return 0; +} + +static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr) +{ + uint32_t usec_timeout = 50000; /* wait for 50ms */ + uint32_t tmp; + int i; + + /* Trigger an invalidation of the L1 instruction caches */ + tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); + tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1); + WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp); + + /* Wait for invalidation complete */ + for (i = 0; i < usec_timeout; i++) { + tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); + if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, + INVALIDATE_CACHE_COMPLETE)) + break; + udelay(1); + } + + if (i >= usec_timeout) { + dev_err(adev->dev, "failed to invalidate instruction cache\n"); + return -EINVAL; + } + + if (amdgpu_emu_mode == 1) + adev->hdp.funcs->flush_hdp(adev, NULL); + + tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL); + tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); + tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0); + tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0); + tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1); + WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp); + + /* Program me ucode address into instruction cache address register */ + WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, + lower_32_bits(addr) & 0xFFFFF000); + WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI, + upper_32_bits(addr)); + + return 0; +} + +static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr) +{ + uint32_t usec_timeout = 50000; /* wait for 50ms */ + uint32_t tmp; + int i; + + /* Trigger an invalidation of the L1 instruction caches */ + tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); + tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1); + WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp); + + /* Wait for invalidation complete */ + for (i = 0; i < usec_timeout; i++) { + tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); + if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, + INVALIDATE_CACHE_COMPLETE)) + break; + udelay(1); + } + + if (i >= usec_timeout) { + dev_err(adev->dev, "failed to invalidate instruction cache\n"); + return -EINVAL; + } + + if (amdgpu_emu_mode == 1) + adev->hdp.funcs->flush_hdp(adev, NULL); + + tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL); + tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); + tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0); + tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0); + tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1); + WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp); + + /* Program pfp ucode address into instruction cache address register */ + WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, + lower_32_bits(addr) & 0xFFFFF000); + WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI, + upper_32_bits(addr)); + + return 0; +} + +static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr) +{ + uint32_t usec_timeout = 50000; /* wait for 50ms */ + uint32_t tmp; + int i; + + /* Trigger an invalidation of the L1 instruction caches */ + tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); + tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1); + + WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp); + + /* Wait for invalidation complete */ + for (i = 0; i < usec_timeout;
i++) { + tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); + if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL, + INVALIDATE_CACHE_COMPLETE)) + break; + udelay(1); + } + + if (i >= usec_timeout) { + dev_err(adev->dev, "failed to invalidate instruction cache\n"); + return -EINVAL; + } + + if (amdgpu_emu_mode == 1) + adev->hdp.funcs->flush_hdp(adev, NULL); + + tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL); + tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); + tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0); + tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1); + WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp); + + /* Program mec1 ucode address into instruction cache address register */ + WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, + lower_32_bits(addr) & 0xFFFFF000); + WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI, + upper_32_bits(addr)); + + return 0; +} + +static int gfx_v11_0_config_pfp_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2) +{ + uint32_t usec_timeout = 50000; /* wait for 50ms */ + uint32_t tmp; + unsigned i, pipe_id; + const struct gfx_firmware_header_v2_0 *pfp_hdr; + + pfp_hdr = (const struct gfx_firmware_header_v2_0 *) + adev->gfx.pfp_fw->data; + + WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, + lower_32_bits(addr)); + WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI, + upper_32_bits(addr)); + + tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL); + tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); + tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0); + tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0); + WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp); + + /* + * Programming any of the CP_PFP_IC_BASE registers + * forces invalidation of the PFP L1 I$. Wait for the + * invalidation complete + */ + for (i = 0; i < usec_timeout; i++) { + tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); + if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, + INVALIDATE_CACHE_COMPLETE)) + break; + udelay(1); + } + + if (i >= usec_timeout) { + dev_err(adev->dev, "failed to invalidate instruction cache\n"); + return -EINVAL; + } + + /* Prime the L1 instruction caches */ + tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); + tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1); + WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp); + /* Wait for the cache to be primed */ + for (i = 0; i < usec_timeout; i++) { + tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); + if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, + ICACHE_PRIMED)) + break; + udelay(1); + } + + if (i >= usec_timeout) { + dev_err(adev->dev, "failed to prime instruction cache\n"); + return -EINVAL; + } + + mutex_lock(&adev->srbm_mutex); + for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { + soc21_grbm_select(adev, 0, pipe_id, 0, 0); + WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START, + (pfp_hdr->ucode_start_addr_hi << 30) | + (pfp_hdr->ucode_start_addr_lo >> 2)); + WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI, + pfp_hdr->ucode_start_addr_hi >> 2); + + /* + * Program CP_ME_CNTL to reset given PIPE to take + * effect of CP_PFP_PRGRM_CNTR_START. + */ + tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); + if (pipe_id == 0) + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, + PFP_PIPE0_RESET, 1); + else + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, + PFP_PIPE1_RESET, 1); + WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); + + /* Clear pfp pipe0 reset bit.
*/ + if (pipe_id == 0) + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, + PFP_PIPE0_RESET, 0); + else + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, + PFP_PIPE1_RESET, 0); + WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); + + WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO, + lower_32_bits(addr2)); + WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI, + upper_32_bits(addr2)); + } + soc21_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + + tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); + tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); + tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); + WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); + + /* Invalidate the data caches */ + tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); + tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); + WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); + + for (i = 0; i < usec_timeout; i++) { + tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); + if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, + INVALIDATE_DCACHE_COMPLETE)) + break; + udelay(1); + } + + if (i >= usec_timeout) { + dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); + return -EINVAL; + } + + return 0; +} + +static int gfx_v11_0_config_me_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2) +{ + uint32_t usec_timeout = 50000; /* wait for 50ms */ + uint32_t tmp; + unsigned i, pipe_id; + const struct gfx_firmware_header_v2_0 *me_hdr; + + me_hdr = (const struct gfx_firmware_header_v2_0 *) + adev->gfx.me_fw->data; + + WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, + lower_32_bits(addr)); + WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI, + upper_32_bits(addr)); + + tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL); + tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); + tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0); + tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0); + WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp); + + /* + * Programming any of the CP_ME_IC_BASE registers + * forces invalidation of the ME L1 I$. Wait for the + * invalidation complete + */ + for (i = 0; i < usec_timeout; i++) { + tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); + if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, + INVALIDATE_CACHE_COMPLETE)) + break; + udelay(1); + } + + if (i >= usec_timeout) { + dev_err(adev->dev, "failed to invalidate instruction cache\n"); + return -EINVAL; + } + + /* Prime the instruction caches */ + tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); + tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1); + WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp); + + /* Wait for the instruction cache to be primed */ + for (i = 0; i < usec_timeout; i++) { + tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); + if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, + ICACHE_PRIMED)) + break; + udelay(1); + } + + if (i >= usec_timeout) { + dev_err(adev->dev, "failed to prime instruction cache\n"); + return -EINVAL; + } + + mutex_lock(&adev->srbm_mutex); + for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { + soc21_grbm_select(adev, 0, pipe_id, 0, 0); + WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START, + (me_hdr->ucode_start_addr_hi << 30) | + (me_hdr->ucode_start_addr_lo >> 2) ); + WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI, + me_hdr->ucode_start_addr_hi>>2); + + /* + * Program CP_ME_CNTL to reset given PIPE to take + * effect of CP_ME_PRGRM_CNTR_START.
+ */ + tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); + if (pipe_id == 0) + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, + ME_PIPE0_RESET, 1); + else + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, + ME_PIPE1_RESET, 1); + WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); + + /* Clear me pipe reset bit. */ + if (pipe_id == 0) + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, + ME_PIPE0_RESET, 0); + else + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, + ME_PIPE1_RESET, 0); + WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); + + WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO, + lower_32_bits(addr2)); + WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI, + upper_32_bits(addr2)); + } + soc21_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + + tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); + tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); + tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); + WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); + + /* Invalidate the data caches */ + tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); + tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); + WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); + + for (i = 0; i < usec_timeout; i++) { + tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); + if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, + INVALIDATE_DCACHE_COMPLETE)) + break; + udelay(1); + } + + if (i >= usec_timeout) { + dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); + return -EINVAL; + } + + return 0; +} + +static int gfx_v11_0_config_mec_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2) +{ + uint32_t usec_timeout = 50000; /* wait for 50ms */ + uint32_t tmp; + unsigned i; + const struct gfx_firmware_header_v2_0 *mec_hdr; + + mec_hdr = (const struct gfx_firmware_header_v2_0 *) + adev->gfx.mec_fw->data; + + tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL); + tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); + tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0); + tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); + WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp); + + tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL); + tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0); + tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0); + WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp); + + mutex_lock(&adev->srbm_mutex); + for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { + soc21_grbm_select(adev, 1, i, 0, 0); + + WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, addr2); + WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI, + upper_32_bits(addr2)); + + WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START, + mec_hdr->ucode_start_addr_lo >> 2 | + mec_hdr->ucode_start_addr_hi << 30); + WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI, + mec_hdr->ucode_start_addr_hi >> 2); + + WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, addr); + WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI, + upper_32_bits(addr)); + } + mutex_unlock(&adev->srbm_mutex); + soc21_grbm_select(adev, 0, 0, 0, 0); + + /* Trigger an invalidation of the L1 data cache */ + tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); + tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1); + WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp); + + /* Wait for invalidation complete */ + for (i = 0; i < usec_timeout; i++) { + tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); + if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL, + INVALIDATE_DCACHE_COMPLETE)) + break; + udelay(1); + } + + if (i >= usec_timeout) { +
dev_err(adev->dev, "failed to invalidate instruction cache\n"); + return -EINVAL; + } + + /* Trigger an invalidation of the L1 instruction caches */ + tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); + tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1); + WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp); + + /* Wait for invalidation complete */ + for (i = 0; i < usec_timeout; i++) { + tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); + if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL, + INVALIDATE_CACHE_COMPLETE)) + break; + udelay(1); + } + + if (i >= usec_timeout) { + dev_err(adev->dev, "failed to invalidate instruction cache\n"); + return -EINVAL; + } + + return 0; +} + +static void gfx_v11_0_config_gfx_rs64(struct amdgpu_device *adev) +{ + const struct gfx_firmware_header_v2_0 *pfp_hdr; + const struct gfx_firmware_header_v2_0 *me_hdr; + const struct gfx_firmware_header_v2_0 *mec_hdr; + uint32_t pipe_id, tmp; + + mec_hdr = (const struct gfx_firmware_header_v2_0 *) + adev->gfx.mec_fw->data; + me_hdr = (const struct gfx_firmware_header_v2_0 *) + adev->gfx.me_fw->data; + pfp_hdr = (const struct gfx_firmware_header_v2_0 *) + adev->gfx.pfp_fw->data; + + /* config pfp program start addr */ + for (pipe_id = 0; pipe_id < 2; pipe_id++) { + soc21_grbm_select(adev, 0, pipe_id, 0, 0); + WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START, + (pfp_hdr->ucode_start_addr_hi << 30) | + (pfp_hdr->ucode_start_addr_lo >> 2)); + WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI, + pfp_hdr->ucode_start_addr_hi >> 2); + } + soc21_grbm_select(adev, 0, 0, 0, 0); + + /* reset pfp pipe */ + tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 1); + WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); + + /* clear pfp pipe reset */ + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 0); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 0); + WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); + + /* config me program start addr */ + for (pipe_id = 0; pipe_id < 2; pipe_id++) { + soc21_grbm_select(adev, 0, pipe_id, 0, 0); + WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START, + (me_hdr->ucode_start_addr_hi << 30) | + (me_hdr->ucode_start_addr_lo >> 2) ); + WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI, + me_hdr->ucode_start_addr_hi>>2); + } + soc21_grbm_select(adev, 0, 0, 0, 0); + + /* reset me pipe */ + tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 1); + WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); + + /* clear me pipe reset */ + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 0); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 0); + WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); + + /* config mec program start addr */ + for (pipe_id = 0; pipe_id < 4; pipe_id++) { + soc21_grbm_select(adev, 1, pipe_id, 0, 0); + WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START, + mec_hdr->ucode_start_addr_lo >> 2 | + mec_hdr->ucode_start_addr_hi << 30); + WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI, + mec_hdr->ucode_start_addr_hi >> 2); + } + soc21_grbm_select(adev, 0, 0, 0, 0); +} + +static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev) +{ + uint32_t cp_status; + uint32_t bootload_status; + int i, r; + uint64_t addr, addr2; + + for (i = 0; i < adev->usec_timeout; i++) { + cp_status = RREG32_SOC15(GC, 0, regCP_STAT); + bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS); + if 
((cp_status == 0) && + (REG_GET_FIELD(bootload_status, + RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) { + break; + } + udelay(1); + } + + if (i >= adev->usec_timeout) { + dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n"); + return -ETIMEDOUT; + } + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { + if (adev->gfx.rs64_enable) { + addr = adev->gfx.rlc.rlc_autoload_gpu_addr + + rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME].offset; + addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr + + rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME_P0_STACK].offset; + r = gfx_v11_0_config_me_cache_rs64(adev, addr, addr2); + if (r) + return r; + addr = adev->gfx.rlc.rlc_autoload_gpu_addr + + rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP].offset; + addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr + + rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK].offset; + r = gfx_v11_0_config_pfp_cache_rs64(adev, addr, addr2); + if (r) + return r; + addr = adev->gfx.rlc.rlc_autoload_gpu_addr + + rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC].offset; + addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr + + rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK].offset; + r = gfx_v11_0_config_mec_cache_rs64(adev, addr, addr2); + if (r) + return r; + } else { + addr = adev->gfx.rlc.rlc_autoload_gpu_addr + + rlc_autoload_info[SOC21_FIRMWARE_ID_CP_ME].offset; + r = gfx_v11_0_config_me_cache(adev, addr); + if (r) + return r; + addr = adev->gfx.rlc.rlc_autoload_gpu_addr + + rlc_autoload_info[SOC21_FIRMWARE_ID_CP_PFP].offset; + r = gfx_v11_0_config_pfp_cache(adev, addr); + if (r) + return r; + addr = adev->gfx.rlc.rlc_autoload_gpu_addr + + rlc_autoload_info[SOC21_FIRMWARE_ID_CP_MEC].offset; + r = gfx_v11_0_config_mec_cache(adev, addr); + if (r) + return r; + } + } + + return 0; +} + +static int gfx_v11_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) +{ + int i; + u32 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); + + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1); + WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); + + for (i = 0; i < adev->usec_timeout; i++) { + if (RREG32_SOC15(GC, 0, regCP_STAT) == 0) + break; + udelay(1); + } + + if (i >= adev->usec_timeout) + DRM_ERROR("failed to %s cp gfx\n", enable ? 
"unhalt" : "halt"); + + return 0; +} + +static int gfx_v11_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev) +{ + int r; + const struct gfx_firmware_header_v1_0 *pfp_hdr; + const __le32 *fw_data; + unsigned i, fw_size; + + pfp_hdr = (const struct gfx_firmware_header_v1_0 *) + adev->gfx.pfp_fw->data; + + amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); + + fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes); + + r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, + &adev->gfx.pfp.pfp_fw_obj, + &adev->gfx.pfp.pfp_fw_gpu_addr, + (void **)&adev->gfx.pfp.pfp_fw_ptr); + if (r) { + dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r); + gfx_v11_0_pfp_fini(adev); + return r; + } + + memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size); + + amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj); + amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj); + + gfx_v11_0_config_pfp_cache(adev, adev->gfx.pfp.pfp_fw_gpu_addr); + + WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, 0); + + for (i = 0; i < pfp_hdr->jt_size; i++) + WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_DATA, + le32_to_cpup(fw_data + pfp_hdr->jt_offset + i)); + + WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version); + + return 0; +} + +static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev) +{ + int r; + const struct gfx_firmware_header_v2_0 *pfp_hdr; + const __le32 *fw_ucode, *fw_data; + unsigned i, pipe_id, fw_ucode_size, fw_data_size; + uint32_t tmp; + uint32_t usec_timeout = 50000; /* wait for 50ms */ + + pfp_hdr = (const struct gfx_firmware_header_v2_0 *) + adev->gfx.pfp_fw->data; + + amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); + + /* instruction */ + fw_ucode = (const __le32 *)(adev->gfx.pfp_fw->data + + le32_to_cpu(pfp_hdr->ucode_offset_bytes)); + fw_ucode_size = le32_to_cpu(pfp_hdr->ucode_size_bytes); + /* data */ + fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + + le32_to_cpu(pfp_hdr->data_offset_bytes)); + fw_data_size = le32_to_cpu(pfp_hdr->data_size_bytes); + + /* 64kb align */ + r = amdgpu_bo_create_reserved(adev, fw_ucode_size, + 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.pfp.pfp_fw_obj, + &adev->gfx.pfp.pfp_fw_gpu_addr, + (void **)&adev->gfx.pfp.pfp_fw_ptr); + if (r) { + dev_err(adev->dev, "(%d) failed to create pfp ucode fw bo\n", r); + gfx_v11_0_pfp_fini(adev); + return r; + } + + r = amdgpu_bo_create_reserved(adev, fw_data_size, + 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.pfp.pfp_fw_data_obj, + &adev->gfx.pfp.pfp_fw_data_gpu_addr, + (void **)&adev->gfx.pfp.pfp_fw_data_ptr); + if (r) { + dev_err(adev->dev, "(%d) failed to create pfp data fw bo\n", r); + gfx_v11_0_pfp_fini(adev); + return r; + } + + memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_ucode, fw_ucode_size); + memcpy(adev->gfx.pfp.pfp_fw_data_ptr, fw_data, fw_data_size); + + amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj); + amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_data_obj); + amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj); + amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj); + + if (amdgpu_emu_mode == 1) + adev->hdp.funcs->flush_hdp(adev, NULL); + + WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, + lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); + WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI, + upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); + + tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL); + tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); + tmp = REG_SET_FIELD(tmp, 
CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0); + tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0); + WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp); + + /* + * Programming any of the CP_PFP_IC_BASE registers + * forces invalidation of the PFP L1 I$. Wait for the + * invalidation complete + */ + for (i = 0; i < usec_timeout; i++) { + tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); + if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, + INVALIDATE_CACHE_COMPLETE)) + break; + udelay(1); + } + + if (i >= usec_timeout) { + dev_err(adev->dev, "failed to invalidate instruction cache\n"); + return -EINVAL; + } + + /* Prime the L1 instruction caches */ + tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); + tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1); + WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp); + /* Wait for the cache to be primed */ + for (i = 0; i < usec_timeout; i++) { + tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); + if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, + ICACHE_PRIMED)) + break; + udelay(1); + } + + if (i >= usec_timeout) { + dev_err(adev->dev, "failed to prime instruction cache\n"); + return -EINVAL; + } + + mutex_lock(&adev->srbm_mutex); + for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { + soc21_grbm_select(adev, 0, pipe_id, 0, 0); + WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START, + (pfp_hdr->ucode_start_addr_hi << 30) | + (pfp_hdr->ucode_start_addr_lo >> 2) ); + WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI, + pfp_hdr->ucode_start_addr_hi>>2); + + /* + * Program CP_ME_CNTL to reset given PIPE to take + * effect of CP_PFP_PRGRM_CNTR_START. + */ + tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); + if (pipe_id == 0) + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, + PFP_PIPE0_RESET, 1); + else + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, + PFP_PIPE1_RESET, 1); + WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); + + /* Clear pfp pipe0 reset bit.
*/ + if (pipe_id == 0) + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, + PFP_PIPE0_RESET, 0); + else + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, + PFP_PIPE1_RESET, 0); + WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); + + WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO, + lower_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr)); + WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI, + upper_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr)); + } + soc21_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + + tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); + tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); + tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); + WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); + + /* Invalidate the data caches */ + tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); + tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); + WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); + + for (i = 0; i < usec_timeout; i++) { + tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); + if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, + INVALIDATE_DCACHE_COMPLETE)) + break; + udelay(1); + } + + if (i >= usec_timeout) { + dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); + return -EINVAL; + } + + return 0; +} + +static int gfx_v11_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev) +{ + int r; + const struct gfx_firmware_header_v1_0 *me_hdr; + const __le32 *fw_data; + unsigned i, fw_size; + + me_hdr = (const struct gfx_firmware_header_v1_0 *) + adev->gfx.me_fw->data; + + amdgpu_ucode_print_gfx_hdr(&me_hdr->header); + + fw_data = (const __le32 *)(adev->gfx.me_fw->data + + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes); + + r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, + &adev->gfx.me.me_fw_obj, + &adev->gfx.me.me_fw_gpu_addr, + (void **)&adev->gfx.me.me_fw_ptr); + if (r) { + dev_err(adev->dev, "(%d) failed to create me fw bo\n", r); + gfx_v11_0_me_fini(adev); + return r; + } + + memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size); + + amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj); + amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj); + + gfx_v11_0_config_me_cache(adev, adev->gfx.me.me_fw_gpu_addr); + + WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, 0); + + for (i = 0; i < me_hdr->jt_size; i++) + WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_DATA, + le32_to_cpup(fw_data + me_hdr->jt_offset + i)); + + WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, adev->gfx.me_fw_version); + + return 0; +} + +static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev) +{ + int r; + const struct gfx_firmware_header_v2_0 *me_hdr; + const __le32 *fw_ucode, *fw_data; + unsigned i, pipe_id, fw_ucode_size, fw_data_size; + uint32_t tmp; + uint32_t usec_timeout = 50000; /* wait for 50ms */ + + me_hdr = (const struct gfx_firmware_header_v2_0 *) + adev->gfx.me_fw->data; + + amdgpu_ucode_print_gfx_hdr(&me_hdr->header); + + /* instruction */ + fw_ucode = (const __le32 *)(adev->gfx.me_fw->data + + le32_to_cpu(me_hdr->ucode_offset_bytes)); + fw_ucode_size = le32_to_cpu(me_hdr->ucode_size_bytes); + /* data */ + fw_data = (const __le32 *)(adev->gfx.me_fw->data + + le32_to_cpu(me_hdr->data_offset_bytes)); + fw_data_size = le32_to_cpu(me_hdr->data_size_bytes); + + /* 64kb align*/ + r = amdgpu_bo_create_reserved(adev, fw_ucode_size, + 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.me.me_fw_obj, + &adev->gfx.me.me_fw_gpu_addr, + (void 
**)&adev->gfx.me.me_fw_ptr); + if (r) { + dev_err(adev->dev, "(%d) failed to create me ucode bo\n", r); + gfx_v11_0_me_fini(adev); + return r; + } + + r = amdgpu_bo_create_reserved(adev, fw_data_size, + 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.me.me_fw_data_obj, + &adev->gfx.me.me_fw_data_gpu_addr, + (void **)&adev->gfx.me.me_fw_data_ptr); + if (r) { + dev_err(adev->dev, "(%d) failed to create me data bo\n", r); + gfx_v11_0_me_fini(adev); + return r; + } + + memcpy(adev->gfx.me.me_fw_ptr, fw_ucode, fw_ucode_size); + memcpy(adev->gfx.me.me_fw_data_ptr, fw_data, fw_data_size); + + amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj); + amdgpu_bo_kunmap(adev->gfx.me.me_fw_data_obj); + amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj); + amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj); + + if (amdgpu_emu_mode == 1) + adev->hdp.funcs->flush_hdp(adev, NULL); + + WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, + lower_32_bits(adev->gfx.me.me_fw_gpu_addr)); + WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI, + upper_32_bits(adev->gfx.me.me_fw_gpu_addr)); + + tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL); + tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); + tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0); + tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0); + WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp); + + /* + * Programming any of the CP_ME_IC_BASE registers + * forces invalidation of the ME L1 I$. Wait for the + * invalidation complete + */ + for (i = 0; i < usec_timeout; i++) { + tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); + if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, + INVALIDATE_CACHE_COMPLETE)) + break; + udelay(1); + } + + if (i >= usec_timeout) { + dev_err(adev->dev, "failed to invalidate instruction cache\n"); + return -EINVAL; + } + + /* Prime the instruction caches */ + tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); + tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1); + WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp); + + /* Wait for the instruction cache to be primed */ + for (i = 0; i < usec_timeout; i++) { + tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); + if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, + ICACHE_PRIMED)) + break; + udelay(1); + } + + if (i >= usec_timeout) { + dev_err(adev->dev, "failed to prime instruction cache\n"); + return -EINVAL; + } + + mutex_lock(&adev->srbm_mutex); + for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { + soc21_grbm_select(adev, 0, pipe_id, 0, 0); + WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START, + (me_hdr->ucode_start_addr_hi << 30) | + (me_hdr->ucode_start_addr_lo >> 2) ); + WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI, + me_hdr->ucode_start_addr_hi>>2); + + /* + * Program CP_ME_CNTL to reset given PIPE to take + * effect of CP_ME_PRGRM_CNTR_START. + */ + tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); + if (pipe_id == 0) + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, + ME_PIPE0_RESET, 1); + else + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, + ME_PIPE1_RESET, 1); + WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); + + /* Clear me pipe reset bit.
*/ + if (pipe_id == 0) + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, + ME_PIPE0_RESET, 0); + else + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, + ME_PIPE1_RESET, 0); + WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); + + WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO, + lower_32_bits(adev->gfx.me.me_fw_data_gpu_addr)); + WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI, + upper_32_bits(adev->gfx.me.me_fw_data_gpu_addr)); + } + soc21_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + + tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); + tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); + tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); + WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); + + /* Invalidate the data caches */ + tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); + tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); + WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); + + for (i = 0; i < usec_timeout; i++) { + tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); + if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, + INVALIDATE_DCACHE_COMPLETE)) + break; + udelay(1); + } + + if (i >= usec_timeout) { + dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); + return -EINVAL; + } + + return 0; +} + +static int gfx_v11_0_cp_gfx_load_microcode(struct amdgpu_device *adev) +{ + int r; + + if (!adev->gfx.me_fw || !adev->gfx.pfp_fw) + return -EINVAL; + + gfx_v11_0_cp_gfx_enable(adev, false); + + if (adev->gfx.rs64_enable) + r = gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(adev); + else + r = gfx_v11_0_cp_gfx_load_pfp_microcode(adev); + if (r) { + dev_err(adev->dev, "(%d) failed to load pfp fw\n", r); + return r; + } + + if (adev->gfx.rs64_enable) + r = gfx_v11_0_cp_gfx_load_me_microcode_rs64(adev); + else + r = gfx_v11_0_cp_gfx_load_me_microcode(adev); + if (r) { + dev_err(adev->dev, "(%d) failed to load me fw\n", r); + return r; + } + + return 0; +} + +static int gfx_v11_0_cp_gfx_start(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + const struct cs_section_def *sect = NULL; + const struct cs_extent_def *ext = NULL; + int r, i; + int ctx_reg_offset; + + /* init the CP */ + WREG32_SOC15(GC, 0, regCP_MAX_CONTEXT, + adev->gfx.config.max_hw_contexts - 1); + WREG32_SOC15(GC, 0, regCP_DEVICE_ID, 1); + + if (!amdgpu_async_gfx_ring) + gfx_v11_0_cp_gfx_enable(adev, true); + + ring = &adev->gfx.gfx_ring[0]; + r = amdgpu_ring_alloc(ring, gfx_v11_0_get_csb_size(adev)); + if (r) { + DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); + return r; + } + + amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); + + amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); + amdgpu_ring_write(ring, 0x80000000); + amdgpu_ring_write(ring, 0x80000000); + + for (sect = gfx11_cs_data; sect->section != NULL; ++sect) { + for (ext = sect->section; ext->extent != NULL; ++ext) { + if (sect->id == SECT_CONTEXT) { + amdgpu_ring_write(ring, + PACKET3(PACKET3_SET_CONTEXT_REG, + ext->reg_count)); + amdgpu_ring_write(ring, ext->reg_index - + PACKET3_SET_CONTEXT_REG_START); + for (i = 0; i < ext->reg_count; i++) + amdgpu_ring_write(ring, ext->extent[i]); + } + } + } + + ctx_reg_offset = + SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START; + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); + amdgpu_ring_write(ring, ctx_reg_offset); + amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override); + + 
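/*
 * [Editorial sketch, not part of the original patch] The PACKET3() helper
 * used throughout this clear-state sequence builds a type-3 CP packet
 * header; in the amdgpu packet headers it is defined roughly as:
 *
 *     #define PACKET3(op, n) ((3u << 30) | (((op) & 0xFF) << 8) | ((n) & 0x3FFF))
 *
 * where n is the number of payload dwords that follow, minus one. So the
 * PACKET3(PACKET3_SET_CONTEXT_REG, 1) emitted above is followed by exactly
 * two dwords: a register offset relative to PACKET3_SET_CONTEXT_REG_START
 * and the value to write (here PA_SC_TILE_STEERING_OVERRIDE).
 */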
amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE); + + amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_commit(ring); + + /* submit cs packet to copy state 0 to next available state */ + if (adev->gfx.num_gfx_rings > 1) { + /* maximum supported gfx ring is 2 */ + ring = &adev->gfx.gfx_ring[1]; + r = amdgpu_ring_alloc(ring, 2); + if (r) { + DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); + return r; + } + + amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_commit(ring); + } + return 0; +} + +static void gfx_v11_0_cp_gfx_switch_pipe(struct amdgpu_device *adev, + CP_PIPE_ID pipe) +{ + u32 tmp; + + tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL); + tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe); + + WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp); +} + +static void gfx_v11_0_cp_gfx_set_doorbell(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ + u32 tmp; + + tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL); + if (ring->use_doorbell) { + tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, + DOORBELL_OFFSET, ring->doorbell_index); + tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, + DOORBELL_EN, 1); + } else { + tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, + DOORBELL_EN, 0); + } + WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, tmp); + + tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER, + DOORBELL_RANGE_LOWER, ring->doorbell_index); + WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, tmp); + + WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER, + CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK); +} + +static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + u32 tmp; + u32 rb_bufsz; + u64 rb_addr, rptr_addr, wptr_gpu_addr; + u32 i; + + /* Set the write pointer delay */ + WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0); + + /* set the RB to use vmid 0 */ + WREG32_SOC15(GC, 0, regCP_RB_VMID, 0); + + /* Init gfx ring 0 for pipe 0 */ + mutex_lock(&adev->srbm_mutex); + gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0); + + /* Set ring buffer size */ + ring = &adev->gfx.gfx_ring[0]; + rb_bufsz = order_base_2(ring->ring_size / 8); + tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz); + tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2); +#ifdef __BIG_ENDIAN + tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1); +#endif + WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp); + + /* Initialize the ring buffer's write pointers */ + ring->wptr = 0; + WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); + + /* set the wb address whether it's enabled or not */ + rptr_addr = ring->rptr_gpu_addr; + WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); + WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & + CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK); + + wptr_gpu_addr = ring->wptr_gpu_addr; + WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO, + lower_32_bits(wptr_gpu_addr)); + WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI, + upper_32_bits(wptr_gpu_addr)); + + mdelay(1); + WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp); + + rb_addr = ring->gpu_addr >> 8; + WREG32_SOC15(GC, 0, regCP_RB0_BASE, rb_addr); + WREG32_SOC15(GC, 0, regCP_RB0_BASE_HI, upper_32_bits(rb_addr)); + + WREG32_SOC15(GC, 0, regCP_RB_ACTIVE, 1); + + gfx_v11_0_cp_gfx_set_doorbell(adev, ring); +
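/*
 * [Editorial note, illustrative arithmetic only] RB_BUFSZ appears to encode
 * the ring size as log2 of the size in 8-byte units, which is why the code
 * above computes order_base_2(ring->ring_size / 8). For a hypothetical
 * 64 KiB ring:
 *
 *     rb_bufsz = order_base_2(65536 / 8);   // order_base_2(8192) == 13
 *     // and RB_BLKSZ = rb_bufsz - 2 == 11
 */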
mutex_unlock(&adev->srbm_mutex); + + /* Init gfx ring 1 for pipe 1 */ + if (adev->gfx.num_gfx_rings > 1) { + mutex_lock(&adev->srbm_mutex); + gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID1); + /* maximum supported gfx ring is 2 */ + ring = &adev->gfx.gfx_ring[1]; + rb_bufsz = order_base_2(ring->ring_size / 8); + tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz); + tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2); + WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp); + /* Initialize the ring buffer's write pointers */ + ring->wptr = 0; + WREG32_SOC15(GC, 0, regCP_RB1_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(GC, 0, regCP_RB1_WPTR_HI, upper_32_bits(ring->wptr)); + /* Set the wb address whether it's enabled or not */ + rptr_addr = ring->rptr_gpu_addr; + WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr)); + WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & + CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK); + wptr_gpu_addr = ring->wptr_gpu_addr; + WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO, + lower_32_bits(wptr_gpu_addr)); + WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI, + upper_32_bits(wptr_gpu_addr)); + + mdelay(1); + WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp); + + rb_addr = ring->gpu_addr >> 8; + WREG32_SOC15(GC, 0, regCP_RB1_BASE, rb_addr); + WREG32_SOC15(GC, 0, regCP_RB1_BASE_HI, upper_32_bits(rb_addr)); + WREG32_SOC15(GC, 0, regCP_RB1_ACTIVE, 1); + + gfx_v11_0_cp_gfx_set_doorbell(adev, ring); + mutex_unlock(&adev->srbm_mutex); + } + /* Switch to pipe 0 */ + mutex_lock(&adev->srbm_mutex); + gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0); + mutex_unlock(&adev->srbm_mutex); + + /* start the ring */ + gfx_v11_0_cp_gfx_start(adev); + + for (i = 0; i < adev->gfx.num_gfx_rings; i++) { + ring = &adev->gfx.gfx_ring[i]; + ring->sched.ready = true; + } + + return 0; +} + +static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) +{ + u32 data; + + if (adev->gfx.rs64_enable) { + data = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL); + data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE, + enable ? 0 : 1); + data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, + enable ? 0 : 1); + data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, + enable ? 0 : 1); + data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, + enable ? 0 : 1); + data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, + enable ? 0 : 1); + data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE, + enable ? 1 : 0); + data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE, + enable ? 1 : 0); + data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE, + enable ? 1 : 0); + data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE, + enable ? 1 : 0); + data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT, + enable ?
0 : 1); + WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, data); + } else { + data = RREG32_SOC15(GC, 0, regCP_MEC_CNTL); + + if (enable) { + data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 0); + if (!adev->enable_mes_kiq) + data = REG_SET_FIELD(data, CP_MEC_CNTL, + MEC_ME2_HALT, 0); + } else { + data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 1); + data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME2_HALT, 1); + } + WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data); + } + + adev->gfx.kiq.ring.sched.ready = enable; + + udelay(50); +} + +static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev) +{ + const struct gfx_firmware_header_v1_0 *mec_hdr; + const __le32 *fw_data; + unsigned i, fw_size; + u32 *fw = NULL; + int r; + + if (!adev->gfx.mec_fw) + return -EINVAL; + + gfx_v11_0_cp_compute_enable(adev, false); + + mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; + amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); + + fw_data = (const __le32 *) + (adev->gfx.mec_fw->data + + le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes); + + r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, + &adev->gfx.mec.mec_fw_obj, + &adev->gfx.mec.mec_fw_gpu_addr, + (void **)&fw); + if (r) { + dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r); + gfx_v11_0_mec_fini(adev); + return r; + } + + memcpy(fw, fw_data, fw_size); + + amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); + amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); + + gfx_v11_0_config_mec_cache(adev, adev->gfx.mec.mec_fw_gpu_addr); + + /* MEC1 */ + WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, 0); + + for (i = 0; i < mec_hdr->jt_size; i++) + WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_DATA, + le32_to_cpup(fw_data + mec_hdr->jt_offset + i)); + + WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version); + + return 0; +} + +static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev) +{ + const struct gfx_firmware_header_v2_0 *mec_hdr; + const __le32 *fw_ucode, *fw_data; + u32 tmp, fw_ucode_size, fw_data_size; + u32 i, usec_timeout = 50000; /* Wait for 50 ms */ + u32 *fw_ucode_ptr, *fw_data_ptr; + int r; + + if (!adev->gfx.mec_fw) + return -EINVAL; + + gfx_v11_0_cp_compute_enable(adev, false); + + mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data; + amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); + + fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data + + le32_to_cpu(mec_hdr->ucode_offset_bytes)); + fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes); + + fw_data = (const __le32 *) (adev->gfx.mec_fw->data + + le32_to_cpu(mec_hdr->data_offset_bytes)); + fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes); + + r = amdgpu_bo_create_reserved(adev, fw_ucode_size, + 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.mec.mec_fw_obj, + &adev->gfx.mec.mec_fw_gpu_addr, + (void **)&fw_ucode_ptr); + if (r) { + dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r); + gfx_v11_0_mec_fini(adev); + return r; + } + + r = amdgpu_bo_create_reserved(adev, fw_data_size, + 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.mec.mec_fw_data_obj, + &adev->gfx.mec.mec_fw_data_gpu_addr, + (void **)&fw_data_ptr); + if (r) { + dev_err(adev->dev, "(%d) failed to create mec fw data bo\n", r); + gfx_v11_0_mec_fini(adev); + return r; + } + + memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size); + memcpy(fw_data_ptr, fw_data, fw_data_size); + + amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); +
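/*
 * [Editorial sketch, assumptions noted] Every firmware-load path in this
 * patch follows the same amdgpu_bo_create_reserved() pattern: the helper
 * allocates, pins and CPU-maps a buffer object in one call, so the caller
 * only has to copy the image and release the mapping. The names below
 * (bo, gpu_addr, cpu_ptr, fw_image, size) are placeholders:
 *
 *     r = amdgpu_bo_create_reserved(adev, size, 64 * 1024,
 *                                   AMDGPU_GEM_DOMAIN_VRAM,
 *                                   &bo, &gpu_addr, (void **)&cpu_ptr);
 *     if (!r) {
 *             memcpy(cpu_ptr, fw_image, size);   // stage the firmware image
 *             amdgpu_bo_kunmap(bo);              // drop the CPU mapping
 *             amdgpu_bo_unreserve(bo);           // release the reservation
 *     }
 */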
amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj); + amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); + amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj); + + tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL); + tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); + tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0); + tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); + WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp); + + tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL); + tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0); + tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0); + WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp); + + mutex_lock(&adev->srbm_mutex); + for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { + soc21_grbm_select(adev, 1, i, 0, 0); + + WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, adev->gfx.mec.mec_fw_data_gpu_addr); + WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI, + upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr)); + + WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START, + mec_hdr->ucode_start_addr_lo >> 2 | + mec_hdr->ucode_start_addr_hi << 30); + WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI, + mec_hdr->ucode_start_addr_hi >> 2); + + WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, adev->gfx.mec.mec_fw_gpu_addr); + WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI, + upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr)); + } + mutex_unlock(&adev->srbm_mutex); + soc21_grbm_select(adev, 0, 0, 0, 0); + + /* Trigger an invalidation of the L1 data cache */ + tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); + tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1); + WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp); + + /* Wait for invalidation complete */ + for (i = 0; i < usec_timeout; i++) { + tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); + if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL, + INVALIDATE_DCACHE_COMPLETE)) + break; + udelay(1); + } + + if (i >= usec_timeout) { + dev_err(adev->dev, "failed to invalidate instruction cache\n"); + return -EINVAL; + } + + /* Trigger an invalidation of the L1 instruction caches */ + tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); + tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1); + WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp); + + /* Wait for invalidation complete */ + for (i = 0; i < usec_timeout; i++) { + tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); + if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL, + INVALIDATE_CACHE_COMPLETE)) + break; + udelay(1); + } + + if (i >= usec_timeout) { + dev_err(adev->dev, "failed to invalidate instruction cache\n"); + return -EINVAL; + } + + return 0; +} + +static void gfx_v11_0_kiq_setting(struct amdgpu_ring *ring) +{ + uint32_t tmp; + struct amdgpu_device *adev = ring->adev; + + /* tell RLC which is KIQ queue */ + tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS); + tmp &= 0xffffff00; + tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); + WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp); + tmp |= 0x80; + WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp); +} + +static void gfx_v11_0_cp_set_doorbell_range(struct amdgpu_device *adev) +{ + /* set graphics engine doorbell range */ + WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, + (adev->doorbell_index.gfx_ring0 * 2) << 2); + WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER, + (adev->doorbell_index.gfx_userqueue_end * 2) << 2); + + /* set compute engine doorbell range */ + WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER, + (adev->doorbell_index.kiq * 2) << 2); +
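/*
 * [Editorial note, illustrative arithmetic only; assumes doorbell_index
 * values are kept in 64-bit slot units] "(index * 2) << 2" first converts
 * an index to 32-bit dword units and then to the byte offset the doorbell
 * range registers expect. For a hypothetical index of 0x10:
 *
 *     (0x10 * 2) << 2 == 0x80   // byte offset written to the register
 */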
WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER, + (adev->doorbell_index.userqueue_end * 2) << 2); +} + +static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, + struct amdgpu_mqd_prop *prop) +{ + struct v11_gfx_mqd *mqd = m; + uint64_t hqd_gpu_addr, wb_gpu_addr; + uint32_t tmp; + uint32_t rb_bufsz; + + /* set up gfx hqd wptr */ + mqd->cp_gfx_hqd_wptr = 0; + mqd->cp_gfx_hqd_wptr_hi = 0; + + /* set the pointer to the MQD */ + mqd->cp_mqd_base_addr = prop->mqd_gpu_addr & 0xfffffffc; + mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr); + + /* set up mqd control */ + tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL); + tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0); + tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1); + tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0); + mqd->cp_gfx_mqd_control = tmp; + + /* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */ + tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID); + tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0); + mqd->cp_gfx_hqd_vmid = 0; + + /* set up default queue priority level + * 0x0 = low priority, 0x1 = high priority */ + tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY); + tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0); + mqd->cp_gfx_hqd_queue_priority = tmp; + + /* set up time quantum */ + tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM); + tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1); + mqd->cp_gfx_hqd_quantum = tmp; + + /* set up gfx hqd base. this is similar as CP_RB_BASE */ + hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8; + mqd->cp_gfx_hqd_base = hqd_gpu_addr; + mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr); + + /* set up hqd_rptr_addr/_hi, similar as CP_RB_RPTR */ + wb_gpu_addr = prop->rptr_gpu_addr; + mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc; + mqd->cp_gfx_hqd_rptr_addr_hi = + upper_32_bits(wb_gpu_addr) & 0xffff; + + /* set up rb_wptr_poll addr */ + wb_gpu_addr = prop->wptr_gpu_addr; + mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; + mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; + + /* set up the gfx_hqd_control, similar as CP_RB0_CNTL */ + rb_bufsz = order_base_2(prop->queue_size / 4) - 1; + tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL); + tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz); + tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2); +#ifdef __BIG_ENDIAN + tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1); +#endif + mqd->cp_gfx_hqd_cntl = tmp; + + /* set up cp_doorbell_control */ + tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL); + if (prop->use_doorbell) { + tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, + DOORBELL_OFFSET, prop->doorbell_index); + tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, + DOORBELL_EN, 1); + } else + tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, + DOORBELL_EN, 0); + mqd->cp_rb_doorbell_control = tmp; + + /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ + mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR); + + /* activate the queue */ + mqd->cp_gfx_hqd_active = 1; + + return 0; +} + +#ifdef BRING_UP_DEBUG +static int gfx_v11_0_gfx_queue_init_register(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + struct v11_gfx_mqd *mqd = ring->mqd_ptr; + + /* set mmCP_GFX_HQD_WPTR/_HI to 0 */ + WREG32_SOC15(GC, 0, regCP_GFX_HQD_WPTR, mqd->cp_gfx_hqd_wptr); + WREG32_SOC15(GC, 0, regCP_GFX_HQD_WPTR_HI, mqd->cp_gfx_hqd_wptr_hi); + + /* set GFX_MQD_BASE
*/ + WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr); + WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi); + + /* set GFX_MQD_CONTROL */ + WREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL, mqd->cp_gfx_mqd_control); + + /* set GFX_HQD_VMID to 0 */ + WREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID, mqd->cp_gfx_hqd_vmid); + + WREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY, + mqd->cp_gfx_hqd_queue_priority); + WREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM, mqd->cp_gfx_hqd_quantum); + + /* set GFX_HQD_BASE, similar as CP_RB_BASE */ + WREG32_SOC15(GC, 0, regCP_GFX_HQD_BASE, mqd->cp_gfx_hqd_base); + WREG32_SOC15(GC, 0, regCP_GFX_HQD_BASE_HI, mqd->cp_gfx_hqd_base_hi); + + /* set GFX_HQD_RPTR_ADDR, similar as CP_RB_RPTR */ + WREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR_ADDR, mqd->cp_gfx_hqd_rptr_addr); + WREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR_ADDR_HI, mqd->cp_gfx_hqd_rptr_addr_hi); + + /* set GFX_HQD_CNTL, similar as CP_RB_CNTL */ + WREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL, mqd->cp_gfx_hqd_cntl); + + /* set RB_WPTR_POLL_ADDR */ + WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO, mqd->cp_rb_wptr_poll_addr_lo); + WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI, mqd->cp_rb_wptr_poll_addr_hi); + + /* set RB_DOORBELL_CONTROL */ + WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, mqd->cp_rb_doorbell_control); + + /* activate the queue */ + WREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE, mqd->cp_gfx_hqd_active); + + return 0; +} +#endif + +static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + struct v11_gfx_mqd *mqd = ring->mqd_ptr; + int mqd_idx = ring - &adev->gfx.gfx_ring[0]; + + if (!amdgpu_in_reset(adev) && !adev->in_suspend) { + memset((void *)mqd, 0, sizeof(*mqd)); + mutex_lock(&adev->srbm_mutex); + soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + amdgpu_ring_init_mqd(ring); +#ifdef BRING_UP_DEBUG + gfx_v11_0_gfx_queue_init_register(ring); +#endif + soc21_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + if (adev->gfx.me.mqd_backup[mqd_idx]) + memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); + } else if (amdgpu_in_reset(adev)) { + /* reset mqd with the backup copy */ + if (adev->gfx.me.mqd_backup[mqd_idx]) + memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); + /* reset the ring */ + ring->wptr = 0; + *ring->wptr_cpu_addr = 0; + amdgpu_ring_clear_ring(ring); +#ifdef BRING_UP_DEBUG + mutex_lock(&adev->srbm_mutex); + soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + gfx_v11_0_gfx_queue_init_register(ring); + soc21_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); +#endif + } else { + amdgpu_ring_clear_ring(ring); + } + + return 0; +} + +#ifndef BRING_UP_DEBUG +static int gfx_v11_0_kiq_enable_kgq(struct amdgpu_device *adev) +{ + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; + int r, i; + + if (!kiq->pmf || !kiq->pmf->kiq_map_queues) + return -EINVAL; + + r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * + adev->gfx.num_gfx_rings); + if (r) { + DRM_ERROR("Failed to lock KIQ (%d).\n", r); + return r; + } + + for (i = 0; i < adev->gfx.num_gfx_rings; i++) + kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]); + + return amdgpu_ring_test_helper(kiq_ring); +} +#endif + +static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) +{ + int r, i; + struct amdgpu_ring *ring; + + for (i = 0; i < adev->gfx.num_gfx_rings; i++) { + ring = &adev->gfx.gfx_ring[i]; + + r =
amdgpu_bo_reserve(ring->mqd_obj, false); + if (unlikely(r != 0)) + goto done; + + r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); + if (!r) { + r = gfx_v11_0_gfx_init_queue(ring); + amdgpu_bo_kunmap(ring->mqd_obj); + ring->mqd_ptr = NULL; + } + amdgpu_bo_unreserve(ring->mqd_obj); + if (r) + goto done; + } +#ifndef BRING_UP_DEBUG + r = gfx_v11_0_kiq_enable_kgq(adev); + if (r) + goto done; +#endif + r = gfx_v11_0_cp_gfx_start(adev); + if (r) + goto done; + + for (i = 0; i < adev->gfx.num_gfx_rings; i++) { + ring = &adev->gfx.gfx_ring[i]; + ring->sched.ready = true; + } +done: + return r; +} + +static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m, + struct amdgpu_mqd_prop *prop) +{ + struct v11_compute_mqd *mqd = m; + uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr; + uint32_t tmp; + + mqd->header = 0xC0310800; + mqd->compute_pipelinestat_enable = 0x00000001; + mqd->compute_static_thread_mgmt_se0 = 0xffffffff; + mqd->compute_static_thread_mgmt_se1 = 0xffffffff; + mqd->compute_static_thread_mgmt_se2 = 0xffffffff; + mqd->compute_static_thread_mgmt_se3 = 0xffffffff; + mqd->compute_misc_reserved = 0x00000007; + + eop_base_addr = prop->eop_gpu_addr >> 8; + mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; + mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); + + /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ + tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL); + tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, + (order_base_2(GFX11_MEC_HPD_SIZE / 4) - 1)); + + mqd->cp_hqd_eop_control = tmp; + + /* enable doorbell? */ + tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL); + + if (prop->use_doorbell) { + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_OFFSET, prop->doorbell_index); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_EN, 1); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_SOURCE, 0); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_HIT, 0); + } else { + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_EN, 0); + } + + mqd->cp_hqd_pq_doorbell_control = tmp; + + /* disable the queue if it's active */ + mqd->cp_hqd_dequeue_request = 0; + mqd->cp_hqd_pq_rptr = 0; + mqd->cp_hqd_pq_wptr_lo = 0; + mqd->cp_hqd_pq_wptr_hi = 0; + + /* set the pointer to the MQD */ + mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc; + mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr); + + /* set MQD vmid to 0 */ + tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL); + tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); + mqd->cp_mqd_control = tmp; + + /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ + hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8; + mqd->cp_hqd_pq_base_lo = hqd_gpu_addr; + mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); + + /* set up the HQD, this is similar to CP_RB0_CNTL */ + tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, + (order_base_2(prop->queue_size / 4) - 1)); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, + ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8)); +#ifdef __BIG_ENDIAN + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1); +#endif + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); + mqd->cp_hqd_pq_control = tmp; + 
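+ /* QUEUE_SIZE above encodes log2 of the ring size in dwords minus one; + * e.g. a 1 MB ring is 262144 dwords and is programmed as + * order_base_2(262144) - 1 = 17 + */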
+ /* set the wb address whether it's enabled or not */ + wb_gpu_addr = prop->rptr_gpu_addr; + mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; + mqd->cp_hqd_pq_rptr_report_addr_hi = + upper_32_bits(wb_gpu_addr) & 0xffff; + + /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ + wb_gpu_addr = prop->wptr_gpu_addr; + mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; + mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; + + tmp = 0; + /* enable the doorbell if requested */ + if (prop->use_doorbell) { + tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_OFFSET, prop->doorbell_index); + + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_EN, 1); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_SOURCE, 0); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_HIT, 0); + } + + mqd->cp_hqd_pq_doorbell_control = tmp; + + /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ + mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR); + + /* set the vmid for the queue */ + mqd->cp_hqd_vmid = 0; + + tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE); + tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55); + mqd->cp_hqd_persistent_state = tmp; + + /* set MIN_IB_AVAIL_SIZE */ + tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL); + tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); + mqd->cp_hqd_ib_control = tmp; + + /* set static priority for a compute queue/ring */ + mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority; + mqd->cp_hqd_queue_priority = prop->hqd_queue_priority; + + mqd->cp_hqd_active = prop->hqd_active; + + return 0; +} + +static int gfx_v11_0_kiq_init_register(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + struct v11_compute_mqd *mqd = ring->mqd_ptr; + int j; + + /* inactivate the queue */ + if (amdgpu_sriov_vf(adev)) + WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 0); + + /* disable wptr polling */ + WREG32_FIELD15_PREREG(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0); + + /* write the EOP addr */ + WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR, + mqd->cp_hqd_eop_base_addr_lo); + WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI, + mqd->cp_hqd_eop_base_addr_hi); + + /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ + WREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL, + mqd->cp_hqd_eop_control); + + /* enable doorbell? 
*/ + WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, + mqd->cp_hqd_pq_doorbell_control); + + /* disable the queue if it's active */ + if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) { + WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1); + for (j = 0; j < adev->usec_timeout; j++) { + if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1)) + break; + udelay(1); + } + WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, + mqd->cp_hqd_dequeue_request); + WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR, + mqd->cp_hqd_pq_rptr); + WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, + mqd->cp_hqd_pq_wptr_lo); + WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, + mqd->cp_hqd_pq_wptr_hi); + } + + /* set the pointer to the MQD */ + WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, + mqd->cp_mqd_base_addr_lo); + WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, + mqd->cp_mqd_base_addr_hi); + + /* set MQD vmid to 0 */ + WREG32_SOC15(GC, 0, regCP_MQD_CONTROL, + mqd->cp_mqd_control); + + /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ + WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE, + mqd->cp_hqd_pq_base_lo); + WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI, + mqd->cp_hqd_pq_base_hi); + + /* set up the HQD, this is similar to CP_RB0_CNTL */ + WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL, + mqd->cp_hqd_pq_control); + + /* set the wb address whether it's enabled or not */ + WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR, + mqd->cp_hqd_pq_rptr_report_addr_lo); + WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI, + mqd->cp_hqd_pq_rptr_report_addr_hi); + + /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ + WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR, + mqd->cp_hqd_pq_wptr_poll_addr_lo); + WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI, + mqd->cp_hqd_pq_wptr_poll_addr_hi); + + /* enable the doorbell if requested */ + if (ring->use_doorbell) { + WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER, + (adev->doorbell_index.kiq * 2) << 2); + WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER, + (adev->doorbell_index.userqueue_end * 2) << 2); + } + + WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, + mqd->cp_hqd_pq_doorbell_control); + + /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ + WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, + mqd->cp_hqd_pq_wptr_lo); + WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, + mqd->cp_hqd_pq_wptr_hi); + + /* set the vmid for the queue */ + WREG32_SOC15(GC, 0, regCP_HQD_VMID, mqd->cp_hqd_vmid); + + WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE, + mqd->cp_hqd_persistent_state); + + /* activate the queue */ + WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, + mqd->cp_hqd_active); + + if (ring->use_doorbell) + WREG32_FIELD15_PREREG(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1); + + return 0; +} + +static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + struct v11_compute_mqd *mqd = ring->mqd_ptr; + int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS; + + gfx_v11_0_kiq_setting(ring); + + if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ + /* reset MQD to a clean status */ + if (adev->gfx.mec.mqd_backup[mqd_idx]) + memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); + + /* reset ring buffer */ + ring->wptr = 0; + amdgpu_ring_clear_ring(ring); + + mutex_lock(&adev->srbm_mutex); + soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + gfx_v11_0_kiq_init_register(ring); + soc21_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + } else { + memset((void *)mqd, 0, sizeof(*mqd)); + mutex_lock(&adev->srbm_mutex); + 
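+ /* srbm_mutex serializes GRBM queue selection: soc21_grbm_select() points + * the per-queue register accesses in gfx_v11_0_kiq_init_register() at + * this ring's me/pipe/queue + */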
soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + amdgpu_ring_init_mqd(ring); + gfx_v11_0_kiq_init_register(ring); + soc21_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + + if (adev->gfx.mec.mqd_backup[mqd_idx]) + memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); + } + + return 0; +} + +static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + struct v11_compute_mqd *mqd = ring->mqd_ptr; + int mqd_idx = ring - &adev->gfx.compute_ring[0]; + + if (!amdgpu_in_reset(adev) && !adev->in_suspend) { + memset((void *)mqd, 0, sizeof(*mqd)); + mutex_lock(&adev->srbm_mutex); + soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + amdgpu_ring_init_mqd(ring); + soc21_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + + if (adev->gfx.mec.mqd_backup[mqd_idx]) + memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); + } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ + /* reset MQD to a clean status */ + if (adev->gfx.mec.mqd_backup[mqd_idx]) + memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); + + /* reset ring buffer */ + ring->wptr = 0; + atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); + amdgpu_ring_clear_ring(ring); + } else { + amdgpu_ring_clear_ring(ring); + } + + return 0; +} + +static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + int r; + + ring = &adev->gfx.kiq.ring; + + r = amdgpu_bo_reserve(ring->mqd_obj, false); + if (unlikely(r != 0)) + return r; + + r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); + if (unlikely(r != 0)) + return r; + + gfx_v11_0_kiq_init_queue(ring); + amdgpu_bo_kunmap(ring->mqd_obj); + ring->mqd_ptr = NULL; + amdgpu_bo_unreserve(ring->mqd_obj); + ring->sched.ready = true; + return 0; +} + +static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = NULL; + int r = 0, i; + + if (!amdgpu_async_gfx_ring) + gfx_v11_0_cp_compute_enable(adev, true); + + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + ring = &adev->gfx.compute_ring[i]; + + r = amdgpu_bo_reserve(ring->mqd_obj, false); + if (unlikely(r != 0)) + goto done; + r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); + if (!r) { + r = gfx_v11_0_kcq_init_queue(ring); + amdgpu_bo_kunmap(ring->mqd_obj); + ring->mqd_ptr = NULL; + } + amdgpu_bo_unreserve(ring->mqd_obj); + if (r) + goto done; + } + + r = amdgpu_gfx_enable_kcq(adev); +done: + return r; +} + +static int gfx_v11_0_cp_resume(struct amdgpu_device *adev) +{ + int r, i; + struct amdgpu_ring *ring; + + if (!(adev->flags & AMD_IS_APU)) + gfx_v11_0_enable_gui_idle_interrupt(adev, false); + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { + /* legacy firmware loading */ + r = gfx_v11_0_cp_gfx_load_microcode(adev); + if (r) + return r; + + if (adev->gfx.rs64_enable) + r = gfx_v11_0_cp_compute_load_microcode_rs64(adev); + else + r = gfx_v11_0_cp_compute_load_microcode(adev); + if (r) + return r; + } + + gfx_v11_0_cp_set_doorbell_range(adev); + + if (amdgpu_async_gfx_ring) { + gfx_v11_0_cp_compute_enable(adev, true); + gfx_v11_0_cp_gfx_enable(adev, true); + } + + if (adev->enable_mes_kiq && adev->mes.kiq_hw_init) + r = amdgpu_mes_kiq_hw_init(adev); + else + r = gfx_v11_0_kiq_resume(adev); + if (r) + return r; + + r = gfx_v11_0_kcq_resume(adev); + if (r) + return r; + + if (!amdgpu_async_gfx_ring) { + r = gfx_v11_0_cp_gfx_resume(adev); + if (r) + return r; + } else { + r = gfx_v11_0_cp_async_gfx_ring_resume(adev); + 
if (r) + return r; + } + + for (i = 0; i < adev->gfx.num_gfx_rings; i++) { + ring = &adev->gfx.gfx_ring[i]; + r = amdgpu_ring_test_helper(ring); + if (r) + return r; + } + + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + ring = &adev->gfx.compute_ring[i]; + r = amdgpu_ring_test_helper(ring); + if (r) + return r; + } + + return 0; +} + +static void gfx_v11_0_cp_enable(struct amdgpu_device *adev, bool enable) +{ + gfx_v11_0_cp_gfx_enable(adev, enable); + gfx_v11_0_cp_compute_enable(adev, enable); +} + +static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev) +{ + int r; + bool value; + + r = adev->gfxhub.funcs->gart_enable(adev); + if (r) + return r; + + adev->hdp.funcs->flush_hdp(adev, NULL); + + value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? + false : true; + + adev->gfxhub.funcs->set_fault_enable_default(adev, value); + amdgpu_gmc_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0); + + return 0; +} + +static void gfx_v11_0_select_cp_fw_arch(struct amdgpu_device *adev) +{ + u32 tmp; + + /* select RS64 */ + if (adev->gfx.rs64_enable) { + tmp = RREG32_SOC15(GC, 0, regCP_GFX_CNTL); + tmp = REG_SET_FIELD(tmp, CP_GFX_CNTL, ENGINE_SEL, 1); + WREG32_SOC15(GC, 0, regCP_GFX_CNTL, tmp); + + tmp = RREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL); + tmp = REG_SET_FIELD(tmp, CP_MEC_ISA_CNTL, ISA_MODE, 1); + WREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL, tmp); + } + + if (amdgpu_emu_mode == 1) + msleep(100); +} + +static int get_gb_addr_config(struct amdgpu_device * adev) +{ + u32 gb_addr_config; + + gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG); + if (gb_addr_config == 0) + return -EINVAL; + + adev->gfx.config.gb_addr_config_fields.num_pkrs = + 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS); + + adev->gfx.config.gb_addr_config = gb_addr_config; + + adev->gfx.config.gb_addr_config_fields.num_pipes = 1 << + REG_GET_FIELD(adev->gfx.config.gb_addr_config, + GB_ADDR_CONFIG, NUM_PIPES); + + adev->gfx.config.max_tile_pipes = + adev->gfx.config.gb_addr_config_fields.num_pipes; + + adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 << + REG_GET_FIELD(adev->gfx.config.gb_addr_config, + GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS); + adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 << + REG_GET_FIELD(adev->gfx.config.gb_addr_config, + GB_ADDR_CONFIG, NUM_RB_PER_SE); + adev->gfx.config.gb_addr_config_fields.num_se = 1 << + REG_GET_FIELD(adev->gfx.config.gb_addr_config, + GB_ADDR_CONFIG, NUM_SHADER_ENGINES); + adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 + + REG_GET_FIELD(adev->gfx.config.gb_addr_config, + GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE)); + + return 0; +} + +static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev) +{ + uint32_t data; + + data = RREG32_SOC15(GC, 0, regCPC_PSP_DEBUG); + data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK; + WREG32_SOC15(GC, 0, regCPC_PSP_DEBUG, data); + + data = RREG32_SOC15(GC, 0, regCPG_PSP_DEBUG); + data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK; + WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data); +} + +static int gfx_v11_0_hw_init(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { + if (adev->gfx.imu.funcs) { + /* RLC autoload sequence 1: Program rlc ram */ + if (adev->gfx.imu.funcs->program_rlc_ram) + adev->gfx.imu.funcs->program_rlc_ram(adev); + } + /* rlc autoload firmware */ + r = gfx_v11_0_rlc_backdoor_autoload_enable(adev); + if (r) + return r; + } else { + if (adev->firmware.load_type == 
AMDGPU_FW_LOAD_DIRECT) { + if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) { + if (adev->gfx.imu.funcs->load_microcode) + adev->gfx.imu.funcs->load_microcode(adev); + if (adev->gfx.imu.funcs->setup_imu) + adev->gfx.imu.funcs->setup_imu(adev); + if (adev->gfx.imu.funcs->start_imu) + adev->gfx.imu.funcs->start_imu(adev); + } + } + } + + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) || + (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { + r = gfx_v11_0_wait_for_rlc_autoload_complete(adev); + if (r) { + dev_err(adev->dev, "(%d) failed to wait rlc autoload complete\n", r); + return r; + } + } + + adev->gfx.is_poweron = true; + + if (get_gb_addr_config(adev)) + DRM_WARN("Invalid gb_addr_config !\n"); + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && + adev->gfx.rs64_enable) + gfx_v11_0_config_gfx_rs64(adev); + + r = gfx_v11_0_gfxhub_enable(adev); + if (r) + return r; + + if (!amdgpu_emu_mode) + gfx_v11_0_init_golden_registers(adev); + + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) || + (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) { + /* + * For gfx 11, rlc firmware loading relies on the smu firmware + * being loaded first, so with the direct loading type the smc + * ucode has to be loaded here before the rlc. + */ + if (!(adev->flags & AMD_IS_APU)) { + r = amdgpu_pm_load_smu_firmware(adev, NULL); + if (r) + return r; + } + } + + gfx_v11_0_constants_init(adev); + + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) + gfx_v11_0_select_cp_fw_arch(adev); + + adev->nbio.funcs->gc_doorbell_init(adev); + + r = gfx_v11_0_rlc_resume(adev); + if (r) + return r; + + /* + * golden register init and rlc resume may override some registers, + * so reconfigure them here + */ + gfx_v11_0_tcp_harvest(adev); + + r = gfx_v11_0_cp_resume(adev); + if (r) + return r; + + return r; +} + +#ifndef BRING_UP_DEBUG +static int gfx_v11_0_kiq_disable_kgq(struct amdgpu_device *adev) +{ + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_ring *kiq_ring = &kiq->ring; + int i, r = 0; + + if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) + return -EINVAL; + + if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * + adev->gfx.num_gfx_rings)) + return -ENOMEM; + + for (i = 0; i < adev->gfx.num_gfx_rings; i++) + kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i], + PREEMPT_QUEUES, 0, 0); + + if (adev->gfx.kiq.ring.sched.ready) + r = amdgpu_ring_test_helper(kiq_ring); + + return r; +} +#endif + +static int gfx_v11_0_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + uint32_t tmp; + + amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); + amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); + + if (!adev->no_hw_access) { +#ifndef BRING_UP_DEBUG + if (amdgpu_async_gfx_ring) { + r = gfx_v11_0_kiq_disable_kgq(adev); + if (r) + DRM_ERROR("KGQ disable failed\n"); + } +#endif + if (amdgpu_gfx_disable_kcq(adev)) + DRM_ERROR("KCQ disable failed\n"); + + amdgpu_mes_kiq_hw_fini(adev); + } + + if (amdgpu_sriov_vf(adev)) { + gfx_v11_0_cp_gfx_enable(adev, false); + /* Program KIQ position of RLC_CP_SCHEDULERS during destroy */ + tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS); + tmp &= 0xffffff00; + WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp); + + return 0; + } + gfx_v11_0_cp_enable(adev, false); + gfx_v11_0_enable_gui_idle_interrupt(adev, false); + + adev->gfxhub.funcs->gart_disable(adev); + + adev->gfx.is_poweron = false; + + return 0; +} + +static int gfx_v11_0_suspend(void *handle) +{ + return gfx_v11_0_hw_fini(handle); +} + +static int
gfx_v11_0_resume(void *handle) +{ + return gfx_v11_0_hw_init(handle); +} + +static bool gfx_v11_0_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS), + GRBM_STATUS, GUI_ACTIVE)) + return false; + else + return true; +} + +static int gfx_v11_0_wait_for_idle(void *handle) +{ + unsigned i; + u32 tmp; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + for (i = 0; i < adev->usec_timeout; i++) { + /* read MC_STATUS */ + tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS) & + GRBM_STATUS__GUI_ACTIVE_MASK; + + if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE)) + return 0; + udelay(1); + } + return -ETIMEDOUT; +} + +static int gfx_v11_0_soft_reset(void *handle) +{ + u32 grbm_soft_reset = 0; + u32 tmp; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + /* GRBM_STATUS */ + tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS); + if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK | + GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | + GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__DB_BUSY_MASK | + GRBM_STATUS__CB_BUSY_MASK | GRBM_STATUS__GDS_BUSY_MASK | + GRBM_STATUS__SPI_BUSY_MASK | GRBM_STATUS__GE_BUSY_NO_DMA_MASK)) { + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, + GRBM_SOFT_RESET, SOFT_RESET_CP, + 1); + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, + GRBM_SOFT_RESET, SOFT_RESET_GFX, + 1); + } + + if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) { + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, + GRBM_SOFT_RESET, SOFT_RESET_CP, + 1); + } + + /* GRBM_STATUS2 */ + tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS2); + if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY)) + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, + GRBM_SOFT_RESET, + SOFT_RESET_RLC, + 1); + + if (grbm_soft_reset) { + /* stop the rlc */ + gfx_v11_0_rlc_stop(adev); + + /* Disable GFX parsing/prefetching */ + gfx_v11_0_cp_gfx_enable(adev, false); + + /* Disable MEC parsing/prefetching */ + gfx_v11_0_cp_compute_enable(adev, false); + + if (grbm_soft_reset) { + tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET); + tmp |= grbm_soft_reset; + dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, tmp); + tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~grbm_soft_reset; + WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, tmp); + tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET); + } + + /* Wait a little for things to settle down */ + udelay(50); + } + return 0; +} + +static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev) +{ + uint64_t clock; + + amdgpu_gfx_off_ctrl(adev, false); + mutex_lock(&adev->gfx.gpu_clock_mutex); + clock = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER) | + ((uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER) << 32ULL); + mutex_unlock(&adev->gfx.gpu_clock_mutex); + amdgpu_gfx_off_ctrl(adev, true); + return clock; +} + +static void gfx_v11_0_ring_emit_gds_switch(struct amdgpu_ring *ring, + uint32_t vmid, + uint32_t gds_base, uint32_t gds_size, + uint32_t gws_base, uint32_t gws_size, + uint32_t oa_base, uint32_t oa_size) +{ + struct amdgpu_device *adev = ring->adev; + + /* GDS Base */ + gfx_v11_0_write_data_to_reg(ring, 0, false, + SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_BASE) + 2 * vmid, + gds_base); + + /* GDS Size */ + gfx_v11_0_write_data_to_reg(ring, 0, false, + SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_SIZE) + 2 * vmid, + gds_size); + + /* GWS */ + gfx_v11_0_write_data_to_reg(ring, 0, 
false, + SOC15_REG_OFFSET(GC, 0, regGDS_GWS_VMID0) + vmid, + gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base); + + /* OA */ + gfx_v11_0_write_data_to_reg(ring, 0, false, + SOC15_REG_OFFSET(GC, 0, regGDS_OA_VMID0) + vmid, + (1 << (oa_size + oa_base)) - (1 << oa_base)); +} + +static int gfx_v11_0_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS; + adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), + AMDGPU_MAX_COMPUTE_RINGS); + + gfx_v11_0_set_kiq_pm4_funcs(adev); + gfx_v11_0_set_ring_funcs(adev); + gfx_v11_0_set_irq_funcs(adev); + gfx_v11_0_set_gds_init(adev); + gfx_v11_0_set_rlc_funcs(adev); + gfx_v11_0_set_mqd_funcs(adev); + gfx_v11_0_set_imu_funcs(adev); + + gfx_v11_0_init_rlcg_reg_access_ctrl(adev); + + return 0; +} + +static int gfx_v11_0_late_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); + if (r) + return r; + + r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); + if (r) + return r; + + return 0; +} + +static bool gfx_v11_0_is_rlc_enabled(struct amdgpu_device *adev) +{ + uint32_t rlc_cntl; + + /* if RLC is not enabled, do nothing */ + rlc_cntl = RREG32_SOC15(GC, 0, regRLC_CNTL); + return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false; +} + +static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev) +{ + uint32_t data; + unsigned i; + + data = RLC_SAFE_MODE__CMD_MASK; + data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); + + WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data); + + /* wait for RLC_SAFE_MODE */ + for (i = 0; i < adev->usec_timeout; i++) { + if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE), + RLC_SAFE_MODE, CMD)) + break; + udelay(1); + } +} + +static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev) +{ + WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK); +} + +static void gfx_v11_0_update_repeater_fgcg(struct amdgpu_device *adev, + bool enable) +{ + uint32_t def, data; + + if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG)) + return; + + def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); + + if (enable) + data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK; + else + data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK; + + if (def != data) + WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); +} + +#if 0 +static void gfx_v11_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + /* TODO: add power related feature later. */ +} + +static void gfx_v11_0_update_3d_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + /* TODO: add power related feature later. 
*/ +} +#endif + +static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t def, data; + + if (!(adev->cg_flags & + (AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_3D_CGCG | + AMD_CG_SUPPORT_GFX_3D_CGLS))) + return; + + if (enable) { + def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); + + /* unset CGCG override */ + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) + data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) + data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG || + adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) + data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK; + + /* update CGCG override bits */ + if (def != data) + WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); + + /* enable cgcg FSM(0x0000363F) */ + def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); + + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) { + data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK; + data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | + RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; + } + + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) { + data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK; + data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | + RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; + } + + if (def != data) + WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data); + + /* Program RLC_CGCG_CGLS_CTRL_3D */ + def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); + + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) { + data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK; + data |= (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | + RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; + } + + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) { + data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK; + data |= (0xf << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) | + RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; + } + + if (def != data) + WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data); + + /* set IDLE_POLL_COUNT(0x00900100) */ + def = data = RREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL); + + data &= ~(CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK | CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK); + data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | + (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); + + if (def != data) + WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL, data); + + data = RREG32_SOC15(GC, 0, regCP_INT_CNTL); + data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1); + data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1); + data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1); + data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1); + WREG32_SOC15(GC, 0, regCP_INT_CNTL, data); + + data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL); + data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1); + WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data); + + data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL); + data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1); + WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data); + } else { + /* Program RLC_CGCG_CGLS_CTRL */ + def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); + + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) + data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; + + if (adev->cg_flags & 
AMD_CG_SUPPORT_GFX_CGLS) + data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; + + if (def != data) + WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data); + + /* Program RLC_CGCG_CGLS_CTRL_3D */ + def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); + + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) + data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) + data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; + + if (def != data) + WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data); + + data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL); + data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK; + WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data); + + data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL); + data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK; + WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data); + } +} + +static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + amdgpu_gfx_rlc_enter_safe_mode(adev); + + gfx_v11_0_update_coarse_grain_clock_gating(adev, enable); + + gfx_v11_0_update_repeater_fgcg(adev, enable); + + if (adev->cg_flags & + (AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_3D_CGCG | + AMD_CG_SUPPORT_GFX_3D_CGLS)) + gfx_v11_0_enable_gui_idle_interrupt(adev, enable); + + amdgpu_gfx_rlc_exit_safe_mode(adev); + + return 0; +} + +static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) +{ + u32 reg, data; + + reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL); + if (amdgpu_sriov_is_pp_one_vf(adev)) + data = RREG32_NO_KIQ(reg); + else + data = RREG32(reg); + + data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK; + data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; + + if (amdgpu_sriov_is_pp_one_vf(adev)) + WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data); + else + WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data); +} + +static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = { + .is_rlc_enabled = gfx_v11_0_is_rlc_enabled, + .set_safe_mode = gfx_v11_0_set_safe_mode, + .unset_safe_mode = gfx_v11_0_unset_safe_mode, + .init = gfx_v11_0_rlc_init, + .get_csb_size = gfx_v11_0_get_csb_size, + .get_csb_buffer = gfx_v11_0_get_csb_buffer, + .resume = gfx_v11_0_rlc_resume, + .stop = gfx_v11_0_rlc_stop, + .reset = gfx_v11_0_rlc_reset, + .start = gfx_v11_0_rlc_start, + .update_spm_vmid = gfx_v11_0_update_spm_vmid, +}; + +static int gfx_v11_0_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool enable = (state == AMD_PG_STATE_GATE); + + if (amdgpu_sriov_vf(adev)) + return 0; + + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(11, 0, 0): + amdgpu_gfx_off_ctrl(adev, enable); + break; + default: + break; + } + + return 0; +} + +static int gfx_v11_0_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (amdgpu_sriov_vf(adev)) + return 0; + + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 2): + gfx_v11_0_update_gfx_clock_gating(adev, + state == AMD_CG_STATE_GATE); + break; + default: + break; + } + + return 0; +} + +static void gfx_v11_0_get_clockgating_state(void *handle, u64 *flags) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int data; + + /* AMD_CG_SUPPORT_GFX_FGCG */ + data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); + if (!(data & 
RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK)) + *flags |= AMD_CG_SUPPORT_GFX_FGCG; + + /* AMD_CG_SUPPORT_GFX_MGCG */ + data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); + if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) + *flags |= AMD_CG_SUPPORT_GFX_MGCG; + + /* AMD_CG_SUPPORT_GFX_CGCG */ + data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); + if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) + *flags |= AMD_CG_SUPPORT_GFX_CGCG; + + /* AMD_CG_SUPPORT_GFX_CGLS */ + if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK) + *flags |= AMD_CG_SUPPORT_GFX_CGLS; + + /* AMD_CG_SUPPORT_GFX_3D_CGCG */ + data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); + if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) + *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG; + + /* AMD_CG_SUPPORT_GFX_3D_CGLS */ + if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK) + *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS; +} + +static u64 gfx_v11_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) +{ + /* gfx11 is 32bit rptr*/ + return *(uint32_t *)ring->rptr_cpu_addr; +} + +static u64 gfx_v11_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u64 wptr; + + /* XXX check if swapping is necessary on BE */ + if (ring->use_doorbell) { + wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); + } else { + wptr = RREG32_SOC15(GC, 0, regCP_RB0_WPTR); + wptr += (u64)RREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI) << 32; + } + + return wptr; +} + +static void gfx_v11_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->use_doorbell) { + /* XXX check if swapping is necessary on BE */ + atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr); + WDOORBELL64(ring->doorbell_index, ring->wptr); + } else { + WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); + } +} + +static u64 gfx_v11_0_ring_get_rptr_compute(struct amdgpu_ring *ring) +{ + /* gfx11 hardware is 32bit rptr */ + return *(uint32_t *)ring->rptr_cpu_addr; +} + +static u64 gfx_v11_0_ring_get_wptr_compute(struct amdgpu_ring *ring) +{ + u64 wptr; + + /* XXX check if swapping is necessary on BE */ + if (ring->use_doorbell) + wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); + else + BUG(); + return wptr; +} + +static void gfx_v11_0_ring_set_wptr_compute(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + /* XXX check if swapping is necessary on BE */ + if (ring->use_doorbell) { + atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr); + WDOORBELL64(ring->doorbell_index, ring->wptr); + } else { + BUG(); /* only DOORBELL method supported on gfx11 now */ + } +} + +static void gfx_v11_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u32 ref_and_mask, reg_mem_engine; + const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; + + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { + switch (ring->me) { + case 1: + ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe; + break; + case 2: + ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe; + break; + default: + return; + } + reg_mem_engine = 0; + } else { + ref_and_mask = nbio_hf_reg->ref_and_mask_cp0; + reg_mem_engine = 1; /* pfp */ + } + + gfx_v11_0_wait_reg_mem(ring, reg_mem_engine, 0, 1, + adev->nbio.funcs->get_hdp_flush_req_offset(adev), + adev->nbio.funcs->get_hdp_flush_done_offset(adev), + ref_and_mask, ref_and_mask, 0x20); +} + +static void gfx_v11_0_ring_emit_ib_gfx(struct 
amdgpu_ring *ring, + struct amdgpu_job *job, + struct amdgpu_ib *ib, + uint32_t flags) +{ + unsigned vmid = AMDGPU_JOB_GET_VMID(job); + u32 header, control = 0; + + BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE); + + header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); + + control |= ib->length_dw | (vmid << 24); + + if ((amdgpu_sriov_vf(ring->adev) || amdgpu_mcbp) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { + control |= INDIRECT_BUFFER_PRE_ENB(1); + + if (flags & AMDGPU_IB_PREEMPTED) + control |= INDIRECT_BUFFER_PRE_RESUME(1); + + if (vmid) + gfx_v11_0_ring_emit_de_meta(ring, + (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false); + } + + if (ring->is_mes_queue) + /* inherit vmid from mqd */ + control |= 0x400000; + + amdgpu_ring_write(ring, header); + BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ + amdgpu_ring_write(ring, +#ifdef __BIG_ENDIAN + (2 << 0) | +#endif + lower_32_bits(ib->gpu_addr)); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); + amdgpu_ring_write(ring, control); +} + +static void gfx_v11_0_ring_emit_ib_compute(struct amdgpu_ring *ring, + struct amdgpu_job *job, + struct amdgpu_ib *ib, + uint32_t flags) +{ + unsigned vmid = AMDGPU_JOB_GET_VMID(job); + u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); + + if (ring->is_mes_queue) + /* inherit vmid from mqd */ + control |= 0x40000000; + + /* Currently, there is a high possibility to get wave ID mismatch + * between ME and GDS, leading to a hw deadlock, because ME generates + * different wave IDs than the GDS expects. This situation happens + * randomly when at least 5 compute pipes use GDS ordered append. + * The wave IDs generated by ME are also wrong after suspend/resume. + * Those are probably bugs somewhere else in the kernel driver. + * + * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and + * GDS to 0 for this ring (me/pipe). + */ + if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) { + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID); + amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id); + } + + amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); + BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ + amdgpu_ring_write(ring, +#ifdef __BIG_ENDIAN + (2 << 0) | +#endif + lower_32_bits(ib->gpu_addr)); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); + amdgpu_ring_write(ring, control); +} + +static void gfx_v11_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, + u64 seq, unsigned flags) +{ + bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; + bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; + + /* RELEASE_MEM - flush caches, send int */ + amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6)); + amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ | + PACKET3_RELEASE_MEM_GCR_GL2_WB | + PACKET3_RELEASE_MEM_GCR_GL2_INV | + PACKET3_RELEASE_MEM_GCR_GL2_US | + PACKET3_RELEASE_MEM_GCR_GL1_INV | + PACKET3_RELEASE_MEM_GCR_GLV_INV | + PACKET3_RELEASE_MEM_GCR_GLM_INV | + PACKET3_RELEASE_MEM_GCR_GLM_WB | + PACKET3_RELEASE_MEM_CACHE_POLICY(3) | + PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | + PACKET3_RELEASE_MEM_EVENT_INDEX(5))); + amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) | + PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 
2 : 0))); + + /* + * the address should be Qword aligned if 64bit write, Dword + * aligned if only send 32bit data low (discard data high) + */ + if (write64bit) + BUG_ON(addr & 0x7); + else + BUG_ON(addr & 0x3); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, lower_32_bits(seq)); + amdgpu_ring_write(ring, upper_32_bits(seq)); + amdgpu_ring_write(ring, ring->is_mes_queue ? + (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0); +} + +static void gfx_v11_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) +{ + int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); + uint32_t seq = ring->fence_drv.sync_seq; + uint64_t addr = ring->fence_drv.gpu_addr; + + gfx_v11_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr), + upper_32_bits(addr), seq, 0xffffffff, 4); +} + +static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring, + uint16_t pasid, uint32_t flush_type, + bool all_hub, uint8_t dst_sel) +{ + amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0)); + amdgpu_ring_write(ring, + PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) | + PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) | + PACKET3_INVALIDATE_TLBS_PASID(pasid) | + PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type)); +} + +static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vmid, uint64_t pd_addr) +{ + if (ring->is_mes_queue) + gfx_v11_0_ring_invalidate_tlbs(ring, 0, 0, false, 0); + else + amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); + + /* compute doesn't have PFP */ + if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) { + /* sync PFP to ME, otherwise we might get invalid PFP reads */ + amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); + amdgpu_ring_write(ring, 0x0); + } +} + +static void gfx_v11_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr, + u64 seq, unsigned int flags) +{ + struct amdgpu_device *adev = ring->adev; + + /* we only allocate 32bit for each seq wb address */ + BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT); + + /* write fence seq to the "addr" */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(5) | WR_CONFIRM)); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, lower_32_bits(seq)); + + if (flags & AMDGPU_FENCE_FLAG_INT) { + /* set register to trigger INT */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(0) | WR_CONFIRM)); + amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */ + } +} + +static void gfx_v11_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, + uint32_t flags) +{ + uint32_t dw2 = 0; + + dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */ + if (flags & AMDGPU_HAVE_CTX_SWITCH) { + /* set load_global_config & load_global_uconfig */ + dw2 |= 0x8001; + /* set load_cs_sh_regs */ + dw2 |= 0x01000000; + /* set load_per_context_state & load_gfx_sh_regs for GFX */ + dw2 |= 0x10002; + } + + amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); + amdgpu_ring_write(ring, dw2); + amdgpu_ring_write(ring, 0); +} + +static unsigned gfx_v11_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring) +{ + unsigned ret; + + amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3)); + amdgpu_ring_write(ring, 
lower_32_bits(ring->cond_exe_gpu_addr)); + amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr)); + amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */ + ret = ring->wptr & ring->buf_mask; + amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */ + + return ret; +} + +static void gfx_v11_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset) +{ + unsigned cur; + BUG_ON(offset > ring->buf_mask); + BUG_ON(ring->ring[offset] != 0x55aa55aa); + + cur = (ring->wptr - 1) & ring->buf_mask; + if (likely(cur > offset)) + ring->ring[offset] = cur - offset; + else + ring->ring[offset] = (ring->buf_mask + 1) - offset + cur; +} + +static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring) +{ + int i, r = 0; + struct amdgpu_device *adev = ring->adev; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_ring *kiq_ring = &kiq->ring; + unsigned long flags; + + if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) + return -EINVAL; + + spin_lock_irqsave(&kiq->ring_lock, flags); + + if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { + spin_unlock_irqrestore(&kiq->ring_lock, flags); + return -ENOMEM; + } + + /* assert preemption condition */ + amdgpu_ring_set_preempt_cond_exec(ring, false); + + /* assert IB preemption, emit the trailing fence */ + kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP, + ring->trail_fence_gpu_addr, + ++ring->trail_seq); + amdgpu_ring_commit(kiq_ring); + + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + /* poll the trailing fence */ + for (i = 0; i < adev->usec_timeout; i++) { + if (ring->trail_seq == + le32_to_cpu(*(ring->trail_fence_cpu_addr))) + break; + udelay(1); + } + + if (i >= adev->usec_timeout) { + r = -EINVAL; + DRM_ERROR("ring %d failed to preempt ib\n", ring->idx); + } + + /* deassert preemption condition */ + amdgpu_ring_set_preempt_cond_exec(ring, true); + return r; +} + +static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume) +{ + struct amdgpu_device *adev = ring->adev; + struct v10_de_ib_state de_payload = {0}; + uint64_t offset, gds_addr, de_payload_gpu_addr; + void *de_payload_cpu_addr; + int cnt; + + if (ring->is_mes_queue) { + offset = offsetof(struct amdgpu_mes_ctx_meta_data, + gfx[0].gfx_meta_data) + + offsetof(struct v10_gfx_meta_data, de_payload); + de_payload_gpu_addr = + amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); + de_payload_cpu_addr = + amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); + + offset = offsetof(struct amdgpu_mes_ctx_meta_data, + gfx[0].gds_backup) + + offsetof(struct v10_gfx_meta_data, de_payload); + gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); + } else { + offset = offsetof(struct v10_gfx_meta_data, de_payload); + de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset; + de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset; + + gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) + + AMDGPU_CSA_SIZE - adev->gds.gds_size, + PAGE_SIZE); + } + + de_payload.gds_backup_addrlo = lower_32_bits(gds_addr); + de_payload.gds_backup_addrhi = upper_32_bits(gds_addr); + + cnt = (sizeof(de_payload) >> 2) + 4 - 2; + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | + WRITE_DATA_DST_SEL(8) | + WR_CONFIRM) | + WRITE_DATA_CACHE_POLICY(0)); + amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr)); + amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr)); + + if (resume) + amdgpu_ring_write_multiple(ring, de_payload_cpu_addr, + sizeof(de_payload) 
>> 2); + else + amdgpu_ring_write_multiple(ring, (void *)&de_payload, + sizeof(de_payload) >> 2); +} + +static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, + bool secure) +{ + uint32_t v = secure ? FRAME_TMZ : 0; + + amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0)); + amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1)); +} + +static void gfx_v11_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg, + uint32_t reg_val_offs) +{ + struct amdgpu_device *adev = ring->adev; + + amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); + amdgpu_ring_write(ring, 0 | /* src: register*/ + (5 << 8) | /* dst: memory */ + (1 << 20)); /* write confirm */ + amdgpu_ring_write(ring, reg); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + + reg_val_offs * 4)); + amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + + reg_val_offs * 4)); +} + +static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val) +{ + uint32_t cmd = 0; + + switch (ring->funcs->type) { + case AMDGPU_RING_TYPE_GFX: + cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM; + break; + case AMDGPU_RING_TYPE_KIQ: + cmd = (1 << 16); /* no inc addr */ + break; + default: + cmd = WR_CONFIRM; + break; + } + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, cmd); + amdgpu_ring_write(ring, reg); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, val); +} + +static void gfx_v11_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val, uint32_t mask) +{ + gfx_v11_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); +} + +static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, + uint32_t reg0, uint32_t reg1, + uint32_t ref, uint32_t mask) +{ + int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); + + gfx_v11_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1, + ref, mask, 0x20); +} + +static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring, + unsigned vmid) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t value = 0; + + value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); + value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); + value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); + value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); + WREG32_SOC15(GC, 0, regSQ_CMD, value); +} + +static void +gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, + uint32_t me, uint32_t pipe, + enum amdgpu_interrupt_state state) +{ + uint32_t cp_int_cntl, cp_int_cntl_reg; + + if (!me) { + switch (pipe) { + case 0: + cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0); + break; + case 1: + cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1); + break; + default: + DRM_DEBUG("invalid pipe %d\n", pipe); + return; + } + } else { + DRM_DEBUG("invalid me %d\n", me); + return; + } + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); + cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, + TIME_STAMP_INT_ENABLE, 0); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); + break; + case AMDGPU_IRQ_STATE_ENABLE: + cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); + cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, + TIME_STAMP_INT_ENABLE, 1); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); + break; + default: + break; + } +} + +static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, + int me, int pipe, + enum amdgpu_interrupt_state state) +{ + u32 
mec_int_cntl, mec_int_cntl_reg; + + /* + * amdgpu controls only the first MEC. That's why this function only + * handles the setting of interrupts for this specific MEC. All other + * pipes' interrupts are set by amdkfd. + */ + + if (me == 1) { + switch (pipe) { + case 0: + mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); + break; + case 1: + mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL); + break; + case 2: + mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL); + break; + case 3: + mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL); + break; + default: + DRM_DEBUG("invalid pipe %d\n", pipe); + return; + } + } else { + DRM_DEBUG("invalid me %d\n", me); + return; + } + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg); + mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, + TIME_STAMP_INT_ENABLE, 0); + WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); + break; + case AMDGPU_IRQ_STATE_ENABLE: + mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg); + mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, + TIME_STAMP_INT_ENABLE, 1); + WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); + break; + default: + break; + } +} + +static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + switch (type) { + case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP: + gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 0, state); + break; + case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP: + gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 1, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: + gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 0, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: + gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 1, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: + gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 2, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: + gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 3, state); + break; + default: + break; + } + return 0; +} + +static int gfx_v11_0_eop_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + int i; + u8 me_id, pipe_id, queue_id; + struct amdgpu_ring *ring; + uint32_t mes_queue_id = entry->src_data[0]; + + DRM_DEBUG("IH: CP EOP\n"); + + if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) { + struct amdgpu_mes_queue *queue; + + mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK; + + spin_lock(&adev->mes.queue_id_lock); + queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id); + if (queue) { + DRM_DEBUG("process mes queue id = %d\n", mes_queue_id); + amdgpu_fence_process(queue->ring); + } + spin_unlock(&adev->mes.queue_id_lock); + } else { + me_id = (entry->ring_id & 0x0c) >> 2; + pipe_id = (entry->ring_id & 0x03) >> 0; + queue_id = (entry->ring_id & 0x70) >> 4; + + switch (me_id) { + case 0: + if (pipe_id == 0) + amdgpu_fence_process(&adev->gfx.gfx_ring[0]); + else + amdgpu_fence_process(&adev->gfx.gfx_ring[1]); + break; + case 1: + case 2: + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + ring = &adev->gfx.compute_ring[i]; + /* Per-queue interrupt is supported for MEC starting from VI. + * The interrupt can only be enabled/disabled per pipe instead + * of per queue. 
+ */ + if ((ring->me == me_id) && + (ring->pipe == pipe_id) && + (ring->queue == queue_id)) + amdgpu_fence_process(ring); + } + break; + } + } + + return 0; +} + +static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + case AMDGPU_IRQ_STATE_ENABLE: + WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, + PRIV_REG_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + break; + default: + break; + } + + return 0; +} + +static int gfx_v11_0_set_priv_inst_fault_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + case AMDGPU_IRQ_STATE_ENABLE: + WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, + PRIV_INSTR_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + break; + default: + break; + } + + return 0; +} + +static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev, + struct amdgpu_iv_entry *entry) +{ + u8 me_id, pipe_id, queue_id; + struct amdgpu_ring *ring; + int i; + + me_id = (entry->ring_id & 0x0c) >> 2; + pipe_id = (entry->ring_id & 0x03) >> 0; + queue_id = (entry->ring_id & 0x70) >> 4; + + switch (me_id) { + case 0: + for (i = 0; i < adev->gfx.num_gfx_rings; i++) { + ring = &adev->gfx.gfx_ring[i]; + /* we only enabled 1 gfx queue per pipe for now */ + if (ring->me == me_id && ring->pipe == pipe_id) + drm_sched_fault(&ring->sched); + } + break; + case 1: + case 2: + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + ring = &adev->gfx.compute_ring[i]; + if (ring->me == me_id && ring->pipe == pipe_id && + ring->queue == queue_id) + drm_sched_fault(&ring->sched); + } + break; + default: + BUG(); + } +} + +static int gfx_v11_0_priv_reg_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_ERROR("Illegal register access in command stream\n"); + gfx_v11_0_handle_priv_fault(adev, entry); + return 0; +} + +static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_ERROR("Illegal instruction in command stream\n"); + gfx_v11_0_handle_priv_fault(adev, entry); + return 0; +} + +#if 0 +static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + uint32_t tmp, target; + struct amdgpu_ring *ring = &(adev->gfx.kiq.ring); + + target = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); + target += ring->pipe; + + switch (type) { + case AMDGPU_CP_KIQ_IRQ_DRIVER0: + if (state == AMDGPU_IRQ_STATE_DISABLE) { + tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL); + tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, + GENERIC2_INT_ENABLE, 0); + WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp); + + tmp = RREG32_SOC15_IP(GC, target); + tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL, + GENERIC2_INT_ENABLE, 0); + WREG32_SOC15_IP(GC, target, tmp); + } else { + tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL); + tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, + GENERIC2_INT_ENABLE, 1); + WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp); + + tmp = RREG32_SOC15_IP(GC, target); + tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL, + GENERIC2_INT_ENABLE, 1); + WREG32_SOC15_IP(GC, target, tmp); + } + break; + default: + BUG(); /* kiq only supports GENERIC2_INT now */ + break; + } + return 0; +} +#endif + +static void gfx_v11_0_emit_mem_sync(struct
amdgpu_ring *ring) +{ + const unsigned int gcr_cntl = + PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) | + PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) | + PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) | + PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) | + PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) | + PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) | + PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) | + PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1); + + /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */ + amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6)); + amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */ + amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */ + amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */ + amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */ + amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */ + amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */ + amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */ +} + +static const struct amd_ip_funcs gfx_v11_0_ip_funcs = { + .name = "gfx_v11_0", + .early_init = gfx_v11_0_early_init, + .late_init = gfx_v11_0_late_init, + .sw_init = gfx_v11_0_sw_init, + .sw_fini = gfx_v11_0_sw_fini, + .hw_init = gfx_v11_0_hw_init, + .hw_fini = gfx_v11_0_hw_fini, + .suspend = gfx_v11_0_suspend, + .resume = gfx_v11_0_resume, + .is_idle = gfx_v11_0_is_idle, + .wait_for_idle = gfx_v11_0_wait_for_idle, + .soft_reset = gfx_v11_0_soft_reset, + .set_clockgating_state = gfx_v11_0_set_clockgating_state, + .set_powergating_state = gfx_v11_0_set_powergating_state, + .get_clockgating_state = gfx_v11_0_get_clockgating_state, +}; + +static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = { + .type = AMDGPU_RING_TYPE_GFX, + .align_mask = 0xff, + .nop = PACKET3(PACKET3_NOP, 0x3FFF), + .support_64bit_ptrs = true, + .vmhub = AMDGPU_GFXHUB_0, + .get_rptr = gfx_v11_0_ring_get_rptr_gfx, + .get_wptr = gfx_v11_0_ring_get_wptr_gfx, + .set_wptr = gfx_v11_0_ring_set_wptr_gfx, + .emit_frame_size = /* totally 242 maximum if 16 IBs */ + 5 + /* COND_EXEC */ + 7 + /* PIPELINE_SYNC */ + SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + + 2 + /* VM_FLUSH */ + 8 + /* FENCE for VM_FLUSH */ + 20 + /* GDS switch */ + 5 + /* COND_EXEC */ + 7 + /* HDP_flush */ + 4 + /* VGT_flush */ + 31 + /* DE_META */ + 3 + /* CNTX_CTRL */ + 5 + /* HDP_INVL */ + 8 + 8 + /* FENCE x2 */ + 8, /* gfx_v11_0_emit_mem_sync */ + .emit_ib_size = 4, /* gfx_v11_0_ring_emit_ib_gfx */ + .emit_ib = gfx_v11_0_ring_emit_ib_gfx, + .emit_fence = gfx_v11_0_ring_emit_fence, + .emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync, + .emit_vm_flush = gfx_v11_0_ring_emit_vm_flush, + .emit_gds_switch = gfx_v11_0_ring_emit_gds_switch, + .emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush, + .test_ring = gfx_v11_0_ring_test_ring, + .test_ib = gfx_v11_0_ring_test_ib, + .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, + .emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl, + .init_cond_exec = gfx_v11_0_ring_emit_init_cond_exec, + .patch_cond_exec = gfx_v11_0_ring_emit_patch_cond_exec, + .preempt_ib = gfx_v11_0_ring_preempt_ib, + .emit_frame_cntl = gfx_v11_0_ring_emit_frame_cntl, + .emit_wreg = gfx_v11_0_ring_emit_wreg, + .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait, + .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, + .soft_recovery = gfx_v11_0_ring_soft_recovery, + .emit_mem_sync = gfx_v11_0_emit_mem_sync, +}; + +static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = { + .type = AMDGPU_RING_TYPE_COMPUTE, + .align_mask = 0xff, + .nop = 
PACKET3(PACKET3_NOP, 0x3FFF), + .support_64bit_ptrs = true, + .vmhub = AMDGPU_GFXHUB_0, + .get_rptr = gfx_v11_0_ring_get_rptr_compute, + .get_wptr = gfx_v11_0_ring_get_wptr_compute, + .set_wptr = gfx_v11_0_ring_set_wptr_compute, + .emit_frame_size = + 20 + /* gfx_v11_0_ring_emit_gds_switch */ + 7 + /* gfx_v11_0_ring_emit_hdp_flush */ + 5 + /* hdp invalidate */ + 7 + /* gfx_v11_0_ring_emit_pipeline_sync */ + SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + + 2 + /* gfx_v11_0_ring_emit_vm_flush */ + 8 + 8 + 8 + /* gfx_v11_0_ring_emit_fence x3 for user fence, vm fence */ + 8, /* gfx_v11_0_emit_mem_sync */ + .emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */ + .emit_ib = gfx_v11_0_ring_emit_ib_compute, + .emit_fence = gfx_v11_0_ring_emit_fence, + .emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync, + .emit_vm_flush = gfx_v11_0_ring_emit_vm_flush, + .emit_gds_switch = gfx_v11_0_ring_emit_gds_switch, + .emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush, + .test_ring = gfx_v11_0_ring_test_ring, + .test_ib = gfx_v11_0_ring_test_ib, + .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, + .emit_wreg = gfx_v11_0_ring_emit_wreg, + .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait, + .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, + .emit_mem_sync = gfx_v11_0_emit_mem_sync, +}; + +static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = { + .type = AMDGPU_RING_TYPE_KIQ, + .align_mask = 0xff, + .nop = PACKET3(PACKET3_NOP, 0x3FFF), + .support_64bit_ptrs = true, + .vmhub = AMDGPU_GFXHUB_0, + .get_rptr = gfx_v11_0_ring_get_rptr_compute, + .get_wptr = gfx_v11_0_ring_get_wptr_compute, + .set_wptr = gfx_v11_0_ring_set_wptr_compute, + .emit_frame_size = + 20 + /* gfx_v11_0_ring_emit_gds_switch */ + 7 + /* gfx_v11_0_ring_emit_hdp_flush */ + 5 + /*hdp invalidate */ + 7 + /* gfx_v11_0_ring_emit_pipeline_sync */ + SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + + 2 + /* gfx_v11_0_ring_emit_vm_flush */ + 8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */ + .emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */ + .emit_ib = gfx_v11_0_ring_emit_ib_compute, + .emit_fence = gfx_v11_0_ring_emit_fence_kiq, + .test_ring = gfx_v11_0_ring_test_ring, + .test_ib = gfx_v11_0_ring_test_ib, + .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, + .emit_rreg = gfx_v11_0_ring_emit_rreg, + .emit_wreg = gfx_v11_0_ring_emit_wreg, + .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait, + .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, +}; + +static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev) +{ + int i; + + adev->gfx.kiq.ring.funcs = &gfx_v11_0_ring_funcs_kiq; + + for (i = 0; i < adev->gfx.num_gfx_rings; i++) + adev->gfx.gfx_ring[i].funcs = &gfx_v11_0_ring_funcs_gfx; + + for (i = 0; i < adev->gfx.num_compute_rings; i++) + adev->gfx.compute_ring[i].funcs = &gfx_v11_0_ring_funcs_compute; +} + +static const struct amdgpu_irq_src_funcs gfx_v11_0_eop_irq_funcs = { + .set = gfx_v11_0_set_eop_interrupt_state, + .process = gfx_v11_0_eop_irq, +}; + +static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_reg_irq_funcs = { + .set = gfx_v11_0_set_priv_reg_fault_state, + .process = gfx_v11_0_priv_reg_irq, +}; + +static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = { + .set = gfx_v11_0_set_priv_inst_fault_state, + .process = gfx_v11_0_priv_inst_irq, +}; + +static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev) +{ + 
adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; + adev->gfx.eop_irq.funcs = &gfx_v11_0_eop_irq_funcs; + + adev->gfx.priv_reg_irq.num_types = 1; + adev->gfx.priv_reg_irq.funcs = &gfx_v11_0_priv_reg_irq_funcs; + + adev->gfx.priv_inst_irq.num_types = 1; + adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs; +} + +static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev) +{ + adev->gfx.imu.funcs = &gfx_v11_0_imu_funcs; +} + +static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev) +{ + adev->gfx.rlc.funcs = &gfx_v11_0_rlc_funcs; +} + +static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev) +{ + unsigned total_cu = adev->gfx.config.max_cu_per_sh * + adev->gfx.config.max_sh_per_se * + adev->gfx.config.max_shader_engines; + + adev->gds.gds_size = 0x1000; + adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1; + adev->gds.gws_size = 64; + adev->gds.oa_size = 16; +} + +static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev) +{ + /* set gfx eng mqd */ + adev->mqds[AMDGPU_HW_IP_GFX].mqd_size = + sizeof(struct v11_gfx_mqd); + adev->mqds[AMDGPU_HW_IP_GFX].init_mqd = + gfx_v11_0_gfx_mqd_init; + /* set compute eng mqd */ + adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size = + sizeof(struct v11_compute_mqd); + adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd = + gfx_v11_0_compute_mqd_init; +} + +static void gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev, + u32 bitmap) +{ + u32 data; + + if (!bitmap) + return; + + data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT; + data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK; + + WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data); +} + +static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev) +{ + u32 data, wgp_bitmask; + data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG); + data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG); + + data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK; + data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT; + + wgp_bitmask = + amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1); + + return (~data) & wgp_bitmask; +} + +static u32 gfx_v11_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev) +{ + u32 wgp_idx, wgp_active_bitmap; + u32 cu_bitmap_per_wgp, cu_active_bitmap; + + wgp_active_bitmap = gfx_v11_0_get_wgp_active_bitmap_per_sh(adev); + cu_active_bitmap = 0; + + for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) { + /* if there is one WGP enabled, it means 2 CUs will be enabled */ + cu_bitmap_per_wgp = 3 << (2 * wgp_idx); + if (wgp_active_bitmap & (1 << wgp_idx)) + cu_active_bitmap |= cu_bitmap_per_wgp; + } + + return cu_active_bitmap; +} + +static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev, + struct amdgpu_cu_info *cu_info) +{ + int i, j, k, counter, active_cu_number = 0; + u32 mask, bitmap; + unsigned disable_masks[8 * 2]; + + if (!adev || !cu_info) + return -EINVAL; + + amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2); + + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { + mask = 1; + counter = 0; + gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff); + if (i < 8 && j < 2) + gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh( + adev, disable_masks[i * 2 + j]); + bitmap = gfx_v11_0_get_cu_active_bitmap_per_sh(adev); + + /** + * GFX11 could support more than 4 SEs, while the bitmap + * in cu_info struct is 4x4 and ioctl interface struct + * drm_amdgpu_info_device should keep stable. 
+ * So we use last two columns of bitmap to store cu mask for + * SEs 4 to 7, the layout of the bitmap is as below: + * SE0: {SH0,SH1} --> {bitmap[0][0], bitmap[0][1]} + * SE1: {SH0,SH1} --> {bitmap[1][0], bitmap[1][1]} + * SE2: {SH0,SH1} --> {bitmap[2][0], bitmap[2][1]} + * SE3: {SH0,SH1} --> {bitmap[3][0], bitmap[3][1]} + * SE4: {SH0,SH1} --> {bitmap[0][2], bitmap[0][3]} + * SE5: {SH0,SH1} --> {bitmap[1][2], bitmap[1][3]} + * SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]} + * SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]} + */ + cu_info->bitmap[i % 4][j + (i / 4) * 2] = bitmap; + + for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) { + if (bitmap & mask) + counter++; + + mask <<= 1; + } + active_cu_number += counter; + } + } + gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + + cu_info->number = active_cu_number; + cu_info->simd_per_cu = NUM_SIMD_PER_CU; + + return 0; +} + +const struct amdgpu_ip_block_version gfx_v11_0_ip_block = +{ + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 11, + .minor = 0, + .rev = 0, + .funcs = &gfx_v11_0_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.h new file mode 100644 index 000000000000..10cfc29c27c9 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.h @@ -0,0 +1,29 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __GFX_V11_0_H__ +#define __GFX_V11_0_H__ + +extern const struct amdgpu_ip_block_version gfx_v11_0_ip_block; + +#endif -- cgit From d54762cc3e6abb08f5ae31e3fa6a249768c07617 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 5 May 2022 11:03:51 +0200 Subject: drm/amdgpu: nuke dynamic gfx scratch reg allocation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's been over a decade since this was actually used for more than ring and IB tests. Just use the static register directly where needed and nuke the now useless infrastructure.
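
For context on the infrastructure being removed: the old helpers handed out one of eight SCRATCH_REG* registers through a free-bit mask, as the amdgpu_gfx.c hunk below shows. A minimal user-space model of that scheme (struct and function names here are renamed stand-ins for the kernel's amdgpu_gfx_scratch_get/free; the error code is simplified):

#include <stdint.h>
#include <strings.h>	/* ffs() */

struct scratch {
	unsigned int num_reg;	/* 8 on all affected ASICs */
	uint32_t reg_base;	/* mmSCRATCH_REG0 */
	uint32_t free_mask;	/* bit i set => reg_base + i is free */
};

static int scratch_get(struct scratch *s, uint32_t *reg)
{
	int i = ffs(s->free_mask);	/* lowest free slot, 1-based */

	if (i != 0 && i <= (int)s->num_reg) {
		i--;
		s->free_mask &= ~(1u << i);
		*reg = s->reg_base + i;
		return 0;
	}
	return -1;	/* the kernel helper returns -EINVAL */
}

static void scratch_free(struct scratch *s, uint32_t reg)
{
	s->free_mask |= 1u << (reg - s->reg_base);
}

Since the only remaining callers are ring and IB tests, the series treats it as safe to point every user at the single fixed SCRATCH_REG0 and drop the bookkeeping.
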
Signed-off-by: Christian König Acked-by: Lang Yu Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 36 ------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 13 ------- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 27 +++----------- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 25 ++----------- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 54 ++++++++-------------------- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 64 +++++++-------------------------- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 24 +++---------- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 26 +++----------- 8 files changed, 43 insertions(+), 226 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 5d6b04fc6206..ede2fa56f6c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -98,42 +98,6 @@ bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, adev->gfx.me.queue_bitmap); } -/** - * amdgpu_gfx_scratch_get - Allocate a scratch register - * - * @adev: amdgpu_device pointer - * @reg: scratch register mmio offset - * - * Allocate a CP scratch register for use by the driver (all asics). - * Returns 0 on success or -EINVAL on failure. - */ -int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg) -{ - int i; - - i = ffs(adev->gfx.scratch.free_mask); - if (i != 0 && i <= adev->gfx.scratch.num_reg) { - i--; - adev->gfx.scratch.free_mask &= ~(1u << i); - *reg = adev->gfx.scratch.reg_base + i; - return 0; - } - return -EINVAL; -} - -/** - * amdgpu_gfx_scratch_free - Free a scratch register - * - * @adev: amdgpu_device pointer - * @reg: scratch register mmio offset - * - * Free a CP scratch register allocated for use by the driver (all asics) - */ -void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg) -{ - adev->gfx.scratch.free_mask |= 1u << (reg - adev->gfx.scratch.reg_base); -} - /** * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 45522609d4b4..53526ffb2ce1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -110,15 +110,6 @@ struct amdgpu_kiq { const struct kiq_pm4_funcs *pmf; }; -/* - * GPU scratch registers structures, functions & helpers - */ -struct amdgpu_scratch { - unsigned num_reg; - uint32_t reg_base; - uint32_t free_mask; -}; - /* * GFX configurations */ @@ -288,7 +279,6 @@ struct amdgpu_gfx { struct amdgpu_mec mec; struct amdgpu_kiq kiq; struct amdgpu_imu imu; - struct amdgpu_scratch scratch; bool rs64_enable; /* firmware format */ const struct firmware *me_fw; /* ME firmware */ uint32_t me_fw_version; @@ -376,9 +366,6 @@ static inline u32 amdgpu_gfx_create_bitmask(u32 bit_width) return (u32)((1ULL << bit_width) - 1); } -int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg); -void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg); - void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 64d36622ee23..4b66b9c93754 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -3744,13 +3744,6 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev) gfx_v10_0_init_spm_golden_registers(adev); } -static void 
gfx_v10_0_scratch_init(struct amdgpu_device *adev) -{ - adev->gfx.scratch.num_reg = 8; - adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0); - adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1; -} - static void gfx_v10_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel, bool wc, uint32_t reg, uint32_t val) { @@ -3787,34 +3780,26 @@ static void gfx_v10_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, static int gfx_v10_0_ring_test_ring(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - uint32_t scratch; uint32_t tmp = 0; unsigned i; int r; - r = amdgpu_gfx_scratch_get(adev, &scratch); - if (r) { - DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r); - return r; - } - - WREG32(scratch, 0xCAFEDEAD); - + WREG32_SOC15(GC, 0, mmSCRATCH_REG0, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r); - amdgpu_gfx_scratch_free(adev, scratch); return r; } amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); - amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); + amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0) - + PACKET3_SET_UCONFIG_REG_START); amdgpu_ring_write(ring, 0xDEADBEEF); amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(scratch); + tmp = RREG32_SOC15(GC, 0, mmSCRATCH_REG0); if (tmp == 0xDEADBEEF) break; if (amdgpu_emu_mode == 1) @@ -3826,8 +3811,6 @@ static int gfx_v10_0_ring_test_ring(struct amdgpu_ring *ring) if (i >= adev->usec_timeout) r = -ETIMEDOUT; - amdgpu_gfx_scratch_free(adev, scratch); - return r; } @@ -4852,8 +4835,6 @@ static int gfx_v10_0_sw_init(void *handle) adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; - gfx_v10_0_scratch_init(adev); - r = gfx_v10_0_me_init(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 7c75fe51ec20..9416dc93e456 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -297,13 +297,6 @@ static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev) gfx_v11_0_init_spm_golden_registers(adev); } -static void gfx_v11_0_scratch_init(struct amdgpu_device *adev) -{ - adev->gfx.scratch.num_reg = 8; - adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0); - adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1; -} - static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel, bool wc, uint32_t reg, uint32_t val) { @@ -340,24 +333,16 @@ static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - uint32_t scratch; + uint32_t scratch = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0); uint32_t tmp = 0; unsigned i; int r; - r = amdgpu_gfx_scratch_get(adev, &scratch); - if (r) { - DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r); - return r; - } - WREG32(scratch, 0xCAFEDEAD); - r = amdgpu_ring_alloc(ring, 5); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r); - amdgpu_gfx_scratch_free(adev, scratch); return r; } @@ -365,7 +350,8 @@ static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring) gfx_v11_0_ring_emit_wreg(ring, scratch, 0xDEADBEEF); } else { amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); - amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); + amdgpu_ring_write(ring, scratch - + 
PACKET3_SET_UCONFIG_REG_START); amdgpu_ring_write(ring, 0xDEADBEEF); } amdgpu_ring_commit(ring); @@ -382,9 +368,6 @@ static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring) if (i >= adev->usec_timeout) r = -ETIMEDOUT; - - amdgpu_gfx_scratch_free(adev, scratch); - return r; } @@ -1631,8 +1614,6 @@ static int gfx_v11_0_sw_init(void *handle) adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; - gfx_v11_0_scratch_init(adev); - if (adev->gfx.imu.funcs) { if (adev->gfx.imu.funcs->init_microcode) { r = adev->gfx.imu.funcs->init_microcode(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 29a91b320d4f..204b246f0e3f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -1778,39 +1778,26 @@ static void gfx_v6_0_constants_init(struct amdgpu_device *adev) udelay(50); } - -static void gfx_v6_0_scratch_init(struct amdgpu_device *adev) -{ - adev->gfx.scratch.num_reg = 8; - adev->gfx.scratch.reg_base = mmSCRATCH_REG0; - adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1; -} - static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - uint32_t scratch; uint32_t tmp = 0; unsigned i; int r; - r = amdgpu_gfx_scratch_get(adev, &scratch); - if (r) - return r; - - WREG32(scratch, 0xCAFEDEAD); + WREG32(mmSCRATCH_REG0, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); if (r) - goto error_free_scratch; + return r; amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); - amdgpu_ring_write(ring, (scratch - PACKET3_SET_CONFIG_REG_START)); + amdgpu_ring_write(ring, mmSCRATCH_REG0 - PACKET3_SET_CONFIG_REG_START); amdgpu_ring_write(ring, 0xDEADBEEF); amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(scratch); + tmp = RREG32(mmSCRATCH_REG0); if (tmp == 0xDEADBEEF) break; udelay(1); @@ -1818,9 +1805,6 @@ static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring) if (i >= adev->usec_timeout) r = -ETIMEDOUT; - -error_free_scratch: - amdgpu_gfx_scratch_free(adev, scratch); return r; } @@ -1903,50 +1887,42 @@ static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring, static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; - struct amdgpu_ib ib; struct dma_fence *f = NULL; - uint32_t scratch; + struct amdgpu_ib ib; uint32_t tmp = 0; long r; - r = amdgpu_gfx_scratch_get(adev, &scratch); - if (r) - return r; - - WREG32(scratch, 0xCAFEDEAD); + WREG32(mmSCRATCH_REG0, 0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 256, - AMDGPU_IB_POOL_DIRECT, &ib); + r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib); if (r) - goto err1; + return r; ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1); - ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_START)); + ib.ptr[1] = mmSCRATCH_REG0 - PACKET3_SET_CONFIG_REG_START; ib.ptr[2] = 0xDEADBEEF; ib.length_dw = 3; r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); if (r) - goto err2; + goto error; r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { r = -ETIMEDOUT; - goto err2; + goto error; } else if (r < 0) { - goto err2; + goto error; } - tmp = RREG32(scratch); + tmp = RREG32(mmSCRATCH_REG0); if (tmp == 0xDEADBEEF) r = 0; else r = -EINVAL; -err2: +error: amdgpu_ib_free(adev, &ib, NULL); dma_fence_put(f); -err1: - amdgpu_gfx_scratch_free(adev, scratch); return r; } @@ -3094,8 +3070,6 @@ static int gfx_v6_0_sw_init(void *handle) if (r) return r; - gfx_v6_0_scratch_init(adev); - r = 
gfx_v6_0_init_microcode(adev); if (r) { DRM_ERROR("Failed to load gfx firmware!\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index ac3f2dbba726..0f2976507e48 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2049,26 +2049,6 @@ static void gfx_v7_0_constants_init(struct amdgpu_device *adev) udelay(50); } -/* - * GPU scratch registers helpers function. - */ -/** - * gfx_v7_0_scratch_init - setup driver info for CP scratch regs - * - * @adev: amdgpu_device pointer - * - * Set up the number and offset of the CP scratch registers. - * NOTE: use of CP scratch registers is a legacy interface and - * is not used by default on newer asics (r6xx+). On newer asics, - * memory buffers are used for fences rather than scratch regs. - */ -static void gfx_v7_0_scratch_init(struct amdgpu_device *adev) -{ - adev->gfx.scratch.num_reg = 8; - adev->gfx.scratch.reg_base = mmSCRATCH_REG0; - adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1; -} - /** * gfx_v7_0_ring_test_ring - basic gfx ring test * @@ -2082,36 +2062,28 @@ static void gfx_v7_0_scratch_init(struct amdgpu_device *adev) static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - uint32_t scratch; uint32_t tmp = 0; unsigned i; int r; - r = amdgpu_gfx_scratch_get(adev, &scratch); - if (r) - return r; - - WREG32(scratch, 0xCAFEDEAD); + WREG32(mmSCRATCH_REG0, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); if (r) - goto error_free_scratch; + return r; amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); - amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); + amdgpu_ring_write(ring, mmSCRATCH_REG0 - PACKET3_SET_UCONFIG_REG_START); amdgpu_ring_write(ring, 0xDEADBEEF); amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(scratch); + tmp = RREG32(mmSCRATCH_REG0); if (tmp == 0xDEADBEEF) break; udelay(1); } if (i >= adev->usec_timeout) r = -ETIMEDOUT; - -error_free_scratch: - amdgpu_gfx_scratch_free(adev, scratch); return r; } @@ -2355,48 +2327,40 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct dma_fence *f = NULL; - uint32_t scratch; uint32_t tmp = 0; long r; - r = amdgpu_gfx_scratch_get(adev, &scratch); - if (r) - return r; - - WREG32(scratch, 0xCAFEDEAD); + WREG32(mmSCRATCH_REG0, 0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 256, - AMDGPU_IB_POOL_DIRECT, &ib); + r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib); if (r) - goto err1; + return r; ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); - ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START)); + ib.ptr[1] = mmSCRATCH_REG0 - PACKET3_SET_UCONFIG_REG_START; ib.ptr[2] = 0xDEADBEEF; ib.length_dw = 3; r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); if (r) - goto err2; + goto error; r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { r = -ETIMEDOUT; - goto err2; + goto error; } else if (r < 0) { - goto err2; + goto error; } - tmp = RREG32(scratch); + tmp = RREG32(mmSCRATCH_REG0); if (tmp == 0xDEADBEEF) r = 0; else r = -EINVAL; -err2: +error: amdgpu_ib_free(adev, &ib, NULL); dma_fence_put(f); -err1: - amdgpu_gfx_scratch_free(adev, scratch); return r; } @@ -4489,8 +4453,6 @@ static int gfx_v7_0_sw_init(void *handle) if (r) return r; - gfx_v7_0_scratch_init(adev); - r = gfx_v7_0_init_microcode(adev); if (r) { DRM_ERROR("Failed to load gfx firmware!\n"); 
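
Every generation's ring test in this series reduces to the same pattern, now against the fixed register: seed SCRATCH_REG0 with 0xCAFEDEAD, submit one SET_*CONFIG_REG packet that writes 0xDEADBEEF to it, then poll until the value flips or the timeout expires. A condensed sketch of that flow (illustrative only, not a drop-in function; WREG32/RREG32, udelay and the PACKET3 helpers are the kernel primitives seen in the hunks):

/* Sketch of the post-series gfx ring test using the static scratch reg. */
static int ring_test_sketch(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned int i;
	int r;

	WREG32(mmSCRATCH_REG0, 0xCAFEDEAD);	/* known starting value */

	r = amdgpu_ring_alloc(ring, 3);		/* room for one packet */
	if (r)
		return r;

	/* ask the CP to overwrite the scratch reg with a magic value */
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, mmSCRATCH_REG0 - PACKET3_SET_UCONFIG_REG_START);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	/* the CP executed the packet iff the value flipped in time */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSCRATCH_REG0);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	return (i < adev->usec_timeout) ? 0 : -ETIMEDOUT;
}

The IB-test variants in the gfx_v6_0.c and gfx_v7_0.c hunks differ only in packing the same three dwords into an indirect buffer and waiting on its fence instead of polling inline.
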
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index e4e779a19c20..90f64219d291 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -835,37 +835,25 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) } } -static void gfx_v8_0_scratch_init(struct amdgpu_device *adev) -{ - adev->gfx.scratch.num_reg = 8; - adev->gfx.scratch.reg_base = mmSCRATCH_REG0; - adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1; -} - static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - uint32_t scratch; uint32_t tmp = 0; unsigned i; int r; - r = amdgpu_gfx_scratch_get(adev, &scratch); - if (r) - return r; - - WREG32(scratch, 0xCAFEDEAD); + WREG32(mmSCRATCH_REG0, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); if (r) - goto error_free_scratch; + return r; amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); - amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); + amdgpu_ring_write(ring, mmSCRATCH_REG0 - PACKET3_SET_UCONFIG_REG_START); amdgpu_ring_write(ring, 0xDEADBEEF); amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(scratch); + tmp = RREG32(mmSCRATCH_REG0); if (tmp == 0xDEADBEEF) break; udelay(1); @@ -874,8 +862,6 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring) if (i >= adev->usec_timeout) r = -ETIMEDOUT; -error_free_scratch: - amdgpu_gfx_scratch_free(adev, scratch); return r; } @@ -2000,8 +1986,6 @@ static int gfx_v8_0_sw_init(void *handle) adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; - gfx_v8_0_scratch_init(adev); - r = gfx_v8_0_init_microcode(adev); if (r) { DRM_ERROR("Failed to load gfx firmware!\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 06182b7e4351..83639b5ea6a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -950,13 +950,6 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common)); } -static void gfx_v9_0_scratch_init(struct amdgpu_device *adev) -{ - adev->gfx.scratch.num_reg = 8; - adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0); - adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1; -} - static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel, bool wc, uint32_t reg, uint32_t val) { @@ -994,27 +987,23 @@ static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - uint32_t scratch; uint32_t tmp = 0; unsigned i; int r; - r = amdgpu_gfx_scratch_get(adev, &scratch); - if (r) - return r; - - WREG32(scratch, 0xCAFEDEAD); + WREG32_SOC15(GC, 0, mmSCRATCH_REG0, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); if (r) - goto error_free_scratch; + return r; amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); - amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); + amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0) - + PACKET3_SET_UCONFIG_REG_START); amdgpu_ring_write(ring, 0xDEADBEEF); amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(scratch); + tmp = RREG32_SOC15(GC, 0, mmSCRATCH_REG0); if (tmp == 0xDEADBEEF) break; udelay(1); @@ -1022,9 +1011,6 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring) if (i >= adev->usec_timeout) r = 
-ETIMEDOUT; - -error_free_scratch: - amdgpu_gfx_scratch_free(adev, scratch); return r; } @@ -2338,8 +2324,6 @@ static int gfx_v9_0_sw_init(void *handle) adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; - gfx_v9_0_scratch_init(adev); - r = gfx_v9_0_init_microcode(adev); if (r) { DRM_ERROR("Failed to load gfx firmware!\n"); -- cgit
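
Two pieces of bit arithmetic in the GFX11 bring-up patch above are easy to misread, so here is a self-contained worked check of both: the me/pipe/queue decode used by gfx_v11_0_eop_irq() and gfx_v11_0_handle_priv_fault(), and the 4x4 CU-bitmap packing for up to eight shader engines in gfx_v11_0_get_cu_info(). The input values are examples chosen for illustration, not data from the patches:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* interrupt handlers: me/pipe/queue packed into entry->ring_id */
	uint8_t ring_id  = 0x4a;			/* example IV entry */
	uint8_t me_id    = (ring_id & 0x0c) >> 2;	/* bits 3:2 -> 2 (MEC) */
	uint8_t pipe_id  = (ring_id & 0x03) >> 0;	/* bits 1:0 -> 2 */
	uint8_t queue_id = (ring_id & 0x70) >> 4;	/* bits 6:4 -> 4 */
	assert(me_id == 2 && pipe_id == 2 && queue_id == 4);

	/* cu_info: SE4..SE7 reuse columns 2..3 of the fixed 4x4 bitmap,
	 * e.g. SE5/SH1 lands in bitmap[1][3] per the comment in the patch */
	int i = 5, j = 1;
	assert(i % 4 == 1 && j + (i / 4) * 2 == 3);

	/* one active WGP enables a pair of CUs, so WGP 3 -> CU bits 7:6 */
	assert((3u << (2 * 3)) == 0xc0);

	return 0;
}
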