Diffstat (limited to 'drivers/gpu/drm/amd/powerplay')
56 files changed, 7362 insertions, 3091 deletions
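A recurring change across the amd_powerplay.c hunks below: the pp_check() helper (and its PP_DPM_DISABLED magic return value) is deleted, and each pp_dpm_* entry point open-codes its own guard. A minimal sketch of the resulting shape of an entry point, assuming illustrative names (pp_example_get_value and get_value are not functions from this patch):

static int pp_example_get_value(void *handle, uint32_t *value)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret;

	/* handle must be valid and DPM enabled; pm_en stays false on
	 * SR-IOV VFs and when the amdgpu_dpm module parameter is 0 */
	if (!hwmgr || !hwmgr->pm_en || !value)
		return -EINVAL;

	/* optional backend callback: report it and return, as the real
	 * entry points below do */
	if (hwmgr->hwmgr_func->get_value == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	ret = hwmgr->hwmgr_func->get_value(hwmgr, value);
	mutex_unlock(&hwmgr->smu_lock);

	return ret;
}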
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 7e8ad30d98e2..75c208283e5f 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -25,30 +25,16 @@ #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/slab.h> +#include <linux/firmware.h> #include "amd_shared.h" #include "amd_powerplay.h" #include "power_state.h" #include "amdgpu.h" #include "hwmgr.h" -#define PP_DPM_DISABLED 0xCCCC - -static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id, - enum amd_pm_state_type *user_state); static const struct amd_pm_funcs pp_dpm_funcs; -static inline int pp_check(struct pp_hwmgr *hwmgr) -{ - if (hwmgr == NULL || hwmgr->smumgr_funcs == NULL) - return -EINVAL; - - if (hwmgr->pm_en == 0 || hwmgr->hwmgr_func == NULL) - return PP_DPM_DISABLED; - - return 0; -} - static int amd_powerplay_create(struct amdgpu_device *adev) { struct pp_hwmgr *hwmgr; @@ -61,19 +47,21 @@ static int amd_powerplay_create(struct amdgpu_device *adev) return -ENOMEM; hwmgr->adev = adev; - hwmgr->pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false; + hwmgr->not_vf = !amdgpu_sriov_vf(adev); + hwmgr->pm_en = (amdgpu_dpm && hwmgr->not_vf) ? true : false; hwmgr->device = amdgpu_cgs_create_device(adev); mutex_init(&hwmgr->smu_lock); hwmgr->chip_family = adev->family; hwmgr->chip_id = adev->asic_type; - hwmgr->feature_mask = amdgpu_pp_feature_mask; + hwmgr->feature_mask = adev->powerplay.pp_feature; + hwmgr->display_config = &adev->pm.pm_display_cfg; adev->powerplay.pp_handle = hwmgr; adev->powerplay.pp_funcs = &pp_dpm_funcs; return 0; } -static int amd_powerplay_destroy(struct amdgpu_device *adev) +static void amd_powerplay_destroy(struct amdgpu_device *adev) { struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; @@ -82,8 +70,6 @@ static int amd_powerplay_destroy(struct amdgpu_device *adev) kfree(hwmgr); hwmgr = NULL; - - return 0; } static int pp_early_init(void *handle) @@ -109,18 +95,9 @@ static int pp_sw_init(void *handle) struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret >= 0) { - if (hwmgr->smumgr_funcs->smu_init == NULL) - return -EINVAL; - - ret = hwmgr->smumgr_funcs->smu_init(hwmgr); + ret = hwmgr_sw_init(hwmgr); - phm_register_irq_handlers(hwmgr); - - pr_debug("amdgpu: powerplay sw initialized\n"); - } + pr_debug("powerplay sw init %s\n", ret ? 
"failed" : "successfully"); return ret; } @@ -129,16 +106,14 @@ static int pp_sw_fini(void *handle) { struct amdgpu_device *adev = handle; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - int ret = 0; - ret = pp_check(hwmgr); - if (ret >= 0) { - if (hwmgr->smumgr_funcs->smu_fini != NULL) - hwmgr->smumgr_funcs->smu_fini(hwmgr); - } + hwmgr_sw_fini(hwmgr); - if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) + if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) { + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; amdgpu_ucode_fini_bo(adev); + } return 0; } @@ -152,55 +127,68 @@ static int pp_hw_init(void *handle) if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) amdgpu_ucode_init_bo(adev); - ret = pp_check(hwmgr); + ret = hwmgr_hw_init(hwmgr); - if (ret >= 0) { - if (hwmgr->smumgr_funcs->start_smu == NULL) - return -EINVAL; + if (ret) + pr_err("powerplay hw init failed\n"); - if (hwmgr->smumgr_funcs->start_smu(hwmgr)) { - pr_err("smc start failed\n"); - hwmgr->smumgr_funcs->smu_fini(hwmgr); - return -EINVAL; - } - if (ret == PP_DPM_DISABLED) - goto exit; - ret = hwmgr_hw_init(hwmgr); - if (ret) - goto exit; - } return ret; -exit: - hwmgr->pm_en = 0; - cgs_notify_dpm_enabled(hwmgr->device, false); - return 0; - } static int pp_hw_fini(void *handle) { struct amdgpu_device *adev = handle; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - int ret = 0; - ret = pp_check(hwmgr); - if (ret == 0) - hwmgr_hw_fini(hwmgr); + hwmgr_hw_fini(hwmgr); return 0; } +static void pp_reserve_vram_for_smu(struct amdgpu_device *adev) +{ + int r = -EINVAL; + void *cpu_ptr = NULL; + uint64_t gpu_addr; + struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; + + if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, + &adev->pm.smu_prv_buffer, + &gpu_addr, + &cpu_ptr)) { + DRM_ERROR("amdgpu: failed to create smu prv buffer\n"); + return; + } + + if (hwmgr->hwmgr_func->notify_cac_buffer_info) + r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr, + lower_32_bits((unsigned long)cpu_ptr), + upper_32_bits((unsigned long)cpu_ptr), + lower_32_bits(gpu_addr), + upper_32_bits(gpu_addr), + adev->pm.smu_prv_buffer_size); + + if (r) { + amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL); + adev->pm.smu_prv_buffer = NULL; + DRM_ERROR("amdgpu: failed to notify SMU buffer address\n"); + } +} + static int pp_late_init(void *handle) { struct amdgpu_device *adev = handle; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - int ret = 0; - ret = pp_check(hwmgr); - - if (ret == 0) - pp_dpm_dispatch_tasks(hwmgr, + if (hwmgr && hwmgr->pm_en) { + mutex_lock(&hwmgr->smu_lock); + hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL); + mutex_unlock(&hwmgr->smu_lock); + } + if (adev->pm.smu_prv_buffer_size != 0) + pp_reserve_vram_for_smu(adev); return 0; } @@ -209,6 +197,8 @@ static void pp_late_fini(void *handle) { struct amdgpu_device *adev = handle; + if (adev->pm.smu_prv_buffer) + amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL); amd_powerplay_destroy(adev); } @@ -231,61 +221,23 @@ static int pp_sw_reset(void *handle) static int pp_set_powergating_state(void *handle, enum amd_powergating_state state) { - struct amdgpu_device *adev = handle; - struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - int ret = 0; - - ret = pp_check(hwmgr); - - if (ret) - return ret; - - if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) { - pr_info("%s was not implemented.\n", __func__); - return 0; - } - - /* Enable/disable GFX per cu powergating through SMU */ - return 
hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr, - state == AMD_PG_STATE_GATE); + return 0; } static int pp_suspend(void *handle) { struct amdgpu_device *adev = handle; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - int ret = 0; - ret = pp_check(hwmgr); - if (ret == 0) - hwmgr_hw_suspend(hwmgr); - return 0; + return hwmgr_suspend(hwmgr); } static int pp_resume(void *handle) { struct amdgpu_device *adev = handle; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - int ret; - - ret = pp_check(hwmgr); - - if (ret < 0) - return ret; - - if (hwmgr->smumgr_funcs->start_smu == NULL) - return -EINVAL; - - if (hwmgr->smumgr_funcs->start_smu(hwmgr)) { - pr_err("smc start failed\n"); - hwmgr->smumgr_funcs->smu_fini(hwmgr); - return -EINVAL; - } - if (ret == PP_DPM_DISABLED) - return 0; - - return hwmgr_hw_resume(hwmgr); + return hwmgr_resume(hwmgr); } static int pp_set_clockgating_state(void *handle, @@ -334,12 +286,9 @@ static int pp_dpm_fw_loading_complete(void *handle) static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (hwmgr->hwmgr_func->update_clock_gatings == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -362,10 +311,10 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr, if (*level & profile_mode_mask) { hwmgr->saved_dpm_level = hwmgr->dpm_level; hwmgr->en_umd_pstate = true; - cgs_set_clockgating_state(hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_GFX, AMD_CG_STATE_UNGATE); - cgs_set_powergating_state(hwmgr->device, + amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_UNGATE); } @@ -375,10 +324,10 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr, if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) *level = hwmgr->saved_dpm_level; hwmgr->en_umd_pstate = false; - cgs_set_clockgating_state(hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_GFX, AMD_CG_STATE_GATE); - cgs_set_powergating_state(hwmgr->device, + amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_GATE); } @@ -389,12 +338,9 @@ static int pp_dpm_force_performance_level(void *handle, enum amd_dpm_forced_level level) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; - - ret = pp_check(hwmgr); - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (level == hwmgr->dpm_level) return 0; @@ -412,13 +358,10 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level( void *handle) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; enum amd_dpm_forced_level level; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; mutex_lock(&hwmgr->smu_lock); level = hwmgr->dpm_level; @@ -429,13 +372,10 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level( static uint32_t pp_dpm_get_sclk(void *handle, bool low) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; uint32_t clk = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return 0; if (hwmgr->hwmgr_func->get_sclk == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -450,13 +390,10 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool low) static uint32_t pp_dpm_get_mclk(void *handle, bool low) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; uint32_t clk = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if 
(!hwmgr || !hwmgr->pm_en) + return 0; if (hwmgr->hwmgr_func->get_mclk == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -471,11 +408,8 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool low) static void pp_dpm_powergate_vce(void *handle, bool gate) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; - ret = pp_check(hwmgr); - - if (ret) + if (!hwmgr || !hwmgr->pm_en) return; if (hwmgr->hwmgr_func->powergate_vce == NULL) { @@ -490,11 +424,8 @@ static void pp_dpm_powergate_vce(void *handle, bool gate) static void pp_dpm_powergate_uvd(void *handle, bool gate) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; - ret = pp_check(hwmgr); - - if (ret) + if (!hwmgr || !hwmgr->pm_en) return; if (hwmgr->hwmgr_func->powergate_uvd == NULL) { @@ -512,10 +443,8 @@ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id, int ret = 0; struct pp_hwmgr *hwmgr = handle; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; mutex_lock(&hwmgr->smu_lock); ret = hwmgr_handle_task(hwmgr, task_id, user_state); @@ -528,15 +457,9 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) { struct pp_hwmgr *hwmgr = handle; struct pp_power_state *state; - int ret = 0; enum amd_pm_state_type pm_type; - ret = pp_check(hwmgr); - - if (ret) - return ret; - - if (hwmgr->current_ps == NULL) + if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps) return -EINVAL; mutex_lock(&hwmgr->smu_lock); @@ -568,11 +491,8 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; - - ret = pp_check(hwmgr); - if (ret) + if (!hwmgr || !hwmgr->pm_en) return; if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) { @@ -587,13 +507,10 @@ static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) static uint32_t pp_dpm_get_fan_control_mode(void *handle) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; uint32_t mode = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return 0; if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -610,10 +527,8 @@ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent) struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -630,10 +545,8 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed) struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -651,10 +564,8 @@ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm) struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL) return -EINVAL; @@ -670,16 +581,10 @@ static int pp_dpm_get_pp_num_states(void *handle, { struct pp_hwmgr *hwmgr = handle; int i; - int ret = 0; memset(data, 0, sizeof(*data)); - ret = pp_check(hwmgr); - - if (ret) - return ret; - - if (hwmgr->ps == NULL) + if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps) return -EINVAL; mutex_lock(&hwmgr->smu_lock); @@ 
-713,15 +618,9 @@ static int pp_dpm_get_pp_num_states(void *handle, static int pp_dpm_get_pp_table(void *handle, char **table) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; int size = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; - - if (!hwmgr->soft_pp_table) + if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table) return -EINVAL; mutex_lock(&hwmgr->smu_lock); @@ -736,10 +635,6 @@ static int amd_powerplay_reset(void *handle) struct pp_hwmgr *hwmgr = handle; int ret; - ret = pp_check(hwmgr); - if (ret) - return ret; - ret = hwmgr_hw_fini(hwmgr); if (ret) return ret; @@ -754,40 +649,38 @@ static int amd_powerplay_reset(void *handle) static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; - - ret = pp_check(hwmgr); + int ret = -ENOMEM; - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; mutex_lock(&hwmgr->smu_lock); if (!hwmgr->hardcode_pp_table) { hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table, hwmgr->soft_pp_table_size, GFP_KERNEL); - if (!hwmgr->hardcode_pp_table) { - mutex_unlock(&hwmgr->smu_lock); - return -ENOMEM; - } + if (!hwmgr->hardcode_pp_table) + goto err; } memcpy(hwmgr->hardcode_pp_table, buf, size); hwmgr->soft_pp_table = hwmgr->hardcode_pp_table; - mutex_unlock(&hwmgr->smu_lock); ret = amd_powerplay_reset(handle); if (ret) - return ret; + goto err; if (hwmgr->hwmgr_func->avfs_control) { ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false); if (ret) - return ret; + goto err; } - + mutex_unlock(&hwmgr->smu_lock); return 0; +err: + mutex_unlock(&hwmgr->smu_lock); + return ret; } static int pp_dpm_force_clock_level(void *handle, @@ -796,10 +689,8 @@ static int pp_dpm_force_clock_level(void *handle, struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (hwmgr->hwmgr_func->force_clock_level == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -820,10 +711,8 @@ static int pp_dpm_print_clock_levels(void *handle, struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (hwmgr->hwmgr_func->print_clock_levels == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -840,10 +729,8 @@ static int pp_dpm_get_sclk_od(void *handle) struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (hwmgr->hwmgr_func->get_sclk_od == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -860,10 +747,8 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value) struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (hwmgr->hwmgr_func->set_sclk_od == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -881,10 +766,8 @@ static int pp_dpm_get_mclk_od(void *handle) struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (hwmgr->hwmgr_func->get_mclk_od == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -901,10 +784,8 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value) struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (hwmgr->hwmgr_func->set_mclk_od == NULL) { pr_info("%s was not 
implemented.\n", __func__); @@ -922,11 +803,7 @@ static int pp_dpm_read_sensor(void *handle, int idx, struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - if (ret) - return ret; - - if (value == NULL) + if (!hwmgr || !hwmgr->pm_en || !value) return -EINVAL; switch (idx) { @@ -948,14 +825,11 @@ static struct amd_vce_state* pp_dpm_get_vce_clock_state(void *handle, unsigned idx) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; - - ret = pp_check(hwmgr); - if (ret) + if (!hwmgr || !hwmgr->pm_en) return NULL; - if (hwmgr && idx < hwmgr->num_vce_state_tables) + if (idx < hwmgr->num_vce_state_tables) return &hwmgr->vce_states[idx]; return NULL; } @@ -964,7 +838,7 @@ static int pp_get_power_profile_mode(void *handle, char *buf) { struct pp_hwmgr *hwmgr = handle; - if (!buf || pp_check(hwmgr)) + if (!hwmgr || !hwmgr->pm_en || !buf) return -EINVAL; if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) { @@ -980,12 +854,12 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size) struct pp_hwmgr *hwmgr = handle; int ret = -EINVAL; - if (pp_check(hwmgr)) - return -EINVAL; + if (!hwmgr || !hwmgr->pm_en) + return ret; if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) { pr_info("%s was not implemented.\n", __func__); - return -EINVAL; + return ret; } mutex_lock(&hwmgr->smu_lock); if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) @@ -998,7 +872,7 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3 { struct pp_hwmgr *hwmgr = handle; - if (pp_check(hwmgr)) + if (!hwmgr || !hwmgr->pm_en) return -EINVAL; if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) { @@ -1016,7 +890,7 @@ static int pp_dpm_switch_power_profile(void *handle, long workload; uint32_t index; - if (pp_check(hwmgr)) + if (!hwmgr || !hwmgr->pm_en) return -EINVAL; if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) { @@ -1048,46 +922,12 @@ static int pp_dpm_switch_power_profile(void *handle, return 0; } -static int pp_dpm_notify_smu_memory_info(void *handle, - uint32_t virtual_addr_low, - uint32_t virtual_addr_hi, - uint32_t mc_addr_low, - uint32_t mc_addr_hi, - uint32_t size) -{ - struct pp_hwmgr *hwmgr = handle; - int ret = 0; - - ret = pp_check(hwmgr); - - if (ret) - return ret; - - if (hwmgr->hwmgr_func->notify_cac_buffer_info == NULL) { - pr_info("%s was not implemented.\n", __func__); - return -EINVAL; - } - - mutex_lock(&hwmgr->smu_lock); - - ret = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr, virtual_addr_low, - virtual_addr_hi, mc_addr_low, mc_addr_hi, - size); - - mutex_unlock(&hwmgr->smu_lock); - - return ret; -} - static int pp_set_power_limit(void *handle, uint32_t limit) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (hwmgr->hwmgr_func->set_power_limit == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -1104,20 +944,14 @@ static int pp_set_power_limit(void *handle, uint32_t limit) hwmgr->hwmgr_func->set_power_limit(hwmgr, limit); hwmgr->power_limit = limit; mutex_unlock(&hwmgr->smu_lock); - return ret; + return 0; } static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; - - ret = pp_check(hwmgr); - - if (ret) - return ret; - if (limit == NULL) + if (!hwmgr || !hwmgr->pm_en ||!limit) return -EINVAL; mutex_lock(&hwmgr->smu_lock); @@ -1129,19 +963,16 @@ static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit) 
mutex_unlock(&hwmgr->smu_lock); - return ret; + return 0; } static int pp_display_configuration_change(void *handle, const struct amd_pp_display_configuration *display_config) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; - - ret = pp_check(hwmgr); - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; mutex_lock(&hwmgr->smu_lock); phm_store_dal_configuration_data(hwmgr, display_config); @@ -1155,12 +986,7 @@ static int pp_get_display_power_level(void *handle, struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; - - if (output == NULL) + if (!hwmgr || !hwmgr->pm_en ||!output) return -EINVAL; mutex_lock(&hwmgr->smu_lock); @@ -1177,10 +1003,8 @@ static int pp_get_current_clocks(void *handle, struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; mutex_lock(&hwmgr->smu_lock); @@ -1225,10 +1049,8 @@ static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struc struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (clocks == NULL) return -EINVAL; @@ -1246,11 +1068,7 @@ static int pp_get_clock_by_type_with_latency(void *handle, struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - if (ret) - return ret; - - if (!clocks) + if (!hwmgr || !hwmgr->pm_en ||!clocks) return -EINVAL; mutex_lock(&hwmgr->smu_lock); @@ -1266,11 +1084,7 @@ static int pp_get_clock_by_type_with_voltage(void *handle, struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - if (ret) - return ret; - - if (!clocks) + if (!hwmgr || !hwmgr->pm_en ||!clocks) return -EINVAL; mutex_lock(&hwmgr->smu_lock); @@ -1282,21 +1096,17 @@ static int pp_get_clock_by_type_with_voltage(void *handle, } static int pp_set_watermarks_for_clocks_ranges(void *handle, - struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges) + void *clock_ranges) { struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - if (ret) - return ret; - - if (!wm_with_clock_ranges) + if (!hwmgr || !hwmgr->pm_en || !clock_ranges) return -EINVAL; mutex_lock(&hwmgr->smu_lock); ret = phm_set_watermarks_for_clocks_ranges(hwmgr, - wm_with_clock_ranges); + clock_ranges); mutex_unlock(&hwmgr->smu_lock); return ret; @@ -1308,11 +1118,7 @@ static int pp_display_clock_voltage_request(void *handle, struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - if (ret) - return ret; - - if (!clock) + if (!hwmgr || !hwmgr->pm_en ||!clock) return -EINVAL; mutex_lock(&hwmgr->smu_lock); @@ -1328,12 +1134,7 @@ static int pp_get_display_mode_validation_clocks(void *handle, struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; - - if (clocks == NULL) + if (!hwmgr || !hwmgr->pm_en ||!clocks) return -EINVAL; mutex_lock(&hwmgr->smu_lock); @@ -1345,22 +1146,78 @@ static int pp_get_display_mode_validation_clocks(void *handle, return ret; } -static int pp_set_mmhub_powergating_by_smu(void *handle) +static int pp_dpm_powergate_mmhub(void *handle) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; - ret = pp_check(hwmgr); + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; - if (ret) - return ret; + if (hwmgr->hwmgr_func->powergate_mmhub == NULL) { + pr_info("%s was not implemented.\n", __func__); + return 0; + } + + return hwmgr->hwmgr_func->powergate_mmhub(hwmgr); +} + +static int pp_dpm_powergate_gfx(void *handle, bool gate) 
+{
+	struct pp_hwmgr *hwmgr = handle;
+
+	if (!hwmgr || !hwmgr->pm_en)
+		return 0;
 
-	if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
+	if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
 		return 0;
 	}
 
-	return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
+	return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
+}
+
+static int pp_set_powergating_by_smu(void *handle,
+		uint32_t block_type, bool gate)
+{
+	int ret = 0;
+
+	switch (block_type) {
+	case AMD_IP_BLOCK_TYPE_UVD:
+	case AMD_IP_BLOCK_TYPE_VCN:
+		pp_dpm_powergate_uvd(handle, gate);
+		break;
+	case AMD_IP_BLOCK_TYPE_VCE:
+		pp_dpm_powergate_vce(handle, gate);
+		break;
+	case AMD_IP_BLOCK_TYPE_GMC:
+		pp_dpm_powergate_mmhub(handle);
+		break;
+	case AMD_IP_BLOCK_TYPE_GFX:
+		ret = pp_dpm_powergate_gfx(handle, gate);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+static int pp_notify_smu_enable_pwe(void *handle)
+{
+	struct pp_hwmgr *hwmgr = handle;
+
+	if (!hwmgr || !hwmgr->pm_en)
+		return -EINVAL;
+
+	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
+		pr_info("%s was not implemented.\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hwmgr->smu_lock);
+	hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
+	mutex_unlock(&hwmgr->smu_lock);
+
+	return 0;
+}
 
 static const struct amd_pm_funcs pp_dpm_funcs = {
@@ -1369,8 +1226,6 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
 	.force_performance_level = pp_dpm_force_performance_level,
 	.get_performance_level = pp_dpm_get_performance_level,
 	.get_current_power_state = pp_dpm_get_current_power_state,
-	.powergate_vce = pp_dpm_powergate_vce,
-	.powergate_uvd = pp_dpm_powergate_uvd,
 	.dispatch_tasks = pp_dpm_dispatch_tasks,
 	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
 	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
@@ -1390,7 +1245,7 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
 	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
 	.switch_power_profile = pp_dpm_switch_power_profile,
 	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
-	.notify_smu_memory_info = pp_dpm_notify_smu_memory_info,
+	.set_powergating_by_smu = pp_set_powergating_by_smu,
 	.get_power_profile_mode = pp_get_power_profile_mode,
 	.set_power_profile_mode = pp_set_power_profile_mode,
 	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
@@ -1408,5 +1263,5 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
 	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
 	.display_clock_voltage_request = pp_display_clock_voltage_request,
 	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
-	.set_mmhub_powergating_by_smu = pp_set_mmhub_powergating_by_smu,
+	.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
 };
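The .powergate_uvd/.powergate_vce hooks just removed from pp_dpm_funcs are reached through the new .set_powergating_by_smu dispatcher instead. A hedged sketch of how a caller on the amdgpu side gets there (example_gate_vce is an illustrative wrapper, not code from this series):

static int example_gate_vce(struct amdgpu_device *adev, bool gate)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;

	if (!funcs || !funcs->set_powergating_by_smu)
		return -EINVAL;

	/* one entry point now routes UVD/VCN, VCE, GMC (mmhub) and GFX */
	return funcs->set_powergating_by_smu(adev->powerplay.pp_handle,
					     AMD_IP_BLOCK_TYPE_VCE, gate);
}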
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index ae2e9339dd6b..53207e76b0f3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -75,8 +75,7 @@ int phm_set_power_state(struct pp_hwmgr *hwmgr,
 
 int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
 {
-	int ret = 1;
-	bool enabled;
+	int ret = -EINVAL;
 	PHM_FUNC_CHECK(hwmgr);
 
 	if (smum_is_dpm_running(hwmgr)) {
@@ -87,17 +86,12 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
 	if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
 		ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
 
-	enabled = ret == 0;
-
-	cgs_notify_dpm_enabled(hwmgr->device, enabled);
-
 	return ret;
 }
 
 int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
 {
-	int ret = -1;
-	bool enabled;
+	int ret = -EINVAL;
 
 	PHM_FUNC_CHECK(hwmgr);
 
@@ -109,10 +103,6 @@ int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
 	if (hwmgr->hwmgr_func->dynamic_state_management_disable)
 		ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr);
 
-	enabled = ret == 0 ? false : true;
-
-	cgs_notify_dpm_enabled(hwmgr->device, enabled);
-
 	return ret;
 }
 
@@ -142,6 +132,15 @@ int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	return 0;
 }
 
+int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr)
+{
+	PHM_FUNC_CHECK(hwmgr);
+
+	if (hwmgr->hwmgr_func->apply_clocks_adjust_rules != NULL)
+		return hwmgr->hwmgr_func->apply_clocks_adjust_rules(hwmgr);
+	return 0;
+}
+
 int phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
 {
 	PHM_FUNC_CHECK(hwmgr);
@@ -171,6 +170,16 @@ int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
+int phm_pre_display_configuration_changed(struct pp_hwmgr *hwmgr)
+{
+	PHM_FUNC_CHECK(hwmgr);
+
+	if (NULL != hwmgr->hwmgr_func->pre_display_config_changed)
+		hwmgr->hwmgr_func->pre_display_config_changed(hwmgr);
+
+	return 0;
+
+}
 
 int phm_display_configuration_changed(struct pp_hwmgr *hwmgr)
 {
@@ -275,13 +284,11 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
 	if (display_config == NULL)
 		return -EINVAL;
 
-	hwmgr->display_config = *display_config;
-
 	if (NULL != hwmgr->hwmgr_func->set_deep_sleep_dcefclk)
-		hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, hwmgr->display_config.min_dcef_deep_sleep_set_clk);
+		hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk);
 
-	for (index = 0; index < hwmgr->display_config.num_path_including_non_display; index++) {
-		if (hwmgr->display_config.displays[index].controller_id != 0)
+	for (index = 0; index < display_config->num_path_including_non_display; index++) {
+		if (display_config->displays[index].controller_id != 0)
 			number_of_active_display++;
 	}
 
@@ -428,7 +435,7 @@ int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
 }
 
 int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
-		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+		void *clock_ranges)
 {
 	PHM_FUNC_CHECK(hwmgr);
 
@@ -436,7 +443,7 @@ int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
 		return -EINVAL;
 
 	return hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr,
-			wm_with_clock_ranges);
+			clock_ranges);
 }
 
 int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 42982055b161..8994aa5c8cf8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -40,6 +40,7 @@ extern const struct pp_smumgr_func iceland_smu_funcs;
 extern const struct pp_smumgr_func tonga_smu_funcs;
 extern const struct pp_smumgr_func fiji_smu_funcs;
 extern const struct pp_smumgr_func polaris10_smu_funcs;
+extern const struct pp_smumgr_func vegam_smu_funcs;
 extern const struct pp_smumgr_func vega10_smu_funcs;
 extern const struct pp_smumgr_func vega12_smu_funcs;
 extern const struct pp_smumgr_func smu10_smu_funcs;
@@ -76,11 +77,10 @@ static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
 
 int hwmgr_early_init(struct pp_hwmgr *hwmgr)
 {
-	if (hwmgr == NULL)
+	if (!hwmgr)
 		return -EINVAL;
 
 	hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
-	hwmgr->power_source = PP_PowerSource_AC;
 	hwmgr->pp_table_version = PP_TABLE_V1;
 	hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
 	hwmgr->request_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
@@ -95,7 +95,8 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
 		hwmgr->smumgr_funcs = &ci_smu_funcs;
 		ci_set_asic_special_caps(hwmgr);
 		hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
-					PP_ENABLE_GFX_CG_THRU_SMU);
+					PP_ENABLE_GFX_CG_THRU_SMU |
+					PP_GFXOFF_MASK);
 		hwmgr->pp_table_version = PP_TABLE_V0;
 		hwmgr->od_enabled = false;
 		smu7_init_function_pointers(hwmgr);
@@ -103,9 +104,11 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
 	case AMDGPU_FAMILY_CZ:
 		hwmgr->od_enabled = false;
 		hwmgr->smumgr_funcs = &smu8_smu_funcs;
+		hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
 		smu8_init_function_pointers(hwmgr);
 		break;
 	case AMDGPU_FAMILY_VI:
+		hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
 		switch (hwmgr->chip_id) {
 		case CHIP_TOPAZ:
 			hwmgr->smumgr_funcs = &iceland_smu_funcs;
@@ -133,6 +136,11 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
 			polaris_set_asic_special_caps(hwmgr);
 			hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
 			break;
+		case CHIP_VEGAM:
+			hwmgr->smumgr_funcs = &vegam_smu_funcs;
+			polaris_set_asic_special_caps(hwmgr);
+			hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
+			break;
 		default:
 			return -EINVAL;
 		}
@@ -141,6 +149,8 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
 	case AMDGPU_FAMILY_AI:
 		switch (hwmgr->chip_id) {
 		case CHIP_VEGA10:
+		case CHIP_VEGA20:
+			hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
 			hwmgr->smumgr_funcs = &vega10_smu_funcs;
 			vega10_hwmgr_init(hwmgr);
 			break;
@@ -170,25 +180,66 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
+int hwmgr_sw_init(struct pp_hwmgr *hwmgr)
+{
+	if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->smu_init)
+		return -EINVAL;
+
+	phm_register_irq_handlers(hwmgr);
+
+	return hwmgr->smumgr_funcs->smu_init(hwmgr);
+}
+
+
+int hwmgr_sw_fini(struct pp_hwmgr *hwmgr)
+{
+	if (hwmgr && hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->smu_fini)
+		hwmgr->smumgr_funcs->smu_fini(hwmgr);
+
+	return 0;
+}
+
 int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
 {
 	int ret = 0;
 
-	if (hwmgr == NULL)
+	if (!hwmgr || !hwmgr->smumgr_funcs)
 		return -EINVAL;
 
-	if (hwmgr->pptable_func == NULL ||
-	    hwmgr->pptable_func->pptable_init == NULL ||
-	    hwmgr->hwmgr_func->backend_init == NULL)
-		return -EINVAL;
+	if (hwmgr->smumgr_funcs->start_smu) {
+		ret = hwmgr->smumgr_funcs->start_smu(hwmgr);
+		if (ret) {
+			pr_err("smc start failed\n");
+			return -EINVAL;
+		}
+	}
+
+	if (!hwmgr->pm_en)
+		return 0;
+
+	if (!hwmgr->pptable_func ||
+	    !hwmgr->pptable_func->pptable_init ||
+	    !hwmgr->hwmgr_func->backend_init) {
+		hwmgr->pm_en = false;
+		pr_info("dpm not supported\n");
+		return 0;
+	}
 
 	ret = hwmgr->pptable_func->pptable_init(hwmgr);
 	if (ret)
 		goto err;
 
+	((struct amdgpu_device *)hwmgr->adev)->pm.no_fan =
+			hwmgr->thermal_controller.fanInfo.bNoFan;
+
 	ret = hwmgr->hwmgr_func->backend_init(hwmgr);
 	if (ret)
 		goto err1;
+	/* make sure dc limits are valid */
+	if ((hwmgr->dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+	    (hwmgr->dyn_state.max_clock_voltage_on_dc.mclk == 0))
+		hwmgr->dyn_state.max_clock_voltage_on_dc =
+			hwmgr->dyn_state.max_clock_voltage_on_ac;
 
 	ret = psm_init_power_state_table(hwmgr);
 	if (ret)
@@ -206,6 +257,8 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
 	if (ret)
 		goto err2;
 
+	((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled = true;
+
 	return 0;
 err2:
 	if (hwmgr->hwmgr_func->backend_fini)
@@ -214,14 +267,13 @@ err1:
 	if (hwmgr->pptable_func->pptable_fini)
 		hwmgr->pptable_func->pptable_fini(hwmgr);
 err:
-	pr_err("amdgpu: powerplay initialization failed\n");
 	return ret;
 }
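hwmgr_hw_init() above now owns the start_smu step that pp_hw_init() and pp_resume() used to duplicate, and it downgrades to "no DPM" rather than failing the whole IP block. A condensed, illustrative restatement of that policy (a sketch, not code from the patch):

static int example_hw_init_policy(struct pp_hwmgr *hwmgr)
{
	/* the SMC must come up; without it the IP block fails */
	if (hwmgr->smumgr_funcs->start_smu &&
	    hwmgr->smumgr_funcs->start_smu(hwmgr))
		return -EINVAL;

	/* pm_en false (SR-IOV VF, amdgpu_dpm=0): SMC only, no DPM stack */
	if (!hwmgr->pm_en)
		return 0;

	/* missing pptable/backend hooks: disable DPM, keep the device */
	if (!hwmgr->pptable_func || !hwmgr->hwmgr_func->backend_init) {
		hwmgr->pm_en = false;
		return 0;
	}

	/* ... pptable_init / backend_init / psm setup as in the hunk above */
	return 0;
}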
 
 int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
 {
-	if (hwmgr == NULL)
-		return -EINVAL;
+	if (!hwmgr || !hwmgr->pm_en)
+		return 0;
 
 	phm_stop_thermal_controller(hwmgr);
 	psm_set_boot_states(hwmgr);
@@ -236,12 +288,12 @@
 	return psm_fini_power_state_table(hwmgr);
 }
 
-int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr)
+int hwmgr_suspend(struct pp_hwmgr *hwmgr)
 {
 	int ret = 0;
 
-	if (hwmgr == NULL)
-		return -EINVAL;
+	if (!hwmgr || !hwmgr->pm_en)
+		return 0;
 
 	phm_disable_smc_firmware_ctf(hwmgr);
 	ret = psm_set_boot_states(hwmgr);
 	if (ret)
 		return ret;
@@ -255,13 +307,23 @@
 	return ret;
 }
 
-int hwmgr_hw_resume(struct pp_hwmgr *hwmgr)
+int hwmgr_resume(struct pp_hwmgr *hwmgr)
 {
 	int ret = 0;
 
-	if (hwmgr == NULL)
+	if (!hwmgr)
 		return -EINVAL;
 
+	if (hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->start_smu) {
+		if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
+			pr_err("smc start failed\n");
+			return -EINVAL;
+		}
+	}
+
+	if (!hwmgr->pm_en)
+		return 0;
+
 	ret = phm_setup_asic(hwmgr);
 	if (ret)
 		return ret;
@@ -270,9 +332,6 @@
 	if (ret)
 		return ret;
 	ret = phm_start_thermal_controller(hwmgr);
-	if (ret)
-		return ret;
-
 	ret |= psm_set_performance_states(hwmgr);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index 0f2851b5b368..91ffb7bc4ee7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -46,11 +46,11 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
 			  sizeof(struct pp_power_state);
 
 	if (table_entries == 0 || size == 0) {
-		pr_warn("Please check whether power state management is suppported on this asic\n");
+		pr_warn("Please check whether power state management is supported on this asic\n");
 		return 0;
 	}
 
-	hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL);
+	hwmgr->ps = kcalloc(table_entries, size, GFP_KERNEL);
 	if (hwmgr->ps == NULL)
 		return -ENOMEM;
@@ -265,10 +265,18 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
 	if (skip)
 		return 0;
 
+	phm_pre_display_configuration_changed(hwmgr);
+
 	phm_display_configuration_changed(hwmgr);
 
 	if (hwmgr->ps)
 		power_state_management(hwmgr, new_ps);
+	else
+		/*
+		 * for vega12/vega20, which do not support the power state
+		 * manager, DAL clock limits should still be honoured
+		 */
+		phm_apply_clock_adjust_rules(hwmgr);
 
 	phm_notify_smc_display_config_after_ps_adjustment(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index c6febbf0bf69..01dc46dc9c8a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -23,7 +23,8 @@
 #include "pp_debug.h"
 #include <linux/module.h>
 #include <linux/slab.h>
-
+#include <linux/delay.h>
+#include "atom.h"
 #include "ppatomctrl.h"
 #include "atombios.h"
 #include "cgs_common.h"
@@ -128,7 +129,6 @@ static int atomctrl_set_mc_reg_address_table(
 	return 0;
 }
 
-
 int atomctrl_initialize_mc_reg_table(
 		struct pp_hwmgr *hwmgr,
 		uint8_t module_index,
@@ -141,7 +141,7 @@ int atomctrl_initialize_mc_reg_table(
 	u16 size;
 
 	vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *)
-		cgs_atom_get_data_table(hwmgr->device,
+		smu_atom_get_data_table(hwmgr->adev,
 				GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);
 
 	if (module_index >= vram_info->ucNumOfVRAMModule) {
@@ -174,6 +174,8 @@ int atomctrl_set_engine_dram_timings_rv770(
 		uint32_t engine_clock,
 		uint32_t memory_clock)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
+
 	SET_ENGINE_CLOCK_PS_ALLOCATION
engine_clock_parameters; /* They are both in 10KHz Units. */ @@ -184,9 +186,10 @@ int atomctrl_set_engine_dram_timings_rv770( /* in 10 khz units.*/ engine_clock_parameters.sReserved.ulClock = cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK); - return cgs_atom_exec_cmd_table(hwmgr->device, + + return amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings), - &engine_clock_parameters); + (uint32_t *)&engine_clock_parameters); } /** @@ -203,7 +206,7 @@ static ATOM_VOLTAGE_OBJECT_INFO *get_voltage_info_table(void *device) union voltage_object_info *voltage_info; voltage_info = (union voltage_object_info *) - cgs_atom_get_data_table(device, index, + smu_atom_get_data_table(device, index, &size, &frev, &crev); if (voltage_info != NULL) @@ -247,16 +250,16 @@ int atomctrl_get_memory_pll_dividers_si( pp_atomctrl_memory_clock_param *mpll_param, bool strobe_mode) { + struct amdgpu_device *adev = hwmgr->adev; COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters; int result; mpll_parameters.ulClock = cpu_to_le32(clock_value); mpll_parameters.ucInputFlag = (uint8_t)((strobe_mode) ? 1 : 0); - result = cgs_atom_exec_cmd_table - (hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), - &mpll_parameters); + (uint32_t *)&mpll_parameters); if (0 == result) { mpll_param->mpll_fb_divider.clk_frac = @@ -295,14 +298,15 @@ int atomctrl_get_memory_pll_dividers_si( int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param) { + struct amdgpu_device *adev = hwmgr->adev; COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters; int result; mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value); - result = cgs_atom_exec_cmd_table(hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), - &mpll_parameters); + (uint32_t *)&mpll_parameters); if (!result) mpll_param->mpll_post_divider = @@ -311,19 +315,49 @@ int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr, return result; } +int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr, + uint32_t clock_value, + pp_atomctrl_memory_clock_param_ai *mpll_param) +{ + struct amdgpu_device *adev = hwmgr->adev; + COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3 mpll_parameters = {{0}, 0, 0}; + int result; + + mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value); + + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, + GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), + (uint32_t *)&mpll_parameters); + + /* VEGAM's mpll takes sometime to finish computing */ + udelay(10); + + if (!result) { + mpll_param->ulMclk_fcw_int = + le16_to_cpu(mpll_parameters.usMclk_fcw_int); + mpll_param->ulMclk_fcw_frac = + le16_to_cpu(mpll_parameters.usMclk_fcw_frac); + mpll_param->ulClock = + le32_to_cpu(mpll_parameters.ulClock.ulClock); + mpll_param->ulPostDiv = mpll_parameters.ulClock.ucPostDiv; + } + + return result; +} + int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_kong *dividers) { + struct amdgpu_device *adev = hwmgr->adev; COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters; int result; pll_parameters.ulClock = cpu_to_le32(clock_value); - result = cgs_atom_exec_cmd_table - (hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, 
ComputeMemoryEnginePLL), - &pll_parameters); + (uint32_t *)&pll_parameters); if (0 == result) { dividers->pll_post_divider = pll_parameters.ucPostDiv; @@ -338,16 +372,16 @@ int atomctrl_get_engine_pll_dividers_vi( uint32_t clock_value, pp_atomctrl_clock_dividers_vi *dividers) { + struct amdgpu_device *adev = hwmgr->adev; COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters; int result; pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value); pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK; - result = cgs_atom_exec_cmd_table - (hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), - &pll_patameters); + (uint32_t *)&pll_patameters); if (0 == result) { dividers->pll_post_divider = @@ -375,16 +409,16 @@ int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_ai *dividers) { + struct amdgpu_device *adev = hwmgr->adev; COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_patameters; int result; pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value); pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK; - result = cgs_atom_exec_cmd_table - (hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), - &pll_patameters); + (uint32_t *)&pll_patameters); if (0 == result) { dividers->usSclk_fcw_frac = le16_to_cpu(pll_patameters.usSclk_fcw_frac); @@ -407,6 +441,7 @@ int atomctrl_get_dfs_pll_dividers_vi( uint32_t clock_value, pp_atomctrl_clock_dividers_vi *dividers) { + struct amdgpu_device *adev = hwmgr->adev; COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters; int result; @@ -414,10 +449,9 @@ int atomctrl_get_dfs_pll_dividers_vi( pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK; - result = cgs_atom_exec_cmd_table - (hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), - &pll_patameters); + (uint32_t *)&pll_patameters); if (0 == result) { dividers->pll_post_divider = @@ -452,7 +486,7 @@ uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr) uint32_t clock; fw_info = (ATOM_FIRMWARE_INFO *) - cgs_atom_get_data_table(hwmgr->device, + smu_atom_get_data_table(hwmgr->adev, GetIndexIntoMasterTable(DATA, FirmwareInfo), &size, &frev, &crev); @@ -476,7 +510,7 @@ bool atomctrl_is_voltage_controlled_by_gpio_v3( uint8_t voltage_mode) { ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info = - (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->device); + (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev); bool ret; PP_ASSERT_WITH_CODE((NULL != voltage_info), @@ -495,7 +529,7 @@ int atomctrl_get_voltage_table_v3( pp_atomctrl_voltage_table *voltage_table) { ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info = - (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->device); + (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev); const ATOM_VOLTAGE_OBJECT_V3 *voltage_object; unsigned int i; @@ -572,7 +606,7 @@ static ATOM_GPIO_PIN_LUT *get_gpio_lookup_table(void *device) void *table_address; table_address = (ATOM_GPIO_PIN_LUT *) - cgs_atom_get_data_table(device, + smu_atom_get_data_table(device, GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT), &size, &frev, &crev); @@ -592,7 +626,7 @@ bool atomctrl_get_pp_assign_pin( { bool bRet = false; ATOM_GPIO_PIN_LUT *gpio_lookup_table = - 
get_gpio_lookup_table(hwmgr->device); + get_gpio_lookup_table(hwmgr->adev); PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table), "Could not find GPIO lookup Table in BIOS.", return false); @@ -613,7 +647,7 @@ int atomctrl_calculate_voltage_evv_on_sclk( bool debug) { ATOM_ASIC_PROFILING_INFO_V3_4 *getASICProfilingInfo; - + struct amdgpu_device *adev = hwmgr->adev; EFUSE_LINEAR_FUNC_PARAM sRO_fuse; EFUSE_LINEAR_FUNC_PARAM sCACm_fuse; EFUSE_LINEAR_FUNC_PARAM sCACb_fuse; @@ -640,7 +674,7 @@ int atomctrl_calculate_voltage_evv_on_sclk( int result; getASICProfilingInfo = (ATOM_ASIC_PROFILING_INFO_V3_4 *) - cgs_atom_get_data_table(hwmgr->device, + smu_atom_get_data_table(hwmgr->adev, GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo), NULL, NULL, NULL); @@ -706,9 +740,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( sOutput_FuseValues.sEfuse = sInput_FuseValues; - result = cgs_atom_exec_cmd_table(hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - &sOutput_FuseValues); + (uint32_t *)&sOutput_FuseValues); if (result) return result; @@ -727,9 +761,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( sOutput_FuseValues.sEfuse = sInput_FuseValues; - result = cgs_atom_exec_cmd_table(hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - &sOutput_FuseValues); + (uint32_t *)&sOutput_FuseValues); if (result) return result; @@ -747,9 +781,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( sInput_FuseValues.ucBitLength = sCACb_fuse.ucEfuseLength; sOutput_FuseValues.sEfuse = sInput_FuseValues; - result = cgs_atom_exec_cmd_table(hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - &sOutput_FuseValues); + (uint32_t *)&sOutput_FuseValues); if (result) return result; @@ -768,9 +802,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( sOutput_FuseValues.sEfuse = sInput_FuseValues; - result = cgs_atom_exec_cmd_table(hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - &sOutput_FuseValues); + (uint32_t *)&sOutput_FuseValues); if (result) return result; @@ -790,9 +824,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( sOutput_FuseValues.sEfuse = sInput_FuseValues; - result = cgs_atom_exec_cmd_table(hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - &sOutput_FuseValues); + (uint32_t *)&sOutput_FuseValues); if (result) return result; @@ -811,9 +845,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( sInput_FuseValues.ucBitLength = sKv_b_fuse.ucEfuseLength; sOutput_FuseValues.sEfuse = sInput_FuseValues; - result = cgs_atom_exec_cmd_table(hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - &sOutput_FuseValues); + (uint32_t *)&sOutput_FuseValues); if (result) return result; @@ -842,9 +876,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( sOutput_FuseValues.sEfuse = sInput_FuseValues; - result = cgs_atom_exec_cmd_table(hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - &sOutput_FuseValues); + (uint32_t *)&sOutput_FuseValues); if (result) return result; @@ -1053,8 +1087,9 @@ int atomctrl_get_voltage_evv_on_sclk( uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage) { - int 
result; + struct amdgpu_device *adev = hwmgr->adev; GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space; + int result; get_voltage_info_param_space.ucVoltageType = voltage_type; @@ -1065,14 +1100,12 @@ int atomctrl_get_voltage_evv_on_sclk( get_voltage_info_param_space.ulSCLKFreq = cpu_to_le32(sclk); - result = cgs_atom_exec_cmd_table(hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), - &get_voltage_info_param_space); - - if (0 != result) - return result; + (uint32_t *)&get_voltage_info_param_space); - *voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *) + *voltage = result ? 0 : + le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *) (&get_voltage_info_param_space))->usVoltageLevel); return result; @@ -1088,9 +1121,10 @@ int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr, uint16_t virtual_voltage_id, uint16_t *voltage) { + struct amdgpu_device *adev = hwmgr->adev; + GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space; int result; int entry_id; - GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space; /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */ for (entry_id = 0; entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count; entry_id++) { @@ -1111,9 +1145,9 @@ int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr, get_voltage_info_param_space.ulSCLKFreq = cpu_to_le32(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].clk); - result = cgs_atom_exec_cmd_table(hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), - &get_voltage_info_param_space); + (uint32_t *)&get_voltage_info_param_space); if (0 != result) return result; @@ -1135,7 +1169,7 @@ uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr) u16 size; fw_info = (ATOM_COMMON_TABLE_HEADER *) - cgs_atom_get_data_table(hwmgr->device, + smu_atom_get_data_table(hwmgr->adev, GetIndexIntoMasterTable(DATA, FirmwareInfo), &size, &frev, &crev); @@ -1167,7 +1201,7 @@ static ATOM_ASIC_INTERNAL_SS_INFO *asic_internal_ss_get_ss_table(void *device) u16 size; table = (ATOM_ASIC_INTERNAL_SS_INFO *) - cgs_atom_get_data_table(device, + smu_atom_get_data_table(device, GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info), &size, &frev, &crev); @@ -1188,7 +1222,7 @@ static int asic_internal_ss_get_ss_asignment(struct pp_hwmgr *hwmgr, memset(ssEntry, 0x00, sizeof(pp_atomctrl_internal_ss_info)); - table = asic_internal_ss_get_ss_table(hwmgr->device); + table = asic_internal_ss_get_ss_table(hwmgr->adev); if (NULL == table) return -1; @@ -1260,9 +1294,10 @@ int atomctrl_get_engine_clock_spread_spectrum( ASIC_INTERNAL_ENGINE_SS, engine_clock, ssInfo); } -int atomctrl_read_efuse(void *device, uint16_t start_index, +int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index, uint16_t end_index, uint32_t mask, uint32_t *efuse) { + struct amdgpu_device *adev = hwmgr->adev; int result; READ_EFUSE_VALUE_PARAMETER efuse_param; @@ -1272,11 +1307,10 @@ int atomctrl_read_efuse(void *device, uint16_t start_index, efuse_param.sEfuse.ucBitLength = (uint8_t) ((end_index - start_index) + 1); - result = cgs_atom_exec_cmd_table(device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - &efuse_param); - if (!result) - *efuse = le32_to_cpu(efuse_param.ulEfuseValue) & mask; + (uint32_t *)&efuse_param); + *efuse = result ? 
0 : le32_to_cpu(efuse_param.ulEfuseValue) & mask; return result; } @@ -1284,6 +1318,7 @@ int atomctrl_read_efuse(void *device, uint16_t start_index, int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock, uint8_t level) { + struct amdgpu_device *adev = hwmgr->adev; DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 memory_clock_parameters; int result; @@ -1293,10 +1328,9 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock, ADJUST_MC_SETTING_PARAM; memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level; - result = cgs_atom_exec_cmd_table - (hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings), - &memory_clock_parameters); + (uint32_t *)&memory_clock_parameters); return result; } @@ -1304,7 +1338,7 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock, int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage) { - + struct amdgpu_device *adev = hwmgr->adev; int result; GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3 get_voltage_info_param_space; @@ -1313,15 +1347,12 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_ get_voltage_info_param_space.usVoltageLevel = cpu_to_le16(virtual_voltage_Id); get_voltage_info_param_space.ulSCLKFreq = cpu_to_le32(sclk); - result = cgs_atom_exec_cmd_table(hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), - &get_voltage_info_param_space); + (uint32_t *)&get_voltage_info_param_space); - if (0 != result) - return result; - - *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *) - (&get_voltage_info_param_space))->ulVoltageLevel); + *voltage = result ? 
0 : + le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel); return result; } @@ -1334,7 +1365,7 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr u16 size; ATOM_SMU_INFO_V2_1 *psmu_info = - (ATOM_SMU_INFO_V2_1 *)cgs_atom_get_data_table(hwmgr->device, + (ATOM_SMU_INFO_V2_1 *)smu_atom_get_data_table(hwmgr->adev, GetIndexIntoMasterTable(DATA, SMU_Info), &size, &frev, &crev); @@ -1362,7 +1393,7 @@ int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, return -EINVAL; profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *) - cgs_atom_get_data_table(hwmgr->device, + smu_atom_get_data_table(hwmgr->adev, GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo), NULL, NULL, NULL); if (!profile) @@ -1402,7 +1433,7 @@ int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint16_t *load_line) { ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info = - (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->device); + (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev); const ATOM_VOLTAGE_OBJECT_V3 *voltage_object; @@ -1421,16 +1452,17 @@ int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type, int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id) { - int result; + struct amdgpu_device *adev = hwmgr->adev; SET_VOLTAGE_PS_ALLOCATION allocation; SET_VOLTAGE_PARAMETERS_V1_3 *voltage_parameters = (SET_VOLTAGE_PARAMETERS_V1_3 *)&allocation.sASICSetVoltage; + int result; voltage_parameters->ucVoltageMode = ATOM_GET_LEAKAGE_ID; - result = cgs_atom_exec_cmd_table(hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, SetVoltage), - voltage_parameters); + (uint32_t *)voltage_parameters); *virtual_voltage_id = voltage_parameters->usVoltageLevel; @@ -1453,7 +1485,7 @@ int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr, ix = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo); profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *) - cgs_atom_get_data_table(hwmgr->device, + smu_atom_get_data_table(hwmgr->adev, ix, NULL, NULL, NULL); if (!profile) @@ -1498,3 +1530,33 @@ int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr, return 0; } + +void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc, + uint32_t *min_vddc) +{ + void *profile; + + profile = smu_atom_get_data_table(hwmgr->adev, + GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo), + NULL, NULL, NULL); + + if (profile) { + switch (hwmgr->chip_id) { + case CHIP_TONGA: + case CHIP_FIJI: + *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc) / 4; + *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc) / 4; + return; + case CHIP_POLARIS11: + case CHIP_POLARIS10: + case CHIP_POLARIS12: + *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc) / 100; + *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc) / 100; + return; + default: + break; + } + } + *max_vddc = 0; + *min_vddc = 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h index c44a92064cf1..3ee54f182943 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h @@ -146,6 +146,14 @@ struct pp_atomctrl_memory_clock_param { }; typedef struct pp_atomctrl_memory_clock_param pp_atomctrl_memory_clock_param; +struct 
pp_atomctrl_memory_clock_param_ai { + uint32_t ulClock; + uint32_t ulPostDiv; + uint16_t ulMclk_fcw_frac; + uint16_t ulMclk_fcw_int; +}; +typedef struct pp_atomctrl_memory_clock_param_ai pp_atomctrl_memory_clock_param_ai; + struct pp_atomctrl_internal_ss_info { uint32_t speed_spectrum_percentage; /* in 1/100 percentage */ uint32_t speed_spectrum_rate; /* in KHz */ @@ -295,10 +303,12 @@ extern bool atomctrl_is_voltage_controlled_by_gpio_v3(struct pp_hwmgr *hwmgr, ui extern int atomctrl_get_voltage_table_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode, pp_atomctrl_voltage_table *voltage_table); extern int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param); +extern int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr, + uint32_t clock_value, pp_atomctrl_memory_clock_param_ai *mpll_param); extern int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_kong *dividers); -extern int atomctrl_read_efuse(void *device, uint16_t start_index, +extern int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index, uint16_t end_index, uint32_t mask, uint32_t *efuse); extern int atomctrl_calculate_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage, uint16_t dpm_level, bool debug); @@ -320,5 +330,8 @@ extern int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr, uint16_t virtual_voltage_id, uint16_t efuse_voltage_id); extern int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id); + +extern void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc, + uint32_t *min_vddc); #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c index ad42caac033e..d27c1c9df286 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c @@ -23,9 +23,9 @@ #include "ppatomfwctrl.h" #include "atomfirmware.h" +#include "atom.h" #include "pp_debug.h" - static const union atom_voltage_object_v4 *pp_atomfwctrl_lookup_voltage_type_v4( const struct atom_voltage_objects_info_v4_1 *voltage_object_info_table, uint8_t voltage_type, uint8_t voltage_mode) @@ -38,35 +38,34 @@ static const union atom_voltage_object_v4 *pp_atomfwctrl_lookup_voltage_type_v4( while (offset < size) { const union atom_voltage_object_v4 *voltage_object = - (const union atom_voltage_object_v4 *)(start + offset); + (const union atom_voltage_object_v4 *)(start + offset); - if (voltage_type == voltage_object->gpio_voltage_obj.header.voltage_type && - voltage_mode == voltage_object->gpio_voltage_obj.header.voltage_mode) - return voltage_object; + if (voltage_type == voltage_object->gpio_voltage_obj.header.voltage_type && + voltage_mode == voltage_object->gpio_voltage_obj.header.voltage_mode) + return voltage_object; - offset += le16_to_cpu(voltage_object->gpio_voltage_obj.header.object_size); + offset += le16_to_cpu(voltage_object->gpio_voltage_obj.header.object_size); - } + } - return NULL; + return NULL; } static struct atom_voltage_objects_info_v4_1 *pp_atomfwctrl_get_voltage_info_table( struct pp_hwmgr *hwmgr) { - const void *table_address; - uint16_t idx; + const void *table_address; + uint16_t idx; - idx = GetIndexIntoMasterDataTable(voltageobject_info); - table_address = cgs_atom_get_data_table(hwmgr->device, - idx, NULL, 
NULL, NULL); + idx = GetIndexIntoMasterDataTable(voltageobject_info); + table_address = smu_atom_get_data_table(hwmgr->adev, + idx, NULL, NULL, NULL); - PP_ASSERT_WITH_CODE( - table_address, - "Error retrieving BIOS Table Address!", - return NULL); + PP_ASSERT_WITH_CODE(table_address, + "Error retrieving BIOS Table Address!", + return NULL); - return (struct atom_voltage_objects_info_v4_1 *)table_address; + return (struct atom_voltage_objects_info_v4_1 *)table_address; } /** @@ -167,7 +166,7 @@ static struct atom_gpio_pin_lut_v2_1 *pp_atomfwctrl_get_gpio_lookup_table( uint16_t idx; idx = GetIndexIntoMasterDataTable(gpio_pin_lut); - table_address = cgs_atom_get_data_table(hwmgr->device, + table_address = smu_atom_get_data_table(hwmgr->adev, idx, NULL, NULL, NULL); PP_ASSERT_WITH_CODE(table_address, "Error retrieving BIOS Table Address!", @@ -248,28 +247,30 @@ int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr, uint32_t clock_type, uint32_t clock_value, struct pp_atomfwctrl_clock_dividers_soc15 *dividers) { + struct amdgpu_device *adev = hwmgr->adev; struct compute_gpu_clock_input_parameter_v1_8 pll_parameters; struct compute_gpu_clock_output_parameter_v1_8 *pll_output; - int result; uint32_t idx; pll_parameters.gpuclock_10khz = (uint32_t)clock_value; pll_parameters.gpu_clock_type = clock_type; idx = GetIndexIntoMasterCmdTable(computegpuclockparam); - result = cgs_atom_exec_cmd_table(hwmgr->device, idx, &pll_parameters); - - if (!result) { - pll_output = (struct compute_gpu_clock_output_parameter_v1_8 *) - &pll_parameters; - dividers->ulClock = le32_to_cpu(pll_output->gpuclock_10khz); - dividers->ulDid = le32_to_cpu(pll_output->dfs_did); - dividers->ulPll_fb_mult = le32_to_cpu(pll_output->pll_fb_mult); - dividers->ulPll_ss_fbsmult = le32_to_cpu(pll_output->pll_ss_fbsmult); - dividers->usPll_ss_slew_frac = le16_to_cpu(pll_output->pll_ss_slew_frac); - dividers->ucPll_ss_enable = pll_output->pll_ss_enable; - } - return result; + + if (amdgpu_atom_execute_table( + adev->mode_info.atom_context, idx, (uint32_t *)&pll_parameters)) + return -EINVAL; + + pll_output = (struct compute_gpu_clock_output_parameter_v1_8 *) + &pll_parameters; + dividers->ulClock = le32_to_cpu(pll_output->gpuclock_10khz); + dividers->ulDid = le32_to_cpu(pll_output->dfs_did); + dividers->ulPll_fb_mult = le32_to_cpu(pll_output->pll_fb_mult); + dividers->ulPll_ss_fbsmult = le32_to_cpu(pll_output->pll_ss_fbsmult); + dividers->usPll_ss_slew_frac = le16_to_cpu(pll_output->pll_ss_slew_frac); + dividers->ucPll_ss_enable = pll_output->pll_ss_enable; + + return 0; } int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr, @@ -283,7 +284,7 @@ int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr, idx = GetIndexIntoMasterDataTable(asic_profiling_info); profile = (struct atom_asic_profiling_info_v4_1 *) - cgs_atom_get_data_table(hwmgr->device, + smu_atom_get_data_table(hwmgr->adev, idx, NULL, NULL, NULL); if (!profile) @@ -467,7 +468,7 @@ int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr, idx = GetIndexIntoMasterDataTable(smu_info); info = (struct atom_smu_info_v3_1 *) - cgs_atom_get_data_table(hwmgr->device, + smu_atom_get_data_table(hwmgr->adev, idx, NULL, NULL, NULL); if (!info) { @@ -487,37 +488,107 @@ int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr, return 0; } -int pp_atomfwctrl__get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKID id, uint32_t *frequency) +int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKID id, uint32_t *frequency) { 
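/*
 * [Editor's sketch, not part of the patch.] This is the renamed and now
 * exported pp_atomfwctrl_get_clk_information_by_clkid() (the stray double
 * underscore is gone). Like the other converted call sites, it reuses the
 * request buffer for the reply: amdgpu_atom_execute_table() overwrites the
 * input parameters, and the caller reinterprets the buffer through the
 * output struct. A hedged user-space model of that round trip; the structs
 * and fake_execute_table() are illustrative stand-ins, not driver API:
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	struct clk_query { uint8_t clk_id, syspll_id, command, dfsdid; };
 *	struct clk_reply { uint32_t freq_hz; };
 *
 *	static int fake_execute_table(void *buf)
 *	{
 *		struct clk_reply r = { .freq_hz = 600000000 }; // pretend 600 MHz
 *		memcpy(buf, &r, sizeof(r)); // reply overwrites the request
 *		return 0;                   // 0 means success, as in the kernel helper
 *	}
 *
 *	static int get_clk_10khz(uint8_t id, uint32_t *out)
 *	{
 *		union { struct clk_query q; struct clk_reply r; } buf =
 *			{ .q = { .clk_id = id, .command = 1 } };
 *		if (fake_execute_table(&buf))
 *			return -1;
 *		*out = buf.r.freq_hz / 10000; // Hz to 10 kHz units, as below
 *		return 0;
 *	}
 */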
+ struct amdgpu_device *adev = hwmgr->adev; struct atom_get_smu_clock_info_parameters_v3_1 parameters; struct atom_get_smu_clock_info_output_parameters_v3_1 *output; uint32_t ix; parameters.clk_id = id; + parameters.syspll_id = 0; parameters.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ; + parameters.dfsdid = 0; ix = GetIndexIntoMasterCmdTable(getsmuclockinfo); - if (!cgs_atom_exec_cmd_table(hwmgr->device, ix, &parameters)) { - output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&parameters; - *frequency = output->atom_smu_outputclkfreq.smu_clock_freq_hz / 10000; - } else { - pr_info("Error execute_table getsmuclockinfo!"); - return -1; - } + + if (amdgpu_atom_execute_table( + adev->mode_info.atom_context, ix, (uint32_t *)&parameters)) + return -EINVAL; + + output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&parameters; + *frequency = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000; return 0; } +static void pp_atomfwctrl_copy_vbios_bootup_values_3_2(struct pp_hwmgr *hwmgr, + struct pp_atomfwctrl_bios_boot_up_values *boot_values, + struct atom_firmware_info_v3_2 *fw_info) +{ + uint32_t frequency = 0; + + boot_values->ulRevision = fw_info->firmware_revision; + boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz; + boot_values->ulUClk = fw_info->bootup_mclk_in10khz; + boot_values->usVddc = fw_info->bootup_vddc_mv; + boot_values->usVddci = fw_info->bootup_vddci_mv; + boot_values->usMvddc = fw_info->bootup_mvddc_mv; + boot_values->usVddGfx = fw_info->bootup_vddgfx_mv; + boot_values->ucCoolingID = fw_info->coolingsolution_id; + boot_values->ulSocClk = 0; + boot_values->ulDCEFClk = 0; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, &frequency)) + boot_values->ulSocClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, &frequency)) + boot_values->ulDCEFClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, &frequency)) + boot_values->ulEClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, &frequency)) + boot_values->ulVClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, &frequency)) + boot_values->ulDClk = frequency; +} + +static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr, + struct pp_atomfwctrl_bios_boot_up_values *boot_values, + struct atom_firmware_info_v3_1 *fw_info) +{ + uint32_t frequency = 0; + + boot_values->ulRevision = fw_info->firmware_revision; + boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz; + boot_values->ulUClk = fw_info->bootup_mclk_in10khz; + boot_values->usVddc = fw_info->bootup_vddc_mv; + boot_values->usVddci = fw_info->bootup_vddci_mv; + boot_values->usMvddc = fw_info->bootup_mvddc_mv; + boot_values->usVddGfx = fw_info->bootup_vddgfx_mv; + boot_values->ucCoolingID = fw_info->coolingsolution_id; + boot_values->ulSocClk = 0; + boot_values->ulDCEFClk = 0; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency)) + boot_values->ulSocClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency)) + boot_values->ulDCEFClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, &frequency)) + boot_values->ulEClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, &frequency)) + boot_values->ulVClk = frequency; + + if
(!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, &frequency)) + boot_values->ulDClk = frequency; +} + int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr, struct pp_atomfwctrl_bios_boot_up_values *boot_values) { - struct atom_firmware_info_v3_1 *info = NULL; + struct atom_firmware_info_v3_2 *fwinfo_3_2; + struct atom_firmware_info_v3_1 *fwinfo_3_1; + struct atom_common_table_header *info = NULL; uint16_t ix; - uint32_t frequency = 0; ix = GetIndexIntoMasterDataTable(firmwareinfo); - info = (struct atom_firmware_info_v3_1 *) - cgs_atom_get_data_table(hwmgr->device, + info = (struct atom_common_table_header *) + smu_atom_get_data_table(hwmgr->adev, ix, NULL, NULL, NULL); if (!info) { @@ -525,22 +596,18 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr, return -EINVAL; } - boot_values->ulRevision = info->firmware_revision; - boot_values->ulGfxClk = info->bootup_sclk_in10khz; - boot_values->ulUClk = info->bootup_mclk_in10khz; - boot_values->usVddc = info->bootup_vddc_mv; - boot_values->usVddci = info->bootup_vddci_mv; - boot_values->usMvddc = info->bootup_mvddc_mv; - boot_values->usVddGfx = info->bootup_vddgfx_mv; - boot_values->ucCoolingID = info->coolingsolution_id; - boot_values->ulSocClk = 0; - boot_values->ulDCEFClk = 0; - - if (!pp_atomfwctrl__get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency)) - boot_values->ulSocClk = frequency; - - if (!pp_atomfwctrl__get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency)) - boot_values->ulDCEFClk = frequency; + if ((info->format_revision == 3) && (info->content_revision == 2)) { + fwinfo_3_2 = (struct atom_firmware_info_v3_2 *)info; + pp_atomfwctrl_copy_vbios_bootup_values_3_2(hwmgr, + boot_values, fwinfo_3_2); + } else if ((info->format_revision == 3) && (info->content_revision == 1)) { + fwinfo_3_1 = (struct atom_firmware_info_v3_1 *)info; + pp_atomfwctrl_copy_vbios_bootup_values_3_1(hwmgr, + boot_values, fwinfo_3_1); + } else { + pr_info("Fw info table revision does not match!"); + return -EINVAL; + } return 0; } @@ -553,7 +620,7 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr, ix = GetIndexIntoMasterDataTable(smc_dpm_info); info = (struct atom_smc_dpm_info_v4_1 *) - cgs_atom_get_data_table(hwmgr->device, + smu_atom_get_data_table(hwmgr->adev, ix, NULL, NULL, NULL); if (!info) { pr_info("Error retrieving BIOS Table Address!"); @@ -632,5 +699,7 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr, param->acggfxclkspreadpercent = info->acggfxclkspreadpercent; param->acggfxclkspreadfreq = info->acggfxclkspreadfreq; + param->Vr2_I2C_address = info->Vr2_I2C_address; + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h index 8df1e84f27c9..22e21668c93a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h @@ -136,6 +136,9 @@ struct pp_atomfwctrl_bios_boot_up_values { uint32_t ulUClk; uint32_t ulSocClk; uint32_t ulDCEFClk; + uint32_t ulEClk; + uint32_t ulVClk; + uint32_t ulDClk; uint16_t usVddc; uint16_t usVddci; uint16_t usMvddc; @@ -207,6 +210,8 @@ struct pp_atomfwctrl_smc_dpm_parameters uint8_t acggfxclkspreadenabled; uint8_t acggfxclkspreadpercent; uint16_t acggfxclkspreadfreq; + + uint8_t Vr2_I2C_address; }; int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr, @@ -230,6 +235,8 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr, struct 
pp_atomfwctrl_bios_boot_up_values *boot_values); int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr, struct pp_atomfwctrl_smc_dpm_parameters *param); +int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, + BIOS_CLKID id, uint32_t *frequency); #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c index c9eecce5683f..4e1fd5393845 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c @@ -141,7 +141,7 @@ static const void *get_powerplay_table(struct pp_hwmgr *hwmgr) if (!table_address) { table_address = (ATOM_Tonga_POWERPLAYTABLE *) - cgs_atom_get_data_table(hwmgr->device, + smu_atom_get_data_table(hwmgr->adev, index, &size, &frev, &crev); hwmgr->soft_pp_table = table_address; /*Cache the result in RAM.*/ hwmgr->soft_pp_table_size = size; @@ -183,10 +183,10 @@ static int get_vddc_lookup_table( ATOM_Tonga_Voltage_Lookup_Record, entries, vddc_lookup_pp_tables, i); record->us_calculated = 0; - record->us_vdd = atom_record->usVdd; - record->us_cac_low = atom_record->usCACLow; - record->us_cac_mid = atom_record->usCACMid; - record->us_cac_high = atom_record->usCACHigh; + record->us_vdd = le16_to_cpu(atom_record->usVdd); + record->us_cac_low = le16_to_cpu(atom_record->usCACLow); + record->us_cac_mid = le16_to_cpu(atom_record->usCACMid); + record->us_cac_high = le16_to_cpu(atom_record->usCACHigh); } *lookup_table = table; @@ -728,6 +728,32 @@ static int get_mm_clock_voltage_table( return 0; } +static int get_gpio_table(struct pp_hwmgr *hwmgr, + struct phm_ppt_v1_gpio_table **pp_tonga_gpio_table, + const ATOM_Tonga_GPIO_Table *atom_gpio_table) +{ + uint32_t table_size; + struct phm_ppt_v1_gpio_table *pp_gpio_table; + struct phm_ppt_v1_information *pp_table_information = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + table_size = sizeof(struct phm_ppt_v1_gpio_table); + pp_gpio_table = kzalloc(table_size, GFP_KERNEL); + if (!pp_gpio_table) + return -ENOMEM; + + if (pp_table_information->vdd_dep_on_sclk->count < + atom_gpio_table->ucVRHotTriggeredSclkDpmIndex) + PP_ASSERT_WITH_CODE(false, + "SCLK DPM index for VRHot cannot exceed the total sclk level count!",); + else + pp_gpio_table->vrhot_triggered_sclk_dpm_index = + atom_gpio_table->ucVRHotTriggeredSclkDpmIndex; + + *pp_tonga_gpio_table = pp_gpio_table; + + return 0; +} /** * Private Function used during initialization. 
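 * [Editor's sketch, not part of the patch.] The function documented here
 * resolves each optional sub-table by adding a little-endian byte offset from
 * the powerplay table header to the table base, as the new GPIO table hunk
 * below does. A hedged model of that pointer arithmetic (the guard against a
 * zero offset is illustrative):
 *
 *	const uint8_t *base = (const uint8_t *)powerplay_table;
 *	uint16_t off = le16_to_cpu(powerplay_table->usGPIOTableOffset);
 *	const ATOM_Tonga_GPIO_Table *gpio_table =
 *		off ? (const ATOM_Tonga_GPIO_Table *)(base + off) : NULL;
 *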
* Initialize clock voltage dependency @@ -761,11 +787,15 @@ static int init_clock_voltage_dependency( const PPTable_Generic_SubTable_Header *pcie_table = (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) + le16_to_cpu(powerplay_table->usPCIETableOffset)); + const ATOM_Tonga_GPIO_Table *gpio_table = + (const ATOM_Tonga_GPIO_Table *)(((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usGPIOTableOffset)); pp_table_information->vdd_dep_on_sclk = NULL; pp_table_information->vdd_dep_on_mclk = NULL; pp_table_information->mm_dep_table = NULL; pp_table_information->pcie_table = NULL; + pp_table_information->gpio_table = NULL; if (powerplay_table->usMMDependencyTableOffset != 0) result = get_mm_clock_voltage_table(hwmgr, @@ -810,6 +840,10 @@ static int init_clock_voltage_dependency( result = get_valid_clk(hwmgr, &pp_table_information->valid_sclk_values, pp_table_information->vdd_dep_on_sclk); + if (!result && gpio_table) + result = get_gpio_table(hwmgr, &pp_table_information->gpio_table, + gpio_table); + return result; } @@ -836,12 +870,6 @@ static int init_over_drive_limits( hwmgr->platform_descriptor.maxOverdriveVDDC = 0; hwmgr->platform_descriptor.overdriveVDDCStep = 0; - if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0 \ - || hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) { - hwmgr->od_enabled = false; - pr_debug("OverDrive feature not support by VBIOS\n"); - } - return 0; } @@ -1116,6 +1144,9 @@ static int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr) kfree(pp_table_information->pcie_table); pp_table_information->pcie_table = NULL; + kfree(pp_table_information->gpio_table); + pp_table_information->gpio_table = NULL; + kfree(hwmgr->pptable); hwmgr->pptable = NULL; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c index 36ca7c419c90..925e17104f90 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c @@ -837,7 +837,7 @@ static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table( hwmgr->soft_pp_table = &soft_dummy_pp_table[0]; hwmgr->soft_pp_table_size = sizeof(soft_dummy_pp_table); } else { - table_addr = cgs_atom_get_data_table(hwmgr->device, + table_addr = smu_atom_get_data_table(hwmgr->adev, GetIndexIntoMasterTable(DATA, PowerPlayInfo), &size, &frev, &crev); hwmgr->soft_pp_table = table_addr; @@ -1058,7 +1058,7 @@ static int init_overdrive_limits(struct pp_hwmgr *hwmgr, return 0; /* We assume here that fw_info is unchanged if this call fails.*/ - fw_info = cgs_atom_get_data_table(hwmgr->device, + fw_info = smu_atom_get_data_table(hwmgr->adev, GetIndexIntoMasterTable(DATA, FirmwareInfo), &size, &frev, &crev); @@ -1074,12 +1074,6 @@ static int init_overdrive_limits(struct pp_hwmgr *hwmgr, powerplay_table, (const ATOM_FIRMWARE_INFO_V2_1 *)fw_info); - if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0 - && hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) { - hwmgr->od_enabled = false; - pr_debug("OverDrive feature not support by VBIOS\n"); - } - return result; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index 10253b89b3d8..a63e00653324 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -34,7 +34,7 @@ #include "rv_ppsmc.h" #include "smu10_hwmgr.h" #include "power_state.h" -#include "pp_soc15.h" +#include 
"soc15_common.h" #define SMU10_MAX_DEEPSLEEP_DIVIDER_ID 5 #define SMU10_MINIMUM_ENGINE_CLOCK 800 /* 8Mhz, the low boundary of engine clock allowed on this chip */ @@ -42,12 +42,48 @@ #define SMU10_DISPCLK_BYPASS_THRESHOLD 10000 /* 100Mhz */ #define SMC_RAM_END 0x40000 +#define mmPWR_MISC_CNTL_STATUS 0x0183 +#define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0 +#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0 +#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1 +#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L +#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L + static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic; static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr, - struct pp_display_clock_request *clock_req); + struct pp_display_clock_request *clock_req) +{ + struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); + enum amd_pp_clock_type clk_type = clock_req->clock_type; + uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000; + PPSMC_Msg msg; + switch (clk_type) { + case amd_pp_dcf_clock: + if (clk_freq == smu10_data->dcf_actual_hard_min_freq) + return 0; + msg = PPSMC_MSG_SetHardMinDcefclkByFreq; + smu10_data->dcf_actual_hard_min_freq = clk_freq; + break; + case amd_pp_soc_clock: + msg = PPSMC_MSG_SetHardMinSocclkByFreq; + break; + case amd_pp_f_clock: + if (clk_freq == smu10_data->f_actual_hard_min_freq) + return 0; + smu10_data->f_actual_hard_min_freq = clk_freq; + msg = PPSMC_MSG_SetHardMinFclkByFreq; + break; + default: + pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!"); + return -EINVAL; + } + smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq); + + return 0; +} static struct smu10_power_state *cast_smu10_ps(struct pp_hw_power_state *hw_ps) { @@ -74,11 +110,15 @@ static int smu10_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) smu10_data->thermal_auto_throttling_treshold = 0; smu10_data->is_nb_dpm_enabled = 1; smu10_data->dpm_flags = 1; - smu10_data->gfx_off_controled_by_driver = false; smu10_data->need_min_deep_sleep_dcefclk = true; smu10_data->num_active_display = 0; smu10_data->deep_sleep_dcefclk = 0; + if (hwmgr->feature_mask & PP_GFXOFF_MASK) + smu10_data->gfx_off_controled_by_driver = true; + else + smu10_data->gfx_off_controled_by_driver = false; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep); @@ -161,7 +201,7 @@ static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input) struct PP_Clocks clocks = {0}; struct pp_display_clock_request clock_req; - clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk; + clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk; clock_req.clock_type = amd_pp_dcf_clock; clock_req.clock_freq_in_khz = clocks.dcefClock * 10; @@ -206,12 +246,18 @@ static int smu10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input static int smu10_init_power_gate_state(struct pp_hwmgr *hwmgr) { struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); + struct amdgpu_device *adev = hwmgr->adev; smu10_data->vcn_power_gated = true; smu10_data->isp_tileA_power_gated = true; smu10_data->isp_tileB_power_gated = true; - return 0; + if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) + return smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetGfxCGPG, + true); + else + return 0; } @@ -237,19 +283,37 @@ static int smu10_power_off_asic(struct pp_hwmgr *hwmgr) return smu10_reset_cc6_data(hwmgr); } +static bool smu10_is_gfx_on(struct pp_hwmgr *hwmgr) +{ + uint32_t reg; + 
struct amdgpu_device *adev = hwmgr->adev; + + reg = RREG32_SOC15(PWR, 0, mmPWR_MISC_CNTL_STATUS); + if ((reg & PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK) == + (0x2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT)) + return true; + + return false; +} + static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr) { struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); - if (smu10_data->gfx_off_controled_by_driver) + if (smu10_data->gfx_off_controled_by_driver) { smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff); + /* confirm gfx is back to "on" state */ + while (!smu10_is_gfx_on(hwmgr)) + msleep(1); + } + return 0; } static int smu10_disable_dpm_tasks(struct pp_hwmgr *hwmgr) { - return smu10_disable_gfx_off(hwmgr); + return 0; } static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr) @@ -264,7 +328,15 @@ static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr) static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) { - return smu10_enable_gfx_off(hwmgr); + return 0; +} + +static int smu10_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable) +{ + if (enable) + return smu10_enable_gfx_off(hwmgr); + else + return smu10_disable_gfx_off(hwmgr); } static int smu10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, @@ -340,7 +412,7 @@ static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr, static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr) { - int result; + uint32_t result; struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); DpmClocks_t *table = &(smu10_data->clock_table); @@ -386,11 +458,11 @@ static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr) smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency); result = smum_get_argument(hwmgr); - smu10_data->gfx_min_freq_limit = result * 100; + smu10_data->gfx_min_freq_limit = result / 10 * 1000; smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency); result = smum_get_argument(hwmgr); - smu10_data->gfx_max_freq_limit = result * 100; + smu10_data->gfx_max_freq_limit = result / 10 * 1000; return 0; } @@ -436,8 +508,8 @@ static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; - hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK; - hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK; + hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK * 100; + hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK * 100; return result; } @@ -472,6 +544,8 @@ static int smu10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level) { + struct smu10_hwmgr *data = hwmgr->backend; + if (hwmgr->smu_version < 0x1E3700) { pr_info("smu firmware version too old, can not set dpm level\n"); return 0; @@ -482,7 +556,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinGfxClk, - SMU10_UMD_PSTATE_PEAK_GFXCLK); + data->gfx_max_freq_limit/100); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, SMU10_UMD_PSTATE_PEAK_FCLK); @@ -495,7 +569,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxGfxClk, - SMU10_UMD_PSTATE_PEAK_GFXCLK); + data->gfx_max_freq_limit/100); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxFclkByFreq, SMU10_UMD_PSTATE_PEAK_FCLK); @@ -509,10 +583,10 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, case 
AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinGfxClk, - SMU10_UMD_PSTATE_MIN_GFXCLK); + data->gfx_min_freq_limit/100); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxGfxClk, - SMU10_UMD_PSTATE_MIN_GFXCLK); + data->gfx_min_freq_limit/100); break; case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: smum_send_msg_to_smc_with_parameter(hwmgr, @@ -552,10 +626,13 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, case AMD_DPM_FORCED_LEVEL_AUTO: smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinGfxClk, - SMU10_UMD_PSTATE_MIN_GFXCLK); + data->gfx_min_freq_limit/100); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, + hwmgr->display_config->num_display > 3 ? + SMU10_UMD_PSTATE_PEAK_FCLK : SMU10_UMD_PSTATE_MIN_FCLK); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinSocclkByFreq, SMU10_UMD_PSTATE_MIN_SOCCLK); @@ -565,7 +642,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxGfxClk, - SMU10_UMD_PSTATE_PEAK_GFXCLK); + data->gfx_max_freq_limit/100); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxFclkByFreq, SMU10_UMD_PSTATE_PEAK_FCLK); @@ -579,10 +656,10 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, case AMD_DPM_FORCED_LEVEL_LOW: smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinGfxClk, - SMU10_UMD_PSTATE_MIN_GFXCLK); + data->gfx_min_freq_limit/100); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxGfxClk, - SMU10_UMD_PSTATE_MIN_GFXCLK); + data->gfx_min_freq_limit/100); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, SMU10_UMD_PSTATE_MIN_FCLK); @@ -699,6 +776,16 @@ static int smu10_set_cpu_power_state(struct pp_hwmgr *hwmgr) static int smu10_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time, bool cc6_disable, bool pstate_disable, bool pstate_switch_disable) { + struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend); + + if (separation_time != data->separation_time || + cc6_disable != data->cc6_disable || + pstate_disable != data->pstate_disable) { + data->separation_time = separation_time; + data->cc6_disable = cc6_disable; + data->pstate_disable = pstate_disable; + data->cc6_setting_changed = true; + } return 0; } @@ -711,6 +798,51 @@ static int smu10_get_dal_power_level(struct pp_hwmgr *hwmgr, static int smu10_force_clock_level(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask) { + struct smu10_hwmgr *data = hwmgr->backend; + struct smu10_voltage_dependency_table *mclk_table = + data->clock_vol_info.vdd_dep_on_fclk; + uint32_t low, high; + + low = mask ? (ffs(mask) - 1) : 0; + high = mask ? (fls(mask) - 1) : 0; + + switch (type) { + case PP_SCLK: + if (low > 2 || high > 2) { + pr_info("Currently sclk only support 3 levels on RV\n"); + return -EINVAL; + } + + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetHardMinGfxClk, + low == 2 ? data->gfx_max_freq_limit/100 : + low == 1 ? SMU10_UMD_PSTATE_GFXCLK : + data->gfx_min_freq_limit/100); + + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetSoftMaxGfxClk, + high == 0 ? data->gfx_min_freq_limit/100 : + high == 1 ? 
SMU10_UMD_PSTATE_GFXCLK : + data->gfx_max_freq_limit/100); + break; + + case PP_MCLK: + if (low > mclk_table->count - 1 || high > mclk_table->count - 1) + return -EINVAL; + + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetHardMinFclkByFreq, + mclk_table->entries[low].clk/100); + + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetSoftMaxFclkByFreq, + mclk_table->entries[high].clk/100); + break; + + case PP_PCIE: + default: + break; + } return 0; } @@ -720,21 +852,30 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend); struct smu10_voltage_dependency_table *mclk_table = data->clock_vol_info.vdd_dep_on_fclk; - int i, now, size = 0; + uint32_t i, now, size = 0; switch (type) { case PP_SCLK: smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency); now = smum_get_argument(hwmgr); + /* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */ + if (now == data->gfx_max_freq_limit/100) + i = 2; + else if (now == data->gfx_min_freq_limit/100) + i = 0; + else + i = 1; + size += sprintf(buf + size, "0: %uMhz %s\n", - data->gfx_min_freq_limit / 100, - ((data->gfx_min_freq_limit / 100) - == now) ? "*" : ""); + data->gfx_min_freq_limit/100, + i == 0 ? "*" : ""); size += sprintf(buf + size, "1: %uMhz %s\n", - data->gfx_max_freq_limit / 100, - ((data->gfx_max_freq_limit / 100) - == now) ? "*" : ""); + i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK, + i == 1 ? "*" : ""); + size += sprintf(buf + size, "2: %uMhz %s\n", + data->gfx_max_freq_limit/100, + i == 2 ? "*" : ""); break; case PP_MCLK: smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency); @@ -852,7 +993,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, clocks->num_levels = 0; for (i = 0; i < pclk_vol_table->count; i++) { - clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk; + clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10; clocks->data[i].latency_in_us = latency_required ? 
smu10_get_mem_latency(hwmgr, pclk_vol_table->entries[i].clk) : @@ -888,6 +1029,12 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, case amd_pp_soc_clock: pclk_vol_table = pinfo->vdd_dep_on_socclk; break; + case amd_pp_disp_clock: + pclk_vol_table = pinfo->vdd_dep_on_dispclk; + break; + case amd_pp_phy_clock: + pclk_vol_table = pinfo->vdd_dep_on_phyclk; + break; default: return -EINVAL; } @@ -897,7 +1044,7 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, clocks->num_levels = 0; for (i = 0; i < pclk_vol_table->count; i++) { - clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk; + clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10; clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol; clocks->num_levels++; } @@ -905,39 +1052,7 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, return 0; } -static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr, - struct pp_display_clock_request *clock_req) -{ - struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); - enum amd_pp_clock_type clk_type = clock_req->clock_type; - uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000; - PPSMC_Msg msg; - - switch (clk_type) { - case amd_pp_dcf_clock: - if (clk_freq == smu10_data->dcf_actual_hard_min_freq) - return 0; - msg = PPSMC_MSG_SetHardMinDcefclkByFreq; - smu10_data->dcf_actual_hard_min_freq = clk_freq; - break; - case amd_pp_soc_clock: - msg = PPSMC_MSG_SetHardMinSocclkByFreq; - break; - case amd_pp_f_clock: - if (clk_freq == smu10_data->f_actual_hard_min_freq) - return 0; - smu10_data->f_actual_hard_min_freq = clk_freq; - msg = PPSMC_MSG_SetHardMinFclkByFreq; - break; - default: - pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!"); - return -EINVAL; - } - - smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq); - return 0; -} static int smu10_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks) { @@ -947,9 +1062,8 @@ static int smu10_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simpl static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr) { - uint32_t reg_offset = soc15_get_register_offset(THM_HWID, 0, - mmTHM_TCON_CUR_TMP_BASE_IDX, mmTHM_TCON_CUR_TMP); - uint32_t reg_value = cgs_read_register(hwmgr->device, reg_offset); + struct amdgpu_device *adev = hwmgr->adev; + uint32_t reg_value = RREG32_SOC15(THM, 0, mmTHM_TCON_CUR_TMP); int cur_temp = (reg_value & THM_TCON_CUR_TMP__CUR_TEMP_MASK) >> THM_TCON_CUR_TMP__CUR_TEMP__SHIFT; @@ -993,11 +1107,48 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx, return ret; } -static int smu10_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr) +static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, + void *clock_ranges) +{ + struct smu10_hwmgr *data = hwmgr->backend; + struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges; + Watermarks_t *table = &(data->water_marks_table); + int result = 0; + + smu_set_watermarks_for_clocks_ranges(table,wm_with_clock_ranges); + smum_smc_table_manager(hwmgr, (uint8_t *)table, (uint16_t)SMU10_WMTABLE, false); + data->water_marks_exist = true; + return result; +} + +static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr) +{ + + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister); +} + +static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr) { return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub); } +static void smu10_powergate_vcn(struct 
pp_hwmgr *hwmgr, bool bgate) +{ + if (bgate) { + amdgpu_device_ip_set_powergating_state(hwmgr->adev, + AMD_IP_BLOCK_TYPE_VCN, + AMD_PG_STATE_GATE); + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_PowerDownVcn, 0); + } else { + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_PowerUpVcn, 0); + amdgpu_device_ip_set_powergating_state(hwmgr->adev, + AMD_IP_BLOCK_TYPE_VCN, + AMD_PG_STATE_UNGATE); + } +} + static const struct pp_hwmgr_func smu10_hwmgr_funcs = { .backend_init = smu10_hwmgr_backend_init, .backend_fini = smu10_hwmgr_backend_fini, @@ -1006,7 +1157,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = { .force_dpm_level = smu10_dpm_force_dpm_level, .get_power_state_size = smu10_get_power_state_size, .powerdown_uvd = NULL, - .powergate_uvd = NULL, + .powergate_uvd = smu10_powergate_vcn, .powergate_vce = NULL, .get_mclk = smu10_dpm_get_mclk, .get_sclk = smu10_dpm_get_sclk, @@ -1022,6 +1173,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = { .get_current_shallow_sleep_clocks = smu10_get_current_shallow_sleep_clocks, .get_clock_by_type_with_latency = smu10_get_clock_by_type_with_latency, .get_clock_by_type_with_voltage = smu10_get_clock_by_type_with_voltage, + .set_watermarks_for_clocks_ranges = smu10_set_watermarks_for_clocks_ranges, .get_max_high_clocks = smu10_get_max_high_clocks, .read_sensor = smu10_read_sensor, .set_active_display_count = smu10_set_active_display_count, @@ -1031,7 +1183,11 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = { .asic_setup = smu10_setup_asic_task, .power_state_set = smu10_set_power_state_tasks, .dynamic_state_management_disable = smu10_disable_dpm_tasks, - .set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu, + .powergate_mmhub = smu10_powergate_mmhub, + .smus_notify_pwe = smu10_smus_notify_pwe, + .gfx_off_control = smu10_gfx_off_control, + .display_clock_voltage_request = smu10_display_clock_voltage_request, + .powergate_gfx = smu10_gfx_off_control, }; int smu10_init_function_pointers(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h index 175c3a592b6c..1fb296a996f3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h @@ -290,6 +290,7 @@ struct smu10_hwmgr { bool vcn_dpg_mode; bool gfx_off_controled_by_driver; + bool water_marks_exist; Watermarks_t water_marks_table; struct smu10_clock_voltage_information clock_vol_info; DpmClocks_t clock_table; @@ -310,11 +311,9 @@ int smu10_init_function_pointers(struct pp_hwmgr *hwmgr); #define SMU10_UMD_PSTATE_FCLK 933 #define SMU10_UMD_PSTATE_VCE 0x03C00320 -#define SMU10_UMD_PSTATE_PEAK_GFXCLK 1100 #define SMU10_UMD_PSTATE_PEAK_SOCCLK 757 #define SMU10_UMD_PSTATE_PEAK_FCLK 1200 -#define SMU10_UMD_PSTATE_MIN_GFXCLK 200 #define SMU10_UMD_PSTATE_MIN_FCLK 400 #define SMU10_UMD_PSTATE_MIN_SOCCLK 200 #define SMU10_UMD_PSTATE_MIN_VCE 0x0190012C diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c index f4cbaee4e2ca..683b29a99366 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c @@ -39,13 +39,6 @@ static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) PPSMC_MSG_VCEDPM_Disable); } -static int smu7_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable) -{ - return smum_send_msg_to_smc(hwmgr, enable ? 
- PPSMC_MSG_SAMUDPM_Enable : - PPSMC_MSG_SAMUDPM_Disable); -} - static int smu7_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) { if (!bgate) @@ -60,13 +53,6 @@ static int smu7_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate) return smu7_enable_disable_vce_dpm(hwmgr, !bgate); } -static int smu7_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate) -{ - if (!bgate) - smum_update_smc_table(hwmgr, SMU_SAMU_TABLE); - return smu7_enable_disable_samu_dpm(hwmgr, !bgate); -} - int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr) { if (phm_cf_want_uvd_power_gating(hwmgr)) @@ -107,35 +93,15 @@ static int smu7_powerup_vce(struct pp_hwmgr *hwmgr) return 0; } -static int smu7_powerdown_samu(struct pp_hwmgr *hwmgr) -{ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SamuPowerGating)) - return smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_SAMPowerOFF); - return 0; -} - -static int smu7_powerup_samu(struct pp_hwmgr *hwmgr) -{ - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SamuPowerGating)) - return smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_SAMPowerON); - return 0; -} - int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); data->uvd_power_gated = false; data->vce_power_gated = false; - data->samu_power_gated = false; smu7_powerup_uvd(hwmgr); smu7_powerup_vce(hwmgr); - smu7_powerup_samu(hwmgr); return 0; } @@ -147,20 +113,20 @@ void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) data->uvd_power_gated = bgate; if (bgate) { - cgs_set_powergating_state(hwmgr->device, + amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_UVD, AMD_PG_STATE_GATE); - cgs_set_clockgating_state(hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_UVD, AMD_CG_STATE_GATE); smu7_update_uvd_dpm(hwmgr, true); smu7_powerdown_uvd(hwmgr); } else { smu7_powerup_uvd(hwmgr); - cgs_set_clockgating_state(hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_UVD, AMD_CG_STATE_UNGATE); - cgs_set_powergating_state(hwmgr->device, + amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_UVD, AMD_PG_STATE_UNGATE); smu7_update_uvd_dpm(hwmgr, false); @@ -175,46 +141,26 @@ void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) data->vce_power_gated = bgate; if (bgate) { - cgs_set_powergating_state(hwmgr->device, + amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCE, AMD_PG_STATE_GATE); - cgs_set_clockgating_state(hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_GATE); smu7_update_vce_dpm(hwmgr, true); smu7_powerdown_vce(hwmgr); } else { smu7_powerup_vce(hwmgr); - cgs_set_clockgating_state(hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_UNGATE); - cgs_set_powergating_state(hwmgr->device, + amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCE, AMD_PG_STATE_UNGATE); smu7_update_vce_dpm(hwmgr, false); } } -int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate) -{ - struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - - if (data->samu_power_gated == bgate) - return 0; - - data->samu_power_gated = bgate; - - if (bgate) { - smu7_update_samu_dpm(hwmgr, true); - smu7_powerdown_samu(hwmgr); - } else { - smu7_powerup_samu(hwmgr); - smu7_update_samu_dpm(hwmgr, false); - } - - return 0; -} - int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, const 
uint32_t *msg_id) { @@ -470,7 +416,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, * Powerplay will only control the static per CU Power Gating. * Dynamic per CU Power Gating will be done in gfx. */ -int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable) +int smu7_powergate_gfx(struct pp_hwmgr *hwmgr, bool enable) { struct amdgpu_device *adev = hwmgr->adev; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h index 1ddce023218a..fc8f8a6acc72 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h @@ -29,11 +29,10 @@ void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr); -int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate); int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate); int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr); int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr, const uint32_t *msg_id); -int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable); +int smu7_powergate_gfx(struct pp_hwmgr *hwmgr, bool enable); #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 26fbeafc3c96..052e60dfaf9f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -48,6 +48,8 @@ #include "processpptables.h" #include "pp_thermal.h" +#include "ivsrcid/ivsrcid_vislands30.h" + #define MC_CG_ARB_FREQ_F0 0x0a #define MC_CG_ARB_FREQ_F1 0x0b #define MC_CG_ARB_FREQ_F2 0x0c @@ -61,10 +63,6 @@ #define SMC_CG_IND_START 0xc0030000 #define SMC_CG_IND_END 0xc0040000 -#define VOLTAGE_SCALE 4 -#define VOLTAGE_VID_OFFSET_SCALE1 625 -#define VOLTAGE_VID_OFFSET_SCALE2 100 - #define MEM_FREQ_LOW_LATENCY 25000 #define MEM_FREQ_HIGH_LATENCY 80000 @@ -79,14 +77,23 @@ #define PCIE_BUS_CLK 10000 #define TCLK (PCIE_BUS_CLK / 10) -static const struct profile_mode_setting smu7_profiling[5] = +static const struct profile_mode_setting smu7_profiling[6] = {{1, 0, 100, 30, 1, 0, 100, 10}, {1, 10, 0, 30, 0, 0, 0, 0}, {0, 0, 0, 0, 1, 10, 16, 31}, {1, 0, 11, 50, 1, 0, 100, 10}, {1, 0, 5, 30, 0, 0, 0, 0}, + {0, 0, 0, 0, 0, 0, 0, 0}, }; +#define PPSMC_MSG_SetVBITimeout_VEGAM ((uint16_t) 0x310) + +#define ixPWR_SVI2_PLANE1_LOAD 0xC0200280 +#define PWR_SVI2_PLANE1_LOAD__PSI1_MASK 0x00000020L +#define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK 0x00000040L +#define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT 0x00000005 +#define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT 0x00000006 + /** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. 
*/ enum DPM_EVENT_SRC { DPM_EVENT_SRC_ANALOG = 0, @@ -168,6 +175,13 @@ static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr) */ static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr) { + if (hwmgr->chip_id == CHIP_VEGAM) { + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0); + } + if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK) smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable); @@ -779,7 +793,8 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr) data->dpm_table.sclk_table.count++; } } - + if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0) + hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk; /* Initialize Mclk DPM table based on allow Mclk values */ data->dpm_table.mclk_table.count = 0; for (i = 0; i < dep_mclk_table->count; i++) { @@ -794,32 +809,8 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr) } } - return 0; -} - -static int smu7_get_voltage_dependency_table( - const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table, - struct phm_ppt_v1_clock_voltage_dependency_table *dep_table) -{ - uint8_t i = 0; - PP_ASSERT_WITH_CODE((0 != allowed_dep_table->count), - "Voltage Lookup Table empty", - return -EINVAL); - - dep_table->count = allowed_dep_table->count; - for (i=0; i<dep_table->count; i++) { - dep_table->entries[i].clk = allowed_dep_table->entries[i].clk; - dep_table->entries[i].vddInd = allowed_dep_table->entries[i].vddInd; - dep_table->entries[i].vdd_offset = allowed_dep_table->entries[i].vdd_offset; - dep_table->entries[i].vddc = allowed_dep_table->entries[i].vddc; - dep_table->entries[i].vddgfx = allowed_dep_table->entries[i].vddgfx; - dep_table->entries[i].vddci = allowed_dep_table->entries[i].vddci; - dep_table->entries[i].mvdd = allowed_dep_table->entries[i].mvdd; - dep_table->entries[i].phases = allowed_dep_table->entries[i].phases; - dep_table->entries[i].cks_enable = allowed_dep_table->entries[i].cks_enable; - dep_table->entries[i].cks_voffset = allowed_dep_table->entries[i].cks_voffset; - } - + if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) + hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk; return 0; } @@ -850,7 +841,7 @@ static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr) entries[i].vddc = dep_sclk_table->entries[i].vddc; } - smu7_get_voltage_dependency_table(dep_sclk_table, + smu_get_voltage_dependency_table_ppt_v1(dep_sclk_table, (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk)); odn_table->odn_memory_clock_dpm_levels.num_of_pl = @@ -862,12 +853,94 @@ static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr) entries[i].vddc = dep_mclk_table->entries[i].vddc; } - smu7_get_voltage_dependency_table(dep_mclk_table, + smu_get_voltage_dependency_table_ppt_v1(dep_mclk_table, (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk)); return 0; } +static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint32_t min_vddc = 0; + uint32_t max_vddc = 0; + + if (!table_info) + return; + + 
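/*
 * [Editor's sketch, not part of the patch.] smu7_setup_voltage_range_from_vbios()
 * below sanity-checks the VDDC range reported by atomctrl_get_voltage_range():
 * a bound that is zero, implausibly large (above 2000 mV), or inconsistent
 * with the sclk/voltage dependency table falls back to the table's endpoint
 * values. A hedged model of that clamp, with the table endpoints passed in
 * directly:
 *
 *	static void clamp_vddc(uint32_t tbl_lo, uint32_t tbl_hi,
 *			       uint32_t *min_vddc, uint32_t *max_vddc)
 *	{
 *		if (*min_vddc == 0 || *min_vddc > 2000 || *min_vddc > tbl_lo)
 *			*min_vddc = tbl_lo; // vddc of the lowest sclk entry
 *		if (*max_vddc == 0 || *max_vddc > 2000 || *max_vddc < tbl_hi)
 *			*max_vddc = tbl_hi; // vddc of the highest sclk entry
 *	}
 */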
dep_sclk_table = table_info->vdd_dep_on_sclk; + + atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc); + + if (min_vddc == 0 || min_vddc > 2000 + || min_vddc > dep_sclk_table->entries[0].vddc) + min_vddc = dep_sclk_table->entries[0].vddc; + + if (max_vddc == 0 || max_vddc > 2000 + || max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc) + max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc; + + data->odn_dpm_table.min_vddc = min_vddc; + data->odn_dpm_table.max_vddc = max_vddc; +} + +static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint32_t i; + + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table; + struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table; + + if (table_info == NULL) + return; + + for (i = 0; i < data->dpm_table.sclk_table.count; i++) { + if (odn_table->odn_core_clock_dpm_levels.entries[i].clock != + data->dpm_table.sclk_table.dpm_levels[i].value) { + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; + break; + } + } + + for (i = 0; i < data->dpm_table.mclk_table.count; i++) { + if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock != + data->dpm_table.mclk_table.dpm_levels[i].value) { + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; + break; + } + } + + dep_table = table_info->vdd_dep_on_mclk; + odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk); + + for (i = 0; i < dep_table->count; i++) { + if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK; + return; + } + } + + dep_table = table_info->vdd_dep_on_sclk; + odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk); + for (i = 0; i < dep_table->count; i++) { + if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK; + return; + } + } + if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { + data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK; + } +} + static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); @@ -886,9 +959,14 @@ static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) sizeof(struct smu7_dpm_table)); /* initialize ODN table */ - if (hwmgr->od_enabled) - smu7_odn_initial_default_setting(hwmgr); - + if (hwmgr->od_enabled) { + if (data->odn_dpm_table.max_vddc) { + smu7_check_dpm_table_updated(hwmgr); + } else { + smu7_setup_voltage_range_from_vbios(hwmgr); + smu7_odn_initial_default_setting(hwmgr); + } + } return 0; } @@ -965,6 +1043,22 @@ static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) return 0; } +static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t soft_register_value = 0; + uint32_t handshake_disables_offset = data->soft_regs_start + + smum_get_offsetof(hwmgr, + SMU_SoftRegisters, HandshakeDisables); + + soft_register_value = cgs_read_ind_register(hwmgr->device, + 
CGS_IND_REG__SMC, handshake_disables_offset); + soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + handshake_disables_offset, soft_register_value); + return 0; +} + static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); @@ -987,23 +1081,29 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); /* enable SCLK dpm */ - if (!data->sclk_dpm_key_disabled) + if (!data->sclk_dpm_key_disabled) { + if (hwmgr->chip_id == CHIP_VEGAM) + smu7_disable_sclk_vce_handshake(hwmgr); + PP_ASSERT_WITH_CODE( (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)), "Failed to enable SCLK DPM during DPM Start Function!", return -EINVAL); + } /* enable MCLK dpm */ if (0 == data->mclk_dpm_key_disabled) { if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK)) smu7_disable_handshake_uvd(hwmgr); + PP_ASSERT_WITH_CODE( (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Enable)), "Failed to enable MCLK DPM during DPM Start Function!", return -EINVAL); - PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); + if (hwmgr->chip_family != CHIP_VEGAM) + PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); if (hwmgr->chip_family == AMDGPU_FAMILY_CI) { @@ -1019,8 +1119,13 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005); udelay(10); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005); + if (hwmgr->chip_id == CHIP_VEGAM) { + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009); + } else { + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005); + } cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005); } } @@ -1229,7 +1334,7 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr) tmp_result = smu7_construct_voltage_tables(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to contruct voltage tables!", + "Failed to construct voltage tables!", result = tmp_result); } smum_initialize_mc_reg_table(hwmgr); @@ -1261,10 +1366,12 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to process firmware header!", result = tmp_result); - tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize switch from ArbF0 to F1!", - result = tmp_result); + if (hwmgr->chip_id != CHIP_VEGAM) { + tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to initialize switch from ArbF0 to F1!", + result = tmp_result); + } result = smu7_setup_default_dpm_tables(hwmgr); PP_ASSERT_WITH_CODE(0 == result, @@ -1473,7 +1580,7 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) data->current_profile_setting.sclk_up_hyst = 0; data->current_profile_setting.sclk_down_hyst = 100; data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT; - data->current_profile_setting.bupdate_sclk = 1; + 
data->current_profile_setting.bupdate_mclk = 1; data->current_profile_setting.mclk_up_hyst = 0; data->current_profile_setting.mclk_down_hyst = 100; data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT; @@ -2754,6 +2861,9 @@ static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr, case CHIP_POLARIS12: switch_limit_us = data->is_memory_gddr5 ? 190 : 150; break; + case CHIP_VEGAM: + switch_limit_us = 30; + break; default: switch_limit_us = data->is_memory_gddr5 ? 450 : 150; break; @@ -2769,7 +2879,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, struct pp_power_state *request_ps, const struct pp_power_state *current_ps) { - + struct amdgpu_device *adev = hwmgr->adev; struct smu7_power_state *smu7_ps = cast_phw_smu7_power_state(&request_ps->hardware); uint32_t sclk; @@ -2777,8 +2887,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, struct PP_Clocks minimum_clocks = {0}; bool disable_mclk_switching; bool disable_mclk_switching_for_frame_lock; - struct cgs_display_info info = {0}; - struct cgs_mode_info mode_info = {0}; const struct phm_clock_and_voltage_limits *max_limits; uint32_t i; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); @@ -2787,7 +2895,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, int32_t count; int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; - info.mode_info = &mode_info; data->battery_state = (PP_StateUILabel_Battery == request_ps->classification.ui_label); @@ -2795,12 +2902,12 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, "VI should always have 2 performance levels", ); - max_limits = (PP_PowerSource_AC == hwmgr->power_source) ? + max_limits = adev->pm.ac_power ? &(hwmgr->dyn_state.max_clock_voltage_on_ac) : &(hwmgr->dyn_state.max_clock_voltage_on_dc); /* Cap clock DPM tables at DC MAX if it is in DC. */ - if (PP_PowerSource_DC == hwmgr->power_source) { + if (!adev->pm.ac_power) { for (i = 0; i < smu7_ps->performance_level_count; i++) { if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk) smu7_ps->performance_levels[i].memory_clock = max_limits->mclk; @@ -2809,10 +2916,8 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, } } - cgs_get_active_displays_info(hwmgr->device, &info); - - minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock; - minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock; + minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock; + minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) { @@ -2843,12 +2948,12 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); - if (info.display_count == 0) + if (hwmgr->display_config->num_display == 0) disable_mclk_switching = false; else - disable_mclk_switching = ((1 < info.display_count) || + disable_mclk_switching = ((1 < hwmgr->display_config->num_display) || disable_mclk_switching_for_frame_lock || - smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us)); + smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time)); sclk = smu7_ps->performance_levels[0].engine_clock; mclk = smu7_ps->performance_levels[0].memory_clock; @@ -2957,8 +3062,7 @@ static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, /* First retrieve the Boot clocks and VDDC from the firmware info table. 
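 * [Editor's sketch, not part of the patch.] The fetch below follows the data
 * table lookup pattern this patch converts from cgs_atom_get_data_table() to
 * smu_atom_get_data_table(); a hedged model of a typical caller:
 *
 *	uint8_t frev, crev;
 *	uint16_t size;
 *	const ATOM_FIRMWARE_INFO_V2_2 *fw =
 *		smu_atom_get_data_table(hwmgr->adev, index, &size, &frev, &crev);
 *	if (!fw)
 *		return 0; // tolerate a missing table, as the code below does
 *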
* We assume here that fw_info is unchanged if this call fails.
 */
- fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
- hwmgr->device, index,
+ fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)smu_atom_get_data_table(hwmgr->adev, index,
 &size, &frev, &crev);
 if (!fw_info)
 /* During a test, there is no firmware info table. */
@@ -3081,7 +3185,7 @@ static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
 state_entry->ucPCIEGenLow);
 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
- state_entry->ucPCIELaneHigh);
+ state_entry->ucPCIELaneLow);

 performance_level = &(smu7_power_state->performance_levels
 [smu7_power_state->performance_level_count++]);
@@ -3366,34 +3470,35 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
 return 0;
 }

-static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr,
- struct pp_gpu_power *query)
+static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
 {
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_PmStatusLogStart),
- "Failed to start pm status log!",
- return -1);
+ int i;
+ u32 tmp = 0;
+
+ if (!query)
+ return -EINVAL;

- /* Sampling period from 50ms to 4sec */
- msleep_interruptible(200);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
+ tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ *query = tmp;

- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_PmStatusLogSample),
- "Failed to sample pm status log!",
- return -1);
+ if (tmp != 0)
+ return 0;

- query->vddc_power = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC,
- ixSMU_PM_STATUS_40);
- query->vddci_power = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC,
- ixSMU_PM_STATUS_49);
- query->max_gpu_power = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC,
- ixSMU_PM_STATUS_94);
- query->average_gpu_power = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC,
- ixSMU_PM_STATUS_95);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_PM_STATUS_94, 0);
+
+ for (i = 0; i < 10; i++) {
+ mdelay(1);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
+ tmp = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC,
+ ixSMU_PM_STATUS_94);
+ if (tmp != 0)
+ break;
+ }
+ *query = tmp;

 return 0;
 }
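The rewritten smu7_get_gpu_power() above first asks the SMC for the current package power directly and only falls back to the PM status log, polling ixSMU_PM_STATUS_94 with a small retry budget instead of sleeping 200 ms as the old code did. A compilable sketch of that poll-with-retry shape, with stub accessors standing in for the real smum/cgs calls:

    #include <stdint.h>
    #include <stdio.h>

    /* Stubs standing in for smum_send_msg_to_smc() and
     * cgs_read_ind_register(); here the sample pretends to become
     * available on the fourth try. */
    static int tries;
    static void request_sample(void) { tries++; }
    static uint32_t read_status94(void) { return tries >= 4 ? 1234 : 0; }
    static void delay_ms(unsigned int ms) { (void)ms; }

    /* Poll until the status register goes non-zero or the budget runs
     * out; a result of 0 means the SMU never produced a sample. */
    static uint32_t poll_gpu_power(unsigned int budget)
    {
        uint32_t val = 0;
        unsigned int i;

        for (i = 0; i < budget; i++) {
            delay_ms(1);
            request_sample();
            val = read_status94();
            if (val != 0)
                break;
        }
        return val;
    }

    int main(void)
    {
        printf("power sample: %u\n", (unsigned)poll_gpu_power(10));
        return 0;
    }

@@ -3446,10 +3551,7 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 *size = 4;
 return 0;
 case AMDGPU_PP_SENSOR_GPU_POWER:
- if (*size < sizeof(struct pp_gpu_power))
- return -EINVAL;
- *size = sizeof(struct pp_gpu_power);
- return smu7_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
+ return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
 case AMDGPU_PP_SENSOR_VDDGFX:
 if ((data->vr_config & 0xff) == 0x2)
 val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
@@ -3480,7 +3582,6 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
 [smu7_ps->performance_level_count - 1].memory_clock;
 struct PP_Clocks min_clocks = {0};
 uint32_t i;
- struct cgs_display_info info = {0};

 for (i = 0; i < sclk_table->count; i++) {
 if (sclk == sclk_table->dpm_levels[i].value)
@@ -3507,9 +3608,8 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
 if (i >= mclk_table->count)
 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;

- cgs_get_active_displays_info(hwmgr->device, &info);
- if (data->display_timing.num_existing_displays != info.display_count)
+ if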
(data->display_timing.num_existing_displays != hwmgr->display_config->num_display) data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; return 0; @@ -3676,8 +3776,9 @@ static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr, uint32_t i; for (i = 0; i < dpm_table->count; i++) { - if ((dpm_table->dpm_levels[i].value < low_limit) - || (dpm_table->dpm_levels[i].value > high_limit)) + /*skip the trim if od is enabled*/ + if (!hwmgr->od_enabled && (dpm_table->dpm_levels[i].value < low_limit + || dpm_table->dpm_levels[i].value > high_limit)) dpm_table->dpm_levels[i].enabled = false; else dpm_table->dpm_levels[i].enabled = true; @@ -3714,13 +3815,14 @@ static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr, static int smu7_generate_dpm_level_enable_mask( struct pp_hwmgr *hwmgr, const void *input) { - int result; + int result = 0; const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); const struct smu7_power_state *smu7_ps = cast_const_phw_smu7_power_state(states->pnew_state); + result = smu7_trim_dpm_states(hwmgr, smu7_ps); if (result) return result; @@ -3812,9 +3914,14 @@ static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) - smum_send_msg_to_smc_with_parameter(hwmgr, - (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2); + if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) { + if (hwmgr->chip_id == CHIP_VEGAM) + smum_send_msg_to_smc_with_parameter(hwmgr, + (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2); + else + smum_send_msg_to_smc_with_parameter(hwmgr, + (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2); + } return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL; } @@ -3908,15 +4015,8 @@ smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) static int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) { - uint32_t num_active_displays = 0; - struct cgs_display_info info = {0}; - - info.mode_info = NULL; - cgs_get_active_displays_info(hwmgr->device, &info); - - num_active_displays = info.display_count; - - if (num_active_displays > 1 && hwmgr->display_config.multi_monitor_in_sync != true) + if (hwmgr->display_config->num_display > 1 && + !hwmgr->display_config->multi_monitor_in_sync) smu7_notify_smc_display_change(hwmgr, false); return 0; @@ -3931,33 +4031,24 @@ smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) static int smu7_program_display_gap(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - uint32_t num_active_displays = 0; uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); uint32_t display_gap2; uint32_t pre_vbi_time_in_us; uint32_t frame_time_in_us; - uint32_t ref_clock; - uint32_t refresh_rate = 0; - struct cgs_display_info info = {0}; - struct cgs_mode_info mode_info = {0}; + uint32_t ref_clock, refresh_rate; - info.mode_info = &mode_info; - cgs_get_active_displays_info(hwmgr->device, &info); - num_active_displays = info.display_count; - - display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE); + display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (hwmgr->display_config->num_display > 0) ? 
DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap); ref_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev); - - refresh_rate = mode_info.refresh_rate; + refresh_rate = hwmgr->display_config->vrefresh; if (0 == refresh_rate) refresh_rate = 60; frame_time_in_us = 1000000 / refresh_rate; - pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us; + pre_vbi_time_in_us = frame_time_in_us - 200 - hwmgr->display_config->min_vblank_time; data->frame_time_x2 = frame_time_in_us * 2 / 100; @@ -4016,17 +4107,17 @@ static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr) amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), AMDGPU_IH_CLIENTID_LEGACY, - 230, + VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH, source); amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), AMDGPU_IH_CLIENTID_LEGACY, - 231, + VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW, source); /* Register CTF(GPIO_19) interrupt */ amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), AMDGPU_IH_CLIENTID_LEGACY, - 83, + VISLANDS30_IV_SRCID_GPIO_19, source); return 0; @@ -4037,17 +4128,14 @@ smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); bool is_update_required = false; - struct cgs_display_info info = {0, 0, NULL}; - cgs_get_active_displays_info(hwmgr->device, &info); - - if (data->display_timing.num_existing_displays != info.display_count) + if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) is_update_required = true; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { - if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr && + if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr && (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK || - hwmgr->display_config.min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK)) + hwmgr->display_config->min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK)) is_update_required = true; } return is_update_required; @@ -4102,7 +4190,7 @@ static int smu7_check_states_equal(struct pp_hwmgr *hwmgr, return 0; } -static int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr) +static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); @@ -4181,13 +4269,9 @@ static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr) static int smu7_get_memory_type(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - uint32_t temp; - - temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0); + struct amdgpu_device *adev = hwmgr->adev; - data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == - ((temp & MC_SEQ_MISC0_GDDR5_MASK) >> - MC_SEQ_MISC0_GDDR5_SHIFT)); + data->is_memory_gddr5 = (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5); return 0; } @@ -4218,7 +4302,6 @@ static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr) data->uvd_power_gated = false; data->vce_power_gated = false; - data->samu_power_gated = false; return 0; } @@ -4235,7 +4318,7 @@ static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr) { int tmp_result, result = 0; - smu7_upload_mc_firmware(hwmgr); + smu7_check_mc_firmware(hwmgr); tmp_result = smu7_read_clock_registers(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), @@ -4370,22 +4453,36 @@ static int 
smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
 break;
 case OD_SCLK:
 if (hwmgr->od_enabled) {
- size = sprintf(buf, "%s: \n", "OD_SCLK");
+ size = sprintf(buf, "%s:\n", "OD_SCLK");
 for (i = 0; i < odn_sclk_table->num_of_pl; i++)
- size += sprintf(buf + size, "%d: %10uMhz %10u mV\n",
- i, odn_sclk_table->entries[i].clock / 100,
+ size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
+ i, odn_sclk_table->entries[i].clock/100,
 odn_sclk_table->entries[i].vddc);
 }
 break;
 case OD_MCLK:
 if (hwmgr->od_enabled) {
- size = sprintf(buf, "%s: \n", "OD_MCLK");
+ size = sprintf(buf, "%s:\n", "OD_MCLK");
 for (i = 0; i < odn_mclk_table->num_of_pl; i++)
- size += sprintf(buf + size, "%d: %10uMhz %10u mV\n",
- i, odn_mclk_table->entries[i].clock / 100,
+ size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
+ i, odn_mclk_table->entries[i].clock/100,
 odn_mclk_table->entries[i].vddc);
 }
 break;
+ case OD_RANGE:
+ if (hwmgr->od_enabled) {
+ size = sprintf(buf, "%s:\n", "OD_RANGE");
+ size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
+ data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
+ hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
+ size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
+ data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
+ size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
+ data->odn_dpm_table.min_vddc,
+ data->odn_dpm_table.max_vddc);
+ }
+ break;
 default:
 break;
 }
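The OD_RANGE output above and the clk * 10 exports below both lean on the same unit convention: the SMU7 DPM tables store clocks in 10 kHz steps, so dividing by 100 yields MHz for the sysfs text while multiplying by 10 yields the kHz that amd_pp_clocks consumers expect. A tiny worked example (the value is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* A DPM level value of 30000 is 30000 * 10 kHz = 300 MHz. */
        uint32_t level_10khz = 30000;

        printf("sysfs:  %u MHz\n", (unsigned)(level_10khz / 100)); /* 300 */
        printf("export: %u kHz\n", (unsigned)(level_10khz * 10));  /* 300000 */
        return 0;
    }

@@ -4515,12 +4612,12 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
 return -EINVAL;
 dep_sclk_table = table_info->vdd_dep_on_sclk;
 for (i = 0; i < dep_sclk_table->count; i++)
- clocks->clock[i] = dep_sclk_table->entries[i].clk;
+ clocks->clock[i] = dep_sclk_table->entries[i].clk * 10;
 clocks->count = dep_sclk_table->count;
 } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
 sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
 for (i = 0; i < sclk_table->count; i++)
- clocks->clock[i] = sclk_table->entries[i].clk;
+ clocks->clock[i] = sclk_table->entries[i].clk * 10;
 clocks->count = sclk_table->count;
 }

@@ -4552,7 +4649,7 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
 return -EINVAL;
 dep_mclk_table = table_info->vdd_dep_on_mclk;
 for (i = 0; i < dep_mclk_table->count; i++) {
- clocks->clock[i] = dep_mclk_table->entries[i].clk;
+ clocks->clock[i] = dep_mclk_table->entries[i].clk * 10;
 clocks->latency[i] = smu7_get_mem_latency(hwmgr,
 dep_mclk_table->entries[i].clk);
 }
@@ -4560,7 +4657,7 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
 } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
 mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
 for (i = 0; i < mclk_table->count; i++)
- clocks->clock[i] = mclk_table->entries[i].clk;
+ clocks->clock[i] = mclk_table->entries[i].clk * 10;
 clocks->count = mclk_table->count;
 }
 return 0;
@@ -4669,36 +4766,27 @@ static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
 {
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t min_vddc;
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
-
- if (table_info == NULL)
- return false;
-
- dep_sclk_table = table_info->vdd_dep_on_sclk;
- min_vddc = dep_sclk_table->entries[0].vddc;
-
- if (voltage < min_vddc || voltage > 2000) {
- pr_info("OD voltage is out of range [%d - 2000] mV\n", min_vddc);
+ if (voltage <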
data->odn_dpm_table.min_vddc || voltage > data->odn_dpm_table.max_vddc) { + pr_info("OD voltage is out of range [%d - %d] mV\n", + data->odn_dpm_table.min_vddc, + data->odn_dpm_table.max_vddc); return false; } if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) { - if (data->vbios_boot_state.sclk_bootup_value > clk || + if (data->golden_dpm_table.sclk_table.dpm_levels[0].value > clk || hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) { pr_info("OD engine clock is out of range [%d - %d] MHz\n", - data->vbios_boot_state.sclk_bootup_value, - hwmgr->platform_descriptor.overdriveLimit.engineClock / 100); + data->golden_dpm_table.sclk_table.dpm_levels[0].value/100, + hwmgr->platform_descriptor.overdriveLimit.engineClock/100); return false; } } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) { - if (data->vbios_boot_state.mclk_bootup_value > clk || + if (data->golden_dpm_table.mclk_table.dpm_levels[0].value > clk || hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) { pr_info("OD memory clock is out of range [%d - %d] MHz\n", - data->vbios_boot_state.mclk_bootup_value/100, - hwmgr->platform_descriptor.overdriveLimit.memoryClock / 100); + data->golden_dpm_table.mclk_table.dpm_levels[0].value/100, + hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); return false; } } else { @@ -4708,64 +4796,6 @@ static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr, return true; } -static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr) -{ - struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint32_t i; - - struct phm_ppt_v1_clock_voltage_dependency_table *dep_table; - struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table; - - if (table_info == NULL) - return; - - for (i=0; i<data->dpm_table.sclk_table.count; i++) { - if (odn_table->odn_core_clock_dpm_levels.entries[i].clock != - data->dpm_table.sclk_table.dpm_levels[i].value) { - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; - break; - } - } - - for (i=0; i<data->dpm_table.mclk_table.count; i++) { - if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock != - data->dpm_table.mclk_table.dpm_levels[i].value) { - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; - break; - } - } - - dep_table = table_info->vdd_dep_on_mclk; - odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk); - - for (i=0; i < dep_table->count; i++) { - if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK; - return; - } - } - if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { - data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; - } - - dep_table = table_info->vdd_dep_on_sclk; - odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk); - for (i=0; i < dep_table->count; i++) { - if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK; - return; - } - } - if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { - data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; - data->need_update_smu7_dpm_table 
|= DPMTABLE_OD_UPDATE_SCLK; - } -} - static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, enum PP_OD_DPM_TABLE_COMMAND type, long *input, uint32_t size) @@ -4864,6 +4894,17 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) len = sizeof(smu7_profiling) / sizeof(struct profile_mode_setting); for (i = 0; i < len; i++) { + if (i == hwmgr->power_profile_mode) { + size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n", + i, profile_name[i], "*", + data->current_profile_setting.sclk_up_hyst, + data->current_profile_setting.sclk_down_hyst, + data->current_profile_setting.sclk_activity, + data->current_profile_setting.mclk_up_hyst, + data->current_profile_setting.mclk_down_hyst, + data->current_profile_setting.mclk_activity); + continue; + } if (smu7_profiling[i].bupdate_sclk) size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ", i, profile_name[i], smu7_profiling[i].sclk_up_hyst, @@ -4883,24 +4924,6 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) "-", "-", "-"); } - size += sprintf(buf + size, "%3d %16s: %8d %16d %16d %16d %16d %16d\n", - i, profile_name[i], - data->custom_profile_setting.sclk_up_hyst, - data->custom_profile_setting.sclk_down_hyst, - data->custom_profile_setting.sclk_activity, - data->custom_profile_setting.mclk_up_hyst, - data->custom_profile_setting.mclk_down_hyst, - data->custom_profile_setting.mclk_activity); - - size += sprintf(buf + size, "%3s %16s: %8d %16d %16d %16d %16d %16d\n", - "*", "CURRENT", - data->current_profile_setting.sclk_up_hyst, - data->current_profile_setting.sclk_down_hyst, - data->current_profile_setting.sclk_activity, - data->current_profile_setting.mclk_up_hyst, - data->current_profile_setting.mclk_down_hyst, - data->current_profile_setting.mclk_activity); - return size; } @@ -4939,16 +4962,16 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint if (size < 8) return -EINVAL; - data->custom_profile_setting.bupdate_sclk = input[0]; - data->custom_profile_setting.sclk_up_hyst = input[1]; - data->custom_profile_setting.sclk_down_hyst = input[2]; - data->custom_profile_setting.sclk_activity = input[3]; - data->custom_profile_setting.bupdate_mclk = input[4]; - data->custom_profile_setting.mclk_up_hyst = input[5]; - data->custom_profile_setting.mclk_down_hyst = input[6]; - data->custom_profile_setting.mclk_activity = input[7]; - if (!smum_update_dpm_settings(hwmgr, &data->custom_profile_setting)) { - memcpy(&data->current_profile_setting, &data->custom_profile_setting, sizeof(struct profile_mode_setting)); + tmp.bupdate_sclk = input[0]; + tmp.sclk_up_hyst = input[1]; + tmp.sclk_down_hyst = input[2]; + tmp.sclk_activity = input[3]; + tmp.bupdate_mclk = input[4]; + tmp.mclk_up_hyst = input[5]; + tmp.mclk_down_hyst = input[6]; + tmp.mclk_activity = input[7]; + if (!smum_update_dpm_settings(hwmgr, &tmp)) { + memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting)); hwmgr->power_profile_mode = mode; } break; @@ -5023,7 +5046,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = { .get_fan_control_mode = smu7_get_fan_control_mode, .force_clock_level = smu7_force_clock_level, .print_clock_levels = smu7_print_clock_levels, - .enable_per_cu_power_gating = smu7_enable_per_cu_power_gating, + .powergate_gfx = smu7_powergate_gfx, .get_sclk_od = smu7_get_sclk_od, .set_sclk_od = smu7_set_sclk_od, .get_mclk_od = smu7_get_mclk_od, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h 
b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h index f40179c9ca97..3784ce6e50ab 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h @@ -184,6 +184,8 @@ struct smu7_odn_dpm_table { struct smu7_odn_clock_voltage_dependency_table vdd_dependency_on_sclk; struct smu7_odn_clock_voltage_dependency_table vdd_dependency_on_mclk; uint32_t odn_mclk_min_limit; + uint32_t min_vddc; + uint32_t max_vddc; }; struct profile_mode_setting { @@ -308,7 +310,6 @@ struct smu7_hwmgr { /* ---- Power Gating States ---- */ bool uvd_power_gated; bool vce_power_gated; - bool samu_power_gated; bool need_long_memory_training; /* Application power optimization parameters */ @@ -325,7 +326,6 @@ struct smu7_hwmgr { uint16_t mem_latency_high; uint16_t mem_latency_low; uint32_t vr_config; - struct profile_mode_setting custom_profile_setting; struct profile_mode_setting current_profile_setting; }; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index 03bc7453f3b1..c952845833d7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c @@ -623,6 +623,190 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris11_Kicker[] = { 0xFFFFFFFF } /* End of list */ }; +static const struct gpu_pt_config_reg GCCACConfig_VegaM[] = +{ +// --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +// Offset Mask Shift Value Type +// --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + // DIDT_SQ + // + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060013, GPU_CONFIGREG_GC_CAC_IND }, + + // DIDT_TD + // + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0013, GPU_CONFIGREG_GC_CAC_IND }, + + // DIDT_TCP + // + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900013, GPU_CONFIGREG_GC_CAC_IND }, + + { 0xFFFFFFFF } // End of list +}; + +static const struct gpu_pt_config_reg DIDTConfig_VegaM[] = +{ +// 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +// Offset Mask Shift Value Type +// --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + // DIDT_SQ + // + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND 
}, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + // DIDT_TD + // + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL_OCP, 
DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0009, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0009, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + // DIDT_TCP + // + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, 
DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT,0x01aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, 
GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { 0xFFFFFFFF } // End of list +}; static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable) { uint32_t en = enable ? 1 : 0; @@ -740,8 +924,8 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) PP_CAP(PHM_PlatformCaps_TDRamping) || PP_CAP(PHM_PlatformCaps_TCPRamping)) { - cgs_enter_safe_mode(hwmgr->device, true); - cgs_lock_grbm_idx(hwmgr->device, true); + adev->gfx.rlc.funcs->enter_safe_mode(adev); + mutex_lock(&adev->grbm_idx_mutex); value = 0; value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX); for (count = 0; count < num_se; count++) { @@ -752,67 +936,80 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) if (hwmgr->chip_id == CHIP_POLARIS10) { result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris10); - PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error); result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10); - PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error); } else if (hwmgr->chip_id == CHIP_POLARIS11) { result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11); - PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error); if (hwmgr->is_kicker) result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11_Kicker); else result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11); - PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error); } else if (hwmgr->chip_id == CHIP_POLARIS12) { result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11); - PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error); result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris12); - PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error); + } else if (hwmgr->chip_id == CHIP_VEGAM) { + result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_VegaM); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error); + result = 
smu7_program_pt_config_registers(hwmgr, DIDTConfig_VegaM);
+ PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error);
 }
 }
 cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2);

 result = smu7_enable_didt(hwmgr, true);
- PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result);
+ PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", goto error);

 if (hwmgr->chip_id == CHIP_POLARIS11) {
 result = smum_send_msg_to_smc(hwmgr,
 (uint16_t)(PPSMC_MSG_EnableDpmDidt));
 PP_ASSERT_WITH_CODE((0 == result),
- "Failed to enable DPM DIDT.", return result);
+ "Failed to enable DPM DIDT.", goto error);
 }
- cgs_lock_grbm_idx(hwmgr->device, false);
- cgs_enter_safe_mode(hwmgr->device, false);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
 }

 return 0;
+error:
+ mutex_unlock(&adev->grbm_idx_mutex);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ return result;
 }

 int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
 {
 int result;
+ struct amdgpu_device *adev = hwmgr->adev;

 if (PP_CAP(PHM_PlatformCaps_SQRamping) ||
 PP_CAP(PHM_PlatformCaps_DBRamping) ||
 PP_CAP(PHM_PlatformCaps_TDRamping) ||
 PP_CAP(PHM_PlatformCaps_TCPRamping)) {

- cgs_enter_safe_mode(hwmgr->device, true);
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);

 result = smu7_enable_didt(hwmgr, false);
 PP_ASSERT_WITH_CODE((result == 0),
 "Post DIDT enable clock gating failed.",
- return result);
+ goto error);

 if (hwmgr->chip_id == CHIP_POLARIS11) {
 result = smum_send_msg_to_smc(hwmgr,
 (uint16_t)(PPSMC_MSG_DisableDpmDidt));
 PP_ASSERT_WITH_CODE((0 == result),
- "Failed to disable DPM DIDT.", return result);
+ "Failed to disable DPM DIDT.", goto error);
 }
- cgs_enter_safe_mode(hwmgr->device, false);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
 }

 return 0;
+error:
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ return result;
 }
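Both DIDT paths now run inside RLC safe mode (and, on the enable side, hold the GRBM index mutex), so the old early-return assertion macros would have bailed out with that state still engaged. Funneling every failure through a single cleanup label fixes the unwind. A compilable sketch of the idiom, with stubs in place of the real safe-mode and mutex calls:

    #include <stdio.h>

    static void enter_safe_mode(void) { puts("enter safe mode"); }
    static void exit_safe_mode(void)  { puts("exit safe mode"); }

    /* Pretend step 2 fails so the error path is exercised. */
    static int program_step(int step) { return step == 2 ? -1 : 0; }

    static int configure(void)
    {
        int result = 0;
        int i;

        enter_safe_mode();
        for (i = 0; i < 4; i++) {
            result = program_step(i);
            if (result)
                goto error; /* unwind instead of returning directly */
        }
        exit_safe_mode();
        return 0;
    error:
        exit_safe_mode(); /* the acquired state is released on every path */
        return result;
    }

    int main(void)
    {
        printf("configure() = %d\n", configure());
        return 0;
    }

 int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr)
@@ -852,12 +1049,10 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
 {
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

- n = (n & 0xff) << 8;
-
 if (data->power_containment_features &
 POWERCONTAINMENT_FEATURE_PkgPwrLimit)
 return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PkgPwrSetLimit, n);
+ PPSMC_MSG_PkgPwrSetLimit, n<<8);

 return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index 7b26607c646a..288802f209dd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -314,8 +314,7 @@ static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr)
 uint8_t frev, crev;
 uint16_t size;

- info = (ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *) cgs_atom_get_data_table(
- hwmgr->device,
+ info = (ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *)smu_atom_get_data_table(hwmgr->adev,
 GetIndexIntoMasterTable(DATA, IntegratedSystemInfo),
 &size, &frev, &crev);

@@ -694,7 +693,7 @@ static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
 else
 data->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk;

- clock = hwmgr->display_config.min_core_set_clock;
+ clock = hwmgr->display_config->min_core_set_clock;
 if (clock == 0)
 pr_debug("min_core_set_clock not set\n");

@@ -749,7 +748,7 @@ static int smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
 {
 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 PHM_PlatformCaps_SclkDeepSleep)) {
- uint32_t clks = hwmgr->display_config.min_core_set_clock_in_sr;
+ uint32_t clks = hwmgr->display_config->min_core_set_clock_in_sr;
 if (clks ==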
0) clks = SMU8_MIN_DEEP_SLEEP_SCLK; @@ -1041,25 +1040,21 @@ static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, struct smu8_hwmgr *data = hwmgr->backend; struct PP_Clocks clocks = {0, 0, 0, 0}; bool force_high; - uint32_t num_of_active_displays = 0; - struct cgs_display_info info = {0}; smu8_ps->need_dfs_bypass = true; data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label); - clocks.memoryClock = hwmgr->display_config.min_mem_set_clock != 0 ? - hwmgr->display_config.min_mem_set_clock : + clocks.memoryClock = hwmgr->display_config->min_mem_set_clock != 0 ? + hwmgr->display_config->min_mem_set_clock : data->sys_info.nbp_memory_clock[1]; - cgs_get_active_displays_info(hwmgr->device, &info); - num_of_active_displays = info.display_count; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk; force_high = (clocks.memoryClock > data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1]) - || (num_of_active_displays >= 3); + || (hwmgr->display_config->num_display >= 3); smu8_ps->action = smu8_current_ps->action; @@ -1609,17 +1604,17 @@ static int smu8_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type switch (type) { case amd_pp_disp_clock: for (i = 0; i < clocks->count; i++) - clocks->clock[i] = data->sys_info.display_clock[i]; + clocks->clock[i] = data->sys_info.display_clock[i] * 10; break; case amd_pp_sys_clock: table = hwmgr->dyn_state.vddc_dependency_on_sclk; for (i = 0; i < clocks->count; i++) - clocks->clock[i] = table->entries[i].clk; + clocks->clock[i] = table->entries[i].clk * 10; break; case amd_pp_mem_clock: clocks->count = SMU8_NUM_NBPMEMORYCLOCK; for (i = 0; i < clocks->count; i++) - clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i]; + clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i] * 10; break; default: return -1; @@ -1897,20 +1892,20 @@ static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) data->uvd_power_gated = bgate; if (bgate) { - cgs_set_powergating_state(hwmgr->device, + amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_UVD, AMD_PG_STATE_GATE); - cgs_set_clockgating_state(hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_UVD, AMD_CG_STATE_GATE); smu8_dpm_update_uvd_dpm(hwmgr, true); smu8_dpm_powerdown_uvd(hwmgr); } else { smu8_dpm_powerup_uvd(hwmgr); - cgs_set_clockgating_state(hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_UVD, AMD_CG_STATE_UNGATE); - cgs_set_powergating_state(hwmgr->device, + amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_UVD, AMD_PG_STATE_UNGATE); smu8_dpm_update_uvd_dpm(hwmgr, false); @@ -1923,12 +1918,10 @@ static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) struct smu8_hwmgr *data = hwmgr->backend; if (bgate) { - cgs_set_powergating_state( - hwmgr->device, + amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCE, AMD_PG_STATE_GATE); - cgs_set_clockgating_state( - hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_GATE); smu8_enable_disable_vce_dpm(hwmgr, false); @@ -1937,12 +1930,10 @@ static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) } else { smu8_dpm_powerup_vce(hwmgr); data->vce_power_gated = false; - cgs_set_clockgating_state( - hwmgr->device, + 
amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_UNGATE); - cgs_set_powergating_state( - hwmgr->device, + amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCE, AMD_PG_STATE_UNGATE); smu8_dpm_update_vce_dpm(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c index 598122854ab5..2aab1b475945 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c @@ -24,6 +24,10 @@ #include "pp_debug.h" #include "ppatomctrl.h" #include "ppsmc.h" +#include "atom.h" +#include "ivsrcid/thm/irqsrcs_thm_9_0.h" +#include "ivsrcid/smuio/irqsrcs_smuio_9_0.h" +#include "ivsrcid/ivsrcid_vislands30.h" uint8_t convert_to_vid(uint16_t vddc) { @@ -542,17 +546,17 @@ int phm_irq_process(struct amdgpu_device *adev, uint32_t src_id = entry->src_id; if (client_id == AMDGPU_IH_CLIENTID_LEGACY) { - if (src_id == 230) + if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH) pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n", PCI_BUS_NUM(adev->pdev->devfn), PCI_SLOT(adev->pdev->devfn), PCI_FUNC(adev->pdev->devfn)); - else if (src_id == 231) + else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW) pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n", PCI_BUS_NUM(adev->pdev->devfn), PCI_SLOT(adev->pdev->devfn), PCI_FUNC(adev->pdev->devfn)); - else if (src_id == 83) + else if (src_id == VISLANDS30_IV_SRCID_GPIO_19) pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n", PCI_BUS_NUM(adev->pdev->devfn), PCI_SLOT(adev->pdev->devfn), @@ -593,18 +597,115 @@ int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr) amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), SOC15_IH_CLIENTID_THM, - 0, + THM_9_0__SRCID__THM_DIG_THERM_L2H, source); amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), SOC15_IH_CLIENTID_THM, - 1, + THM_9_0__SRCID__THM_DIG_THERM_H2L, source); /* Register CTF(GPIO_19) interrupt */ amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), SOC15_IH_CLIENTID_ROM_SMUIO, - 83, + SMUIO_9_0__SRCID__SMUIO_GPIO19, source); return 0; } + +void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size, + uint8_t *frev, uint8_t *crev) +{ + struct amdgpu_device *adev = dev; + uint16_t data_start; + + if (amdgpu_atom_parse_data_header( + adev->mode_info.atom_context, table, size, + frev, crev, &data_start)) + return (uint8_t *)adev->mode_info.atom_context->bios + + data_start; + + return NULL; +} + +int smu_get_voltage_dependency_table_ppt_v1( + const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table, + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table) +{ + uint8_t i = 0; + PP_ASSERT_WITH_CODE((0 != allowed_dep_table->count), + "Voltage Lookup Table empty", + return -EINVAL); + + dep_table->count = allowed_dep_table->count; + for (i=0; i<dep_table->count; i++) { + dep_table->entries[i].clk = allowed_dep_table->entries[i].clk; + dep_table->entries[i].vddInd = allowed_dep_table->entries[i].vddInd; + dep_table->entries[i].vdd_offset = allowed_dep_table->entries[i].vdd_offset; + dep_table->entries[i].vddc = allowed_dep_table->entries[i].vddc; + dep_table->entries[i].vddgfx = allowed_dep_table->entries[i].vddgfx; + dep_table->entries[i].vddci = allowed_dep_table->entries[i].vddci; + dep_table->entries[i].mvdd = allowed_dep_table->entries[i].mvdd; + dep_table->entries[i].phases = allowed_dep_table->entries[i].phases; + 
dep_table->entries[i].cks_enable = allowed_dep_table->entries[i].cks_enable;
+ dep_table->entries[i].cks_voffset = allowed_dep_table->entries[i].cks_voffset;
+ }
+
+ return 0;
+}
+
+int smu_set_watermarks_for_clocks_ranges(void *wt_table,
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+{
+ uint32_t i;
+ struct watermarks *table = wt_table;
+
+ if (!table || !wm_with_clock_ranges)
+ return -EINVAL;
+
+ if (wm_with_clock_ranges->num_wm_dmif_sets > 4 || wm_with_clock_ranges->num_wm_mcif_sets > 4)
+ return -EINVAL;
+
+ for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
+ table->WatermarkRow[1][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz) /
+ 1000);
+ table->WatermarkRow[1][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) /
+ 1000);
+ table->WatermarkRow[1][i].MinUclk =
+ cpu_to_le16((uint16_t)
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
+ 1000);
+ table->WatermarkRow[1][i].MaxUclk =
+ cpu_to_le16((uint16_t)
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
+ 1000);
+ table->WatermarkRow[1][i].WmSetting = (uint8_t)
+ wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
+ }
+
+ for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
+ table->WatermarkRow[0][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz) /
+ 1000);
+ table->WatermarkRow[0][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz) /
+ 1000);
+ table->WatermarkRow[0][i].MinUclk =
+ cpu_to_le16((uint16_t)
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
+ 1000);
+ table->WatermarkRow[0][i].MaxUclk =
+ cpu_to_le16((uint16_t)
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
+ 1000);
+ table->WatermarkRow[0][i].WmSetting = (uint8_t)
+ wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+ }
+ return 0;
+}
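One hazard in conversions like the watermark fill above: the (uint16_t) cast binds tighter than the divide, so (uint16_t)(clk_in_khz) / 1000 truncates the 32-bit kHz value to 16 bits before dividing, which mangles any clock above 65535 kHz. A small sketch of the safe ordering, divide first and then narrow (to_le16 is a stand-in for the kernel's cpu_to_le16 on a little-endian host):

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t to_le16(uint16_t v) { return v; } /* no-op on little-endian */

    /* Divide before narrowing so large kHz values survive the 16-bit field. */
    static uint16_t khz_to_mhz_field(uint32_t clk_in_khz)
    {
        return to_le16((uint16_t)(clk_in_khz / 1000));
    }

    int main(void)
    {
        /* 800000 kHz = 800 MHz; truncating first would yield 13 instead. */
        printf("%u\n", (unsigned)khz_to_mhz_field(800000));
        return 0;
    }

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
index d37d16e4b613..5454289d5226 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
@@ -26,10 +26,27 @@ struct pp_atomctrl_voltage_table;
 struct pp_hwmgr;
 struct phm_ppt_v1_voltage_lookup_table;
+struct Watermarks_t;
+struct pp_wm_sets_with_clock_ranges_soc15;

 uint8_t convert_to_vid(uint16_t vddc);
 uint16_t convert_to_vddc(uint8_t vid);

+struct watermark_row_generic_t {
+ uint16_t MinClock;
+ uint16_t MaxClock;
+ uint16_t MinUclk;
+ uint16_t MaxUclk;
+
+ uint8_t WmSetting;
+ uint8_t Padding[3];
+};
+
+struct watermarks {
+ struct watermark_row_generic_t WatermarkRow[2][4];
+ uint32_t padding[7];
+};
+
 extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
 uint32_t index,
 uint32_t value, uint32_t mask);
@@ -82,6 +99,16 @@ int phm_irq_process(struct amdgpu_device *adev,

 int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr);

+void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size,
+ uint8_t *frev, uint8_t *crev);
+
+int smu_get_voltage_dependency_table_ppt_v1(
+ const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table,
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table);
+
+int smu_set_watermarks_for_clocks_ranges(void *wt_table,
+ struct dm_pp_wm_sets_with_clock_ranges_soc15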
*wm_with_clock_ranges); + #define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT #define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 7cbb56ba6fab..1a0dccb3fac1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -36,7 +36,7 @@ #include "smu9.h" #include "smu9_driver_if.h" #include "vega10_inc.h" -#include "pp_soc15.h" +#include "soc15_common.h" #include "pppcielanes.h" #include "vega10_hwmgr.h" #include "vega10_processpptables.h" @@ -51,20 +51,10 @@ #include "smuio/smuio_9_0_offset.h" #include "smuio/smuio_9_0_sh_mask.h" -#define VOLTAGE_SCALE 4 -#define VOLTAGE_VID_OFFSET_SCALE1 625 -#define VOLTAGE_VID_OFFSET_SCALE2 100 - #define HBM_MEMORY_CHANNEL_WIDTH 128 static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2}; -#define MEM_FREQ_LOW_LATENCY 25000 -#define MEM_FREQ_HIGH_LATENCY 80000 -#define MEM_LATENCY_HIGH 245 -#define MEM_LATENCY_LOW 35 -#define MEM_LATENCY_ERR 0xFFFF - #define mmDF_CS_AON0_DramBaseAddress0 0x0044 #define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0 @@ -79,8 +69,6 @@ static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2}; #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L -static int vega10_force_clock_level(struct pp_hwmgr *hwmgr, - enum pp_clock_type type, uint32_t mask); static const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic); @@ -291,6 +279,52 @@ static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr) return 0; } +static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = hwmgr->backend; + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table); + struct vega10_odn_vddc_lookup_table *od_lookup_table; + struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table[3]; + struct phm_ppt_v1_clock_voltage_dependency_table *od_table[3]; + uint32_t i; + + od_lookup_table = &odn_table->vddc_lookup_table; + vddc_lookup_table = table_info->vddc_lookup_table; + + for (i = 0; i < vddc_lookup_table->count; i++) + od_lookup_table->entries[i].us_vdd = vddc_lookup_table->entries[i].us_vdd; + + od_lookup_table->count = vddc_lookup_table->count; + + dep_table[0] = table_info->vdd_dep_on_sclk; + dep_table[1] = table_info->vdd_dep_on_mclk; + dep_table[2] = table_info->vdd_dep_on_socclk; + od_table[0] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_sclk; + od_table[1] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_mclk; + od_table[2] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_socclk; + + for (i = 0; i < 3; i++) + smu_get_voltage_dependency_table_ppt_v1(dep_table[i], od_table[i]); + + if (odn_table->max_vddc == 0 || odn_table->max_vddc > 2000) + odn_table->max_vddc = dep_table[0]->entries[dep_table[0]->count - 1].vddc; + if (odn_table->min_vddc == 0 || odn_table->min_vddc > 2000) + odn_table->min_vddc = dep_table[0]->entries[0].vddc; + + i = od_table[2]->count - 1; + od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock > od_table[2]->entries[i].clk ? 
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock : + od_table[2]->entries[i].clk; + od_table[2]->entries[i].vddc = odn_table->max_vddc > od_table[2]->entries[i].vddc ? + odn_table->max_vddc : + od_table[2]->entries[i].vddc; + + return 0; +} + static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) { struct vega10_hwmgr *data = hwmgr->backend; @@ -427,7 +461,6 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) /* ACG firmware has major version 5 */ if ((hwmgr->smu_version & 0xff000000) == 0x5000000) data->smu_features[GNLD_ACG].supported = true; - if (data->registry_data.didt_support) data->smu_features[GNLD_DIDT].supported = true; @@ -754,7 +787,6 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) uint32_t config_telemetry = 0; struct pp_atomfwctrl_voltage_table vol_table; struct amdgpu_device *adev = hwmgr->adev; - uint32_t reg; data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL); if (data == NULL) @@ -860,10 +892,7 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) advanceFanControlParameters.usFanPWMMinLimit * hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100; - reg = soc15_get_register_offset(DF_HWID, 0, - mmDF_CS_AON0_DramBaseAddress0_BASE_IDX, - mmDF_CS_AON0_DramBaseAddress0); - data->mem_channels = (cgs_read_register(hwmgr->device, reg) & + data->mem_channels = (RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0) & DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >> DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT; PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number), @@ -1280,6 +1309,9 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) vega10_setup_default_single_dpm_table(hwmgr, dpm_table, dep_gfx_table); + if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0) + hwmgr->platform_descriptor.overdriveLimit.engineClock = + dpm_table->dpm_levels[dpm_table->count-1].value; vega10_init_dpm_state(&(dpm_table->dpm_state)); /* Initialize Mclk DPM table based on allow Mclk values */ @@ -1288,6 +1320,10 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) vega10_setup_default_single_dpm_table(hwmgr, dpm_table, dep_mclk_table); + if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) + hwmgr->platform_descriptor.overdriveLimit.memoryClock = + dpm_table->dpm_levels[dpm_table->count-1].value; + vega10_init_dpm_state(&(dpm_table->dpm_state)); data->dpm_table.eclk_table.count = 0; @@ -1370,48 +1406,6 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) memcpy(&(data->golden_dpm_table), &(data->dpm_table), sizeof(struct vega10_dpm_table)); - if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) || - PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) { - data->odn_dpm_table.odn_core_clock_dpm_levels.num_of_pl = - data->dpm_table.gfx_table.count; - for (i = 0; i < data->dpm_table.gfx_table.count; i++) { - data->odn_dpm_table.odn_core_clock_dpm_levels.entries[i].clock = - data->dpm_table.gfx_table.dpm_levels[i].value; - data->odn_dpm_table.odn_core_clock_dpm_levels.entries[i].enabled = true; - } - - data->odn_dpm_table.vdd_dependency_on_sclk.count = - dep_gfx_table->count; - for (i = 0; i < dep_gfx_table->count; i++) { - data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].clk = - dep_gfx_table->entries[i].clk; - data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].vddInd = - dep_gfx_table->entries[i].vddInd; - data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_enable = - dep_gfx_table->entries[i].cks_enable; - data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_voffset 
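/*
 * Illustrative sketch (not part of the patch) — the VDDC sanity clamp
 * used by vega10_odn_initial_default_setting() above: fused AVFS limits
 * of 0 ("not provided") or above 2000 mV are replaced by the first/last
 * VDDC of the SCLK dependency table. The helper name is hypothetical.
 */
static uint32_t clamp_odn_vddc(uint32_t fused_mv, uint32_t fallback_mv)
{
	if (fused_mv == 0 || fused_mv > 2000)	/* bounds in mV */
		return fallback_mv;
	return fused_mv;
}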
= - dep_gfx_table->entries[i].cks_voffset; - } - - data->odn_dpm_table.odn_memory_clock_dpm_levels.num_of_pl = - data->dpm_table.mem_table.count; - for (i = 0; i < data->dpm_table.mem_table.count; i++) { - data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[i].clock = - data->dpm_table.mem_table.dpm_levels[i].value; - data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[i].enabled = true; - } - - data->odn_dpm_table.vdd_dependency_on_mclk.count = dep_mclk_table->count; - for (i = 0; i < dep_mclk_table->count; i++) { - data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].clk = - dep_mclk_table->entries[i].clk; - data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddInd = - dep_mclk_table->entries[i].vddInd; - data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddci = - dep_mclk_table->entries[i].vddci; - } - } - return 0; } @@ -1514,18 +1508,18 @@ static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr, { struct phm_ppt_v2_information *table_info = (struct phm_ppt_v2_information *)(hwmgr->pptable); - struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk = - table_info->vdd_dep_on_sclk; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk; struct vega10_hwmgr *data = hwmgr->backend; struct pp_atomfwctrl_clock_dividers_soc15 dividers; uint32_t gfx_max_clock = hwmgr->platform_descriptor.overdriveLimit.engineClock; uint32_t i = 0; - if (data->apply_overdrive_next_settings_mask & - DPMTABLE_OD_UPDATE_VDDC) + if (hwmgr->od_enabled) dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *) - &(data->odn_dpm_table.vdd_dependency_on_sclk); + &(data->odn_dpm_table.vdd_dep_on_sclk); + else + dep_on_sclk = table_info->vdd_dep_on_sclk; PP_ASSERT_WITH_CODE(dep_on_sclk, "Invalid SOC_VDD-GFX_CLK Dependency Table!", return -EINVAL); @@ -1577,23 +1571,32 @@ static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr, uint32_t soc_clock, uint8_t *current_soc_did, uint8_t *current_vol_index) { + struct vega10_hwmgr *data = hwmgr->backend; struct phm_ppt_v2_information *table_info = (struct phm_ppt_v2_information *)(hwmgr->pptable); - struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc = - table_info->vdd_dep_on_socclk; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc; struct pp_atomfwctrl_clock_dividers_soc15 dividers; uint32_t i; - PP_ASSERT_WITH_CODE(dep_on_soc, - "Invalid SOC_VDD-SOC_CLK Dependency Table!", - return -EINVAL); - for (i = 0; i < dep_on_soc->count; i++) { - if (dep_on_soc->entries[i].clk == soc_clock) - break; + if (hwmgr->od_enabled) { + dep_on_soc = (struct phm_ppt_v1_clock_voltage_dependency_table *) + &data->odn_dpm_table.vdd_dep_on_socclk; + for (i = 0; i < dep_on_soc->count; i++) { + if (dep_on_soc->entries[i].clk >= soc_clock) + break; + } + } else { + dep_on_soc = table_info->vdd_dep_on_socclk; + for (i = 0; i < dep_on_soc->count; i++) { + if (dep_on_soc->entries[i].clk == soc_clock) + break; + } } + PP_ASSERT_WITH_CODE(dep_on_soc->count > i, "Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table", return -EINVAL); + PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, soc_clock, &dividers), @@ -1602,22 +1605,6 @@ static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr, *current_soc_did = (uint8_t)dividers.ulDid; *current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd); - - return 0; -} - -uint16_t vega10_locate_vddc_given_clock(struct pp_hwmgr *hwmgr, - uint32_t clk, - struct phm_ppt_v1_clock_voltage_dependency_table *dep_table) -{ - uint16_t i; - 
for (i = 0; i < dep_table->count; i++) { - if (dep_table->entries[i].clk == clk) - return dep_table->entries[i].vddc; - } - - pr_info("[LocateVddcGivenClock] Cannot locate SOC Vddc for this clock!"); return 0; } @@ -1631,8 +1618,6 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) struct vega10_hwmgr *data = hwmgr->backend; struct phm_ppt_v2_information *table_info = (struct phm_ppt_v2_information *)(hwmgr->pptable); - struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = - table_info->vdd_dep_on_socclk; PPTable_t *pp_table = &(data->smc_state_table.pp_table); struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table); int result = 0; @@ -1663,11 +1648,6 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) dpm_table = &(data->dpm_table.soc_table); for (i = 0; i < dpm_table->count; i++) { - pp_table->SocVid[i] = - (uint8_t)convert_to_vid( - vega10_locate_vddc_given_clock(hwmgr, - dpm_table->dpm_levels[i].value, - dep_table)); result = vega10_populate_single_soc_level(hwmgr, dpm_table->dpm_levels[i].value, &(pp_table->SocclkDid[i]), @@ -1678,7 +1658,6 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) j = i - 1; while (i < NUM_SOCCLK_DPM_LEVELS) { - pp_table->SocVid[i] = pp_table->SocVid[j]; result = vega10_populate_single_soc_level(hwmgr, dpm_table->dpm_levels[j].value, &(pp_table->SocclkDid[i]), @@ -1691,6 +1670,32 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) return result; } +static void vega10_populate_vddc_soc_levels(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = hwmgr->backend; + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + struct phm_ppt_v2_information *table_info = hwmgr->pptable; + struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table; + + uint8_t soc_vid = 0; + uint32_t i, max_vddc_level; + + if (hwmgr->od_enabled) + vddc_lookup_table = (struct phm_ppt_v1_voltage_lookup_table *)&data->odn_dpm_table.vddc_lookup_table; + else + vddc_lookup_table = table_info->vddc_lookup_table; + + max_vddc_level = vddc_lookup_table->count; + for (i = 0; i < max_vddc_level; i++) { + soc_vid = (uint8_t)convert_to_vid(vddc_lookup_table->entries[i].us_vdd); + pp_table->SocVid[i] = soc_vid; + } + while (i < MAX_REGULAR_DPM_NUMBER) { + pp_table->SocVid[i] = soc_vid; + i++; + } +} + /** * @brief Populates single SMC GFXCLK structure using the provided clock. * @@ -1705,25 +1710,25 @@ static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr, struct vega10_hwmgr *data = hwmgr->backend; struct phm_ppt_v2_information *table_info = (struct phm_ppt_v2_information *)(hwmgr->pptable); - struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk = - table_info->vdd_dep_on_mclk; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk; struct pp_atomfwctrl_clock_dividers_soc15 dividers; uint32_t mem_max_clock = hwmgr->platform_descriptor.overdriveLimit.memoryClock; uint32_t i = 0; - if (data->apply_overdrive_next_settings_mask & - DPMTABLE_OD_UPDATE_VDDC) + if (hwmgr->od_enabled) dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *) - &data->odn_dpm_table.vdd_dependency_on_mclk; + &data->odn_dpm_table.vdd_dep_on_mclk; + else + dep_on_mclk = table_info->vdd_dep_on_mclk; PP_ASSERT_WITH_CODE(dep_on_mclk, "Invalid SOC_VDD-UCLK Dependency Table!", return -EINVAL); - if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) + if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { mem_clock = mem_clock > mem_max_clock ? 
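/*
 * Illustrative sketch (not part of the patch) —
 * vega10_populate_vddc_soc_levels() above relies on convert_to_vid() from
 * smu_helper.c; assuming the usual SVI2 encoding VID = (6200 - 4 * mV) / 25
 * (the helper body is not shown in this hunk), a worked example:
 */
static uint8_t svi2_vid_example(void)
{
	uint16_t us_vdd = 1000;				/* 1.000 V */

	return (uint8_t)((6200 - (us_vdd * 4)) / 25);	/* = 88 */
}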
mem_max_clock : mem_clock; - else { + } else { for (i = 0; i < dep_on_mclk->count; i++) { if (dep_on_mclk->entries[i].clk == mem_clock) break; @@ -2067,6 +2072,9 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) if (data->smu_features[GNLD_AVFS].supported) { result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params); if (!result) { + data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc; + data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc; + pp_table->MinVoltageVid = (uint8_t) convert_to_vid((uint16_t)(avfs_params.ulMinVddc)); pp_table->MaxVoltageVid = (uint8_t) @@ -2345,6 +2353,22 @@ static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable) return 0; } +static int vega10_update_avfs(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = hwmgr->backend; + + if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { + vega10_avfs_enable(hwmgr, false); + } else if (data->need_update_dpm_table) { + vega10_avfs_enable(hwmgr, false); + vega10_avfs_enable(hwmgr, true); + } else { + vega10_avfs_enable(hwmgr, true); + } + + return 0; +} + static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr) { int result = 0; @@ -2384,6 +2408,40 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr) return result; } +static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = hwmgr->backend; + struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table); + struct phm_ppt_v2_information *table_info = hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table; + struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table; + uint32_t i; + + dep_table = table_info->vdd_dep_on_mclk; + odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk); + + for (i = 0; i < dep_table->count; i++) { + if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK; + return; + } + } + + dep_table = table_info->vdd_dep_on_sclk; + odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk); + for (i = 0; i < dep_table->count; i++) { + if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK; + return; + } + } + + if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { + data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK; + } +} + /** * Initializes the SMC table and uploads it * @@ -2400,12 +2458,23 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) PPTable_t *pp_table = &(data->smc_state_table.pp_table); struct pp_atomfwctrl_voltage_table voltage_table; struct pp_atomfwctrl_bios_boot_up_values boot_up_values; + struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table); result = vega10_setup_default_dpm_tables(hwmgr); PP_ASSERT_WITH_CODE(!result, "Failed to setup default DPM tables!", return result); + /* initialize ODN table */ + if (hwmgr->od_enabled) { + if (odn_table->max_vddc) { + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK; + vega10_check_dpm_table_updated(hwmgr); + } else { + vega10_odn_initial_default_setting(hwmgr); + } + } + pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2, &voltage_table); pp_table->MaxVidStep = 
voltage_table.max_vid_step; @@ -2452,6 +2521,8 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) "Failed to initialize Memory Level!", return result); + vega10_populate_vddc_soc_levels(hwmgr); + result = vega10_populate_all_display_clock_levels(hwmgr); PP_ASSERT_WITH_CODE(!result, "Failed to initialize Display Level!", @@ -2481,6 +2552,12 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) data->vbios_boot_state.mvddc = boot_up_values.usMvddc; data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk; data->vbios_boot_state.mem_clock = boot_up_values.ulUClk; + pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, + SMU9_SYSPLL0_SOCCLK_ID, &boot_up_values.ulSocClk); + + pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, + SMU9_SYSPLL0_DCEFCLK_ID, &boot_up_values.ulDCEFClk); + data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk; data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk; if (0 != boot_up_values.usVddc) { @@ -2819,17 +2896,12 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) vega10_enable_disable_PCC_limit_feature(hwmgr, true); - if ((hwmgr->smu_version == 0x001c2c00) || - (hwmgr->smu_version == 0x001c2d00)) - smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_UpdatePkgPwrPidAlpha, 1); - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureTelemetry, data->config_telemetry); tmp_result = vega10_construct_voltage_tables(hwmgr); PP_ASSERT_WITH_CODE(!tmp_result, - "Failed to contruct voltage tables!", + "Failed to construct voltage tables!", result = tmp_result); tmp_result = vega10_init_smc_table(hwmgr); @@ -3019,6 +3091,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, struct pp_power_state *request_ps, const struct pp_power_state *current_ps) { + struct amdgpu_device *adev = hwmgr->adev; struct vega10_power_state *vega10_ps = cast_phw_vega10_power_state(&request_ps->hardware); uint32_t sclk; @@ -3028,7 +3101,6 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, bool disable_mclk_switching_for_frame_lock; bool disable_mclk_switching_for_vr; bool force_mclk_high; - struct cgs_display_info info = {0}; const struct phm_clock_and_voltage_limits *max_limits; uint32_t i; struct vega10_hwmgr *data = hwmgr->backend; @@ -3045,12 +3117,12 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, if (vega10_ps->performance_level_count != 2) pr_info("VI should always have 2 performance levels"); - max_limits = (PP_PowerSource_AC == hwmgr->power_source) ? + max_limits = adev->pm.ac_power ? &(hwmgr->dyn_state.max_clock_voltage_on_ac) : &(hwmgr->dyn_state.max_clock_voltage_on_dc); /* Cap clock DPM tables at DC MAX if it is in DC. 
*/ - if (PP_PowerSource_DC == hwmgr->power_source) { + if (!adev->pm.ac_power) { for (i = 0; i < vega10_ps->performance_level_count; i++) { if (vega10_ps->performance_levels[i].mem_clock > max_limits->mclk) @@ -3063,11 +3135,9 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, } } - cgs_get_active_displays_info(hwmgr->device, &info); - /* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/ - minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock; - minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock; + minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock; + minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock; if (PP_CAP(PHM_PlatformCaps_StablePState)) { stable_pstate_sclk_dpm_percentage = @@ -3107,10 +3177,10 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR); force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh); - if (info.display_count == 0) + if (hwmgr->display_config->num_display == 0) disable_mclk_switching = false; else - disable_mclk_switching = (info.display_count > 1) || + disable_mclk_switching = (hwmgr->display_config->num_display > 1) || disable_mclk_switching_for_frame_lock || disable_mclk_switching_for_vr || force_mclk_high; @@ -3142,7 +3212,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, /* Find the lowest MCLK frequency that is within * the tolerable latency defined in DAL */ - latency = 0; + latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency; for (i = 0; i < data->mclk_latency_table.count; i++) { if ((data->mclk_latency_table.entries[i].latency <= latency) && (data->mclk_latency_table.entries[i].frequency >= @@ -3171,87 +3241,11 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) { - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; - const struct vega10_power_state *vega10_ps = - cast_const_phw_vega10_power_state(states->pnew_state); struct vega10_hwmgr *data = hwmgr->backend; - struct vega10_single_dpm_table *sclk_table = - &(data->dpm_table.gfx_table); - uint32_t sclk = vega10_ps->performance_levels - [vega10_ps->performance_level_count - 1].gfx_clock; - struct vega10_single_dpm_table *mclk_table = - &(data->dpm_table.mem_table); - uint32_t mclk = vega10_ps->performance_levels - [vega10_ps->performance_level_count - 1].mem_clock; - struct PP_Clocks min_clocks = {0}; - uint32_t i; - struct cgs_display_info info = {0}; - - data->need_update_dpm_table = 0; - - if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) || - PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) { - for (i = 0; i < sclk_table->count; i++) { - if (sclk == sclk_table->dpm_levels[i].value) - break; - } - - if (!(data->apply_overdrive_next_settings_mask & - DPMTABLE_OD_UPDATE_SCLK) && i >= sclk_table->count) { - /* Check SCLK in DAL's minimum clocks - * in case DeepSleep divider update is required. 
- */ - if (data->display_timing.min_clock_in_sr != - min_clocks.engineClockInSR && - (min_clocks.engineClockInSR >= - VEGA10_MINIMUM_ENGINE_CLOCK || - data->display_timing.min_clock_in_sr >= - VEGA10_MINIMUM_ENGINE_CLOCK)) - data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK; - } - - cgs_get_active_displays_info(hwmgr->device, &info); - - if (data->display_timing.num_existing_displays != - info.display_count) - data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK; - } else { - for (i = 0; i < sclk_table->count; i++) { - if (sclk == sclk_table->dpm_levels[i].value) - break; - } - - if (i >= sclk_table->count) - data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; - else { - /* Check SCLK in DAL's minimum clocks - * in case DeepSleep divider update is required. - */ - if (data->display_timing.min_clock_in_sr != - min_clocks.engineClockInSR && - (min_clocks.engineClockInSR >= - VEGA10_MINIMUM_ENGINE_CLOCK || - data->display_timing.min_clock_in_sr >= - VEGA10_MINIMUM_ENGINE_CLOCK)) - data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK; - } - - for (i = 0; i < mclk_table->count; i++) { - if (mclk == mclk_table->dpm_levels[i].value) - break; - } - cgs_get_active_displays_info(hwmgr->device, &info); + if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) + data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK; - if (i >= mclk_table->count) - data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; - - if (data->display_timing.num_existing_displays != - info.display_count || - i >= mclk_table->count) - data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK; - } return 0; } @@ -3259,194 +3253,29 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels( struct pp_hwmgr *hwmgr, const void *input) { int result = 0; - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; - const struct vega10_power_state *vega10_ps = - cast_const_phw_vega10_power_state(states->pnew_state); struct vega10_hwmgr *data = hwmgr->backend; - uint32_t sclk = vega10_ps->performance_levels - [vega10_ps->performance_level_count - 1].gfx_clock; - uint32_t mclk = vega10_ps->performance_levels - [vega10_ps->performance_level_count - 1].mem_clock; - struct vega10_dpm_table *dpm_table = &data->dpm_table; - struct vega10_dpm_table *golden_dpm_table = - &data->golden_dpm_table; - uint32_t dpm_count, clock_percent; - uint32_t i; - - if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) || - PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) { - - if (!data->need_update_dpm_table && - !data->apply_optimized_settings && - !data->apply_overdrive_next_settings_mask) - return 0; - - if (data->apply_overdrive_next_settings_mask & - DPMTABLE_OD_UPDATE_SCLK) { - for (dpm_count = 0; - dpm_count < dpm_table->gfx_table.count; - dpm_count++) { - dpm_table->gfx_table.dpm_levels[dpm_count].enabled = - data->odn_dpm_table.odn_core_clock_dpm_levels.entries[dpm_count].enabled; - dpm_table->gfx_table.dpm_levels[dpm_count].value = - data->odn_dpm_table.odn_core_clock_dpm_levels.entries[dpm_count].clock; - } - } - - if (data->apply_overdrive_next_settings_mask & - DPMTABLE_OD_UPDATE_MCLK) { - for (dpm_count = 0; - dpm_count < dpm_table->mem_table.count; - dpm_count++) { - dpm_table->mem_table.dpm_levels[dpm_count].enabled = - data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[dpm_count].enabled; - dpm_table->mem_table.dpm_levels[dpm_count].value = - data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[dpm_count].clock; - } - } - - if ((data->need_update_dpm_table & DPMTABLE_UPDATE_SCLK) || 
- data->apply_optimized_settings || - (data->apply_overdrive_next_settings_mask & - DPMTABLE_OD_UPDATE_SCLK)) { - result = vega10_populate_all_graphic_levels(hwmgr); - PP_ASSERT_WITH_CODE(!result, - "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", - return result); - } - - if ((data->need_update_dpm_table & DPMTABLE_UPDATE_MCLK) || - (data->apply_overdrive_next_settings_mask & - DPMTABLE_OD_UPDATE_MCLK)){ - result = vega10_populate_all_memory_levels(hwmgr); - PP_ASSERT_WITH_CODE(!result, - "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", - return result); - } - } else { - if (!data->need_update_dpm_table && - !data->apply_optimized_settings) - return 0; - if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK && - data->smu_features[GNLD_DPM_GFXCLK].supported) { - dpm_table-> - gfx_table.dpm_levels[dpm_table->gfx_table.count - 1]. - value = sclk; - if (hwmgr->od_enabled) { - /* Need to do calculation based on the golden DPM table - * as the Heatmap GPU Clock axis is also based on - * the default values - */ - PP_ASSERT_WITH_CODE( - golden_dpm_table->gfx_table.dpm_levels - [golden_dpm_table->gfx_table.count - 1].value, - "Divide by 0!", - return -1); - - dpm_count = dpm_table->gfx_table.count < 2 ? - 0 : dpm_table->gfx_table.count - 2; - for (i = dpm_count; i > 1; i--) { - if (sclk > golden_dpm_table->gfx_table.dpm_levels - [golden_dpm_table->gfx_table.count - 1].value) { - clock_percent = - ((sclk - golden_dpm_table->gfx_table.dpm_levels - [golden_dpm_table->gfx_table.count - 1].value) * - 100) / - golden_dpm_table->gfx_table.dpm_levels - [golden_dpm_table->gfx_table.count - 1].value; - - dpm_table->gfx_table.dpm_levels[i].value = - golden_dpm_table->gfx_table.dpm_levels[i].value + - (golden_dpm_table->gfx_table.dpm_levels[i].value * - clock_percent) / 100; - } else if (golden_dpm_table-> - gfx_table.dpm_levels[dpm_table->gfx_table.count-1].value > - sclk) { - clock_percent = - ((golden_dpm_table->gfx_table.dpm_levels - [golden_dpm_table->gfx_table.count - 1].value - - sclk) * 100) / - golden_dpm_table->gfx_table.dpm_levels - [golden_dpm_table->gfx_table.count-1].value; - - dpm_table->gfx_table.dpm_levels[i].value = - golden_dpm_table->gfx_table.dpm_levels[i].value - - (golden_dpm_table->gfx_table.dpm_levels[i].value * - clock_percent) / 100; - } else - dpm_table->gfx_table.dpm_levels[i].value = - golden_dpm_table->gfx_table.dpm_levels[i].value; - } - } - } - - if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK && - data->smu_features[GNLD_DPM_UCLK].supported) { - dpm_table-> - mem_table.dpm_levels[dpm_table->mem_table.count - 1]. - value = mclk; + if (!data->need_update_dpm_table) + return 0; - if (hwmgr->od_enabled) { - PP_ASSERT_WITH_CODE( - golden_dpm_table->mem_table.dpm_levels - [golden_dpm_table->mem_table.count - 1].value, - "Divide by 0!", - return -1); + if (data->need_update_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK + DPMTABLE_UPDATE_SOCCLK)) { + result = vega10_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", + return result); + } - dpm_count = dpm_table->mem_table.count < 2 ? 
- 0 : dpm_table->mem_table.count - 2; - for (i = dpm_count; i > 1; i--) { - if (mclk > golden_dpm_table->mem_table.dpm_levels - [golden_dpm_table->mem_table.count-1].value) { - clock_percent = ((mclk - - golden_dpm_table->mem_table.dpm_levels - [golden_dpm_table->mem_table.count-1].value) * - 100) / - golden_dpm_table->mem_table.dpm_levels - [golden_dpm_table->mem_table.count-1].value; - - dpm_table->mem_table.dpm_levels[i].value = - golden_dpm_table->mem_table.dpm_levels[i].value + - (golden_dpm_table->mem_table.dpm_levels[i].value * - clock_percent) / 100; - } else if (golden_dpm_table->mem_table.dpm_levels - [dpm_table->mem_table.count-1].value > mclk) { - clock_percent = ((golden_dpm_table->mem_table.dpm_levels - [golden_dpm_table->mem_table.count-1].value - mclk) * - 100) / - golden_dpm_table->mem_table.dpm_levels - [golden_dpm_table->mem_table.count-1].value; - - dpm_table->mem_table.dpm_levels[i].value = - golden_dpm_table->mem_table.dpm_levels[i].value - - (golden_dpm_table->mem_table.dpm_levels[i].value * - clock_percent) / 100; - } else - dpm_table->mem_table.dpm_levels[i].value = - golden_dpm_table->mem_table.dpm_levels[i].value; - } - } - } + if (data->need_update_dpm_table & + (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { + result = vega10_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", + return result); + } - if ((data->need_update_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) || - data->apply_optimized_settings) { - result = vega10_populate_all_graphic_levels(hwmgr); - PP_ASSERT_WITH_CODE(!result, - "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", - return result); - } + vega10_populate_vddc_soc_levels(hwmgr); - if (data->need_update_dpm_table & - (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { - result = vega10_populate_all_memory_levels(hwmgr); - PP_ASSERT_WITH_CODE(!result, - "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", - return result); - } - } return result; } @@ -3742,8 +3571,9 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr, PP_ASSERT_WITH_CODE(!result, "Failed to upload PPtable!", return result); - data->apply_optimized_settings = false; - data->apply_overdrive_next_settings_mask = 0; + vega10_update_avfs(hwmgr); + + data->need_update_dpm_table &= DPMTABLE_OD_UPDATE_VDDC; return 0; } @@ -3793,16 +3623,18 @@ static uint32_t vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) } static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr, - struct pp_gpu_power *query) + uint32_t *query) { uint32_t value; + if (!query) + return -EINVAL; + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr); value = smum_get_argument(hwmgr); - /* power value is an integer */ - memset(query, 0, sizeof *query); - query->average_gpu_power = value << 8; + /* SMC returning actual watts, keep consistent with legacy asics, low 8 bit as 8 fractional bits */ + *query = value << 8; return 0; } @@ -3810,22 +3642,18 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr, static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx, void *value, int *size) { - uint32_t sclk_idx, mclk_idx, activity_percent = 0; + struct amdgpu_device *adev = hwmgr->adev; + uint32_t sclk_mhz, mclk_idx, activity_percent = 0; struct vega10_hwmgr *data = hwmgr->backend; struct vega10_dpm_table *dpm_table = &data->dpm_table; int ret = 0; - uint32_t reg, val_vid; + uint32_t val_vid; switch (idx) { case AMDGPU_PP_SENSOR_GFX_SCLK: - 
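/*
 * Illustrative sketch (not part of the patch) — decoding the 24.8
 * fixed-point power value produced by vega10_get_gpu_power() above: the
 * SMC reports integer watts and the low 8 bits become the fractional
 * part. Helper name and sample value are hypothetical.
 */
static void decode_gpu_power_example(void)
{
	uint32_t query = 42 << 8;	/* SMC reported 42 W -> 0x2a00 */
	uint32_t watts = query >> 8;	/* integer part: 42 */
	uint32_t frac_mw = (query & 0xff) * 1000 / 256;	/* fraction in mW: 0 */

	(void)watts;
	(void)frac_mw;
}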
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex); - sclk_idx = smum_get_argument(hwmgr); - if (sclk_idx < dpm_table->gfx_table.count) { - *((uint32_t *)value) = dpm_table->gfx_table.dpm_levels[sclk_idx].value; - *size = 4; - } else { - ret = -EINVAL; - } + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency); + sclk_mhz = smum_get_argument(hwmgr); + *((uint32_t *)value) = sclk_mhz * 100; break; case AMDGPU_PP_SENSOR_GFX_MCLK: smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex); @@ -3856,18 +3684,10 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx, *size = 4; break; case AMDGPU_PP_SENSOR_GPU_POWER: - if (*size < sizeof(struct pp_gpu_power)) - ret = -EINVAL; - else { - *size = sizeof(struct pp_gpu_power); - ret = vega10_get_gpu_power(hwmgr, (struct pp_gpu_power *)value); - } + ret = vega10_get_gpu_power(hwmgr, (uint32_t *)value); break; case AMDGPU_PP_SENSOR_VDDGFX: - reg = soc15_get_register_offset(SMUIO_HWID, 0, - mmSMUSVI0_PLANE0_CURRENTVID_BASE_IDX, - mmSMUSVI0_PLANE0_CURRENTVID); - val_vid = (cgs_read_register(hwmgr->device, reg) & + val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_PLANE0_CURRENTVID) & SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK) >> SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT; *((uint32_t *)value) = (uint32_t)convert_to_vddc((uint8_t)val_vid); @@ -3956,26 +3776,18 @@ static int vega10_notify_smc_display_config_after_ps_adjustment( (struct phm_ppt_v2_information *)hwmgr->pptable; struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = table_info->vdd_dep_on_mclk; uint32_t idx; - uint32_t num_active_disps = 0; - struct cgs_display_info info = {0}; struct PP_Clocks min_clocks = {0}; uint32_t i; struct pp_display_clock_request clock_req; - info.mode_info = NULL; - - cgs_get_active_displays_info(hwmgr->device, &info); - - num_active_disps = info.display_count; - - if (num_active_disps > 1) + if (hwmgr->display_config->num_display > 1) vega10_notify_smc_display_change(hwmgr, false); else vega10_notify_smc_display_change(hwmgr, true); - min_clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk; - min_clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk; - min_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock; + min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk; + min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk; + min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock; for (i = 0; i < dpm_table->count; i++) { if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock) @@ -3984,7 +3796,7 @@ static int vega10_notify_smc_display_config_after_ps_adjustment( if (i < dpm_table->count) { clock_req.clock_type = amd_pp_dcef_clock; - clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value; + clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value * 10; if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) { smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, @@ -4120,6 +3932,47 @@ static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) } } +static int vega10_force_clock_level(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, uint32_t mask) +{ + struct vega10_hwmgr *data = hwmgr->backend; + + switch (type) { + case PP_SCLK: + data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0; + data->smc_state_table.gfx_max_level = mask ? 
(fls(mask) - 1) : 0; + + PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr), + "Failed to upload boot level to lowest!", + return -EINVAL); + + PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr), + "Failed to upload dpm max level to highest!", + return -EINVAL); + break; + + case PP_MCLK: + data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0; + data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0; + + PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr), + "Failed to upload boot level to lowest!", + return -EINVAL); + + PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr), + "Failed to upload dpm max level to highest!", + return -EINVAL); + + break; + + case PP_PCIE: + default: + break; + } + + return 0; +} + static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level) { @@ -4200,28 +4053,17 @@ static void vega10_get_sclks(struct pp_hwmgr *hwmgr, table_info->vdd_dep_on_sclk; uint32_t i; + clocks->num_levels = 0; for (i = 0; i < dep_table->count; i++) { if (dep_table->entries[i].clk) { clocks->data[clocks->num_levels].clocks_in_khz = - dep_table->entries[i].clk; + dep_table->entries[i].clk * 10; clocks->num_levels++; } } } -static uint32_t vega10_get_mem_latency(struct pp_hwmgr *hwmgr, - uint32_t clock) -{ - if (clock >= MEM_FREQ_LOW_LATENCY && - clock < MEM_FREQ_HIGH_LATENCY) - return MEM_LATENCY_HIGH; - else if (clock >= MEM_FREQ_HIGH_LATENCY) - return MEM_LATENCY_LOW; - else - return MEM_LATENCY_ERR; -} - static void vega10_get_memclocks(struct pp_hwmgr *hwmgr, struct pp_clock_levels_with_latency *clocks) { @@ -4230,26 +4072,22 @@ static void vega10_get_memclocks(struct pp_hwmgr *hwmgr, struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = table_info->vdd_dep_on_mclk; struct vega10_hwmgr *data = hwmgr->backend; + uint32_t j = 0; uint32_t i; - clocks->num_levels = 0; - data->mclk_latency_table.count = 0; - for (i = 0; i < dep_table->count; i++) { if (dep_table->entries[i].clk) { - clocks->data[clocks->num_levels].clocks_in_khz = - data->mclk_latency_table.entries - [data->mclk_latency_table.count].frequency = - dep_table->entries[i].clk; - clocks->data[clocks->num_levels].latency_in_us = - data->mclk_latency_table.entries - [data->mclk_latency_table.count].latency = - vega10_get_mem_latency(hwmgr, - dep_table->entries[i].clk); - clocks->num_levels++; - data->mclk_latency_table.count++; + + clocks->data[j].clocks_in_khz = + dep_table->entries[i].clk * 10; + data->mclk_latency_table.entries[j].frequency = + dep_table->entries[i].clk; + clocks->data[j].latency_in_us = + data->mclk_latency_table.entries[j].latency = 25; + j++; } } + clocks->num_levels = data->mclk_latency_table.count = j; } static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr, @@ -4262,7 +4100,7 @@ static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr, uint32_t i; for (i = 0; i < dep_table->count; i++) { - clocks->data[i].clocks_in_khz = dep_table->entries[i].clk; + clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10; clocks->data[i].latency_in_us = 0; clocks->num_levels++; } @@ -4278,7 +4116,7 @@ static void vega10_get_socclocks(struct pp_hwmgr *hwmgr, uint32_t i; for (i = 0; i < dep_table->count; i++) { - clocks->data[i].clocks_in_khz = dep_table->entries[i].clk; + clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10; clocks->data[i].latency_in_us = 0; clocks->num_levels++; } @@ -4338,7 +4176,7 @@ static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, } for (i = 0; i < dep_table->count; 
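/*
 * Illustrative sketch (not part of the patch) — the dependency-table
 * clocks handled above are stored in 10 kHz units, which is why
 * clocks_in_khz multiplies by 10 while the sysfs printers divide by 100
 * to get MHz. Sample entry is made up.
 */
static void clock_unit_example(void)
{
	uint32_t clk_entry = 107200;		/* table value, 10 kHz units */
	uint32_t khz = clk_entry * 10;		/* 1072000 kHz */
	uint32_t mhz = clk_entry / 100;		/* 1072 MHz */

	(void)khz;
	(void)mhz;
}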
i++) { - clocks->data[i].clocks_in_khz = dep_table->entries[i].clk; + clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10; clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table-> entries[dep_table->entries[i].vddInd].us_vdd); clocks->num_levels++; @@ -4351,102 +4189,21 @@ static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, } static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, - struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges) + void *clock_range) { struct vega10_hwmgr *data = hwmgr->backend; + struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_range; Watermarks_t *table = &(data->smc_state_table.water_marks_table); int result = 0; - uint32_t i; if (!data->registry_data.disable_water_mark) { - for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) { - table->WatermarkRow[WM_DCEFCLK][i].MinClock = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) / - 100); - table->WatermarkRow[WM_DCEFCLK][i].MaxClock = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) / - 100); - table->WatermarkRow[WM_DCEFCLK][i].MinUclk = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) / - 100); - table->WatermarkRow[WM_DCEFCLK][i].MaxUclk = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) / - 100); - table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t) - wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id; - } - - for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) { - table->WatermarkRow[WM_SOCCLK][i].MinClock = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) / - 100); - table->WatermarkRow[WM_SOCCLK][i].MaxClock = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) / - 100); - table->WatermarkRow[WM_SOCCLK][i].MinUclk = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) / - 100); - table->WatermarkRow[WM_SOCCLK][i].MaxUclk = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) / - 100); - table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t) - wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id; - } + smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges); data->water_marks_bitmap = WaterMarksExist; } return result; } -static int vega10_force_clock_level(struct pp_hwmgr *hwmgr, - enum pp_clock_type type, uint32_t mask) -{ - struct vega10_hwmgr *data = hwmgr->backend; - - switch (type) { - case PP_SCLK: - data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0; - data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0; - - PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr), - "Failed to upload boot level to lowest!", - return -EINVAL); - - PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr), - "Failed to upload dpm max level to highest!", - return -EINVAL); - break; - - case PP_MCLK: - data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0; - data->smc_state_table.mem_max_level = mask ? 
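/*
 * Illustrative sketch (not part of the patch) — the level-mask convention
 * used by vega10_force_clock_level(): each set bit requests one DPM
 * level, so mask = 0x6 selects levels 1 and 2. Helper name is
 * hypothetical.
 */
static void level_mask_example(void)
{
	uint32_t mask = 0x6;			/* binary 110 */
	int boot_level = ffs(mask) - 1;		/* lowest set bit  -> 1 */
	int max_level  = fls(mask) - 1;		/* highest set bit -> 2 */

	(void)boot_level;
	(void)max_level;
}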
(fls(mask) - 1) : 0; - - PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr), - "Failed to upload boot level to lowest!", - return -EINVAL); - - PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr), - "Failed to upload dpm max level to highest!", - return -EINVAL); - - break; - - case PP_PCIE: - default: - break; - } - - return 0; -} - static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf) { @@ -4454,6 +4211,8 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table); struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table); struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table); + struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL; + int i, now, size = 0; switch (type) { @@ -4492,6 +4251,40 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, (pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "", (i == now) ? "*" : ""); break; + case OD_SCLK: + if (hwmgr->od_enabled) { + size = sprintf(buf, "%s:\n", "OD_SCLK"); + podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk; + for (i = 0; i < podn_vdd_dep->count; i++) + size += sprintf(buf + size, "%d: %10uMhz %10umV\n", + i, podn_vdd_dep->entries[i].clk / 100, + podn_vdd_dep->entries[i].vddc); + } + break; + case OD_MCLK: + if (hwmgr->od_enabled) { + size = sprintf(buf, "%s:\n", "OD_MCLK"); + podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk; + for (i = 0; i < podn_vdd_dep->count; i++) + size += sprintf(buf + size, "%d: %10uMhz %10umV\n", + i, podn_vdd_dep->entries[i].clk/100, + podn_vdd_dep->entries[i].vddc); + } + break; + case OD_RANGE: + if (hwmgr->od_enabled) { + size = sprintf(buf, "%s:\n", "OD_RANGE"); + size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", + data->golden_dpm_table.gfx_table.dpm_levels[0].value/100, + hwmgr->platform_descriptor.overdriveLimit.engineClock/100); + size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n", + data->golden_dpm_table.mem_table.dpm_levels[0].value/100, + hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); + size += sprintf(buf + size, "VDDC: %7umV %11umV\n", + data->odn_dpm_table.min_vddc, + data->odn_dpm_table.max_vddc); + } + break; default: break; } @@ -4501,10 +4294,8 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr) { struct vega10_hwmgr *data = hwmgr->backend; - int result = 0; - uint32_t num_turned_on_displays = 1; Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table); - struct cgs_display_info info = {0}; + int result = 0; if ((data->water_marks_bitmap & WaterMarksExist) && !(data->water_marks_bitmap & WaterMarksLoaded)) { @@ -4514,10 +4305,8 @@ static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr) } if (data->water_marks_bitmap & WaterMarksLoaded) { - cgs_get_active_displays_info(hwmgr->device, &info); - num_turned_on_displays = info.display_count; smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_NumOfDisplays, num_turned_on_displays); + PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display); } return result; @@ -4603,15 +4392,12 @@ vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmg { struct vega10_hwmgr *data = hwmgr->backend; bool is_update_required = false; - struct cgs_display_info info = {0, 0, NULL}; - cgs_get_active_displays_info(hwmgr->device, &info); - - if (data->display_timing.num_existing_displays != 
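/*
 * Illustrative sample (not part of the patch) — pp_od_clk_voltage output
 * as produced by the OD_SCLK/OD_MCLK/OD_RANGE branches above; all numbers
 * are made up, and clocks print as clk/100 because the tables store
 * 10 kHz units:
 *
 *	OD_SCLK:
 *	0:        852Mhz        800mV
 *	7:       1536Mhz       1150mV
 *	OD_RANGE:
 *	SCLK:     852MHz       2000MHz
 *	MCLK:     167MHz       1500MHz
 *	VDDC:     800mV        1200mV
 */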
info.display_count) + if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) is_update_required = true; if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep)) { - if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr) + if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr) is_update_required = true; } @@ -4886,6 +4672,166 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui return 0; } + +static bool vega10_check_clk_voltage_valid(struct pp_hwmgr *hwmgr, + enum PP_OD_DPM_TABLE_COMMAND type, + uint32_t clk, + uint32_t voltage) +{ + struct vega10_hwmgr *data = hwmgr->backend; + struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table); + struct vega10_single_dpm_table *golden_table; + + if (voltage < odn_table->min_vddc || voltage > odn_table->max_vddc) { + pr_info("OD voltage is out of range [%d - %d] mV\n", odn_table->min_vddc, odn_table->max_vddc); + return false; + } + + if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) { + golden_table = &(data->golden_dpm_table.gfx_table); + if (golden_table->dpm_levels[0].value > clk || + hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) { + pr_info("OD engine clock is out of range [%d - %d] MHz\n", + golden_table->dpm_levels[0].value/100, + hwmgr->platform_descriptor.overdriveLimit.engineClock/100); + return false; + } + } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) { + golden_table = &(data->golden_dpm_table.mem_table); + if (golden_table->dpm_levels[0].value > clk || + hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) { + pr_info("OD memory clock is out of range [%d - %d] MHz\n", + golden_table->dpm_levels[0].value/100, + hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); + return false; + } + } else { + return false; + } + + return true; +} + +static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr, + enum PP_OD_DPM_TABLE_COMMAND type) +{ + struct vega10_hwmgr *data = hwmgr->backend; + struct phm_ppt_v2_information *table_info = hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = table_info->vdd_dep_on_socclk; + struct vega10_single_dpm_table *dpm_table = &data->golden_dpm_table.soc_table; + + struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_on_socclk = + &data->odn_dpm_table.vdd_dep_on_socclk; + struct vega10_odn_vddc_lookup_table *od_vddc_lookup_table = &data->odn_dpm_table.vddc_lookup_table; + + struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep; + uint8_t i, j; + + if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) { + podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk; + for (i = 0; i < podn_vdd_dep->count - 1; i++) + od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc; + if (od_vddc_lookup_table->entries[i].us_vdd < podn_vdd_dep->entries[i].vddc) + od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc; + } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) { + podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk; + for (i = 0; i < dpm_table->count; i++) { + for (j = 0; j < od_vddc_lookup_table->count; j++) { + if (od_vddc_lookup_table->entries[j].us_vdd > + podn_vdd_dep->entries[i].vddc) + break; + } + if (j == od_vddc_lookup_table->count) { + od_vddc_lookup_table->entries[j-1].us_vdd = + podn_vdd_dep->entries[i].vddc; + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; + } + podn_vdd_dep->entries[i].vddInd = j; + } + dpm_table = &data->dpm_table.soc_table; + for (i = 0; i < dep_table->count; i++) { + 
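/*
 * Illustrative usage sketch (not part of the patch) — a request via
 * vega10_check_clk_voltage_valid() above: 1500 MHz at 1050 mV passes only
 * if the voltage lies within [min_vddc, max_vddc] and the clock (already
 * converted to 10 kHz units, as vega10_odn_edit_dpm_table does) sits
 * between the lowest golden level and the overdrive limit.
 */
static bool od_request_example(struct pp_hwmgr *hwmgr)
{
	return vega10_check_clk_voltage_valid(hwmgr,
			PP_OD_EDIT_SCLK_VDDC_TABLE,
			1500 * 100,	/* MHz -> 10 kHz units */
			1050);		/* mV */
}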
if (dep_table->entries[i].vddInd == podn_vdd_dep->entries[dep_table->count-1].vddInd && + dep_table->entries[i].clk < podn_vdd_dep->entries[dep_table->count-1].clk) { + data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK; + podn_vdd_dep_on_socclk->entries[i].clk = podn_vdd_dep->entries[dep_table->count-1].clk; + dpm_table->dpm_levels[i].value = podn_vdd_dep_on_socclk->entries[i].clk; + } + } + if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk < + podn_vdd_dep->entries[dep_table->count-1].clk) { + data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK; + podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk = podn_vdd_dep->entries[dep_table->count-1].clk; + dpm_table->dpm_levels[podn_vdd_dep_on_socclk->count - 1].value = podn_vdd_dep->entries[dep_table->count-1].clk; + } + if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd < + podn_vdd_dep->entries[dep_table->count-1].vddInd) { + data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK; + podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd = podn_vdd_dep->entries[dep_table->count-1].vddInd; + } + } +} + +static int vega10_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, + enum PP_OD_DPM_TABLE_COMMAND type, + long *input, uint32_t size) +{ + struct vega10_hwmgr *data = hwmgr->backend; + struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_table; + struct vega10_single_dpm_table *dpm_table; + + uint32_t input_clk; + uint32_t input_vol; + uint32_t input_level; + uint32_t i; + + PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage", + return -EINVAL); + + if (!hwmgr->od_enabled) { + pr_info("OverDrive feature not enabled\n"); + return -EINVAL; + } + + if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) { + dpm_table = &data->dpm_table.gfx_table; + podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_sclk; + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; + } else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) { + dpm_table = &data->dpm_table.mem_table; + podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_mclk; + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; + } else if (PP_OD_RESTORE_DEFAULT_TABLE == type) { + memcpy(&(data->dpm_table), &(data->golden_dpm_table), sizeof(struct vega10_dpm_table)); + vega10_odn_initial_default_setting(hwmgr); + return 0; + } else if (PP_OD_COMMIT_DPM_TABLE == type) { + vega10_check_dpm_table_updated(hwmgr); + return 0; + } else { + return -EINVAL; + } + + for (i = 0; i < size; i += 3) { + if (i + 3 > size || input[i] >= podn_vdd_dep_table->count) { + pr_info("invalid clock voltage input\n"); + return 0; + } + input_level = input[i]; + input_clk = input[i+1] * 100; + input_vol = input[i+2]; + + if (vega10_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) { + dpm_table->dpm_levels[input_level].value = input_clk; + podn_vdd_dep_table->entries[input_level].clk = input_clk; + podn_vdd_dep_table->entries[input_level].vddc = input_vol; + } else { + return -EINVAL; + } + } + vega10_odn_update_soc_table(hwmgr, type); + return 0; +} + static const struct pp_hwmgr_func vega10_hwmgr_funcs = { .backend_init = vega10_hwmgr_backend_init, .backend_fini = vega10_hwmgr_backend_fini, @@ -4944,6 +4890,7 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = { .get_power_profile_mode = vega10_get_power_profile_mode, .set_power_profile_mode = vega10_set_power_profile_mode, .set_power_limit = vega10_set_power_limit, + .odn_edit_dpm_table = vega10_odn_edit_dpm_table, }; int vega10_enable_smc_features(struct pp_hwmgr 
*hwmgr, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h index 5339ea1f3dce..339820da9e6a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h @@ -282,15 +282,21 @@ struct vega10_registry_data { struct vega10_odn_clock_voltage_dependency_table { uint32_t count; - struct phm_ppt_v1_clock_voltage_dependency_record - entries[MAX_REGULAR_DPM_NUMBER]; + struct phm_ppt_v1_clock_voltage_dependency_record entries[MAX_REGULAR_DPM_NUMBER]; +}; + +struct vega10_odn_vddc_lookup_table { + uint32_t count; + struct phm_ppt_v1_voltage_lookup_record entries[MAX_REGULAR_DPM_NUMBER]; }; struct vega10_odn_dpm_table { - struct phm_odn_clock_levels odn_core_clock_dpm_levels; - struct phm_odn_clock_levels odn_memory_clock_dpm_levels; - struct vega10_odn_clock_voltage_dependency_table vdd_dependency_on_sclk; - struct vega10_odn_clock_voltage_dependency_table vdd_dependency_on_mclk; + struct vega10_odn_clock_voltage_dependency_table vdd_dep_on_sclk; + struct vega10_odn_clock_voltage_dependency_table vdd_dep_on_mclk; + struct vega10_odn_clock_voltage_dependency_table vdd_dep_on_socclk; + struct vega10_odn_vddc_lookup_table vddc_lookup_table; + uint32_t max_vddc; + uint32_t min_vddc; }; struct vega10_odn_fan_table { @@ -301,8 +307,8 @@ struct vega10_odn_fan_table { }; struct vega10_hwmgr { - struct vega10_dpm_table dpm_table; - struct vega10_dpm_table golden_dpm_table; + struct vega10_dpm_table dpm_table; + struct vega10_dpm_table golden_dpm_table; struct vega10_registry_data registry_data; struct vega10_vbios_boot_state vbios_boot_state; struct vega10_mclk_latency_table mclk_latency_table; @@ -364,16 +370,11 @@ struct vega10_hwmgr { /* ---- Power Gating States ---- */ bool uvd_power_gated; bool vce_power_gated; - bool samu_power_gated; bool need_long_memory_training; /* Internal settings to apply the application power optimization parameters */ - bool apply_optimized_settings; uint32_t disable_dpm_mask; - /* ---- Overdrive next setting ---- */ - uint32_t apply_overdrive_next_settings_mask; - /* ---- SMU9 ---- */ struct smu_features smu_features[GNLD_FEATURES_MAX]; struct vega10_smc_state_table smc_state_table; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c index ba63faefc61f..22364875a943 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c @@ -27,7 +27,7 @@ #include "vega10_ppsmc.h" #include "vega10_inc.h" #include "pp_debug.h" -#include "pp_soc15.h" +#include "soc15_common.h" static const struct vega10_didt_config_reg SEDiDtTuningCtrlConfig_Vega10[] = { @@ -888,36 +888,36 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable) if (PP_CAP(PHM_PlatformCaps_DiDtEDCEnable)) { if (PP_CAP(PHM_PlatformCaps_SQRamping)) { data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL); - data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_EN, en); - data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_SW_RST, ~en); + data = REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_EN, en); + data = REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_SW_RST, ~en); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL, data); } if (PP_CAP(PHM_PlatformCaps_DBRamping)) { data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL); - data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, 
EDC_EN, en); - data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_SW_RST, ~en); + data = REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_EN, en); + data = REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_SW_RST, ~en); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL, data); } if (PP_CAP(PHM_PlatformCaps_TDRamping)) { data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL); - data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_EN, en); - data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_SW_RST, ~en); + data = REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_EN, en); + data = REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_SW_RST, ~en); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL, data); } if (PP_CAP(PHM_PlatformCaps_TCPRamping)) { data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL); - data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_EN, en); - data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_SW_RST, ~en); + data = REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_EN, en); + data = REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_SW_RST, ~en); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL, data); } if (PP_CAP(PHM_PlatformCaps_DBRRamping)) { data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL); - data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_EN, en); - data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_SW_RST, ~en); + data = REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_EN, en); + data = REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_SW_RST, ~en); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL, data); } } @@ -930,20 +930,18 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable) static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; int result; uint32_t num_se = 0, count, data; - struct amdgpu_device *adev = hwmgr->adev; - uint32_t reg; num_se = adev->gfx.config.max_shader_engines; - cgs_enter_safe_mode(hwmgr->device, true); + adev->gfx.rlc.funcs->enter_safe_mode(adev); - cgs_lock_grbm_idx(hwmgr->device, true); - reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); + mutex_lock(&adev->grbm_idx_mutex); for (count = 0; count < num_se; count++) { data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); - cgs_write_register(hwmgr->device, reg, data); + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); result = vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT); result |= vega10_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega10, VEGA10_CONFIGREG_DIDT); @@ -958,43 +956,43 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr) if (0 != result) break; } - cgs_write_register(hwmgr->device, reg, 0xE0000000); - cgs_lock_grbm_idx(hwmgr->device, false); + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000); + mutex_unlock(&adev->grbm_idx_mutex); vega10_didt_set_mask(hwmgr, true); - cgs_enter_safe_mode(hwmgr->device, false); + adev->gfx.rlc.funcs->exit_safe_mode(adev); return 0; } static int vega10_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr) { - cgs_enter_safe_mode(hwmgr->device, true); + struct amdgpu_device *adev = hwmgr->adev; + + adev->gfx.rlc.funcs->enter_safe_mode(adev); vega10_didt_set_mask(hwmgr, false); - 
cgs_enter_safe_mode(hwmgr->device, false); + adev->gfx.rlc.funcs->exit_safe_mode(adev); return 0; } static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; int result; uint32_t num_se = 0, count, data; - struct amdgpu_device *adev = hwmgr->adev; - uint32_t reg; num_se = adev->gfx.config.max_shader_engines; - cgs_enter_safe_mode(hwmgr->device, true); + adev->gfx.rlc.funcs->enter_safe_mode(adev); - cgs_lock_grbm_idx(hwmgr->device, true); - reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); + mutex_lock(&adev->grbm_idx_mutex); for (count = 0; count < num_se; count++) { data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); - cgs_write_register(hwmgr->device, reg, data); + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); result = vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT); result |= vega10_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega10, VEGA10_CONFIGREG_DIDT); @@ -1003,12 +1001,12 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) if (0 != result) break; } - cgs_write_register(hwmgr->device, reg, 0xE0000000); - cgs_lock_grbm_idx(hwmgr->device, false); + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000); + mutex_unlock(&adev->grbm_idx_mutex); vega10_didt_set_mask(hwmgr, true); - cgs_enter_safe_mode(hwmgr->device, false); + adev->gfx.rlc.funcs->exit_safe_mode(adev); vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10); if (PP_CAP(PHM_PlatformCaps_GCEDC)) @@ -1022,13 +1020,14 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; uint32_t data; - cgs_enter_safe_mode(hwmgr->device, true); + adev->gfx.rlc.funcs->enter_safe_mode(adev); vega10_didt_set_mask(hwmgr, false); - cgs_enter_safe_mode(hwmgr->device, false); + adev->gfx.rlc.funcs->exit_safe_mode(adev); if (PP_CAP(PHM_PlatformCaps_GCEDC)) { data = 0x00000000; @@ -1043,20 +1042,18 @@ static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; int result; uint32_t num_se = 0, count, data; - struct amdgpu_device *adev = hwmgr->adev; - uint32_t reg; num_se = adev->gfx.config.max_shader_engines; - cgs_enter_safe_mode(hwmgr->device, true); + adev->gfx.rlc.funcs->enter_safe_mode(adev); - cgs_lock_grbm_idx(hwmgr->device, true); - reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); + mutex_lock(&adev->grbm_idx_mutex); for (count = 0; count < num_se; count++) { data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); - cgs_write_register(hwmgr->device, reg, data); + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); result = vega10_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega10, VEGA10_CONFIGREG_DIDT); result |= vega10_program_didt_config_registers(hwmgr, SEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT); result |= vega10_program_didt_config_registers(hwmgr, SEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT); @@ -1067,47 +1064,47 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr) if (0 != result) break; } - cgs_write_register(hwmgr->device, reg, 0xE0000000); 
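/*
 * The loop below follows the driver's usual per-shader-engine programming
 * pattern that this patch converts from cgs wrappers to direct register
 * access: GRBM_GFX_INDEX selects one SE at a time (SH and instance
 * broadcast kept on), the DIDT registers are programmed for that SE, and
 * 0xE0000000 (all broadcast bits set) restores broadcast mode before the
 * grbm_idx mutex is released. A minimal sketch of the pattern follows;
 * program_one_se() is a hypothetical helper standing in for the chain of
 * vega10_program_didt_config_registers() calls, not a function in this patch.
 */
static int for_each_se_program(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t se, data;
	int ret = 0;

	mutex_lock(&adev->grbm_idx_mutex);
	for (se = 0; se < adev->gfx.config.max_shader_engines; se++) {
		/* target a single SE, broadcast across its SHs and instances */
		data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK |
		       GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
		       (se << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
		WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
		ret = program_one_se(hwmgr);	/* hypothetical helper */
		if (ret)
			break;
	}
	/* restore SE/SH/instance broadcast before releasing the index lock */
	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
	mutex_unlock(&adev->grbm_idx_mutex);
	return ret;
}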
- cgs_lock_grbm_idx(hwmgr->device, false); + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000); + mutex_unlock(&adev->grbm_idx_mutex); vega10_didt_set_mask(hwmgr, true); - cgs_enter_safe_mode(hwmgr->device, false); + adev->gfx.rlc.funcs->exit_safe_mode(adev); return 0; } static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr) { - cgs_enter_safe_mode(hwmgr->device, true); + struct amdgpu_device *adev = hwmgr->adev; + + adev->gfx.rlc.funcs->enter_safe_mode(adev); vega10_didt_set_mask(hwmgr, false); - cgs_enter_safe_mode(hwmgr->device, false); + adev->gfx.rlc.funcs->exit_safe_mode(adev); return 0; } static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) { - int result; + struct amdgpu_device *adev = hwmgr->adev; + int result = 0; uint32_t num_se = 0; uint32_t count, data; - struct amdgpu_device *adev = hwmgr->adev; - uint32_t reg; num_se = adev->gfx.config.max_shader_engines; - cgs_enter_safe_mode(hwmgr->device, true); + adev->gfx.rlc.funcs->enter_safe_mode(adev); vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10); - cgs_lock_grbm_idx(hwmgr->device, true); - reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); + mutex_lock(&adev->grbm_idx_mutex); for (count = 0; count < num_se; count++) { data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); - cgs_write_register(hwmgr->device, reg, data); - result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT); + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); + result = vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT); result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT); result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlResetConfig_Vega10, VEGA10_CONFIGREG_DIDT); result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlConfig_Vega10, VEGA10_CONFIGREG_DIDT); @@ -1115,12 +1112,12 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) if (0 != result) break; } - cgs_write_register(hwmgr->device, reg, 0xE0000000); - cgs_lock_grbm_idx(hwmgr->device, false); + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000); + mutex_unlock(&adev->grbm_idx_mutex); vega10_didt_set_mask(hwmgr, true); - cgs_enter_safe_mode(hwmgr->device, false); + adev->gfx.rlc.funcs->exit_safe_mode(adev); vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10); @@ -1137,13 +1134,14 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; uint32_t data; - cgs_enter_safe_mode(hwmgr->device, true); + adev->gfx.rlc.funcs->enter_safe_mode(adev); vega10_didt_set_mask(hwmgr, false); - cgs_enter_safe_mode(hwmgr->device, false); + adev->gfx.rlc.funcs->exit_safe_mode(adev); if (PP_CAP(PHM_PlatformCaps_GCEDC)) { data = 0x00000000; @@ -1158,15 +1156,14 @@ static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr) { - uint32_t reg; + struct amdgpu_device *adev = hwmgr->adev; int result; - cgs_enter_safe_mode(hwmgr->device, true); + adev->gfx.rlc.funcs->enter_safe_mode(adev); - cgs_lock_grbm_idx(hwmgr->device, true); - reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); - 
cgs_write_register(hwmgr->device, reg, 0xE0000000); - cgs_lock_grbm_idx(hwmgr->device, false); + mutex_lock(&adev->grbm_idx_mutex); + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000); + mutex_unlock(&adev->grbm_idx_mutex); result = vega10_program_didt_config_registers(hwmgr, SEEDCForceStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT); result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlForceStallConfig_Vega10, VEGA10_CONFIGREG_DIDT); @@ -1175,7 +1172,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr) vega10_didt_set_mask(hwmgr, false); - cgs_enter_safe_mode(hwmgr->device, false); + adev->gfx.rlc.funcs->exit_safe_mode(adev); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c index c61d0744860d..16b1a9cf6cf0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c @@ -52,7 +52,7 @@ static const void *get_powerplay_table(struct pp_hwmgr *hwmgr) if (!table_address) { table_address = (ATOM_Vega10_POWERPLAYTABLE *) - cgs_atom_get_data_table(hwmgr->device, index, + smu_atom_get_data_table(hwmgr->adev, index, &size, &frev, &crev); hwmgr->soft_pp_table = table_address; /*Cache the result in RAM.*/ @@ -267,12 +267,6 @@ static int init_over_drive_limits( hwmgr->platform_descriptor.maxOverdriveVDDC = 0; hwmgr->platform_descriptor.overdriveVDDCStep = 0; - if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0 || - hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) { - hwmgr->od_enabled = false; - pr_debug("OverDrive feature not support by VBIOS\n"); - } - return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index 9f18226a56ea..aa044c1955fe 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -25,7 +25,7 @@ #include "vega10_hwmgr.h" #include "vega10_ppsmc.h" #include "vega10_inc.h" -#include "pp_soc15.h" +#include "soc15_common.h" #include "pp_debug.h" static int vega10_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm) @@ -89,6 +89,7 @@ int vega10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) { + struct amdgpu_device *adev = hwmgr->adev; struct vega10_hwmgr *data = hwmgr->backend; uint32_t tach_period; uint32_t crystal_clock_freq; @@ -100,10 +101,8 @@ int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) if (data->smu_features[GNLD_FAN_CONTROL].supported) { result = vega10_get_current_rpm(hwmgr, speed); } else { - uint32_t reg = soc15_get_register_offset(THM_HWID, 0, - mmCG_TACH_STATUS_BASE_IDX, mmCG_TACH_STATUS); tach_period = - CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg), + REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_STATUS), CG_TACH_STATUS, TACH_PERIOD); @@ -127,26 +126,23 @@ int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) */ int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode) { - uint32_t reg; - - reg = soc15_get_register_offset(THM_HWID, 0, - mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2); + struct amdgpu_device *adev = hwmgr->adev; if (hwmgr->fan_ctrl_is_in_default_mode) { hwmgr->fan_ctrl_default_mode = - CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg), + REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2), 
CG_FDO_CTRL2, FDO_PWM_MODE); hwmgr->tmin = - CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg), + REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2), CG_FDO_CTRL2, TMIN); hwmgr->fan_ctrl_is_in_default_mode = false; } - cgs_write_register(hwmgr->device, reg, - CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2, + REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2), CG_FDO_CTRL2, TMIN, 0)); - cgs_write_register(hwmgr->device, reg, - CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2, + REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2), CG_FDO_CTRL2, FDO_PWM_MODE, mode)); return 0; @@ -159,18 +155,15 @@ int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode) */ int vega10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr) { - uint32_t reg; - - reg = soc15_get_register_offset(THM_HWID, 0, - mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2); + struct amdgpu_device *adev = hwmgr->adev; if (!hwmgr->fan_ctrl_is_in_default_mode) { - cgs_write_register(hwmgr->device, reg, - CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2, + REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2), CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode)); - cgs_write_register(hwmgr->device, reg, - CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2, + REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2), CG_FDO_CTRL2, TMIN, hwmgr->tmin << CG_FDO_CTRL2__TMIN__SHIFT)); hwmgr->fan_ctrl_is_in_default_mode = true; @@ -257,10 +250,10 @@ int vega10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr) int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed) { + struct amdgpu_device *adev = hwmgr->adev; uint32_t duty100; uint32_t duty; uint64_t tmp64; - uint32_t reg; if (hwmgr->thermal_controller.fanInfo.bNoFan) return 0; @@ -271,10 +264,7 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) vega10_fan_ctrl_stop_smc_fan_control(hwmgr); - reg = soc15_get_register_offset(THM_HWID, 0, - mmCG_FDO_CTRL1_BASE_IDX, mmCG_FDO_CTRL1); - - duty100 = CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg), + duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1), CG_FDO_CTRL1, FMAX_DUTY100); if (duty100 == 0) @@ -284,10 +274,8 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, do_div(tmp64, 100); duty = (uint32_t)tmp64; - reg = soc15_get_register_offset(THM_HWID, 0, - mmCG_FDO_CTRL0_BASE_IDX, mmCG_FDO_CTRL0); - cgs_write_register(hwmgr->device, reg, - CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0, + REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0), CG_FDO_CTRL0, FDO_STATIC_DUTY, duty)); return vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); @@ -317,10 +305,10 @@ int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) */ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) { + struct amdgpu_device *adev = hwmgr->adev; uint32_t tach_period; uint32_t crystal_clock_freq; int result = 0; - uint32_t reg; if (hwmgr->thermal_controller.fanInfo.bNoFan || (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || @@ -333,10 +321,8 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) if (!result) { crystal_clock_freq = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev); tach_period = 60 * crystal_clock_freq * 10000 / 
(8 * speed); - reg = soc15_get_register_offset(THM_HWID, 0, - mmCG_TACH_STATUS_BASE_IDX, mmCG_TACH_STATUS); - cgs_write_register(hwmgr->device, reg, - CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + WREG32_SOC15(THM, 0, mmCG_TACH_STATUS, + REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_STATUS), CG_TACH_STATUS, TACH_PERIOD, tach_period)); } @@ -350,13 +336,10 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) */ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; int temp; - uint32_t reg; - reg = soc15_get_register_offset(THM_HWID, 0, - mmCG_MULT_THERMAL_STATUS_BASE_IDX, mmCG_MULT_THERMAL_STATUS); - - temp = cgs_read_register(hwmgr->device, reg); + temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS); temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >> CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT; @@ -379,11 +362,12 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr) static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range) { + struct amdgpu_device *adev = hwmgr->adev; int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - uint32_t val, reg; + uint32_t val; if (low < range->min) low = range->min; @@ -393,20 +377,17 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, if (low > high) return -EINVAL; - reg = soc15_get_register_offset(THM_HWID, 0, - mmTHM_THERMAL_INT_CTRL_BASE_IDX, mmTHM_THERMAL_INT_CTRL); - - val = cgs_read_register(hwmgr->device, reg); + val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL); - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); val &= (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK) & (~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK) & (~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK); - cgs_write_register(hwmgr->device, reg, val); + WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); return 0; } @@ -418,21 +399,17 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, */ static int vega10_thermal_initialize(struct pp_hwmgr *hwmgr) { - uint32_t reg; + struct amdgpu_device *adev = hwmgr->adev; if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) { - reg = soc15_get_register_offset(THM_HWID, 0, - mmCG_TACH_CTRL_BASE_IDX, mmCG_TACH_CTRL); - cgs_write_register(hwmgr->device, reg, - CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + WREG32_SOC15(THM, 0, mmCG_TACH_CTRL, + REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL), CG_TACH_CTRL, EDGE_PER_REV, hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1)); } - reg = soc15_get_register_offset(THM_HWID, 0, - mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2); - cgs_write_register(hwmgr->device, reg, - 
CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2, + REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2), CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28)); return 0; @@ -445,9 +422,9 @@ static int vega10_thermal_initialize(struct pp_hwmgr *hwmgr) */ static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; struct vega10_hwmgr *data = hwmgr->backend; uint32_t val = 0; - uint32_t reg; if (data->smu_features[GNLD_FW_CTF].supported) { if (data->smu_features[GNLD_FW_CTF].enabled) @@ -465,8 +442,7 @@ static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr) val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT); val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT); - reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA); - cgs_write_register(hwmgr->device, reg, val); + WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val); return 0; } @@ -477,8 +453,8 @@ static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr) */ int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; struct vega10_hwmgr *data = hwmgr->backend; - uint32_t reg; if (data->smu_features[GNLD_FW_CTF].supported) { if (!data->smu_features[GNLD_FW_CTF].enabled) @@ -493,8 +469,7 @@ int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr) data->smu_features[GNLD_FW_CTF].enabled = false; } - reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA); - cgs_write_register(hwmgr->device, reg, 0); + WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index 200de46bd06b..4ed218dd8ba7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c @@ -34,7 +34,6 @@ #include "atomfirmware.h" #include "cgs_common.h" #include "vega12_inc.h" -#include "pp_soc15.h" #include "pppcielanes.h" #include "vega12_hwmgr.h" #include "vega12_processpptables.h" @@ -82,6 +81,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr) data->registry_data.disallowed_features = 0x0; data->registry_data.od_state_in_dc_support = 0; + data->registry_data.thermal_support = 1; data->registry_data.skip_baco_hardware = 0; data->registry_data.log_avfs_param = 0; @@ -423,6 +423,11 @@ static int vega12_hwmgr_backend_init(struct pp_hwmgr *hwmgr) hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit * hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100; + if (hwmgr->feature_mask & PP_GFXOFF_MASK) + data->gfxoff_controlled_by_driver = true; + else + data->gfxoff_controlled_by_driver = false; + return result; } @@ -454,43 +459,36 @@ static int vega12_setup_asic_task(struct pp_hwmgr *hwmgr) */ static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state) { - dpm_state->soft_min_level = 0xff; - dpm_state->soft_max_level = 0xff; - dpm_state->hard_min_level = 0xff; - dpm_state->hard_max_level = 0xff; + dpm_state->soft_min_level = 0x0; + dpm_state->soft_max_level = 0xffff; + dpm_state->hard_min_level = 0x0; + dpm_state->hard_max_level = 0xffff; } -static int vega12_get_number_dpm_level(struct pp_hwmgr *hwmgr, - PPCLK_e clkID, uint32_t *num_dpm_level) +static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr, + PPCLK_e clk_id, uint32_t *num_of_levels) { - int result; - /* - * SMU expects the Clock ID to be in the top 16 bits. 
- * Lower 16 bits specify the level however 0xFF is a - * special argument the returns the total number of levels - */ - PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | 0xFF)) == 0, - "[GetNumberDpmLevel] Failed to get DPM levels from SMU for CLKID!", - return -EINVAL); - - result = vega12_read_arg_from_smc(hwmgr, num_dpm_level); + int ret = 0; - PP_ASSERT_WITH_CODE(*num_dpm_level < MAX_REGULAR_DPM_NUMBER, - "[GetNumberDPMLevel] Number of DPM levels is greater than limit", - return -EINVAL); + ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_GetDpmFreqByIndex, + (clk_id << 16 | 0xFF)); + PP_ASSERT_WITH_CODE(!ret, + "[GetNumOfDpmLevel] failed to get dpm levels!", + return ret); - PP_ASSERT_WITH_CODE(*num_dpm_level != 0, - "[GetNumberDPMLevel] Number of CLK Levels is zero!", - return -EINVAL); + *num_of_levels = smum_get_argument(hwmgr); + PP_ASSERT_WITH_CODE(*num_of_levels > 0, + "[GetNumOfDpmLevel] number of clk levels is invalid!", + return -EINVAL); - return result; + return ret; } static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr, PPCLK_e clkID, uint32_t index, uint32_t *clock) { - int result; + int result = 0; /* *SMU expects the Clock ID to be in the top 16 bits. @@ -501,15 +499,36 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr, "[GetDpmFrequencyByIndex] Failed to get dpm frequency from SMU!", return -EINVAL); - result = vega12_read_arg_from_smc(hwmgr, clock); - - PP_ASSERT_WITH_CODE(*clock != 0, - "[GetDPMFrequencyByIndex] Failed to get dpm frequency by index.!", - return -EINVAL); + *clock = smum_get_argument(hwmgr); return result; } +static int vega12_setup_single_dpm_table(struct pp_hwmgr *hwmgr, + struct vega12_single_dpm_table *dpm_table, PPCLK_e clk_id) +{ + int ret = 0; + uint32_t i, num_of_levels, clk; + + ret = vega12_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels); + PP_ASSERT_WITH_CODE(!ret, + "[SetupSingleDpmTable] failed to get clk levels!", + return ret); + + dpm_table->count = num_of_levels; + + for (i = 0; i < num_of_levels; i++) { + ret = vega12_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk); + PP_ASSERT_WITH_CODE(!ret, + "[SetupSingleDpmTable] failed to get clk of specific level!", + return ret); + dpm_table->dpm_levels[i].value = clk; + dpm_table->dpm_levels[i].enabled = true; + } + + return ret; +} + /* * This function is to initialize all DPM state tables * for SMU based on the dependency table. 
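Both helpers above pack their SMU argument the same way: the clock ID goes in the upper 16 bits and the level index in the lower 16, with index 0xFF asking for the level count rather than a frequency; smum_get_argument() then reads the SMU's reply. A minimal sketch of walking one clock's DPM levels with that encoding — dump_dpm_levels() and its debug print are illustrative, not part of this patch, and the returned value is taken to be in MHz, matching the *1000-to-kHz conversions later in this file:

static int dump_dpm_levels(struct pp_hwmgr *hwmgr, PPCLK_e clk_id)
{
	uint32_t i, num_of_levels, clk;
	int ret;

	/* index 0xFF in the low 16 bits requests the number of levels */
	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetDpmFreqByIndex, (clk_id << 16) | 0xFF);
	if (ret)
		return ret;
	num_of_levels = smum_get_argument(hwmgr);

	for (i = 0; i < num_of_levels; i++) {
		/* a real index in the low 16 bits returns that level's frequency */
		ret = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_GetDpmFreqByIndex, (clk_id << 16) | i);
		if (ret)
			return ret;
		clk = smum_get_argument(hwmgr);
		pr_debug("clk %d level %u: %u MHz\n", clk_id, i, clk);
	}

	return 0;
}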
@@ -520,214 +539,136 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr, */ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) { - uint32_t num_levels, i, clock; struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); - struct vega12_single_dpm_table *dpm_table; + int ret = 0; memset(&data->dpm_table, 0, sizeof(data->dpm_table)); - /* Initialize Sclk DPM and SOC DPM table based on allow Sclk values */ + /* socclk */ dpm_table = &(data->dpm_table.soc_table); - - PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_SOCCLK, - &num_levels) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for SOCCLK!", - return -EINVAL); - - dpm_table->count = num_levels; - - for (i = 0; i < num_levels; i++) { - PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr, - PPCLK_SOCCLK, i, &clock) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for SOCCLK!", - return -EINVAL); - - dpm_table->dpm_levels[i].value = clock; + if (data->smu_features[GNLD_DPM_SOCCLK].enabled) { + ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get socclk dpm levels!", + return ret); + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100; } - vega12_init_dpm_state(&(dpm_table->dpm_state)); + /* gfxclk */ dpm_table = &(data->dpm_table.gfx_table); - - PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_GFXCLK, - &num_levels) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for GFXCLK!", - return -EINVAL); - - dpm_table->count = num_levels; - for (i = 0; i < num_levels; i++) { - PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr, - PPCLK_GFXCLK, i, &clock) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for GFXCLK!", - return -EINVAL); - - dpm_table->dpm_levels[i].value = clock; + if (data->smu_features[GNLD_DPM_GFXCLK].enabled) { + ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get gfxclk dpm levels!", + return ret); + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100; } - vega12_init_dpm_state(&(dpm_table->dpm_state)); - /* Initialize Mclk DPM table based on allow Mclk values */ - dpm_table = &(data->dpm_table.mem_table); - - PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_UCLK, - &num_levels) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for UCLK!", - return -EINVAL); - - dpm_table->count = num_levels; - - for (i = 0; i < num_levels; i++) { - PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr, - PPCLK_UCLK, i, &clock) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for UCLK!", - return -EINVAL); - dpm_table->dpm_levels[i].value = clock; + /* memclk */ + dpm_table = &(data->dpm_table.mem_table); + if (data->smu_features[GNLD_DPM_UCLK].enabled) { + ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get memclk dpm levels!", + return ret); + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100; } - vega12_init_dpm_state(&(dpm_table->dpm_state)); + /* eclk */ dpm_table = &(data->dpm_table.eclk_table); - - PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_ECLK, - &num_levels) == 0, - "[SetupDefaultDPMTables] 
Failed to get DPM levels from SMU for ECLK!", - return -EINVAL); - - dpm_table->count = num_levels; - - for (i = 0; i < num_levels; i++) { - PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr, - PPCLK_ECLK, i, &clock) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for ECLK!", - return -EINVAL); - - dpm_table->dpm_levels[i].value = clock; + if (data->smu_features[GNLD_DPM_VCE].enabled) { + ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get eclk dpm levels!", + return ret); + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100; } - vega12_init_dpm_state(&(dpm_table->dpm_state)); + /* vclk */ dpm_table = &(data->dpm_table.vclk_table); - - PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_VCLK, - &num_levels) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for VCLK!", - return -EINVAL); - - dpm_table->count = num_levels; - - for (i = 0; i < num_levels; i++) { - PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr, - PPCLK_VCLK, i, &clock) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for VCLK!", - return -EINVAL); - - dpm_table->dpm_levels[i].value = clock; + if (data->smu_features[GNLD_DPM_UVD].enabled) { + ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get vclk dpm levels!", + return ret); + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100; } - vega12_init_dpm_state(&(dpm_table->dpm_state)); + /* dclk */ dpm_table = &(data->dpm_table.dclk_table); - - PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_DCLK, - &num_levels) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCLK!", - return -EINVAL); - - dpm_table->count = num_levels; - - for (i = 0; i < num_levels; i++) { - PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr, - PPCLK_DCLK, i, &clock) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCLK!", - return -EINVAL); - - dpm_table->dpm_levels[i].value = clock; + if (data->smu_features[GNLD_DPM_UVD].enabled) { + ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get dclk dpm levels!", + return ret); + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100; } - vega12_init_dpm_state(&(dpm_table->dpm_state)); - /* Assume there is no headless Vega12 for now */ + /* dcefclk */ dpm_table = &(data->dpm_table.dcef_table); - - PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, - PPCLK_DCEFCLK, &num_levels) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCEFCLK!", - return -EINVAL); - - dpm_table->count = num_levels; - - for (i = 0; i < num_levels; i++) { - PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr, - PPCLK_DCEFCLK, i, &clock) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCEFCLK!", - return -EINVAL); - - dpm_table->dpm_levels[i].value = clock; + if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { + ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get dcefclk dpm levels!", + return ret); + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100; } - 
vega12_init_dpm_state(&(dpm_table->dpm_state)); + /* pixclk */ dpm_table = &(data->dpm_table.pixel_table); - - PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, - PPCLK_PIXCLK, &num_levels) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PIXCLK!", - return -EINVAL); - - dpm_table->count = num_levels; - - for (i = 0; i < num_levels; i++) { - PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr, - PPCLK_PIXCLK, i, &clock) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PIXCLK!", - return -EINVAL); - - dpm_table->dpm_levels[i].value = clock; - } - + if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { + ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get pixclk dpm levels!", + return ret); + } else + dpm_table->count = 0; vega12_init_dpm_state(&(dpm_table->dpm_state)); + /* dispclk */ dpm_table = &(data->dpm_table.display_table); - - PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, - PPCLK_DISPCLK, &num_levels) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DISPCLK!", - return -EINVAL); - - dpm_table->count = num_levels; - - for (i = 0; i < num_levels; i++) { - PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr, - PPCLK_DISPCLK, i, &clock) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DISPCLK!", - return -EINVAL); - - dpm_table->dpm_levels[i].value = clock; - } - + if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { + ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get dispclk dpm levels!", + return ret); + } else + dpm_table->count = 0; vega12_init_dpm_state(&(dpm_table->dpm_state)); + /* phyclk */ dpm_table = &(data->dpm_table.phy_table); - - PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, - PPCLK_PHYCLK, &num_levels) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PHYCLK!", - return -EINVAL); - - dpm_table->count = num_levels; - - for (i = 0; i < num_levels; i++) { - PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr, - PPCLK_PHYCLK, i, &clock) == 0, - "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PHYCLK!", - return -EINVAL); - - dpm_table->dpm_levels[i].value = clock; - } - + if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { + ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get phyclk dpm levels!", + return ret); + } else + dpm_table->count = 0; vega12_init_dpm_state(&(dpm_table->dpm_state)); /* save a copy of the default DPM table */ @@ -794,6 +735,9 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr) data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk; data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk; data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID; + data->vbios_boot_state.eclock = boot_up_values.ulEClk; + data->vbios_boot_state.dclock = boot_up_values.ulDClk; + data->vbios_boot_state.vclock = boot_up_values.ulVClk; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, (uint32_t)(data->vbios_boot_state.dcef_clock / 100)); @@ -835,6 +779,21 @@ static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr) return 0; } +static void vega12_init_powergate_state(struct pp_hwmgr *hwmgr) +{ + struct vega12_hwmgr *data = + (struct vega12_hwmgr *)(hwmgr->backend); + + data->uvd_power_gated = true; 
+ data->vce_power_gated = true; + + if (data->smu_features[GNLD_DPM_UVD].enabled) + data->uvd_power_gated = false; + + if (data->smu_features[GNLD_DPM_VCE].enabled) + data->vce_power_gated = false; +} + static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr) { struct vega12_hwmgr *data = @@ -853,12 +812,11 @@ static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr) enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false; data->smu_features[i].enabled = enabled; data->smu_features[i].supported = enabled; - PP_ASSERT( - !data->smu_features[i].allowed || enabled, - "[EnableAllSMUFeatures] Enabled feature is different from allowed, expected disabled!"); } } + vega12_init_powergate_state(hwmgr); + return 0; } @@ -914,6 +872,48 @@ static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr) return result; } +static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr *hwmgr, + PPCLK_e clkid, struct vega12_clock_range *clock) +{ + /* AC Max */ + PP_ASSERT_WITH_CODE( + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16)) == 0, + "[GetClockRanges] Failed to get max ac clock from SMC!", + return -EINVAL); + clock->ACMax = smum_get_argument(hwmgr); + + /* AC Min */ + PP_ASSERT_WITH_CODE( + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16)) == 0, + "[GetClockRanges] Failed to get min ac clock from SMC!", + return -EINVAL); + clock->ACMin = smum_get_argument(hwmgr); + + /* DC Max */ + PP_ASSERT_WITH_CODE( + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16)) == 0, + "[GetClockRanges] Failed to get max dc clock from SMC!", + return -EINVAL); + clock->DCMax = smum_get_argument(hwmgr); + + return 0; +} + +static int vega12_get_all_clock_ranges(struct pp_hwmgr *hwmgr) +{ + struct vega12_hwmgr *data = + (struct vega12_hwmgr *)(hwmgr->backend); + uint32_t i; + + for (i = 0; i < PPCLK_COUNT; i++) + PP_ASSERT_WITH_CODE(!vega12_get_all_clock_ranges_helper(hwmgr, + i, &(data->clk_range[i])), + "Failed to get clk range from SMC!", + return -EINVAL); + + return 0; +} + static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr) { int tmp_result, result = 0; @@ -941,6 +941,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr) "Failed to power control set level!", result = tmp_result); + result = vega12_get_all_clock_ranges(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to get all clock ranges!", + return result); + result = vega12_odn_initialize_default_settings(hwmgr); PP_ASSERT_WITH_CODE(!result, "Failed to power control set level!", @@ -969,37 +974,173 @@ static uint32_t vega12_find_lowest_dpm_level( break; } + if (i >= table->count) { + i = 0; + table->dpm_levels[i].enabled = true; + } + return i; } static uint32_t vega12_find_highest_dpm_level( struct vega12_single_dpm_table *table) { - uint32_t i = 0; + int32_t i = 0; + PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER, + "[FindHighestDPMLevel] DPM Table has too many entries!", + return MAX_REGULAR_DPM_NUMBER - 1); - if (table->count <= MAX_REGULAR_DPM_NUMBER) { - for (i = table->count; i > 0; i--) { - if (table->dpm_levels[i - 1].enabled) - return i - 1; - } - } else { - pr_info("DPM Table Has Too Many Entries!"); - return MAX_REGULAR_DPM_NUMBER - 1; + for (i = table->count - 1; i >= 0; i--) { + if (table->dpm_levels[i].enabled) + break; } - return i; + if (i < 0) { + i = 0; + table->dpm_levels[i].enabled = true; + } + + return (uint32_t)i; } static int 
vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr) { - return 0; + struct vega12_hwmgr *data = hwmgr->backend; + uint32_t min_freq; + int ret = 0; + + if (data->smu_features[GNLD_DPM_GFXCLK].enabled) { + min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level; + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMinByFreq, + (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))), + "Failed to set soft min gfxclk !", + return ret); + } + + if (data->smu_features[GNLD_DPM_UCLK].enabled) { + min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level; + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMinByFreq, + (PPCLK_UCLK << 16) | (min_freq & 0xffff))), + "Failed to set soft min memclk !", + return ret); + + min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level; + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetHardMinByFreq, + (PPCLK_UCLK << 16) | (min_freq & 0xffff))), + "Failed to set hard min memclk !", + return ret); + } + + if (data->smu_features[GNLD_DPM_UVD].enabled) { + min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMinByFreq, + (PPCLK_VCLK << 16) | (min_freq & 0xffff))), + "Failed to set soft min vclk!", + return ret); + + min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMinByFreq, + (PPCLK_DCLK << 16) | (min_freq & 0xffff))), + "Failed to set soft min dclk!", + return ret); + } + + if (data->smu_features[GNLD_DPM_VCE].enabled) { + min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMinByFreq, + (PPCLK_ECLK << 16) | (min_freq & 0xffff))), + "Failed to set soft min eclk!", + return ret); + } + + if (data->smu_features[GNLD_DPM_SOCCLK].enabled) { + min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMinByFreq, + (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))), + "Failed to set soft min socclk!", + return ret); + } + + return ret; + } static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr) { - return 0; -} + struct vega12_hwmgr *data = hwmgr->backend; + uint32_t max_freq; + int ret = 0; + if (data->smu_features[GNLD_DPM_GFXCLK].enabled) { + max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMaxByFreq, + (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))), + "Failed to set soft max gfxclk!", + return ret); + } + + if (data->smu_features[GNLD_DPM_UCLK].enabled) { + max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMaxByFreq, + (PPCLK_UCLK << 16) | (max_freq & 0xffff))), + "Failed to set soft max memclk!", + return ret); + } + + if (data->smu_features[GNLD_DPM_UVD].enabled) { + max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMaxByFreq, + (PPCLK_VCLK << 16) | (max_freq & 0xffff))), + "Failed to set soft max vclk!", + return ret); + + max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level; + 
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMaxByFreq, + (PPCLK_DCLK << 16) | (max_freq & 0xffff))), + "Failed to set soft max dclk!", + return ret); + } + + if (data->smu_features[GNLD_DPM_VCE].enabled) { + max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMaxByFreq, + (PPCLK_ECLK << 16) | (max_freq & 0xffff))), + "Failed to set soft max eclk!", + return ret); + } + + if (data->smu_features[GNLD_DPM_SOCCLK].enabled) { + max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMaxByFreq, + (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))), + "Failed to set soft max socclk!", + return ret); + } + + return ret; +} int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) { @@ -1064,8 +1205,7 @@ static uint32_t vega12_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) return (mem_clk * 100); } -static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, - struct pp_gpu_power *query) +static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query) { #if 0 uint32_t value; @@ -1075,9 +1215,9 @@ static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, "Failed to get current package power!", return -EINVAL); - vega12_read_arg_from_smc(hwmgr, &value); + value = smum_get_argument(hwmgr); /* power value is an integer */ - query->average_gpu_power = value << 8; + *query = value << 8; #endif return 0; } @@ -1088,14 +1228,11 @@ static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx *gfx_freq = 0; - PP_ASSERT_WITH_CODE( - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0, + PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0, "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!", - return -1); - PP_ASSERT_WITH_CODE( - vega12_read_arg_from_smc(hwmgr, &gfx_clk) == 0, - "[GetCurrentGfxClkFreq] Attempt to read arg from SMC Failed", - return -1); + return -EINVAL); + gfx_clk = smum_get_argument(hwmgr); *gfx_freq = gfx_clk * 100; @@ -1111,11 +1248,8 @@ static int vega12_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_f PP_ASSERT_WITH_CODE( smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16)) == 0, "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!", - return -1); - PP_ASSERT_WITH_CODE( - vega12_read_arg_from_smc(hwmgr, &mem_clk) == 0, - "[GetCurrentMClkFreq] Attempt to read arg from SMC Failed", - return -1); + return -EINVAL); + mem_clk = smum_get_argument(hwmgr); *mclk_freq = mem_clk * 100; @@ -1132,16 +1266,12 @@ static int vega12_get_current_activity_percent( #if 0 ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0); if (!ret) { - ret = vega12_read_arg_from_smc(hwmgr, ¤t_activity); - if (!ret) { - if (current_activity > 100) { - PP_ASSERT(false, - "[GetCurrentActivityPercent] Activity Percentage Exceeds 100!"); - current_activity = 100; - } - } else + current_activity = smum_get_argument(hwmgr); + if (current_activity > 100) { PP_ASSERT(false, - "[GetCurrentActivityPercent] Attempt To Read Average Graphics Activity from SMU Failed!"); + "[GetCurrentActivityPercent] Activity Percentage Exceeds 100!"); + current_activity = 100; + } } else PP_ASSERT(false, "[GetCurrentActivityPercent] Attempt To 
Send Get Average Graphics Activity to SMU Failed!"); @@ -1186,12 +1316,8 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx, *size = 4; break; case AMDGPU_PP_SENSOR_GPU_POWER: - if (*size < sizeof(struct pp_gpu_power)) - ret = -EINVAL; - else { - *size = sizeof(struct pp_gpu_power); - ret = vega12_get_gpu_power(hwmgr, (struct pp_gpu_power *)value); - } + ret = vega12_get_gpu_power(hwmgr, (uint32_t *)value); + break; default: ret = -EINVAL; @@ -1226,7 +1352,6 @@ int vega12_display_clock_voltage_request(struct pp_hwmgr *hwmgr, if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { switch (clk_type) { case amd_pp_dcef_clock: - clk_freq = clock_req->clock_freq_in_khz / 100; clk_select = PPCLK_DCEFCLK; break; case amd_pp_disp_clock: @@ -1260,27 +1385,22 @@ static int vega12_notify_smc_display_config_after_ps_adjustment( { struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); - uint32_t num_active_disps = 0; - struct cgs_display_info info = {0}; struct PP_Clocks min_clocks = {0}; struct pp_display_clock_request clock_req; - uint32_t clk_request; - info.mode_info = NULL; - cgs_get_active_displays_info(hwmgr->device, &info); - num_active_disps = info.display_count; - if (num_active_disps > 1) + if ((hwmgr->display_config->num_display > 1) && + !hwmgr->display_config->multi_monitor_in_sync) vega12_notify_smc_display_change(hwmgr, false); else vega12_notify_smc_display_change(hwmgr, true); - min_clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk; - min_clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk; - min_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock; + min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk; + min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk; + min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock; if (data->smu_features[GNLD_DPM_DCEFCLK].supported) { clock_req.clock_type = amd_pp_dcef_clock; - clock_req.clock_freq_in_khz = min_clocks.dcefClock; + clock_req.clock_freq_in_khz = min_clocks.dcefClock/10; if (!vega12_display_clock_voltage_request(hwmgr, &clock_req)) { if (data->smu_features[GNLD_DS_DCEFCLK].supported) PP_ASSERT_WITH_CODE( @@ -1294,15 +1414,6 @@ static int vega12_notify_smc_display_config_after_ps_adjustment( } } - if (data->smu_features[GNLD_DPM_UCLK].enabled) { - clk_request = (PPCLK_UCLK << 16) | (min_clocks.memoryClock) / 100; - PP_ASSERT_WITH_CODE( - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinByFreq, clk_request) == 0, - "[PhwVega12_NotifySMCDisplayConfigAfterPowerStateAdjustment] Attempt to set UCLK HardMin Failed!", - return -1); - data->dpm_table.mem_table.dpm_state.hard_min_level = min_clocks.memoryClock; - } - return 0; } @@ -1311,12 +1422,19 @@ static int vega12_force_dpm_highest(struct pp_hwmgr *hwmgr) struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); - data->smc_state_table.gfx_boot_level = - data->smc_state_table.gfx_max_level = - vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table)); - data->smc_state_table.mem_boot_level = - data->smc_state_table.mem_max_level = - vega12_find_highest_dpm_level(&(data->dpm_table.mem_table)); + uint32_t soft_level; + + soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table)); + + data->dpm_table.gfx_table.dpm_state.soft_min_level = + data->dpm_table.gfx_table.dpm_state.soft_max_level = + data->dpm_table.gfx_table.dpm_levels[soft_level].value; + + soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.mem_table)); + + 
data->dpm_table.mem_table.dpm_state.soft_min_level = + data->dpm_table.mem_table.dpm_state.soft_max_level = + data->dpm_table.mem_table.dpm_levels[soft_level].value; PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr), "Failed to upload boot level to highest!", @@ -1333,13 +1451,19 @@ static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr) { struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); + uint32_t soft_level; - data->smc_state_table.gfx_boot_level = - data->smc_state_table.gfx_max_level = - vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); - data->smc_state_table.mem_boot_level = - data->smc_state_table.mem_max_level = - vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table)); + soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); + + data->dpm_table.gfx_table.dpm_state.soft_min_level = + data->dpm_table.gfx_table.dpm_state.soft_max_level = + data->dpm_table.gfx_table.dpm_levels[soft_level].value; + + soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table)); + + data->dpm_table.mem_table.dpm_state.soft_min_level = + data->dpm_table.mem_table.dpm_state.soft_max_level = + data->dpm_table.mem_table.dpm_levels[soft_level].value; PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr), "Failed to upload boot level to highest!", @@ -1355,17 +1479,6 @@ static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr) static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr) { - struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); - - data->smc_state_table.gfx_boot_level = - vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); - data->smc_state_table.gfx_max_level = - vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table)); - data->smc_state_table.mem_boot_level = - vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table)); - data->smc_state_table.mem_max_level = - vega12_find_highest_dpm_level(&(data->dpm_table.mem_table)); - PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr), "Failed to upload DPM Bootup Levels!", return -1); @@ -1373,22 +1486,28 @@ static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr), "Failed to upload DPM Max Levels!", return -1); + return 0; } -#if 0 static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level, uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask) { - struct phm_ppt_v2_information *table_info = - (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); + struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table); + struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table); + struct vega12_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table); + + *sclk_mask = 0; + *mclk_mask = 0; + *soc_mask = 0; - if (table_info->vdd_dep_on_sclk->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL && - table_info->vdd_dep_on_socclk->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL && - table_info->vdd_dep_on_mclk->count > VEGA12_UMD_PSTATE_MCLK_LEVEL) { + if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL && + mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL && + soc_dpm_table->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL) { *sclk_mask = VEGA12_UMD_PSTATE_GFXCLK_LEVEL; - *soc_mask = VEGA12_UMD_PSTATE_SOCCLK_LEVEL; *mclk_mask = VEGA12_UMD_PSTATE_MCLK_LEVEL; + *soc_mask = VEGA12_UMD_PSTATE_SOCCLK_LEVEL; } if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { @@ -1396,13 
+1515,13 @@ static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { *mclk_mask = 0; } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { - *sclk_mask = table_info->vdd_dep_on_sclk->count - 1; - *soc_mask = table_info->vdd_dep_on_socclk->count - 1; - *mclk_mask = table_info->vdd_dep_on_mclk->count - 1; + *sclk_mask = gfx_dpm_table->count - 1; + *mclk_mask = mem_dpm_table->count - 1; + *soc_mask = soc_dpm_table->count - 1; } + return 0; } -#endif static void vega12_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) { @@ -1426,11 +1545,9 @@ static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level) { int ret = 0; -#if 0 uint32_t sclk_mask = 0; uint32_t mclk_mask = 0; uint32_t soc_mask = 0; -#endif switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: @@ -1446,27 +1563,18 @@ static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: -#if 0 ret = vega12_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask); if (ret) return ret; - vega12_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask); - vega12_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask); -#endif + vega12_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask); + vega12_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask); break; case AMD_DPM_FORCED_LEVEL_MANUAL: case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: default: break; } -#if 0 - if (!ret) { - if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) - vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE); - else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) - vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO); - } -#endif + return ret; } @@ -1500,24 +1608,14 @@ static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr, PPCLK_e clock_select, bool max) { - int result; - *clock = 0; + struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); - if (max) { - PP_ASSERT_WITH_CODE( - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16)) == 0, - "[GetClockRanges] Failed to get max clock from SMC!", - return -1); - result = vega12_read_arg_from_smc(hwmgr, clock); - } else { - PP_ASSERT_WITH_CODE( - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clock_select << 16)) == 0, - "[GetClockRanges] Failed to get min clock from SMC!", - return -1); - result = vega12_read_arg_from_smc(hwmgr, clock); - } + if (max) + *clock = data->clk_range[clock_select].ACMax; + else + *clock = data->clk_range[clock_select].ACMin; - return result; + return 0; } static int vega12_get_sclks(struct pp_hwmgr *hwmgr, @@ -1532,12 +1630,12 @@ static int vega12_get_sclks(struct pp_hwmgr *hwmgr, return -1; dpm_table = &(data->dpm_table.gfx_table); - ucount = (dpm_table->count > VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS) ? - VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS : dpm_table->count; + ucount = (dpm_table->count > MAX_NUM_CLOCKS) ? 
+ MAX_NUM_CLOCKS : dpm_table->count; for (i = 0; i < ucount; i++) { clocks->data[i].clocks_in_khz = - dpm_table->dpm_levels[i].value * 100; + dpm_table->dpm_levels[i].value * 1000; clocks->data[i].latency_in_us = 0; } @@ -1564,13 +1662,12 @@ static int vega12_get_memclocks(struct pp_hwmgr *hwmgr, return -1; dpm_table = &(data->dpm_table.mem_table); - ucount = (dpm_table->count > VG12_PSUEDO_NUM_UCLK_DPM_LEVELS) ? - VG12_PSUEDO_NUM_UCLK_DPM_LEVELS : dpm_table->count; + ucount = (dpm_table->count > MAX_NUM_CLOCKS) ? + MAX_NUM_CLOCKS : dpm_table->count; for (i = 0; i < ucount; i++) { - clocks->data[i].clocks_in_khz = - dpm_table->dpm_levels[i].value * 100; - + clocks->data[i].clocks_in_khz = dpm_table->dpm_levels[i].value * 1000; + data->mclk_latency_table.entries[i].frequency = dpm_table->dpm_levels[i].value * 100; clocks->data[i].latency_in_us = data->mclk_latency_table.entries[i].latency = vega12_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value); @@ -1594,12 +1691,12 @@ static int vega12_get_dcefclocks(struct pp_hwmgr *hwmgr, dpm_table = &(data->dpm_table.dcef_table); - ucount = (dpm_table->count > VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS) ? - VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS : dpm_table->count; + ucount = (dpm_table->count > MAX_NUM_CLOCKS) ? + MAX_NUM_CLOCKS : dpm_table->count; for (i = 0; i < ucount; i++) { clocks->data[i].clocks_in_khz = - dpm_table->dpm_levels[i].value * 100; + dpm_table->dpm_levels[i].value * 1000; clocks->data[i].latency_in_us = 0; } @@ -1622,12 +1719,12 @@ static int vega12_get_socclocks(struct pp_hwmgr *hwmgr, dpm_table = &(data->dpm_table.soc_table); - ucount = (dpm_table->count > VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS) ? - VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS : dpm_table->count; + ucount = (dpm_table->count > MAX_NUM_CLOCKS) ? + MAX_NUM_CLOCKS : dpm_table->count; for (i = 0; i < ucount; i++) { clocks->data[i].clocks_in_khz = - dpm_table->dpm_levels[i].value * 100; + dpm_table->dpm_levels[i].value * 1000; clocks->data[i].latency_in_us = 0; } @@ -1674,99 +1771,69 @@ static int vega12_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, } static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, - struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges) + void *clock_ranges) { struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); Watermarks_t *table = &(data->smc_state_table.water_marks_table); - int result = 0; - uint32_t i; + struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges; if (!data->registry_data.disable_water_mark && data->smu_features[GNLD_DPM_DCEFCLK].supported && data->smu_features[GNLD_DPM_SOCCLK].supported) { - for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) { - table->WatermarkRow[WM_DCEFCLK][i].MinClock = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) / - 100); - table->WatermarkRow[WM_DCEFCLK][i].MaxClock = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) / - 100); - table->WatermarkRow[WM_DCEFCLK][i].MinUclk = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) / - 100); - table->WatermarkRow[WM_DCEFCLK][i].MaxUclk = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) / - 100); - table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t) - wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id; - } - - for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) { - table->WatermarkRow[WM_SOCCLK][i].MinClock = - 
cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) / - 100); - table->WatermarkRow[WM_SOCCLK][i].MaxClock = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) / - 100); - table->WatermarkRow[WM_SOCCLK][i].MinUclk = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) / - 100); - table->WatermarkRow[WM_SOCCLK][i].MaxUclk = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) / - 100); - table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t) - wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id; - } + smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges); data->water_marks_bitmap |= WaterMarksExist; data->water_marks_bitmap &= ~WaterMarksLoaded; } - return result; + return 0; } static int vega12_force_clock_level(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask) { struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); - - if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO | - AMD_DPM_FORCED_LEVEL_LOW | - AMD_DPM_FORCED_LEVEL_HIGH)) - return -EINVAL; + uint32_t soft_min_level, soft_max_level; + int ret = 0; switch (type) { case PP_SCLK: - data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0; - data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0; + soft_min_level = mask ? (ffs(mask) - 1) : 0; + soft_max_level = mask ? (fls(mask) - 1) : 0; + + data->dpm_table.gfx_table.dpm_state.soft_min_level = + data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; + data->dpm_table.gfx_table.dpm_state.soft_max_level = + data->dpm_table.gfx_table.dpm_levels[soft_max_level].value; - PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr), + ret = vega12_upload_dpm_min_level(hwmgr); + PP_ASSERT_WITH_CODE(!ret, "Failed to upload boot level to lowest!", - return -EINVAL); + return ret); - PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr), + ret = vega12_upload_dpm_max_level(hwmgr); + PP_ASSERT_WITH_CODE(!ret, "Failed to upload dpm max level to highest!", - return -EINVAL); + return ret); break; case PP_MCLK: - data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0; - data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0; + soft_min_level = mask ? (ffs(mask) - 1) : 0; + soft_max_level = mask ? (fls(mask) - 1) : 0; + + data->dpm_table.mem_table.dpm_state.soft_min_level = + data->dpm_table.mem_table.dpm_levels[soft_min_level].value; + data->dpm_table.mem_table.dpm_state.soft_max_level = + data->dpm_table.mem_table.dpm_levels[soft_max_level].value; - PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr), + ret = vega12_upload_dpm_min_level(hwmgr); + PP_ASSERT_WITH_CODE(!ret, "Failed to upload boot level to lowest!", - return -EINVAL); + return ret); - PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr), + ret = vega12_upload_dpm_max_level(hwmgr); + PP_ASSERT_WITH_CODE(!ret, "Failed to upload dpm max level to highest!", - return -EINVAL); + return ret); break; @@ -1799,8 +1866,8 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, return -1); for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", - i, clocks.data[i].clocks_in_khz / 100, - (clocks.data[i].clocks_in_khz == now) ? "*" : ""); + i, clocks.data[i].clocks_in_khz / 1000, + (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? 
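/* clocks_in_khz is now true kHz while 'now' from the SMC is still in 10 kHz units, so both sides are reduced to MHz before comparing */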
"*" : ""); break; case PP_MCLK: @@ -1815,8 +1882,8 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, return -1); for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", - i, clocks.data[i].clocks_in_khz / 100, - (clocks.data[i].clocks_in_khz == now) ? "*" : ""); + i, clocks.data[i].clocks_in_khz / 1000, + (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : ""); break; case PP_PCIE: @@ -1828,13 +1895,210 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, return size; } +static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) +{ + struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); + struct vega12_single_dpm_table *dpm_table; + bool vblank_too_short = false; + bool disable_mclk_switching; + uint32_t i, latency; + + disable_mclk_switching = ((1 < hwmgr->display_config->num_display) && + !hwmgr->display_config->multi_monitor_in_sync) || + vblank_too_short; + latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency; + + /* gfxclk */ + dpm_table = &(data->dpm_table.gfx_table); + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + + if (PP_CAP(PHM_PlatformCaps_UMDPState)) { + if (VEGA12_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value; + } + + if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value; + } + + if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + } + } + + /* memclk */ + dpm_table = &(data->dpm_table.mem_table); + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + + if (PP_CAP(PHM_PlatformCaps_UMDPState)) { + if (VEGA12_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value; + } + + if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value; + } + + if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + } + } + + /* honour DAL's UCLK Hardmin */ + if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100)) + dpm_table->dpm_state.hard_min_level = 
hwmgr->display_config->min_mem_set_clock / 100; + + /* Hardmin is dependent on displayconfig */ + if (disable_mclk_switching) { + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + for (i = 0; i < data->mclk_latency_table.count - 1; i++) { + if (data->mclk_latency_table.entries[i].latency <= latency) { + if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) { + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value; + break; + } + } + } + } + + if (hwmgr->display_config->nb_pstate_switch_disable) + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + + /* vclk */ + dpm_table = &(data->dpm_table.vclk_table); + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + + if (PP_CAP(PHM_PlatformCaps_UMDPState)) { + if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value; + } + + if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + } + } + + /* dclk */ + dpm_table = &(data->dpm_table.dclk_table); + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + + if (PP_CAP(PHM_PlatformCaps_UMDPState)) { + if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value; + } + + if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + } + } + + /* socclk */ + dpm_table = &(data->dpm_table.soc_table); + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + + if (PP_CAP(PHM_PlatformCaps_UMDPState)) { + if (VEGA12_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value; + } + + if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + } + } + + /* eclk */ + 
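/* eclk is the VCE engine clock; it follows the same UMD-pstate clamping pattern as vclk/dclk above, keyed off VEGA12_UMD_PSTATE_VCEMCLK_LEVEL */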
dpm_table = &(data->dpm_table.eclk_table); + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + + if (PP_CAP(PHM_PlatformCaps_UMDPState)) { + if (VEGA12_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value; + } + + if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + } + } + + return 0; +} + +static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr, + struct vega12_single_dpm_table *dpm_table) +{ + struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); + int ret = 0; + + if (data->smu_features[GNLD_DPM_UCLK].enabled) { + PP_ASSERT_WITH_CODE(dpm_table->count > 0, + "[SetUclkToHightestDpmLevel] Dpm table has no entry!", + return -EINVAL); + PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS, + "[SetUclkToHightestDpmLevel] Dpm table has too many entries!", + return -EINVAL); + + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetHardMinByFreq, + (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)), + "[SetUclkToHightestDpmLevel] Set hard min uclk failed!", + return ret); + } + + return ret; +} + +static int vega12_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr) +{ + struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); + int ret = 0; + + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_NumOfDisplays, 0); + + ret = vega12_set_uclk_to_highest_dpm_level(hwmgr, + &data->dpm_table.mem_table); + + return ret; +} + static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr) { struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); int result = 0; - uint32_t num_turned_on_displays = 1; Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table); - struct cgs_display_info info = {0}; if ((data->water_marks_bitmap & WaterMarksExist) && !(data->water_marks_bitmap & WaterMarksLoaded)) { @@ -1846,12 +2110,9 @@ static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr) if ((data->water_marks_bitmap & WaterMarksExist) && data->smu_features[GNLD_DPM_DCEFCLK].supported && - data->smu_features[GNLD_DPM_SOCCLK].supported) { - cgs_get_active_displays_info(hwmgr->device, &info); - num_turned_on_displays = info.display_count; + data->smu_features[GNLD_DPM_SOCCLK].supported) smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_NumOfDisplays, num_turned_on_displays); - } + PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display); return result; } @@ -1877,6 +2138,9 @@ static void vega12_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate) { struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); + if (data->vce_power_gated == bgate) + return; + data->vce_power_gated = bgate; vega12_enable_disable_vce_dpm(hwmgr, !bgate); } @@ -1885,6 +2149,9 @@ static void vega12_power_gate_uvd(struct 
pp_hwmgr *hwmgr, bool bgate) { struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); + if (data->uvd_power_gated == bgate) + return; + data->uvd_power_gated = bgate; vega12_enable_disable_uvd_dpm(hwmgr, !bgate); } @@ -1894,15 +2161,12 @@ vega12_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmg { struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); bool is_update_required = false; - struct cgs_display_info info = {0, 0, NULL}; - cgs_get_active_displays_info(hwmgr->device, &info); - - if (data->display_timing.num_existing_displays != info.display_count) + if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) is_update_required = true; if (data->registry_data.gfx_clk_deep_sleep_support) { - if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr) + if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr) is_update_required = true; } @@ -2055,6 +2319,38 @@ static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr, return 0; } +static int vega12_enable_gfx_off(struct pp_hwmgr *hwmgr) +{ + struct vega12_hwmgr *data = + (struct vega12_hwmgr *)(hwmgr->backend); + int ret = 0; + + if (data->gfxoff_controlled_by_driver) + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff); + + return ret; +} + +static int vega12_disable_gfx_off(struct pp_hwmgr *hwmgr) +{ + struct vega12_hwmgr *data = + (struct vega12_hwmgr *)(hwmgr->backend); + int ret = 0; + + if (data->gfxoff_controlled_by_driver) + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff); + + return ret; +} + +static int vega12_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable) +{ + if (enable) + return vega12_enable_gfx_off(hwmgr); + else + return vega12_disable_gfx_off(hwmgr); +} + static const struct pp_hwmgr_func vega12_hwmgr_funcs = { .backend_init = vega12_hwmgr_backend_init, .backend_fini = vega12_hwmgr_backend_fini, @@ -2082,6 +2378,10 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = { .display_clock_voltage_request = vega12_display_clock_voltage_request, .force_clock_level = vega12_force_clock_level, .print_clock_levels = vega12_print_clock_levels, + .apply_clocks_adjust_rules = + vega12_apply_clocks_adjust_rules, + .pre_display_config_changed = + vega12_pre_display_configuration_changed_task, .display_config_changed = vega12_display_configuration_changed_task, .powergate_uvd = vega12_power_gate_uvd, .powergate_vce = vega12_power_gate_vce, @@ -2100,6 +2400,7 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = { .get_thermal_temperature_range = vega12_get_thermal_temperature_range, .register_irq_handlers = smu9_register_irq_handlers, .start_thermal_controller = vega12_start_thermal_controller, + .powergate_gfx = vega12_gfx_off_control, }; int vega12_hwmgr_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h index bc98b1df3b65..b3e424d28994 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h @@ -33,7 +33,7 @@ #define WaterMarksExist 1 #define WaterMarksLoaded 2 -#define VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS 8 +#define VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS 16 #define VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS 8 #define VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS 8 #define VG12_PSUEDO_NUM_UCLK_DPM_LEVELS 4 @@ -167,6 +167,9 @@ struct vega12_vbios_boot_state { uint32_t mem_clock; uint32_t soc_clock; uint32_t 
dcef_clock; + uint32_t eclock; + uint32_t dclock; + uint32_t vclock; }; #define DPMTABLE_OD_UPDATE_SCLK 0x00000001 @@ -301,6 +304,12 @@ struct vega12_odn_fan_table { bool force_fan_pwm; }; +struct vega12_clock_range { + uint32_t ACMax; + uint32_t ACMin; + uint32_t DCMax; +}; + struct vega12_hwmgr { struct vega12_dpm_table dpm_table; struct vega12_dpm_table golden_dpm_table; @@ -382,6 +391,11 @@ struct vega12_hwmgr { uint32_t smu_version; struct smu_features smu_features[GNLD_FEATURES_MAX]; struct vega12_smc_state_table smc_state_table; + + struct vega12_clock_range clk_range[PPCLK_COUNT]; + + /* ---- Gfxoff ---- */ + bool gfxoff_controlled_by_driver; }; #define VEGA12_DPM2_NEAR_TDP_DEC 10 @@ -432,6 +446,8 @@ struct vega12_hwmgr { #define VEGA12_UMD_PSTATE_GFXCLK_LEVEL 0x3 #define VEGA12_UMD_PSTATE_SOCCLK_LEVEL 0x3 #define VEGA12_UMD_PSTATE_MCLK_LEVEL 0x2 +#define VEGA12_UMD_PSTATE_UVDCLK_LEVEL 0x3 +#define VEGA12_UMD_PSTATE_VCEMCLK_LEVEL 0x3 int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c index b34113f45904..f4f366b26fd1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c @@ -51,7 +51,7 @@ static const void *get_powerplay_table(struct pp_hwmgr *hwmgr) if (!table_address) { table_address = (ATOM_Vega12_POWERPLAYTABLE *) - cgs_atom_get_data_table(hwmgr->device, index, + smu_atom_get_data_table(hwmgr->adev, index, &size, &frev, &crev); hwmgr->soft_pp_table = table_address; /*Cache the result in RAM.*/ @@ -224,6 +224,7 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable ppsmc_pptable->AcgGfxclkSpreadPercent = smc_dpm_table.acggfxclkspreadpercent; ppsmc_pptable->AcgGfxclkSpreadFreq = smc_dpm_table.acggfxclkspreadfreq; + ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address; return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c index df0fa815cd6e..904eb2c9155b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c @@ -26,7 +26,7 @@ #include "vega12_smumgr.h" #include "vega12_ppsmc.h" #include "vega12_inc.h" -#include "pp_soc15.h" +#include "soc15_common.h" #include "pp_debug.h" static int vega12_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm) @@ -34,11 +34,9 @@ static int vega12_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm) PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm), "Attempt to get current RPM from SMC Failed!", - return -1); - PP_ASSERT_WITH_CODE(!vega12_read_arg_from_smc(hwmgr, - current_rpm), - "Attempt to read current RPM from SMC Failed!", - return -1); + return -EINVAL); + *current_rpm = smum_get_argument(hwmgr); + return 0; } @@ -147,13 +145,10 @@ int vega12_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) */ int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; int temp = 0; - uint32_t reg; - - reg = soc15_get_register_offset(THM_HWID, 0, - mmCG_MULT_THERMAL_STATUS_BASE_IDX, mmCG_MULT_THERMAL_STATUS); - temp = cgs_read_register(hwmgr->device, reg); + temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS); temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >> CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT; @@ -175,11 +170,12 @@ int 
vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr) static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range) { + struct amdgpu_device *adev = hwmgr->adev; int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - uint32_t val, reg; + uint32_t val; if (low < range->min) low = range->min; @@ -189,18 +185,15 @@ static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, if (low > high) return -EINVAL; - reg = soc15_get_register_offset(THM_HWID, 0, - mmTHM_THERMAL_INT_CTRL_BASE_IDX, mmTHM_THERMAL_INT_CTRL); - - val = cgs_read_register(hwmgr->device, reg); + val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL); - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); - cgs_write_register(hwmgr->device, reg, val); + WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); return 0; } @@ -212,15 +205,14 @@ static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, */ static int vega12_thermal_enable_alert(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; uint32_t val = 0; - uint32_t reg; val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT); val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT); val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT); - reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA); - cgs_write_register(hwmgr->device, reg, val); + WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val); return 0; } @@ -231,10 +223,9 @@ static int vega12_thermal_enable_alert(struct pp_hwmgr *hwmgr) */ int vega12_thermal_disable_alert(struct pp_hwmgr *hwmgr) { - uint32_t reg; + struct amdgpu_device *adev = hwmgr->adev; - reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA); - cgs_write_register(hwmgr->device, reg, 0); + WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h index 8b78bbecd1bc..429c9c4322da 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h @@ -377,11 +377,7 @@ struct phm_clocks { #define DPMTABLE_UPDATE_SCLK 0x00000004 #define DPMTABLE_UPDATE_MCLK 0x00000008 #define DPMTABLE_OD_UPDATE_VDDC 0x00000010 - -/* To determine if sclk and mclk are in overdrive state */ -#define SCLK_OVERDRIVE_ENABLED 0x00000001 -#define MCLK_OVERDRIVE_ENABLED 0x00000002 -#define VDDC_OVERDRIVE_ENABLED 0x00000010 +#define DPMTABLE_UPDATE_SOCCLK 0x00000020 struct phm_odn_performance_level { uint32_t clock; @@ -414,7 +410,10 @@ extern int phm_apply_state_adjust_rules(struct pp_hwmgr 
*hwmgr, struct pp_power_state *adjusted_ps, const struct pp_power_state *current_ps); +extern int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr); + extern int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level); +extern int phm_pre_display_configuration_changed(struct pp_hwmgr *hwmgr); extern int phm_display_configuration_changed(struct pp_hwmgr *hwmgr); extern int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr); extern int phm_register_irq_handlers(struct pp_hwmgr *hwmgr); @@ -456,7 +455,7 @@ extern int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct pp_clock_levels_with_voltage *clocks); extern int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, - struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges); + void *clock_ranges); extern int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr, struct pp_display_clock_request *clock); diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 17f811d181c8..d3d96260f440 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -26,7 +26,6 @@ #include <linux/seq_file.h> #include "amd_powerplay.h" #include "hardwaremanager.h" -#include "pp_power_source.h" #include "hwmgr_ppt.h" #include "ppatomctrl.h" #include "hwmgr_ppt.h" @@ -38,6 +37,8 @@ struct phm_fan_speed_info; struct pp_atomctrl_voltage_table; #define VOLTAGE_SCALE 4 +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 enum DISPLAY_GAP { DISPLAY_GAP_VBLANK_OR_WM = 0, /* Wait for vblank or MCHG watermark. */ @@ -64,24 +65,6 @@ struct vi_dpm_table { #define PCIE_PERF_REQ_GEN2 3 #define PCIE_PERF_REQ_GEN3 4 -enum PP_FEATURE_MASK { - PP_SCLK_DPM_MASK = 0x1, - PP_MCLK_DPM_MASK = 0x2, - PP_PCIE_DPM_MASK = 0x4, - PP_SCLK_DEEP_SLEEP_MASK = 0x8, - PP_POWER_CONTAINMENT_MASK = 0x10, - PP_UVD_HANDSHAKE_MASK = 0x20, - PP_SMC_VOLTAGE_CONTROL_MASK = 0x40, - PP_VBI_TIME_SUPPORT_MASK = 0x80, - PP_ULV_MASK = 0x100, - PP_ENABLE_GFX_CG_THRU_SMU = 0x200, - PP_CLOCK_STRETCH_MASK = 0x400, - PP_OD_FUZZY_FAN_CONTROL_MASK = 0x800, - PP_SOCCLK_DPM_MASK = 0x1000, - PP_DCEFCLK_DPM_MASK = 0x2000, - PP_OVERDRIVE_MASK = 0x4000, -}; - enum PHM_BackEnd_Magic { PHM_Dummy_Magic = 0xAA5555AA, PHM_RV770_Magic = 0xDCBAABCD, @@ -211,7 +194,7 @@ struct pp_smumgr_func { int (*request_smu_load_fw)(struct pp_hwmgr *hwmgr); int (*request_smu_load_specific_fw)(struct pp_hwmgr *hwmgr, uint32_t firmware); - int (*get_argument)(struct pp_hwmgr *hwmgr); + uint32_t (*get_argument)(struct pp_hwmgr *hwmgr); int (*send_msg_to_smc)(struct pp_hwmgr *hwmgr, uint16_t msg); int (*send_msg_to_smc_with_parameter)(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter); @@ -245,6 +228,8 @@ struct pp_hwmgr_func { struct pp_power_state *prequest_ps, const struct pp_power_state *pcurrent_ps); + int (*apply_clocks_adjust_rules)(struct pp_hwmgr *hwmgr); + int (*force_dpm_level)(struct pp_hwmgr *hw_mgr, enum amd_dpm_forced_level level); @@ -268,6 +253,7 @@ struct pp_hwmgr_func { const void *state); int (*enable_clock_power_gating)(struct pp_hwmgr *hwmgr); int (*notify_smc_display_config_after_ps_adjustment)(struct pp_hwmgr *hwmgr); + int (*pre_display_config_changed)(struct pp_hwmgr *hwmgr); int (*display_config_changed)(struct pp_hwmgr *hwmgr); int (*disable_clock_power_gating)(struct pp_hwmgr *hwmgr); int (*update_clock_gatings)(struct pp_hwmgr *hwmgr, @@ -307,15 +293,15 @@ struct pp_hwmgr_func { int 
(*get_clock_by_type_with_voltage)(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct pp_clock_levels_with_voltage *clocks); - int (*set_watermarks_for_clocks_ranges)(struct pp_hwmgr *hwmgr, - struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges); + int (*set_watermarks_for_clocks_ranges)(struct pp_hwmgr *hwmgr, void *clock_ranges); int (*display_clock_voltage_request)(struct pp_hwmgr *hwmgr, struct pp_display_clock_request *clock); int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks); + int (*gfx_off_control)(struct pp_hwmgr *hwmgr, bool enable); int (*power_off_asic)(struct pp_hwmgr *hwmgr); int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask); int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf); - int (*enable_per_cu_power_gating)(struct pp_hwmgr *hwmgr, bool enable); + int (*powergate_gfx)(struct pp_hwmgr *hwmgr, bool enable); int (*get_sclk_od)(struct pp_hwmgr *hwmgr); int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value); int (*get_mclk_od)(struct pp_hwmgr *hwmgr); @@ -340,7 +326,8 @@ struct pp_hwmgr_func { enum PP_OD_DPM_TABLE_COMMAND type, long *input, uint32_t size); int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n); - int (*set_mmhub_powergating_by_smu)(struct pp_hwmgr *hwmgr); + int (*powergate_mmhub)(struct pp_hwmgr *hwmgr); + int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr); }; struct pp_table_func { @@ -718,6 +705,7 @@ struct pp_hwmgr { uint32_t chip_family; uint32_t chip_id; uint32_t smu_version; + bool not_vf; bool pm_en; struct mutex smu_lock; @@ -751,7 +739,6 @@ struct pp_hwmgr { const struct pp_table_func *pptable_func; struct pp_power_state *ps; - enum pp_power_source power_source; uint32_t num_ps; struct pp_thermal_controller_info thermal_controller; bool fan_ctrl_is_in_default_mode; @@ -764,7 +751,7 @@ struct pp_hwmgr { struct pp_power_state *request_ps; struct pp_power_state *boot_ps; struct pp_power_state *uvd_ps; - struct amd_pp_display_configuration display_config; + const struct amd_pp_display_configuration *display_config; uint32_t feature_mask; bool avfs_supported; /* UMD Pstate */ @@ -782,10 +769,13 @@ struct pp_hwmgr { }; int hwmgr_early_init(struct pp_hwmgr *hwmgr); +int hwmgr_sw_init(struct pp_hwmgr *hwmgr); +int hwmgr_sw_fini(struct pp_hwmgr *hwmgr); int hwmgr_hw_init(struct pp_hwmgr *hwmgr); int hwmgr_hw_fini(struct pp_hwmgr *hwmgr); -int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr); -int hwmgr_hw_resume(struct pp_hwmgr *hwmgr); +int hwmgr_suspend(struct pp_hwmgr *hwmgr); +int hwmgr_resume(struct pp_hwmgr *hwmgr); + int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id, enum amd_pm_state_type *user_state); diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h b/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h deleted file mode 100644 index 214f370c5efd..000000000000 --- a/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#ifndef PP_SOC15_H -#define PP_SOC15_H - -#include "soc15_hw_ip.h" -#include "vega10_ip_offset.h" - -inline static uint32_t soc15_get_register_offset( - uint32_t hw_id, - uint32_t inst, - uint32_t segment, - uint32_t offset) -{ - uint32_t reg = 0; - - if (hw_id == THM_HWID) - reg = THM_BASE.instance[inst].segment[segment] + offset; - else if (hw_id == NBIF_HWID) - reg = NBIF_BASE.instance[inst].segment[segment] + offset; - else if (hw_id == MP1_HWID) - reg = MP1_BASE.instance[inst].segment[segment] + offset; - else if (hw_id == DF_HWID) - reg = DF_BASE.instance[inst].segment[segment] + offset; - else if (hw_id == GC_HWID) - reg = GC_BASE.instance[inst].segment[segment] + offset; - else if (hw_id == SMUIO_HWID) - reg = SMUIO_BASE.instance[inst].segment[segment] + offset; - return reg; -} - -#endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h index 426bff2aad2b..a2991fa2e6f8 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h @@ -75,13 +75,15 @@ #define PPSMC_MSG_GetMinGfxclkFrequency 0x2C #define PPSMC_MSG_GetMaxGfxclkFrequency 0x2D #define PPSMC_MSG_SoftReset 0x2E +#define PPSMC_MSG_SetGfxCGPG 0x2F #define PPSMC_MSG_SetSoftMaxGfxClk 0x30 #define PPSMC_MSG_SetHardMinGfxClk 0x31 #define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x32 #define PPSMC_MSG_SetSoftMaxFclkByFreq 0x33 #define PPSMC_MSG_SetSoftMaxVcn 0x34 #define PPSMC_MSG_PowerGateMmHub 0x35 -#define PPSMC_Message_Count 0x36 +#define PPSMC_MSG_SetRccPfcPmeRestoreRegister 0x36 +#define PPSMC_Message_Count 0x37 typedef uint16_t PPSMC_Result; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu75.h b/drivers/gpu/drm/amd/powerplay/inc/smu75.h new file mode 100644 index 000000000000..771523001533 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/smu75.h @@ -0,0 +1,760 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef SMU75_H +#define SMU75_H + +#pragma pack(push, 1) + +typedef struct { + uint32_t high; + uint32_t low; +} data_64_t; + +typedef struct { + data_64_t high; + data_64_t low; +} data_128_t; + +#define SMU__DGPU_ONLY + +#define SMU__NUM_SCLK_DPM_STATE 8 +#define SMU__NUM_MCLK_DPM_LEVELS 4 +#define SMU__NUM_LCLK_DPM_LEVELS 8 +#define SMU__NUM_PCIE_DPM_LEVELS 8 + +#define SMU7_CONTEXT_ID_SMC 1 +#define SMU7_CONTEXT_ID_VBIOS 2 + +#define SMU75_MAX_LEVELS_VDDC 16 +#define SMU75_MAX_LEVELS_VDDGFX 16 +#define SMU75_MAX_LEVELS_VDDCI 8 +#define SMU75_MAX_LEVELS_MVDD 4 + +#define SMU_MAX_SMIO_LEVELS 4 + +#define SMU75_MAX_LEVELS_GRAPHICS SMU__NUM_SCLK_DPM_STATE +#define SMU75_MAX_LEVELS_MEMORY SMU__NUM_MCLK_DPM_LEVELS +#define SMU75_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS +#define SMU75_MAX_LEVELS_LINK SMU__NUM_PCIE_DPM_LEVELS +#define SMU75_MAX_LEVELS_UVD 8 +#define SMU75_MAX_LEVELS_VCE 8 +#define SMU75_MAX_LEVELS_ACP 8 +#define SMU75_MAX_LEVELS_SAMU 8 +#define SMU75_MAX_ENTRIES_SMIO 32 + +#define DPM_NO_LIMIT 0 +#define DPM_NO_UP 1 +#define DPM_GO_DOWN 2 +#define DPM_GO_UP 3 + +#define SMU7_FIRST_DPM_GRAPHICS_LEVEL 0 +#define SMU7_FIRST_DPM_MEMORY_LEVEL 0 + +#define GPIO_CLAMP_MODE_VRHOT 1 +#define GPIO_CLAMP_MODE_THERM 2 +#define GPIO_CLAMP_MODE_DC 4 + +#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0 +#define SCRATCH_B_TARG_PCIE_INDEX_MASK (0x7<<SCRATCH_B_TARG_PCIE_INDEX_SHIFT) +#define SCRATCH_B_CURR_PCIE_INDEX_SHIFT 3 +#define SCRATCH_B_CURR_PCIE_INDEX_MASK (0x7<<SCRATCH_B_CURR_PCIE_INDEX_SHIFT) +#define SCRATCH_B_TARG_UVD_INDEX_SHIFT 6 +#define SCRATCH_B_TARG_UVD_INDEX_MASK (0x7<<SCRATCH_B_TARG_UVD_INDEX_SHIFT) +#define SCRATCH_B_CURR_UVD_INDEX_SHIFT 9 +#define SCRATCH_B_CURR_UVD_INDEX_MASK (0x7<<SCRATCH_B_CURR_UVD_INDEX_SHIFT) +#define SCRATCH_B_TARG_VCE_INDEX_SHIFT 12 +#define SCRATCH_B_TARG_VCE_INDEX_MASK (0x7<<SCRATCH_B_TARG_VCE_INDEX_SHIFT) +#define SCRATCH_B_CURR_VCE_INDEX_SHIFT 15 +#define SCRATCH_B_CURR_VCE_INDEX_MASK (0x7<<SCRATCH_B_CURR_VCE_INDEX_SHIFT) +#define SCRATCH_B_TARG_ACP_INDEX_SHIFT 18 +#define SCRATCH_B_TARG_ACP_INDEX_MASK (0x7<<SCRATCH_B_TARG_ACP_INDEX_SHIFT) +#define SCRATCH_B_CURR_ACP_INDEX_SHIFT 21 +#define SCRATCH_B_CURR_ACP_INDEX_MASK (0x7<<SCRATCH_B_CURR_ACP_INDEX_SHIFT) +#define SCRATCH_B_TARG_SAMU_INDEX_SHIFT 24 +#define SCRATCH_B_TARG_SAMU_INDEX_MASK (0x7<<SCRATCH_B_TARG_SAMU_INDEX_SHIFT) +#define SCRATCH_B_CURR_SAMU_INDEX_SHIFT 27 +#define SCRATCH_B_CURR_SAMU_INDEX_MASK 
(0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT) + +/* Virtualization Defines */ +#define CG_XDMA_MASK 0x1 +#define CG_XDMA_SHIFT 0 +#define CG_UVD_MASK 0x2 +#define CG_UVD_SHIFT 1 +#define CG_VCE_MASK 0x4 +#define CG_VCE_SHIFT 2 +#define CG_SAMU_MASK 0x8 +#define CG_SAMU_SHIFT 3 +#define CG_GFX_MASK 0x10 +#define CG_GFX_SHIFT 4 +#define CG_SDMA_MASK 0x20 +#define CG_SDMA_SHIFT 5 +#define CG_HDP_MASK 0x40 +#define CG_HDP_SHIFT 6 +#define CG_MC_MASK 0x80 +#define CG_MC_SHIFT 7 +#define CG_DRM_MASK 0x100 +#define CG_DRM_SHIFT 8 +#define CG_ROM_MASK 0x200 +#define CG_ROM_SHIFT 9 +#define CG_BIF_MASK 0x400 +#define CG_BIF_SHIFT 10 + +#if defined SMU__DGPU_ONLY +#define SMU75_DTE_ITERATIONS 5 +#define SMU75_DTE_SOURCES 3 +#define SMU75_DTE_SINKS 1 +#define SMU75_NUM_CPU_TES 0 +#define SMU75_NUM_GPU_TES 1 +#define SMU75_NUM_NON_TES 2 +#define SMU75_DTE_FAN_SCALAR_MIN 0x100 +#define SMU75_DTE_FAN_SCALAR_MAX 0x166 +#define SMU75_DTE_FAN_TEMP_MAX 93 +#define SMU75_DTE_FAN_TEMP_MIN 83 +#endif +#define SMU75_THERMAL_INPUT_LOOP_COUNT 2 +#define SMU75_THERMAL_CLAMP_MODE_COUNT 2 + +#define EXP_M1_1 93 +#define EXP_M2_1 195759 +#define EXP_B_1 111176531 + +#define EXP_M1_2 67 +#define EXP_M2_2 153720 +#define EXP_B_2 94415767 + +#define EXP_M1_3 48 +#define EXP_M2_3 119796 +#define EXP_B_3 79195279 + +#define EXP_M1_4 550 +#define EXP_M2_4 1484190 +#define EXP_B_4 1051432828 + +#define EXP_M1_5 394 +#define EXP_M2_5 1143049 +#define EXP_B_5 864288432 + +struct SMU7_HystController_Data { + uint16_t waterfall_up; + uint16_t waterfall_down; + uint16_t waterfall_limit; + uint16_t release_cnt; + uint16_t release_limit; + uint16_t spare; +}; + +typedef struct SMU7_HystController_Data SMU7_HystController_Data; + +struct SMU75_PIDController { + uint32_t Ki; + int32_t LFWindupUpperLim; + int32_t LFWindupLowerLim; + uint32_t StatePrecision; + uint32_t LfPrecision; + uint32_t LfOffset; + uint32_t MaxState; + uint32_t MaxLfFraction; + uint32_t StateShift; +}; + +typedef struct SMU75_PIDController SMU75_PIDController; + +struct SMU7_LocalDpmScoreboard { + uint32_t PercentageBusy; + + int32_t PIDError; + int32_t PIDIntegral; + int32_t PIDOutput; + + uint32_t SigmaDeltaAccum; + uint32_t SigmaDeltaOutput; + uint32_t SigmaDeltaLevel; + + uint32_t UtilizationSetpoint; + + uint8_t TdpClampMode; + uint8_t TdcClampMode; + uint8_t ThermClampMode; + uint8_t VoltageBusy; + + int8_t CurrLevel; + int8_t TargLevel; + uint8_t LevelChangeInProgress; + uint8_t UpHyst; + + uint8_t DownHyst; + uint8_t VoltageDownHyst; + uint8_t DpmEnable; + uint8_t DpmRunning; + + uint8_t DpmForce; + uint8_t DpmForceLevel; + uint8_t DisplayWatermark; + uint8_t McArbIndex; + + uint32_t MinimumPerfSclk; + + uint8_t AcpiReq; + uint8_t AcpiAck; + uint8_t GfxClkSlow; + uint8_t GpioClampMode; + + uint8_t EnableModeSwitchRLCNotification; + uint8_t EnabledLevelsChange; + uint8_t DteClampMode; + uint8_t FpsClampMode; + + uint16_t LevelResidencyCounters [SMU75_MAX_LEVELS_GRAPHICS]; + uint16_t LevelSwitchCounters [SMU75_MAX_LEVELS_GRAPHICS]; + + void (*TargetStateCalculator)(uint8_t); + void (*SavedTargetStateCalculator)(uint8_t); + + uint16_t AutoDpmInterval; + uint16_t AutoDpmRange; + + uint8_t FpsEnabled; + uint8_t MaxPerfLevel; + uint8_t AllowLowClkInterruptToHost; + uint8_t FpsRunning; + + uint32_t MaxAllowedFrequency; + + uint32_t FilteredSclkFrequency; + uint32_t LastSclkFrequency; + uint32_t FilteredSclkFrequencyCnt; + + uint8_t MinPerfLevel; +#ifdef SMU__FIRMWARE_SCKS_PRESENT__1 + uint8_t ScksClampMode; + uint8_t padding[2]; +#else + uint8_t padding[3]; +#endif + 
+ uint16_t FpsAlpha; + uint16_t DeltaTime; + uint32_t CurrentFps; + uint32_t FilteredFps; + uint32_t FrameCount; + uint32_t FrameCountLast; + uint16_t FpsTargetScalar; + uint16_t FpsWaterfallLimitScalar; + uint16_t FpsAlphaScalar; + uint16_t spare8; + SMU7_HystController_Data HystControllerData; +}; + +typedef struct SMU7_LocalDpmScoreboard SMU7_LocalDpmScoreboard; + +#define SMU7_MAX_VOLTAGE_CLIENTS 12 + +typedef uint8_t (*VoltageChangeHandler_t)(uint16_t, uint8_t); + +#define VDDC_MASK 0x00007FFF +#define VDDC_SHIFT 0 +#define VDDCI_MASK 0x3FFF8000 +#define VDDCI_SHIFT 15 +#define PHASES_MASK 0xC0000000 +#define PHASES_SHIFT 30 + +typedef uint32_t SMU_VoltageLevel; + +struct SMU7_VoltageScoreboard { + SMU_VoltageLevel TargetVoltage; + uint16_t MaxVid; + uint8_t HighestVidOffset; + uint8_t CurrentVidOffset; + + uint16_t CurrentVddc; + uint16_t CurrentVddci; + + uint8_t ControllerBusy; + uint8_t CurrentVid; + uint8_t CurrentVddciVid; + uint8_t padding; + + SMU_VoltageLevel RequestedVoltage[SMU7_MAX_VOLTAGE_CLIENTS]; + SMU_VoltageLevel TargetVoltageState; + uint8_t EnabledRequest[SMU7_MAX_VOLTAGE_CLIENTS]; + + uint8_t padding2; + uint8_t padding3; + uint8_t ControllerEnable; + uint8_t ControllerRunning; + uint16_t CurrentStdVoltageHiSidd; + uint16_t CurrentStdVoltageLoSidd; + uint8_t OverrideVoltage; + uint8_t padding4; + uint8_t padding5; + uint8_t CurrentPhases; + + VoltageChangeHandler_t ChangeVddc; + VoltageChangeHandler_t ChangeVddci; + VoltageChangeHandler_t ChangePhase; + VoltageChangeHandler_t ChangeMvdd; + + VoltageChangeHandler_t functionLinks[6]; + + uint16_t * VddcFollower1; + int16_t Driver_OD_RequestedVidOffset1; + int16_t Driver_OD_RequestedVidOffset2; +}; + +typedef struct SMU7_VoltageScoreboard SMU7_VoltageScoreboard; + +#define SMU7_MAX_PCIE_LINK_SPEEDS 3 + +struct SMU7_PCIeLinkSpeedScoreboard { + uint8_t DpmEnable; + uint8_t DpmRunning; + uint8_t DpmForce; + uint8_t DpmForceLevel; + + uint8_t CurrentLinkSpeed; + uint8_t EnabledLevelsChange; + uint16_t AutoDpmInterval; + + uint16_t AutoDpmRange; + uint16_t AutoDpmCount; + + uint8_t DpmMode; + uint8_t AcpiReq; + uint8_t AcpiAck; + uint8_t CurrentLinkLevel; +}; + +typedef struct SMU7_PCIeLinkSpeedScoreboard SMU7_PCIeLinkSpeedScoreboard; + +#define SMU7_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16 +#define SMU7_LKGE_LUT_NUM_OF_VOLT_ENTRIES 16 + +#define SMU7_SCALE_I 7 +#define SMU7_SCALE_R 12 + +struct SMU7_PowerScoreboard { + uint32_t GpuPower; + + uint32_t VddcPower; + uint32_t VddcVoltage; + uint32_t VddcCurrent; + + uint32_t VddciPower; + uint32_t VddciVoltage; + uint32_t VddciCurrent; + + uint32_t RocPower; + + uint16_t Telemetry_1_slope; + uint16_t Telemetry_2_slope; + int32_t Telemetry_1_offset; + int32_t Telemetry_2_offset; + + uint8_t MCLK_patch_flag; + uint8_t reserved[3]; +}; + +typedef struct SMU7_PowerScoreboard SMU7_PowerScoreboard; + +#define SMU7_SCLK_DPM_CONFIG_MASK 0x01 +#define SMU7_VOLTAGE_CONTROLLER_CONFIG_MASK 0x02 +#define SMU7_THERMAL_CONTROLLER_CONFIG_MASK 0x04 +#define SMU7_MCLK_DPM_CONFIG_MASK 0x08 +#define SMU7_UVD_DPM_CONFIG_MASK 0x10 +#define SMU7_VCE_DPM_CONFIG_MASK 0x20 +#define SMU7_ACP_DPM_CONFIG_MASK 0x40 +#define SMU7_SAMU_DPM_CONFIG_MASK 0x80 +#define SMU7_PCIEGEN_DPM_CONFIG_MASK 0x100 + +#define SMU7_ACP_MCLK_HANDSHAKE_DISABLE 0x00000001 +#define SMU7_ACP_SCLK_HANDSHAKE_DISABLE 0x00000002 +#define SMU7_UVD_MCLK_HANDSHAKE_DISABLE 0x00000100 +#define SMU7_UVD_SCLK_HANDSHAKE_DISABLE 0x00000200 +#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE 0x00010000 +#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000 + 
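The scoreboard structures above are shared with the SMU firmware, and several of them carry voltages as SMU_VoltageLevel: a single 32-bit word that packs VDDC, VDDCI, and the phase count using the VDDC/VDDCI/PHASES mask and shift pairs defined a few lines up. A minimal standalone sketch of that encoding follows (the helper name and the example voltages are illustrative, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

/* Mask/shift pairs as defined in smu75.h above. */
#define VDDC_MASK    0x00007FFF
#define VDDC_SHIFT   0
#define VDDCI_MASK   0x3FFF8000
#define VDDCI_SHIFT  15
#define PHASES_MASK  0xC0000000
#define PHASES_SHIFT 30

typedef uint32_t SMU_VoltageLevel;

/* Hypothetical helper: pack a VDDC/VDDCI pair and a phase count into one level word. */
static SMU_VoltageLevel pack_voltage_level(uint16_t vddc, uint16_t vddci, uint8_t phases)
{
	return (((uint32_t)vddc << VDDC_SHIFT) & VDDC_MASK) |
	       (((uint32_t)vddci << VDDCI_SHIFT) & VDDCI_MASK) |
	       (((uint32_t)phases << PHASES_SHIFT) & PHASES_MASK);
}

int main(void)
{
	/* Illustrative values only; real levels come from the driver's voltage tables. */
	SMU_VoltageLevel lvl = pack_voltage_level(1150, 950, 2);

	printf("vddc=%u vddci=%u phases=%u\n",
	       (lvl & VDDC_MASK) >> VDDC_SHIFT,
	       (lvl & VDDCI_MASK) >> VDDCI_SHIFT,
	       (lvl & PHASES_MASK) >> PHASES_SHIFT);
	return 0;
}

Because these headers are compiled under #pragma pack(push, 1), the driver-side and firmware-side layouts match byte for byte; the mask/shift pairs are the contract for the packed word itself.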
+struct SMU75_SoftRegisters { + uint32_t RefClockFrequency; + uint32_t PmTimerPeriod; + uint32_t FeatureEnables; +#if defined (SMU__DGPU_ONLY) + uint32_t PreVBlankGap; + uint32_t VBlankTimeout; + uint32_t TrainTimeGap; + uint32_t MvddSwitchTime; + uint32_t LongestAcpiTrainTime; + uint32_t AcpiDelay; + uint32_t G5TrainTime; + uint32_t DelayMpllPwron; + uint32_t VoltageChangeTimeout; +#endif + uint32_t HandshakeDisables; + + uint8_t DisplayPhy1Config; + uint8_t DisplayPhy2Config; + uint8_t DisplayPhy3Config; + uint8_t DisplayPhy4Config; + + uint8_t DisplayPhy5Config; + uint8_t DisplayPhy6Config; + uint8_t DisplayPhy7Config; + uint8_t DisplayPhy8Config; + + uint32_t AverageGraphicsActivity; + uint32_t AverageMemoryActivity; + uint32_t AverageGioActivity; + + uint8_t SClkDpmEnabledLevels; + uint8_t MClkDpmEnabledLevels; + uint8_t LClkDpmEnabledLevels; + uint8_t PCIeDpmEnabledLevels; + + uint8_t UVDDpmEnabledLevels; + uint8_t SAMUDpmEnabledLevels; + uint8_t ACPDpmEnabledLevels; + uint8_t VCEDpmEnabledLevels; + + uint32_t DRAM_LOG_ADDR_H; + uint32_t DRAM_LOG_ADDR_L; + uint32_t DRAM_LOG_PHY_ADDR_H; + uint32_t DRAM_LOG_PHY_ADDR_L; + uint32_t DRAM_LOG_BUFF_SIZE; + uint32_t UlvEnterCount; + uint32_t UlvTime; + uint32_t UcodeLoadStatus; + uint32_t AllowMvddSwitch; + uint8_t Activity_Weight; + uint8_t Reserved8[3]; +}; + +typedef struct SMU75_SoftRegisters SMU75_SoftRegisters; + +struct SMU75_Firmware_Header { + uint32_t Digest[5]; + uint32_t Version; + uint32_t HeaderSize; + uint32_t Flags; + uint32_t EntryPoint; + uint32_t CodeSize; + uint32_t ImageSize; + + uint32_t Rtos; + uint32_t SoftRegisters; + uint32_t DpmTable; + uint32_t FanTable; + uint32_t CacConfigTable; + uint32_t CacStatusTable; + uint32_t mcRegisterTable; + uint32_t mcArbDramTimingTable; + uint32_t PmFuseTable; + uint32_t Globals; + uint32_t ClockStretcherTable; + uint32_t VftTable; + uint32_t Reserved1; + uint32_t AvfsCksOff_AvfsGbvTable; + uint32_t AvfsCksOff_BtcGbvTable; + uint32_t MM_AvfsTable; + uint32_t PowerSharingTable; + uint32_t AvfsTable; + uint32_t AvfsCksOffGbvTable; + uint32_t AvfsMeanNSigma; + uint32_t AvfsSclkOffsetTable; + uint32_t Reserved[12]; + uint32_t Signature; +}; + +typedef struct SMU75_Firmware_Header SMU75_Firmware_Header; + +#define SMU7_FIRMWARE_HEADER_LOCATION 0x20000 + +enum DisplayConfig { + PowerDown = 1, + DP54x4, + DP54x2, + DP54x1, + DP27x4, + DP27x2, + DP27x1, + HDMI297, + HDMI162, + LVDS, + DP324x4, + DP324x2, + DP324x1 +}; + +#define MC_BLOCK_COUNT 1 +#define CPL_BLOCK_COUNT 5 +#define SE_BLOCK_COUNT 15 +#define GC_BLOCK_COUNT 24 + +struct SMU7_Local_Cac { + uint8_t BlockId; + uint8_t SignalId; + uint8_t Threshold; + uint8_t Padding; +}; + +typedef struct SMU7_Local_Cac SMU7_Local_Cac; + +struct SMU7_Local_Cac_Table { + SMU7_Local_Cac CplLocalCac[CPL_BLOCK_COUNT]; + SMU7_Local_Cac McLocalCac[MC_BLOCK_COUNT]; + SMU7_Local_Cac SeLocalCac[SE_BLOCK_COUNT]; + SMU7_Local_Cac GcLocalCac[GC_BLOCK_COUNT]; +}; + +typedef struct SMU7_Local_Cac_Table SMU7_Local_Cac_Table; + +#pragma pack(pop) + +#define CG_SYS_BITMASK_FIRST_BIT 0 +#define CG_SYS_BITMASK_LAST_BIT 10 +#define CG_SYS_BIF_MGLS_SHIFT 0 +#define CG_SYS_ROM_SHIFT 1 +#define CG_SYS_MC_MGCG_SHIFT 2 +#define CG_SYS_MC_MGLS_SHIFT 3 +#define CG_SYS_SDMA_MGCG_SHIFT 4 +#define CG_SYS_SDMA_MGLS_SHIFT 5 +#define CG_SYS_DRM_MGCG_SHIFT 6 +#define CG_SYS_HDP_MGCG_SHIFT 7 +#define CG_SYS_HDP_MGLS_SHIFT 8 +#define CG_SYS_DRM_MGLS_SHIFT 9 +#define CG_SYS_BIF_MGCG_SHIFT 10 + +#define CG_SYS_BIF_MGLS_MASK 0x1 +#define CG_SYS_ROM_MASK 0x2 +#define 
CG_SYS_MC_MGCG_MASK 0x4 +#define CG_SYS_MC_MGLS_MASK 0x8 +#define CG_SYS_SDMA_MGCG_MASK 0x10 +#define CG_SYS_SDMA_MGLS_MASK 0x20 +#define CG_SYS_DRM_MGCG_MASK 0x40 +#define CG_SYS_HDP_MGCG_MASK 0x80 +#define CG_SYS_HDP_MGLS_MASK 0x100 +#define CG_SYS_DRM_MGLS_MASK 0x200 +#define CG_SYS_BIF_MGCG_MASK 0x400 + +#define CG_GFX_BITMASK_FIRST_BIT 16 +#define CG_GFX_BITMASK_LAST_BIT 24 + +#define CG_GFX_CGCG_SHIFT 16 +#define CG_GFX_CGLS_SHIFT 17 +#define CG_CPF_MGCG_SHIFT 18 +#define CG_RLC_MGCG_SHIFT 19 +#define CG_GFX_OTHERS_MGCG_SHIFT 20 +#define CG_GFX_3DCG_SHIFT 21 +#define CG_GFX_3DLS_SHIFT 22 +#define CG_GFX_RLC_LS_SHIFT 23 +#define CG_GFX_CP_LS_SHIFT 24 + +#define CG_GFX_CGCG_MASK 0x00010000 +#define CG_GFX_CGLS_MASK 0x00020000 +#define CG_CPF_MGCG_MASK 0x00040000 +#define CG_RLC_MGCG_MASK 0x00080000 +#define CG_GFX_OTHERS_MGCG_MASK 0x00100000 +#define CG_GFX_3DCG_MASK 0x00200000 +#define CG_GFX_3DLS_MASK 0x00400000 +#define CG_GFX_RLC_LS_MASK 0x00800000 +#define CG_GFX_CP_LS_MASK 0x01000000 + + +#define VRCONF_VDDC_MASK 0x000000FF +#define VRCONF_VDDC_SHIFT 0 +#define VRCONF_VDDGFX_MASK 0x0000FF00 +#define VRCONF_VDDGFX_SHIFT 8 +#define VRCONF_VDDCI_MASK 0x00FF0000 +#define VRCONF_VDDCI_SHIFT 16 +#define VRCONF_MVDD_MASK 0xFF000000 +#define VRCONF_MVDD_SHIFT 24 + +#define VR_MERGED_WITH_VDDC 0 +#define VR_SVI2_PLANE_1 1 +#define VR_SVI2_PLANE_2 2 +#define VR_SMIO_PATTERN_1 3 +#define VR_SMIO_PATTERN_2 4 +#define VR_STATIC_VOLTAGE 5 + +#define CLOCK_STRETCHER_MAX_ENTRIES 0x4 +#define CKS_LOOKUPTable_MAX_ENTRIES 0x4 + +#define CLOCK_STRETCHER_SETTING_DDT_MASK 0x01 +#define CLOCK_STRETCHER_SETTING_DDT_SHIFT 0x0 +#define CLOCK_STRETCHER_SETTING_STRETCH_AMOUNT_MASK 0x1E +#define CLOCK_STRETCHER_SETTING_STRETCH_AMOUNT_SHIFT 0x1 +#define CLOCK_STRETCHER_SETTING_ENABLE_MASK 0x80 +#define CLOCK_STRETCHER_SETTING_ENABLE_SHIFT 0x7 + +struct SMU_ClockStretcherDataTableEntry { + uint8_t minVID; + uint8_t maxVID; + + uint16_t setting; +}; +typedef struct SMU_ClockStretcherDataTableEntry SMU_ClockStretcherDataTableEntry; + +struct SMU_ClockStretcherDataTable { + SMU_ClockStretcherDataTableEntry ClockStretcherDataTableEntry[CLOCK_STRETCHER_MAX_ENTRIES]; +}; +typedef struct SMU_ClockStretcherDataTable SMU_ClockStretcherDataTable; + +struct SMU_CKS_LOOKUPTableEntry { + uint16_t minFreq; + uint16_t maxFreq; + + uint8_t setting; + uint8_t padding[3]; +}; +typedef struct SMU_CKS_LOOKUPTableEntry SMU_CKS_LOOKUPTableEntry; + +struct SMU_CKS_LOOKUPTable { + SMU_CKS_LOOKUPTableEntry CKS_LOOKUPTableEntry[CKS_LOOKUPTable_MAX_ENTRIES]; +}; +typedef struct SMU_CKS_LOOKUPTable SMU_CKS_LOOKUPTable; + +struct AgmAvfsData_t { + uint16_t avgPsmCount[28]; + uint16_t minPsmCount[28]; +}; +typedef struct AgmAvfsData_t AgmAvfsData_t; + +enum VFT_COLUMNS { + SCLK0, + SCLK1, + SCLK2, + SCLK3, + SCLK4, + SCLK5, + SCLK6, + SCLK7, + + NUM_VFT_COLUMNS +}; +enum { + SCS_FUSE_T0, + SCS_FUSE_T1, + NUM_SCS_FUSE_TEMPERATURE +}; +enum { + SCKS_ON, + SCKS_OFF, + NUM_SCKS_STATE_TYPES +}; + +#define VFT_TABLE_DEFINED + +#define TEMP_RANGE_MAXSTEPS 12 +struct VFT_CELL_t { + uint16_t Voltage; +}; + +typedef struct VFT_CELL_t VFT_CELL_t; +#ifdef SMU__FIRMWARE_SCKS_PRESENT__1 +struct SCS_CELL_t { + uint16_t PsmCnt[NUM_SCKS_STATE_TYPES]; +}; +typedef struct SCS_CELL_t SCS_CELL_t; +#endif + +struct VFT_TABLE_t { + VFT_CELL_t Cell[TEMP_RANGE_MAXSTEPS][NUM_VFT_COLUMNS]; + uint16_t AvfsGbv [NUM_VFT_COLUMNS]; + uint16_t BtcGbv [NUM_VFT_COLUMNS]; + int16_t Temperature [TEMP_RANGE_MAXSTEPS]; + +#ifdef SMU__FIRMWARE_SCKS_PRESENT__1 + SCS_CELL_t 
ScksCell[TEMP_RANGE_MAXSTEPS][NUM_VFT_COLUMNS]; +#endif + + uint8_t NumTemperatureSteps; + uint8_t padding[3]; +}; +typedef struct VFT_TABLE_t VFT_TABLE_t; + +#define BTCGB_VDROOP_TABLE_MAX_ENTRIES 2 +#define AVFSGB_VDROOP_TABLE_MAX_ENTRIES 2 + +struct GB_VDROOP_TABLE_t { + int32_t a0; + int32_t a1; + int32_t a2; + uint32_t spare; +}; +typedef struct GB_VDROOP_TABLE_t GB_VDROOP_TABLE_t; + +struct SMU_QuadraticCoeffs { + int32_t m1; + int32_t b; + + int16_t m2; + uint8_t m1_shift; + uint8_t m2_shift; +}; +typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs; + +struct AVFS_Margin_t { + VFT_CELL_t Cell[NUM_VFT_COLUMNS]; +}; +typedef struct AVFS_Margin_t AVFS_Margin_t; + +struct AVFS_CksOff_Gbv_t { + VFT_CELL_t Cell[NUM_VFT_COLUMNS]; +}; +typedef struct AVFS_CksOff_Gbv_t AVFS_CksOff_Gbv_t; + +struct AVFS_CksOff_AvfsGbv_t { + VFT_CELL_t Cell[NUM_VFT_COLUMNS]; +}; +typedef struct AVFS_CksOff_AvfsGbv_t AVFS_CksOff_AvfsGbv_t; + +struct AVFS_CksOff_BtcGbv_t { + VFT_CELL_t Cell[NUM_VFT_COLUMNS]; +}; +typedef struct AVFS_CksOff_BtcGbv_t AVFS_CksOff_BtcGbv_t; + +struct AVFS_meanNsigma_t { + uint32_t Aconstant[3]; + uint16_t DC_tol_sigma; + uint16_t Platform_mean; + uint16_t Platform_sigma; + uint16_t PSM_Age_CompFactor; + uint8_t Static_Voltage_Offset[NUM_VFT_COLUMNS]; +}; +typedef struct AVFS_meanNsigma_t AVFS_meanNsigma_t; + +struct AVFS_Sclk_Offset_t { + uint16_t Sclk_Offset[8]; +}; +typedef struct AVFS_Sclk_Offset_t AVFS_Sclk_Offset_t; + +struct Power_Sharing_t { + uint32_t EnergyCounter; + uint32_t EngeryThreshold; + uint64_t AM_SCLK_CNT; + uint64_t AM_0_BUSY_CNT; +}; +typedef struct Power_Sharing_t Power_Sharing_t; + + +#endif + + diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h new file mode 100644 index 000000000000..b64e58a22ddf --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h @@ -0,0 +1,886 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef SMU75_DISCRETE_H +#define SMU75_DISCRETE_H + +#include "smu75.h" + +#pragma pack(push, 1) + +#define NUM_SCLK_RANGE 8 + +#define VCO_3_6 1 +#define VCO_2_4 3 + +#define POSTDIV_DIV_BY_1 0 +#define POSTDIV_DIV_BY_2 1 +#define POSTDIV_DIV_BY_4 2 +#define POSTDIV_DIV_BY_8 3 +#define POSTDIV_DIV_BY_16 4 + +struct sclkFcwRange_t { + uint8_t vco_setting; /* 1: 3-6GHz, 3: 2-4GHz */ + uint8_t postdiv; /* divide by 2^n */ + uint16_t fcw_pcc; + uint16_t fcw_trans_upper; + uint16_t fcw_trans_lower; +}; +typedef struct sclkFcwRange_t sclkFcwRange_t; + +struct SMIO_Pattern { + uint16_t Voltage; + uint8_t Smio; + uint8_t padding; +}; + +typedef struct SMIO_Pattern SMIO_Pattern; + +struct SMIO_Table { + SMIO_Pattern Pattern[SMU_MAX_SMIO_LEVELS]; +}; + +typedef struct SMIO_Table SMIO_Table; + +struct SMU_SclkSetting { + uint32_t SclkFrequency; + uint16_t Fcw_int; + uint16_t Fcw_frac; + uint16_t Pcc_fcw_int; + uint8_t PllRange; + uint8_t SSc_En; + uint16_t Sclk_slew_rate; + uint16_t Pcc_up_slew_rate; + uint16_t Pcc_down_slew_rate; + uint16_t Fcw1_int; + uint16_t Fcw1_frac; + uint16_t Sclk_ss_slew_rate; +}; +typedef struct SMU_SclkSetting SMU_SclkSetting; + +struct SMU75_Discrete_GraphicsLevel { + SMU_VoltageLevel MinVoltage; + + uint8_t pcieDpmLevel; + uint8_t DeepSleepDivId; + uint16_t ActivityLevel; + + uint32_t CgSpllFuncCntl3; + uint32_t CgSpllFuncCntl4; + uint32_t CcPwrDynRm; + uint32_t CcPwrDynRm1; + + uint8_t SclkDid; + uint8_t padding; + uint8_t EnabledForActivity; + uint8_t EnabledForThrottle; + uint8_t UpHyst; + uint8_t DownHyst; + uint8_t VoltageDownHyst; + uint8_t PowerThrottle; + + SMU_SclkSetting SclkSetting; + + uint8_t ScksStretchThreshVid[NUM_SCKS_STATE_TYPES]; + uint16_t Padding; +}; + +typedef struct SMU75_Discrete_GraphicsLevel SMU75_Discrete_GraphicsLevel; + +struct SMU75_Discrete_ACPILevel { + uint32_t Flags; + SMU_VoltageLevel MinVoltage; + uint32_t SclkFrequency; + uint8_t SclkDid; + uint8_t DisplayWatermark; + uint8_t DeepSleepDivId; + uint8_t padding; + uint32_t CcPwrDynRm; + uint32_t CcPwrDynRm1; + + SMU_SclkSetting SclkSetting; +}; + +typedef struct SMU75_Discrete_ACPILevel SMU75_Discrete_ACPILevel; + +struct SMU75_Discrete_Ulv { + uint32_t CcPwrDynRm; + uint32_t CcPwrDynRm1; + uint16_t VddcOffset; + uint8_t VddcOffsetVid; + uint8_t VddcPhase; + uint16_t BifSclkDfs; + uint16_t Reserved; +}; + +typedef struct SMU75_Discrete_Ulv SMU75_Discrete_Ulv; + +struct SMU75_Discrete_MemoryLevel { + SMU_VoltageLevel MinVoltage; + uint32_t MinMvdd; + + uint32_t MclkFrequency; + + uint8_t StutterEnable; + uint8_t EnabledForThrottle; + uint8_t EnabledForActivity; + uint8_t padding_0; + + uint8_t UpHyst; + uint8_t DownHyst; + uint8_t VoltageDownHyst; + uint8_t padding_1; + + uint16_t ActivityLevel; + uint8_t DisplayWatermark; + uint8_t padding_2; + + uint16_t Fcw_int; + uint16_t Fcw_frac; + uint8_t Postdiv; + uint8_t padding_3[3]; +}; + +typedef struct SMU75_Discrete_MemoryLevel SMU75_Discrete_MemoryLevel; + +struct SMU75_Discrete_LinkLevel { + uint8_t PcieGenSpeed; + uint8_t PcieLaneCount; + uint8_t EnabledForActivity; + uint8_t SPC; + uint32_t DownThreshold; + uint32_t UpThreshold; + uint16_t BifSclkDfs; + uint16_t Reserved; +}; + +typedef struct SMU75_Discrete_LinkLevel SMU75_Discrete_LinkLevel; + + +/* MC ARB DRAM Timing registers. 
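One entry per (SCLK DPM state, MCLK DPM level) pair, matching the entries[][] dimensions below.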
*/ +struct SMU75_Discrete_MCArbDramTimingTableEntry { + uint32_t McArbDramTiming; + uint32_t McArbDramTiming2; + uint32_t McArbBurstTime; + uint32_t McArbRfshRate; + uint32_t McArbMisc3; +}; + +typedef struct SMU75_Discrete_MCArbDramTimingTableEntry SMU75_Discrete_MCArbDramTimingTableEntry; + +struct SMU75_Discrete_MCArbDramTimingTable { + SMU75_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS]; +}; + +typedef struct SMU75_Discrete_MCArbDramTimingTable SMU75_Discrete_MCArbDramTimingTable; + +/* UVD VCLK/DCLK state (level) definition. */ +struct SMU75_Discrete_UvdLevel { + uint32_t VclkFrequency; + uint32_t DclkFrequency; + SMU_VoltageLevel MinVoltage; + uint8_t VclkDivider; + uint8_t DclkDivider; + uint8_t padding[2]; +}; + +typedef struct SMU75_Discrete_UvdLevel SMU75_Discrete_UvdLevel; + +/* Clocks for other external blocks (VCE, ACP, SAMU). */ +struct SMU75_Discrete_ExtClkLevel { + uint32_t Frequency; + SMU_VoltageLevel MinVoltage; + uint8_t Divider; + uint8_t padding[3]; +}; + +typedef struct SMU75_Discrete_ExtClkLevel SMU75_Discrete_ExtClkLevel; + +struct SMU75_Discrete_StateInfo { + uint32_t SclkFrequency; + uint32_t MclkFrequency; + uint32_t VclkFrequency; + uint32_t DclkFrequency; + uint32_t SamclkFrequency; + uint32_t AclkFrequency; + uint32_t EclkFrequency; + uint16_t MvddVoltage; + uint16_t padding16; + uint8_t DisplayWatermark; + uint8_t McArbIndex; + uint8_t McRegIndex; + uint8_t SeqIndex; + uint8_t SclkDid; + int8_t SclkIndex; + int8_t MclkIndex; + uint8_t PCIeGen; +}; + +typedef struct SMU75_Discrete_StateInfo SMU75_Discrete_StateInfo; + +struct SMU75_Discrete_DpmTable { + SMU75_PIDController GraphicsPIDController; + SMU75_PIDController MemoryPIDController; + SMU75_PIDController LinkPIDController; + + uint32_t SystemFlags; + + uint32_t VRConfig; + uint32_t SmioMask1; + uint32_t SmioMask2; + SMIO_Table SmioTable1; + SMIO_Table SmioTable2; + + uint32_t MvddLevelCount; + + uint8_t BapmVddcVidHiSidd [SMU75_MAX_LEVELS_VDDC]; + uint8_t BapmVddcVidLoSidd [SMU75_MAX_LEVELS_VDDC]; + uint8_t BapmVddcVidHiSidd2 [SMU75_MAX_LEVELS_VDDC]; + + uint8_t GraphicsDpmLevelCount; + uint8_t MemoryDpmLevelCount; + uint8_t LinkLevelCount; + uint8_t MasterDeepSleepControl; + + uint8_t UvdLevelCount; + uint8_t VceLevelCount; + uint8_t AcpLevelCount; + uint8_t SamuLevelCount; + + uint8_t ThermOutGpio; + uint8_t ThermOutPolarity; + uint8_t ThermOutMode; + uint8_t BootPhases; + + uint8_t VRHotLevel; + uint8_t LdoRefSel; + + uint8_t Reserved1[2]; + + uint16_t FanStartTemperature; + uint16_t FanStopTemperature; + + uint16_t MaxVoltage; + uint16_t Reserved2; + uint32_t Reserved; + + SMU75_Discrete_GraphicsLevel GraphicsLevel [SMU75_MAX_LEVELS_GRAPHICS]; + SMU75_Discrete_MemoryLevel MemoryACPILevel; + SMU75_Discrete_MemoryLevel MemoryLevel [SMU75_MAX_LEVELS_MEMORY]; + SMU75_Discrete_LinkLevel LinkLevel [SMU75_MAX_LEVELS_LINK]; + SMU75_Discrete_ACPILevel ACPILevel; + SMU75_Discrete_UvdLevel UvdLevel [SMU75_MAX_LEVELS_UVD]; + SMU75_Discrete_ExtClkLevel VceLevel [SMU75_MAX_LEVELS_VCE]; + SMU75_Discrete_ExtClkLevel AcpLevel [SMU75_MAX_LEVELS_ACP]; + SMU75_Discrete_ExtClkLevel SamuLevel [SMU75_MAX_LEVELS_SAMU]; + SMU75_Discrete_Ulv Ulv; + + uint8_t DisplayWatermark [SMU75_MAX_LEVELS_MEMORY][SMU75_MAX_LEVELS_GRAPHICS]; + + uint32_t SclkStepSize; + uint32_t Smio [SMU75_MAX_ENTRIES_SMIO]; + + uint8_t UvdBootLevel; + uint8_t VceBootLevel; + uint8_t AcpBootLevel; + uint8_t SamuBootLevel; + + uint8_t GraphicsBootLevel; + uint8_t GraphicsVoltageChangeEnable; + uint8_t 
GraphicsThermThrottleEnable; + uint8_t GraphicsInterval; + + uint8_t VoltageInterval; + uint8_t ThermalInterval; + uint16_t TemperatureLimitHigh; + + uint16_t TemperatureLimitLow; + uint8_t MemoryBootLevel; + uint8_t MemoryVoltageChangeEnable; + + uint16_t BootMVdd; + uint8_t MemoryInterval; + uint8_t MemoryThermThrottleEnable; + + uint16_t VoltageResponseTime; + uint16_t PhaseResponseTime; + + uint8_t PCIeBootLinkLevel; + uint8_t PCIeGenInterval; + uint8_t DTEInterval; + uint8_t DTEMode; + + uint8_t SVI2Enable; + uint8_t VRHotGpio; + uint8_t AcDcGpio; + uint8_t ThermGpio; + + uint16_t PPM_PkgPwrLimit; + uint16_t PPM_TemperatureLimit; + + uint16_t DefaultTdp; + uint16_t TargetTdp; + + uint16_t FpsHighThreshold; + uint16_t FpsLowThreshold; + + uint16_t BAPMTI_R [SMU75_DTE_ITERATIONS][SMU75_DTE_SOURCES][SMU75_DTE_SINKS]; + uint16_t BAPMTI_RC [SMU75_DTE_ITERATIONS][SMU75_DTE_SOURCES][SMU75_DTE_SINKS]; + + uint16_t TemperatureLimitEdge; + uint16_t TemperatureLimitHotspot; + + uint16_t BootVddc; + uint16_t BootVddci; + + uint16_t FanGainEdge; + uint16_t FanGainHotspot; + + uint32_t LowSclkInterruptThreshold; + uint32_t VddGfxReChkWait; + + uint8_t ClockStretcherAmount; + uint8_t Sclk_CKS_masterEn0_7; + uint8_t Sclk_CKS_masterEn8_15; + uint8_t DPMFreezeAndForced; + + uint8_t Sclk_voltageOffset[8]; + + SMU_ClockStretcherDataTable ClockStretcherDataTable; + SMU_CKS_LOOKUPTable CKS_LOOKUPTable; + + uint32_t CurrSclkPllRange; + sclkFcwRange_t SclkFcwRangeTable[NUM_SCLK_RANGE]; + + GB_VDROOP_TABLE_t BTCGB_VDROOP_TABLE[BTCGB_VDROOP_TABLE_MAX_ENTRIES]; + SMU_QuadraticCoeffs AVFSGB_FUSE_TABLE[AVFSGB_VDROOP_TABLE_MAX_ENTRIES]; +}; + +typedef struct SMU75_Discrete_DpmTable SMU75_Discrete_DpmTable; + +struct SMU75_Discrete_FanTable { + uint16_t FdoMode; + int16_t TempMin; + int16_t TempMed; + int16_t TempMax; + int16_t Slope1; + int16_t Slope2; + int16_t FdoMin; + int16_t HystUp; + int16_t HystDown; + int16_t HystSlope; + int16_t TempRespLim; + int16_t TempCurr; + int16_t SlopeCurr; + int16_t PwmCurr; + uint32_t RefreshPeriod; + int16_t FdoMax; + uint8_t TempSrc; + int8_t Padding; +}; + +typedef struct SMU75_Discrete_FanTable SMU75_Discrete_FanTable; + +#define SMU7_DISCRETE_GPIO_SCLK_DEBUG 4 +#define SMU7_DISCRETE_GPIO_SCLK_DEBUG_BIT (0x1 << SMU7_DISCRETE_GPIO_SCLK_DEBUG) + + + +struct SMU7_MclkDpmScoreboard { + uint32_t PercentageBusy; + + int32_t PIDError; + int32_t PIDIntegral; + int32_t PIDOutput; + + uint32_t SigmaDeltaAccum; + uint32_t SigmaDeltaOutput; + uint32_t SigmaDeltaLevel; + + uint32_t UtilizationSetpoint; + + uint8_t TdpClampMode; + uint8_t TdcClampMode; + uint8_t ThermClampMode; + uint8_t VoltageBusy; + + int8_t CurrLevel; + int8_t TargLevel; + uint8_t LevelChangeInProgress; + uint8_t UpHyst; + + uint8_t DownHyst; + uint8_t VoltageDownHyst; + uint8_t DpmEnable; + uint8_t DpmRunning; + + uint8_t DpmForce; + uint8_t DpmForceLevel; + uint8_t padding2; + uint8_t McArbIndex; + + uint32_t MinimumPerfMclk; + + uint8_t AcpiReq; + uint8_t AcpiAck; + uint8_t MclkSwitchInProgress; + uint8_t MclkSwitchCritical; + + uint8_t IgnoreVBlank; + uint8_t TargetMclkIndex; + uint8_t TargetMvddIndex; + uint8_t MclkSwitchResult; + + uint16_t VbiFailureCount; + uint8_t VbiWaitCounter; + uint8_t EnabledLevelsChange; + + uint16_t LevelResidencyCounters [SMU75_MAX_LEVELS_MEMORY]; + uint16_t LevelSwitchCounters [SMU75_MAX_LEVELS_MEMORY]; + + void (*TargetStateCalculator)(uint8_t); + void (*SavedTargetStateCalculator)(uint8_t); + + uint16_t AutoDpmInterval; + uint16_t AutoDpmRange; + + uint16_t VbiTimeoutCount; + 
uint16_t MclkSwitchingTime; + + uint8_t fastSwitch; + uint8_t Save_PIC_VDDGFX_EXIT; + uint8_t Save_PIC_VDDGFX_ENTER; + uint8_t VbiTimeout; + + uint32_t HbmTempRegBackup; +}; + +typedef struct SMU7_MclkDpmScoreboard SMU7_MclkDpmScoreboard; + +struct SMU7_UlvScoreboard { + uint8_t EnterUlv; + uint8_t ExitUlv; + uint8_t UlvActive; + uint8_t WaitingForUlv; + uint8_t UlvEnable; + uint8_t UlvRunning; + uint8_t UlvMasterEnable; + uint8_t padding; + uint32_t UlvAbortedCount; + uint32_t UlvTimeStamp; +}; + +typedef struct SMU7_UlvScoreboard SMU7_UlvScoreboard; + +struct VddgfxSavedRegisters { + uint32_t GPU_DBG[3]; + uint32_t MEC_BaseAddress_Hi; + uint32_t MEC_BaseAddress_Lo; + uint32_t THM_TMON0_CTRL2__RDIR_PRESENT; + uint32_t THM_TMON1_CTRL2__RDIR_PRESENT; + uint32_t CP_INT_CNTL; +}; + +typedef struct VddgfxSavedRegisters VddgfxSavedRegisters; + +struct SMU7_VddGfxScoreboard { + uint8_t VddGfxEnable; + uint8_t VddGfxActive; + uint8_t VPUResetOccured; + uint8_t padding; + + uint32_t VddGfxEnteredCount; + uint32_t VddGfxAbortedCount; + + uint32_t VddGfxVid; + + VddgfxSavedRegisters SavedRegisters; +}; + +typedef struct SMU7_VddGfxScoreboard SMU7_VddGfxScoreboard; + +struct SMU7_TdcLimitScoreboard { + uint8_t Enable; + uint8_t Running; + uint16_t Alpha; + uint32_t FilteredIddc; + uint32_t IddcLimit; + uint32_t IddcHyst; + SMU7_HystController_Data HystControllerData; +}; + +typedef struct SMU7_TdcLimitScoreboard SMU7_TdcLimitScoreboard; + +struct SMU7_PkgPwrLimitScoreboard { + uint8_t Enable; + uint8_t Running; + uint16_t Alpha; + uint32_t FilteredPkgPwr; + uint32_t Limit; + uint32_t Hyst; + uint32_t LimitFromDriver; + uint8_t PowerSharingEnabled; + uint8_t PowerSharingCounter; + uint8_t PowerSharingINTEnabled; + uint8_t GFXActivityCounterEnabled; + uint32_t EnergyCount; + uint32_t PSACTCount; + uint8_t RollOverRequired; + uint8_t RollOverCount; + uint8_t padding[2]; + SMU7_HystController_Data HystControllerData; +}; + +typedef struct SMU7_PkgPwrLimitScoreboard SMU7_PkgPwrLimitScoreboard; + +struct SMU7_BapmScoreboard { + uint32_t source_powers[SMU75_DTE_SOURCES]; + uint32_t source_powers_last[SMU75_DTE_SOURCES]; + int32_t entity_temperatures[SMU75_NUM_GPU_TES]; + int32_t initial_entity_temperatures[SMU75_NUM_GPU_TES]; + int32_t Limit; + int32_t Hyst; + int32_t therm_influence_coeff_table[SMU75_DTE_ITERATIONS * SMU75_DTE_SOURCES * SMU75_DTE_SINKS * 2]; + int32_t therm_node_table[SMU75_DTE_ITERATIONS * SMU75_DTE_SOURCES * SMU75_DTE_SINKS]; + uint16_t ConfigTDPPowerScalar; + uint16_t FanSpeedPowerScalar; + uint16_t OverDrivePowerScalar; + uint16_t OverDriveLimitScalar; + uint16_t FinalPowerScalar; + uint8_t VariantID; + uint8_t spare997; + + SMU7_HystController_Data HystControllerData; + + int32_t temperature_gradient_slope; + int32_t temperature_gradient; + uint32_t measured_temperature; +}; + + +typedef struct SMU7_BapmScoreboard SMU7_BapmScoreboard; + +struct SMU7_AcpiScoreboard { + uint32_t SavedInterruptMask[2]; + uint8_t LastACPIRequest; + uint8_t CgBifResp; + uint8_t RequestType; + uint8_t Padding; + SMU75_Discrete_ACPILevel D0Level; +}; + +typedef struct SMU7_AcpiScoreboard SMU7_AcpiScoreboard; + +struct SMU75_Discrete_PmFuses { + uint8_t BapmVddCVidHiSidd[8]; + + uint8_t BapmVddCVidLoSidd[8]; + + uint8_t VddCVid[8]; + + uint8_t SviLoadLineEn; + uint8_t SviLoadLineVddC; + uint8_t SviLoadLineTrimVddC; + uint8_t SviLoadLineOffsetVddC; + + uint16_t TDC_VDDC_PkgLimit; + uint8_t TDC_VDDC_ThrottleReleaseLimitPerc; + uint8_t TDC_MAWt; + + uint8_t TdcWaterfallCtl; + uint8_t LPMLTemperatureMin; + 
uint8_t LPMLTemperatureMax; + uint8_t Reserved; + + uint8_t LPMLTemperatureScaler[16]; + + int16_t FuzzyFan_ErrorSetDelta; + int16_t FuzzyFan_ErrorRateSetDelta; + int16_t FuzzyFan_PwmSetDelta; + uint16_t Reserved6; + + uint8_t GnbLPML[16]; + + uint8_t GnbLPMLMaxVid; + uint8_t GnbLPMLMinVid; + uint8_t Reserved1[2]; + + uint16_t BapmVddCBaseLeakageHiSidd; + uint16_t BapmVddCBaseLeakageLoSidd; + + uint16_t VFT_Temp[3]; + uint8_t Version; + uint8_t padding; + + SMU_QuadraticCoeffs VFT_ATE[3]; + + SMU_QuadraticCoeffs AVFS_GB; + SMU_QuadraticCoeffs ATE_ACBTC_GB; + + SMU_QuadraticCoeffs P2V; + + uint32_t PsmCharzFreq; + + uint16_t InversionVoltage; + uint16_t PsmCharzTemp; + + uint32_t EnabledAvfsModules; + + SMU_QuadraticCoeffs BtcGbv_CksOff; +}; + +typedef struct SMU75_Discrete_PmFuses SMU75_Discrete_PmFuses; + +struct SMU7_Discrete_Log_Header_Table { + uint32_t version; + uint32_t asic_id; + uint16_t flags; + uint16_t entry_size; + uint32_t total_size; + uint32_t num_of_entries; + uint8_t type; + uint8_t mode; + uint8_t filler_0[2]; + uint32_t filler_1[2]; +}; + +typedef struct SMU7_Discrete_Log_Header_Table SMU7_Discrete_Log_Header_Table; + +struct SMU7_Discrete_Log_Cntl { + uint8_t Enabled; + uint8_t Type; + uint8_t padding[2]; + uint32_t BufferSize; + uint32_t SamplesLogged; + uint32_t SampleSize; + uint32_t AddrL; + uint32_t AddrH; +}; + +typedef struct SMU7_Discrete_Log_Cntl SMU7_Discrete_Log_Cntl; + +#if defined SMU__DGPU_ONLY +#define CAC_ACC_NW_NUM_OF_SIGNALS 87 +#endif + + +struct SMU7_Discrete_Cac_Collection_Table { + uint32_t temperature; + uint32_t cac_acc_nw[CAC_ACC_NW_NUM_OF_SIGNALS]; +}; + +typedef struct SMU7_Discrete_Cac_Collection_Table SMU7_Discrete_Cac_Collection_Table; + +struct SMU7_Discrete_Cac_Verification_Table { + uint32_t VddcTotalPower; + uint32_t VddcLeakagePower; + uint32_t VddcConstantPower; + uint32_t VddcGfxDynamicPower; + uint32_t VddcUvdDynamicPower; + uint32_t VddcVceDynamicPower; + uint32_t VddcAcpDynamicPower; + uint32_t VddcPcieDynamicPower; + uint32_t VddcDceDynamicPower; + uint32_t VddcCurrent; + uint32_t VddcVoltage; + uint32_t VddciTotalPower; + uint32_t VddciLeakagePower; + uint32_t VddciConstantPower; + uint32_t VddciDynamicPower; + uint32_t Vddr1TotalPower; + uint32_t Vddr1LeakagePower; + uint32_t Vddr1ConstantPower; + uint32_t Vddr1DynamicPower; + uint32_t spare[4]; + uint32_t temperature; +}; + +typedef struct SMU7_Discrete_Cac_Verification_Table SMU7_Discrete_Cac_Verification_Table; + +struct SMU7_Discrete_Pm_Status_Table { + int32_t T_meas_max[SMU75_THERMAL_INPUT_LOOP_COUNT]; + int32_t T_meas_acc[SMU75_THERMAL_INPUT_LOOP_COUNT]; + + uint32_t I_calc_max; + uint32_t I_calc_acc; + uint32_t P_meas_acc; + uint32_t V_meas_load_acc; + uint32_t I_meas_acc; + uint32_t P_meas_acc_vddci; + uint32_t V_meas_load_acc_vddci; + uint32_t I_meas_acc_vddci; + + uint16_t Sclk_dpm_residency[8]; + uint16_t Uvd_dpm_residency[8]; + uint16_t Vce_dpm_residency[8]; + uint16_t Mclk_dpm_residency[4]; + + uint32_t P_roc_acc; + uint32_t PkgPwr_max; + uint32_t PkgPwr_acc; + uint32_t MclkSwitchingTime_max; + uint32_t MclkSwitchingTime_acc; + uint32_t FanPwm_acc; + uint32_t FanRpm_acc; + uint32_t Gfx_busy_acc; + uint32_t Mc_busy_acc; + uint32_t Fps_acc; + + uint32_t AccCnt; +}; + +typedef struct SMU7_Discrete_Pm_Status_Table SMU7_Discrete_Pm_Status_Table; + +struct SMU7_Discrete_AutoWattMan_Status_Table { + int32_t T_meas_acc[SMU75_THERMAL_INPUT_LOOP_COUNT]; + uint16_t Sclk_dpm_residency[8]; + uint16_t Mclk_dpm_residency[4]; + uint32_t TgpPwr_acc; + uint32_t Gfx_busy_acc; + 
uint32_t Mc_busy_acc; + uint32_t AccCnt; +}; + +typedef struct SMU7_Discrete_AutoWattMan_Status_Table SMU7_Discrete_AutoWattMan_Status_Table; + +#define SMU7_MAX_GFX_CU_COUNT 24 +#define SMU7_MIN_GFX_CU_COUNT 8 +#define SMU7_GFX_CU_PG_ENABLE_DC_MAX_CU_SHIFT 0 +#define SMU7_GFX_CU_PG_ENABLE_DC_MAX_CU_MASK (0xFFFF << SMU7_GFX_CU_PG_ENABLE_DC_MAX_CU_SHIFT) +#define SMU7_GFX_CU_PG_ENABLE_AC_MAX_CU_SHIFT 16 +#define SMU7_GFX_CU_PG_ENABLE_AC_MAX_CU_MASK (0xFFFF << SMU7_GFX_CU_PG_ENABLE_AC_MAX_CU_SHIFT) + +struct SMU7_GfxCuPgScoreboard { + uint8_t Enabled; + uint8_t WaterfallUp; + uint8_t WaterfallDown; + uint8_t WaterfallLimit; + uint8_t CurrMaxCu; + uint8_t TargMaxCu; + uint8_t ClampMode; + uint8_t Active; + uint8_t MaxSupportedCu; + uint8_t MinSupportedCu; + uint8_t PendingGfxCuHostInterrupt; + uint8_t LastFilteredMaxCuInteger; + uint16_t FilteredMaxCu; + uint16_t FilteredMaxCuAlpha; + uint16_t FilterResetCount; + uint16_t FilterResetCountLimit; + uint8_t ForceCu; + uint8_t ForceCuCount; + uint8_t AcModeMaxCu; + uint8_t DcModeMaxCu; +}; + +typedef struct SMU7_GfxCuPgScoreboard SMU7_GfxCuPgScoreboard; + +#define SMU7_SCLK_CAC 0x561 +#define SMU7_MCLK_CAC 0xF9 +#define SMU7_VCLK_CAC 0x2DE +#define SMU7_DCLK_CAC 0x2DE +#define SMU7_ECLK_CAC 0x25E +#define SMU7_ACLK_CAC 0x25E +#define SMU7_SAMCLK_CAC 0x25E +#define SMU7_DISPCLK_CAC 0x100 +#define SMU7_CAC_CONSTANT 0x2EE3430 +#define SMU7_CAC_CONSTANT_SHIFT 18 + +#define SMU7_VDDCI_MCLK_CONST 1765 +#define SMU7_VDDCI_MCLK_CONST_SHIFT 16 +#define SMU7_VDDCI_VDDCI_CONST 50958 +#define SMU7_VDDCI_VDDCI_CONST_SHIFT 14 +#define SMU7_VDDCI_CONST 11781 +#define SMU7_VDDCI_STROBE_PWR 1331 + +#define SMU7_VDDR1_CONST 693 +#define SMU7_VDDR1_CAC_WEIGHT 20 +#define SMU7_VDDR1_CAC_WEIGHT_SHIFT 19 +#define SMU7_VDDR1_STROBE_PWR 512 + +#define SMU7_AREA_COEFF_UVD 0xA78 +#define SMU7_AREA_COEFF_VCE 0x190A +#define SMU7_AREA_COEFF_ACP 0x22D1 +#define SMU7_AREA_COEFF_SAMU 0x534 + +#define SMU7_THERM_OUT_MODE_DISABLE 0x0 +#define SMU7_THERM_OUT_MODE_THERM_ONLY 0x1 +#define SMU7_THERM_OUT_MODE_THERM_VRHOT 0x2 + +#define SQ_Enable_MASK 0x1 +#define SQ_IR_MASK 0x2 +#define SQ_PCC_MASK 0x4 +#define SQ_EDC_MASK 0x8 + +#define TCP_Enable_MASK 0x100 +#define TCP_IR_MASK 0x200 +#define TCP_PCC_MASK 0x400 +#define TCP_EDC_MASK 0x800 + +#define TD_Enable_MASK 0x10000 +#define TD_IR_MASK 0x20000 +#define TD_PCC_MASK 0x40000 +#define TD_EDC_MASK 0x80000 + +#define DB_Enable_MASK 0x1000000 +#define DB_IR_MASK 0x2000000 +#define DB_PCC_MASK 0x4000000 +#define DB_EDC_MASK 0x8000000 + +#define SQ_Enable_SHIFT 0 +#define SQ_IR_SHIFT 1 +#define SQ_PCC_SHIFT 2 +#define SQ_EDC_SHIFT 3 + +#define TCP_Enable_SHIFT 8 +#define TCP_IR_SHIFT 9 +#define TCP_PCC_SHIFT 10 +#define TCP_EDC_SHIFT 11 + +#define TD_Enable_SHIFT 16 +#define TD_IR_SHIFT 17 +#define TD_PCC_SHIFT 18 +#define TD_EDC_SHIFT 19 + +#define DB_Enable_SHIFT 24 +#define DB_IR_SHIFT 25 +#define DB_PCC_SHIFT 26 +#define DB_EDC_SHIFT 27 + +#define PMFUSES_AVFSSIZE 104 + +#define BTCGB0_Vdroop_Enable_MASK 0x1 +#define BTCGB1_Vdroop_Enable_MASK 0x2 +#define AVFSGB0_Vdroop_Enable_MASK 0x4 +#define AVFSGB1_Vdroop_Enable_MASK 0x8 + +#define BTCGB0_Vdroop_Enable_SHIFT 0 +#define BTCGB1_Vdroop_Enable_SHIFT 1 +#define AVFSGB0_Vdroop_Enable_SHIFT 2 +#define AVFSGB1_Vdroop_Enable_SHIFT 3 + +#pragma pack(pop) + + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index 6c22ed9249bf..82550a8a3a3f 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ 
b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -29,7 +29,6 @@ enum SMU_TABLE { SMU_UVD_TABLE = 0, SMU_VCE_TABLE, - SMU_SAMU_TABLE, SMU_BIF_TABLE, }; @@ -47,7 +46,6 @@ enum SMU_MEMBER { UcodeLoadStatus, UvdBootLevel, VceBootLevel, - SamuBootLevel, LowSclkInterruptThreshold, DRAM_LOG_ADDR_H, DRAM_LOG_ADDR_L, @@ -82,7 +80,7 @@ enum SMU10_TABLE_ID { SMU10_CLOCKTABLE, }; -extern int smum_get_argument(struct pp_hwmgr *hwmgr); +extern uint32_t smum_get_argument(struct pp_hwmgr *hwmgr); extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table); diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h index c3ed737ab951..715b5a168831 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h @@ -131,6 +131,7 @@ typedef uint16_t PPSMC_Result; #define PPSMC_MSG_RunAcgInOpenLoop 0x5E #define PPSMC_MSG_InitializeAcg 0x5F #define PPSMC_MSG_GetCurrPkgPwr 0x61 +#define PPSMC_MSG_GetAverageGfxclkActualFrequency 0x63 #define PPSMC_MSG_SetPccThrottleLevel 0x67 #define PPSMC_MSG_UpdatePkgPwrPidAlpha 0x68 #define PPSMC_Message_Count 0x69 diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h index 2f8a3b983cce..b6ffd08784e7 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h @@ -412,10 +412,10 @@ typedef struct { QuadraticInt_t ReservedEquation2; QuadraticInt_t ReservedEquation3; - uint16_t MinVoltageUlvGfx; - uint16_t MinVoltageUlvSoc; + uint16_t MinVoltageUlvGfx; + uint16_t MinVoltageUlvSoc; - uint32_t Reserved[14]; + uint32_t Reserved[14]; @@ -483,9 +483,9 @@ typedef struct { uint8_t padding8_4; - uint8_t PllGfxclkSpreadEnabled; - uint8_t PllGfxclkSpreadPercent; - uint16_t PllGfxclkSpreadFreq; + uint8_t PllGfxclkSpreadEnabled; + uint8_t PllGfxclkSpreadPercent; + uint16_t PllGfxclkSpreadFreq; uint8_t UclkSpreadEnabled; uint8_t UclkSpreadPercent; @@ -495,11 +495,14 @@ typedef struct { uint8_t SocclkSpreadPercent; uint16_t SocclkSpreadFreq; - uint8_t AcgGfxclkSpreadEnabled; - uint8_t AcgGfxclkSpreadPercent; - uint16_t AcgGfxclkSpreadFreq; + uint8_t AcgGfxclkSpreadEnabled; + uint8_t AcgGfxclkSpreadPercent; + uint16_t AcgGfxclkSpreadFreq; - uint32_t BoardReserved[10]; + uint8_t Vr2_I2C_address; + uint8_t padding_vr2[3]; + + uint32_t BoardReserved[9]; uint32_t MmHubPadding[7]; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile index 958755075421..8d557accaef2 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile @@ -26,7 +26,7 @@ SMU_MGR = smumgr.o smu8_smumgr.o tonga_smumgr.o fiji_smumgr.o \ polaris10_smumgr.o iceland_smumgr.o \ smu7_smumgr.o vega10_smumgr.o smu10_smumgr.o ci_smumgr.o \ - vega12_smumgr.o + vega12_smumgr.o vegam_smumgr.o smu9_smumgr.o AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c index 08d000140eca..fbe3ef4ee45c 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c @@ -61,9 +61,6 @@ #define SMC_RAM_END 0x40000 -#define VOLTAGE_SCALE 4 -#define VOLTAGE_VID_OFFSET_SCALE1 625 -#define VOLTAGE_VID_OFFSET_SCALE2 100 #define CISLAND_MINIMUM_ENGINE_CLOCK 800 #define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5 @@ -211,9 
+208,7 @@ static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { int ret; - if (!ci_is_smc_ram_running(hwmgr)) - return -EINVAL; - + cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0); cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg); PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); @@ -1182,7 +1177,6 @@ static int ci_populate_single_memory_level( struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); int result = 0; bool dll_state_on; - struct cgs_display_info info = {0}; uint32_t mclk_edc_wr_enable_threshold = 40000; uint32_t mclk_edc_enable_threshold = 40000; uint32_t mclk_strobe_mode_threshold = 40000; @@ -1236,8 +1230,7 @@ static int ci_populate_single_memory_level( /* default set to low watermark. Highest level will be set to high later.*/ memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - cgs_get_active_displays_info(hwmgr->device, &info); - data->display_timing.num_existing_displays = info.display_count; + data->display_timing.num_existing_displays = hwmgr->display_config->num_display; /* stutter mode not support on ci */ @@ -1621,37 +1614,6 @@ static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr, return result; } -static int ci_populate_smc_samu_level(struct pp_hwmgr *hwmgr, - SMU7_Discrete_DpmTable *table) -{ - int result = -EINVAL; - uint8_t count; - struct pp_atomctrl_clock_dividers_vi dividers; - struct phm_samu_clock_voltage_dependency_table *samu_table = - hwmgr->dyn_state.samu_clock_voltage_dependency_table; - - table->SamuBootLevel = 0; - table->SamuLevelCount = (uint8_t)(samu_table->count); - - for (count = 0; count < table->SamuLevelCount; count++) { - table->SamuLevel[count].Frequency = samu_table->entries[count].samclk; - table->SamuLevel[count].MinVoltage = samu_table->entries[count].v * VOLTAGE_SCALE; - table->SamuLevel[count].MinPhases = 1; - - /* retrieve divider value for VBIOS */ - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->SamuLevel[count].Frequency, &dividers); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for samu clock", return result); - - table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency); - CONVERT_FROM_HOST_TO_SMC_US(table->SamuLevel[count].MinVoltage); - } - return result; -} - static int ci_populate_memory_timing_parameters( struct pp_hwmgr *hwmgr, uint32_t engine_clock, @@ -2033,10 +1995,6 @@ static int ci_init_smc_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize ACP Level!", return result); - result = ci_populate_smc_samu_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize SAMU Level!", return result); - /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */ /* need to populate the ARB settings for the initial state.
*/ result = ci_program_memory_timing_parameters(hwmgr); @@ -2784,7 +2742,6 @@ static int ci_smu_fini(struct pp_hwmgr *hwmgr) { kfree(hwmgr->smu_backend); hwmgr->smu_backend = NULL; - cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU); return 0; } @@ -2889,6 +2846,89 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr, return 0; } +static int ci_update_uvd_smc_table(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + struct smu7_hwmgr *data = hwmgr->backend; + struct ci_smumgr *smu_data = hwmgr->smu_backend; + struct phm_uvd_clock_voltage_dependency_table *uvd_table = + hwmgr->dyn_state.uvd_clock_voltage_dependency_table; + uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | + AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; + uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc : + hwmgr->dyn_state.max_clock_voltage_on_dc.vddc; + int32_t i; + + if (PP_CAP(PHM_PlatformCaps_UVDDPM) || uvd_table->count <= 0) + smu_data->smc_state_table.UvdBootLevel = 0; + else + smu_data->smc_state_table.UvdBootLevel = uvd_table->count - 1; + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475, + UvdBootLevel, smu_data->smc_state_table.UvdBootLevel); + + data->dpm_level_enable_mask.uvd_dpm_enable_mask = 0; + + for (i = uvd_table->count - 1; i >= 0; i--) { + if (uvd_table->entries[i].v <= max_vddc) + data->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i; + if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_UVDDPM)) + break; + } + ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask, + data->dpm_level_enable_mask.uvd_dpm_enable_mask); + + return 0; +} + +static int ci_update_vce_smc_table(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + struct smu7_hwmgr *data = hwmgr->backend; + struct phm_vce_clock_voltage_dependency_table *vce_table = + hwmgr->dyn_state.vce_clock_voltage_dependency_table; + uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | + AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; + uint32_t max_vddc = adev->pm.ac_power ? 
hwmgr->dyn_state.max_clock_voltage_on_ac.vddc : + hwmgr->dyn_state.max_clock_voltage_on_dc.vddc; + int32_t i; + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475, + VceBootLevel, 0); /* temp hard code to level 0, vce can set min evclk*/ + + data->dpm_level_enable_mask.vce_dpm_enable_mask = 0; + + for (i = vce_table->count - 1; i >= 0; i--) { + if (vce_table->entries[i].v <= max_vddc) + data->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i; + if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_VCEDPM)) + break; + } + ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask, + data->dpm_level_enable_mask.vce_dpm_enable_mask); + + return 0; +} + +static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) +{ + switch (type) { + case SMU_UVD_TABLE: + ci_update_uvd_smc_table(hwmgr); + break; + case SMU_VCE_TABLE: + ci_update_vce_smc_table(hwmgr); + break; + default: + break; + } + return 0; +} + const struct pp_smumgr_func ci_smu_funcs = { .smu_init = ci_smu_init, .smu_fini = ci_smu_fini, @@ -2911,4 +2951,5 @@ const struct pp_smumgr_func ci_smu_funcs = { .initialize_mc_reg_table = ci_initialize_mc_reg_table, .is_dpm_running = ci_is_dpm_running, .update_dpm_settings = ci_update_dpm_settings, + .update_smc_table = ci_update_smc_table, }; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index faef78321446..18048f8e2f13 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -53,10 +53,7 @@ #define FIJI_SMC_SIZE 0x20000 -#define VOLTAGE_SCALE 4 #define POWERTUNE_DEFAULT_SET_MAX 1 -#define VOLTAGE_VID_OFFSET_SCALE1 625 -#define VOLTAGE_VID_OFFSET_SCALE2 100 #define VDDC_VDDCI_DELTA 300 #define MC_CG_ARB_FREQ_F1 0x0b @@ -288,8 +285,7 @@ static int fiji_start_smu(struct pp_hwmgr *hwmgr) struct fiji_smumgr *priv = (struct fiji_smumgr *)(hwmgr->smu_backend); /* Only start SMC if SMC RAM is not running */ - if (!(smu7_is_smc_ram_running(hwmgr) - || cgs_is_virtualization_enabled(hwmgr->device))) { + if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) { /* Check if SMU is running in protected mode */ if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, @@ -307,13 +303,13 @@ static int fiji_start_smu(struct pp_hwmgr *hwmgr) } /* To initialize all clock gating before RLC loaded and running.*/ - cgs_set_clockgating_state(hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_GFX, AMD_CG_STATE_GATE); - cgs_set_clockgating_state(hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_GMC, AMD_CG_STATE_GATE); - cgs_set_clockgating_state(hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_SDMA, AMD_CG_STATE_GATE); - cgs_set_clockgating_state(hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_COMMON, AMD_CG_STATE_GATE); /* Setup SoftRegsStart here for register lookup in case @@ -335,10 +331,10 @@ static bool fiji_is_hw_avfs_present(struct pp_hwmgr *hwmgr) uint32_t efuse = 0; uint32_t mask = (1 << ((AVFS_EN_MSB - AVFS_EN_LSB) + 1)) - 1; - if (cgs_is_virtualization_enabled(hwmgr->device)) - return 0; + if (!hwmgr->not_vf) + return false; - if (!atomctrl_read_efuse(hwmgr->device, AVFS_EN_LSB, AVFS_EN_MSB, + if (!atomctrl_read_efuse(hwmgr, AVFS_EN_LSB, AVFS_EN_MSB, mask, &efuse)) { if (efuse) return true; @@ -989,11 +985,11 @@ static int 
fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr, threshold = clock * data->fast_watermark_threshold / 100; - data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr; + data->display_timing.min_clock_in_sr = hwmgr->display_config->min_core_set_clock_in_sr; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock, - hwmgr->display_config.min_core_set_clock_in_sr); + hwmgr->display_config->min_core_set_clock_in_sr); /* Default to slow, highest DPM level will be @@ -1507,44 +1503,6 @@ static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr, return result; } -static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr, - SMU73_Discrete_DpmTable *table) -{ - int result = -EINVAL; - uint8_t count; - struct pp_atomctrl_clock_dividers_vi dividers; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = - table_info->mm_dep_table; - - table->SamuBootLevel = 0; - table->SamuLevelCount = (uint8_t)(mm_table->count); - - for (count = 0; count < table->SamuLevelCount; count++) { - /* not sure whether we need evclk or not */ - table->SamuLevel[count].MinVoltage = 0; - table->SamuLevel[count].Frequency = mm_table->entries[count].samclock; - table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc * - VOLTAGE_SCALE) << VDDC_SHIFT; - table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - - VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT; - table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT; - - /* retrieve divider value for VBIOS */ - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->SamuLevel[count].Frequency, &dividers); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for samu clock", return result); - - table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency); - CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage); - } - return result; -} - static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, int32_t eng_clock, int32_t mem_clock, struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs) @@ -2032,10 +1990,6 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize ACP Level!", return result); - result = fiji_populate_smc_samu_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize SAMU Level!", return result); - /* Since only the initial state is completely set up at this point * (the other states are just copies of the boot state) we only * need to populate the ARB settings for the initial state.
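The SAMU populate helper removed here (like its CI, Iceland and Polaris counterparts elsewhere in this series) packs a level's minimum voltage into a single 32-bit MinVoltage word before the table is handed to the firmware. Because the same packing recurs throughout these populate functions, a minimal sketch of just that step follows; it assumes the VDDC_SHIFT/VDDCI_SHIFT/PHASES_SHIFT constants from the SMU7x discrete headers, the VOLTAGE_SCALE factor of 4 defined in these files, and a hypothetical helper name:

/*
 * Sketch only: pack a level's minimum voltage the way the SMU7x
 * populate functions do. VOLTAGE_SCALE is 4, so the SMC apparently
 * takes voltages in 0.25 mV steps; vddci is derived from vddc by the
 * per-ASIC VDDC_VDDCI_DELTA (300 on Fiji, 200 elsewhere).
 */
static uint32_t pack_min_voltage(uint16_t vddc_mv)
{
	uint32_t min_voltage = 0;

	min_voltage |= (vddc_mv * VOLTAGE_SCALE) << VDDC_SHIFT;
	min_voltage |= ((vddc_mv - VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
	min_voltage |= 1 << PHASES_SHIFT;	/* request a single regulator phase */

	return min_voltage;
}

The packed word is then byte-swapped with CONVERT_FROM_HOST_TO_SMC_UL() before it reaches the SMC table, as in the removed code above.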
@@ -2382,8 +2336,6 @@ static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member) return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel); case VceBootLevel: return offsetof(SMU73_Discrete_DpmTable, VceBootLevel); - case SamuBootLevel: - return offsetof(SMU73_Discrete_DpmTable, SamuBootLevel); case LowSclkInterruptThreshold: return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold); } @@ -2482,33 +2434,6 @@ static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr) return 0; } -static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr) -{ - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); - uint32_t mm_boot_level_offset, mm_boot_level_value; - - - smu_data->smc_state_table.SamuBootLevel = 0; - mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + - offsetof(SMU73_Discrete_DpmTable, SamuBootLevel); - - mm_boot_level_offset /= 4; - mm_boot_level_offset *= 4; - mm_boot_level_value = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset); - mm_boot_level_value &= 0xFFFFFF00; - mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SAMUDPM_SetEnabledMask, - (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel)); - return 0; -} - static int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) { switch (type) { @@ -2518,9 +2443,6 @@ static int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) case SMU_VCE_TABLE: fiji_update_vce_smc_table(hwmgr); break; - case SMU_SAMU_TABLE: - fiji_update_samu_smc_table(hwmgr); - break; default: break; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c index d4bb934e7334..9299b93aa09a 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c @@ -60,10 +60,7 @@ #define ICELAND_SMC_SIZE 0x20000 -#define VOLTAGE_SCALE 4 #define POWERTUNE_DEFAULT_SET_MAX 1 -#define VOLTAGE_VID_OFFSET_SCALE1 625 -#define VOLTAGE_VID_OFFSET_SCALE2 100 #define MC_CG_ARB_FREQ_F1 0x0b #define VDDC_VDDCI_DELTA 200 @@ -932,7 +929,7 @@ static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr, graphic_level->PowerThrottle = 0; data->display_timing.min_clock_in_sr = - hwmgr->display_config.min_core_set_clock_in_sr; + hwmgr->display_config->min_core_set_clock_in_sr; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) @@ -1236,7 +1233,6 @@ static int iceland_populate_single_memory_level( struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); int result = 0; bool dll_state_on; - struct cgs_display_info info = {0}; uint32_t mclk_edc_wr_enable_threshold = 40000; uint32_t mclk_edc_enable_threshold = 40000; uint32_t mclk_strobe_mode_threshold = 40000; @@ -1283,8 +1279,7 @@ static int iceland_populate_single_memory_level( /* default set to low watermark. 
Highest level will be set to high later.*/ memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - cgs_get_active_displays_info(hwmgr->device, &info); - data->display_timing.num_existing_displays = info.display_count; + data->display_timing.num_existing_displays = hwmgr->display_config->num_display; /* stutter mode not support on iceland */ @@ -1583,12 +1578,6 @@ static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr, return 0; } -static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr, - SMU71_Discrete_DpmTable *table) -{ - return 0; -} - static int iceland_populate_memory_timing_parameters( struct pp_hwmgr *hwmgr, uint32_t engine_clock, @@ -1997,10 +1986,6 @@ static int iceland_init_smc_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize ACP Level!", return result;); - result = iceland_populate_smc_samu_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize SAMU Level!", return result;); - /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */ /* need to populate the ARB settings for the initial state. */ result = iceland_program_memory_timing_parameters(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 997a777dd35b..a4ce199af475 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -52,8 +52,6 @@ #include "dce/dce_10_0_sh_mask.h" #define POLARIS10_SMC_SIZE 0x20000 -#define VOLTAGE_VID_OFFSET_SCALE1 625 -#define VOLTAGE_VID_OFFSET_SCALE2 100 #define POWERTUNE_DEFAULT_SET_MAX 1 #define VDDC_VDDCI_DELTA 200 #define MC_CG_ARB_FREQ_F1 0x0b @@ -295,25 +293,16 @@ static int polaris10_start_smu(struct pp_hwmgr *hwmgr) struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); /* Only start SMC if SMC RAM is not running */ - if (!(smu7_is_smc_ram_running(hwmgr) - || cgs_is_virtualization_enabled(hwmgr->device))) { + if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) { smu_data->protected_mode = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)); smu_data->smu7_data.security_hard_key = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL)); /* Check if SMU is running in protected mode */ - if (smu_data->protected_mode == 0) { + if (smu_data->protected_mode == 0) result = polaris10_start_smu_in_non_protection_mode(hwmgr); - } else { + else result = polaris10_start_smu_in_protection_mode(hwmgr); - /* If failed, try with different security Key. 
*/ - if (result != 0) { - smu_data->smu7_data.security_hard_key ^= 1; - cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU); - result = polaris10_start_smu_in_protection_mode(hwmgr); - } - } - if (result != 0) PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result); @@ -951,11 +940,11 @@ static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr, level->DownHyst = data->current_profile_setting.sclk_down_hyst; level->VoltageDownHyst = 0; level->PowerThrottle = 0; - data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr; + data->display_timing.min_clock_in_sr = hwmgr->display_config->min_core_set_clock_in_sr; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock, - hwmgr->display_config.min_core_set_clock_in_sr); + hwmgr->display_config->min_core_set_clock_in_sr); /* Default to slow, highest DPM level will be * set to PPSMC_DISPLAY_WATERMARK_LOW later. @@ -1085,11 +1074,9 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr, struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); int result = 0; - struct cgs_display_info info = {0, 0, NULL}; uint32_t mclk_stutter_mode_threshold = 40000; phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL; - cgs_get_active_displays_info(hwmgr->device, &info); if (hwmgr->od_enabled) vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_mclk; @@ -1115,7 +1102,7 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr, mem_level->StutterEnable = false; mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - data->display_timing.num_existing_displays = info.display_count; + data->display_timing.num_existing_displays = hwmgr->display_config->num_display; if (mclk_stutter_mode_threshold && (clock <= mclk_stutter_mode_threshold) && @@ -1350,55 +1337,6 @@ static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr, return result; } - -static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr, - SMU74_Discrete_DpmTable *table) -{ - int result = -EINVAL; - uint8_t count; - struct pp_atomctrl_clock_dividers_vi dividers; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = - table_info->mm_dep_table; - struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - uint32_t vddci; - - table->SamuBootLevel = 0; - table->SamuLevelCount = (uint8_t)(mm_table->count); - - for (count = 0; count < table->SamuLevelCount; count++) { - /* not sure whether we need evclk or not */ - table->SamuLevel[count].MinVoltage = 0; - table->SamuLevel[count].Frequency = mm_table->entries[count].samclock; - table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc * - VOLTAGE_SCALE) << VDDC_SHIFT; - - if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) - vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table), - mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); - else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) - vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; - else - vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT; - - table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; - table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT; - - /* retrieve 
divider value for VBIOS */ - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->SamuLevel[count].Frequency, &dividers); - PP_ASSERT_WITH_CODE((0 == result), - "can not find divide id for samu clock", return result); - - table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency); - CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage); - } - return result; -} - static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, int32_t eng_clock, int32_t mem_clock, SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs) @@ -1878,10 +1816,6 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize VCE Level!", return result); - result = polaris10_populate_smc_samu_level(hwmgr, table); - PP_ASSERT_WITH_CODE(0 == result, - "Failed to initialize SAMU Level!", return result); - /* Since only the initial state is completely set up at this point * (the other states are just copies of the boot state) we only * need to populate the ARB settings for the initial state. @@ -2235,34 +2169,6 @@ static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr) return 0; } -static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr) -{ - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); - uint32_t mm_boot_level_offset, mm_boot_level_value; - - - smu_data->smc_state_table.SamuBootLevel = 0; - mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + - offsetof(SMU74_Discrete_DpmTable, SamuBootLevel); - - mm_boot_level_offset /= 4; - mm_boot_level_offset *= 4; - mm_boot_level_value = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset); - mm_boot_level_value &= 0xFFFFFF00; - mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SAMUDPM_SetEnabledMask, - (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel)); - return 0; -} - - static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr) { struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); @@ -2289,9 +2195,6 @@ static int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) case SMU_VCE_TABLE: polaris10_update_vce_smc_table(hwmgr); break; - case SMU_SAMU_TABLE: - polaris10_update_samu_smc_table(hwmgr); - break; case SMU_BIF_TABLE: polaris10_update_bif_smc_table(hwmgr); default: @@ -2370,8 +2273,6 @@ static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member) return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel); case VceBootLevel: return offsetof(SMU74_Discrete_DpmTable, VceBootLevel); - case SamuBootLevel: - return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel); case LowSclkInterruptThreshold: return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold); } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c index bc53f2beda30..bb07d43f3874 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c @@ -23,7 +23,7 @@ #include "smumgr.h" #include "smu10_inc.h" -#include "pp_soc15.h" +#include "soc15_common.h" #include "smu10_smumgr.h" #include "ppatomctrl.h" 
#include "rv_ppsmc.h" @@ -33,8 +33,6 @@ #include "pp_debug.h" -#define VOLTAGE_SCALE 4 - #define BUFFER_SIZE 80000 #define MAX_STRING_SIZE 15 #define BUFFER_SIZETWO 131072 @@ -49,48 +47,41 @@ static uint32_t smu10_wait_for_response(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; uint32_t reg; - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); + reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90); phm_wait_for_register_unequal(hwmgr, reg, 0, MP1_C2PMSG_90__CONTENT_MASK); - return cgs_read_register(hwmgr->device, reg); + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90); } static int smu10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg) { - uint32_t reg; + struct amdgpu_device *adev = hwmgr->adev; - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66); - cgs_write_register(hwmgr->device, reg, msg); + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg); return 0; } -static int smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr) +static uint32_t smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr) { - uint32_t reg; - - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); + struct amdgpu_device *adev = hwmgr->adev; - return cgs_read_register(hwmgr->device, reg); + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82); } static int smu10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { - uint32_t reg; + struct amdgpu_device *adev = hwmgr->adev; smu10_wait_for_response(hwmgr); - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - cgs_write_register(hwmgr->device, reg, 0); + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); smu10_send_msg_to_smc_without_waiting(hwmgr, msg); @@ -104,17 +95,13 @@ static int smu10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) static int smu10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { - uint32_t reg; + struct amdgpu_device *adev = hwmgr->adev; smu10_wait_for_response(hwmgr); - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - cgs_write_register(hwmgr->device, reg, 0); + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); - cgs_write_register(hwmgr->device, reg, parameter); + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter); smu10_send_msg_to_smc_without_waiting(hwmgr, msg); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index 0399c10d2be0..a029e47c2319 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -167,24 +167,25 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { int ret; - if (!smu7_is_smc_ram_running(hwmgr)) - return -EINVAL; - - PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP); - if (ret != 1) - pr_info("\n failed to send pre message %x ret is %d \n", msg, ret); + if (ret == 0xFE) + pr_debug("last message was not supported\n"); + else if (ret != 1) + pr_info("\n last message was failed ret is %d\n", ret); + cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0); cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg); PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, 
SMC_RESP); - if (ret != 1) + if (ret == 0xFE) + pr_debug("message %x was not supported\n", msg); + else if (ret != 1) pr_info("\n failed to send message %x ret is %d \n", msg, ret); return 0; @@ -199,10 +200,6 @@ int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg) int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { - if (!smu7_is_smc_ram_running(hwmgr)) { - return -EINVAL; - } - PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter); @@ -231,16 +228,6 @@ int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr) return 0; } -int smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr) -{ - if (!smu7_is_smc_ram_running(hwmgr)) - return -EINVAL; - - PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0); - return 0; -} - - enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type) { enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM; @@ -296,11 +283,9 @@ int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit); - if (result) - return result; + *value = result ? 0 : cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11); - *value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11); - return 0; + return result; } int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t value, uint32_t limit) @@ -375,7 +360,7 @@ static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr, entry->meta_data_addr_low = 0; /* digest need be excluded out */ - if (cgs_is_virtualization_enabled(hwmgr->device)) + if (!hwmgr->not_vf) info.image_size -= 20; entry->data_size_byte = info.image_size; entry->num_register_entries = 0; @@ -394,8 +379,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) { struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); uint32_t fw_to_load; - int result = 0; - struct SMU_DRAMData_TOC *toc; + int r = 0; if (!hwmgr->reload_fw) { pr_info("skip reloading...\n"); @@ -409,7 +393,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) 0x0); if (hwmgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */ - if (!cgs_is_virtualization_enabled(hwmgr->device)) { + if (hwmgr->not_vf) { smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, upper_32_bits(smu_data->smu_buffer.mc_addr)); @@ -436,49 +420,62 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) + UCODE_ID_CP_MEC_JT2_MASK; } - toc = (struct SMU_DRAMData_TOC *)smu_data->header; - toc->num_entries = 0; - toc->structure_version = 1; + if (!smu_data->toc) { + struct SMU_DRAMData_TOC *toc; - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, + smu_data->toc = kzalloc(sizeof(struct SMU_DRAMData_TOC), GFP_KERNEL); + if (!smu_data->toc) + return -ENOMEM; + toc = smu_data->toc; + toc->num_entries = 0; + toc->structure_version = 1; + + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, + "Failed to Get Firmware Entry.", r = -EINVAL; goto failed); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, + "Failed to Get Firmware Entry.", r = -EINVAL; goto 
failed); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, + "Failed to Get Firmware Entry.", r = -EINVAL; goto failed); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, + "Failed to Get Firmware Entry.", r = -EINVAL; goto failed); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, + "Failed to Get Firmware Entry.", r = -EINVAL; goto failed); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, + "Failed to Get Firmware Entry.", r = -EINVAL; goto failed); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, + "Failed to Get Firmware Entry.", r = -EINVAL; goto failed); + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, - UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.", return -EINVAL); - if (cgs_is_virtualization_enabled(hwmgr->device)) + "Failed to Get Firmware Entry.", r = -EINVAL; goto failed); PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, + UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.", r = -EINVAL; goto failed); + if (!hwmgr->not_vf) + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]), - "Failed to Get Firmware Entry.", return -EINVAL); - + "Failed to Get Firmware Entry.", r = -EINVAL; goto failed); + } + memcpy_toio(smu_data->header_buffer.kaddr, smu_data->toc, + sizeof(struct SMU_DRAMData_TOC)); smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr)); smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr)); if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load)) pr_err("Fail to Request SMU Load uCode"); - return result; + return r; + +failed: + kfree(smu_data->toc); + smu_data->toc = NULL; + return r; } /* Check if the FW has been loaded, SMU will not return if loading has not finished. 
*/ @@ -585,7 +582,6 @@ int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr) int smu7_init(struct pp_hwmgr *hwmgr) { struct smu7_smumgr *smu_data; - uint64_t mc_addr = 0; int r; /* Allocate memory for backend private data */ smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); @@ -599,16 +595,13 @@ int smu7_init(struct pp_hwmgr *hwmgr) PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &smu_data->header_buffer.handle, - &mc_addr, + &smu_data->header_buffer.mc_addr, &smu_data->header_buffer.kaddr); if (r) return -EINVAL; - smu_data->header = smu_data->header_buffer.kaddr; - smu_data->header_buffer.mc_addr = mc_addr; - - if (cgs_is_virtualization_enabled(hwmgr->device)) + if (!hwmgr->not_vf) return 0; smu_data->smu_buffer.data_size = 200*4096; @@ -617,7 +610,7 @@ int smu7_init(struct pp_hwmgr *hwmgr) PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &smu_data->smu_buffer.handle, - &mc_addr, + &smu_data->smu_buffer.mc_addr, &smu_data->smu_buffer.kaddr); if (r) { @@ -626,7 +619,6 @@ int smu7_init(struct pp_hwmgr *hwmgr) &smu_data->header_buffer.kaddr); return -EINVAL; } - smu_data->smu_buffer.mc_addr = mc_addr; if (smum_is_hw_avfs_present(hwmgr)) hwmgr->avfs_supported = true; @@ -643,13 +635,15 @@ int smu7_smu_fini(struct pp_hwmgr *hwmgr) &smu_data->header_buffer.mc_addr, &smu_data->header_buffer.kaddr); - if (!cgs_is_virtualization_enabled(hwmgr->device)) + if (hwmgr->not_vf) amdgpu_bo_free_kernel(&smu_data->smu_buffer.handle, &smu_data->smu_buffer.mc_addr, &smu_data->smu_buffer.kaddr); + + kfree(smu_data->toc); + smu_data->toc = NULL; kfree(hwmgr->smu_backend); hwmgr->smu_backend = NULL; - cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h index 126d300259ba..01f0538fba6b 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h @@ -37,10 +37,9 @@ struct smu7_buffer_entry { }; struct smu7_smumgr { - uint8_t *header; - uint8_t *mec_image; struct smu7_buffer_entry smu_buffer; struct smu7_buffer_entry header_buffer; + struct SMU_DRAMData_TOC *toc; uint32_t soft_regs_start; uint32_t dpm_table_start; @@ -67,7 +66,6 @@ int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter); int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr); -int smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr); enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type); int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c index c861d3023474..f7e3bc22bb93 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c @@ -52,10 +52,10 @@ static const enum smu8_scratch_entry firmware_list[] = { SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, }; -static int smu8_get_argument(struct pp_hwmgr *hwmgr) +static uint32_t smu8_get_argument(struct pp_hwmgr *hwmgr) { if (hwmgr == NULL || hwmgr->device == NULL) - return -EINVAL; + return 0; return cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c new file mode 100644 index 000000000000..079fc8e8f709 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c @@ -0,0 +1,150 @@ +/* + 
* Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "smumgr.h" +#include "vega10_inc.h" +#include "soc15_common.h" +#include "pp_debug.h" + + +/* MP Apertures */ +#define MP0_Public 0x03800000 +#define MP0_SRAM 0x03900000 +#define MP1_Public 0x03b00000 +#define MP1_SRAM 0x03c00004 + +#define smnMP1_FIRMWARE_FLAGS 0x3010028 + +bool smu9_is_smc_ram_running(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + uint32_t mp1_fw_flags; + + WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2, + (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); + + mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2); + + if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) + return true; + + return false; +} + +/* + * Check if SMC has responded to previous message. + * + * @param smumgr the address of the powerplay hardware manager. + * @return TRUE SMC has responded, FALSE otherwise. + */ +static uint32_t smu9_wait_for_response(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + uint32_t reg; + uint32_t ret; + + reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90); + + ret = phm_wait_for_register_unequal(hwmgr, reg, + 0, MP1_C2PMSG_90__CONTENT_MASK); + + if (ret) + pr_err("No response from smu\n"); + + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90); +} + +/* + * Send a message to the SMC, and do not wait for its response. + * @param smumgr the address of the powerplay hardware manager. + * @param msg the message to send. + * @return Always return 0. + */ +static int smu9_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, + uint16_t msg) +{ + struct amdgpu_device *adev = hwmgr->adev; + + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg); + + return 0; +} + +/* + * Send a message to the SMC, and wait for its response. + * @param hwmgr the address of the powerplay hardware manager. + * @param msg the message to send. + * @return Always return 0. + */ +int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) +{ + struct amdgpu_device *adev = hwmgr->adev; + uint32_t ret; + + smu9_wait_for_response(hwmgr); + + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); + + smu9_send_msg_to_smc_without_waiting(hwmgr, msg); + + ret = smu9_wait_for_response(hwmgr); + if (ret != 1) + pr_err("Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret); + + return 0; +} + +/* + * Send a message to the SMC with parameter + * @param hwmgr: the address of the powerplay hardware manager. 
+ * @param msg: the message to send. + * @param parameter: the parameter to send + * @return Always return 0. + */ +int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, + uint16_t msg, uint32_t parameter) +{ + struct amdgpu_device *adev = hwmgr->adev; + uint32_t ret; + + smu9_wait_for_response(hwmgr); + + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); + + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter); + + smu9_send_msg_to_smc_without_waiting(hwmgr, msg); + + ret = smu9_wait_for_response(hwmgr); + if (ret != 1) + pr_err("Failed message: 0x%x, input parameter: 0x%x, error code: 0x%x\n", msg, parameter, ret); + + return 0; +} + +uint32_t smu9_get_argument(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82); +} diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h index b43315cc5d58..1462279ca128 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h @@ -1,5 +1,5 @@ /* - * Copyright 2015 Advanced Micro Devices, Inc. + * Copyright 2018 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -20,17 +20,13 @@ * OTHER DEALINGS IN THE SOFTWARE. * */ +#ifndef _SMU9_SMUMANAGER_H_ +#define _SMU9_SMUMANAGER_H_ -#ifndef PP_POWERSOURCE_H -#define PP_POWERSOURCE_H - -enum pp_power_source { - PP_PowerSource_AC = 0, - PP_PowerSource_DC, - PP_PowerSource_LimitedPower, - PP_PowerSource_LimitedPower_2, - PP_PowerSource_Max -}; - +bool smu9_is_smc_ram_running(struct pp_hwmgr *hwmgr); +int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg); +int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, + uint16_t msg, uint32_t parameter); +uint32_t smu9_get_argument(struct pp_hwmgr *hwmgr); #endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index c28b60aae5f8..99d5e4f98f49 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -41,9 +41,11 @@ MODULE_FIRMWARE("amdgpu/polaris11_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin"); MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris12_smc.bin"); +MODULE_FIRMWARE("amdgpu/vegam_smc.bin"); MODULE_FIRMWARE("amdgpu/vega10_smc.bin"); MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin"); MODULE_FIRMWARE("amdgpu/vega12_smc.bin"); +MODULE_FIRMWARE("amdgpu/vega20_smc.bin"); int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr) { @@ -94,7 +96,7 @@ int smum_process_firmware_header(struct pp_hwmgr *hwmgr) return 0; } -int smum_get_argument(struct pp_hwmgr *hwmgr) +uint32_t smum_get_argument(struct pp_hwmgr *hwmgr) { if (NULL != hwmgr->smumgr_funcs->get_argument) return hwmgr->smumgr_funcs->get_argument(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index b51d7468c3e7..7dabc6c456e1 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -55,11 +55,7 @@ #include "dce/dce_10_0_d.h" #include "dce/dce_10_0_sh_mask.h" - -#define VOLTAGE_SCALE 4 #define POWERTUNE_DEFAULT_SET_MAX 1 -#define VOLTAGE_VID_OFFSET_SCALE1 625 -#define VOLTAGE_VID_OFFSET_SCALE2 100 #define MC_CG_ARB_FREQ_F1 0x0b #define VDDC_VDDCI_DELTA 200 @@ -199,8 +195,7 @@ static 
int tonga_start_smu(struct pp_hwmgr *hwmgr) int result; /* Only start SMC if SMC RAM is not running */ - if (!(smu7_is_smc_ram_running(hwmgr) || - cgs_is_virtualization_enabled(hwmgr->device))) { + if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) { /*Check if SMU is running in protected mode*/ if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)) { @@ -651,7 +646,7 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, graphic_level->PowerThrottle = 0; data->display_timing.min_clock_in_sr = - hwmgr->display_config.min_core_set_clock_in_sr; + hwmgr->display_config->min_core_set_clock_in_sr; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) @@ -957,18 +952,17 @@ static int tonga_populate_single_memory_level( SMU72_Discrete_MemoryLevel *memory_level ) { - uint32_t mvdd = 0; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - int result = 0; - bool dll_state_on; - struct cgs_display_info info = {0}; uint32_t mclk_edc_wr_enable_threshold = 40000; uint32_t mclk_stutter_mode_threshold = 30000; uint32_t mclk_edc_enable_threshold = 40000; uint32_t mclk_strobe_mode_threshold = 40000; phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL; + int result = 0; + bool dll_state_on; + uint32_t mvdd = 0; if (hwmgr->od_enabled) vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_mclk; @@ -1009,8 +1003,7 @@ static int tonga_populate_single_memory_level( /* default set to low watermark. Highest level will be set to high later.*/ memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - cgs_get_active_displays_info(hwmgr->device, &info); - data->display_timing.num_existing_displays = info.display_count; + data->display_timing.num_existing_displays = hwmgr->display_config->num_display; if ((mclk_stutter_mode_threshold != 0) && (memory_clock <= mclk_stutter_mode_threshold) && @@ -1450,51 +1443,6 @@ static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr, return result; } -static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr, - SMU72_Discrete_DpmTable *table) -{ - int result = 0; - uint8_t count; - pp_atomctrl_clock_dividers_vi dividers; - struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *pptable_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = - pptable_info->mm_dep_table; - - table->SamuBootLevel = 0; - table->SamuLevelCount = (uint8_t) (mm_table->count); - - for (count = 0; count < table->SamuLevelCount; count++) { - /* not sure whether we need evclk or not */ - table->SamuLevel[count].Frequency = - pptable_info->mm_dep_table->entries[count].samclock; - table->SamuLevel[count].MinVoltage.Vddc = - phm_get_voltage_index(pptable_info->vddc_lookup_table, - mm_table->entries[count].vddc); - table->SamuLevel[count].MinVoltage.VddGfx = - (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ? 
- phm_get_voltage_index(pptable_info->vddgfx_lookup_table, - mm_table->entries[count].vddgfx) : 0; - table->SamuLevel[count].MinVoltage.Vddci = - phm_get_voltage_id(&data->vddci_voltage_table, - mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); - table->SamuLevel[count].MinVoltage.Phases = 1; - - /* retrieve divider value for VBIOS */ - result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, - table->SamuLevel[count].Frequency, &dividers); - PP_ASSERT_WITH_CODE((!result), - "can not find divide id for samu clock", return result); - - table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider; - - CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency); - } - - return result; -} - static int tonga_populate_memory_timing_parameters( struct pp_hwmgr *hwmgr, uint32_t engine_clock, @@ -2330,10 +2278,6 @@ static int tonga_init_smc_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(!result, "Failed to initialize ACP Level !", return result); - result = tonga_populate_smc_samu_level(hwmgr, table); - PP_ASSERT_WITH_CODE(!result, - "Failed to initialize SAMU Level !", return result); - /* Since only the initial state is completely set up at this * point (the other states are just copies of the boot state) we only * need to populate the ARB settings for the initial state. @@ -2680,8 +2624,6 @@ static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member) return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel); case VceBootLevel: return offsetof(SMU72_Discrete_DpmTable, VceBootLevel); - case SamuBootLevel: - return offsetof(SMU72_Discrete_DpmTable, SamuBootLevel); case LowSclkInterruptThreshold: return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold); } @@ -2780,32 +2722,6 @@ static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr) return 0; } -static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr) -{ - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); - uint32_t mm_boot_level_offset, mm_boot_level_value; - - smu_data->smc_state_table.SamuBootLevel = 0; - mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + - offsetof(SMU72_Discrete_DpmTable, SamuBootLevel); - - mm_boot_level_offset /= 4; - mm_boot_level_offset *= 4; - mm_boot_level_value = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset); - mm_boot_level_value &= 0xFFFFFF00; - mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0; - cgs_write_ind_register(hwmgr->device, - CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SAMUDPM_SetEnabledMask, - (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel)); - return 0; -} - static int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) { switch (type) { @@ -2815,9 +2731,6 @@ static int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) case SMU_VCE_TABLE: tonga_update_vce_smc_table(hwmgr); break; - case SMU_SAMU_TABLE: - tonga_update_samu_smc_table(hwmgr); - break; default: break; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c index 4aafb043bcb0..5d19115f410c 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c @@ -23,165 +23,16 @@ #include "smumgr.h" #include "vega10_inc.h" -#include "pp_soc15.h" +#include "soc15_common.h" #include "vega10_smumgr.h" #include 
"vega10_hwmgr.h" #include "vega10_ppsmc.h" #include "smu9_driver_if.h" +#include "smu9_smumgr.h" #include "ppatomctrl.h" #include "pp_debug.h" -#define AVFS_EN_MSB 1568 -#define AVFS_EN_LSB 1568 - -#define VOLTAGE_SCALE 4 - -/* Microcode file is stored in this buffer */ -#define BUFFER_SIZE 80000 -#define MAX_STRING_SIZE 15 -#define BUFFER_SIZETWO 131072 /* 128 *1024 */ - -/* MP Apertures */ -#define MP0_Public 0x03800000 -#define MP0_SRAM 0x03900000 -#define MP1_Public 0x03b00000 -#define MP1_SRAM 0x03c00004 - -#define smnMP1_FIRMWARE_FLAGS 0x3010028 -#define smnMP0_FW_INTF 0x3010104 -#define smnMP1_PUB_CTRL 0x3010b14 - -static bool vega10_is_smc_ram_running(struct pp_hwmgr *hwmgr) -{ - uint32_t mp1_fw_flags, reg; - - reg = soc15_get_register_offset(NBIF_HWID, 0, - mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2); - - cgs_write_register(hwmgr->device, reg, - (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); - - reg = soc15_get_register_offset(NBIF_HWID, 0, - mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2); - - mp1_fw_flags = cgs_read_register(hwmgr->device, reg); - - if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) - return true; - - return false; -} - -/* - * Check if SMC has responded to previous message. - * - * @param smumgr the address of the powerplay hardware manager. - * @return TRUE SMC has responded, FALSE otherwise. - */ -static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr) -{ - uint32_t reg; - uint32_t ret; - - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - - ret = phm_wait_for_register_unequal(hwmgr, reg, - 0, MP1_C2PMSG_90__CONTENT_MASK); - - if (ret) - pr_err("No response from smu\n"); - - return cgs_read_register(hwmgr->device, reg); -} - -/* - * Send a message to the SMC, and do not wait for its response. - * @param smumgr the address of the powerplay hardware manager. - * @param msg the message to send. - * @return Always return 0. - */ -static int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, - uint16_t msg) -{ - uint32_t reg; - - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66); - cgs_write_register(hwmgr->device, reg, msg); - - return 0; -} - -/* - * Send a message to the SMC, and wait for its response. - * @param hwmgr the address of the powerplay hardware manager. - * @param msg the message to send. - * @return Always return 0. - */ -static int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) -{ - uint32_t reg; - uint32_t ret; - - vega10_wait_for_response(hwmgr); - - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - cgs_write_register(hwmgr->device, reg, 0); - - vega10_send_msg_to_smc_without_waiting(hwmgr, msg); - - ret = vega10_wait_for_response(hwmgr); - if (ret != 1) - pr_err("Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret); - - return 0; -} - -/* - * Send a message to the SMC with parameter - * @param hwmgr: the address of the powerplay hardware manager. - * @param msg: the message to send. - * @param parameter: the parameter to send - * @return Always return 0. 
- */ -static int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, - uint16_t msg, uint32_t parameter) -{ - uint32_t reg; - uint32_t ret; - - vega10_wait_for_response(hwmgr); - - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - cgs_write_register(hwmgr->device, reg, 0); - - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); - cgs_write_register(hwmgr->device, reg, parameter); - - vega10_send_msg_to_smc_without_waiting(hwmgr, msg); - - ret = vega10_wait_for_response(hwmgr); - if (ret != 1) - pr_err("Failed message: 0x%x, input parameter: 0x%x, error code: 0x%x\n", msg, parameter, ret); - - return 0; -} - -static int vega10_get_argument(struct pp_hwmgr *hwmgr) -{ - uint32_t reg; - - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); - - return cgs_read_register(hwmgr->device, reg); -} - static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id) { @@ -193,13 +44,13 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr, "Invalid SMU Table version!", return -EINVAL); PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0, "Invalid SMU Table Length!", return -EINVAL); - vega10_send_msg_to_smc_with_parameter(hwmgr, + smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)); - vega10_send_msg_to_smc_with_parameter(hwmgr, + smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)); - vega10_send_msg_to_smc_with_parameter(hwmgr, + smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_TransferTableSmu2Dram, priv->smu_tables.entry[table_id].table_id); @@ -224,13 +75,13 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr, memcpy(priv->smu_tables.entry[table_id].table, table, priv->smu_tables.entry[table_id].size); - vega10_send_msg_to_smc_with_parameter(hwmgr, + smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)); - vega10_send_msg_to_smc_with_parameter(hwmgr, + smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)); - vega10_send_msg_to_smc_with_parameter(hwmgr, + smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_TransferTableDram2Smu, priv->smu_tables.entry[table_id].table_id); @@ -243,8 +94,8 @@ static int vega10_get_smc_features(struct pp_hwmgr *hwmgr, if (features_enabled == NULL) return -EINVAL; - vega10_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures); - *features_enabled = vega10_get_argument(hwmgr); + smu9_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures); + *features_enabled = smu9_get_argument(hwmgr); return 0; } @@ -266,10 +117,10 @@ static int vega10_set_tools_address(struct pp_hwmgr *hwmgr) struct vega10_smumgr *priv = hwmgr->smu_backend; if (priv->smu_tables.entry[TOOLSTABLE].mc_addr) { - vega10_send_msg_to_smc_with_parameter(hwmgr, + smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetToolsDramAddrHigh, upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr)); - vega10_send_msg_to_smc_with_parameter(hwmgr, + smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetToolsDramAddrLow, lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr)); } @@ -283,11 +134,11 @@ static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr) uint32_t dev_id; 
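
The table-transfer hunks above all reduce to the same three-message handshake: publish the buffer's GPU address in two 32-bit halves, then ask the SMU to move the table. A condensed sketch of the SMU-to-driver direction, names as in vega10_copy_table_from_smc() above (error checking omitted):

	smu9_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
	smu9_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
	smu9_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableSmu2Dram,
			priv->smu_tables.entry[table_id].table_id);
	memcpy(table, priv->smu_tables.entry[table_id].table,
			priv->smu_tables.entry[table_id].size);
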
uint32_t rev_id; - PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(hwmgr, + PP_ASSERT_WITH_CODE(!smu9_send_msg_to_smc(hwmgr, PPSMC_MSG_GetDriverIfVersion), "Attempt to get SMC IF Version Number Failed!", return -EINVAL); - smc_driver_if_version = vega10_get_argument(hwmgr); + smc_driver_if_version = smu9_get_argument(hwmgr); dev_id = adev->pdev->device; rev_id = adev->pdev->revision; @@ -459,7 +310,7 @@ static int vega10_smu_fini(struct pp_hwmgr *hwmgr) static int vega10_start_smu(struct pp_hwmgr *hwmgr) { - if (!vega10_is_smc_ram_running(hwmgr)) + if (!smu9_is_smc_ram_running(hwmgr)) return -EINVAL; PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(hwmgr), @@ -471,7 +322,8 @@ static int vega10_start_smu(struct pp_hwmgr *hwmgr) return 0; } -static int vega10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw) +static int vega10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, + uint16_t table_id, bool rw) { int ret; @@ -488,11 +340,11 @@ const struct pp_smumgr_func vega10_smu_funcs = { .smu_fini = &vega10_smu_fini, .start_smu = &vega10_start_smu, .request_smu_load_specific_fw = NULL, - .send_msg_to_smc = &vega10_send_msg_to_smc, - .send_msg_to_smc_with_parameter = &vega10_send_msg_to_smc_with_parameter, + .send_msg_to_smc = &smu9_send_msg_to_smc, + .send_msg_to_smc_with_parameter = &smu9_send_msg_to_smc_with_parameter, .download_pptable_settings = NULL, .upload_pptable_settings = NULL, .is_dpm_running = vega10_is_dpm_running, - .get_argument = vega10_get_argument, + .get_argument = smu9_get_argument, .smc_table_manager = vega10_smc_table_manager, }; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c index 651a3f28734b..7f0e2109f40d 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c @@ -23,176 +23,15 @@ #include "smumgr.h" #include "vega12_inc.h" -#include "pp_soc15.h" +#include "soc15_common.h" +#include "smu9_smumgr.h" #include "vega12_smumgr.h" #include "vega12_ppsmc.h" #include "vega12/smu9_driver_if.h" - #include "ppatomctrl.h" #include "pp_debug.h" -/* MP Apertures */ -#define MP0_Public 0x03800000 -#define MP0_SRAM 0x03900000 -#define MP1_Public 0x03b00000 -#define MP1_SRAM 0x03c00004 - -#define smnMP1_FIRMWARE_FLAGS 0x3010028 -#define smnMP0_FW_INTF 0x3010104 -#define smnMP1_PUB_CTRL 0x3010b14 - -static bool vega12_is_smc_ram_running(struct pp_hwmgr *hwmgr) -{ - uint32_t mp1_fw_flags, reg; - - reg = soc15_get_register_offset(NBIF_HWID, 0, - mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2); - - cgs_write_register(hwmgr->device, reg, - (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); - - reg = soc15_get_register_offset(NBIF_HWID, 0, - mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2); - - mp1_fw_flags = cgs_read_register(hwmgr->device, reg); - - if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> - MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) - return true; - - return false; -} - -/* - * Check if SMC has responded to previous message. - * - * @param smumgr the address of the powerplay hardware manager. - * @return TRUE SMC has responded, FALSE otherwise. 
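
One behavioural detail of the consolidation is visible here: the vega12 wait loop removed just below discards the poll status, whereas the shared smu9_wait_for_response() added earlier in this patch reports a timeout. For reference, the consolidated poll is:

	ret = phm_wait_for_register_unequal(hwmgr, reg,
			0, MP1_C2PMSG_90__CONTENT_MASK);
	if (ret)	/* poll timed out: the SMC never posted a reply */
		pr_err("No response from smu\n");
	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
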
- */ -static uint32_t vega12_wait_for_response(struct pp_hwmgr *hwmgr) -{ - uint32_t reg; - - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - - phm_wait_for_register_unequal(hwmgr, reg, - 0, MP1_C2PMSG_90__CONTENT_MASK); - - return cgs_read_register(hwmgr->device, reg); -} - -/* - * Send a message to the SMC, and do not wait for its response. - * @param smumgr the address of the powerplay hardware manager. - * @param msg the message to send. - * @return Always return 0. - */ -int vega12_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, - uint16_t msg) -{ - uint32_t reg; - - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66); - cgs_write_register(hwmgr->device, reg, msg); - - return 0; -} - -/* - * Send a message to the SMC, and wait for its response. - * @param hwmgr the address of the powerplay hardware manager. - * @param msg the message to send. - * @return Always return 0. - */ -int vega12_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) -{ - uint32_t reg; - - vega12_wait_for_response(hwmgr); - - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - cgs_write_register(hwmgr->device, reg, 0); - - vega12_send_msg_to_smc_without_waiting(hwmgr, msg); - - if (vega12_wait_for_response(hwmgr) != 1) - pr_err("Failed to send message: 0x%x\n", msg); - - return 0; -} - -/* - * Send a message to the SMC with parameter - * @param hwmgr: the address of the powerplay hardware manager. - * @param msg: the message to send. - * @param parameter: the parameter to send - * @return Always return 0. - */ -int vega12_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, - uint16_t msg, uint32_t parameter) -{ - uint32_t reg; - - vega12_wait_for_response(hwmgr); - - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - cgs_write_register(hwmgr->device, reg, 0); - - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); - cgs_write_register(hwmgr->device, reg, parameter); - - vega12_send_msg_to_smc_without_waiting(hwmgr, msg); - - if (vega12_wait_for_response(hwmgr) != 1) - pr_err("Failed to send message: 0x%x\n", msg); - - return 0; -} - - -/* - * Send a message to the SMC with parameter, do not wait for response - * @param hwmgr: the address of the powerplay hardware manager. - * @param msg: the message to send. - * @param parameter: the parameter to send - * @return The response that came from the SMC. - */ -int vega12_send_msg_to_smc_with_parameter_without_waiting( - struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) -{ - uint32_t reg; - - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66); - cgs_write_register(hwmgr->device, reg, parameter); - - return vega12_send_msg_to_smc_without_waiting(hwmgr, msg); -} - -/* - * Retrieve an argument from SMC. - * @param hwmgr the address of the powerplay hardware manager. - * @param arg pointer to store the argument from SMC. - * @return Always return 0. 
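
Since the C2PMSG_82 argument mailbox is only 32 bits wide, the 64-bit feature mask handled further down in vega12_get_enabled_smc_features() is fetched as two halves and reassembled. A worked sketch of that arithmetic, using the SMU_FEATURES_* masks from vega12_smumgr.h and purely illustrative reply values:

	uint32_t smc_features_low  = 0x0000F00D;	/* illustrative */
	uint32_t smc_features_high = 0x00000003;	/* illustrative */
	uint64_t features_enabled =
		((((uint64_t)smc_features_low) << SMU_FEATURES_LOW_SHIFT) &
				SMU_FEATURES_LOW_MASK) |
		((((uint64_t)smc_features_high) << SMU_FEATURES_HIGH_SHIFT) &
				SMU_FEATURES_HIGH_MASK);
	/* features_enabled == 0x000000030000F00D */
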
- */ -int vega12_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg) -{ - uint32_t reg; - - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); - - *arg = cgs_read_register(hwmgr->device, reg); - - return 0; -} - /* * Copy table from SMC into driver FB * @param hwmgr the address of the HW manager @@ -210,16 +49,16 @@ int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr, "Invalid SMU Table version!", return -EINVAL); PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0, "Invalid SMU Table Length!", return -EINVAL); - PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0, "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL); - PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0, "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!", return -EINVAL); - PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_TransferTableSmu2Dram, table_id) == 0, "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!", @@ -252,17 +91,17 @@ int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr, memcpy(priv->smu_tables.entry[table_id].table, table, priv->smu_tables.entry[table_id].size); - PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0, "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL;); - PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0, "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!", return -EINVAL); - PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_TransferTableDram2Smu, table_id) == 0, "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!", @@ -280,20 +119,20 @@ int vega12_enable_smc_features(struct pp_hwmgr *hwmgr, smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT); if (enable) { - PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low) == 0, "[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!", return -EINVAL); - PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high) == 0, "[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!", return -EINVAL); } else { - PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr, + PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low) == 0, "[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!", return -EINVAL); - PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr, 
+ PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high) == 0, "[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!", return -EINVAL); @@ -310,22 +149,17 @@ int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr, if (features_enabled == NULL) return -EINVAL; - PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc(hwmgr, + PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeaturesLow) == 0, "[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!", return -EINVAL); - PP_ASSERT_WITH_CODE(vega12_read_arg_from_smc(hwmgr, - &smc_features_low) == 0, - "[GetEnabledSMCFeatures] Attemp to read SMU features Low argument failed!", - return -EINVAL); - PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc(hwmgr, + smc_features_low = smu9_get_argument(hwmgr); + + PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeaturesHigh) == 0, "[GetEnabledSMCFeatures] Attemp to get SMU features High failed!", return -EINVAL); - PP_ASSERT_WITH_CODE(vega12_read_arg_from_smc(hwmgr, - &smc_features_high) == 0, - "[GetEnabledSMCFeatures] Attemp to read SMU features High argument failed!", - return -EINVAL); + smc_features_high = smu9_get_argument(hwmgr); *features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) | (((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK)); @@ -351,39 +185,16 @@ static int vega12_set_tools_address(struct pp_hwmgr *hwmgr) (struct vega12_smumgr *)(hwmgr->smu_backend); if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) { - if (!vega12_send_msg_to_smc_with_parameter(hwmgr, + if (!smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetToolsDramAddrHigh, upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr))) - vega12_send_msg_to_smc_with_parameter(hwmgr, + smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetToolsDramAddrLow, lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr)); } return 0; } -#if 0 /* tentatively remove */ -static int vega12_verify_smc_interface(struct pp_hwmgr *hwmgr) -{ - uint32_t smc_driver_if_version; - - PP_ASSERT_WITH_CODE(!vega12_send_msg_to_smc(hwmgr, - PPSMC_MSG_GetDriverIfVersion), - "Attempt to get SMC IF Version Number Failed!", - return -EINVAL); - vega12_read_arg_from_smc(hwmgr, &smc_driver_if_version); - - if (smc_driver_if_version != SMU9_DRIVER_IF_VERSION) { - pr_err("Your firmware(0x%x) doesn't match \ - SMU9_DRIVER_IF_VERSION(0x%x). 
\ - Please update your firmware!\n", - smc_driver_if_version, SMU9_DRIVER_IF_VERSION); - return -EINVAL; - } - - return 0; -} -#endif - static int vega12_smu_init(struct pp_hwmgr *hwmgr) { struct vega12_smumgr *priv; @@ -531,16 +342,10 @@ static int vega12_smu_fini(struct pp_hwmgr *hwmgr) static int vega12_start_smu(struct pp_hwmgr *hwmgr) { - PP_ASSERT_WITH_CODE(vega12_is_smc_ram_running(hwmgr), + PP_ASSERT_WITH_CODE(smu9_is_smc_ram_running(hwmgr), "SMC is not running!", return -EINVAL); -#if 0 /* tentatively remove */ - PP_ASSERT_WITH_CODE(!vega12_verify_smc_interface(hwmgr), - "Failed to verify SMC interface!", - return -EINVAL); -#endif - vega12_set_tools_address(hwmgr); return 0; @@ -551,9 +356,10 @@ const struct pp_smumgr_func vega12_smu_funcs = { .smu_fini = &vega12_smu_fini, .start_smu = &vega12_start_smu, .request_smu_load_specific_fw = NULL, - .send_msg_to_smc = &vega12_send_msg_to_smc, - .send_msg_to_smc_with_parameter = &vega12_send_msg_to_smc_with_parameter, + .send_msg_to_smc = &smu9_send_msg_to_smc, + .send_msg_to_smc_with_parameter = &smu9_send_msg_to_smc_with_parameter, .download_pptable_settings = NULL, .upload_pptable_settings = NULL, .is_dpm_running = vega12_is_dpm_running, + .get_argument = smu9_get_argument, }; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h index 2810d387b611..b285cbc04019 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h @@ -48,7 +48,6 @@ struct vega12_smumgr { #define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000 #define SMU_FEATURES_HIGH_SHIFT 32 -int vega12_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg); int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id); int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr, diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c new file mode 100644 index 000000000000..57420d7caa4e --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c @@ -0,0 +1,2298 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
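
One conversion used repeatedly in the new vegam code below (for instance in vegam_populate_ulv_level()): millivolt offsets become VID codes via VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1 = 100/625, i.e. one VID step per 6.25 mV. A worked sketch with an illustrative offset:

	/* 200 mV -> 200 * 100 / 625 = 32 VID steps. */
	uint16_t offset_mv = 200;
	uint8_t vid = (uint8_t)(offset_mv * VOLTAGE_VID_OFFSET_SCALE2 /
			VOLTAGE_VID_OFFSET_SCALE1);
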
+ * + */ +#include "pp_debug.h" +#include "smumgr.h" +#include "smu_ucode_xfer_vi.h" +#include "vegam_smumgr.h" +#include "smu/smu_7_1_3_d.h" +#include "smu/smu_7_1_3_sh_mask.h" +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" +#include "oss/oss_3_0_d.h" +#include "gca/gfx_8_0_d.h" +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" +#include "ppatomctrl.h" +#include "cgs_common.h" +#include "smu7_ppsmc.h" + +#include "smu7_dyn_defaults.h" + +#include "smu7_hwmgr.h" +#include "hardwaremanager.h" +#include "ppatomctrl.h" +#include "atombios.h" +#include "pppcielanes.h" + +#include "dce/dce_11_2_d.h" +#include "dce/dce_11_2_sh_mask.h" + +#define PPVEGAM_TARGETACTIVITY_DFLT 50 + +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 +#define POWERTUNE_DEFAULT_SET_MAX 1 +#define VDDC_VDDCI_DELTA 200 +#define MC_CG_ARB_FREQ_F1 0x0b + +#define STRAP_ASIC_RO_LSB 2168 +#define STRAP_ASIC_RO_MSB 2175 + +#define PPSMC_MSG_ApplyAvfsCksOffVoltage ((uint16_t) 0x415) +#define PPSMC_MSG_EnableModeSwitchRLCNotification ((uint16_t) 0x305) + +static const struct vegam_pt_defaults +vegam_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = { + /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, + * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */ + { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, + { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61}, + { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } }, +}; + +static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = { + {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112}, + {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160}, + {VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112}, + {VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160}, + {VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112}, + {VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160}, + {VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108}, + {VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} }; + +static int vegam_smu_init(struct pp_hwmgr *hwmgr) +{ + struct vegam_smumgr *smu_data; + + smu_data = kzalloc(sizeof(struct vegam_smumgr), GFP_KERNEL); + if (smu_data == NULL) + return -ENOMEM; + + hwmgr->smu_backend = smu_data; + + if (smu7_init(hwmgr)) { + kfree(smu_data); + return -EINVAL; + } + + return 0; +} + +static int vegam_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) +{ + int result = 0; + + /* Wait for smc boot up */ + /* PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */ + + /* Assert reset */ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, rst_reg, 1); + + result = smu7_upload_smu_firmware_image(hwmgr); + if (result != 0) + return result; + + /* Clear status */ + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0); + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); + + /* De-assert reset */ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, rst_reg, 0); + + + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1); + + + /* Call Test SMU message with 0x20000 offset to trigger SMU start */ + smu7_send_msg_to_smc_offset(hwmgr); + + /* Wait done bit to be set */ + /* Check pass/failed indicator */ + + PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, SMU_STATUS, SMU_DONE, 0); + + if (1 != PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 
+ SMU_STATUS, SMU_PASS)) + PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, rst_reg, 1); + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, rst_reg, 0); + + /* Wait for firmware to initialize */ + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); + + return result; +} + +static int vegam_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr) +{ + int result = 0; + + /* wait for smc boot up */ + PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); + + /* Clear firmware interrupt enable flag */ + /* PHM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */ + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixFIRMWARE_FLAGS, 0); + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, + rst_reg, 1); + + result = smu7_upload_smu_firmware_image(hwmgr); + if (result != 0) + return result; + + /* Set smc instruct start point at 0x0 */ + smu7_program_jump_on_start(hwmgr); + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, rst_reg, 0); + + /* Wait for firmware to initialize */ + + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, + FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); + + return result; +} + +static int vegam_start_smu(struct pp_hwmgr *hwmgr) +{ + int result = 0; + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + + /* Only start SMC if SMC RAM is not running */ + if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) { + smu_data->protected_mode = (uint8_t)(PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)); + smu_data->smu7_data.security_hard_key = (uint8_t)(PHM_READ_VFPF_INDIRECT_FIELD( + hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL)); + + /* Check if SMU is running in protected mode */ + if (smu_data->protected_mode == 0) + result = vegam_start_smu_in_non_protection_mode(hwmgr); + else + result = vegam_start_smu_in_protection_mode(hwmgr); + + if (result != 0) + PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result); + } + + /* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */ + smu7_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU75_Firmware_Header, SoftRegisters), + &(smu_data->smu7_data.soft_regs_start), + 0x40000); + + result = smu7_request_smu_load_fw(hwmgr); + + return result; +} + +static int vegam_process_firmware_header(struct pp_hwmgr *hwmgr) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t tmp; + int result; + bool error = false; + + result = smu7_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU75_Firmware_Header, DpmTable), + &tmp, SMC_RAM_END); + + if (0 == result) + smu_data->smu7_data.dpm_table_start = tmp; + + error |= (0 != result); + + result = smu7_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU75_Firmware_Header, SoftRegisters), + &tmp, SMC_RAM_END); + + if (!result) { + data->soft_regs_start = tmp; + 
smu_data->smu7_data.soft_regs_start = tmp; + } + + error |= (0 != result); + + result = smu7_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU75_Firmware_Header, mcRegisterTable), + &tmp, SMC_RAM_END); + + if (!result) + smu_data->smu7_data.mc_reg_table_start = tmp; + + result = smu7_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU75_Firmware_Header, FanTable), + &tmp, SMC_RAM_END); + + if (!result) + smu_data->smu7_data.fan_table_start = tmp; + + error |= (0 != result); + + result = smu7_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU75_Firmware_Header, mcArbDramTimingTable), + &tmp, SMC_RAM_END); + + if (!result) + smu_data->smu7_data.arb_table_start = tmp; + + error |= (0 != result); + + result = smu7_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU75_Firmware_Header, Version), + &tmp, SMC_RAM_END); + + if (!result) + hwmgr->microcode_version_info.SMC = tmp; + + error |= (0 != result); + + return error ? -1 : 0; +} + +static bool vegam_is_dpm_running(struct pp_hwmgr *hwmgr) +{ + return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)) + ? true : false; +} + +static uint32_t vegam_get_mac_definition(uint32_t value) +{ + switch (value) { + case SMU_MAX_LEVELS_GRAPHICS: + return SMU75_MAX_LEVELS_GRAPHICS; + case SMU_MAX_LEVELS_MEMORY: + return SMU75_MAX_LEVELS_MEMORY; + case SMU_MAX_LEVELS_LINK: + return SMU75_MAX_LEVELS_LINK; + case SMU_MAX_ENTRIES_SMIO: + return SMU75_MAX_ENTRIES_SMIO; + case SMU_MAX_LEVELS_VDDC: + return SMU75_MAX_LEVELS_VDDC; + case SMU_MAX_LEVELS_VDDGFX: + return SMU75_MAX_LEVELS_VDDGFX; + case SMU_MAX_LEVELS_VDDCI: + return SMU75_MAX_LEVELS_VDDCI; + case SMU_MAX_LEVELS_MVDD: + return SMU75_MAX_LEVELS_MVDD; + case SMU_UVD_MCLK_HANDSHAKE_DISABLE: + return SMU7_UVD_MCLK_HANDSHAKE_DISABLE | + SMU7_VCE_MCLK_HANDSHAKE_DISABLE; + } + + pr_warn("can't get the mac of %x\n", value); + return 0; +} + +static int vegam_update_uvd_smc_table(struct pp_hwmgr *hwmgr) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + smu_data->smc_state_table.UvdBootLevel = 0; + if (table_info->mm_dep_table->count > 0) + smu_data->smc_state_table.UvdBootLevel = + (uint8_t) (table_info->mm_dep_table->count - 1); + mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU75_Discrete_DpmTable, + UvdBootLevel); + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0x00FFFFFF; + mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDDPM) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_UVDDPM_SetEnabledMask, + (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel)); + return 0; +} + +static int vegam_update_vce_smc_table(struct pp_hwmgr *hwmgr) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + struct phm_ppt_v1_information 
*table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smu_data->smc_state_table.VceBootLevel = + (uint8_t) (table_info->mm_dep_table->count - 1); + else + smu_data->smc_state_table.VceBootLevel = 0; + + mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + + offsetof(SMU75_Discrete_DpmTable, VceBootLevel); + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0xFF00FFFF; + mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_VCEDPM_SetEnabledMask, + (uint32_t)1 << smu_data->smc_state_table.VceBootLevel); + return 0; +} + +static int vegam_update_bif_smc_table(struct pp_hwmgr *hwmgr) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; + int max_entry, i; + + max_entry = (SMU75_MAX_LEVELS_LINK < pcie_table->count) ? + SMU75_MAX_LEVELS_LINK : + pcie_table->count; + /* Setup BIF_SCLK levels */ + for (i = 0; i < max_entry; i++) + smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk; + return 0; +} + +static int vegam_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) +{ + switch (type) { + case SMU_UVD_TABLE: + vegam_update_uvd_smc_table(hwmgr); + break; + case SMU_VCE_TABLE: + vegam_update_vce_smc_table(hwmgr); + break; + case SMU_BIF_TABLE: + vegam_update_bif_smc_table(hwmgr); + break; + default: + break; + } + return 0; +} + +static void vegam_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + if (table_info && + table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX && + table_info->cac_dtp_table->usPowerTuneDataSetID) + smu_data->power_tune_defaults = + &vegam_power_tune_data_set_array + [table_info->cac_dtp_table->usPowerTuneDataSetID - 1]; + else + smu_data->power_tune_defaults = &vegam_power_tune_data_set_array[0]; + +} + +static int vegam_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr, + SMU75_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t count, level; + + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { + count = data->mvdd_voltage_table.count; + if (count > SMU_MAX_SMIO_LEVELS) + count = SMU_MAX_SMIO_LEVELS; + for (level = 0; level < count; level++) { + table->SmioTable2.Pattern[level].Voltage = PP_HOST_TO_SMC_US( + data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE); + /* Index into DpmTable.Smio. 
Drive bits from Smio entry to get this voltage level.*/ + table->SmioTable2.Pattern[level].Smio = + (uint8_t) level; + table->Smio[level] |= + data->mvdd_voltage_table.entries[level].smio_low; + } + table->SmioMask2 = data->mvdd_voltage_table.mask_low; + + table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count); + } + + return 0; +} + +static int vegam_populate_smc_vddci_table(struct pp_hwmgr *hwmgr, + struct SMU75_Discrete_DpmTable *table) +{ + uint32_t count, level; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + count = data->vddci_voltage_table.count; + + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { + if (count > SMU_MAX_SMIO_LEVELS) + count = SMU_MAX_SMIO_LEVELS; + for (level = 0; level < count; ++level) { + table->SmioTable1.Pattern[level].Voltage = PP_HOST_TO_SMC_US( + data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE); + table->SmioTable1.Pattern[level].Smio = (uint8_t) level; + + table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low; + } + } + + table->SmioMask1 = data->vddci_voltage_table.mask_low; + + return 0; +} + +static int vegam_populate_cac_table(struct pp_hwmgr *hwmgr, + struct SMU75_Discrete_DpmTable *table) +{ + uint32_t count; + uint8_t index; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_voltage_lookup_table *lookup_table = + table_info->vddc_lookup_table; + /* tables is already swapped, so in order to use the value from it, + * we need to swap it back. + * We are populating vddc CAC data to BapmVddc table + * in split and merged mode + */ + for (count = 0; count < lookup_table->count; count++) { + index = phm_get_voltage_index(lookup_table, + data->vddc_voltage_table.entries[count].value); + table->BapmVddcVidLoSidd[count] = + convert_to_vid(lookup_table->entries[index].us_cac_low); + table->BapmVddcVidHiSidd[count] = + convert_to_vid(lookup_table->entries[index].us_cac_mid); + table->BapmVddcVidHiSidd2[count] = + convert_to_vid(lookup_table->entries[index].us_cac_high); + } + + return 0; +} + +static int vegam_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, + struct SMU75_Discrete_DpmTable *table) +{ + vegam_populate_smc_vddci_table(hwmgr, table); + vegam_populate_smc_mvdd_table(hwmgr, table); + vegam_populate_cac_table(hwmgr, table); + + return 0; +} + +static int vegam_populate_ulv_level(struct pp_hwmgr *hwmgr, + struct SMU75_Discrete_Ulv *state) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + state->CcPwrDynRm = 0; + state->CcPwrDynRm1 = 0; + + state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset; + state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset * + VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); + + state->VddcPhase = data->vddc_phase_shed_control ^ 0x3; + + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1); + CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset); + + return 0; +} + +static int vegam_populate_ulv_state(struct pp_hwmgr *hwmgr, + struct SMU75_Discrete_DpmTable *table) +{ + return vegam_populate_ulv_level(hwmgr, &table->Ulv); +} + +static int vegam_populate_smc_link_level(struct pp_hwmgr *hwmgr, + struct SMU75_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct vegam_smumgr 
*smu_data = + (struct vegam_smumgr *)(hwmgr->smu_backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + int i; + + /* Index (dpm_table->pcie_speed_table.count) + * is reserved for PCIE boot level. */ + for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) { + table->LinkLevel[i].PcieGenSpeed = + (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value; + table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width( + dpm_table->pcie_speed_table.dpm_levels[i].param1); + table->LinkLevel[i].EnabledForActivity = 1; + table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff); + table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5); + table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30); + } + + smu_data->smc_state_table.LinkLevelCount = + (uint8_t)dpm_table->pcie_speed_table.count; + +/* To Do move to hwmgr */ + data->dpm_level_enable_mask.pcie_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); + + return 0; +} + +static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table, + uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd) +{ + uint32_t i; + uint16_t vddci; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + *voltage = *mvdd = 0; + + /* clock - voltage dependency table is empty table */ + if (dep_table->count == 0) + return -EINVAL; + + for (i = 0; i < dep_table->count; i++) { + /* find first sclk bigger than request */ + if (dep_table->entries[i].clk >= clock) { + *voltage |= (dep_table->entries[i].vddc * + VOLTAGE_SCALE) << VDDC_SHIFT; + if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) + *voltage |= (data->vbios_boot_state.vddci_bootup_value * + VOLTAGE_SCALE) << VDDCI_SHIFT; + else if (dep_table->entries[i].vddci) + *voltage |= (dep_table->entries[i].vddci * + VOLTAGE_SCALE) << VDDCI_SHIFT; + else { + vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), + (dep_table->entries[i].vddc - + (uint16_t)VDDC_VDDCI_DELTA)); + *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + } + + if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) + *mvdd = data->vbios_boot_state.mvdd_bootup_value * + VOLTAGE_SCALE; + else if (dep_table->entries[i].mvdd) + *mvdd = (uint32_t) dep_table->entries[i].mvdd * + VOLTAGE_SCALE; + + *voltage |= 1 << PHASES_SHIFT; + return 0; + } + } + + /* sclk is bigger than max sclk in the dependence table */ + *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; + vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), + (dep_table->entries[i - 1].vddc - + (uint16_t)VDDC_VDDCI_DELTA)); + + if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) + *voltage |= (data->vbios_boot_state.vddci_bootup_value * + VOLTAGE_SCALE) << VDDCI_SHIFT; + else if (dep_table->entries[i - 1].vddci) + *voltage |= (dep_table->entries[i - 1].vddci * + VOLTAGE_SCALE) << VDDC_SHIFT; + else + *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + + if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) + *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE; + else if (dep_table->entries[i].mvdd) + *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE; + + return 0; +} + +static void vegam_get_sclk_range_table(struct pp_hwmgr *hwmgr, + SMU75_Discrete_DpmTable *table) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + uint32_t i, ref_clk; + + struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } }; + + ref_clk = 
+static void vegam_get_sclk_range_table(struct pp_hwmgr *hwmgr, + SMU75_Discrete_DpmTable *table) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + uint32_t i, ref_clk; + + struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } }; + + ref_clk = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev); + + if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) { + for (i = 0; i < NUM_SCLK_RANGE; i++) { + table->SclkFcwRangeTable[i].vco_setting = + range_table_from_vbios.entry[i].ucVco_setting; + table->SclkFcwRangeTable[i].postdiv = + range_table_from_vbios.entry[i].ucPostdiv; + table->SclkFcwRangeTable[i].fcw_pcc = + range_table_from_vbios.entry[i].usFcw_pcc; + + table->SclkFcwRangeTable[i].fcw_trans_upper = + range_table_from_vbios.entry[i].usFcw_trans_upper; + table->SclkFcwRangeTable[i].fcw_trans_lower = + range_table_from_vbios.entry[i].usRcw_trans_lower; + + CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc); + CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper); + CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower); + } + return; + } + + for (i = 0; i < NUM_SCLK_RANGE; i++) { + smu_data->range_table[i].trans_lower_frequency = + (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv; + smu_data->range_table[i].trans_upper_frequency = + (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv; + + table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting; + table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv; + table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc; + + table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper; + table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower; + + CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc); + CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper); + CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower); + } +} + +static int vegam_calculate_sclk_params(struct pp_hwmgr *hwmgr, + uint32_t clock, SMU_SclkSetting *sclk_setting) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + const SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table); + struct pp_atomctrl_clock_dividers_ai dividers; + uint32_t ref_clock; + uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq; + uint8_t i; + int result; + uint64_t temp; + + sclk_setting->SclkFrequency = clock; + /* get the engine clock dividers for this clock value */ + result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, &dividers); + if (result == 0) { + sclk_setting->Fcw_int = dividers.usSclk_fcw_int; + sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac; + sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int; + sclk_setting->PllRange = dividers.ucSclkPllRange; + sclk_setting->Sclk_slew_rate = 0x400; + sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac; + sclk_setting->Pcc_down_slew_rate = 0xffff; + sclk_setting->SSc_En = dividers.ucSscEnable; + sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int; + sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac; + sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac; + return result; + } + + ref_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev); + + for (i = 0; i < NUM_SCLK_RANGE; i++) { + if (clock > smu_data->range_table[i].trans_lower_frequency + && clock <= smu_data->range_table[i].trans_upper_frequency) { + sclk_setting->PllRange = i; + break; + } + } + + sclk_setting->Fcw_int = (uint16_t) + ((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / + ref_clock); + temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv; + temp <<= 0x10; + do_div(temp,
ref_clock); + sclk_setting->Fcw_frac = temp & 0xffff; + + pcc_target_percent = 10; /* Hardcode 10% for now. */ + pcc_target_freq = clock - (clock * pcc_target_percent / 100); + sclk_setting->Pcc_fcw_int = (uint16_t) + ((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / + ref_clock); + + ss_target_percent = 2; /* Hardcode 2% for now. */ + sclk_setting->SSc_En = 0; + if (ss_target_percent) { + sclk_setting->SSc_En = 1; + ss_target_freq = clock - (clock * ss_target_percent / 100); + sclk_setting->Fcw1_int = (uint16_t) + ((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / + ref_clock); + temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv; + temp <<= 0x10; + do_div(temp, ref_clock); + sclk_setting->Fcw1_frac = temp & 0xffff; + } + + return 0; +} + +static uint8_t vegam_get_sleep_divider_id_from_clock(uint32_t clock, + uint32_t clock_insr) +{ + uint8_t i; + uint32_t temp; + uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK); + + PP_ASSERT_WITH_CODE((clock >= min), + "Engine clock can't satisfy stutter requirement!", + return 0); + for (i = 31; ; i--) { + temp = clock / (i + 1); + + if (temp >= min || i == 0) + break; + } + return i; +} + +static int vegam_populate_single_graphic_level(struct pp_hwmgr *hwmgr, + uint32_t clock, struct SMU75_Discrete_GraphicsLevel *level) +{ + int result; + /* PP_Clocks minClocks; */ + uint32_t mvdd; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + SMU_SclkSetting curr_sclk_setting = { 0 }; + + result = vegam_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting); + + /* populate graphics levels */ + result = vegam_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_sclk, clock, + &level->MinVoltage, &mvdd); + + PP_ASSERT_WITH_CODE((0 == result), + "can not find VDDC voltage value for " + "VDDC engine clock dependency table", + return result); + level->ActivityLevel = (uint16_t)(SclkDPMTuning_VEGAM >> DPMTuning_Activity_Shift); + + level->CcPwrDynRm = 0; + level->CcPwrDynRm1 = 0; + level->EnabledForActivity = 0; + level->EnabledForThrottle = 1; + level->VoltageDownHyst = 0; + level->PowerThrottle = 0; + data->display_timing.min_clock_in_sr = hwmgr->display_config->min_core_set_clock_in_sr; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) + level->DeepSleepDivId = vegam_get_sleep_divider_id_from_clock(clock, + hwmgr->display_config->min_core_set_clock_in_sr); + + level->SclkSetting = curr_sclk_setting; + + CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage); + CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1); + CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate); + return 0; +} + +static int 
vegam_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + struct smu7_dpm_table *dpm_table = &hw_data->dpm_table; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; + uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count; + int result = 0; + uint32_t array = smu_data->smu7_data.dpm_table_start + + offsetof(SMU75_Discrete_DpmTable, GraphicsLevel); + uint32_t array_size = sizeof(struct SMU75_Discrete_GraphicsLevel) * + SMU75_MAX_LEVELS_GRAPHICS; + struct SMU75_Discrete_GraphicsLevel *levels = + smu_data->smc_state_table.GraphicsLevel; + uint32_t i, max_entry; + uint8_t highest_pcie_level_enabled = 0, + lowest_pcie_level_enabled = 0, + mid_pcie_level_enabled = 0, + count = 0; + + vegam_get_sclk_range_table(hwmgr, &(smu_data->smc_state_table)); + + for (i = 0; i < dpm_table->sclk_table.count; i++) { + + result = vegam_populate_single_graphic_level(hwmgr, + dpm_table->sclk_table.dpm_levels[i].value, + &(smu_data->smc_state_table.GraphicsLevel[i])); + if (result) + return result; + + levels[i].UpHyst = (uint8_t) + (SclkDPMTuning_VEGAM >> DPMTuning_Uphyst_Shift); + levels[i].DownHyst = (uint8_t) + (SclkDPMTuning_VEGAM >> DPMTuning_Downhyst_Shift); + /* Make sure only DPM levels 0-1 have a Deep Sleep Div ID populated. */ + if (i > 1) + levels[i].DeepSleepDivId = 0; + } + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SPLLShutdownSupport)) + smu_data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0; + + smu_data->smc_state_table.GraphicsDpmLevelCount = + (uint8_t)dpm_table->sclk_table.count; + hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); + + for (i = 0; i < dpm_table->sclk_table.count; i++) + levels[i].EnabledForActivity = + (hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask >> i) & 0x1; + + if (pcie_table != NULL) { + PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt), + "There must be 1 or more PCIE levels defined in PPTable.", + return -EINVAL); + max_entry = pcie_entry_cnt - 1; + for (i = 0; i < dpm_table->sclk_table.count; i++) + levels[i].pcieDpmLevel = + (uint8_t) ((i < max_entry) ? i : max_entry); + } else { + while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask && + ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << (highest_pcie_level_enabled + 1))) != 0)) + highest_pcie_level_enabled++; + + while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask && + ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << lowest_pcie_level_enabled)) == 0)) + lowest_pcie_level_enabled++; + + while ((count < highest_pcie_level_enabled) && + ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) + count++; + + mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) < + highest_pcie_level_enabled ? + (lowest_pcie_level_enabled + 1 + count) : + highest_pcie_level_enabled; + + /* set pcieDpmLevel to highest_pcie_level_enabled */ + for (i = 2; i < dpm_table->sclk_table.count; i++) + levels[i].pcieDpmLevel = highest_pcie_level_enabled; + + /* set pcieDpmLevel to lowest_pcie_level_enabled */ + levels[0].pcieDpmLevel = lowest_pcie_level_enabled; + + /* set pcieDpmLevel to mid_pcie_level_enabled */ + levels[1].pcieDpmLevel = mid_pcie_level_enabled; + } + /* The level count is sent to the SMC once at SMC-table init and never changes. */ + result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, + (uint32_t)array_size, SMC_RAM_END); + + return result; +} +
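When no PCIe table exists, the walk above derives the lowest, highest and mid enabled PCIe levels from pcie_dpm_enable_mask. A standalone version of the same greedy walk, which (like the patch) assumes a contiguous mask. Illustration only, not part of the patch:

	#include <stdint.h>

	static void pcie_level_bounds(uint32_t mask, uint8_t *lowest, uint8_t *highest)
	{
		*lowest = 0;
		*highest = 0;
		while (mask && !(mask & (1u << *lowest)))
			(*lowest)++;
		while (mask && (mask & (1u << (*highest + 1))))
			(*highest)++;
	}

	/* e.g. mask == 0x6 (levels 1 and 2 enabled): lowest == 1, highest == 2,
	 * so levels[0] gets level 1 and levels[1..] get level 2. */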
+static int vegam_calculate_mclk_params(struct pp_hwmgr *hwmgr, + uint32_t clock, struct SMU75_Discrete_MemoryLevel *mem_level) +{ + struct pp_atomctrl_memory_clock_param_ai mpll_param; + + PP_ASSERT_WITH_CODE(!atomctrl_get_memory_pll_dividers_ai(hwmgr, + clock, &mpll_param), + "Failed to retrieve memory pll parameter.", + return -EINVAL); + + mem_level->MclkFrequency = (uint32_t)mpll_param.ulClock; + mem_level->Fcw_int = (uint16_t)mpll_param.ulMclk_fcw_int; + mem_level->Fcw_frac = (uint16_t)mpll_param.ulMclk_fcw_frac; + mem_level->Postdiv = (uint8_t)mpll_param.ulPostDiv; + + return 0; +} + +static int vegam_populate_single_memory_level(struct pp_hwmgr *hwmgr, + uint32_t clock, struct SMU75_Discrete_MemoryLevel *mem_level) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + int result = 0; + uint32_t mclk_stutter_mode_threshold = 60000; + + + if (table_info->vdd_dep_on_mclk) { + result = vegam_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_mclk, clock, + &mem_level->MinVoltage, &mem_level->MinMvdd); + PP_ASSERT_WITH_CODE(!result, + "cannot find MinVddc voltage value in the memory " + "VDDC voltage dependency table", return result); + } + + result = vegam_calculate_mclk_params(hwmgr, clock, mem_level); + PP_ASSERT_WITH_CODE(!result, + "Failed to calculate mclk params.", + return -EINVAL); + + mem_level->EnabledForThrottle = 1; + mem_level->EnabledForActivity = 0; + mem_level->VoltageDownHyst = 0; + mem_level->ActivityLevel = (uint16_t) + (MemoryDPMTuning_VEGAM >> DPMTuning_Activity_Shift); + mem_level->StutterEnable = false; + mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + data->display_timing.num_existing_displays = hwmgr->display_config->num_display; + + if (mclk_stutter_mode_threshold && + (clock <= mclk_stutter_mode_threshold) && + (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, + STUTTER_ENABLE) & 0x1)) + mem_level->StutterEnable = true; + + if (!result) { + CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd); + CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(mem_level->Fcw_int); + CONVERT_FROM_HOST_TO_SMC_US(mem_level->Fcw_frac); + CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage); + } + + return result; +} +
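A note on units for the hard-coded threshold above: powerplay clocks are in 10 kHz units throughout (the range table in vegam_smumgr.h is likewise annotated "in 10khz"), so mclk_stutter_mode_threshold == 60000 means memory self-refresh stutter is only considered at or below 600 MHz. Illustration only:

	/* 60000 * 10 kHz == 600 MHz */
	#define PP_CLK_10KHZ_TO_MHZ(clk10khz) ((clk10khz) / 100)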
+static int vegam_populate_all_memory_levels(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + struct smu7_dpm_table *dpm_table = &hw_data->dpm_table; + int result; + /* populate the MCLK dpm table for the SMU7 */ + uint32_t array = smu_data->smu7_data.dpm_table_start + + offsetof(SMU75_Discrete_DpmTable, MemoryLevel); + uint32_t array_size = sizeof(SMU75_Discrete_MemoryLevel) * + SMU75_MAX_LEVELS_MEMORY; + struct SMU75_Discrete_MemoryLevel *levels = + smu_data->smc_state_table.MemoryLevel; + uint32_t i; + + for (i = 0; i < dpm_table->mclk_table.count; i++) { + PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value), + "cannot populate memory level as memory clock is zero", + return -EINVAL); + result = vegam_populate_single_memory_level(hwmgr, + dpm_table->mclk_table.dpm_levels[i].value, + &levels[i]); + + if (result) + return result; + + levels[i].UpHyst = (uint8_t) + (MemoryDPMTuning_VEGAM >> DPMTuning_Uphyst_Shift); + levels[i].DownHyst = (uint8_t) + (MemoryDPMTuning_VEGAM >> DPMTuning_Downhyst_Shift); + } + + smu_data->smc_state_table.MemoryDpmLevelCount = + (uint8_t)dpm_table->mclk_table.count; + hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); + + for (i = 0; i < dpm_table->mclk_table.count; i++) + levels[i].EnabledForActivity = + (hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask >> i) & 0x1; + + levels[dpm_table->mclk_table.count - 1].DisplayWatermark = + PPSMC_DISPLAY_WATERMARK_HIGH; + + /* The level count is sent to the SMC once at SMC-table init and never changes. */ + result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, + (uint32_t)array_size, SMC_RAM_END); + + return result; +} + +static int vegam_populate_mvdd_value(struct pp_hwmgr *hwmgr, + uint32_t mclk, SMIO_Pattern *smio_pat) +{ + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint32_t i = 0; + + if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) { + /* find the first mvdd entry whose clock is not lower than the request */ + for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) { + if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) { + smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value; + break; + } + } + PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count, + "MVDD Voltage is outside the supported range.", + return -EINVAL); + } else + return -EINVAL; + + return 0; +} +
+static int vegam_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, + SMU75_Discrete_DpmTable *table) +{ + int result = 0; + uint32_t sclk_frequency; + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + SMIO_Pattern vol_level; + uint32_t mvdd; + uint16_t us_mvdd; + + table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; + + /* Get MinVoltage and Frequency from DPM0, + * already converted to SMC_UL */ + sclk_frequency = data->vbios_boot_state.sclk_bootup_value; + result = vegam_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_sclk, + sclk_frequency, + &table->ACPILevel.MinVoltage, &mvdd); + PP_ASSERT_WITH_CODE(!result, + "Cannot find ACPI VDDC voltage value " + "in Clock Dependency Table", + ); + + result = vegam_calculate_sclk_params(hwmgr, sclk_frequency, + &(table->ACPILevel.SclkSetting)); + PP_ASSERT_WITH_CODE(!result, + "Error retrieving Engine Clock dividers from VBIOS.", + return result); + + table->ACPILevel.DeepSleepDivId = 0; + table->ACPILevel.CcPwrDynRm = 0; + table->ACPILevel.CcPwrDynRm1 = 0; + + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); + + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate); + + + /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */ + table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value; + result = vegam_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_mclk, + table->MemoryACPILevel.MclkFrequency, + &table->MemoryACPILevel.MinVoltage, &mvdd); + PP_ASSERT_WITH_CODE((0 == result), + "Cannot find ACPI VDDCI voltage value " + "in Clock Dependency Table", + ); + + us_mvdd = 0; + if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) || + (data->mclk_dpm_key_disabled)) + us_mvdd = data->vbios_boot_state.mvdd_bootup_value; + else { + if (!vegam_populate_mvdd_value(hwmgr, + data->dpm_table.mclk_table.dpm_levels[0].value, + &vol_level)) + us_mvdd = vol_level.Voltage; + } + + if (!vegam_populate_mvdd_value(hwmgr, 0, &vol_level)) + table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage); + else + table->MemoryACPILevel.MinMvdd = 0; + + table->MemoryACPILevel.StutterEnable = false; + + table->MemoryACPILevel.EnabledForThrottle = 0; + table->MemoryACPILevel.EnabledForActivity = 0; + table->MemoryACPILevel.UpHyst = 0; + table->MemoryACPILevel.DownHyst = 100; + table->MemoryACPILevel.VoltageDownHyst = 0; + table->MemoryACPILevel.ActivityLevel = + PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity); + + CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage); + + return result; +} +
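The VCE and UVD tables below derive VDDCI from VDDC minus VDDC_VDDCI_DELTA when the controller is GPIO-driven, snapping it to a supported step with phm_find_closest_vddci(). A minimal equivalent of that nearest-entry lookup; illustration only, not the actual hwmgr helper:

	#include <stdint.h>
	#include <stdlib.h>

	static uint16_t closest_vddci(const uint16_t *table, int count, uint16_t target)
	{
		uint16_t best = table[0];
		int i;

		for (i = 1; i < count; i++)
			if (abs((int)table[i] - (int)target) < abs((int)best - (int)target))
				best = table[i];
		return best;
	}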
+static int vegam_populate_smc_vce_level(struct pp_hwmgr *hwmgr, + SMU75_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t vddci; + + table->VceLevelCount = (uint8_t)(mm_table->count); + table->VceBootLevel = 0; + + for (count = 0; count < table->VceLevelCount; count++) { + table->VceLevel[count].Frequency = mm_table->entries[count].eclk; + table->VceLevel[count].MinVoltage = 0; + table->VceLevel[count].MinVoltage |= + (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; + + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) + vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table), + mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); + else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) + vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; + else + vddci = data->vbios_boot_state.vddci_bootup_value; + + table->VceLevel[count].MinVoltage |= + (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT; + + /* retrieve the divider value from the VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->VceLevel[count].Frequency, &dividers); + PP_ASSERT_WITH_CODE((0 == result), + "cannot find divide id for VCE engine clock", + return result); + + table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage); + } + return result; +} + +static int vegam_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, + int32_t eng_clock, int32_t mem_clock, + SMU75_Discrete_MCArbDramTimingTableEntry *arb_regs) +{ + uint32_t dram_timing; + uint32_t dram_timing2; + uint32_t burst_time; + uint32_t rfsh_rate; + uint32_t misc3; + + int result; + + result = atomctrl_set_engine_dram_timings_rv770(hwmgr, + eng_clock, mem_clock); + PP_ASSERT_WITH_CODE(result == 0, + "Error calling VBIOS to set DRAM_TIMING.", + return result); + + dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); + dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); + burst_time = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME); + rfsh_rate = cgs_read_register(hwmgr->device, mmMC_ARB_RFSH_RATE); + misc3 = cgs_read_register(hwmgr->device, mmMC_ARB_MISC3); + + arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing); + arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2); + arb_regs->McArbBurstTime = PP_HOST_TO_SMC_UL(burst_time); + arb_regs->McArbRfshRate = PP_HOST_TO_SMC_UL(rfsh_rate); + arb_regs->McArbMisc3 = PP_HOST_TO_SMC_UL(misc3); + + return 0; +} + +static int vegam_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + struct SMU75_Discrete_MCArbDramTimingTable arb_regs; + uint32_t i, j; + int result = 0; + + memset(&arb_regs, 0, sizeof(SMU75_Discrete_MCArbDramTimingTable)); + + for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) { + for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) { + result = vegam_populate_memory_timing_parameters(hwmgr, + hw_data->dpm_table.sclk_table.dpm_levels[i].value, + hw_data->dpm_table.mclk_table.dpm_levels[j].value, + &arb_regs.entries[i][j]); + if (result) + return result; + } + } + + result = smu7_copy_bytes_to_smc( + hwmgr, + smu_data->smu7_data.arb_table_start, + (uint8_t *)&arb_regs, + sizeof(SMU75_Discrete_MCArbDramTimingTable), + SMC_RAM_END); + return result; +} +
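Every PP_HOST_TO_SMC_* and CONVERT_FROM_HOST_TO_SMC_* in this file exists because the SMC reads its RAM big-endian while the host is usually little-endian; pp_endian.h (included by the new header below) wraps the cpu_to_be32()/cpu_to_be16() family. A worked example on a little-endian host; illustration only:

	#include <stdint.h>

	/* Host value 0x12345678 must land in SMC RAM as bytes 12 34 56 78.
	 * On a little-endian CPU that means storing the byte-swapped value: */
	uint32_t host = 0x12345678;
	uint32_t smc  = ((host & 0x000000ff) << 24) |
			((host & 0x0000ff00) << 8)  |
			((host & 0x00ff0000) >> 8)  |
			((host & 0xff000000) >> 24);   /* == 0x78563412 */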
+static int vegam_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, + struct SMU75_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t vddci; + + table->UvdLevelCount = (uint8_t)(mm_table->count); + table->UvdBootLevel = 0; + + for (count = 0; count < table->UvdLevelCount; count++) { + table->UvdLevel[count].MinVoltage = 0; + table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; + table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; + table->UvdLevel[count].MinVoltage |= + (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; + + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) + vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table), + mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); + else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) + vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; + else + vddci = data->vbios_boot_state.vddci_bootup_value; + + table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT; + + /* retrieve the divider value from the VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->UvdLevel[count].VclkFrequency, &dividers); + PP_ASSERT_WITH_CODE((0 == result), + "cannot find divide id for Vclk clock", return result); + + table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider; + + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->UvdLevel[count].DclkFrequency, &dividers); + PP_ASSERT_WITH_CODE((0 == result), + "cannot find divide id for Dclk clock", return result); + + table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage); + } + + return result; +} + +static int vegam_populate_smc_boot_level(struct pp_hwmgr *hwmgr, + struct SMU75_Discrete_DpmTable *table) +{ + int result = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + table->GraphicsBootLevel = 0; + table->MemoryBootLevel = 0; + + /* find the boot level from the dpm table */ + result = phm_find_boot_level(&(data->dpm_table.sclk_table), + data->vbios_boot_state.sclk_bootup_value, + (uint32_t *)&(table->GraphicsBootLevel)); + + result = phm_find_boot_level(&(data->dpm_table.mclk_table), + data->vbios_boot_state.mclk_bootup_value, + (uint32_t *)&(table->MemoryBootLevel)); + + table->BootVddc = data->vbios_boot_state.vddc_bootup_value * + VOLTAGE_SCALE; + table->BootVddci = data->vbios_boot_state.vddci_bootup_value * + VOLTAGE_SCALE; + table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value * + VOLTAGE_SCALE; + + CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc); + CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci); + CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd); + + return 0; +} + +static int vegam_populate_smc_initial_state(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint8_t count, level; + + count = (uint8_t)(table_info->vdd_dep_on_sclk->count); + + for (level = 0; level < count; level++) { + if (table_info->vdd_dep_on_sclk->entries[level].clk >= + hw_data->vbios_boot_state.sclk_bootup_value) { + smu_data->smc_state_table.GraphicsBootLevel = level; + break; + } + } + + count = (uint8_t)(table_info->vdd_dep_on_mclk->count); + for (level = 0; level < count; level++) { + if (table_info->vdd_dep_on_mclk->entries[level].clk >= + hw_data->vbios_boot_state.mclk_bootup_value) { + smu_data->smc_state_table.MemoryBootLevel = level; + break; + } + } + + return 0; +} + +static uint16_t scale_fan_gain_settings(uint16_t raw_setting) +{ + uint32_t tmp; + tmp = raw_setting * 4096 / 100; + return (uint16_t)tmp; +} + +static int vegam_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) +{ + struct vegam_smumgr *smu_data =
(struct vegam_smumgr *)(hwmgr->smu_backend); + + const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults; + SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table; + struct pp_advance_fan_control_parameters *fan_table = + &hwmgr->thermal_controller.advanceFanControlParameters; + int i, j, k; + const uint16_t *pdef1; + const uint16_t *pdef2; + + table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128)); + table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128)); + + PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255, + "Target Operating Temp is out of Range!", + ); + + table->TemperatureLimitEdge = PP_HOST_TO_SMC_US( + cac_dtp_table->usTargetOperatingTemp * 256); + table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US( + cac_dtp_table->usTemperatureLimitHotspot * 256); + table->FanGainEdge = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainEdge)); + table->FanGainHotspot = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainHotspot)); + + pdef1 = defaults->BAPMTI_R; + pdef2 = defaults->BAPMTI_RC; + + for (i = 0; i < SMU75_DTE_ITERATIONS; i++) { + for (j = 0; j < SMU75_DTE_SOURCES; j++) { + for (k = 0; k < SMU75_DTE_SINKS; k++) { + table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1); + table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2); + pdef1++; + pdef2++; + } + } + } + + return 0; +} + +static int vegam_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) +{ + uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min; + struct vegam_smumgr *smu_data = + (struct vegam_smumgr *)(hwmgr->smu_backend); + + uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = + table_info->vdd_dep_on_sclk; + uint32_t mask = (1 << ((STRAP_ASIC_RO_MSB - STRAP_ASIC_RO_LSB) + 1)) - 1; + + stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount; + + atomctrl_read_efuse(hwmgr, STRAP_ASIC_RO_LSB, STRAP_ASIC_RO_MSB, + mask, &efuse); + + min = 1200; + max = 2500; + + ro = efuse * (max - min) / 255 + min; + + /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */ + for (i = 0; i < sclk_table->count; i++) { + smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |= + sclk_table->entries[i].cks_enable << i; + volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * + 136418 - (ro - 70) * 1000000) / + (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000)); + volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * + 3232 - (ro - 65) * 1000000) / + (2522480 - sclk_table->entries[i].clk/100 * 115764/100)); + + if (volt_without_cks >= volt_with_cks) + volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks + + sclk_table->entries[i].cks_voffset) * 100 + 624) / 625); + + smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset; + } + + smu_data->smc_state_table.LdoRefSel = + (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? 
+ table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 5; + /* Populate CKS Lookup Table */ + if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) + stretch_amount2 = 0; + else if (stretch_amount == 3 || stretch_amount == 4) + stretch_amount2 = 1; + else { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher); + PP_ASSERT_WITH_CODE(false, + "Stretch Amount in PPTable not supported\n", + return -EINVAL); + } + + value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL); + value &= 0xFFFFFFFE; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value); + + return 0; +} + +static bool vegam_is_hw_avfs_present(struct pp_hwmgr *hwmgr) +{ + uint32_t efuse; + + efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixSMU_EFUSE_0 + (49 * 4)); + efuse &= 0x00000001; + + if (efuse) + return true; + + return false; +} + +static int vegam_populate_avfs_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + + SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table); + int result = 0; + struct pp_atom_ctrl__avfs_parameters avfs_params = {0}; + AVFS_meanNsigma_t AVFS_meanNsigma = { {0} }; + AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} }; + uint32_t tmp, i; + + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = + table_info->vdd_dep_on_sclk; + + if (!hwmgr->avfs_supported) + return 0; + + result = atomctrl_get_avfs_information(hwmgr, &avfs_params); + + if (0 == result) { + table->BTCGB_VDROOP_TABLE[0].a0 = + PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0); + table->BTCGB_VDROOP_TABLE[0].a1 = + PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1); + table->BTCGB_VDROOP_TABLE[0].a2 = + PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2); + table->BTCGB_VDROOP_TABLE[1].a0 = + PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0); + table->BTCGB_VDROOP_TABLE[1].a1 = + PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1); + table->BTCGB_VDROOP_TABLE[1].a2 = + PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2); + table->AVFSGB_FUSE_TABLE[0].m1 = + PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1); + table->AVFSGB_FUSE_TABLE[0].m2 = + PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2); + table->AVFSGB_FUSE_TABLE[0].b = + PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b); + table->AVFSGB_FUSE_TABLE[0].m1_shift = 24; + table->AVFSGB_FUSE_TABLE[0].m2_shift = 12; + table->AVFSGB_FUSE_TABLE[1].m1 = + PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1); + table->AVFSGB_FUSE_TABLE[1].m2 = + PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2); + table->AVFSGB_FUSE_TABLE[1].b = + PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b); + table->AVFSGB_FUSE_TABLE[1].m1_shift = 24; + table->AVFSGB_FUSE_TABLE[1].m2_shift = 12; + table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv); + AVFS_meanNsigma.Aconstant[0] = + PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0); + AVFS_meanNsigma.Aconstant[1] = + PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1); + AVFS_meanNsigma.Aconstant[2] = + PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2); + AVFS_meanNsigma.DC_tol_sigma = + PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma); + 
AVFS_meanNsigma.Platform_mean = + PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean); + AVFS_meanNsigma.PSM_Age_CompFactor = + PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor); + AVFS_meanNsigma.Platform_sigma = + PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma); + + for (i = 0; i < sclk_table->count; i++) { + AVFS_meanNsigma.Static_Voltage_Offset[i] = + (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625); + AVFS_SclkOffset.Sclk_Offset[i] = + PP_HOST_TO_SMC_US((uint16_t) + (sclk_table->entries[i].sclk_offset) / 100); + } + + result = smu7_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU75_Firmware_Header, AvfsMeanNSigma), + &tmp, SMC_RAM_END); + smu7_copy_bytes_to_smc(hwmgr, + tmp, + (uint8_t *)&AVFS_meanNsigma, + sizeof(AVFS_meanNsigma_t), + SMC_RAM_END); + + result = smu7_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU75_Firmware_Header, AvfsSclkOffsetTable), + &tmp, SMC_RAM_END); + smu7_copy_bytes_to_smc(hwmgr, + tmp, + (uint8_t *)&AVFS_SclkOffset, + sizeof(AVFS_Sclk_Offset_t), + SMC_RAM_END); + + data->avfs_vdroop_override_setting = + (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) | + (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) | + (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) | + (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT); + data->apply_avfs_cks_off_voltage = + (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false; + } + return result; +} + +static int vegam_populate_vr_config(struct pp_hwmgr *hwmgr, + struct SMU75_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct vegam_smumgr *smu_data = + (struct vegam_smumgr *)(hwmgr->smu_backend); + uint16_t config; + + config = VR_MERGED_WITH_VDDC; + table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT); + + /* Set Vddc Voltage Controller */ + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { + config = VR_SVI2_PLANE_1; + table->VRConfig |= config; + } else { + PP_ASSERT_WITH_CODE(false, + "VDDC should be on SVI2 control in merged mode!", + ); + } + /* Set Vddci Voltage Controller */ + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) { + config = VR_SVI2_PLANE_2; /* only in merged mode */ + table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); + } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { + config = VR_SMIO_PATTERN_1; + table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); + } else { + config = VR_STATIC_VOLTAGE; + table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); + } + /* Set Mvdd Voltage Controller */ + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { + if (config != VR_SVI2_PLANE_2) { + config = VR_SVI2_PLANE_2; + table->VRConfig |= (config << VRCONF_MVDD_SHIFT); + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, + smu_data->smu7_data.soft_regs_start + + offsetof(SMU75_SoftRegisters, AllowMvddSwitch), + 0x1); + } else { + PP_ASSERT_WITH_CODE(false, + "SVI2 Plane 2 is already taken, set MVDD as Static",); + config = VR_STATIC_VOLTAGE; + table->VRConfig = (config << VRCONF_MVDD_SHIFT); + } + } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { + config = VR_SMIO_PATTERN_2; + table->VRConfig = (config << VRCONF_MVDD_SHIFT); + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, + smu_data->smu7_data.soft_regs_start + + offsetof(SMU75_SoftRegisters, AllowMvddSwitch), + 0x1); + } else { + config = 
VR_STATIC_VOLTAGE; + table->VRConfig |= (config << VRCONF_MVDD_SHIFT); + } + + return 0; +} + +static int vegam_populate_svi_load_line(struct pp_hwmgr *hwmgr) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults; + + smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn; + smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC; + smu_data->power_tune_table.SviLoadLineTrimVddC = 3; + smu_data->power_tune_table.SviLoadLineOffsetVddC = 0; + + return 0; +} + +static int vegam_populate_tdc_limit(struct pp_hwmgr *hwmgr) +{ + uint16_t tdc_limit; + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults; + + tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128); + smu_data->power_tune_table.TDC_VDDC_PkgLimit = + CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); + smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = + defaults->TDC_VDDC_ThrottleReleaseLimitPerc; + smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt; + + return 0; +} + +static int vegam_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults; + uint32_t temp; + + if (smu7_read_smc_sram_dword(hwmgr, + fuse_table_offset + + offsetof(SMU75_Discrete_PmFuses, TdcWaterfallCtl), + (uint32_t *)&temp, SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!", + return -EINVAL); + else { + smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl; + smu_data->power_tune_table.LPMLTemperatureMin = + (uint8_t)((temp >> 16) & 0xff); + smu_data->power_tune_table.LPMLTemperatureMax = + (uint8_t)((temp >> 8) & 0xff); + smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff); + } + return 0; +} + +static int vegam_populate_temperature_scaler(struct pp_hwmgr *hwmgr) +{ + int i; + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + + /* Currently not used. Set all to zero. */ + for (i = 0; i < 16; i++) + smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0; + + return 0; +} + +static int vegam_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + +/* TO DO move to hwmgr */ + if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15)) + || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity) + hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity = + hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity; + + smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US( + hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity); + return 0; +} + +static int vegam_populate_gnb_lpml(struct pp_hwmgr *hwmgr) +{ + int i; + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + + /* Currently not used. Set all to zero. 
*/ + for (i = 0; i < 16; i++) + smu_data->power_tune_table.GnbLPML[i] = 0; + + return 0; +} + +static int vegam_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; + uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd; + struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; + + hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); + lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); + + smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd = + CONVERT_FROM_HOST_TO_SMC_US(hi_sidd); + smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd = + CONVERT_FROM_HOST_TO_SMC_US(lo_sidd); + + return 0; +} + +static int vegam_populate_pm_fuses(struct pp_hwmgr *hwmgr) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + uint32_t pm_fuse_table_offset; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + if (smu7_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU75_Firmware_Header, PmFuseTable), + &pm_fuse_table_offset, SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to get pm_fuse_table_offset Failed!", + return -EINVAL); + + if (vegam_populate_svi_load_line(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate SviLoadLine Failed!", + return -EINVAL); + + if (vegam_populate_tdc_limit(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TDCLimit Failed!", return -EINVAL); + + if (vegam_populate_dw8(hwmgr, pm_fuse_table_offset)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TdcWaterfallCtl, " + "LPMLTemperature Min and Max Failed!", + return -EINVAL); + + if (0 != vegam_populate_temperature_scaler(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate LPMLTemperatureScaler Failed!", + return -EINVAL); + + if (vegam_populate_fuzzy_fan(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate Fuzzy Fan Control parameters Failed!", + return -EINVAL); + + if (vegam_populate_gnb_lpml(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Failed!", + return -EINVAL); + + if (vegam_populate_bapm_vddc_base_leakage_sidd(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate BapmVddCBaseLeakage Hi and Lo " + "Sidd Failed!", return -EINVAL); + + if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset, + (uint8_t *)&smu_data->power_tune_table, + (sizeof(struct SMU75_Discrete_PmFuses) - PMFUSES_AVFSSIZE), + SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to download PmFuseTable Failed!", + return -EINVAL); + } + return 0; +} + +static int vegam_enable_reconfig_cus(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_EnableModeSwitchRLCNotification, + adev->gfx.cu_info.number); + + return 0; +} + +static int vegam_init_smc_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table); + uint8_t i; + struct pp_atomctrl_gpio_pin_assignment gpio_pin; + struct 
phm_ppt_v1_gpio_table *gpio_table = + (struct phm_ppt_v1_gpio_table *)table_info->gpio_table; + pp_atomctrl_clock_dividers_vi dividers; + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + + vegam_initialize_power_tune_defaults(hwmgr); + + if (SMU7_VOLTAGE_CONTROL_NONE != hw_data->voltage_control) + vegam_populate_smc_voltage_tables(hwmgr, table); + + table->SystemFlags = 0; + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StepVddc)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; + + if (hw_data->is_memory_gddr5) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; + + if (hw_data->ulv_supported && table_info->us_ulv_voltage_offset) { + result = vegam_populate_ulv_state(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize ULV state!", return result); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_ULV_PARAMETER, SMU7_CGULVPARAMETER_DFLT); + } + + result = vegam_populate_smc_link_level(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize Link Level!", return result); + + result = vegam_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize Graphics Level!", return result); + + result = vegam_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize Memory Level!", return result); + + result = vegam_populate_smc_acpi_level(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize ACPI Level!", return result); + + result = vegam_populate_smc_vce_level(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize VCE Level!", return result); + + /* Since only the initial state is completely set up at this point + * (the other states are just copies of the boot state) we only + * need to populate the ARB settings for the initial state. 
+ */ + result = vegam_program_memory_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to write ARB settings for the initial state.", return result); + + result = vegam_populate_smc_uvd_level(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize UVD Level!", return result); + + result = vegam_populate_smc_boot_level(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize Boot Level!", return result); + + result = vegam_populate_smc_initial_state(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize Boot State!", return result); + + result = vegam_populate_bapm_parameters_in_dpm_table(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to populate BAPM Parameters!", return result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher)) { + result = vegam_populate_clock_stretcher_data_table(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to populate Clock Stretcher Data Table!", + return result); + } + + result = vegam_populate_avfs_parameters(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to populate AVFS Parameters!", return result); + + table->CurrSclkPllRange = 0xff; + table->GraphicsVoltageChangeEnable = 1; + table->GraphicsThermThrottleEnable = 1; + table->GraphicsInterval = 1; + table->VoltageInterval = 1; + table->ThermalInterval = 1; + table->TemperatureLimitHigh = + table_info->cac_dtp_table->usTargetOperatingTemp * + SMU7_Q88_FORMAT_CONVERSION_UNIT; + table->TemperatureLimitLow = + (table_info->cac_dtp_table->usTargetOperatingTemp - 1) * + SMU7_Q88_FORMAT_CONVERSION_UNIT; + table->MemoryVoltageChangeEnable = 1; + table->MemoryInterval = 1; + table->VoltageResponseTime = 0; + table->PhaseResponseTime = 0; + table->MemoryThermThrottleEnable = 1; + + PP_ASSERT_WITH_CODE(hw_data->dpm_table.pcie_speed_table.count >= 1, + "There must be 1 or more PCIE levels defined in PPTable.", + return -EINVAL); + table->PCIeBootLinkLevel = + hw_data->dpm_table.pcie_speed_table.count; + table->PCIeGenInterval = 1; + table->VRConfig = 0; + + result = vegam_populate_vr_config(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "Failed to populate VRConfig setting!", return result); + + table->ThermGpio = 17; + table->SclkStepSize = 0x4000; + + if (atomctrl_get_pp_assign_pin(hwmgr, + VDDC_VRHOT_GPIO_PINID, &gpio_pin)) { + table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift; + if (gpio_table) + table->VRHotLevel = + table_info->gpio_table->vrhot_triggered_sclk_dpm_index; + } else { + table->VRHotGpio = SMU7_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } + + if (atomctrl_get_pp_assign_pin(hwmgr, + PP_AC_DC_SWITCH_GPIO_PINID, &gpio_pin)) { + table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift; + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition) && + !smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UseNewGPIOScheme)) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme); + } else { + table->AcDcGpio = SMU7_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + } + + /* Thermal Output GPIO */ + if (atomctrl_get_pp_assign_pin(hwmgr, + THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin)) { + table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift; + + /* To determine polarity, read GPIOPAD_A for the assigned GPIO pin: + * the VBIOS programs this register to the 'inactive state', so the + * driver can derive the 'active state' from it and + * program the SMU with the correct polarity + */ + table->ThermOutPolarity = + (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) & + (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1 : 0; + table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY; + + /* if required, combine VRHot/PCC with the thermal out GPIO */ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot) && + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CombinePCCWithThermalSignal)) + table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT; + } else { + table->ThermOutGpio = 17; + table->ThermOutPolarity = 1; + table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE; + } + + /* Populate the BIF_SCLK levels into the SMC DPM table */ + for (i = 0; i <= hw_data->dpm_table.pcie_speed_table.count; i++) { + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + smu_data->bif_sclk_table[i], &dividers); + PP_ASSERT_WITH_CODE(!result, + "Cannot find the DFS divide id for Sclk", + return result); + + if (i == 0) + table->Ulv.BifSclkDfs = + PP_HOST_TO_SMC_US((uint16_t)(dividers.pll_post_divider)); + else + table->LinkLevel[i - 1].BifSclkDfs = + PP_HOST_TO_SMC_US((uint16_t)(dividers.pll_post_divider)); + } + + for (i = 0; i < SMU75_MAX_ENTRIES_SMIO; i++) + table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]); + + CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); + CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2); + CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); + CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); + CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); + CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); + + /* Upload all dpm data to SMC memory (dpm level, dpm level count, etc.) */ + result = smu7_copy_bytes_to_smc(hwmgr, + smu_data->smu7_data.dpm_table_start + + offsetof(SMU75_Discrete_DpmTable, SystemFlags), + (uint8_t *)&(table->SystemFlags), + sizeof(SMU75_Discrete_DpmTable) - 3 * sizeof(SMU75_PIDController), + SMC_RAM_END); + PP_ASSERT_WITH_CODE(!result, + "Failed to upload dpm data to SMC memory!", return result); + + result = vegam_populate_pm_fuses(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to populate PM fuses to SMC memory!", return result); + + result = vegam_enable_reconfig_cus(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to enable reconfigurable CUs!", return result); + + return 0; +} +
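One detail in the upload above deserves a note: the length is sizeof(SMU75_Discrete_DpmTable) minus three SMU75_PIDController blocks. The apparent intent, assuming the PID controller sub-structs are the trailing fields of the table (not verified here), is to leave the SMC-owned PID state untouched. Illustration only:

	#include <stddef.h>

	/* Assumed layout: upload everything except the three trailing
	 * PID controller blocks owned by the SMC firmware. */
	size_t upload_len = sizeof(SMU75_Discrete_DpmTable) -
			    3 * sizeof(SMU75_PIDController);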
+static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member) +{ + switch (type) { + case SMU_SoftRegisters: + switch (member) { + case HandshakeDisables: + return offsetof(SMU75_SoftRegisters, HandshakeDisables); + case VoltageChangeTimeout: + return offsetof(SMU75_SoftRegisters, VoltageChangeTimeout); + case AverageGraphicsActivity: + return offsetof(SMU75_SoftRegisters, AverageGraphicsActivity); + case PreVBlankGap: + return offsetof(SMU75_SoftRegisters, PreVBlankGap); + case VBlankTimeout: + return offsetof(SMU75_SoftRegisters, VBlankTimeout); + case UcodeLoadStatus: + return offsetof(SMU75_SoftRegisters, UcodeLoadStatus); + case DRAM_LOG_ADDR_H: + return offsetof(SMU75_SoftRegisters, DRAM_LOG_ADDR_H); + case DRAM_LOG_ADDR_L: + return offsetof(SMU75_SoftRegisters, DRAM_LOG_ADDR_L); + case DRAM_LOG_PHY_ADDR_H: + return offsetof(SMU75_SoftRegisters, DRAM_LOG_PHY_ADDR_H); + case DRAM_LOG_PHY_ADDR_L: + return offsetof(SMU75_SoftRegisters,
DRAM_LOG_PHY_ADDR_L); + case DRAM_LOG_BUFF_SIZE: + return offsetof(SMU75_SoftRegisters, DRAM_LOG_BUFF_SIZE); + } + case SMU_Discrete_DpmTable: + switch (member) { + case UvdBootLevel: + return offsetof(SMU75_Discrete_DpmTable, UvdBootLevel); + case VceBootLevel: + return offsetof(SMU75_Discrete_DpmTable, VceBootLevel); + case LowSclkInterruptThreshold: + return offsetof(SMU75_Discrete_DpmTable, LowSclkInterruptThreshold); + } + } + pr_warn("can't get the offset of type %x member %x\n", type, member); + return 0; +} + +static int vegam_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + + DPMTABLE_UPDATE_SCLK + + DPMTABLE_UPDATE_MCLK)) + return vegam_program_memory_timing_parameters(hwmgr); + + return 0; +} + +static int vegam_update_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct vegam_smumgr *smu_data = + (struct vegam_smumgr *)(hwmgr->smu_backend); + int result = 0; + uint32_t low_sclk_interrupt_threshold = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkThrottleLowNotification) + && (data->low_sclk_interrupt_threshold != 0)) { + low_sclk_interrupt_threshold = + data->low_sclk_interrupt_threshold; + + CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); + + result = smu7_copy_bytes_to_smc( + hwmgr, + smu_data->smu7_data.dpm_table_start + + offsetof(SMU75_Discrete_DpmTable, + LowSclkInterruptThreshold), + (uint8_t *)&low_sclk_interrupt_threshold, + sizeof(uint32_t), + SMC_RAM_END); + } + PP_ASSERT_WITH_CODE((result == 0), + "Failed to update SCLK threshold!", return result); + + result = vegam_program_mem_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE((result == 0), + "Failed to program memory timing parameters!", + ); + + return result; +} + +int vegam_thermal_avfs_enable(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + int ret; + + if (!hwmgr->avfs_supported) + return 0; + + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs); + if (!ret) { + if (data->apply_avfs_cks_off_voltage) + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage); + } + + return ret; +} + +static int vegam_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) +{ + PP_ASSERT_WITH_CODE(hwmgr->thermal_controller.fanInfo.bNoFan, + "VBIOS fan info is not correct!", + ); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + return 0; +} + +const struct pp_smumgr_func vegam_smu_funcs = { + .smu_init = vegam_smu_init, + .smu_fini = smu7_smu_fini, + .start_smu = vegam_start_smu, + .check_fw_load_finish = smu7_check_fw_load_finish, + .request_smu_load_fw = smu7_reload_firmware, + .request_smu_load_specific_fw = NULL, + .send_msg_to_smc = smu7_send_msg_to_smc, + .send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter, + .process_firmware_header = vegam_process_firmware_header, + .is_dpm_running = vegam_is_dpm_running, + .get_mac_definition = vegam_get_mac_definition, + .update_smc_table = vegam_update_smc_table, + .init_smc_table = vegam_init_smc_table, + .get_offsetof = vegam_get_offsetof, + .populate_all_graphic_levels = vegam_populate_all_graphic_levels, + .populate_all_memory_levels = vegam_populate_all_memory_levels, + .update_sclk_threshold = vegam_update_sclk_threshold, + .is_hw_avfs_present = vegam_is_hw_avfs_present, + .thermal_avfs_enable = 
vegam_thermal_avfs_enable, + .thermal_setup_fan_table = vegam_thermal_setup_fan_table, +}; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h new file mode 100644 index 000000000000..2b6558238500 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h @@ -0,0 +1,75 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _VEGAM_SMUMANAGER_H +#define _VEGAM_SMUMANAGER_H + + +#include <pp_endian.h> +#include "smu75_discrete.h" +#include "smu7_smumgr.h" + +#define SMC_RAM_END 0x40000 + +#define DPMTuning_Uphyst_Shift 0 +#define DPMTuning_Downhyst_Shift 8 +#define DPMTuning_Activity_Shift 16 + +#define GraphicsDPMTuning_VEGAM 0x001e6400 +#define MemoryDPMTuning_VEGAM 0x000f3c0a +#define SclkDPMTuning_VEGAM 0x002d000a +#define MclkDPMTuning_VEGAM 0x001f100a + + +struct vegam_pt_defaults { + uint8_t SviLoadLineEn; + uint8_t SviLoadLineVddC; + uint8_t TDC_VDDC_ThrottleReleaseLimitPerc; + uint8_t TDC_MAWt; + uint8_t TdcWaterfallCtl; + uint8_t DTEAmbientTempBase; + + uint32_t DisplayCac; + uint32_t BAPM_TEMP_GRADIENT; + uint16_t BAPMTI_R[SMU75_DTE_ITERATIONS * SMU75_DTE_SOURCES * SMU75_DTE_SINKS]; + uint16_t BAPMTI_RC[SMU75_DTE_ITERATIONS * SMU75_DTE_SOURCES * SMU75_DTE_SINKS]; +}; + +struct vegam_range_table { + uint32_t trans_lower_frequency; /* in 10khz */ + uint32_t trans_upper_frequency; +}; + +struct vegam_smumgr { + struct smu7_smumgr smu7_data; + uint8_t protected_mode; + SMU75_Discrete_DpmTable smc_state_table; + struct SMU75_Discrete_Ulv ulv_setting; + struct SMU75_Discrete_PmFuses power_tune_table; + struct vegam_range_table range_table[NUM_SCLK_RANGE]; + const struct vegam_pt_defaults *power_tune_defaults; + uint32_t bif_sclk_table[SMU75_MAX_LEVELS_LINK]; +}; + + +#endif |
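A closing note on the tuning dwords defined in this header: each one packs UpHyst, DownHyst and an activity target into a single value, and the .c file above decodes them with nothing more than the three shifts plus a narrowing cast. Worked decode of SclkDPMTuning_VEGAM; illustration only:

	/* 0x002d000a:
	 *   UpHyst   = (uint8_t)(0x002d000a >> DPMTuning_Uphyst_Shift)    = 0x0a (10)
	 *   DownHyst = (uint8_t)(0x002d000a >> DPMTuning_Downhyst_Shift)  = 0x00 (0)
	 *   Activity = (uint16_t)(0x002d000a >> DPMTuning_Activity_Shift) = 0x002d (45)
	 */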