Diffstat (limited to 'drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c')
-rw-r--r-- | drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 35
1 file changed, 28 insertions, 7 deletions
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 5a68d365967f..cf556f1b5ed1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1219,19 +1219,22 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
 					   value);
 }
 
-static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
+static int navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
 {
 	PPTable_t *pptable = smu->smu_table.driver_pptable;
 	DpmDescriptor_t *dpm_desc = NULL;
-	uint32_t clk_index = 0;
+	int clk_index = 0;
 
 	clk_index = smu_cmn_to_asic_specific_index(smu,
 						   CMN2ASIC_MAPPING_CLK,
 						   clk_type);
+	if (clk_index < 0)
+		return clk_index;
+
 	dpm_desc = &pptable->DpmDescriptor[clk_index];
 
 	/* 0 - Fine grained DPM, 1 - Discrete DPM */
-	return dpm_desc->SnapToDiscrete == 0;
+	return dpm_desc->SnapToDiscrete == 0 ? 1 : 0;
 }
 
 static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap)
@@ -1287,7 +1290,11 @@ static int navi10_emit_clk_levels(struct smu_context *smu,
 		if (ret)
 			return ret;
 
-		if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
+		ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
+		if (ret < 0)
+			return ret;
+
+		if (!ret) {
 			for (i = 0; i < count; i++) {
 				ret = smu_v11_0_get_dpm_freq_by_index(smu,
 								      clk_type, i, &value);
@@ -1496,7 +1503,11 @@ static int navi10_print_clk_levels(struct smu_context *smu,
 		if (ret)
 			return size;
 
-		if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
+		ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
+		if (ret < 0)
+			return ret;
+
+		if (!ret) {
 			for (i = 0; i < count; i++) {
 				ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
 				if (ret)
@@ -1665,7 +1676,11 @@ static int navi10_force_clk_levels(struct smu_context *smu,
 	case SMU_UCLK:
 	case SMU_FCLK:
 		/* There is only 2 levels for fine grained DPM */
-		if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
+		ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
+		if (ret < 0)
+			return ret;
+
+		if (ret) {
 			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
 			soft_min_level = (soft_min_level >= 1 ? 1 : 0);
 		}
@@ -2006,6 +2021,8 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
 	}
 
 	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
+		if (size != 10)
+			return -EINVAL;
 		ret = smu_cmn_update_table(smu,
 				       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
 				       (void *)(&activity_monitor), false);
@@ -2049,6 +2066,8 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
 			activity_monitor.Mem_PD_Data_error_coeff = input[8];
 			activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
 			break;
+		default:
+			return -EINVAL;
 		}
 
 		ret = smu_cmn_update_table(smu,
@@ -2066,8 +2085,10 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
 					       smu->power_profile_mode);
 	if (workload_type < 0)
 		return -EINVAL;
-	smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
 				    1 << workload_type, NULL);
+	if (ret)
+		dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
 
 	return ret;
 }
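
The hunks above all follow one convention: navi10_is_support_fine_grained_dpm() no longer returns a bool but an int that is negative errno on a failed clock-index lookup, 0 for discrete DPM and 1 for fine grained DPM, and every caller checks for the error case before testing the value. Below is a minimal, standalone C sketch of that tri-state return pattern, not driver code: the snap_to_discrete table and the index check are hypothetical stand-ins for smu_cmn_to_asic_specific_index() and the PPTable DpmDescriptor, and main()/printf() exist only to exercise the helper.

#include <errno.h>
#include <stdio.h>

/* Hypothetical per-clock flag: 0 = fine grained DPM, 1 = discrete DPM. */
static const int snap_to_discrete[] = { 0, 1 };

/*
 * Returns 1 if the clock supports fine grained DPM, 0 if it only supports
 * discrete DPM, or a negative errno if the clock index lookup fails --
 * the same convention the patched navi10_is_support_fine_grained_dpm() uses.
 */
static int is_fine_grained_dpm(int clk_type)
{
	/* Stand-in for smu_cmn_to_asic_specific_index(), which may fail. */
	if (clk_type < 0 || clk_type > 1)
		return -EINVAL;

	return snap_to_discrete[clk_type] == 0 ? 1 : 0;
}

int main(void)
{
	for (int clk = -1; clk <= 2; clk++) {
		int ret = is_fine_grained_dpm(clk);

		if (ret < 0)		/* check for errors first ...          */
			printf("clk %d: lookup failed (%d)\n", clk, ret);
		else if (ret)		/* ... only then treat it as a boolean */
			printf("clk %d: fine grained DPM\n", clk);
		else
			printf("clk %d: discrete DPM\n", clk);
	}
	return 0;
}

The reason for the bool-to-int change is visible here: with a bool return the helper had no way to report a failed lookup, and a negative index would have been used to index pptable->DpmDescriptor[], which is why each call site in the patch now checks for ret < 0 before testing the result.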