Diffstat (limited to 'drivers/gpu/drm/amd/pm/swsmu')
-rw-r--r--   drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c          |   1
-rw-r--r--   drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c   | 378
-rw-r--r--   drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c    |   1
-rw-r--r--   drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c     | 123
-rw-r--r--   drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c |  55
-rw-r--r--   drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c     |   2
6 files changed, 511 insertions, 49 deletions
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index e0eb7ca112e2..c29d8b3131b7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -2221,6 +2221,7 @@ static int smu_set_power_limit(void *handle, uint32_t limit) dev_err(smu->adev->dev, "New power limit (%d) is over the max allowed %d\n", limit, smu->max_power_limit); + ret = -EINVAL; goto out; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 7bcd35840bf2..77f532a49e37 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -194,18 +194,34 @@ static int vangogh_tables_init(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = smu_table->tables; + struct amdgpu_device *adev = smu->adev; + uint32_t if_version; + uint32_t ret = 0; + + ret = smu_cmn_get_smc_version(smu, &if_version, NULL); + if (ret) { + dev_err(adev->dev, "Failed to get smu if version!\n"); + goto err0_out; + } SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); - SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); - smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); + + if (if_version < 0x3) { + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL); + } else { + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); + } if (!smu_table->metrics_table) goto err0_out; smu_table->metrics_time = 0; @@ -235,13 +251,12 @@ err0_out: return -ENOMEM; } -static int vangogh_get_smu_metrics_data(struct smu_context *smu, +static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu, MetricsMember_t member, uint32_t *value) { struct smu_table_context *smu_table = &smu->smu_table; - - SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; + SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table; int ret = 0; mutex_lock(&smu->metrics_lock); @@ -255,7 +270,7 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu, } switch (member) { - case METRICS_AVERAGE_GFXCLK: + case METRICS_CURR_GFXCLK: *value = metrics->GfxclkFrequency; break; case METRICS_AVERAGE_SOCCLK: @@ -267,7 +282,7 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu, case METRICS_AVERAGE_DCLK: *value = metrics->DclkFrequency; break; - case METRICS_AVERAGE_UCLK: + case METRICS_CURR_UCLK: *value = metrics->MemclkFrequency; break; case METRICS_AVERAGE_GFXACTIVITY: @@ -311,6 +326,103 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu, return ret; } +static int vangogh_get_smu_metrics_data(struct smu_context *smu, + MetricsMember_t member, + uint32_t *value) +{ + struct smu_table_context *smu_table = &smu->smu_table; + SmuMetrics_t *metrics = (SmuMetrics_t 
*)smu_table->metrics_table; + int ret = 0; + + mutex_lock(&smu->metrics_lock); + + ret = smu_cmn_get_metrics_table_locked(smu, + NULL, + false); + if (ret) { + mutex_unlock(&smu->metrics_lock); + return ret; + } + + switch (member) { + case METRICS_CURR_GFXCLK: + *value = metrics->Current.GfxclkFrequency; + break; + case METRICS_AVERAGE_SOCCLK: + *value = metrics->Current.SocclkFrequency; + break; + case METRICS_AVERAGE_VCLK: + *value = metrics->Current.VclkFrequency; + break; + case METRICS_AVERAGE_DCLK: + *value = metrics->Current.DclkFrequency; + break; + case METRICS_CURR_UCLK: + *value = metrics->Current.MemclkFrequency; + break; + case METRICS_AVERAGE_GFXACTIVITY: + *value = metrics->Current.GfxActivity; + break; + case METRICS_AVERAGE_VCNACTIVITY: + *value = metrics->Current.UvdActivity; + break; + case METRICS_AVERAGE_SOCKETPOWER: + *value = (metrics->Current.CurrentSocketPower << 8) / + 1000; + break; + case METRICS_TEMPERATURE_EDGE: + *value = metrics->Current.GfxTemperature / 100 * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_HOTSPOT: + *value = metrics->Current.SocTemperature / 100 * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_THROTTLER_STATUS: + *value = metrics->Current.ThrottlerStatus; + break; + case METRICS_VOLTAGE_VDDGFX: + *value = metrics->Current.Voltage[2]; + break; + case METRICS_VOLTAGE_VDDSOC: + *value = metrics->Current.Voltage[1]; + break; + case METRICS_AVERAGE_CPUCLK: + memcpy(value, &metrics->Current.CoreFrequency[0], + smu->cpu_core_num * sizeof(uint16_t)); + break; + default: + *value = UINT_MAX; + break; + } + + mutex_unlock(&smu->metrics_lock); + + return ret; +} + +static int vangogh_common_get_smu_metrics_data(struct smu_context *smu, + MetricsMember_t member, + uint32_t *value) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t if_version; + int ret = 0; + + ret = smu_cmn_get_smc_version(smu, &if_version, NULL); + if (ret) { + dev_err(adev->dev, "Failed to get smu if version!\n"); + return ret; + } + + if (if_version < 0x3) + ret = vangogh_get_legacy_smu_metrics_data(smu, member, value); + else + ret = vangogh_get_smu_metrics_data(smu, member, value); + + return ret; +} + static int vangogh_allocate_dpm_context(struct smu_context *smu) { struct smu_dpm_context *smu_dpm = &smu->smu_dpm; @@ -447,11 +559,11 @@ static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_typ return 0; } -static int vangogh_print_clk_levels(struct smu_context *smu, +static int vangogh_print_legacy_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { DpmClocks_t *clk_table = smu->smu_table.clocks_table; - SmuMetrics_t metrics; + SmuMetrics_legacy_t metrics; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); int i, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; @@ -546,6 +658,126 @@ static int vangogh_print_clk_levels(struct smu_context *smu, return size; } +static int vangogh_print_clk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, char *buf) +{ + DpmClocks_t *clk_table = smu->smu_table.clocks_table; + SmuMetrics_t metrics; + struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); + int i, size = 0, ret = 0; + uint32_t cur_value = 0, value = 0, count = 0; + bool cur_value_match_level = false; + + memset(&metrics, 0, sizeof(metrics)); + + ret = smu_cmn_get_metrics_table(smu, &metrics, false); + if (ret) + return ret; + + switch (clk_type) { + case SMU_OD_SCLK: + if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { + size = sprintf(buf, 
"%s:\n", "OD_SCLK"); + size += sprintf(buf + size, "0: %10uMhz\n", + (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); + size += sprintf(buf + size, "1: %10uMhz\n", + (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); + } + break; + case SMU_OD_CCLK: + if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { + size = sprintf(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); + size += sprintf(buf + size, "0: %10uMhz\n", + (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); + size += sprintf(buf + size, "1: %10uMhz\n", + (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq); + } + break; + case SMU_OD_RANGE: + if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { + size = sprintf(buf, "%s:\n", "OD_RANGE"); + size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n", + smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); + size += sprintf(buf + size, "CCLK: %7uMhz %10uMhz\n", + smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); + } + break; + case SMU_SOCCLK: + /* the level 3 ~ 6 of socclk use the same frequency for vangogh */ + count = clk_table->NumSocClkLevelsEnabled; + cur_value = metrics.Current.SocclkFrequency; + break; + case SMU_VCLK: + count = clk_table->VcnClkLevelsEnabled; + cur_value = metrics.Current.VclkFrequency; + break; + case SMU_DCLK: + count = clk_table->VcnClkLevelsEnabled; + cur_value = metrics.Current.DclkFrequency; + break; + case SMU_MCLK: + count = clk_table->NumDfPstatesEnabled; + cur_value = metrics.Current.MemclkFrequency; + break; + case SMU_FCLK: + count = clk_table->NumDfPstatesEnabled; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value); + if (ret) + return ret; + break; + default: + break; + } + + switch (clk_type) { + case SMU_SOCCLK: + case SMU_VCLK: + case SMU_DCLK: + case SMU_MCLK: + case SMU_FCLK: + for (i = 0; i < count; i++) { + ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value); + if (ret) + return ret; + if (!value) + continue; + size += sprintf(buf + size, "%d: %uMhz %s\n", i, value, + cur_value == value ? 
"*" : ""); + if (cur_value == value) + cur_value_match_level = true; + } + + if (!cur_value_match_level) + size += sprintf(buf + size, " %uMhz *\n", cur_value); + break; + default: + break; + } + + return size; +} + +static int vangogh_common_print_clk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, char *buf) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t if_version; + int ret = 0; + + ret = smu_cmn_get_smc_version(smu, &if_version, NULL); + if (ret) { + dev_err(adev->dev, "Failed to get smu if version!\n"); + return ret; + } + + if (if_version < 0x3) + ret = vangogh_print_legacy_clk_levels(smu, clk_type, buf); + else + ret = vangogh_print_clk_levels(smu, clk_type, buf); + + return ret; +} + static int vangogh_get_profiling_clk_mask(struct smu_context *smu, enum amd_dpm_forced_level level, uint32_t *vclk_mask, @@ -860,7 +1092,6 @@ static int vangogh_set_soft_freq_limited_range(struct smu_context *smu, return ret; break; case SMU_FCLK: - case SMU_MCLK: ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL); @@ -948,7 +1179,6 @@ static int vangogh_force_clk_levels(struct smu_context *smu, if (ret) return ret; break; - case SMU_MCLK: case SMU_FCLK: ret = vangogh_get_dpm_clk_limited(smu, clk_type, soft_min_level, &min_freq); @@ -1035,7 +1265,6 @@ static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest) SMU_SOCCLK, SMU_VCLK, SMU_DCLK, - SMU_MCLK, SMU_FCLK, }; @@ -1064,7 +1293,6 @@ static int vangogh_unforce_dpm_levels(struct smu_context *smu) enum smu_clk_type clk_type; uint32_t feature; } clk_feature_map[] = { - {SMU_MCLK, SMU_FEATURE_DPM_FCLK_BIT}, {SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT}, {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT}, {SMU_VCLK, SMU_FEATURE_VCN_DPM_BIT}, @@ -1196,7 +1424,6 @@ static int vangogh_set_performance_level(struct smu_context *smu, if (ret) return ret; - vangogh_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask); vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask); vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask); @@ -1236,7 +1463,6 @@ static int vangogh_set_performance_level(struct smu_context *smu, if (ret) return ret; - vangogh_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask); vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask); break; case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: @@ -1278,57 +1504,57 @@ static int vangogh_read_sensor(struct smu_context *smu, mutex_lock(&smu->sensor_lock); switch (sensor) { case AMDGPU_PP_SENSOR_GPU_LOAD: - ret = vangogh_get_smu_metrics_data(smu, + ret = vangogh_common_get_smu_metrics_data(smu, METRICS_AVERAGE_GFXACTIVITY, (uint32_t *)data); *size = 4; break; case AMDGPU_PP_SENSOR_GPU_POWER: - ret = vangogh_get_smu_metrics_data(smu, + ret = vangogh_common_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, (uint32_t *)data); *size = 4; break; case AMDGPU_PP_SENSOR_EDGE_TEMP: - ret = vangogh_get_smu_metrics_data(smu, + ret = vangogh_common_get_smu_metrics_data(smu, METRICS_TEMPERATURE_EDGE, (uint32_t *)data); *size = 4; break; case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: - ret = vangogh_get_smu_metrics_data(smu, + ret = vangogh_common_get_smu_metrics_data(smu, METRICS_TEMPERATURE_HOTSPOT, (uint32_t *)data); *size = 4; break; case AMDGPU_PP_SENSOR_GFX_MCLK: - ret = vangogh_get_smu_metrics_data(smu, - METRICS_AVERAGE_UCLK, + ret = vangogh_common_get_smu_metrics_data(smu, + METRICS_CURR_UCLK, (uint32_t *)data); *(uint32_t *)data *= 100; *size = 4; break; case AMDGPU_PP_SENSOR_GFX_SCLK: - ret = 
vangogh_get_smu_metrics_data(smu, - METRICS_AVERAGE_GFXCLK, + ret = vangogh_common_get_smu_metrics_data(smu, + METRICS_CURR_GFXCLK, (uint32_t *)data); *(uint32_t *)data *= 100; *size = 4; break; case AMDGPU_PP_SENSOR_VDDGFX: - ret = vangogh_get_smu_metrics_data(smu, + ret = vangogh_common_get_smu_metrics_data(smu, METRICS_VOLTAGE_VDDGFX, (uint32_t *)data); *size = 4; break; case AMDGPU_PP_SENSOR_VDDNB: - ret = vangogh_get_smu_metrics_data(smu, + ret = vangogh_common_get_smu_metrics_data(smu, METRICS_VOLTAGE_VDDSOC, (uint32_t *)data); *size = 4; break; case AMDGPU_PP_SENSOR_CPU_CLK: - ret = vangogh_get_smu_metrics_data(smu, + ret = vangogh_common_get_smu_metrics_data(smu, METRICS_AVERAGE_CPUCLK, (uint32_t *)data); *size = smu->cpu_core_num * sizeof(uint16_t); @@ -1402,13 +1628,13 @@ static int vangogh_set_watermarks_table(struct smu_context *smu, return 0; } -static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu, +static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu, void **table) { struct smu_table_context *smu_table = &smu->smu_table; struct gpu_metrics_v2_1 *gpu_metrics = (struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table; - SmuMetrics_t metrics; + SmuMetrics_legacy_t metrics; int ret = 0; ret = smu_cmn_get_metrics_table(smu, &metrics, true); @@ -1421,9 +1647,8 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu, gpu_metrics->temperature_soc = metrics.SocTemperature; memcpy(&gpu_metrics->temperature_core[0], &metrics.CoreTemperature[0], - sizeof(uint16_t) * 8); + sizeof(uint16_t) * 4); gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0]; - gpu_metrics->temperature_l3[1] = metrics.L3Temperature[1]; gpu_metrics->average_gfx_activity = metrics.GfxActivity; gpu_metrics->average_mm_activity = metrics.UvdActivity; @@ -1434,7 +1659,7 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu, gpu_metrics->average_gfx_power = metrics.Power[2]; memcpy(&gpu_metrics->average_core_power[0], &metrics.CorePower[0], - sizeof(uint16_t) * 8); + sizeof(uint16_t) * 4); gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency; gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency; @@ -1445,9 +1670,8 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu, memcpy(&gpu_metrics->current_coreclk[0], &metrics.CoreFrequency[0], - sizeof(uint16_t) * 8); + sizeof(uint16_t) * 4); gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0]; - gpu_metrics->current_l3clk[1] = metrics.L3Frequency[1]; gpu_metrics->throttle_status = metrics.ThrottlerStatus; @@ -1458,6 +1682,88 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu, return sizeof(struct gpu_metrics_v2_1); } +static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu, + void **table) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct gpu_metrics_v2_1 *gpu_metrics = + (struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table; + SmuMetrics_t metrics; + int ret = 0; + + ret = smu_cmn_get_metrics_table(smu, &metrics, true); + if (ret) + return ret; + + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1); + + gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature; + gpu_metrics->temperature_soc = metrics.Current.SocTemperature; + memcpy(&gpu_metrics->temperature_core[0], + &metrics.Current.CoreTemperature[0], + sizeof(uint16_t) * 4); + gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0]; + + gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity; + gpu_metrics->average_mm_activity = metrics.Current.UvdActivity; + + 
gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower; + gpu_metrics->average_cpu_power = metrics.Current.Power[0]; + gpu_metrics->average_soc_power = metrics.Current.Power[1]; + gpu_metrics->average_gfx_power = metrics.Current.Power[2]; + memcpy(&gpu_metrics->average_core_power[0], + &metrics.Average.CorePower[0], + sizeof(uint16_t) * 4); + + gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency; + gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency; + gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency; + gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency; + gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency; + gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency; + + gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency; + gpu_metrics->current_socclk = metrics.Current.SocclkFrequency; + gpu_metrics->current_uclk = metrics.Current.MemclkFrequency; + gpu_metrics->current_fclk = metrics.Current.MemclkFrequency; + gpu_metrics->current_vclk = metrics.Current.VclkFrequency; + gpu_metrics->current_dclk = metrics.Current.DclkFrequency; + + memcpy(&gpu_metrics->current_coreclk[0], + &metrics.Current.CoreFrequency[0], + sizeof(uint16_t) * 4); + gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0]; + + gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus; + + gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); + + *table = (void *)gpu_metrics; + + return sizeof(struct gpu_metrics_v2_1); +} + +static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu, + void **table) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t if_version; + int ret = 0; + + ret = smu_cmn_get_smc_version(smu, &if_version, NULL); + if (ret) { + dev_err(adev->dev, "Failed to get smu if version!\n"); + return ret; + } + + if (if_version < 0x3) + ret = vangogh_get_legacy_gpu_metrics(smu, table); + else + ret = vangogh_get_gpu_metrics(smu, table); + + return ret; +} + static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) { @@ -1876,9 +2182,9 @@ static const struct pptable_funcs vangogh_ppt_funcs = { .set_watermarks_table = vangogh_set_watermarks_table, .set_driver_table_location = smu_v11_0_set_driver_table_location, .interrupt_work = smu_v11_0_interrupt_work, - .get_gpu_metrics = vangogh_get_gpu_metrics, + .get_gpu_metrics = vangogh_common_get_gpu_metrics, .od_edit_dpm_table = vangogh_od_edit_dpm_table, - .print_clk_levels = vangogh_print_clk_levels, + .print_clk_levels = vangogh_common_print_clk_levels, .set_default_dpm_table = vangogh_set_default_dpm_tables, .set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters, .system_features_control = vangogh_system_features_control, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index e3232295f2bf..f43b4c623685 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -1332,6 +1332,7 @@ static const struct pptable_funcs renoir_ppt_funcs = { .gfx_state_change_set = renoir_gfx_state_change_set, .set_fine_grain_gfx_freq_parameters = renoir_set_fine_grain_gfx_freq_parameters, .od_edit_dpm_table = renoir_od_edit_dpm_table, + .get_vbios_bootup_values = smu_v12_0_get_vbios_bootup_values, }; void renoir_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c index 6cc4855c8a37..d60b8c5e8715 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c @@ -27,6 +27,7 @@ #include "amdgpu_smu.h" #include "atomfirmware.h" #include "amdgpu_atomfirmware.h" +#include "amdgpu_atombios.h" #include "smu_v12_0.h" #include "soc15_common.h" #include "atom.h" @@ -278,3 +279,125 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu) return ret; } + +static int smu_v12_0_atom_get_smu_clockinfo(struct amdgpu_device *adev, + uint8_t clk_id, + uint8_t syspll_id, + uint32_t *clk_freq) +{ + struct atom_get_smu_clock_info_parameters_v3_1 input = {0}; + struct atom_get_smu_clock_info_output_parameters_v3_1 *output; + int ret, index; + + input.clk_id = clk_id; + input.syspll_id = syspll_id; + input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ; + index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1, + getsmuclockinfo); + + ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index, + (uint32_t *)&input); + if (ret) + return -EINVAL; + + output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input; + *clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000; + + return 0; +} + +int smu_v12_0_get_vbios_bootup_values(struct smu_context *smu) +{ + int ret, index; + uint16_t size; + uint8_t frev, crev; + struct atom_common_table_header *header; + struct atom_firmware_info_v3_1 *v_3_1; + struct atom_firmware_info_v3_3 *v_3_3; + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + firmwareinfo); + + ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev, + (uint8_t **)&header); + if (ret) + return ret; + + if (header->format_revision != 3) { + dev_err(smu->adev->dev, "unknown atom_firmware_info version! 
for smu12\n"); + return -EINVAL; + } + + switch (header->content_revision) { + case 0: + case 1: + case 2: + v_3_1 = (struct atom_firmware_info_v3_1 *)header; + smu->smu_table.boot_values.revision = v_3_1->firmware_revision; + smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz; + smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz; + smu->smu_table.boot_values.socclk = 0; + smu->smu_table.boot_values.dcefclk = 0; + smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv; + smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv; + smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv; + smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv; + smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id; + smu->smu_table.boot_values.pp_table_id = 0; + smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability; + break; + case 3: + case 4: + default: + v_3_3 = (struct atom_firmware_info_v3_3 *)header; + smu->smu_table.boot_values.revision = v_3_3->firmware_revision; + smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz; + smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz; + smu->smu_table.boot_values.socclk = 0; + smu->smu_table.boot_values.dcefclk = 0; + smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv; + smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv; + smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv; + smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv; + smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id; + smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id; + smu->smu_table.boot_values.firmware_caps = v_3_3->firmware_capability; + } + + smu->smu_table.boot_values.format_revision = header->format_revision; + smu->smu_table.boot_values.content_revision = header->content_revision; + + smu_v12_0_atom_get_smu_clockinfo(smu->adev, + (uint8_t)SMU12_SYSPLL0_SOCCLK_ID, + (uint8_t)SMU12_SYSPLL0_ID, + &smu->smu_table.boot_values.socclk); + + smu_v12_0_atom_get_smu_clockinfo(smu->adev, + (uint8_t)SMU12_SYSPLL1_DCFCLK_ID, + (uint8_t)SMU12_SYSPLL1_ID, + &smu->smu_table.boot_values.dcefclk); + + smu_v12_0_atom_get_smu_clockinfo(smu->adev, + (uint8_t)SMU12_SYSPLL0_VCLK_ID, + (uint8_t)SMU12_SYSPLL0_ID, + &smu->smu_table.boot_values.vclk); + + smu_v12_0_atom_get_smu_clockinfo(smu->adev, + (uint8_t)SMU12_SYSPLL0_DCLK_ID, + (uint8_t)SMU12_SYSPLL0_ID, + &smu->smu_table.boot_values.dclk); + + if ((smu->smu_table.boot_values.format_revision == 3) && + (smu->smu_table.boot_values.content_revision >= 2)) + smu_v12_0_atom_get_smu_clockinfo(smu->adev, + (uint8_t)SMU12_SYSPLL3_0_FCLK_ID, + (uint8_t)SMU12_SYSPLL3_0_ID, + &smu->smu_table.boot_values.fclk); + + smu_v12_0_atom_get_smu_clockinfo(smu->adev, + (uint8_t)SMU12_SYSPLL0_LCLK_ID, + (uint8_t)SMU12_SYSPLL0_ID, + &smu->smu_table.boot_values.lclk); + + return 0; +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index bca02a9fb489..dcbe3a72da09 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -78,6 +78,8 @@ #define smnPCIE_ESM_CTRL 0x111003D0 +#define CLOCK_VALID (1 << 31) + static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), @@ -405,6 +407,9 @@ static int aldebaran_setup_pptable(struct smu_context *smu) { int ret = 0; + /* VBIOS pptable is the first 
choice */ + smu->smu_table.boot_values.pp_table_id = 0; + ret = smu_v13_0_setup_pptable(smu); if (ret) return ret; @@ -670,6 +675,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu, struct smu_13_0_dpm_context *dpm_context = NULL; uint32_t display_levels; uint32_t freq_values[3] = {0}; + uint32_t min_clk, max_clk; if (amdgpu_ras_intr_triggered()) return snprintf(buf, PAGE_SIZE, "unavailable\n"); @@ -697,12 +703,20 @@ static int aldebaran_print_clk_levels(struct smu_context *smu, display_levels = clocks.num_levels; + min_clk = smu->gfx_actual_hard_min_freq & CLOCK_VALID ? + smu->gfx_actual_hard_min_freq & ~CLOCK_VALID : + single_dpm_table->dpm_levels[0].value; + max_clk = smu->gfx_actual_soft_max_freq & CLOCK_VALID ? + smu->gfx_actual_soft_max_freq & ~CLOCK_VALID : + single_dpm_table->dpm_levels[1].value; + + freq_values[0] = min_clk; + freq_values[1] = max_clk; + /* fine-grained dpm has only 2 levels */ - if (now > single_dpm_table->dpm_levels[0].value && - now < single_dpm_table->dpm_levels[1].value) { + if (now > min_clk && now < max_clk) { display_levels = clocks.num_levels + 1; - freq_values[0] = single_dpm_table->dpm_levels[0].value; - freq_values[2] = single_dpm_table->dpm_levels[1].value; + freq_values[2] = max_clk; freq_values[1] = now; } @@ -712,12 +726,15 @@ static int aldebaran_print_clk_levels(struct smu_context *smu, */ if (display_levels == clocks.num_levels) { for (i = 0; i < clocks.num_levels; i++) - size += sprintf(buf + size, "%d: %uMhz %s\n", i, - clocks.data[i].clocks_in_khz / 1000, - (clocks.num_levels == 1) ? "*" : + size += sprintf( + buf + size, "%d: %uMhz %s\n", i, + freq_values[i], + (clocks.num_levels == 1) ? + "*" : (aldebaran_freqs_in_same_level( - clocks.data[i].clocks_in_khz / 1000, - now) ? "*" : "")); + freq_values[i], now) ? 
+ "*" : + "")); } else { for (i = 0; i < display_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", i, @@ -1117,6 +1134,9 @@ static int aldebaran_set_performance_level(struct smu_context *smu, && (level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL); + /* Reset user min/max gfx clock */ + smu->gfx_actual_hard_min_freq = 0; + smu->gfx_actual_soft_max_freq = 0; switch (level) { @@ -1158,7 +1178,14 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu, if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { min_clk = max(min, dpm_context->dpm_tables.gfx_table.min); max_clk = min(max, dpm_context->dpm_tables.gfx_table.max); - return smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk); + ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, + min_clk, max_clk); + + if (!ret) { + smu->gfx_actual_hard_min_freq = min_clk | CLOCK_VALID; + smu->gfx_actual_soft_max_freq = max_clk | CLOCK_VALID; + } + return ret; } if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { @@ -1178,9 +1205,15 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu, ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableDeterminism, max, NULL); - if (ret) + if (ret) { dev_err(adev->dev, "Failed to enable determinism at GFX clock %d MHz\n", max); + } else { + smu->gfx_actual_hard_min_freq = + min_clk | CLOCK_VALID; + smu->gfx_actual_soft_max_freq = + max | CLOCK_VALID; + } } } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 30c9ac635105..0864083e7435 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -276,8 +276,6 @@ int smu_v13_0_setup_pptable(struct smu_context *smu) void *table; uint16_t version_major, version_minor; - /* temporarily hardcode to use vbios pptable */ - smu->smu_table.boot_values.pp_table_id = 0; if (amdgpu_smu_pptable_id >= 0) { smu->smu_table.boot_values.pp_table_id = amdgpu_smu_pptable_id; |