Diffstat (limited to 'drivers/gpu/drm/amd/powerplay/amdgpu_smu.c')
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 288
1 file changed, 245 insertions, 43 deletions
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index e8b27fab6aa1..88b4e5642302 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -62,6 +62,7 @@ const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask
 size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
 {
+        struct amdgpu_device *adev = smu->adev;
         size_t size = 0;
         int ret = 0, i = 0;
         uint32_t feature_mask[2] = { 0 };
@@ -70,6 +71,9 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
         uint32_t sort_feature[SMU_FEATURE_COUNT];
         uint64_t hw_feature_count = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
@@ -110,9 +114,6 @@ static int smu_feature_update_enable_state(struct smu_context *smu,
         uint32_t feature_low = 0, feature_high = 0;
         int ret = 0;
 
-        if (!smu->pm_enabled)
-                return ret;
-
         feature_low = (feature_mask >> 0 ) & 0xffffffff;
         feature_high = (feature_mask >> 32) & 0xffffffff;
 
@@ -155,6 +156,10 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
         uint64_t feature_2_enabled = 0;
         uint64_t feature_2_disabled = 0;
         uint64_t feature_enables = 0;
+        struct amdgpu_device *adev = smu->adev;
+
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
 
         mutex_lock(&smu->mutex);
 
@@ -191,16 +196,31 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
         if (!if_version && !smu_version)
                 return -EINVAL;
 
+        if (smu->smc_fw_if_version && smu->smc_fw_version)
+        {
+                if (if_version)
+                        *if_version = smu->smc_fw_if_version;
+
+                if (smu_version)
+                        *smu_version = smu->smc_fw_version;
+
+                return 0;
+        }
+
         if (if_version) {
                 ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
                 if (ret)
                         return ret;
+
+                smu->smc_fw_if_version = *if_version;
         }
 
         if (smu_version) {
                 ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
                 if (ret)
                         return ret;
+
+                smu->smc_fw_version = *smu_version;
         }
 
         return ret;
@@ -417,8 +437,12 @@ bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
                            bool gate)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         switch (block_type) {
         case AMD_IP_BLOCK_TYPE_UVD:
                 ret = smu_dpm_set_uvd_enable(smu, !gate);
@@ -547,7 +571,10 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
         if (adev->asic_type == CHIP_VEGA20)
                 return (amdgpu_dpm == 2) ? true : false;
         else if (adev->asic_type >= CHIP_ARCTURUS) {
-                if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
+                if (amdgpu_sriov_vf(adev) &&
+                    !(adev->asic_type == CHIP_ARCTURUS &&
+                      amdgpu_sriov_is_pp_one_vf(adev)))
+
                         return false;
                 else
                         return true;
@@ -569,8 +596,12 @@ bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
 {
         struct smu_table_context *smu_table = &smu->smu_table;
+        struct amdgpu_device *adev = smu->adev;
         uint32_t powerplay_table_size;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
                 return -EINVAL;
 
@@ -591,11 +622,13 @@ int smu_sys_get_pp_table(struct smu_context *smu, void **table)
 int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
 {
         struct smu_table_context *smu_table = &smu->smu_table;
+        struct amdgpu_device *adev = smu->adev;
         ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
         int ret = 0;
 
-        if (!smu->pm_enabled)
+        if (!adev->pm.dpm_enabled)
                 return -EINVAL;
+
         if (header->usStructureSize != size) {
                 pr_err("pp table size not matched !\n");
                 return -EIO;
@@ -636,8 +669,6 @@ int smu_feature_init_dpm(struct smu_context *smu)
         int ret = 0;
         uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
 
-        if (!smu->pm_enabled)
-                return ret;
         mutex_lock(&feature->mutex);
         bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
         mutex_unlock(&feature->mutex);
@@ -932,13 +963,6 @@ static int smu_sw_init(void *handle)
                 return ret;
         }
 
-        if (adev->smu.ppt_funcs->i2c_eeprom_init) {
-                ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
-
-                if (ret)
-                        return ret;
-        }
-
         return 0;
 }
 
@@ -948,9 +972,6 @@ static int smu_sw_fini(void *handle)
         struct smu_context *smu = &adev->smu;
         int ret;
 
-        if (adev->smu.ppt_funcs->i2c_eeprom_fini)
-                smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
-
         kfree(smu->irq_source);
         smu->irq_source = NULL;
 
@@ -1366,10 +1387,11 @@ static int smu_hw_init(void *handle)
         if (ret)
                 goto failed;
 
-        if (!smu->pm_enabled)
-                adev->pm.dpm_enabled = false;
-        else
-                adev->pm.dpm_enabled = true;    /* TODO: will set dpm_enabled flag while VCN and DAL DPM is workable */
+        ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
+        if (ret)
+                goto failed;
+
+        adev->pm.dpm_enabled = true;
 
         pr_info("SMU is initialized successfully!\n");
 
@@ -1403,6 +1425,10 @@ static int smu_hw_fini(void *handle)
         if (!smu->pm_enabled)
                 return 0;
 
+        adev->pm.dpm_enabled = false;
+
+        smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
+
         if (!amdgpu_sriov_vf(adev)){
                 ret = smu_stop_thermal_control(smu);
                 if (ret) {
@@ -1542,6 +1568,10 @@ static int smu_suspend(void *handle)
         if (!smu->pm_enabled)
                 return 0;
 
+        adev->pm.dpm_enabled = false;
+
+        smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
+
         if(!amdgpu_sriov_vf(adev)) {
                 ret = smu_disable_dpm(smu);
                 if (ret)
@@ -1587,11 +1617,17 @@ static int smu_resume(void *handle)
         if (ret)
                 goto failed;
 
+        ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
+        if (ret)
+                goto failed;
+
         if (smu->is_apu)
                 smu_set_gfx_cgpg(&adev->smu, true);
 
         smu->disable_uclk_switch = 0;
 
+        adev->pm.dpm_enabled = true;
+
         pr_info("SMU is resumed successfully!\n");
 
         return 0;
@@ -1603,10 +1639,14 @@ failed:
 int smu_display_configuration_change(struct smu_context *smu,
                                      const struct amd_pp_display_configuration *display_config)
 {
+        struct amdgpu_device *adev = smu->adev;
         int index = 0;
         int num_of_active_display = 0;
 
-        if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
+        if (!is_support_sw_smu(smu->adev))
                 return -EINVAL;
 
         if (!display_config)
@@ -1668,12 +1708,16 @@ int smu_get_current_clocks(struct smu_context *smu,
                            struct amd_pp_clock_info *clocks)
 {
         struct amd_pp_simple_clock_info simple_clocks = {0};
+        struct amdgpu_device *adev = smu->adev;
         struct smu_clock_info hw_clocks;
         int ret = 0;
 
         if (!is_support_sw_smu(smu->adev))
                 return -EINVAL;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         smu_get_dal_power_level(smu, &simple_clocks);
@@ -1736,7 +1780,7 @@ static int smu_enable_umd_pstate(void *handle,
         struct smu_context *smu = (struct smu_context*)(handle);
         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 
-        if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))
+        if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
                 return -EINVAL;
 
         if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
@@ -1778,9 +1822,6 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
         long workload;
         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 
-        if (!smu->pm_enabled)
-                return -EINVAL;
-
         if (!skip_display_settings) {
                 ret = smu_display_config_changed(smu);
                 if (ret) {
@@ -1831,8 +1872,12 @@ int smu_handle_task(struct smu_context *smu,
                     enum amd_pp_task task_id,
                     bool lock_needed)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         if (lock_needed)
                 mutex_lock(&smu->mutex);
 
@@ -1866,10 +1911,11 @@ int smu_switch_power_profile(struct smu_context *smu,
                              bool en)
 {
         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+        struct amdgpu_device *adev = smu->adev;
         long workload;
         uint32_t index;
 
-        if (!smu->pm_enabled)
+        if (!adev->pm.dpm_enabled)
                 return -EINVAL;
 
         if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
@@ -1900,8 +1946,12 @@ int smu_switch_power_profile(struct smu_context *smu,
 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
 {
         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+        struct amdgpu_device *adev = smu->adev;
         enum amd_dpm_forced_level level;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
                 return -EINVAL;
 
@@ -1915,8 +1965,12 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
 {
         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
                 return -EINVAL;
 
@@ -1939,8 +1993,12 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev
 int smu_set_display_count(struct smu_context *smu, uint32_t count)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
         ret = smu_init_display_count(smu, count);
         mutex_unlock(&smu->mutex);
@@ -1954,8 +2012,12 @@ int smu_force_clk_levels(struct smu_context *smu,
                          bool lock_needed)
 {
         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
                 pr_debug("force clock level is for dpm manual mode only.\n");
                 return -EINVAL;
@@ -1973,20 +2035,19 @@ int smu_force_clk_levels(struct smu_context *smu,
         return ret;
 }
 
+/*
+ * On system suspending or resetting, the dpm_enabled
+ * flag will be cleared. So that those SMU services which
+ * are not supported will be gated.
+ * However, the mp1 state setting should still be granted
+ * even if the dpm_enabled cleared.
+ */
 int smu_set_mp1_state(struct smu_context *smu,
                       enum pp_mp1_state mp1_state)
 {
         uint16_t msg;
         int ret;
 
-        /*
-         * The SMC is not fully ready. That may be
-         * expected as the IP may be masked.
-         * So, just return without error.
-         */
-        if (!smu->pm_enabled)
-                return 0;
-
         mutex_lock(&smu->mutex);
 
         switch (mp1_state) {
@@ -2023,15 +2084,11 @@ int smu_set_mp1_state(struct smu_context *smu,
 int smu_set_df_cstate(struct smu_context *smu,
                       enum pp_df_cstate state)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
-        /*
-         * The SMC is not fully ready. That may be
-         * expected as the IP may be masked.
-         * So, just return without error.
-         */
-        if (!smu->pm_enabled)
-                return 0;
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
 
         if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
                 return 0;
@@ -2065,6 +2122,10 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
                 struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
 {
         void *table = smu->smu_table.watermarks_table;
+        struct amdgpu_device *adev = smu->adev;
+
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
 
         if (!table)
                 return -EINVAL;
@@ -2089,8 +2150,12 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
 
 int smu_set_ac_dc(struct smu_context *smu)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         /* controlled by firmware */
         if (smu->dc_controlled_by_gpio)
                 return 0;
@@ -2149,8 +2214,12 @@ const struct amdgpu_ip_block_version smu_v12_0_ip_block =
 
 int smu_load_microcode(struct smu_context *smu)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->load_microcode)
@@ -2163,8 +2232,12 @@ int smu_load_microcode(struct smu_context *smu)
 
 int smu_check_fw_status(struct smu_context *smu)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->check_fw_status)
@@ -2191,8 +2264,12 @@ int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
 
 int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->set_fan_speed_rpm)
@@ -2208,10 +2285,15 @@ int smu_get_power_limit(struct smu_context *smu,
                         bool def,
                         bool lock_needed)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
-        if (lock_needed)
+        if (lock_needed) {
+                if (!adev->pm.dpm_enabled)
+                        return -EINVAL;
+
                 mutex_lock(&smu->mutex);
+        }
 
         if (smu->ppt_funcs->get_power_limit)
                 ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
@@ -2224,8 +2306,12 @@ int smu_get_power_limit(struct smu_context *smu,
 
 int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->set_power_limit)
@@ -2238,8 +2324,12 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
 
 int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->print_clk_levels)
@@ -2252,8 +2342,12 @@ int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, ch
 
 int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->get_od_percentage)
@@ -2266,8 +2360,12 @@ int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
 
 int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->set_od_percentage)
@@ -2282,8 +2380,12 @@ int smu_od_edit_dpm_table(struct smu_context *smu,
                           enum PP_OD_DPM_TABLE_COMMAND type,
                           long *input, uint32_t size)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->od_edit_dpm_table)
@@ -2298,8 +2400,12 @@ int smu_read_sensor(struct smu_context *smu,
                     enum amd_pp_sensors sensor,
                     void *data, uint32_t *size)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->read_sensor)
@@ -2312,8 +2418,12 @@ int smu_read_sensor(struct smu_context *smu,
 
 int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->get_power_profile_mode)
@@ -2329,8 +2439,12 @@ int smu_set_power_profile_mode(struct smu_context *smu,
                                uint32_t param_size,
                                bool lock_needed)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         if (lock_needed)
                 mutex_lock(&smu->mutex);
 
@@ -2346,8 +2460,12 @@ int smu_set_power_profile_mode(struct smu_context *smu,
 
 int smu_get_fan_control_mode(struct smu_context *smu)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->get_fan_control_mode)
@@ -2360,8 +2478,12 @@ int smu_get_fan_control_mode(struct smu_context *smu)
 
 int smu_set_fan_control_mode(struct smu_context *smu, int value)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->set_fan_control_mode)
@@ -2374,8 +2496,12 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
 
 int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->get_fan_speed_percent)
@@ -2388,8 +2514,12 @@ int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
 
 int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->set_fan_speed_percent)
@@ -2402,8 +2532,12 @@ int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
 
 int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->get_fan_speed_rpm)
@@ -2416,8 +2550,12 @@ int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
 
 int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->set_deep_sleep_dcefclk)
@@ -2430,8 +2568,12 @@ int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
 
 int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         if (smu->ppt_funcs->set_active_display_count)
                 ret = smu->ppt_funcs->set_active_display_count(smu, count);
 
@@ -2442,8 +2584,12 @@ int smu_get_clock_by_type(struct smu_context *smu,
                           enum amd_pp_clock_type type,
                           struct amd_pp_clocks *clocks)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->get_clock_by_type)
@@ -2457,8 +2603,12 @@ int smu_get_clock_by_type(struct smu_context *smu,
 
 int smu_get_max_high_clocks(struct smu_context *smu, struct amd_pp_simple_clock_info *clocks)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->get_max_high_clocks)
@@ -2473,8 +2623,12 @@ int smu_get_clock_by_type_with_latency(struct smu_context *smu,
                                        enum smu_clk_type clk_type,
                                        struct pp_clock_levels_with_latency *clocks)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->get_clock_by_type_with_latency)
@@ -2489,8 +2643,12 @@ int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
                                        enum amd_pp_clock_type type,
                                        struct pp_clock_levels_with_voltage *clocks)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->get_clock_by_type_with_voltage)
@@ -2505,8 +2663,12 @@ int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
 int smu_display_clock_voltage_request(struct smu_context *smu,
                                       struct pp_display_clock_request *clock_req)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->display_clock_voltage_request)
@@ -2520,8 +2682,12 @@ int smu_display_clock_voltage_request(struct smu_context *smu,
 
 int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = -EINVAL;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->display_disable_memory_clock_switch)
@@ -2534,8 +2700,12 @@ int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disabl
 
 int smu_notify_smu_enable_pwe(struct smu_context *smu)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->notify_smu_enable_pwe)
@@ -2549,8 +2719,12 @@ int smu_notify_smu_enable_pwe(struct smu_context *smu)
 int smu_set_xgmi_pstate(struct smu_context *smu,
                         uint32_t pstate)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->set_xgmi_pstate)
@@ -2563,8 +2737,12 @@ int smu_set_xgmi_pstate(struct smu_context *smu,
 
 int smu_set_azalia_d3_pme(struct smu_context *smu)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->set_azalia_d3_pme)
@@ -2575,6 +2753,14 @@ int smu_set_azalia_d3_pme(struct smu_context *smu)
         return ret;
 }
 
+/*
+ * On system suspending or resetting, the dpm_enabled
+ * flag will be cleared. So that those SMU services which
+ * are not supported will be gated.
+ *
+ * However, the baco/mode1 reset should still be granted
+ * as they are still supported and necessary.
+ */
 bool smu_baco_is_support(struct smu_context *smu)
 {
         bool ret = false;
@@ -2646,8 +2832,12 @@ int smu_mode2_reset(struct smu_context *smu)
 int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
                                          struct pp_smu_nv_clock_table *max_clocks)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
@@ -2662,8 +2852,12 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
                             unsigned int *clock_values_in_khz,
                             unsigned int *num_states)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->get_uclk_dpm_states)
@@ -2677,6 +2871,10 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
 enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
 {
         enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
+        struct amdgpu_device *adev = smu->adev;
+
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
 
         mutex_lock(&smu->mutex);
 
@@ -2691,8 +2889,12 @@ enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
 int smu_get_dpm_clock_table(struct smu_context *smu,
                             struct dpm_clocks *clock_table)
 {
+        struct amdgpu_device *adev = smu->adev;
         int ret = 0;
 
+        if (!adev->pm.dpm_enabled)
+                return -EINVAL;
+
         mutex_lock(&smu->mutex);
 
         if (smu->ppt_funcs->get_dpm_clock_table)
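
Note: nearly every hunk above applies the same guard to a top-level SMU entry point. A minimal sketch of that recurring pattern, distilled from the patch (smu_example_op and the example_op callback are hypothetical names used only for illustration; the guard, locking, and dispatch shape follow the hunks above):

        /*
         * Sketch of the guard pattern this patch adds to each top-level SMU
         * service: bail out when DPM is disabled (adev->pm.dpm_enabled is set
         * in smu_hw_init()/smu_resume() and cleared in smu_hw_fini()/
         * smu_suspend()), then dispatch to the ASIC-specific ppt_funcs
         * callback under the SMU mutex.
         */
        int smu_example_op(struct smu_context *smu)
        {
                struct amdgpu_device *adev = smu->adev;
                int ret = 0;

                if (!adev->pm.dpm_enabled)      /* service gated while suspended/resetting */
                        return -EINVAL;

                mutex_lock(&smu->mutex);

                if (smu->ppt_funcs->example_op) /* hypothetical callback, for illustration */
                        ret = smu->ppt_funcs->example_op(smu);

                mutex_unlock(&smu->mutex);

                return ret;
        }

Per the comments added in the patch, smu_set_mp1_state() and the baco/mode1 reset paths intentionally omit this guard: they must remain available even while dpm_enabled is cleared.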